From 8e850dbf3876476c4260fe96d6dce85b710c82e8 Mon Sep 17 00:00:00 2001 From: Peterpan828 Date: Tue, 5 Sep 2023 22:11:52 +0900 Subject: [PATCH 01/51] initial commit --- baselines/depthfl/.gitignore | 1 + baselines/depthfl/EXTENDED_README.md | 123 +++++++ baselines/depthfl/LICENSE | 202 +++++++++++ baselines/depthfl/README.md | 97 +++++ baselines/depthfl/depthfl.sh | 23 ++ baselines/depthfl/depthfl/__init__.py | 4 + baselines/depthfl/depthfl/client.py | 269 ++++++++++++++ baselines/depthfl/depthfl/conf/config.yaml | 46 +++ baselines/depthfl/depthfl/conf/heterofl.yaml | 46 +++ baselines/depthfl/depthfl/dataset.py | 59 +++ .../depthfl/depthfl/dataset_preparation.py | 77 ++++ baselines/depthfl/depthfl/main.py | 145 ++++++++ baselines/depthfl/depthfl/models.py | 266 ++++++++++++++ baselines/depthfl/depthfl/ray_client_proxy.py | 49 +++ baselines/depthfl/depthfl/resnet.py | 339 ++++++++++++++++++ baselines/depthfl/depthfl/resnet_hetero.py | 205 +++++++++++ baselines/depthfl/depthfl/server.py | 251 +++++++++++++ baselines/depthfl/depthfl/simulation.py | 203 +++++++++++ baselines/depthfl/depthfl/strategy.py | 155 ++++++++ baselines/depthfl/depthfl/strategy_hetero.py | 136 +++++++ baselines/depthfl/depthfl/typing.py | 46 +++ baselines/depthfl/depthfl/utils.py | 111 ++++++ .../2023-09-04/22-24-33/.hydra/config.yaml | 35 ++ .../2023-09-04/22-24-33/.hydra/hydra.yaml | 154 ++++++++ .../2023-09-04/22-24-33/.hydra/overrides.yaml | 1 + ...ics_HeteroFL_iid_C=100_B=50_E=5_R=1000.png | Bin 0 -> 27604 bytes .../2023-09-05/06-03-04/.hydra/config.yaml | 35 ++ .../2023-09-05/06-03-04/.hydra/hydra.yaml | 157 ++++++++ .../2023-09-05/06-03-04/.hydra/overrides.yaml | 3 + ...ics_HeteroFL_iid_C=100_B=50_E=5_R=1000.png | Bin 0 -> 30212 bytes .../2023-09-05/12-21-24/.hydra/config.yaml | 35 ++ .../2023-09-05/12-21-24/.hydra/hydra.yaml | 157 ++++++++ .../2023-09-05/12-21-24/.hydra/overrides.yaml | 3 + ...rics_HeteroFL_iid_C=75_B=50_E=5_R=1000.png | Bin 0 -> 31063 bytes .../2023-09-05/17-39-22/.hydra/config.yaml | 35 ++ .../2023-09-05/17-39-22/.hydra/hydra.yaml | 157 ++++++++ .../2023-09-05/17-39-22/.hydra/overrides.yaml | 3 + baselines/depthfl/pyproject.toml | 136 +++++++ 38 files changed, 3764 insertions(+) create mode 100644 baselines/depthfl/.gitignore create mode 100644 baselines/depthfl/EXTENDED_README.md create mode 100644 baselines/depthfl/LICENSE create mode 100644 baselines/depthfl/README.md create mode 100755 baselines/depthfl/depthfl.sh create mode 100644 baselines/depthfl/depthfl/__init__.py create mode 100644 baselines/depthfl/depthfl/client.py create mode 100644 baselines/depthfl/depthfl/conf/config.yaml create mode 100644 baselines/depthfl/depthfl/conf/heterofl.yaml create mode 100644 baselines/depthfl/depthfl/dataset.py create mode 100644 baselines/depthfl/depthfl/dataset_preparation.py create mode 100644 baselines/depthfl/depthfl/main.py create mode 100644 baselines/depthfl/depthfl/models.py create mode 100644 baselines/depthfl/depthfl/ray_client_proxy.py create mode 100644 baselines/depthfl/depthfl/resnet.py create mode 100644 baselines/depthfl/depthfl/resnet_hetero.py create mode 100644 baselines/depthfl/depthfl/server.py create mode 100644 baselines/depthfl/depthfl/simulation.py create mode 100644 baselines/depthfl/depthfl/strategy.py create mode 100644 baselines/depthfl/depthfl/strategy_hetero.py create mode 100644 baselines/depthfl/depthfl/typing.py create mode 100644 baselines/depthfl/depthfl/utils.py create mode 100644 baselines/depthfl/outputs/2023-09-04/22-24-33/.hydra/config.yaml create mode 
100644 baselines/depthfl/outputs/2023-09-04/22-24-33/.hydra/hydra.yaml
 create mode 100644 baselines/depthfl/outputs/2023-09-04/22-24-33/.hydra/overrides.yaml
 create mode 100644 baselines/depthfl/outputs/2023-09-04/22-24-33/centralized_metrics_HeteroFL_iid_C=100_B=50_E=5_R=1000.png
 create mode 100644 baselines/depthfl/outputs/2023-09-05/06-03-04/.hydra/config.yaml
 create mode 100644 baselines/depthfl/outputs/2023-09-05/06-03-04/.hydra/hydra.yaml
 create mode 100644 baselines/depthfl/outputs/2023-09-05/06-03-04/.hydra/overrides.yaml
 create mode 100644 baselines/depthfl/outputs/2023-09-05/06-03-04/centralized_metrics_HeteroFL_iid_C=100_B=50_E=5_R=1000.png
 create mode 100644 baselines/depthfl/outputs/2023-09-05/12-21-24/.hydra/config.yaml
 create mode 100644 baselines/depthfl/outputs/2023-09-05/12-21-24/.hydra/hydra.yaml
 create mode 100644 baselines/depthfl/outputs/2023-09-05/12-21-24/.hydra/overrides.yaml
 create mode 100644 baselines/depthfl/outputs/2023-09-05/12-21-24/centralized_metrics_HeteroFL_iid_C=75_B=50_E=5_R=1000.png
 create mode 100644 baselines/depthfl/outputs/2023-09-05/17-39-22/.hydra/config.yaml
 create mode 100644 baselines/depthfl/outputs/2023-09-05/17-39-22/.hydra/hydra.yaml
 create mode 100644 baselines/depthfl/outputs/2023-09-05/17-39-22/.hydra/overrides.yaml
 create mode 100644 baselines/depthfl/pyproject.toml
diff --git a/baselines/depthfl/.gitignore b/baselines/depthfl/.gitignore
new file mode 100644
index 000000000000..93db21baf618
--- /dev/null
+++ b/baselines/depthfl/.gitignore
@@ -0,0 +1 @@
+dataset/
diff --git a/baselines/depthfl/EXTENDED_README.md b/baselines/depthfl/EXTENDED_README.md
new file mode 100644
index 000000000000..9c8f5bc72fa9
--- /dev/null
+++ b/baselines/depthfl/EXTENDED_README.md
@@ -0,0 +1,123 @@
+
+# Extended Readme
+
+> The baselines are expected to run on a machine running Ubuntu 22.04
+
+While `README.md` should include information about the baseline you implement and how to run it, this _extended_ readme provides info on the expected directory structure for a new baseline and, more generally, the instructions to follow before your baseline can be merged into the Flower repository. Please follow these instructions closely. It is likely that you have already completed steps 1-2.
+
+1. Fork the Flower repository and clone it.
+2. Navigate to the `baselines/` directory and from there run:
+    ```bash
+    # This will create a new directory with the same structure as this `baseline_template` directory.
+    ./dev/create-baseline.sh <baseline-name>
+    ```
+3. All your code and configs should go into a sub-directory with the same name as the name of your baseline.
+    * The sub-directory contains a series of Python scripts that you can edit. Please stick to these files and consult with us if you need additional ones.
+    * There is also a basic config structure in `<baseline-name>/conf` ready to be parsed by [Hydra](https://hydra.cc/) when executing your `main.py`.
+4. Therefore, the directory structure in your baseline should look like:
+    ```bash
+    baselines/<baseline-name>
+    ├── README.md # describes your baseline and everything needed to use it
+    ├── EXTENDED_README.md # to remove before creating your PR
+    ├── pyproject.toml # details your Python environment
+    └── <baseline-name>
+        ├── *.py # several .py files including main.py and __init__.py
+        └── conf
+            └── *.yaml # one or more Hydra config files
+
+    ```
+> :warning: Make sure the variable `name` in `pyproject.toml` is set to the name of the sub-directory containing all your code.
+
+5. Add your dependencies to the `pyproject.toml` (see below a few examples on how to do it). Read more about Poetry below in this `EXTENDED_README.md`.
+6. Regularly check that your coding style and the documentation you add follow good coding practices. To test whether your code meets the requirements, please run the following:
+    ```bash
+    # After activating your environment and from your baseline's directory
+    cd .. # to go to the top-level directory of all baselines
+    ./dev/test-baseline.sh <baseline-name>
+    ./dev/test-baseline-structure.sh <baseline-name>
+    ```
+    Both `test-baseline.sh` and `test-baseline-structure.sh` will also be automatically run when you create a PR, and both tests need to pass for the baseline to be merged.
+    To automatically solve some formatting issues and apply easy fixes, please run the formatting script:
+    ```bash
+    # After activating your environment and from your baseline's directory
+    cd .. # to go to the top-level directory of all baselines
+    ./dev/format-baseline.sh <baseline-name>
+    ```
+7. Ensure that the Python environment for your baseline can be created without errors by simply running `poetry install` and that this is properly described later when you complete the `Environment Setup` section in `README.md`. This is especially important if your environment requires additional steps after doing `poetry install`.
+8. Ensure that your baseline runs with default arguments by running `poetry run python -m <baseline-name>.main`. Then, describe this and other forms of running your code in the `Running the Experiments` section in `README.md`.
+9. Once your code is ready and you have checked:
+    * that following the instructions in your `README.md` the Python environment can be created correctly
+
+    * that running the code following your instructions can reproduce the experiments in the paper
+
+   then you just need to create a Pull Request (PR) to kickstart the process of merging your baseline into the Flower repository.
+
+> Once you are happy to merge your baseline contribution, please delete this `EXTENDED_README.md` file.
+
+
+## About Poetry
+
+We use Poetry to manage the Python environment for each individual baseline. You can follow the instructions [here](https://python-poetry.org/docs/) to install Poetry on your machine.
+
+
+### Specifying a Python Version (optional)
+By default, Poetry will use the Python version in your system. In some settings, you might want to specify a particular version of Python to use inside your Poetry environment. You can do so with [`pyenv`](https://github.com/pyenv/pyenv). Check the documentation for the different ways of installing `pyenv`, but one easy way is using the [automatic installer](https://github.com/pyenv/pyenv-installer):
+```bash
+curl https://pyenv.run | bash # then, don't forget to add the suggested lines to your .bashrc/.zshrc
+```
+
+You can then install any Python version with `pyenv install <python-version>` (e.g. `pyenv install 3.9.17`). Then, in order to use that version for your baseline, you'd do the following:
+
+```bash
+# cd to your baseline directory (i.e. where the `pyproject.toml` is)
+pyenv local <python-version>
+
+# set that version for poetry
+poetry env use <python-version>
+
+# then you can install your Poetry environment (see the next step)
+```
+
+### Installing Your Environment
+With the Poetry tool already installed, you can create an environment for this baseline with the following commands:
+```bash
+# run this from the same directory as the `pyproject.toml` file
+poetry install
+```
+
+This will create a basic Python environment with just Flower and additional packages, including those needed for simulation. Next, you should add the dependencies for your code. It is **critical** that you fix the version of the packages you use with an exact pin (`==`) rather than Poetry's default caret constraint (`^`). You can do so via [`poetry add`](https://python-poetry.org/docs/cli/#add). Below are some examples:
+
+```bash
+# For instance, if you want to install tqdm
+poetry add tqdm==4.65.0
+
+# If you already have a requirements.txt, you can add all those packages (but ensure you have fixed the version) in one go as follows:
+poetry add $( cat requirements.txt )
+```
+With each `poetry add` command, the `pyproject.toml` gets automatically updated so you don't need to keep that `requirements.txt` as part of this baseline.
+
+
+More critical, however, is adding your ML framework of choice to the list of dependencies. For some frameworks you might be able to do so with the `poetry add` command. Check [the Poetry documentation](https://python-poetry.org/docs/cli/#add) for how to add packages in various ways. For instance, let's say you want to use PyTorch:
+
+```bash
+# with plain `pip` you'd run a command such as:
+pip install torch==1.13.1+cu117 torchvision==0.14.1+cu117 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu117
+
+# to add the same 3 dependencies to your Poetry environment you'd need to add the URL to the wheel that the above pip command auto-resolves for you.
+# You can find those wheels in `https://download.pytorch.org/whl/cu117`. Copy the link and paste it after the `poetry add` command.
+# For instance, to add `torch==1.13.1+cu117` on an x86 Linux system with Python 3.8 you'd run:
poetry add https://download.pytorch.org/whl/cu117/torch-1.13.1%2Bcu117-cp38-cp38-linux_x86_64.whl
+# you'll need to repeat this for both `torchvision` and `torchaudio`
+```
+The above is just an example of how you can add these dependencies. Please refer to the Poetry documentation for further reference.
+
+If all attempts fail, you can still install packages via standard `pip`. You'd first need to source/activate your Poetry environment.
+```bash
+# first ensure you have created your environment
+# and installed the base packages provided in the template
+poetry install
+
+# then activate it
+poetry shell
+```
+Now you are inside your environment (pretty much as when you use `virtualenv` or `conda`) so you can install further packages with `pip`. Please note that, unlike with `poetry add`, these extra requirements won't be captured by `pyproject.toml`. Therefore, please ensure that you provide all instructions needed to: (1) create the base environment with Poetry and (2) install any additional dependencies via `pip` when you complete your `README.md`.
\ No newline at end of file
diff --git a/baselines/depthfl/LICENSE b/baselines/depthfl/LICENSE
new file mode 100644
index 000000000000..d64569567334
--- /dev/null
+++ b/baselines/depthfl/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity.
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/baselines/depthfl/README.md b/baselines/depthfl/README.md
new file mode 100644
index 000000000000..3a996b6deb39
--- /dev/null
+++ b/baselines/depthfl/README.md
@@ -0,0 +1,97 @@
+---
+title: DepthFL: Depthwise Federated Learning for Heterogeneous Clients
+url: https://openreview.net/forum?id=pf8RIZTMU58
+labels: [image classification, cross-device, system heterogeneity] # please add between 4 and 10 single-word (maybe two-words) labels (e.g. "system heterogeneity", "image classification", "asynchronous", "weight sharing", "cross-silo")
+dataset: [CIFAR100] # list of datasets you include in your baseline
+---
+
+# DepthFL: Depthwise Federated Learning for Heterogeneous Clients
+
+> Note: If you use this baseline in your work, please remember to cite the original authors of the paper as well as the Flower paper.
+
+**Paper:** https://openreview.net/forum?id=pf8RIZTMU58
+
+**Authors:** Minjae Kim, Sangyoon Yu, Suhyun Kim, Soo-Mook Moon
+
+**Abstract:** Federated learning is for training a global model without collecting private local data from clients. As they repeatedly need to upload locally-updated weights or gradients instead, clients require both computation and communication resources enough to participate in learning, but in reality their resources are heterogeneous. To enable resource-constrained clients to train smaller local models, width scaling techniques have been used, which reduces the channels of a global model. Unfortunately, width scaling suffers from heterogeneity of local models when averaging them, leading to a lower accuracy than when simply excluding resource-constrained clients from training. This paper proposes a new approach based on depth scaling called DepthFL. DepthFL defines local models of different depths by pruning the deepest layers off the global model, and allocates them to clients depending on their available resources. Since many clients do not have enough resources to train deep local models, this would make deep layers partially-trained with insufficient data, unlike shallow layers that are fully trained. DepthFL alleviates this problem by mutual self-distillation of knowledge among the classifiers of various depths within a local model. Our experiments show that depth-scaled local models build a global model better than width-scaled ones, and that self-distillation is highly effective in training data-insufficient deep layers.
+
+
+## About this baseline
+
+**What’s implemented:** The code in this directory replicates the experiments in *DepthFL: Depthwise Federated Learning for Heterogeneous Clients* (Kim et al., 2023) for CIFAR100, which proposed the DepthFL algorithm. Concretely, it replicates the CIFAR100 results in Tables 2, 3, and 4.
+
+**Datasets:** CIFAR100 from PyTorch's Torchvision
+
+**Hardware Setup:** These experiments were run on a server with Nvidia 3090 GPUs. Any machine with at least one 8GB GPU should be able to run the experiments in a reasonable amount of time.
+
+**Contributors:** Minjae Kim
+
+
+## Experimental Setup
+
+**Task:** Image Classification
+
+**Model:** ResNet18 with additional bottleneck layers
+
+**Dataset:** This baseline only includes the CIFAR100 dataset. By default it will be partitioned into 100 clients following an IID distribution.
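+
+For intuition, the IID split implemented in `dataset_preparation.py` (later in this patch) draws an equal-sized, disjoint random subset of CIFAR100 for every client. Below is a minimal sketch of that logic; the function name is illustrative, not part of the codebase:
+
+```python
+import numpy as np
+from torch.utils.data import Dataset, Subset
+
+
+def iid_partition(trainset: Dataset, num_clients: int, seed: int = 41):
+    """Split `trainset` into `num_clients` equal-sized, disjoint IID subsets."""
+    np.random.seed(seed)
+    num_sample = len(trainset) // num_clients
+    index = list(range(len(trainset)))
+    datasets = []
+    for _ in range(num_clients):
+        # sample without replacement from the indices not yet assigned
+        sample_idx = np.random.choice(index, num_sample, replace=False)
+        index = list(set(index) - set(sample_idx))
+        datasets.append(Subset(trainset, sample_idx))
+    return datasets
+```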
The settings are as follows:
+
+| Dataset | #classes | #partitions | partitioning method |
+| :------ | :---: | :---: | :---: |
+| CIFAR100 | 100 | 100 | IID |
+
+**Training Hyperparameters:**
+The following table shows the main hyperparameters for this baseline with their default value (i.e. the value used if you run `python -m depthfl.main` directly; see `depthfl/conf/config.yaml`):
+
+| Description | Default Value |
+| ----------- | ----- |
+| total clients | 100 |
+| clients per round | 10 (participation ratio 0.1) |
+| number of rounds | 1000 |
+| local epochs | 5 |
+| batch size | 50 |
+| client resources | {'num_cpus': 1, 'num_gpus': 0.5} |
+| data partition | IID |
+| optimizer | SGD, learning rate 0.1, decayed by 0.998 per round |
+| FedDyn alpha | 0.1 |
+| self-distillation | enabled (`fit_config.kd`) |
+
+
+## Environment Setup
+
+:warning: _The Python environment for all baselines should follow these guidelines in the `EXTENDED_README`. Specify the steps to create and activate your environment. If there are any external system-wide requirements, please include instructions for them too. These instructions should be comprehensive enough so anyone can run them (if non standard, describe them step-by-step)._
+
+
+## Running the Experiments
+
+:warning: _Provide instructions on the steps to follow to run all the experiments._
+```bash
+# The main experiment implemented in your baseline using default hyperparameters (that should be setup in the Hydra configs) should run (including dataset download and necessary partitioning) by executing the command:
+
+poetry run python -m depthfl.main # `depthfl` is the name of this directory and that of the only sub-directory in this directory (i.e. where all your source code is)
+
+# If you are using a dataset that requires a complicated download (i.e. not using one natively supported by TF/PyTorch) + preprocessing logic, you might want to tell people to run one script first that will do all that. Please ensure the download + preprocessing can be configured to suit (at least!) a different download directory (and use as default the current directory). The expected command to run to do this is:
+
+poetry run python -m depthfl.dataset_preparation
+
+# It is expected that your baseline supports more than one dataset and different FL settings (e.g. different number of clients, dataset partitioning methods, etc). Please provide a list of commands showing how these experiments are run. Include also a short explanation of what each one does. Here it is expected you'll be using the Hydra syntax to override the default config.
+
+poetry run python -m depthfl.main
+.
+.
+.
+poetry run python -m depthfl.main
+```
+
+
+## Expected Results
+
+:warning: _Your baseline implementation should replicate several of the experiments in the original paper. Please include here the exact command(s) needed to run each of those experiments followed by a figure (e.g. a line plot) or table showing the results you obtained when you ran the code. Below is an example of how you can present this. Please add command followed by results for all your experiments._
+
+```bash
+# it is likely that for one experiment you need to sweep over different hyperparameters. You are encouraged to use Hydra's multirun functionality for this. This is an example of how you could achieve this for some typical FL hyperparameters
+
+poetry run python -m depthfl.main --multirun num_client_per_round=5,10,50 dataset=femnist,cifar10
+# the above command will run a total of 6 individual experiments (because 3 client configs x 2 datasets = 6 -- you can think of it as a grid).
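+
+# For this baseline concretely, the full set of experiments from the paper is scripted in `depthfl.sh` (included in this patch), e.g.:
+#   python -m depthfl.main                                              # DepthFL with FedDyn + self-distillation (default config)
+#   python -m depthfl.main fit_config.feddyn=false fit_config.kd=false  # DepthFL trained with plain FedAvg
+#   python -m depthfl.main --config-name="heterofl"                     # HeteroFL baseline (width scaling)
+#   python -m depthfl.main exclusive_learning=true model_size=1         # exclusive-learning baseline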
+ +[Now show a figure/table displaying the results of the above command] + +# add more commands + plots for additional experiments. +``` diff --git a/baselines/depthfl/depthfl.sh b/baselines/depthfl/depthfl.sh new file mode 100755 index 000000000000..ed6982fef9f1 --- /dev/null +++ b/baselines/depthfl/depthfl.sh @@ -0,0 +1,23 @@ +#! /bin/bash + +python -m depthfl.main --config-name="heterofl" +python -m depthfl.main --config-name="heterofl" exclusive_learning=true model_size=1 model.scale=false +python -m depthfl.main --config-name="heterofl" exclusive_learning=true model_size=2 model.scale=false +python -m depthfl.main --config-name="heterofl" exclusive_learning=true model_size=3 model.scale=false +python -m depthfl.main --config-name="heterofl" exclusive_learning=true model_size=4 model.scale=false + +python -m depthfl.main fit_config.feddyn=false fit_config.kd=false +python -m depthfl.main fit_config.feddyn=false fit_config.kd=false exclusive_learning=true model_size=1 +python -m depthfl.main fit_config.feddyn=false fit_config.kd=false exclusive_learning=true model_size=2 +python -m depthfl.main fit_config.feddyn=false fit_config.kd=false exclusive_learning=true model_size=3 +python -m depthfl.main fit_config.feddyn=false fit_config.kd=false exclusive_learning=true model_size=4 + +python -m depthfl.main +python -m depthfl.main exclusive_learning=true model_size=1 +python -m depthfl.main exclusive_learning=true model_size=2 +python -m depthfl.main exclusive_learning=true model_size=3 +python -m depthfl.main exclusive_learning=true model_size=4 + +python -m depthfl.main fit_config.feddyn=false fit_config.kd=false fit_config.extended=false + +python -m depthfl.main fit_config.kd=false \ No newline at end of file diff --git a/baselines/depthfl/depthfl/__init__.py b/baselines/depthfl/depthfl/__init__.py new file mode 100644 index 000000000000..bd0e1cd09cc0 --- /dev/null +++ b/baselines/depthfl/depthfl/__init__.py @@ -0,0 +1,4 @@ +"""Template baseline package.""" + +from .typing import FitIns as FitIns +from .typing import FitRes as FitRes \ No newline at end of file diff --git a/baselines/depthfl/depthfl/client.py b/baselines/depthfl/depthfl/client.py new file mode 100644 index 000000000000..2d0185dacc2a --- /dev/null +++ b/baselines/depthfl/depthfl/client.py @@ -0,0 +1,269 @@ +"""Defines the DepthFL Flower Client and a function to instantiate it.""" + +import copy +import torch +import numpy as np +import flwr as fl +from collections import OrderedDict +from typing import Callable, Dict, List, Tuple, Union +from hydra.utils import instantiate +from omegaconf import DictConfig +from torch.utils.data import DataLoader + +from flwr.common import ( + ndarrays_to_parameters, + parameters_to_ndarrays, +) + +from flwr.common.typing import NDArrays, Scalar, Status, Code +from flwr.client import Client +from flwr.client.app import ( + numpyclient_has_get_properties, + numpyclient_has_get_parameters, + numpyclient_has_fit, + numpyclient_has_evaluate, + _get_properties, + _get_parameters, + _evaluate, + _constructor, +) +from flwr.client.numpy_client import NumPyClient + +from depthfl.models import test, train +from depthfl import FitIns, FitRes + +EXCEPTION_MESSAGE_WRONG_RETURN_TYPE_FIT = """ +NumPyClient.fit did not return a tuple with 3 elements. 
+The returned values should have the following type signature: + + Tuple[NDArrays, Dict, int] +""" + +ClientLike = Union[Client, NumPyClient] + +def prune(state_dict, param_idx): + """prune width of DNN (for HeteroFL)""" + + ret_dict = {} + for k in state_dict.keys(): + if 'num' not in k: + ret_dict[k] = state_dict[k][torch.meshgrid(param_idx[k])] + else: + ret_dict[k] = state_dict[k] + return copy.deepcopy(ret_dict) + +class FlowerClient( + fl.client.NumPyClient +): # pylint: disable=too-many-instance-attributes + """Standard Flower client for CNN training.""" + + def __init__( + self, + net: torch.nn.Module, + trainloader: DataLoader, + valloader: DataLoader, + device: torch.device, + num_epochs: int, + learning_rate: float, + learning_rate_decay: float, + ): # pylint: disable=too-many-arguments + self.net = net + self.trainloader = trainloader + self.valloader = valloader + self.device = device + self.num_epochs = num_epochs + self.learning_rate = learning_rate + self.learning_rate_decay = learning_rate_decay + self.param_idx = {} + state_dict = net.state_dict() + + # for HeteroFL + for k in state_dict.keys(): + self.param_idx[k] = [torch.arange(size) for size in state_dict[k].shape] # store client's weights' shape (for HeteroFL) + + + def get_parameters(self, config: Dict[str, Scalar]) -> NDArrays: + """Returns the parameters of the current net.""" + return [val.cpu().numpy() for _, val in self.net.state_dict().items()] + + def set_parameters(self, parameters: NDArrays) -> None: + """Changes the parameters of the model using the given ones.""" + params_dict = zip(self.net.state_dict().keys(), parameters) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + self.net.load_state_dict(prune(state_dict, self.param_idx), strict=True) + + def fit( + self, parameters: NDArrays, prev_grads: Dict, config: Dict[str, Scalar] + ) -> Tuple[NDArrays, Dict, int]: + """Implements distributed fit function for a given client.""" + self.set_parameters(parameters) + num_epochs = self.num_epochs + + curr_round = config["curr_round"] - 1 + + # consistency weight for self distillation in DepthFL + CONSISTENCY_WEIGHT = 300 + current = np.clip(curr_round, 0.0, CONSISTENCY_WEIGHT) + phase = 1.0 - current / CONSISTENCY_WEIGHT + consistency_weight = float(np.exp(-5.0 * phase * phase)) + + train( + self.net, + self.trainloader, + self.device, + epochs=num_epochs, + learning_rate=self.learning_rate * self.learning_rate_decay ** curr_round, + feddyn=config["feddyn"], + kd=config["kd"], + consistency_weight=consistency_weight, + prev_grads = prev_grads, + alpha=config["alpha"], + extended=config["extended"], + ) + + return self.get_parameters({}), prev_grads, len(self.trainloader) + + def evaluate( + self, parameters: NDArrays, config: Dict[str, Scalar] + ) -> Tuple[float, int, Dict]: + """Implements distributed evaluation for a given client.""" + self.set_parameters(parameters) + loss, accuracy, accuracy_single = test(self.net, self.valloader, self.device) + return float(loss), len(self.valloader), {"accuracy": float(accuracy), "accuracy_single":accuracy_single} + + +def gen_client_fn( + num_clients: int, + num_rounds: int, + num_epochs: int, + trainloaders: List[DataLoader], + valloaders: List[DataLoader], + learning_rate: float, + learning_rate_decay: float, + models: List[DictConfig], + cfg: DictConfig +) -> Tuple[ + Callable[[str], FlowerClient], DataLoader +]: # pylint: disable=too-many-arguments + """Generates the client function that creates the Flower Clients. 
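+
+    The `models` list is built in `main.py` so that each client id maps to one of
+    four local-model depths (`n_blocks = cid // (num_clients // 4) + 1`), i.e. the
+    four resource tiers used in the DepthFL paper.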
+ + Parameters + ---------- + num_clients : int + The number of clients present in the setup + num_rounds: int + The number of rounds in the experiment. This is used to construct + the scheduling for stragglers + num_epochs : int + The number of local epochs each client should run the training for before + sending it to the server. + trainloaders: List[DataLoader] + A list of DataLoaders, each pointing to the dataset training partition + belonging to a particular client. + valloaders: List[DataLoader] + A list of DataLoaders, each pointing to the dataset validation partition + belonging to a particular client. + learning_rate : float + The learning rate for the SGD optimizer of clients. + learning_rate_decay : float + The learning rate decay ratio per round for the SGD optimizer of clients. + models : List[DictConfig] + A list of DictConfigs, each pointing to the model config of client's local model + + Returns + ------- + Tuple[Callable[[str], FlowerClient], DataLoader] + A tuple containing the client function that creates Flower Clients and + the DataLoader that will be used for testing + """ + + def client_fn(cid: str) -> FlowerClient: + """Create a Flower client representing a single organization.""" + + # Load model + device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") + + # each client gets a different model config (different width / depth) + net = instantiate(models[int(cid)]).to(device) + + # Note: each client gets a different trainloader/valloader, so each client + # will train and evaluate on their own unique data + trainloader = trainloaders[int(cid)] + valloader = valloaders[int(cid)] + + return FlowerClient( + net, + trainloader, + valloader, + device, + num_epochs, + learning_rate, + learning_rate_decay, + ) + + return client_fn + + + +def _fit(self: Client, ins: FitIns) -> FitRes: + + """Refine the provided parameters using the locally held dataset. + FitIns & FitRes were modified for FedDyn. 
Fit function gets prev_grads + as input and return the updated prev_grads with updated parameters + """ + + # Deconstruct FitIns + parameters: NDArrays = parameters_to_ndarrays(ins.parameters) + + # Train + results = self.numpy_client.fit(parameters, ins.prev_grads, ins.config) # type: ignore + if not ( + len(results) == 3 + and isinstance(results[0], list) + and isinstance(results[1], Dict) + and isinstance(results[2], int) + ): + raise Exception(EXCEPTION_MESSAGE_WRONG_RETURN_TYPE_FIT) + + # Return FitRes + parameters_prime, prev_grads, num_examples = results + parameters_prime_proto = ndarrays_to_parameters(parameters_prime) + return FitRes( + status=Status(code=Code.OK, message="Success"), + parameters=parameters_prime_proto, + prev_grads=prev_grads, + num_examples=num_examples, + cid = -1, + ) + + +def _wrap_numpy_client(client: NumPyClient) -> Client: + member_dict: Dict[str, Callable] = { # type: ignore + "__init__": _constructor, + } + + # Add wrapper type methods (if overridden) + + if numpyclient_has_get_properties(client=client): + member_dict["get_properties"] = _get_properties + + if numpyclient_has_get_parameters(client=client): + member_dict["get_parameters"] = _get_parameters + + if numpyclient_has_fit(client=client): + member_dict["fit"] = _fit + + if numpyclient_has_evaluate(client=client): + member_dict["evaluate"] = _evaluate + + # Create wrapper class + wrapper_class = type("NumPyClientWrapper", (Client,), member_dict) + + # Create and return an instance of the newly created class + return wrapper_class(numpy_client=client) # type: ignore + +def to_client(client_like: ClientLike) -> Client: + """Take any Client-like object and return it as a Client.""" + if isinstance(client_like, NumPyClient): + return _wrap_numpy_client(client=client_like) + return client_like \ No newline at end of file diff --git a/baselines/depthfl/depthfl/conf/config.yaml b/baselines/depthfl/depthfl/conf/config.yaml new file mode 100644 index 000000000000..ce319f95322f --- /dev/null +++ b/baselines/depthfl/depthfl/conf/config.yaml @@ -0,0 +1,46 @@ +--- + +num_clients: 100 # total number of clients +num_epochs: 5 # number of local epochs +batch_size: 50 +num_rounds: 1000 +fraction: 0.1 # participation ratio +learning_rate: 0.1 +learning_rate_decay : 0.998 # per round +static_bn: false # static batch normalization (HeteroFL) +exclusive_learning: false # exclusive learning baseline in DepthFL paper +model_size: 1 # model size for exclusive learning + +client_resources: + num_cpus: 1 + num_gpus: 0.5 + +server_device: cuda + +dataset_config: + iid: true + +fit_config: + feddyn: true + kd: true + alpha: 0.1 # alpha for FedDyn + extended: true # if not extended : InclusiveFL + drop_client: false # with FedProx, clients shouldn't be dropped even if they are stragglers + +model: + _target_: depthfl.resnet.multi_resnet18 + n_blocks: 4 # depth (1 ~ 4) + num_classes: 100 + +strategy: + _target_: depthfl.strategy.FedDyn + fraction_fit: 0.00001 # because we want the number of clients to sample on each round to be solely defined by min_fit_clients + fraction_evaluate: 0.0 + # min_fit_clients: ${clients_per_round} + min_evaluate_clients: 0 + # min_available_clients: ${clients_per_round} + evaluate_metrics_aggregation_fn: + _target_: depthfl.strategy.weighted_average + _partial_: true # we dont' want this function to be evaluated when instantiating the strategy, we treat it as a partial and evaluate it when the strategy actuallly calls the function (in aggregate_evaluate()) + + diff --git 
a/baselines/depthfl/depthfl/conf/heterofl.yaml b/baselines/depthfl/depthfl/conf/heterofl.yaml new file mode 100644 index 000000000000..2ad8fa576c4a --- /dev/null +++ b/baselines/depthfl/depthfl/conf/heterofl.yaml @@ -0,0 +1,46 @@ +--- + +num_clients: 100 # total number of clients +num_epochs: 5 # number of local epochs +batch_size: 50 +num_rounds: 1000 +fraction: 0.1 # participation ratio +learning_rate: 0.1 +learning_rate_decay : 0.998 # per round +static_bn: true # static batch normalization (HeteroFL) +exclusive_learning: false # exclusive learning baseline in DepthFL paper +model_size: 1 # model size for exclusive learning + +client_resources: + num_cpus: 1 + num_gpus: 0.5 + +server_device: cuda + +dataset_config: + iid: true + +fit_config: + feddyn: false + kd: false + alpha: 0.1 # unused + extended: false # unused + drop_client: false # with FedProx, clients shouldn't be dropped even if they are stragglers + +model: + _target_: depthfl.resnet_hetero.resnet18 + n_blocks: 4 # width (1 ~ 4) + num_classes: 100 + scale: true # scaler module in HeteroFL + +strategy: + _target_: depthfl.strategy_hetero.HeteroFL + fraction_fit: 0.00001 # because we want the number of clients to sample on each round to be solely defined by min_fit_clients + fraction_evaluate: 0.0 + # min_fit_clients: ${clients_per_round} + min_evaluate_clients: 0 + # min_available_clients: ${clients_per_round} + evaluate_metrics_aggregation_fn: + _target_: depthfl.strategy.weighted_average + _partial_: true # we dont' want this function to be evaluated when instantiating the strategy, we treat it as a partial and evaluate it when the strategy actuallly calls the function (in aggregate_evaluate()) + diff --git a/baselines/depthfl/depthfl/dataset.py b/baselines/depthfl/depthfl/dataset.py new file mode 100644 index 000000000000..d1a8cbcd6488 --- /dev/null +++ b/baselines/depthfl/depthfl/dataset.py @@ -0,0 +1,59 @@ +"""CIFAR100 dataset utilities for federated learning.""" + +from typing import Optional, Tuple + +import torch +from omegaconf import DictConfig +from torch.utils.data import DataLoader, random_split + +from depthfl.dataset_preparation import _partition_data + + +def load_datasets( # pylint: disable=too-many-arguments + config: DictConfig, + num_clients: int, + val_ratio: float = 0.0, + batch_size: Optional[int] = 32, + seed: Optional[int] = 41, +) -> Tuple[DataLoader, DataLoader, DataLoader]: + """Creates the dataloaders to be fed into the model. + + Parameters + ---------- + config: DictConfig + Parameterises the dataset partitioning process + num_clients : int + The number of clients that hold a part of the data + val_ratio : float, optional + The ratio of training data that will be used for validation (between 0 and 1), + by default 0.1 + batch_size : int, optional + The size of the batches to be fed into the model, by default 32 + seed : int, optional + Used to set a fix seed to replicate experiments, by default 42 + + Returns + ------- + Tuple[DataLoader, DataLoader, DataLoader] + The DataLoader for training, the DataLoader for validation, the DataLoader for testing. 
+ """ + print(f"Dataset partitioning config: {config}") + datasets, testset = _partition_data( + num_clients, + iid=config.iid, + seed=seed, + ) + # Split each partition into train/val and create DataLoader + trainloaders = [] + valloaders = [] + for dataset in datasets: + len_val = 0 + if val_ratio > 0: + len_val = int(len(dataset) / (1 / val_ratio)) + lengths = [len(dataset) - len_val, len_val] + ds_train, ds_val = random_split( + dataset, lengths, torch.Generator().manual_seed(seed) + ) + trainloaders.append(DataLoader(ds_train, batch_size=batch_size, shuffle=True)) + valloaders.append(DataLoader(ds_val, batch_size=batch_size)) + return trainloaders, valloaders, DataLoader(testset, batch_size=batch_size) \ No newline at end of file diff --git a/baselines/depthfl/depthfl/dataset_preparation.py b/baselines/depthfl/depthfl/dataset_preparation.py new file mode 100644 index 000000000000..be12b2829e7d --- /dev/null +++ b/baselines/depthfl/depthfl/dataset_preparation.py @@ -0,0 +1,77 @@ +from typing import List, Optional, Tuple + +import numpy as np +import torch +import torchvision.transforms as transforms +from torch.utils.data import ConcatDataset, Dataset, Subset, random_split +from torchvision.datasets import CIFAR100 + + +def _download_data() -> Tuple[Dataset, Dataset]: + """Downloads (if necessary) and returns the CIFAR-100 dataset. + + Returns + ------- + Tuple[CIFAR100, CIFAR100] + The dataset for training and the dataset for testing CIFAR100. + """ + transform_train = transforms.Compose([ + transforms.ToTensor(), + transforms.RandomCrop(32, padding=4), + transforms.RandomHorizontalFlip(), + transforms.Normalize((0.5071, 0.4867, 0.4408), + (0.2675, 0.2565, 0.2761))]) + + transform_test = transforms.Compose([ + transforms.ToTensor(), + transforms.Normalize((0.5071, 0.4867, 0.4408), + (0.2675, 0.2565, 0.2761))]) + + trainset = CIFAR100("./dataset", train=True, download=True, transform=transform_train) + testset = CIFAR100("./dataset", train=False, download=True, transform=transform_test) + return trainset, testset + + +def _partition_data( + num_clients, + iid: Optional[bool] = True, + seed: Optional[int] = 41, +) -> Tuple[List[Dataset], Dataset]: + """Split training set into iid or non iid partitions to simulate the + federated setting. + + Parameters + ---------- + num_clients : int + The number of clients that hold a part of the data + iid : bool, optional + Whether the data should be independent and identically distributed between + the clients or if the data should first be sorted by labels and distributed by chunks + to each client (used to test the convergence in a worst case scenario), by default False + seed : int, optional + Used to set a fix seed to replicate experiments, by default 42 + + Returns + ------- + Tuple[List[Dataset], Dataset] + A list of dataset for each client and a single dataset to be use for testing the model. 
+ """ + trainset, testset = _download_data() + + datasets = list() + + if iid: + + np.random.seed(seed) + num_sample = int(len(trainset)/(num_clients)) + index = [i for i in range(len(trainset))] + for i in range(num_clients): + sample_idx = np.random.choice(index, num_sample, + replace=False) + index = list(set(index)-set(sample_idx)) + datasets.append(Subset(trainset, sample_idx)) + + else: + pass + + return datasets, testset \ No newline at end of file diff --git a/baselines/depthfl/depthfl/main.py b/baselines/depthfl/depthfl/main.py new file mode 100644 index 000000000000..e6ddcb86a8f1 --- /dev/null +++ b/baselines/depthfl/depthfl/main.py @@ -0,0 +1,145 @@ +import copy +import hydra +import flwr as fl +from hydra.core.hydra_config import HydraConfig +from hydra.utils import instantiate +from omegaconf import DictConfig, OmegaConf + +from flwr.server.client_manager import ClientManager, SimpleClientManager +from flwr.common import ndarrays_to_parameters +from depthfl import client, server, utils +from depthfl.simulation import start_simulation +from depthfl.dataset import load_datasets +from depthfl.utils import save_results_as_pickle + +@hydra.main(config_path="conf", config_name="config", version_base=None) +def main(cfg: DictConfig) -> None: + """Run the baseline. + + Parameters + ---------- + cfg : DictConfig + An omegaconf object that stores the hydra config. + """ + + print(OmegaConf.to_yaml(cfg)) + + # partition dataset and get dataloaders + trainloaders, valloaders, testloader = load_datasets( + config=cfg.dataset_config, + num_clients=cfg.num_clients, + batch_size=cfg.batch_size, + ) + + # exclusive learning baseline in DepthFL paper + # (model_size, % of clients) = (a,100), (b,75), (c,50), (d,25) + if cfg.exclusive_learning: + cfg.num_clients = int(cfg.num_clients - (cfg.model_size-1) * (cfg.num_clients // 4)) + + models = [] + for i in range(cfg.num_clients): + model = copy.deepcopy(cfg.model) + + # each client gets different model depth / width + model.n_blocks = i // (cfg.num_clients // 4) + 1 + + # In exclusive learning, every client has same model depth / width + if cfg.exclusive_learning: + model.n_blocks = cfg.model_size + + models.append(model) + + # prepare function that will be used to spawn each client + client_fn = client.gen_client_fn( + num_clients=cfg.num_clients, + num_epochs=cfg.num_epochs, + trainloaders=trainloaders, + valloaders=valloaders, + num_rounds=cfg.num_rounds, + learning_rate=cfg.learning_rate, + learning_rate_decay=cfg.learning_rate_decay, + models=models, + cfg=cfg, + ) + + # get function that will executed by the strategy's evaluate() method + # Set server's device + device = cfg.server_device + + # Static Batch Normalization for HeteroFL + if cfg.static_bn: + evaluate_fn = server.gen_evaluate_fn_hetero(trainloaders, testloader, device=device, model_cfg=model) + else: + evaluate_fn = server.gen_evaluate_fn(testloader, device=device, model=model) + + # get a function that will be used to construct the config that the client's + # fit() method will received + def get_on_fit_config(): + def fit_config_fn(server_round: int): + # resolve and convert to python dict + fit_config = OmegaConf.to_container(cfg.fit_config, resolve=True) + fit_config["curr_round"] = server_round # add round info + return fit_config + + return fit_config_fn + + net = instantiate(cfg.model) + # instantiate strategy according to config. Here we pass other arguments + # that are only defined at run time. 
+ strategy = instantiate( + cfg.strategy, + cfg, + net, + evaluate_fn=evaluate_fn, + on_fit_config_fn=get_on_fit_config(), + initial_parameters=ndarrays_to_parameters([val.cpu().numpy() for _, val in net.state_dict().items()]), + min_fit_clients= int(cfg.num_clients * cfg.fraction), + min_available_clients= int(cfg.num_clients * cfg.fraction), + ) + + # Start simulation + history = start_simulation( + client_fn=client_fn, + num_clients=cfg.num_clients, + config=fl.server.ServerConfig(num_rounds=cfg.num_rounds), + client_resources={ + "num_cpus": cfg.client_resources.num_cpus, + "num_gpus": cfg.client_resources.num_gpus, + }, + strategy=strategy, + server=server.Server_FedDyn(client_manager=SimpleClientManager(), strategy=strategy), + ) + + # Experiment completed. Now we save the results and + # generate plots using the `history` + print("................") + print(history) + + # Hydra automatically creates an output directory + # Let's retrieve it and save some results there + save_path = HydraConfig.get().runtime.output_dir + + # save results as a Python pickle using a file_path + # the directory created by Hydra for each run + save_results_as_pickle(history, file_path=save_path, extra_results={}) + + # plot results and include them in the readme + strategy_name = strategy.__class__.__name__ + file_suffix: str = ( + f"_{strategy_name}" + f"{'_iid' if cfg.dataset_config.iid else ''}" + f"_C={cfg.num_clients}" + f"_B={cfg.batch_size}" + f"_E={cfg.num_epochs}" + f"_R={cfg.num_rounds}" + ) + + utils.plot_metric_from_history( + history, + save_path, + (file_suffix), + ) + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/baselines/depthfl/depthfl/models.py b/baselines/depthfl/depthfl/models.py new file mode 100644 index 000000000000..1c98309a98f0 --- /dev/null +++ b/baselines/depthfl/depthfl/models.py @@ -0,0 +1,266 @@ +"""ResNet18 model architecutre, training, and testing functions for CIFAR100.""" + + +from typing import List, Tuple +from omegaconf import DictConfig + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.nn.parameter import Parameter +from torch.utils.data import DataLoader + +class KLLoss(nn.Module): + """KL divergence loss for self distillation""" + def __init__(self): + super(KLLoss, self).__init__() + + def forward(self, pred, label): + T = 1 + predict = F.log_softmax(pred/T,dim=1) + target_data = F.softmax(label/T,dim=1) + target_data =target_data+10**(-7) + with torch.no_grad(): + target = target_data.detach().clone() + + loss=T*T*((target*(target.log()-predict)).sum(1).sum()/target.size()[0]) + return loss + +def train( # pylint: disable=too-many-arguments + net: nn.Module, + trainloader: DataLoader, + device: torch.device, + epochs: int, + learning_rate: float, + feddyn: bool, + kd: bool, + consistency_weight: float, + prev_grads: dict, + alpha: float, + extended: bool, +) -> None: + """Train the network on the training set. + + Parameters + ---------- + net : nn.Module + The neural network to train. + trainloader : DataLoader + The DataLoader containing the data to train the network on. + device : torch.device + The device on which the model should be trained, either 'cpu' or 'cuda'. + epochs : int + The number of epochs the model should be trained for. + learning_rate : float + The learning rate for the SGD optimizer. + alpha : float + Hyperparameter for the FedDyn. 
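+    feddyn : bool
+        Whether to apply FedDyn's dynamic regularization term.
+    kd : bool
+        Whether to apply mutual self-distillation among the classifiers.
+    consistency_weight : float
+        Weight of the self-distillation (KL) loss, ramped up over the rounds.
+    prev_grads : dict
+        The client's accumulated gradient state for FedDyn, updated in place.
+    extended : bool
+        If False, only the final classifier is trained (InclusiveFL).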
+ """ + criterion = torch.nn.CrossEntropyLoss() + criterion_kl = KLLoss().cuda() + optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate, weight_decay=1e-3) + global_params = {k:val.detach().clone().flatten() for (k,val) in net.named_parameters()} + + for k, _ in net.named_parameters(): + prev_grads[k] = prev_grads[k].to(device) + + net.train() + for _ in range(epochs): + _train_one_epoch( + net, global_params, trainloader, device, criterion, criterion_kl, optimizer, feddyn, kd, consistency_weight, prev_grads, alpha, extended, + ) + + # update prev_grads for FedDyn + if feddyn: + for k, param in net.named_parameters(): + curr_param = param.detach().clone().flatten() + prev_grads[k] = prev_grads[k] - alpha * (curr_param - global_params[k]) + prev_grads[k] = prev_grads[k].to(torch.device('cpu')) + + +def _train_one_epoch( # pylint: disable=too-many-arguments + net: nn.Module, + global_params: dict, + trainloader: DataLoader, + device: torch.device, + criterion: torch.nn.CrossEntropyLoss, + criterion_kl: nn.Module, + optimizer: torch.optim.Adam, + feddyn: bool, + kd: bool, + consistency_weight: float, + prev_grads: dict, + alpha: float, + extended: bool, +): + """Train for one epoch. + + Parameters + ---------- + net : nn.Module + The neural network to train. + global_params : List[Parameter] + The parameters of the global model (from the server). + trainloader : DataLoader + The DataLoader containing the data to train the network on. + device : torch.device + The device on which the model should be trained, either 'cpu' or 'cuda'. + criterion : torch.nn.CrossEntropyLoss + The loss function to use for training + optimizer : torch.optim.Adam + The optimizer to use for training + alpha : float + Hyperparameter for the FedDyn. + """ + for images, labels in trainloader: + images, labels = images.to(device), labels.to(device) + loss = 0. + optimizer.zero_grad() + output_lst = net(images) + + for i, branch_output in enumerate(output_lst): + + # only trains last classifier in InclusiveFL + if not extended and i != len(output_lst) - 1: + continue + + loss += criterion(branch_output, labels) + + # self distillation term + if kd and len(output_lst) > 1: + + for j in range(len(output_lst)): + if j == i: + continue + else: + loss += consistency_weight * \ + criterion_kl(branch_output, output_lst[j].detach()) / (len(output_lst) - 1) + + # Dynamic regularization in FedDyn + if feddyn: + for k, param in net.named_parameters(): + + curr_param = param.flatten() + + lin_penalty = torch.dot(curr_param, prev_grads[k]) + loss -= lin_penalty + + quad_penalty = alpha/2.0 * torch.sum(torch.square(curr_param - global_params[k])) + loss += quad_penalty + + loss.backward() + optimizer.step() + + +def test( + net: nn.Module, testloader: DataLoader, device: torch.device +) -> Tuple[float, float, List[float]]: + """Evaluate the network on the entire test set. + + Parameters + ---------- + net : nn.Module + The neural network to test. + testloader : DataLoader + The DataLoader containing the data to test the network on. + device : torch.device + The device on which the model should be tested, either 'cpu' or 'cuda'. + + Returns + ------- + Tuple[float, float, List[float]] + The loss and the accuracy of the input model on the given data. 
+ """ + criterion = torch.nn.CrossEntropyLoss() + correct, total, loss = 0, 0, 0.0 + correct_single = [0] * 4 # accuracy of each classifier within model + net.eval() + with torch.no_grad(): + for images, labels in testloader: + images, labels = images.to(device), labels.to(device) + output_lst = net(images) + + # ensemble classfiers' output + ensemble_output = torch.stack(output_lst, dim=2) + ensemble_output = torch.sum(ensemble_output, dim=2) / len(output_lst) + + loss += criterion(ensemble_output, labels).item() + _, predicted = torch.max(ensemble_output, 1) + total += labels.size(0) + correct += (predicted == labels).sum().item() + + for i, single in enumerate(output_lst): + _, predicted = torch.max(single, 1) + correct_single[i] += (predicted == labels).sum().item() + + if len(testloader.dataset) == 0: + raise ValueError("Testloader can't be 0, exiting...") + loss /= len(testloader.dataset) + accuracy = correct / total + accuracy_single = [correct / total for correct in correct_single] + return loss, accuracy, accuracy_single + +def test_sbn( + nets: List[nn.Module], trainloaders:List[DictConfig], testloader: DataLoader, device: torch.device +) -> Tuple[float, float, List[float]]: + """Evaluate the networks on the entire test set. + + Parameters + ---------- + nets : List[nn.Module] + The neural networks to test. Each neural network has different width + trainloaders : List[DataLoader] + The List of dataloaders containing the data to train the network on + testloader : DataLoader + The DataLoader containing the data to test the network on. + device : torch.device + The device on which the model should be tested, either 'cpu' or 'cuda'. + + Returns + ------- + Tuple[float, float, List[float]] + The loss and the accuracy of the input model on the given data. 
+ """ + + # static batch normalization + for trainloader in trainloaders: + with torch.no_grad(): + for model in nets: + model.train() + for batch_idx, (images, labels) in enumerate(trainloader): + images, labels = images.to(device), labels.to(device) + output = model(images) + + model.eval() + + criterion = torch.nn.CrossEntropyLoss() + correct, total, loss = 0, 0, 0.0 + correct_single = [0] * 4 + + # test each network of different width + with torch.no_grad(): + for images, labels in testloader: + images, labels = images.to(device), labels.to(device) + + output_lst = [] + + for model in nets: + output_lst.append(model(images)[0]) + + output = output_lst[-1] + + loss += criterion(output, labels).item() + _, predicted = torch.max(output, 1) + total += labels.size(0) + correct += (predicted == labels).sum().item() + + for i, single in enumerate(output_lst): + _, predicted = torch.max(single, 1) + correct_single[i] += (predicted == labels).sum().item() + + if len(testloader.dataset) == 0: + raise ValueError("Testloader can't be 0, exiting...") + loss /= len(testloader.dataset) + accuracy = correct / total + accuracy_single = [correct / total for correct in correct_single] + return loss, accuracy, accuracy_single \ No newline at end of file diff --git a/baselines/depthfl/depthfl/ray_client_proxy.py b/baselines/depthfl/depthfl/ray_client_proxy.py new file mode 100644 index 000000000000..afeb1d90aa67 --- /dev/null +++ b/baselines/depthfl/depthfl/ray_client_proxy.py @@ -0,0 +1,49 @@ +from typing import Callable, Dict, Optional, cast +from logging import ERROR +import ray + +from flwr import common +from flwr.client import Client, ClientLike +from depthfl.client import to_client +from flwr.client.client import ( + maybe_call_fit, +) +from flwr.simulation.ray_transport.ray_client_proxy import RayClientProxy +from flwr.common.logger import log + +ClientFn = Callable[[str], ClientLike] + +class RayClientProxy_FedDyn(RayClientProxy): + + def fit(self, ins: common.FitIns, timeout: Optional[float]) -> common.FitRes: + + """Train model parameters on the locally held dataset.""" + future_fit_res = launch_and_fit.options( # type: ignore + **self.resources, + ).remote(self.client_fn, self.cid, ins) + try: + res = ray.get(future_fit_res, timeout=timeout) + except Exception as ex: + log(ERROR, ex) + raise ex + return cast( + common.FitRes, + res, + ) + + +@ray.remote +def launch_and_fit( + client_fn: ClientFn, cid: str, fit_ins: common.FitIns +) -> common.FitRes: + """Exectue fit remotely.""" + client: Client = _create_client(client_fn, cid) + return maybe_call_fit( + client=client, + fit_ins=fit_ins, + ) + +def _create_client(client_fn: ClientFn, cid: str) -> Client: + """Create a client instance.""" + client_like: ClientLike = client_fn(cid) + return to_client(client_like=client_like) \ No newline at end of file diff --git a/baselines/depthfl/depthfl/resnet.py b/baselines/depthfl/depthfl/resnet.py new file mode 100644 index 000000000000..7daa9d61b0e9 --- /dev/null +++ b/baselines/depthfl/depthfl/resnet.py @@ -0,0 +1,339 @@ +import torch +import torch.nn as nn +from typing import Type, Any, Callable, Union, List, Optional + +class MyGroupNorm(nn.Module): + def __init__(self, num_channels): + super(MyGroupNorm, self).__init__() + ## change num_groups to 32 + self.norm = nn.GroupNorm(num_groups=16, num_channels=num_channels, eps=1e-5, affine=True) + + def forward(self, x): + x = self.norm(x) + return x + +class MyBatchNorm(nn.Module): + def __init__(self, num_channels): + super(MyBatchNorm, self).__init__() + 
self.norm = nn.BatchNorm2d(num_channels, track_running_stats=True) + + def forward(self, x): + x = self.norm(x) + return x + + +def conv3x3(in_planes, out_planes, stride=1): + return nn.Conv2d(in_planes, out_planes, kernel_size=3, + stride=stride, padding=1, bias=False) + +def conv1x1(in_planes, planes, stride=1): + return nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False) + + +class SepConv(nn.Module): + + def __init__(self, channel_in, channel_out, kernel_size=3, stride=2, padding=1, affine=True, norm_layer=MyGroupNorm): + super(SepConv, self).__init__() + self.op = nn.Sequential( + nn.Conv2d(channel_in, channel_in, kernel_size=kernel_size, stride=stride, padding=padding, groups=channel_in, bias=False), + nn.Conv2d(channel_in, channel_in, kernel_size=1, padding=0, bias=False), + norm_layer(channel_in), + nn.ReLU(inplace=False), + nn.Conv2d(channel_in, channel_in, kernel_size=kernel_size, stride=1, padding=padding, groups=channel_in, bias=False), + nn.Conv2d(channel_in, channel_out, kernel_size=1, padding=0, bias=False), + norm_layer(channel_out), + nn.ReLU(inplace=False), + ) + + def forward(self, x): + return self.op(x) + + +class BasicBlock(nn.Module): + expansion = 1 + def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = norm_layer(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = norm_layer(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + output = self.conv1(x) + output = self.bn1(output) + output = self.relu(output) + + output = self.conv2(output) + output = self.bn2(output) + + if self.downsample is not None: + residual = self.downsample(x) + + output += residual + output = self.relu(output) + return output + +class BottleneckBlock(nn.Module): + expansion = 4 + def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None): + super(BottleneckBlock, self).__init__() + self.conv1 = conv1x1(inplanes, planes) + self.bn1 = norm_layer(planes) + self.relu = nn.ReLU(inplace=True) + + self.conv2 = conv3x3(planes, planes, stride) + self.bn2 = norm_layer(planes) + + self.conv3 = conv1x1(planes, planes*self.expansion) + self.bn3 = norm_layer(planes*self.expansion) + + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + output = self.conv1(x) + output = self.bn1(output) + output = self.relu(output) + + output = self.conv2(output) + output = self.bn2(output) + output = self.relu(output) + + output = self.conv3(output) + output = self.bn3(output) + + if self.downsample is not None: + residual = self.downsample(x) + + output += residual + output = self.relu(output) + + return output + +class Multi_ResNet(nn.Module): + """Resnet model + Args: + block (class): block type, BasicBlock or BottleneckBlock + layers (int list): layer num in each block + num_classes (int): class num + """ + + def __init__(self, block, layers, n_blocks, num_classes=1000, \ + norm_layer: Optional[Callable[..., nn.Module]] = None): + + super(Multi_ResNet, self).__init__() + self.n_blocks = n_blocks + self.inplanes = 64 + self.norm_layer = norm_layer + #self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False) + self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) + self.bn1 = norm_layer(self.inplanes) + + self.relu = nn.ReLU(inplace=True) + 
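+        # Each sub-model depth gets its own prediction head below:
+        # `scala{i}` reduces the feature map to the final width with SepConv
+        # blocks plus pooling, `attention{i}` gates the backbone features, and
+        # `middle_fc{i}` produces that exit's class logits.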
#self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + self.layer1 = self._make_layer(block, 64, layers[0]) + + self.middle_fc1 = nn.Linear(512 * block.expansion, num_classes) + #self.feature_fc1 = nn.Linear(512 * block.expansion, 512 * block.expansion) + self.scala1 = nn.Sequential( + SepConv( + channel_in=64 * block.expansion, + channel_out=128 * block.expansion, + norm_layer=norm_layer + ), + SepConv( + channel_in=128 * block.expansion, + channel_out=256 * block.expansion, + norm_layer=norm_layer + ), + SepConv( + channel_in=256 * block.expansion, + channel_out=512 * block.expansion, + norm_layer=norm_layer + + ), + nn.AdaptiveAvgPool2d(1) + ) + + self.attention1 = nn.Sequential( + SepConv( + channel_in=64 * block.expansion, + channel_out=64 * block.expansion, + norm_layer=norm_layer + ), + norm_layer(64 * block.expansion), + nn.ReLU(), + nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False), + nn.Sigmoid() + ) + + if n_blocks > 1: + self.layer2 = self._make_layer(block, 128, layers[1], stride=2) + self.middle_fc2 = nn.Linear(512 * block.expansion, num_classes) + #self.feature_fc2 = nn.Linear(512 * block.expansion, 512 * block.expansion) + self.scala2 = nn.Sequential( + SepConv( + channel_in=128 * block.expansion, + channel_out=256 * block.expansion, + norm_layer=norm_layer + ), + SepConv( + channel_in=256 * block.expansion, + channel_out=512 * block.expansion, + norm_layer=norm_layer + ), + nn.AdaptiveAvgPool2d(1) + ) + self.attention2 = nn.Sequential( + SepConv( + channel_in=128 * block.expansion, + channel_out=128 * block.expansion, + norm_layer=norm_layer + ), + norm_layer(128 * block.expansion), + nn.ReLU(), + nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False), + nn.Sigmoid() + ) + + + if n_blocks > 2: + self.layer3 = self._make_layer(block, 256, layers[2], stride=2) + self.middle_fc3 = nn.Linear(512 * block.expansion, num_classes) + #self.feature_fc3 = nn.Linear(512 * block.expansion, 512 * block.expansion) + self.scala3 = nn.Sequential( + SepConv( + channel_in=256 * block.expansion, + channel_out=512 * block.expansion, + norm_layer=norm_layer + ), + nn.AdaptiveAvgPool2d(1) + ) + self.attention3 = nn.Sequential( + SepConv( + channel_in=256 * block.expansion, + channel_out=256 * block.expansion, + norm_layer=norm_layer + ), + norm_layer(256 * block.expansion), + nn.ReLU(), + nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False), + nn.Sigmoid() + ) + + + if n_blocks > 3: + self.layer4 = self._make_layer(block, 512, layers[3], stride=2) + self.fc = nn.Linear(512 * block.expansion, num_classes) + self.scala4 = nn.AdaptiveAvgPool2d(1) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.GroupNorm) or isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + def _make_layer(self, block, planes, layers, stride=1, norm_layer=None): + """A block with 'layers' layers + Args: + block (class): block type + planes (int): output channels = planes * expansion + layers (int): layer num in the block + stride (int): the first layer stride in the block + """ + norm_layer = self.norm_layer + downsample = None + if stride !=1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + norm_layer(planes * block.expansion), + ) + layer = [] + layer.append(block(self.inplanes, planes, stride=stride, downsample=downsample, 
norm_layer=norm_layer)) + self.inplanes = planes * block.expansion + for i in range(1, layers): + layer.append(block(self.inplanes, planes, norm_layer=norm_layer)) + + return nn.Sequential(*layer) + + def forward(self, x): + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + # x = self.maxpool(x) + + x = self.layer1(x) + fea1 = self.attention1(x) + fea1 = fea1 * x + out1_feature = self.scala1(fea1).view(x.size(0), -1) + middle_output1 = self.middle_fc1(out1_feature) + #out1_feature = self.feature_fc1(out1_feature) + + if self.n_blocks == 1: + return [middle_output1] + + x = self.layer2(x) + fea2 = self.attention2(x) + fea2 = fea2 * x + out2_feature = self.scala2(fea2).view(x.size(0), -1) + middle_output2 = self.middle_fc2(out2_feature) + #out2_feature = self.feature_fc2(out2_feature) + if self.n_blocks == 2: + return [middle_output1, middle_output2] + + x = self.layer3(x) + fea3 = self.attention3(x) + fea3 = fea3 * x + out3_feature = self.scala3(fea3).view(x.size(0), -1) + middle_output3 = self.middle_fc3(out3_feature) + #out3_feature = self.feature_fc3(out3_feature) + + if self.n_blocks == 3: + return [middle_output1, middle_output2, middle_output3] + + x = self.layer4(x) + out4_feature = self.scala4(x).view(x.size(0), -1) + output4 = self.fc(out4_feature) + + return [middle_output1, middle_output2, middle_output3, output4] + +def multi_resnet18(n_blocks=1, norm='bn', num_classes=100): + if norm == 'gn': + norm_layer = MyGroupNorm + + elif norm == 'bn': + norm_layer = MyBatchNorm + + return Multi_ResNet(BasicBlock, [2,2,2,2], n_blocks, num_classes=num_classes, norm_layer=norm_layer) + +def multi_resnet34(n_blocks=4, norm='bn', num_classes=100): + if norm == 'gn': + norm_layer = MyGroupNorm + + elif norm == 'bn': + norm_layer = MyBatchNorm + + return Multi_ResNet(BasicBlock, [3,4,6,3], n_blocks, num_classes=num_classes, norm_layer=norm_layer) + +if __name__ == "__main__": + + from ptflops import get_model_complexity_info + + model = multi_resnet18(n_blocks=4, num_classes=100) + + with torch.cuda.device(0): + macs, params = get_model_complexity_info(model, (3, 32, 32), as_strings=True, + print_per_layer_stat=False, verbose=True, units='MMac') + + print('{:<30} {:<8}'.format('Computational complexity: ', macs)) + print('{:<30} {:<8}'.format('Number of parameters: ', params)) + diff --git a/baselines/depthfl/depthfl/resnet_hetero.py b/baselines/depthfl/depthfl/resnet_hetero.py new file mode 100644 index 000000000000..094a84d130af --- /dev/null +++ b/baselines/depthfl/depthfl/resnet_hetero.py @@ -0,0 +1,205 @@ +import torch +import torch.nn as nn +from typing import Type, Any, Callable, Union, List, Optional +import numpy as np + +class Scaler(nn.Module): + def __init__(self, rate, scale): + super().__init__() + if scale: + self.rate = rate + else: + self.rate = 1 + + def forward(self, input): + output = input / self.rate if self.training else input + return output + + +class MyBatchNorm(nn.Module): + def __init__(self, num_channels, track=True): + super(MyBatchNorm, self).__init__() + ## change num_groups to 32 + self.norm = nn.BatchNorm2d(num_channels, track_running_stats=track) + + def forward(self, x): + x = self.norm(x) + return x + + +def conv3x3(in_planes, out_planes, stride=1): + return nn.Conv2d(in_planes, out_planes, kernel_size=3, + stride=stride, padding=1, bias=False) + +def conv1x1(in_planes, planes, stride=1): + return nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False) + + +class BasicBlock(nn.Module): + expansion = 1 + def __init__(self, inplanes, 
planes, stride=1, scaler_rate=1, downsample=None, track=True, scale=True): + super(BasicBlock, self).__init__() + self.conv1 = conv3x3(inplanes, planes, stride) + self.scaler = Scaler(scaler_rate, scale) + self.bn1 = MyBatchNorm(planes, track) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = MyBatchNorm(planes, track) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + output = self.conv1(x) + output = self.scaler(output) + output = self.bn1(output) + output = self.relu(output) + + output = self.conv2(output) + output = self.scaler(output) + output = self.bn2(output) + + if self.downsample is not None: + residual = self.downsample(x) + + output += residual + output = self.relu(output) + return output + + +class BottleneckBlock(nn.Module): + expansion = 4 + def __init__(self, inplanes, planes, stride=1, scaler_rate=1, downsample=None, track=True, scale=True): + super(BottleneckBlock, self).__init__() + self.conv1 = conv1x1(inplanes, planes) + self.bn1 = MyBatchNorm(planes) + self.relu = nn.ReLU(inplace=True) + + self.conv2 = conv3x3(planes, planes, stride) + self.bn2 = MyBatchNorm(planes) + + self.conv3 = conv1x1(planes, planes*self.expansion) + self.bn3 = MyBatchNorm(planes*self.expansion) + + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + output = self.conv1(x) + output = self.bn1(output) + output = self.relu(output) + + output = self.conv2(output) + output = self.bn2(output) + output = self.relu(output) + + output = self.conv3(output) + output = self.bn3(output) + + if self.downsample is not None: + residual = self.downsample(x) + + output += residual + output = self.relu(output) + + return output + + +class Multi_ResNet(nn.Module): + + def __init__(self, hidden_size, block, layers, num_classes, scaler_rate, track, scale): + + super(Multi_ResNet, self).__init__() + + self.inplanes = hidden_size[0] + self.norm_layer = MyBatchNorm + self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) + self.scaler = Scaler(scaler_rate, scale) + self.bn1 = self.norm_layer(self.inplanes, track) + + self.relu = nn.ReLU(inplace=True) + #self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + self.layer1 = self._make_layer(block, hidden_size[0], layers[0], scaler_rate = scaler_rate, track=track, scale=scale) + self.layer2 = self._make_layer(block, hidden_size[1], layers[1], stride=2, scaler_rate = scaler_rate, track=track, scale=scale) + self.layer3 = self._make_layer(block, hidden_size[2], layers[2], stride=2, scaler_rate = scaler_rate, track=track, scale=scale) + self.layer4 = self._make_layer(block, hidden_size[3], layers[3], stride=2, scaler_rate = scaler_rate, track=track, scale=scale) + self.fc = nn.Linear(hidden_size[3] * block.expansion, num_classes) + self.scala = nn.AdaptiveAvgPool2d(1) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, nn.GroupNorm) or isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + def _make_layer(self, block, planes, layers, stride=1, scaler_rate = 1, track=True, scale=True): + """A block with 'layers' layers + Args: + block (class): block type + planes (int): output channels = planes * expansion + layers (int): layer num in the block + stride (int): the first layer stride in the block + """ + norm_layer = self.norm_layer + downsample = None + if 
stride != 1 or self.inplanes != planes * block.expansion:
+            downsample = nn.Sequential(
+                conv1x1(self.inplanes, planes * block.expansion, stride),
+                norm_layer(planes * block.expansion, track),
+            )
+        layer = []
+        layer.append(
+            block(
+                self.inplanes,
+                planes,
+                stride=stride,
+                scaler_rate=scaler_rate,
+                downsample=downsample,
+                track=track,
+                scale=scale,
+            )
+        )
+        self.inplanes = planes * block.expansion
+        for _ in range(1, layers):
+            layer.append(
+                block(self.inplanes, planes, scaler_rate=scaler_rate, track=track, scale=scale)
+            )
+
+        return nn.Sequential(*layer)
+
+    def forward(self, x):
+        x = self.conv1(x)
+        x = self.scaler(x)
+        x = self.bn1(x)
+        x = self.relu(x)
+        # x = self.maxpool(x)
+
+        x = self.layer1(x)
+        x = self.layer2(x)
+        x = self.layer3(x)
+        x = self.layer4(x)
+        out = self.scala(x).view(x.size(0), -1)
+        out = self.fc(out)
+
+        return [out]
+
+
+def resnet18(n_blocks=4, track=False, scale=True, num_classes=100):
+    # width pruning ratio: (0.25, 0.50, 0.75, 1.00)
+    model_rate = n_blocks / 4
+
+    hidden_size = [64, 128, 256, 512]
+    hidden_size = [int(np.ceil(model_rate * x)) for x in hidden_size]
+
+    scaler_rate = model_rate
+
+    return Multi_ResNet(
+        hidden_size,
+        BasicBlock,
+        [2, 2, 2, 2],
+        num_classes=num_classes,
+        scaler_rate=scaler_rate,
+        track=track,
+        scale=scale,
+    )
+
+
+if __name__ == "__main__":
+    from ptflops import get_model_complexity_info
+
+    # profile the full-width model (n_blocks=4) on CIFAR-100-sized inputs
+    model = resnet18(n_blocks=4, num_classes=100)
+
+    with torch.cuda.device(0):
+        macs, params = get_model_complexity_info(
+            model,
+            (3, 32, 32),
+            as_strings=True,
+            print_per_layer_stat=False,
+            verbose=True,
+            units='MMac',
+        )
+
+    print('{:<30} {:<8}'.format('Computational complexity: ', macs))
+    print('{:<30} {:<8}'.format('Number of parameters: ', params))
+
diff --git a/baselines/depthfl/depthfl/server.py b/baselines/depthfl/depthfl/server.py
new file mode 100644
index 000000000000..0ef5f638b14e
--- /dev/null
+++ b/baselines/depthfl/depthfl/server.py
@@ -0,0 +1,251 @@
+import concurrent.futures
+import copy
+from collections import OrderedDict
+from logging import DEBUG, INFO
+from typing import Callable, Dict, List, Optional, Tuple, Union
+
+import torch
+from flwr.common import (
+    Code,
+    Parameters,
+    parameters_to_ndarrays,
+)
+from flwr.common.logger import log
+from flwr.common.typing import NDArrays, Scalar
+from flwr.server import Server
+from flwr.server.client_proxy import ClientProxy
+from hydra.utils import instantiate
+from omegaconf import DictConfig
+from torch.utils.data import DataLoader
+
+from depthfl import FitIns, FitRes
+from depthfl.client import prune
+from depthfl.models import test, test_sbn
+
+FitResultsAndFailures = Tuple[
+    List[Tuple[ClientProxy, FitRes]],
+    List[Union[Tuple[ClientProxy, FitRes], BaseException]],
+]
+
+
+def gen_evaluate_fn(
+    testloader: DataLoader,
+    device: torch.device,
+    model: DictConfig,
+) -> Callable[
+    [int, NDArrays, Dict[str, Scalar]], Optional[Tuple[float, Dict[str, Scalar]]]
+]:
+    """Generate the function for centralized evaluation.
+
+    Parameters
+    ----------
+    testloader : DataLoader
+        The dataloader to test the model with.
+    device : torch.device
+        The device to test the model on.
+    model : DictConfig
+        The model configuration used to instantiate the evaluation network.
+
+    Returns
+    -------
+    Callable[ [int, NDArrays, Dict[str, Scalar]], Optional[Tuple[float, Dict[str, Scalar]]] ]
+        The centralized evaluation function.
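+
+    Notes
+    -----
+    The `model` config is re-instantiated and loaded with the aggregated
+    parameters on every call, so no state is carried between evaluations.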
+ """ + + def evaluate( + server_round: int, parameters_ndarrays: NDArrays, config: Dict[str, Scalar] + ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + # pylint: disable=unused-argument + """Use the entire CIFAR-100 test set for evaluation.""" + + net = instantiate(model) + params_dict = zip(net.state_dict().keys(), parameters_ndarrays) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + net.load_state_dict(state_dict, strict=True) + net.to(device) + + loss, accuracy, accuracy_single = test(net, testloader, device=device) + # return statistics + return loss, {"accuracy": accuracy, "accuracy_single":accuracy_single} + + return evaluate + +def gen_evaluate_fn_hetero( + trainloaders: List[DataLoader], + testloader: DataLoader, + device: torch.device, + model_cfg: DictConfig, +) -> Callable[ + [int, NDArrays, Dict[str, Scalar]], Optional[Tuple[float, Dict[str, Scalar]]] +]: + """Generates the function for centralized evaluation. + + Parameters + ---------- + testloader : DataLoader + The dataloader to test the model with. + device : torch.device + The device to test the model on. + + Returns + ------- + Callable[ [int, NDArrays, Dict[str, Scalar]], Optional[Tuple[float, Dict[str, Scalar]]] ] + The centralized evaluation function. + """ + + def evaluate( + server_round: int, parameters_ndarrays: NDArrays, config: Dict[str, Scalar] + ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + # pylint: disable=unused-argument + """Use the entire CIFAR-100 test set for evaluation.""" + + # test per 50 rounds (sbn takes a long time) + if server_round % 50 != 0: + return 0., {"accuracy": 0., "accuracy_single":[0]*4} + + # models with different width + models = [] + for i in range(4): + model_tmp = copy.deepcopy(model_cfg) + model_tmp.n_blocks = i + 1 + models.append(model_tmp) + + # load global parameters + param_idx_lst = [] + nets = [] + net_tmp = instantiate(models[-1], track=False) + for model in models: + net = instantiate(model, track=True, scale=False) + nets.append(net) + param_idx = {} + for k in net_tmp.state_dict().keys(): + param_idx[k] = [torch.arange(size) for size in net.state_dict()[k].shape] + param_idx_lst.append(param_idx) + + params_dict = zip(net_tmp.state_dict().keys(), parameters_ndarrays) + state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) + + for net, param_idx in zip(nets, param_idx_lst): + net.load_state_dict(prune(state_dict, param_idx), strict=False) + net.to(device) + net.train() + + loss, accuracy, accuracy_single = test_sbn(nets, trainloaders, testloader, device=device) + # return statistics + return loss, {"accuracy": accuracy, "accuracy_single":accuracy_single} + + return evaluate + +class Server_FedDyn(Server): + + def fit_round( + self, + server_round: int, + timeout: Optional[float], + ) -> Optional[ + Tuple[Optional[Parameters], Dict[str, Scalar], FitResultsAndFailures] + ]: + """Perform a single round of federated averaging.""" + # Get clients and their respective instructions from strategy + client_instructions = self.strategy.configure_fit( + server_round=server_round, + parameters=self.parameters, + client_manager=self._client_manager, + ) + + if not client_instructions: + log(INFO, "fit_round %s: no clients selected, cancel", server_round) + return None + log( + DEBUG, + "fit_round %s: strategy sampled %s clients (out of %s)", + server_round, + len(client_instructions), + self._client_manager.num_available(), + ) + + # Collect `fit` results from all clients participating in this round + results, failures = fit_clients( + 
client_instructions=client_instructions, + max_workers=self.max_workers, + timeout=timeout, + ) + log( + DEBUG, + "fit_round %s received %s results and %s failures", + server_round, + len(results), + len(failures), + ) + + # Aggregate training results + aggregated_result: Tuple[ + Optional[Parameters], + Dict[str, Scalar], + ] = self.strategy.aggregate_fit(server_round, results, failures, parameters_to_ndarrays(self.parameters)) + # ] = self.strategy.aggregate_fit(server_round, results, failures) + + parameters_aggregated, metrics_aggregated = aggregated_result + return parameters_aggregated, metrics_aggregated, (results, failures) + + +def fit_clients( + client_instructions: List[Tuple[ClientProxy, FitIns]], + max_workers: Optional[int], + timeout: Optional[float], +) -> FitResultsAndFailures: + """Refine parameters concurrently on all selected clients.""" + with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor: + submitted_fs = { + executor.submit(fit_client, client_proxy, ins, timeout) + for client_proxy, ins in client_instructions + } + finished_fs, _ = concurrent.futures.wait( + fs=submitted_fs, + timeout=None, # Handled in the respective communication stack + ) + + # Gather results + results: List[Tuple[ClientProxy, FitRes]] = [] + failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [] + for future in finished_fs: + _handle_finished_future_after_fit( + future=future, results=results, failures=failures + ) + return results, failures + + +def fit_client( + client: ClientProxy, ins: FitIns, timeout: Optional[float] +) -> Tuple[ClientProxy, FitRes]: + """Refine parameters on a single client.""" + + fit_res = client.fit(ins, timeout=timeout) + # tag client id + fit_res.cid = int(client.cid) + return client, fit_res + +def _handle_finished_future_after_fit( + future: concurrent.futures.Future, # type: ignore + results: List[Tuple[ClientProxy, FitRes]], + failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], +) -> None: + """Convert finished future into either a result or a failure.""" + # Check if there was an exception + failure = future.exception() + if failure is not None: + failures.append(failure) + return + + # Successfully received a result from a client + result: Tuple[ClientProxy, FitRes] = future.result() + _, res = result + + # Check result status code + if res.status.code == Code.OK: + results.append(result) + return + + # Not successful, client returned a result where the status code is not OK + failures.append(result) \ No newline at end of file diff --git a/baselines/depthfl/depthfl/simulation.py b/baselines/depthfl/depthfl/simulation.py new file mode 100644 index 000000000000..e317b02e85a4 --- /dev/null +++ b/baselines/depthfl/depthfl/simulation.py @@ -0,0 +1,203 @@ +# Copyright 2020 Adap GmbH. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== +"""Flower simulation app.""" + + +import sys +from logging import ERROR, INFO +from typing import Any, Callable, Dict, List, Optional + +import ray + +from flwr.client.client import Client +from flwr.common import EventType, event +from flwr.common.logger import log +from flwr.server import Server +from flwr.server.app import ServerConfig, _fl, _init_defaults +from flwr.server.client_manager import ClientManager +from flwr.server.history import History +from flwr.server.strategy import Strategy +from depthfl.ray_client_proxy import RayClientProxy_FedDyn + +INVALID_ARGUMENTS_START_SIMULATION = """ +INVALID ARGUMENTS ERROR + +Invalid Arguments in method: + +`start_simulation( + *, + client_fn: Callable[[str], Client], + num_clients: Optional[int] = None, + clients_ids: Optional[List[str]] = None, + client_resources: Optional[Dict[str, float]] = None, + server: Optional[Server] = None, + config: ServerConfig = None, + strategy: Optional[Strategy] = None, + client_manager: Optional[ClientManager] = None, + ray_init_args: Optional[Dict[str, Any]] = None, +) -> None:` + +REASON: + Method requires: + - Either `num_clients`[int] or `clients_ids`[List[str]] + to be set exclusively. + OR + - `len(clients_ids)` == `num_clients` + +""" + + +def start_simulation( # pylint: disable=too-many-arguments + *, + client_fn: Callable[[str], Client], + num_clients: Optional[int] = None, + clients_ids: Optional[List[str]] = None, + client_resources: Optional[Dict[str, float]] = None, + server: Optional[Server] = None, + config: Optional[ServerConfig] = None, + strategy: Optional[Strategy] = None, + client_manager: Optional[ClientManager] = None, + ray_init_args: Optional[Dict[str, Any]] = None, + keep_initialised: Optional[bool] = False, +) -> History: + """Start a Ray-based Flower simulation server. + + Parameters + ---------- + client_fn : Callable[[str], Client] + A function creating client instances. The function must take a single + str argument called `cid`. It should return a single client instance. + Note that the created client instances are ephemeral and will often be + destroyed after a single method invocation. Since client instances are + not long-lived, they should not attempt to carry state over method + invocations. Any state required by the instance (model, dataset, + hyperparameters, ...) should be (re-)created in either the call to + `client_fn` or the call to any of the client methods (e.g., load + evaluation data in the `evaluate` method itself). + num_clients : Optional[int] + The total number of clients in this simulation. This must be set if + `clients_ids` is not set and vice-versa. + clients_ids : Optional[List[str]] + List `client_id`s for each client. This is only required if + `num_clients` is not set. Setting both `num_clients` and `clients_ids` + with `len(clients_ids)` not equal to `num_clients` generates an error. + client_resources : Optional[Dict[str, float]] (default: None) + CPU and GPU resources for a single client. Supported keys are + `num_cpus` and `num_gpus`. Example: `{"num_cpus": 4, "num_gpus": 1}`. + To understand the GPU utilization caused by `num_gpus`, consult the Ray + documentation on GPU support. + server : Optional[flwr.server.Server] (default: None). + An implementation of the abstract base class `flwr.server.Server`. If no + instance is provided, then `start_server` will create one. + config: ServerConfig (default: None). 
+ Currently supported values are `num_rounds` (int, default: 1) and + `round_timeout` in seconds (float, default: None). + strategy : Optional[flwr.server.Strategy] (default: None) + An implementation of the abstract base class `flwr.server.Strategy`. If + no strategy is provided, then `start_server` will use + `flwr.server.strategy.FedAvg`. + client_manager : Optional[flwr.server.ClientManager] (default: None) + An implementation of the abstract base class `flwr.server.ClientManager`. + If no implementation is provided, then `start_simulation` will use + `flwr.server.client_manager.SimpleClientManager`. + ray_init_args : Optional[Dict[str, Any]] (default: None) + Optional dictionary containing arguments for the call to `ray.init`. + If ray_init_args is None (the default), Ray will be initialized with + the following default args: + + { "ignore_reinit_error": True, "include_dashboard": False } + + An empty dictionary can be used (ray_init_args={}) to prevent any + arguments from being passed to ray.init. + keep_initialised: Optional[bool] (default: False) + Set to True to prevent `ray.shutdown()` in case `ray.is_initialized()=True`. + + Returns + ------- + hist : flwr.server.history.History. + Object containing metrics from training. + """ + # pylint: disable-msg=too-many-locals + event( + EventType.START_SIMULATION_ENTER, + {"num_clients": len(clients_ids) if clients_ids is not None else num_clients}, + ) + + # Initialize server and server config + initialized_server, initialized_config = _init_defaults( + server=server, + config=config, + strategy=strategy, + client_manager=client_manager, + ) + log( + INFO, + "Starting Flower simulation, config: %s", + initialized_config, + ) + + # clients_ids takes precedence + cids: List[str] + if clients_ids is not None: + if (num_clients is not None) and (len(clients_ids) != num_clients): + log(ERROR, INVALID_ARGUMENTS_START_SIMULATION) + sys.exit() + else: + cids = clients_ids + else: + if num_clients is None: + log(ERROR, INVALID_ARGUMENTS_START_SIMULATION) + sys.exit() + else: + cids = [str(x) for x in range(num_clients)] + + # Default arguments for Ray initialization + if not ray_init_args: + ray_init_args = { + "ignore_reinit_error": True, + "include_dashboard": False, + } + + # Shut down Ray if it has already been initialized (unless asked not to) + if ray.is_initialized() and not keep_initialised: # type: ignore + ray.shutdown() # type: ignore + + # Initialize Ray + ray.init(**ray_init_args) # type: ignore + log( + INFO, + "Flower VCE: Ray initialized with resources: %s", + ray.cluster_resources(), # type: ignore + ) + + # Register one RayClientProxy object for each client with the ClientManager + resources = client_resources if client_resources is not None else {} + for cid in cids: + client_proxy = RayClientProxy_FedDyn( + client_fn=client_fn, + cid=cid, + resources=resources, + ) + initialized_server.client_manager().register(client=client_proxy) + + # Start training + hist = _fl( + server=initialized_server, + config=initialized_config, + ) + + event(EventType.START_SIMULATION_LEAVE) + + return hist diff --git a/baselines/depthfl/depthfl/strategy.py b/baselines/depthfl/depthfl/strategy.py new file mode 100644 index 000000000000..cdb674ed0c49 --- /dev/null +++ b/baselines/depthfl/depthfl/strategy.py @@ -0,0 +1,155 @@ +from typing import List, Tuple, Union, Optional, Dict +from functools import reduce +from logging import DEBUG, INFO, WARNING +from hydra.utils import instantiate +from omegaconf import DictConfig + +from flwr.common import ( 
+    NDArrays,
+    Parameters,
+    Scalar,
+    ndarrays_to_parameters,
+    parameters_to_ndarrays,
+    Metrics,
+)
+
+from flwr.common.typing import FitRes
+from flwr.common.logger import log
+from flwr.server.client_proxy import ClientProxy
+from flwr.server.client_manager import ClientManager
+from flwr.server.strategy import FedAvg
+
+# DepthFL's extended FitIns/FitRes (carrying `prev_grads`) intentionally
+# shadow the Flower types imported above.
+from depthfl import FitIns, FitRes
+
+import numpy as np
+import torch
+import torch.nn as nn
+
+
+def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics:
+    """Aggregation function for weighted average during evaluation.
+
+    Parameters
+    ----------
+    metrics : List[Tuple[int, Metrics]]
+        The list of metrics to aggregate.
+
+    Returns
+    -------
+    Metrics
+        The weighted average metric.
+    """
+    # Multiply accuracy of each client by number of examples used
+    accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics]
+    examples = [num_examples for num_examples, _ in metrics]
+
+    # Aggregate and return custom metric (weighted average)
+    return {"accuracy": int(sum(accuracies)) / int(sum(examples))}
+
+
+class FedDyn(FedAvg):
+    """FedAvg extended with the dynamic regularization of FedDyn."""
+
+    def __init__(self, cfg: DictConfig, net: nn.Module, *args, **kwargs):
+        self.cfg = cfg
+        self.h = [np.zeros(v.shape) for v in net.state_dict().values()]
+        # one FedDyn state dict per client
+        self.prev_grads = [
+            {k: torch.zeros(v.numel()) for (k, v) in net.named_parameters()}
+            for _ in range(cfg.num_clients)
+        ]
+        self.is_weight = []
+
+        # tagging real weights / biases
+        for k in net.state_dict().keys():
+            if 'weight' not in k and 'bias' not in k:
+                self.is_weight.append(False)
+            else:
+                self.is_weight.append(True)
+
+        super().__init__(*args, **kwargs)
+
+    def configure_fit(
+        self, server_round: int, parameters: Parameters, client_manager: ClientManager
+    ) -> List[Tuple[ClientProxy, FitIns]]:
+        """Configure the next round of training."""
+        config = {}
+        if self.on_fit_config_fn is not None:
+            # Custom fit config function provided
+            config = self.on_fit_config_fn(server_round)
+
+        # Sample clients
+        sample_size, min_num_clients = self.num_fit_clients(
+            client_manager.num_available()
+        )
+        clients = client_manager.sample(
+            num_clients=sample_size, min_num_clients=min_num_clients
+        )
+
+        # Return client/config pairs, each with that client's FedDyn state
+        return [
+            (client, FitIns(parameters, self.prev_grads[int(client.cid)], config))
+            for client in clients
+        ]
+
+    def aggregate_fit(
+        self,
+        server_round: int,
+        results: List[Tuple[ClientProxy, FitRes]],
+        failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]],
+        origin: NDArrays,
+    ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]:
+        """Aggregate fit results using weighted average."""
+        if not results:
+            return None, {}
+        # Do not aggregate if there are failures and failures are not accepted
+        if not self.accept_failures and failures:
+            return None, {}
+
+        # store each client's updated FedDyn state
+        for _, fit_res in results:
+            self.prev_grads[fit_res.cid] = fit_res.prev_grads
+
+        # Convert results
+        weights_results = [
+            (parameters_to_ndarrays(fit_res.parameters), fit_res.num_examples)
+            for _, fit_res in results
+        ]
+        parameters_aggregated = ndarrays_to_parameters(
+            aggregate(weights_results, origin, self.h, self.is_weight, self.cfg)
+        )
+
+        # Aggregate custom metrics if aggregation fn was provided
+        metrics_aggregated = {}
+        if self.fit_metrics_aggregation_fn:
+            fit_metrics = [(res.num_examples, res.metrics) for _, res in results]
+            metrics_aggregated = self.fit_metrics_aggregation_fn(fit_metrics)
+        elif server_round == 1:  # Only log this warning once
+            log(WARNING, "No fit_metrics_aggregation_fn provided")
+
+        return
parameters_aggregated, metrics_aggregated + + + +def aggregate(results: List[Tuple[NDArrays, int]], origin: NDArrays, h:List, is_weight:List, cfg:DictConfig) -> NDArrays: + + param_count = [0] * len(origin) + weights_sum = [np.zeros(v.shape) for v in origin] + + # summation & counting of parameters + for weight, _ in results: + for i, layer in enumerate(weight): + weights_sum[i] += layer + param_count[i] += 1 + + # update parameters + for i, weight in enumerate(weights_sum): + + if param_count[i] > 0: + weight = weight / param_count[i] + # print(np.isscalar(weight)) + + # update h variable for FedDyn + h[i] = h[i] - cfg.fit_config.alpha * param_count[i] * (weight - origin[i]) / cfg.num_clients + + # applying h only for weights / biases + if is_weight[i] and cfg.fit_config.feddyn: + weights_sum[i] = weight - h[i] / cfg.fit_config.alpha + else: + weights_sum[i] = weight + + else: + weights_sum[i] = origin[i] + + return weights_sum + + diff --git a/baselines/depthfl/depthfl/strategy_hetero.py b/baselines/depthfl/depthfl/strategy_hetero.py new file mode 100644 index 000000000000..65f0f1ced715 --- /dev/null +++ b/baselines/depthfl/depthfl/strategy_hetero.py @@ -0,0 +1,136 @@ +from typing import List, Tuple, Union, Optional, Dict +from functools import reduce +from logging import DEBUG, INFO, WARNING +from hydra.utils import instantiate +from omegaconf import DictConfig + +from flwr.common import ( + NDArrays, + Parameters, + Scalar, + ndarrays_to_parameters, + parameters_to_ndarrays, +) + +from flwr.common.typing import FitRes +from flwr.common.logger import log +from flwr.server.client_proxy import ClientProxy +from flwr.server.client_manager import ClientManager +from flwr.server.strategy import FedAvg +from depthfl import FitIns, FitRes + +import numpy as np +import torch +import torch.nn as nn + + +class HeteroFL(FedAvg): + """Custom FedAvg for HeteroFL""" + def __init__(self, cfg:DictConfig, net: nn.Module, *args, **kwargs): + self.cfg = cfg + self.parameters = [np.zeros(v.shape) for (k, v) in net.state_dict().items()] + self.prev_grads = [{k: torch.zeros(v.numel()) for (k, v) in net.named_parameters()}]*100 + self.param_idx_lst = [] + model = cfg.model + + # store parameter shapes of different width + for i in range(4): + model.n_blocks=i+1 + net_tmp = instantiate(model) + param_idx = [] + for k in net_tmp.state_dict().keys(): + param_idx.append([torch.arange(size) for size in net_tmp.state_dict()[k].shape]) + + # print(net_tmp.state_dict()['conv1.weight'].shape[0]) + self.param_idx_lst.append(param_idx) + + self.is_weight = [] + + # tagging real weights / biases + for k in net.state_dict().keys(): + if 'num' in k: + self.is_weight.append(False) + else: + self.is_weight.append(True) + + super().__init__(*args, **kwargs) + + def configure_fit( + self, server_round: int, parameters: Parameters, client_manager: ClientManager + ) -> List[Tuple[ClientProxy, FitIns]]: + """Configure the next round of training.""" + config = {} + if self.on_fit_config_fn is not None: + # Custom fit config function provided + config = self.on_fit_config_fn(server_round) + + # Sample clients + sample_size, min_num_clients = self.num_fit_clients( + client_manager.num_available() + ) + clients = client_manager.sample( + num_clients=sample_size, min_num_clients=min_num_clients + ) + + # Return client/config pairs + return [(client, FitIns(parameters, self.prev_grads[int(client.cid)], config)) for client in clients] + + def aggregate_fit( + self, + server_round: int, + results: List[Tuple[ClientProxy, FitRes]], + 
failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]],
+        origin: NDArrays,
+    ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]:
+        """Aggregate each parameter region over the clients that trained it."""
+        if not results:
+            return None, {}
+        # Do not aggregate if there are failures and failures are not accepted
+        if not self.accept_failures and failures:
+            return None, {}
+
+        # store each client's updated FedDyn state
+        for _, fit_res in results:
+            self.prev_grads[fit_res.cid] = fit_res.prev_grads
+
+        # Convert results; keep each client id to recover its width group
+        weights_results = [
+            (parameters_to_ndarrays(fit_res.parameters), fit_res.cid)
+            for _, fit_res in results
+        ]
+
+        self.parameters = origin
+        self.aggregate_hetero(weights_results)
+        parameters_aggregated = ndarrays_to_parameters(self.parameters)
+
+        # Aggregate custom metrics if aggregation fn was provided
+        metrics_aggregated = {}
+        if self.fit_metrics_aggregation_fn:
+            fit_metrics = [(res.num_examples, res.metrics) for _, res in results]
+            metrics_aggregated = self.fit_metrics_aggregation_fn(fit_metrics)
+        elif server_round == 1:  # Only log this warning once
+            log(WARNING, "No fit_metrics_aggregation_fn provided")
+
+        return parameters_aggregated, metrics_aggregated
+
+    def aggregate_hetero(self, results: List[Tuple[NDArrays, int]]) -> None:
+        """Aggregate the results, writing into `self.parameters` in place."""
+        for i, v in enumerate(self.parameters):
+            count = np.zeros(v.shape)
+            tmp_v = np.zeros(v.shape)
+            if self.is_weight[i]:
+                for weights, cid in results:
+                    if self.cfg.exclusive_learning:
+                        # exclusive learning: every client trains the single
+                        # width selected by cfg.model_size
+                        cid = self.cfg.model_size * (self.cfg.num_clients // 4) - 1
+
+                    param_idx = self.param_idx_lst[cid // (self.cfg.num_clients // 4)][i]
+                    tmp_v[torch.meshgrid(param_idx)] += weights[i]
+                    count[torch.meshgrid(param_idx)] += 1
+
+                tmp_v[count > 0] = np.divide(tmp_v[count > 0], count[count > 0])
+                v[count > 0] = tmp_v[count > 0]
+
+            else:
+                for weights, _ in results:
+                    tmp_v += weights[i]
+                    count += 1
+                tmp_v = np.divide(tmp_v, count)
+                # write back into self.parameters; rebinding the loop
+                # variable alone would silently drop the update
+                self.parameters[i] = tmp_v
\ No newline at end of file
diff --git a/baselines/depthfl/depthfl/typing.py b/baselines/depthfl/depthfl/typing.py
new file mode 100644
index 000000000000..1fc8e7f9020c
--- /dev/null
+++ b/baselines/depthfl/depthfl/typing.py
@@ -0,0 +1,46 @@
+from dataclasses import dataclass
+from enum import Enum
+from typing import Dict, List, Union
+
+Scalar = Union[bool, bytes, float, int, str]
+
+
+class Code(Enum):
+    """Client status codes."""
+
+    OK = 0
+    GET_PROPERTIES_NOT_IMPLEMENTED = 1
+    GET_PARAMETERS_NOT_IMPLEMENTED = 2
+    FIT_NOT_IMPLEMENTED = 3
+    EVALUATE_NOT_IMPLEMENTED = 4
+
+
+@dataclass
+class Status:
+    """Client status."""
+
+    code: Code
+    message: str
+
+
+@dataclass
+class Parameters:
+    """Model parameters."""
+
+    tensors: List[bytes]
+    tensor_type: str
+
+
+@dataclass
+class FitIns:
+    """Fit instructions for a client."""
+
+    parameters: Parameters
+    prev_grads: Dict
+    config: Dict[str, Scalar]
+
+
+@dataclass
+class FitRes:
+    """Fit response from a client."""
+
+    status: Status
+    parameters: Parameters
+    prev_grads: Dict
+    num_examples: int
+    cid: int
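For reference, the extended in-memory types above are used like this (illustrative values only; the real `Parameters` payload comes from `ndarrays_to_parameters` over the model weights, and `prev_grads` holds one flattened tensor per named parameter):

```python
import torch
from flwr.common import ndarrays_to_parameters

from depthfl.typing import Code, FitIns, FitRes, Status

params = ndarrays_to_parameters([])  # placeholder for serialized model weights
prev_grads = {"conv1.weight": torch.zeros(1728)}  # flattened FedDyn state

ins = FitIns(parameters=params, prev_grads=prev_grads, config={})
res = FitRes(
    status=Status(code=Code.OK, message="Success"),
    parameters=params,
    prev_grads=prev_grads,
    num_examples=500,
    cid=0,
)
```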
diff --git a/baselines/depthfl/depthfl/utils.py b/baselines/depthfl/depthfl/utils.py
new file mode 100644
index 000000000000..8578027acbf3
--- /dev/null
+++ b/baselines/depthfl/depthfl/utils.py
@@ -0,0 +1,111 @@
+"""Contains utility functions for saving and plotting simulation results."""
+
+import pickle
+from pathlib import Path
+from secrets import token_hex
+from typing import Dict, Optional, Union
+
+import matplotlib.pyplot as plt
+import numpy as np
+from flwr.server.history import History
+
+
+def plot_metric_from_history(
+    hist: History,
+    save_plot_path: Path,
+    suffix: Optional[str] = "",
+) -> None:
+    """Plot the centralized loss and accuracy from a Flower server History.
+
+    Parameters
+    ----------
+    hist : History
+        Object containing evaluation for all rounds.
+    save_plot_path : Path
+        Folder to save the plot to.
+    suffix : Optional[str]
+        Optional string to add at the end of the filename for the plot.
+    """
+    metric_type = "centralized"
+    metric_dict = (
+        hist.metrics_centralized
+        if metric_type == "centralized"
+        else hist.metrics_distributed
+    )
+    _, values = zip(*metric_dict["accuracy"])
+
+    rounds_loss, values_loss = zip(*hist.losses_centralized)
+
+    _, axs = plt.subplots(nrows=2, ncols=1, sharex="row")
+    axs[0].plot(np.asarray(rounds_loss), np.asarray(values_loss))
+    axs[1].plot(np.asarray(rounds_loss), np.asarray(values))
+
+    axs[0].set_ylabel("Loss")
+    axs[1].set_ylabel("Accuracy")
+
+    plt.xlabel("Rounds")
+
+    plt.savefig(Path(save_plot_path) / Path(f"{metric_type}_metrics{suffix}.png"))
+    plt.close()
+
+
+def save_results_as_pickle(
+    history: History,
+    file_path: Union[str, Path],
+    extra_results: Optional[Dict] = {},
+    default_filename: Optional[str] = "results.pkl",
+) -> None:
+    """Save results from simulation to pickle.
+
+    Parameters
+    ----------
+    history : History
+        History returned by start_simulation.
+    file_path : Union[str, Path]
+        Path to the file to create and store both history and extra_results.
+        If the path is a directory, default_filename is used. If the path
+        doesn't exist, it is created. If the file already exists, a randomly
+        generated suffix is added to the file name to avoid overwriting
+        previous results.
+    extra_results : Optional[Dict]
+        A dictionary containing additional results you would like
+        to be saved to disk. Default: {} (an empty dictionary)
+    default_filename : Optional[str]
+        File name used if file_path points to a directory instead of a file.
+        Default: "results.pkl"
+    """
+    path = Path(file_path)
+
+    # ensure path exists
+    path.mkdir(exist_ok=True, parents=True)
+
+    def _add_random_suffix(path_: Path):
+        """Add a random suffix to the file name so nothing is overwritten."""
+        print(f"File `{path_}` exists! 
") + suffix = token_hex(4) + print(f"New results to be saved with suffix: {suffix}") + return path_.parent / (path_.stem + "_" + suffix + ".pkl") + + def _complete_path_with_default_name(path_: Path): + """Appends the default file name to the path.""" + print("Using default filename") + return path_ / default_filename + + if path.is_dir(): + path = _complete_path_with_default_name(path) + + if path.is_file(): + # file exists already + path = _add_random_suffix(path) + + print(f"Results will be saved into: {path}") + + data = {"history": history, **extra_results} + + # save results to pickle + with open(str(path), "wb") as handle: + pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL) diff --git a/baselines/depthfl/outputs/2023-09-04/22-24-33/.hydra/config.yaml b/baselines/depthfl/outputs/2023-09-04/22-24-33/.hydra/config.yaml new file mode 100644 index 000000000000..14257c31c8ac --- /dev/null +++ b/baselines/depthfl/outputs/2023-09-04/22-24-33/.hydra/config.yaml @@ -0,0 +1,35 @@ +num_clients: 100 +num_epochs: 5 +batch_size: 50 +num_rounds: 1000 +fraction: 0.1 +learning_rate: 0.1 +learning_rate_decay: 0.998 +static_bn: true +exclusive_learning: false +model_size: 1 +client_resources: + num_cpus: 1 + num_gpus: 0.5 +server_device: cuda +dataset_config: + iid: true +fit_config: + feddyn: false + kd: false + alpha: 0.1 + extended: false + drop_client: false +model: + _target_: depthfl.resnet_hetero.resnet18 + n_blocks: 4 + num_classes: 100 + scale: true +strategy: + _target_: depthfl.strategy_hetero.HeteroFL + fraction_fit: 1.0e-05 + fraction_evaluate: 0.0 + min_evaluate_clients: 0 + evaluate_metrics_aggregation_fn: + _target_: depthfl.strategy.weighted_average + _partial_: true diff --git a/baselines/depthfl/outputs/2023-09-04/22-24-33/.hydra/hydra.yaml b/baselines/depthfl/outputs/2023-09-04/22-24-33/.hydra/hydra.yaml new file mode 100644 index 000000000000..7e940d030577 --- /dev/null +++ b/baselines/depthfl/outputs/2023-09-04/22-24-33/.hydra/hydra.yaml @@ -0,0 +1,154 @@ +hydra: + run: + dir: outputs/${now:%Y-%m-%d}/${now:%H-%M-%S} + sweep: + dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S} + subdir: ${hydra.job.num} + launcher: + _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher + sweeper: + _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper + max_batch_size: null + params: null + help: + app_name: ${hydra.job.name} + header: '${hydra.help.app_name} is powered by Hydra. + + ' + footer: 'Powered by Hydra (https://hydra.cc) + + Use --hydra-help to view Hydra specific help + + ' + template: '${hydra.help.header} + + == Configuration groups == + + Compose your configuration from those groups (group=option) + + + $APP_CONFIG_GROUPS + + + == Config == + + Override anything in the config (foo.bar=value) + + + $CONFIG + + + ${hydra.help.footer} + + ' + hydra_help: + template: 'Hydra (${hydra.runtime.version}) + + See https://hydra.cc for more info. + + + == Flags == + + $FLAGS_HELP + + + == Configuration groups == + + Compose your configuration from those groups (For example, append hydra/job_logging=disabled + to command line) + + + $HYDRA_CONFIG_GROUPS + + + Use ''--cfg hydra'' to Show the Hydra config. + + ' + hydra_help: ??? 
+  hydra_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][HYDRA] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: simple
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    loggers:
+      logging_example:
+        level: DEBUG
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: simple
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: RUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.mode=RUN
+    task: []
+  job:
+    name: main
+    chdir: null
+    override_dirname: ''
+    id: ???
+    num: ???
+    config_name: heterofl
+    env_set: {}
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.3.2
+    version_base: '1.3'
+    cwd: /home/peterpan/flower/baselines/depthfl
+    config_sources:
+    - path: hydra.conf
+      schema: pkg
+      provider: hydra
+    - path: /home/peterpan/flower/baselines/depthfl/depthfl/conf
+      schema: file
+      provider: main
+    - path: ''
+      schema: structured
+      provider: schema
+    output_dir: /home/peterpan/flower/baselines/depthfl/outputs/2023-09-04/22-24-33
+    choices:
+      hydra/env: default
+      hydra/callbacks: null
+      hydra/job_logging: default
+      hydra/hydra_logging: default
+      hydra/hydra_help: default
+      hydra/help: default
+      hydra/sweeper: basic
+      hydra/launcher: basic
+      hydra/output: default
+  verbose: false
diff --git a/baselines/depthfl/outputs/2023-09-04/22-24-33/.hydra/overrides.yaml b/baselines/depthfl/outputs/2023-09-04/22-24-33/.hydra/overrides.yaml
new file mode 100644
index 000000000000..fe51488c7066
--- /dev/null
+++ b/baselines/depthfl/outputs/2023-09-04/22-24-33/.hydra/overrides.yaml
@@ -0,0 +1 @@
+[]
diff --git a/baselines/depthfl/outputs/2023-09-04/22-24-33/centralized_metrics_HeteroFL_iid_C=100_B=50_E=5_R=1000.png b/baselines/depthfl/outputs/2023-09-04/22-24-33/centralized_metrics_HeteroFL_iid_C=100_B=50_E=5_R=1000.png
new file mode 100644
index 0000000000000000000000000000000000000000..936af4e679475861e59881fc47044da681ddc45c
GIT binary patch
literal 27604
zx~S_^=)D;tUXxm6J4EW}+r|)ZOoh0_w0W^+B8$$Z$omf}#sSA3y_t$+tv-{9)@d4` z%95YRSpmgMp7Z{_w5O-w`Sa&f3il5W8LwQyproXvZA%;kgBJ#(nkD9}1{N=_#1VD@EaUV&R=5OMIH+l5tF7L} z5O4IL#vJah;vq{T6=Zim+J7_j8pR2m=8(}%M1|45yH2-m-;Q!i1+W9VnON#%FGF># zQQY@Z5&6I38wa^868aBcBoWJA3f{gKG-8(^SolP}RtOQ)F9&`ZDj`REeW8COx(<(# z<8!^Mq)|yp&w%XnJ=vcX_-#(P=(aX4)0Qkm3_?o+w<#w2C{U{jI0r#@Nhr-5dG*dt zUtUhm;5RWLp)43&^!ucIb_T6c7sNRlQF?lM0tpn1ODS)}fT@3Yy&uNtLKcv=HzO0%JmRNoU3rMl~kH7Z~mB>UyE47}*gbR$d zL4kpRRE*4L2?$7NXqMmHOSJp-UPJGT=XF8Hkx(5li5W*mM9AH`g###yWARIz8^q@D z*qZ|h<9M)_f&mTnrUnHCp+Y9?i1mdJg{aO4{wfZ0)g&RO2(?sEeWS&JV#bY)4J3u@ z!}C-jur+Du>Dl<4E)A9nyG&rW9|taSedlI+?KsS{F^3)1U2@BWId zlUesEbO>kuQ~g(b+y6MD!2v}dB33_E69l{qaLG>~hySvnLcN@jPpsKy!7Xuxi>p(= z1HL)%_2H-K`j>HWPm79*sL0KY-4B;PneVQQLMjj0G={p*?Y(9)%L^$BP*X)CzZ}G# ze6pYeg1|$%1$X~054@vMiHWvsi=c(W?hxxAEHcHqd=777V#0p!((hFuUDtL$8J+*f zA&L~GFwb4<0h5RCMlx5ZAu1J+W}0ltDPgHMk6g7yQip;%;rOZ5&gc$>!y zY)YsPr5ql2IaokF+6DTZpT#_ProqX((5e@om&b9Dg5o{c0kf(P7TC8TLUOyhZf|#p ztqS)iO8QmaANgE-Fdj_&tHAKfFM#Zep4Y-*=YD8wQz^G>y@aN&^t8088vv8E!2jj0 zhP8QexcUKXIokk8#rjP}jH@BVm4g=%{evS7HBs4NtlH2IR$7To!qPxnJM-<^x4+;S zYdbm?D5f&#b_-)^U&!E?DcQm{D^yHzkC`bUBUZWoIxyCisfP8MNOKivH=H->`4<}51N1`nA zI>83a)eB~^iBfbx|y(Cy;%C}Ux; zJy9CB2B2v%)Wl?0)U7*38I;x$$oa3hf!gOje-=hzFEIB5pPc9z7#Z#Ij5RcZQ1e=J zbTsOs>H_n!FFZ_uF2%=*tN_JIG*W#o{hu31XW`KC#{QHKcUNwGeXw0%ll(u7Qh43j zM-TMy>bu(p2G5|YtR1US6L@M=BKnWUz|VgH&P)rsN=hLyIN$;Gg1k$7-**N;i$Ic~ z!!rnCUnn%6XOisJzKk88>lEK4#KgqpwEZ&Xw@YJYX6EikgkEYl_*A-c@35;|*D)^0 zlC7a8wL5p7z|(JCgtRsA#j9|*^Rl=%3ALbo?fAD~bgeE6J0t4i>)0SF@mRK8_>}%1 zWnp1pAFMg}XV3O*IRFM#&{R}B1Lx{`ufeEp-C({Ti+D3bYJg=dHu;8$nmYkP3Ikj; z^535}z^VnaqWJ02rhpOg*|UG)uLXtiO7$I9s_Or;V}fTm8DDes?p?(va44Xa>(;F& z!<{d{)(#p=M?eTMUEeMY(`#S+?`rJWLJKE1~Cz(K47K%0u)b zJ2o~Jl$gt~2KMJz7%DMe-8mPVCh|OsXvFL5x`6yMwG3Muk_9}&A$N=Tn{4m=x`ON8 ztOcxEOb*#mnH(9?lxH1}^U9*A|I)g zpGhl&*YJLK2tO_)P%6(li>%K2hMf8_lHJN4A*QI(#3N9jn3<<`ZhaAM(1R0bndmcj zY$m$nYy4JV6g+0qL-9C!hcm(O8PccLYzF;~w)}$tFys?=+N(eQ_7F_0hXai?ub@lM}!M}>6^B3xFN zeX6L$pqFRa*30jNaU?b-f({!Kzh_DK z9#eIA4BxAX;jZ>8>8{e(6w~tjggoEmBK;w|d=Kttnpk5Gf(JjScPId5^ge-r}-=T{#v{KiwCm&!>{K z!kYMEONr?jrD4h_tjSyIep*Cu==*|ynoFY);Q3)ucopUom5vq)lLsi%-=EV=IT(0b z$v=R}5sX0}Ept-5EavN0^&)jW?0pB2$|bNEl=Xk2K3utaRU~+~q~_-S;I;hu63P(q z@3qGr_0m87p=n@P27_Va9G21Xg&*oV<6Y~#M{LDkvrBJIZF{>hK3AP6(Zf=ZMFJ;T zJHs>LqSJ}pIQQ^|6I@Bn+$en>EjqH6pFJr4yZ9RqsCO@#=?wXID@KK>SpRsE z(4;mJZVB?u#)CTYfOE(k$tZr!%FmIyiYw{kR4VJdvfAwZEh3}xN*?)wi>DSvxKS@$ z{*I?fOWx=`2-$UQXSv(fL{_$oaWo&W(g|er`7>r(DX-X$e{r^0S zw$>&t3EsZLLDLMdIJVJV6S3K)(vm8N^dw}gk91bz70m$LmFabjr$@vhlx}c9QSX#w&Ts#o>Rl#+=%Z+@~ z1AKQ<{|iPxeyalH?|?bhk2X7@ zG!>Tfe44uGt>dD+;i8tcMat_+D;;mClaSC{PvtL$>*Ep-rvX*|AO0Jf_iy`2H8oPu z%|}H?Yb?$hdH=zI`53eMP*P8em%b$G_pEqbNbu#^jRPBG+%-_(ckV|dojg!ZfX6us z0ZW*!{{HGHRSG!2YJVmfdZHi9PT5sem#3$v|IW-6T&CKDF*b^mr4>4VT zH-ENlsWv4)R;`^qnO$M6%Mb{Ju_y08PspR7tS@>hD=YJWg%X_z1dJPhQ~{XhaY%^R zDUeI}GcoU1qAQIyL^)cUhHha6g17 zLJV5+`#GjxwBbL%98H~_{LSO2=nEwe?<-vbC z0iSw)Nd>%@@%i(>{whG2_*C5uhM82maB)&9Ff$HM2o0nL*RvZR~l=G2z zfnuIZ(mFbqL_L>6(TOOS!utV|JU9^Cn&+``aNYo<1KQcEdd68e3TWxQy*;W$uemM^ zaP&8V>pA5VozFO0(5?9nKi$tCJbUgO(4h&_bg6YLZ!7ES(=}m>Vr6Udlnfg1raz4h zV(+IG?+G@i(ZepOG9yulq?K5M#u_y*CGc7j{PvzC8u6YCN9SVzDZ4A;-2h|KCC+=H z@08=sfE)LQ`AL)>2BeFHJUvJA4DJ~VLG>%>eOhb4jGdv8AvZTt!3UsEwuWo}9GSXF zjE~?uVK8sJ9^rf-ptYe--EH0w^*uU;r#Y6F`;{&sW=>vSeeRtPMPTKG0ir+XSOm3e)8`9*H2}KdI>qK7$Lx_)*Go^e$aTC{I2qW36dMooZQdr0ooo_hG?5; zqIY>*Nfhz}628Sgq-LDu@^F=wS3$S>&nB)iH= zK4B&Qq>|Sgbs5`?z|w?Onyhv3;$dq`UtD^?rJ_EIc~;ux3%YuGp>1t#RE5@7R%Reo z&^D1qi)1E^mb2O!-7}k_Cuu%gtWC!uSa^5vXG-pV_5~UqV+qo-$o|Jl>o7+G6wxQQ 
zc^VN{Y;e%Iqfa28D@!Y(9*ToVe`Zkl9%ofhptSS9OT``W9r@WAg$ydCR4iVAh zK*dM!EX7uq0AYA#Tkvt59y@)E#J&o0#sSE5g0Z;J$4 z?72Mj1g2!j#k^KzfyCzlr4y`VP0J~PvIdkC;RC#{Zye&>CCu@t&(+^VFkxctpzaay68v%MR*_YKb2#5?O9_%g zmrSqU<45dxw?-{0VL`|N6BmlM&g%{2ds8pfyFTc0*$ z`N;SghnqWD+HQ2U=_l4_iwGn85YLqnO7!``ouL9R4ARhp-wk|YwLsAqzJn(z^pcca8<2g`fnzWjmO<}lmU&Q z2!?Rgt-=+~jw1%i+OGU)dB72e>{H1*Uf@dM+lWi8>-9Gtaeb79l z?2|_f1AfeySXj}?$#ytmW22UEzx7a0m62624Yclz}$uXTnbA_r}j`3d; zx5b(Gi0Ppnqdki(_I?NfU#eSy6t!ZXg#ylUmN7k3q9#Afjw+w4rX^Oej@HKfnXDHZ zUd(aUi57lo^70r`yCZJs&V&-NT&WVONca4FVR7%cCw|`tWT|EaHvU#lDfp+ ztXN^Er|0*@x>~nrMe!j#XBhhub{da^v5I3n>Jiv6p?WLRWmn{8Hh z>^m6l%J8X|ScZ2QZr9k@>05`x%UAN^!^j14X#&1)5>}m~zGWXZbptoqN{S&S*e9Bg=+0f)n592l>m;1^WPP{E+v8{lVY+w?~*+RilKa8=Kg zW}MM`Sb{5<-69#IoONM|z`JAGSc^{jdd|HT&$a}u&ib63xyx`Qbb3{y!YAFlJ*SNt)mOhn4S3*qgStjeNa8eL0 zfbn|2{=mY531z|1Uw4Ci7yB~stEz0}Pth8Vg8}B7;Nhq6rV)HLLQgX7sjog!Fx;J7 zrd`PrF?ERs=cwmY^Dw<|(*{Y1j}HNYzt8(C$Z_D*-jt455bPJz4Y|@CY{Tl(zf2*l z`k2S>F%)`U?k+*MN}Ao)p-&kuH80z<7d=(Pt1Y=tUjFGuxOsWcgCQYz{Q&rAaP{)R zWkv9XK!Fu8J=qW|7uJewVcQdcr1krSXncA15tZ!sRhuqE7~W(E#M; zVj#HTg&=PrWuq)<0+0FHS{*T3b7R7^?uJJA6scVJQ2JE5BuleeiYB?-BAAVh1vmdK zj7$-Dt``L>{t)sq*Qcey5r@&z%cAc1FkdT%iPdS@@W&bTS%5jZ)^y@ldo7jpm;1ag z_K9%p@SAAm)UX}u3Rt$6?uuqt`l=F^p&JIZGo!o2_+*28X`mPjpr(90r(su?Dz&=R zhmMjo`*z{(0RfG?J`18%MhgZwhK;c@;?Lfkk%shknL3V2SrJBxmm-B%mjf{+JH*OJC6EMn_HS%iE0Da6dl&Zn9N zn3vX`+=iMn{tb@~VATSQ%B^5tX{aYH&$`#NWHteS&P3^1qRa<+WjbTO=1LYzGZTgb zZ>4W1PcqX1A*oC5V}nLfXjkRmZ|L>srMhIr($>uXY)LCG_~QKM1}ga5{LDN9I>;li zt359;?w^w^P)W}YW2KVu6t9-$TRbsQNV^`85O2LKBA9`nN9cP#lnG37omLqg8!){N zufT!srPuo_$cR^A+A*(8Cr>YEYRUqP7dRp!B6CCK?B~hI03pnI9z*b%!f5FL*qTA$ zMqdsAaE?qq;&R2?RWrAAkqw@2J% zCi~)@VDxP~Uz`f@g!k~kQ-eJMZyyo$Sa8?31)1nThb(5YY4S`BYc!9y`Scv7J0szJ{_N!M(tL?^vNT9OKdu|4c84*;uzO4Ixvaa-kP z)ghL1ze?Zq#r#Ppj68>#$ZvZd9qsl#-tON^Ki#K8Q2!9>wAqE+8`JZzCJwJeS@mrP zPy>FkrlzLPnf!23PvHG8{0>7~8`Y0xF2@D;ymXx&aZ4-O^q%15 zJ;krSvR_Q9_xJ~v^!SL)IBN6OCkU2o||1dB*_uPM{FyNRSY#D%nHg)0Z)1;+EnGFdtt>rPMK)b$_FW=mZ3?H}&s z_fd;~!Tpf1&aSi*KmAhL!YA*HFbM0)aU9=3#|;DL7kZmlxww9SCd~B^QTT6f$bdJd zUdY;}H zB}1uDQaDKAn8#$E$7HS)QF8K{>o|>Xrb?nrx!DrQC9G{7AZjgoy5pGO=Kz0S&l1q>dG)HyDh#7yvH?Z87TE9ApW8#Rd zan+`#42RVWDNgpb`|6l4=}u2l%dXnS@Gq_(*%HZgk;FB~>4yjFCi+zyj(}!N2&L!0 zo=}~p855lW8h9R{w$A=|#8EF+>vQqb^pdB72IN__0-jv`F;dc{C?!_rkd&@+S4|fm1ix#)VEG2fo7U zNn7QtZ*Q`XREK#=v0YKt4S8yR>98y%<|UQEuIl1N3Wh3CWsyM0vG%PEHknLWwUl-> z$=|OJcQrQ$6}pWq+xBgvY{O;sH!d%;PMo#r*b`6?H0Gm_vuJ#d5JzK->QUHnxUAxu zuYZJKudSxtv*mk!xj`18V!qV{S8Y#<4o`*EUN-9}-+%k37vuR4gcmnb=GN4ySdR&* z{A~1I>x8{j%E%7RFUyr!6x~~s5}pmy*hM}(ZJzau_(49-tzi~1JBEC`<4zofLs3>o zIZe?k$I0IP&RF>+ziy!<<1BwI)y%BXNR_3 zUB#53C-+C$Yi7gK%`v@R!k`Dbh-C@ACclvTEp_{bbXKgK=6n`6dgse67mDk^)!0h+ zTn5?`E1s)wtWK3ijKLjZscXYd4HZXUGhAjwJNU3|#$X`NIG-Jm%H}-=E&&#ynuxUaz*N z%AKCyz1Cfz+-au+N8WFAfRJB$t=fCohh$G_D?R#t|Dkm*8^Dj?5T4xFN-DERR7#8M zK&NSrgay}8ht-$H%K1gT-p$$%$=*`8KE6vd;p?>afc(mMZ^C-uq9%rX>0Q%y>%6o^ zLZzeY;`qR|68Y0_%H6p%T=y4X0&yPtbbo~R%OACFSnjl)BHpi}#(ylx^sA!BU)@dbNr<(%to~4YOnU&!}E&biL2{yD_c5i*wQM%`aaTd0kdtOQd&AjWqXK zRqT}axhL{pRv*vTkG5}raw#vrBko^=wU6uO{p&0CJBt27Ee(Ay5MX0ktH^X;r_5RX ziouNcWLLbIRL5Mm;%f_8k5off9erghRv^K~fU)Zm7Zh&2D;Mfxl2qbI-{iaA5hPU9 zKFyVbZK~Wn`$uFP^Q3u>^?u?mhbIJMT=t`!o2`4IShV;)k0<3H-mRBUGR{*xeJM0_ zF^Vd}c0SPT#drdXk>@=o=o3$t<;}Du*SW`45`YGma`LkJW4l+|0xYxNM7x;oVh!R< zXC>oO1t0B45e{AMS0(AHH`KD3k&8~N$g2x7@cFf(mz$%{@wY*R2!9Jmk`^cT1#9-I#m%>ra3_DxlbA>TenCP>A=01wOtW0g zag}`)Z`j<8`RUr{xOz*JJ%!yfS|)1wyOwGUxFXd&5i#Pgrs}&CF?v}le6r@7Td+UO z*LG?P(`k)CW`ijQY<o*tj*YJ7Vt-KOHkk;9)Tyr5d-LP_n1$}j@-3E>fwZ@x*lM^*!zp6&AoH&E- 
z*^jv{Wnlo=T~v6zIxA;`dOz)}iKx72iH~J(YPHFeiZR~JT#cu8qP^n7eePN8?ncy1 z0QT?-c%Qx@WT|O155sl~82h|B&0ok2xvXxLVXxhU{835;b$D=Z4@f*jnHBR{&Z{lLX=9dZ(P ztPbbjz{;gZmFZA>Y9$?V`@~)mDP!r+QJI4i@n2~%o7I0y?`eLWL}+$Rt-XPI`Wkmv z1yLKUVoOwPn2X*p=v<56uuD|0r*e=co*{1&=U7eYzzwR2W^SPV29@r0_39y1hAGA) z`a|DD>>hnsD>soB6{)X~Wq(yk&YOkK5JT<+T~tQBFRe*AsJ+doVr=$-j=?g)%9FZs zIV&vWLgEH;d@tC>SIykVHgQ3_F&gfcW-@UwzDiO(Y3Z#re)9F%VJc| z4KJ>p$u<>q&zNZQ+2>&s3;arW-Z)UsdFq?UnbH3#!mjxC3w>Q$aSIrfBIiLLx=Y*g zd+gZ?aeC!Phr9I4kF}R?&0{bQOfh_rIH}K;{RJO!POhq^T(HIFcBZUNTxCCtkxt4A ziuu3a)BQ^qv{BZciRhY;hbQ(xC8fT1$$ej5)$V#fx3Jfu@-27K2tHptT2f}s_9;dU zEiWp37`f<)0uBDuCKySCDDP=`TqHe)QnI8~W!uDDN&V-Qb6-D3dc1y^fu&!(inlnr z4wsZY-}NU?C0Yf+%{=oi&PBiP$cnQ<5alS|*2R;JY1hGJ)Xpra=t@6hrUZ?Av&@G+ zY)cZ$fvh5K-IhP&RkLWNzTJfsj*&7;yZYmc()a7%eqv|%63>oeu@~K) zZw0k;s@~12ztp3DZi49LJL4{;6%TSGQaX00=65C1VM9sj;GS9UGB#*r`Sz5w#PTBF z&V_d@^v_Pi3zVe@xee*FNliKplwDt67Po$t<{4IPw2Ak>5Y4vRl;zkVRR9)N)o!7< zC`8jYJJ)TvI|8jnL3#O#hfZkJm7)?w`Vq5(s0?01X^{u=(-XIi{}JJ-^h@LI5bv6j zaUR?|ifmzmP}eZWou{G#w+>(I^+JLbHh|*AIrk`)1Iv!b{VRp`YRLH+A8UYLsYH3B za>lNSXYx)Su*D2B!&e*n6lgH@{1lU{aomV+^Og6z-y-l7 zIwmkppAAlPkxQhqKEo84>U-hPYv{9@!@A!YmR>ZrDZIy(3y%?^_9aGju;^eZJT%yHKbgGob?;3 ze4qY3>vMiof^^LXZW@F$&ir%@-%~)2qh#1Sm${28|0o^%S`K;;s*Td7)80dIW6c{W zhp#+)<{9nva|9EzYXZs$tb*A3>ZPa}sNZaU_vI)mKjEz zAQ1aPZsHD4OXCILemB$z^W$|6RuxO~( ze}VmTzG0eL=T}4q3V=csA~`}sF1-gWdlN0Kpv}Ae>qQ%*%sblK4T1PLv%9C#3jjeMoirD#R zC+zGS8E$GC9jq#t8?Sgu%SrRkFRuHbLWMxZ@Pcp&%4-Tes7_b;Oju0=*~|r5lMkjL zAB^dPJM0~^pD{ri01)yVMH=3mLPI1$^5fXH?pLqufy-nO^yRdtXAgGTrfA~!Ac}gX zt}*)txbOrBLkeJNP+2O@&p(KBXoT^@^Z3^R#CX*?wFr4%N4hJ%yZDYB7E-p2)?!&SERki=sbam6GAiEPXB%KcC0&-^&%R4=GQpU?(sW~> zpJ`lM{2#U7ES|@zae64~AW|485WavysEqMMUcL)zHYa5#2M6NG0V8v|m`KhrtB`)M z;=AALB5)fmV`eLj5pI_2B{f>2f+g|^+;2CLbai3H-z_IEKQS+NxDexw5Srf?=NTyI)3%_@atftx20P__U8dQ9k7^+G zkA%Fl&`3v`ta{=uVPS>Zd=hRYq~&eZ0Qgxov(i4URbU{dvl zP&O^#yE18CBRCDu`eRG85ADyY3IzoPr9QGSGv7oLR&p$j2z@d2Xb|NPJmhG(?@v-( zjoY7@5?evBaZH5UjC5>tGv^x6{SA3D?8QdOGvHP}ErgL-hH#akQTzQUK&as zl<&`~?DS3q$_fDG6V!@~6a*TsJ%4TuDLpTyu&GlYMz4`v^jUQL5wnpavO*GfAjgf$ zaA|U$fjxjOOu*{+FXBcNW&~J2XN-y;QxT4Vw|PA-i59B5$t8Osce{C$5o0lS+NMKU zMP+c}qtRk&NeA4^ke8BWT642H5>xg7hR?zS!?|%|2=ei=A22g+iK#HqaWNX}zLzk&Ke*4aI~=f>UE6j3Z*ba%V5tG_X+>>& z3_VH$R*|rO3{airc=uTHXE7qqCoXr~|1JuvNt7L?>57;sIFyhrqf633)r|k%Fm(Er z9>lg578VgkfHzmU{Jmyx|d8D&U&z`M;Z#jAn z=uFBIIty?AoSreanW3X*glCO07AfLnA?QE-lQ6OJlNsHr$-ki%gu@Q)N7^;l7|nih zEZk>i7Q2#-%>?(hv8~_qujepAOphPWJ%_$)^4R_~H5)>F`T>n_fi`>;Jr3`kzv_T^ zGCg)Id$8Ow41b(rXlQsk1LgWW+>a_bmwYkjnBoa=4xDkySZT71t+R99V546h`NfzH zO<)!_48=s!n-!BPCnYX^5bGHiTw`%1CG#v$r`QJo43fvJo6&dpynfY*7giXNd1MBL zQ2cSXrQAx&?WOfHhsUkyFm8{cxnl(S?Eo6aqmA<)j3H}m5Kcj|GOzAK?Y^E8TU%`{ zt=yc{b3dD!nGG(uKlV66X_M=vBLfsiSk#s5>}CKIjga&xtp`I%yQLRx0=|>{Ihu=g zD8i;Wrr-gkv3j$DF4e8y=*pEVc?ifAfK}Srbk!VU`vZ8m9UxZ#g7i_;=}qy}N_KSN zjGcNrRO693G&PAHP0Yc$b}v{W2(3vjah2P^LH#tf7sVyVC3k08XDbto<)%uc-+yP4 z|60lr3H3i9%YXm%+EYXBw~t`-rgk!O-Z(@;>=vYt{gO^+LC%L(V9tTZ_XDy$kay|? 
z#|yQ&R9E*VK7rcJ7}~8^035Ki42AuZyea~i@Y#{9IMo!YfI!Iu2<7FhK)wUP0{r>% zWJ-kO8q)v+CDnh8Lh&-N(pe?lkr7TbhjE+QOu z=hs%qW$~NWmpRm$LBt2)-sTq-ZJqs&`+NJjFURX)+a@d%LF5meY%qNqM-UM=upOC2 zm=_81jb$KuYV26!5+xdte+a&KVsYpgeXA&?Xt$EcBk{{ZaUt!?jky7>B@OZ(7mDUp zHVt$T$x9OudHf-vMQijjeE3@_WY1RV($hXWi>1E8@h(c!?30rO4kCKx7jJ{P6v2nJFmWj?McRf%u0IFd*liAfI8er{Acv4xz&;FGDj*#t07B{!x^0TvY>}H6$@jz6DXD0{mCSViS7OfPe<9NeLN-7n?|swmS^Z z*icy{hK7cW|2WzhJU7WROa9p7W-wOEfapRPn+}-*Ql<9*w}xOnK-tk}{*k;L21ALA ztmppo6W=iC!!dTh;J`tEzQ*REhBOZ`Qb-4iyfUD1xW?mX{nGdOA76_*KOG@-i^dDe z#ZBMO6%D6^ygxXbw`ylI*)ASPft-kJXa9d*AQivFk#}fUt0rS+8s4x)(bG22D&Auk F_#a|`V1WPt literal 0 HcmV?d00001 diff --git a/baselines/depthfl/outputs/2023-09-05/06-03-04/.hydra/config.yaml b/baselines/depthfl/outputs/2023-09-05/06-03-04/.hydra/config.yaml new file mode 100644 index 000000000000..5e4f045aeddb --- /dev/null +++ b/baselines/depthfl/outputs/2023-09-05/06-03-04/.hydra/config.yaml @@ -0,0 +1,35 @@ +num_clients: 100 +num_epochs: 5 +batch_size: 50 +num_rounds: 1000 +fraction: 0.1 +learning_rate: 0.1 +learning_rate_decay: 0.998 +static_bn: true +exclusive_learning: true +model_size: 1 +client_resources: + num_cpus: 1 + num_gpus: 0.5 +server_device: cuda +dataset_config: + iid: true +fit_config: + feddyn: false + kd: false + alpha: 0.1 + extended: false + drop_client: false +model: + _target_: depthfl.resnet_hetero.resnet18 + n_blocks: 4 + num_classes: 100 + scale: false +strategy: + _target_: depthfl.strategy_hetero.HeteroFL + fraction_fit: 1.0e-05 + fraction_evaluate: 0.0 + min_evaluate_clients: 0 + evaluate_metrics_aggregation_fn: + _target_: depthfl.strategy.weighted_average + _partial_: true diff --git a/baselines/depthfl/outputs/2023-09-05/06-03-04/.hydra/hydra.yaml b/baselines/depthfl/outputs/2023-09-05/06-03-04/.hydra/hydra.yaml new file mode 100644 index 000000000000..ffb228743fe6 --- /dev/null +++ b/baselines/depthfl/outputs/2023-09-05/06-03-04/.hydra/hydra.yaml @@ -0,0 +1,157 @@ +hydra: + run: + dir: outputs/${now:%Y-%m-%d}/${now:%H-%M-%S} + sweep: + dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S} + subdir: ${hydra.job.num} + launcher: + _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher + sweeper: + _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper + max_batch_size: null + params: null + help: + app_name: ${hydra.job.name} + header: '${hydra.help.app_name} is powered by Hydra. + + ' + footer: 'Powered by Hydra (https://hydra.cc) + + Use --hydra-help to view Hydra specific help + + ' + template: '${hydra.help.header} + + == Configuration groups == + + Compose your configuration from those groups (group=option) + + + $APP_CONFIG_GROUPS + + + == Config == + + Override anything in the config (foo.bar=value) + + + $CONFIG + + + ${hydra.help.footer} + + ' + hydra_help: + template: 'Hydra (${hydra.runtime.version}) + + See https://hydra.cc for more info. + + + == Flags == + + $FLAGS_HELP + + + == Configuration groups == + + Compose your configuration from those groups (For example, append hydra/job_logging=disabled + to command line) + + + $HYDRA_CONFIG_GROUPS + + + Use ''--cfg hydra'' to Show the Hydra config. + + ' + hydra_help: ??? 
+ hydra_logging: + version: 1 + formatters: + simple: + format: '[%(asctime)s][HYDRA] %(message)s' + handlers: + console: + class: logging.StreamHandler + formatter: simple + stream: ext://sys.stdout + root: + level: INFO + handlers: + - console + loggers: + logging_example: + level: DEBUG + disable_existing_loggers: false + job_logging: + version: 1 + formatters: + simple: + format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' + handlers: + console: + class: logging.StreamHandler + formatter: simple + stream: ext://sys.stdout + file: + class: logging.FileHandler + formatter: simple + filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log + root: + level: INFO + handlers: + - console + - file + disable_existing_loggers: false + env: {} + mode: RUN + searchpath: [] + callbacks: {} + output_subdir: .hydra + overrides: + hydra: + - hydra.mode=RUN + task: + - exclusive_learning=true + - model_size=1 + - model.scale=false + job: + name: main + chdir: null + override_dirname: exclusive_learning=true,model.scale=false,model_size=1 + id: ??? + num: ??? + config_name: heterofl + env_set: {} + env_copy: [] + config: + override_dirname: + kv_sep: '=' + item_sep: ',' + exclude_keys: [] + runtime: + version: 1.3.2 + version_base: '1.3' + cwd: /home/peterpan/flower/baselines/depthfl + config_sources: + - path: hydra.conf + schema: pkg + provider: hydra + - path: /home/peterpan/flower/baselines/depthfl/depthfl/conf + schema: file + provider: main + - path: '' + schema: structured + provider: schema + output_dir: /home/peterpan/flower/baselines/depthfl/outputs/2023-09-05/06-03-04 + choices: + hydra/env: default + hydra/callbacks: null + hydra/job_logging: default + hydra/hydra_logging: default + hydra/hydra_help: default + hydra/help: default + hydra/sweeper: basic + hydra/launcher: basic + hydra/output: default + verbose: false diff --git a/baselines/depthfl/outputs/2023-09-05/06-03-04/.hydra/overrides.yaml b/baselines/depthfl/outputs/2023-09-05/06-03-04/.hydra/overrides.yaml new file mode 100644 index 000000000000..0b957ff61e83 --- /dev/null +++ b/baselines/depthfl/outputs/2023-09-05/06-03-04/.hydra/overrides.yaml @@ -0,0 +1,3 @@ +- exclusive_learning=true +- model_size=1 +- model.scale=false diff --git a/baselines/depthfl/outputs/2023-09-05/06-03-04/centralized_metrics_HeteroFL_iid_C=100_B=50_E=5_R=1000.png b/baselines/depthfl/outputs/2023-09-05/06-03-04/centralized_metrics_HeteroFL_iid_C=100_B=50_E=5_R=1000.png new file mode 100644 index 0000000000000000000000000000000000000000..3c190dd6bfd1826e79058b8faf518ad06b94178f GIT binary patch literal 30212 zcmeFZbx@W6*EYIIg^h}UbW5l-(jf>4B8^HnlG5EFA`J=>N{Wam-5nw!f`lO5As`J> zQfJ+MpZLx5ywAKd?>T4Y{BfLd(9PcW{kcD}*0rwdT02ZtS?&TpB|Zv;x*&g7MjeI1 z3_zhUrqAQTEBt-q^YBC1<&KuieS32kcVj0rl#;QFgN?n5jg<+#o0*fdmA&0fPC-r{ zc6v(}7YAn%E-u^ucmt=slLglp{`nBN2%f`TZD$nfk}>iZBS+%B6$<6*C@*vSfk(>P zgs)!w?uo=ES!h7g>;>Y12YJWY*`CQytH(*v$+~La;w2J)9{;rF<$oP-uJh`1HJ`0W zDo4J?Gv}Lwe?+K^M^m2SW%fmBYbW79YKzJVNLkqraS}?TzPq{8(Cz7L(9<3i(*C*i z$ftWTWu<0vXmK6CguN5t=P}u$F(MI3?@cKMtp8XR2cjplYm$f z{+o&V{Qv*@|0~m?7+Q$Vc-6Vk{Nk!yZ-KrjHV)2VjgX9tj0*;PrU*b< z=rxuu=1G*O??%xUMHOX2VK?ti343+Zf-*uo zH!qLi#ful$I5^BO;d)uvJyZBBfe#)&WME}Ihlhu^zW5|4=*9Er0eN}sa;6#@8ku={ zU3=1S=d7qH)`0eC8fhh^3z=`<+V;rF$-ONq>g`MV@Zm#fWTea4@1?%C+J!3c-gyPT zbkv`?ky4AdWn}{=&P%-vhGfWD$muw~g%KGZE|sg1ulnG@BkzWsobx<9Jkf0gXteav z-kPn1i-z&C)$s;;V^-o^15sG&POIabBYYM!9`66<%^Pw78*<*8Hz#_k^*A4yn={_J zHEC~xTQicpH}maMgyWENZDf0c06%eM;a922H$-n8BRW;)LKG@Z*P?@D=P;X+=}3&t zM`<5CNaLZcJ$B)@*7K8aj|dGtPJ-*Bp2x;Mu}JqN<*M7nymh=OJ?=i`o0F5%G~W@2 
zfg)yAZdn~Eofy-E`QqZbUclS^kBiz z?QKris)dy{X`0UoR$bC=bzc=~b2sc>*4hop!)5EaV&{efTwy~WQ!!4|gH*?`mU4D9 z=bH#D9JDN<8gb5D)JE0ZMX}w1`$>ZKw78dOBkStKY(`32#lzlaWT50?XkG37=f7Mg zpy0)pen#-A@2%X{z%`26M zKc@E{iaH*BOci=)w&%Uwb`*S&UVYZgR*N6mh7({YcE%|yv74~9v3E}HtHZZu!L9G2 zhU8Ib6qmVg^arkWfi#lz9I>Dy!Fh(A&zUf9VYg0xVSjhKLT(>c$&P*u_pN=#<%Tg` z&!w<6{CMN-{G<4-VJdgJ<}tcty3@ErmLAIE@!3e+=6X-#Cs$1wiio4+C~wDA znJYiD7v~m-_IOFj0*;!E?yo0S^&9yfZ{@3O{!07o;o4*D(_oobT!nJ@9J_`>CE^@6 z3OSo~9~TWzTs5xglF%;3LaBOvq*R&W8P*!dy0eh9Tj`7HO7vWjYxNMWMSZ$0dAyJs z)yBfi5%9?1ohlj0<#I93b6y0># z+WRhaHIyRbJWGJzmoQEv+Nvf$97aZoww;&k@KO#Z*Gy%4=oiCc<9i9M&KcMKzKv9p zwkn5t#_CDV!VI~jHr$Y~T~t+}*^ZjAhCGG*dJv3!E-bFQo%8aBhZnuIr$<>_D|T

4Q#FMQUc3r1x?jVWIcT|a zfOEwRyW60y8)q0pQ)ru^}ihch;X zA_^;X@dw2vy?L}wgBvAlpyjfGJ3+3rxZgXo5ArC@0==-|VXdbjAy+v$FV?!PS-TuX zQ3=ACX*hrBQhQS%ROEhQ>q4Nucbi@b; zv9Nyc^?mylU3oW*ou)et^zn(@Q*XRHBArvVcmh(e8SmY}ta4daA9wB%PZIMiseQ-E z$cXuPs9?9|WP72L$GnTY>oq_6WhTZ{4Y)JXYVCd^WtNq#^%h zE2oCv4#M?)+?h#i-7cEc5N?qhu;VfPUEcKZYYXAS`m2UU=nPIsS{nb$Ojzh27awmn zMvEdMCbk`mp%x~BaZ_HTyG77hvN@=0KQk92(H12*Ic%hTgN;p1L&N;&1w9|0mXanZ zzv7X_HG#Y14q{^-+MkQxS)SREzb_Ku^Lth?L-Z${t``!bnA2d$HLLntfIN-apOT>yb4KEMSS-` z{ra4av9a-aG`e=U@^^k#O+c8>?W5q3w(Xs#@(uL-cBzII6n;~f#Wp2(gJ2zK`j24y=7x;qnv zvIomu;n2@fzA1s*>mYf~lD*WejhHt0NU+h}>(CcX@8H&vARotKe6Tq~&SQd|z@hsr zJRDy!O^gD??KmsrvN1^mQ;Z6rxM^+#=tisL5nfv~O+;B4-|A#N)#JfD3{;u@6zyBN zn6^Ca(%x((s^7nUpvNlh+atIdLIKwOS@j)^t??SFs^VC-SYcG(uJ5lZv{a-GrJS4G zAI{>Cx`XM_PUAkETiDQC=eeV*r4{-q=$vV10>^q7Ee;M&#b6yER1?_WNuutYKf7K9 z-SYiIFD_1lg-hUIAD58u43^Hc`6-@|-?5O_?yulrG}-K=_lE273IEe`F2?w|nsej~ zD#yzA6--39U;z~jrmNdguP-Pp0#Lx9|Cdx);nf4N;^JIe)MIV~MMZ+ad|iu=9Jp=i#=e#~BWFc?nJ;cV-ALMa9+eb#K2fMUq}k+^a6%D;VoJs0V5E1NYVG_Ii}$ z-cm1Zk1zg=vi7s^nBlk z?_@d3a_rl0^Uo=@XNt%{30-wzyi z;R5-*$0~;y8s3LWs={wI=ExriOQ0o_4iVsR~06iBmGTV8Z$EM zbWSlCB-Y9j`*c@iR9f!+YKr^H$ru$!{4!Cy(_L4dN0&s2<5=&!PQq(!hlQ{4p;1wH zJUoPEXJ?BbC&^Y$RXJY$v$qzpx#>VaEku|sXkSqI4as)N_6-zJc3F;9a1>Pkq$D9B zxmzF1((g3;qGe0-LcjV=<;eH_>SO~Ymeg3+My;=tBrm5ET)~@uc7Y11NDg-vuL=s1 z1>s(hGBIHckBF$ZYjj>zj-->Iy`Qc4DNWq_`OBA2v$B}x7Z+D(- zasM4Qm(5iLIA6iS~GK}UiZ8I9o(_R`Xq$6e5 z3V0(OV$vFZW!;w6bN=%2lAJ{IY-|Jx<>rOtnD_Ccl(O_a8yX%TacQdU}yi2paWe-XXtfPH=j%;XelupcTk~XKQ1Xf?5d! z0}o_mWo!JselIz`*yygU3C_6FoZBo?R2p-6+xYhDrnDpnf(()l?WP+c9zv3)<(9&B zC zlZ$QR-xafxAAU(vdV93f{>kd_cIlb5jbU%ohAv84TAIgZm>UZ8nsn!Cy)Ru|GLW-} z8pC2@Ds3;JC@3h_*Wf*l-}r}&&Q4D%Y;T<&m_qXG+!VE}$*8#v89NQ8tv+C18nrl#;{SH3ClMUyF5_{q>0# zuV3S%l6XvUWn*Zg5Y)3eTzpqS;q&@LZL#?`O7|}9QuCQ;(e}vu_rJWE#}=ca4ZMu;H}Q zxQ_=GHvGZg`T0%S*q8xXI)J=YtX^>5>4mBac%0|IywGa&6aU$r8X@fbGeaf))?F2q z_5cj5@v(8Qm3*ywFMjp=_j!E(xafH<#2*|@okgp?l@&fc?SAh_biN%Wuu1f!@Xf-} z8-HIB8nc9~cM2o2GpB1UIBAD5>JAJ`&KUe@27H7{s7o{=Pct&;5r~fp(64nf1_-e< z)hGc-cr;53%HO-n%6orA?epCr;eW1joTH37j4(XLY&>Y?di~%ctGqHM4~@PZjbtz@ z?QP|8wwLF0-w|t)u;e2Fma@_Pr+0q$EQ9>BCGw`T{j*cLNv{AxmJ4I8gz0{qdFc}xc0$9s94<>4JsPXg)V4Wra^jq7VTyuwu>4`p8KYrB&Ea)tkt!j#) zhJ_sqRr~o^?o-OIPnkeg$W%AwtJut?HpvRG#MIK)Uo#L`_$D~j0)GimOJkK>(X`?> z4!?iIQvygqGa%Z>6X5hFszgP(x0tZ3t=9Vaj!Zl1MshMpZhi?`sYN2k#+!##^>y1?y*}p>bZm`NfLL(m@ zEH2tUbrXk{H87M&3FW_EJ2j)?|A)9?zkb8`_+X2i&yo-|T|Q*s?0(_?XC~IboL5mF zd{ViYtAnE2c%1P%(<&s>)9YeaHNQt@?C{eR{)p!&?aIhOb}0_7nth-~N{Ex<_ zpd^aoYB<1zzl>>~KPfsA?JhrJ4gA8v^Oj+KWZLP?1lOCQ1M676+fZq58IH0RSTfc4 zv*l5LplItldlbqNz^ala1C=leb#}Vn=s0Fu$si;|K4jp{(Qq(}?nu;7`dF>IfYCaH zlM(S#rt>Ci;I)ZHegg))?;h{z`sC^HC8U*LKe826VJ3$Y9dtvQ4H1Rn6ccaU+&1|Mjp`?-N#&YEdyQ13D^vPUYV>ADJ-9 zntlK1c$RQ{i>-_D0c_L24H_zx8B7ZbO0}S*Byyj_g+x?<0$2UZ2uVZwKJ6y!XinNo z@h`O%hHRwySn}K2QC_z+wjHg<8$APu9K`t3-!${~YiH^=aVDZw#I~>*uU_o~avZ69 zXhd9^pbCcyj0q0KhX|TA)2B=^94v7H@7v0AtzK>Wbnyh3@U2hPHHmUntQB{ySV94? 
zg8E8mi{Uf>LT(ePd}BzfRqHh?D3qw%YLjV<__O}^DyB2d!LeM03Eni49C9-GV)h@6 zUx{*i`B}%#Ef0)eZ+hEn9jl@H%C%R>dE|P68HIvLIb8yO{8T*w!H&e`OuT6{S6zU_ z*P1cM0?dBTtVAuo@cQz$$7-$H|e!*4r}H=lwqig1d1Yz?V(28I0aFcnHvq8$g;y1vIgKIAYx|Ts%GSzJmw73) z6($5^Ot+%a>0ef6TOD59#@wj$fMneMlg~&E#*=Y7s0gr2b?)ZfD7>BmcrHW+!y2X~q{ybSf3=`!1bGx*opV$JL*$UL64 zp(w+ols?JR^UCNVoN!c(#3^~@q!-)0d-qh;)t`*n)n3?WJiB$eo!H1HJoz=A?J^Y= zf@=}!VyHkr%C6CIP8LC!Gcz;fLJmf6IB|me!GfloM|MQ=j>tKTUcwZi8|njeH($5Xyk4%~y3nmlWnj}IZ$8Uh#RI!Gl;BxHo-S+6Az#{xQvdLe z0;jV*SB&FGVlt2HFivdWbwgP07GlZ!37q=jFphI^tSYjARRD!q13hZAHd<~_Y1rT+ zjBql5BHkD_`e_vzVLZD?qhqf)3#!1@is2b0SK~3SPNm&ii*B`1HZg6-rZ;V)7IvdF`j_1m8h(MS-jysA9|tiWY>N*9>!tPZVQ@+cH4_t4RXY0OMOi5+%#xCl zx>F?$jRc@RU;@f)H=L>+hVAQE&XRD<9-CoGa%abuPdv|nZW@bkAhxE0~oaRJWQ2dsrs#YJR1>UHx zVp+<7NAW=HwZlwEgtSJB}%bCK`nG9LQ%8-q>vLJ z&(^_Q3_7uE&RVWOPUhQfNxqpEuV38IGUqU@ExSKLGFE0ip(REINQZ%O@?c8 zJWeuF_MHx+eiIZ{QKv`%0$5%*?V+ZoX8GgemGJOzn;x<&R~T4W+E%7oT3YyOPW(@I z=zyqvYtfT-9uMy}JQb1!#5S9-ppx6Abp97)z4T;C`u-PJ3~w<3YW;PCri7vPHbc1% zCPLO6>Y+zMXx9PF479W{!JM=sIPB7`yr#kOYO-id+eXb-Z;Z#Qj3wd;ADnmG@rx35&iRrl zQ0j7|W-egHP-brBEu9T81-Oqpo`b{3)|D)H3APZqmI<{tupF%!C0YxEfUH>%s{+<9H65tGuoc^YeaY6vau;{6teTQJZ+rf23xsn6R9MH zH4!|sva(%Kr#m3_-83peJ8IBS-9K$foE`6&Cv=rLqi>@TIICy(>b%7tF(uhSUaJfS z$_Oq`@B19P4AuS$eZvB)VPqZ`T``goW=!iGcke(s>UEmh!q%|mBVpxKdPNr7%mtSv zSH>|;S5U#2jI^*WV0n13I;;18oo<*mx&sWBX)O9Sara{e?%_L+*wyDgmHVVol06wzuS{d=WEa4F z4jB|@--o6;ktZxp@~@5~Q^*;=s{N$$mCO8+NrQq6ryIu(lsJAHTIO=0>r)j&YEFT2 z2989bh_DkHC;3ar)zzvZSn?WoHLqWUIchyW_F#1jXAx{=SAXo_CEKs4mk{5#8xC^X^I zNgAIUM(+H;sVy%l*XC+*tNH0Y+6{5q@-}w8l0g(Xnp#f*}vx^7v^gpG9|48ZK%8~-Xm%Y$(Rep}%(fgHa8Lmh? zx24smG7|FuW&S$04;QI8j>1Wz%qmQAuibu#S>?2FucfUGh%jnLN5|pv$C_`)?8mDv z`yTBfG#>6n>df`N2_s7L{Ee-A$^{3_goB~t?hVYplom6fcBhg+$I)@%`~2NG)&u&rT7J0Yuy)w>6!@F)qUfYSXsJPl$R^Xehz1 zRfGb^3b1%=C={esq}c88+q;R75mkGmkBd!O@#p8~Gcq!oIx00(^wAm1JOQ*Z%z#&I z83T33_jck#_v(}hna6me+5Q#gqcjbm7e9Wy z0oS5uVEA;R=itB%1mE-c_=R12fH^i}SdyS31;Gz1tLsmmJn4NSeeUXl9p^r<=8pUY zwk+P|Ylm8v1Q}bLmwVbpZrzD04RB2ud3BH3s<%CKhEx(xA>l#hM@+HrN5 zXJBA}UPNSc&&S1u@7ABCtA+mewX~L8kD8mM3dhoK`BGjI|4rByLl?8RHs(E%T~b1X zl*ea@@$vi%wtT74Cv*YR`I$_$WYkNqU+S?)sh|V(*kVZDn-9d^#5}n`s3+1*y$6G{ zy?s}PcYdqCKs|f*%w)L87|1#oiAAUC=dWIc9UOQR`V-U9*@O7W%gbvsHpJq8?jm&) z5ZYmY5(IUki;B3R3ZC4%dfS>Pbj>LDQO|e3B7T4M#|QpoS2S1X9T_M)R|ifxD$TQt zi!zI={nN>GH(kfkw@af8pN8}NW4ZaiC&D=h93CyJ_=m$`sXP6%0&vzb=F;lG2VyB{ zY2O`L26rx__c;_ycHY!Wto*kV=HFkFnR$Bs{gQm>3ih#7FOZ{mOvs7|;GF*N7`)s6 zGlPdhgNrEYmqpV4d{! 
zRp$jU0UF)#1hkmi6EY8-XNH#VSTL{yK%Hk|W%WLpsIVRTB`#<;E)+>6*uAn>TwLtu z{T=8A1vjp{oY-sccfD@O;fA4Q2h=)6YVW6xYAQO}oC#1Ucs{O9IMH>R@$VVpq1J6L zc%J}M{pi_OQp~<}>BFG>wz$_;B8~2}F}|9SERK1>Q%kSC#v9j)>m?eNs(woSh*@i5 zB6P#(M1V)jI?n1az2hsbl5@I2tmU$&?tR7YzOMFWVQ^^vj%D?{KHk_9@$F>nv0i`5 z!6amTMBqw_-BA{oE!1PK4W;z(>2rJ-J+}qbRfIT?@dDvtOSOIsC>)TAuI1PfW~fEy z+qUhzBacV7itN^-9Vmw|=5Z!Hi{I_pcYLm&-Y*cK1=<41E3W&P3CY-*@6@DSzHt=W z-Ku6%T9E2Ziz)Zn;ETri?ZYmz)cNQdWbZG?)0D6J+(UAUx~6FK1pDMFMdtb&BXnQ~ zdyII)?F}FPTX0T*tP1~{3cDg;s zds#h>CEo8xi%`6xRn}HjmYHlmq?WxFPNToiyYx^j1QjdIWKU`sv3O3DL;kfXnSxZA z-n4w|c;n+aU||q%jQj*zYtG2$B9R1F1=izaiuao`HWB$u$xb#zp&eStn5+Foue@%; z?&SguF|3TAX(~k!t*<6@SI;H8fOujsmeqt=&V^&)cgx99-SBv0eelC_|Ju1?JXLIB zNq?qN{K!=!{Wm=Km0iQ`UZ@&`&+XN|xmxh|aU__EK&3Dx%*%RXiq@zaQx0sCxn-up3VxPLxO0?|54kF&O|!Ff6s!E5 z&njANr}-mW+>FjHn$Q*fH4*J}7A)Mjdqi)_v8yOUVwrT>0Z+4-Rgg2lBqlYdz%(no zMXAogl9^8_H5Vh(UM}S?a5{4{KQqVr3y8k#)Y|=KV)Y{2^kC zmPGY@IgX6DyPc=s&|)!Ku}2wNx$i9W`Z@Bqj|AIMUsdB!cEd#&h@>Lnf6K|)na8r1 z9^|#ms;c3YJyG}dmV>Q19<#3`V1YabRqfq-_oV#%8oZNRBgp1nS)_gX^&|I1YHAdO zEJhxl%kNdv-G15nvXzRyY&ZSX*J&~*$wzDREm)jPB|BNg4ev5UCUQ;QnaiuM%jz%g z+rMgBY@Iv(c9Q(-ejJKZzlLOg-1!oVN?IhSQl{Xt1@~wZ7-B*B^f`2b`W3g-{F_vx z-${|tCrknw5fV0yJd}f5h*pIS1tPB?`WY~5{02T-cc_J(uz|h3*iicC3AH80OY!qb zb)yN?_E()%Us8VY6{t|ORM^bRb_}Ec5T{c=1cJ&I<#=PYzjr&XBn6_K1%u5K5h2Zc z($@Sqfj|bQ?3b@!xBm!&6bS`)v3O!q(vtG}MOv{?z!L~*<4;FH5eCNGM`mU--J%j@FJz!*#>9ylsQTr05*zm;^gKWD(NnosY`OZf zoqGEBHGh7;|D(+$F|~lr`C-%uuV$)aHCPv8D=hRn zpTG-wA0vFfZ#K+^9SQE+t3Z%whak!OgdOK?Dwvm^fc9ky(nV-YjJw3ybkHReFy4OY z?k?CD1b?l^OfVU@-?7KOv;XlN9m31J1&=PW{H{a#Ay?(1GeGKIxAogAO=q}v?L3u$ zO%7S7a}f zS&_tDC8ax$9%UW9htDz=_6w9`2DlTqqhS!`TtIOp{LY&dGzo65{& z)I}Sqsa*!;ZI%Bg>{Hu`ujL#8+2Ev^d3W8hZI0;jWvkB+umOJd_No$42@t$`^$IpJ zCaUTmH?gqr2#`hX?N@K!B))0U?IMx*;>FCm6wH_)08&6i%gP5Jxjct`ezacO^SBG( z&(q*w3U9-x8L~9kqgj7_HDHg6V^M0}ocYXS(sCXcQGzR1u7H7!7iBct5{8h^z_Sd82=zK{gYFwRJ zUjh3l!|~!Z_9miR!X^*)-1+&YsE9D=+@&_~Fo9@t0d?)#HAx4DiRW$qBiG_!zXNNZ znVE@+NJsMWc(PHH!Jt>tgHK>guPjaO+;N*%0sQ#CiKko=yU=YKJMQN#@yxeeCMkv&GsdG5|%Q0`RF~eQ)jsCOmLgETgY2cm^bz7;H zZ2jlm|Nha0R(X14CNZ9I1pM<8j~*UREx;IY#>rez=8`o&OYt8Z?*q1FfiRGM|Wpqvc3;|X@~~{ z$mzf}4dcAUZ11DB%B{HcV{F(@(yFS2-~0P*8wDS}hco-xz41sN9HiiBqt&YODE_V6 z-^17#n~-20{Op+y9R2c1|1;keAm)ojT$YSrTOZa~T0@%9SsBbP-QL;B2fO)-oW!3O zKT0iBR$9V{*KOD9_YHF4dK%{D+2!8*b}OTg3(IArsh3Yrzy_L!frYCD0d){;EO}6W z>jHs02xr%KE8%z)QeGZdT33GM7FL*dCC&Jyx!SY=;h%3%@1+gi{7UG?u=U*=u^6PX zuQT@D?qFTFg-LyMM^;t`LjK@ny>~v;DkHxpYAaoT4P4(BfO5AOJgblD!3{Zx5W*|r zYz0M7_-#Z~&L@HjVd=U)KH_zT$yG`g7zE9Z_5Eb_J;0M)p_i}bS@maE zpBdi2ZwmD4w|h!TyT%_}*f=>WP+H&{tYq&j0R<)x%y+tQXBYdOnGiZ1R33C=be43} zmO*nyJ|viZ0dP8&z^dCn)X)$)!adgV^sN5zUWJYg2xUrfuQE}LM>NShz0#Mo-^xZ= zZ|-v@c7}(A75kkXP1UBeUB6xo-i|y?1J73IV)_G7fE_hhyV0;R`P1<1PvR94l1DVR ze6$KS!yzgS{%Pg%GxlDu7_%#7QA{$@CS4hMx&yL^>z{Us)4pHB#c$WDwvb!F_1dp9Z@w z0@l7}MVO5Zjc_iYMR+HJr}$-j{LIPzlzY+dQ-C#AFIZjB*Cl!P!mF$PJQ9y0gH$VhiRg@l09{9Q1o z%z|+j7qvQ3ONPjeN`e#E0lVy<9;BK>)&`5MoTX*X-;u0NHPV3|6(zkPO9wJ)47I2m z0Y5)~1u-K^Mow<-ZOpB0wGS>}bV(9+W+|v$7a<_$MxhWpC!`sdrQS~!wm>`JqtO#% z9!J#H{&JE)xdX)7TCd$0JfeY<-*fAT02Wp1-aUN8<_TMy43tk8z%>@7*YgEz_O8p? zL;5u?#V5Czn6OhtTut=TcQ!v?dJ7Fb2!92iMGQ;e|1<$RD5}Ye)aau!8jbF&b>Hyf zvGyMVlfaAZ$L-k9bs$7#Ge}6#!6R?Fs_DE0#>W^mEFgO%Jv^!kBlNM8;crrNiWgarEM! 
zME!A6u>-O)s4oXO54@OJS?9oJ)2?e@9}JT7z=;p|Oh1172!f-@*koy%sfkIQ-^KDq zv$RG(BQF1X5B`klwzD2xLJh0z*sVTD9NFpEnR-CH3|B z-Nr$%xDCSK+P+11$|r~u+Cz(oLHv@$G531ijxyrPbDwY}TW>t`%gWA%#w`WzhwsG_ z_$~pHNc10~&A+;heYJS?p(o7Ai5EHQHWM}F2189n#-Dp@Tp#O-Z@|U?bg}Lg9TU^; zy-{x=x(TFA2CT@ga$DfiGb+0#cr^rK;(iZ;28^4r_e}?jDL}T7=k@Eim6fBtlR>_0UT|W7-5lsXbB2(D(?&-Xf_?|Q*8%cr1&H6-o4WXv-RLkkA%JN=BIOm;LOg4Y%y$p zPkj#d-z_#?-tVIow#lx)m(=!qfF_(68;3z_^rVUB!_9SI2UURWIv=>8{f)K=_Fs+x-@j_#1Y6{7?HS zxs3tDaFE)75{ta{R;_|VLUf@+@YU{@t1-g~q{iZc6L69ic0I(P)uFSb z>&G&Z?Z7XFEVl-D-|}I)g?CARK(A_yS@+-myH>F20!I3Ox)~LUqyTepgdnCUaN7`) zkj$^l5?;RChg09PuoR1i&dAjSn zj`Hx*7fo<@BUqRGzg_a*vElzKzb2Pt@7(rS{rm)G=7kE+fAkS!g<9kqsy^TNiabR_ zD>iq4<=XCvcX#5!x3+qr`*Yi_YA|Y@r(hxvNO|i@FpWjEkBKU%&%5a-vZ_v(_zbEq zi3(IKB{OTTk3KeR0MbA_Bh;47@4x#YvkAH6UAH5Rv=5(hy&Gg4N35#JVD1^OCZTW% z(N3l)O`8+!ef!}Ex-;XDktP_$BWhwvUdFi~MwFzZ=ermMDR^z_8C-Nm1X%^_e`|sk zLVA`g&>7jTNHg>ob|5W7^f=M{3Bl7D^kw+wh8!@>uUbhZCCbEhP=Ca>xy1HHk7Vb( zx-owuTNN2vZ;bY{jmb_s!`i~HY&_;8V;-lC=X~`^V0?0arK5c3Y+`{P2Swb#_fyB@ zbXOAQiv9hjS2AH1g~O4CtL71c=fbb^px$oags$xuDt|R9jsVj3zspQ4+4vwanSvI} zk`J*sTOqtuM>)#NUlE=p5a}Ag7fHn6mA?viLtAz*A%1m4$qmagq53)nV8 z|4=AU>Q|wthQ`uelN8V-`m>d!p;CA3U{0Lg8?%>FQ+v@-`GZ{@Dwe2XcC=x74ir}~ ztr&hU{tw9;=y!>vhBz4CWpr<-&RXgmgJWJITYfO|iMkCp$hD zqA{{|x+Z_qJ`+D=;k~taatEflV><%dVfp*Js>L*KZqMyUzc;4nay1KOVIvF{=&!gQ zf!o8hD~TtG|1s&G-IWj=G^r6(e@Jh&cXI7|4I1&xN=ZE#B8PP4v0lAc1YKXp)8{0R zR9j>>A+kDNO}tUJ6aD3=PW84cMTQz_GRcR_RvWaKD6!P7;=lXvyHDD`HE^{jp7a5Tfydy|NORZ}xc`X1a{OVBMY^PKs|3P*$Pv!tfFhUAF>4%yL?N^_jLxp8og&uY$I&XmuaS4Y#oi*TfSDH8# zJlZ`T$_BCdvV{YJJkJs$|rMPYwEmRW# zmoNtDXGXw}uG^;h{joYeXqRYPtDK5nDQu*L#`<9Z`>hYcF0aeeH*mk?Jj99Ic*ontq4+L-Ek*+`qFD)yJTTs8Jqxgy!$I$PP87W2tr+kQEJdGKM zalC?5Oa(;o-2HKIjdRJ zdL^rfsH}U^Z|+1&MxQ9$xLyP{Jx1V=smyQN*YT*I;OhhE<*^X2NeJ~~7%}VY^80kz zsRmyFqu_8w<}mUK-}Ae!?{%oL@7~%R(PZ81Y=kctxn;;V_!lQ+yuzF2AJPj#zI;Ke z-1-X8GJw%nyvBa7Pf)o}dM@4}<0ky!is4KnS%QAjP@E8=pnO_)wk{j>tZ{(N9a6Tle++C{exd^sA6a^T?o-B@m2cS4Bl@CwBm@ z14*-ghwR35SU%R$R#FGETF}StDsB^F7FqT#nI9C1g%NXP;hoCPC1k?CI_;H_EpzRL z=o*~HyKwm4D<%Kjh#h3H^yL34z86r)rT0 zwk)*6RYOB}w$KS?GY=j{6-M;U=8x`Ow=>u>tSecWwHLX)%MZ^3g*~*y_ImYP0(f1& zPRnmNQ6KQFZGnAy$03EJ4@Z0N=ZNU2%~x@DqN#=Nt}=hVwfQay+N1Z#5{_!FDKS3m z;Y?)yE}}=JHaBc>0aoc*aPW`XD^w_IaW4`E2D0)Jf;a1(1uM_*PFmVnEns6heP31D zm>C^Ve3>@T|NYPYI2Q)0=XCKVUE+Lm{?W#50$YV736!peS+TK1z>=MV@6osvb%j$^9`f;Qs$}RFy1J5FRO>?gaMFE$ zt-e;6$Eb~yQx37CiZmvr?{jCzsD*fo&FQHv03%@|X`}&9BSiQIV>R9C^#O=AaJ`sVCcL3v|8M4ad zo?9)q9U}#1mS3AxB*YZ8vY938VqBHGRC#@0!N#+ZP9c40+d-C(%HXQD0%Pm=v3KvHlNQ?5!r|{;Hwk4!G z1qO0I@O?#kL-wFizH|m1*>APuB!P(w zgj(ZmRTVkFC6BX{{VhNzrqD(^40YrTBW@b~2hl(ePUzJvBj;GiLcirnA5wo5<*G1{ zRMW;xN0t?At$~>oA0Nz`=79-S6!5#U-~mAxXz(X0rr(-?6^MR-rhidrb*SuZz+O#Y zq4EBs_2%g0)r#7vq$Z4=f$v3VEsav5rq{12%N`G1L=j%O5)ASL3WfB+LS>cArXPSG zONq5~sMS#|r69cAY`OeBk(yIfP@Jyca3sj^v~|_DKmGwnAC4(HBhNevgPPk2g)AOC z8qz=y4r^K9Ux1De>g25La-rxT1ar0s>65|9kDua7QWJhX*SlG|$4Ru|<2G)o=Qot| zZbnlF8B(?LBEqYqHd}~Qrhz5nX#jYY>Actt?uwv3OkBol(ZDbHSl8|#qe@y=j%Tbk z&n8(TQ;=EA1@)Ol5)TdoF){H(Okve5K4Qs5`g@@m{X*e~JCKj{1xuuy-8%OAr$MLd zI=?EIjZ?>7r7diLi6Mt21|zSaKo+d6z{T43xc?dm>quuXamT#8Iv5Lm9e1_b9TIyx z%#yKZKR@9Mj5QGnxiWix%(L}iBDZ_i_P`_B@*7{JHy7!wznC#IFE@91 zyQ~X3$wi~^>Jv_6<#|%ya$EFP`Fs2{|7mXkh8g*9lvq_@EAj#A7U7SO#RCO#)Gs|b z`Eiy*VEB})^@jnb`}e*s%bULXVD^Htq6J)^FhB5(0t941VqsAj4}?o!l|TlIXvQt*8Rg8CP?RRz(*?5Q`YTwj(lzo;7?+_tKL zwkDmg@*IhJ=!}sn7md<3RBRso8}Co@-Gi~2SxE!u$H*C&>8@=gPhbi?=i}xnj!VYr z3Qy?GBW;?Vo|ZkD7nVMl9a!+sw#Q)|%4c?CC0wxJUt17VmiNs`;VH~Y8E}$U(tDK2 z@m3oTY4x@~@^^LR2eWYynSloeIPa@oJi+0GJ#no?#8T}yf%vA!YOM)hSev(RggSUC zML*PP;@rd_>C403OwhIaus;qBMECVve>{Z!ga!D;sSgKjqa2Aba9g3dH}-DY!0#09 
zT_wyuA&4iW{d201+h-d4{uL%~x zX3yGq??*K2ep>#X$2lEMBRYzEqMH!Bz3l{55b}i&yc$)K3k}5QV`jx(@HdyZlu24r zCubM)E6ufBQKQCM663_qzX!+WeY)T1ULf|lb916ohZkpOXOYe((_r)i6OI7-t9II{cPO0VJyeI<$?&i$sg zbb0w*u$x8W)Zem<@;K&99gbQ)Uav)K28xBh>{=2VX@sRld4X=_MIhYs_K%@F+iFI0 z>AAeYO!X!TovFG}J4=!l$~;oT;;R+S(l@l3*NE0qQbMZ>;mOR|XdX#zNUfl5 zG8;;P8#g{q?f_H!$sRhGA~;D>-+@E1BW-gjm$0KpwKGzur$naz9r$?vs5$MIMB0G;&%rCz$%DS;AYd@I0SftgdZ~foB&oA zR>FAwrVC;$;tyC@@M{Cat>p7wlEhIOAp;IQYy9z}$H=s`pvG140V-}qEvHxUls zf-sFp+4^k(SNYD#K+C#yQbOE&Tepv~Qjf2q~kt=6)^#}|bhg*!$bd-k z$}!`v?GistS8?Gttzcy>0~P(HKm5>URL;-^O3`K{2vWlU%dav0)q662qvIjQ(A2>l zy4qTY5~(^gp^J}g${LSY3Lfp)*rsN(M}*lmiwI6EYxa}{!j-)D(b(*(86|~>+!aYgYbo5 zvNlf$T8$YKas7$Rq8M}4>z}7-Tdj{9RMUuY5*p_U-#Qked3`wGw@kwUgG*r#5s0H0cwZOT@Yj(ULn+>=H4qy`y8XfHqm7>F3Vf-cAZntmu z=00HK<`iCime#eej<%A)UbOU5VEJ$pK(_Lj&Z!5n!P*)^?OKn%#H|wJoZHYQ>1}DD z5xPO#FwV$|lO^p4o&+F{jXQTfdiNEr=L4`;g!fVZLo>*fS*}gn_L>Aaj{Hu0maJ#r ztqNy)1If;e=$&+!Nv?o^n2N>I>i-^^oPFQ8 zHuRO8*f%lv_jaSEcf7XaO^3l|HSs=7yFsKiMAHLH{#`` zOk5rtC05>8#ZteB>ytsBood+Ywk&(LNe~GcmYR&NC=&?a_`czgQrRlcFHDp@;7pny zn1&D?M>D|b|8IgtYd^{QG&dz$(L)vpQviD4$I*I+U-3;R_dfAhChcq?IAk~SP zh)r{X0i^5=c4DaZW50E~W@?NcteqoOs|5p{s;;n4wFQfp@wtJ|ES~obj7unFoBU(C zcmWAD%!+d(JARCuQhIUL^DFlXuATSP>`uaDVrOSkp*5*PZ~d7uojz^^}gJfE}tgzXywgvBf~yspV(1dzf0rB zKKtc$wHWQZPnf4Tm`WOcY?obXxp+%T+cHGXNPmH_0Vr)3p>LFCN>_a$lj>ITn=!Fn zuBkaV?SNJ~j43eaOn=`sK0coJnPDSAg*>3Yj&BFrIy+FkG$L& z(^Gy*?80vij`hPod3P+Zz3mt4!8S-9^bBdl9Y*s#X@2AsA-~+*+`@&u8`b|i#E2PQ ztre8$e~&WS9k-`8UWN^kc>>?R4PC-o*GJ3>`DeO-?=9*XQ4Sj!v2ll5{X5hE*uwot zW0Lez-XfU6vS^i#??mxeEi2jGO|GXW^glJ)oTOIXD1Z7yHP|{V1!&I1+)ar;4ue>I zwIC)d{Z;9RraHxIB0Q-Ftr0>^(lId*4_PKH_c?59;_0jZCur&AKcJ=Zs{h1Dv!SG| zxe#$d@>8uAV z$7D>phJ`;p(PGsc%{m{T%W|{az=z9_qEKP3c0T{KOlev?a|RZ)OG9g`yOt@NcB%Jn zKlGtYaaz1VX}u;&a*K+UH=?2}P+$`0A%K8$m_E|7ndK|#`m~Q5m`;o8DkN6kj@{dv zNL>W(H8I5Lx7k9v{+*}2zO)&Yl)M$%-Ex&rqf|Y{ux1?yUt5BX8aq^OQ8JsDf-6M| z!+tz);KJ8KLocFQOtW7c-R?wT9Xq)EX>Po^E1;%Fe)sm$~yCXAI3^igBTzSBPJzf2+%^QAJBXsd8=g^EYAMY)j zLGZwMmB@qWEjlIGp7l1MH1FixG!{}@92{IVNDF>l7kyeIi)l;Q6Uk?buefmjVs`@t zOE>Y3*40o{j2s#D??bMI&+IvFg5|aoCHCe`9#Tu97V~O6d#zHjmgoM*Sy@>{9vcJ#;09QN}k$qHonq@(A5f`BmNcXT^%cKJ-P`YeH#@4-7mO@)KTqcT_)RM@;=* z4+ZP3Ir}qT2*Cl)`StvilBGr9nv9V4H`Hk0CooyRiaBYvVhP7)H=i%^_o5F{R17Ht zO1mo!d}uT(m|(4-D5=V{=Hs`3g9)b7EN43|^G0WJ1GcdAUQ1k1?Sp3&gn4DOjMs1* zFD};?5T{G!O}O09sa6Zh9C-aOB-ioIKxdM<)OT6+XRg7H446fT*|{l~vi!5=TDsrv zCXdroi#EA#*4b^H9lB6iSYGDP7N+MhY4cs~o@ZE8n3JNEmZH7+yc&>Oe4%98*2&HE z+zpvrX4&7uGL5*dEZzHFjnxM*&s(|eyALn368i!WTY>M>Os`s&@hg=6kY290a_-@& zVB@}99qm%Gs0nd4Ih?VXxf%`%I|7kbuzmx)wqP~>9}BpU9+b!c2^|O z=%u8$lkzJ6eN>@r7QIj0af9|l8VA95+Wk?uyRMblMl>wHRaS8qcj?9UQMrqpLF&2W zwiE>N#3T%jSUi#3M;qnd=5)`=DrTwR2bTzIj&!4f1MkZ&>g?zGc5QqTq=hSc!?!OS z&WyO$U)J(Q=dLt8*0fRfO1r>)3MJ(-lF83^f?`P+j@_w#&NLL`>|nXY#E##1`aYFR zYn|5*Xlu>=)8c#ud1(o>_I>GcO*6Kaxm*$ggR9vYxzpStQ|T=F*;2Jv^*pE*0&N5L z`b`f^?Q3^@aP%|ho?FLc~tin%ia#&I4#<`^Ix})9BL|Fc{5mB zE;l1I(x8M(X@6ADs4=%(^q+~1sv~;m5<_?j0w(Y32bD?FEZ^RVUTpHg{Dth!OS%T7&T7p|Z^^1FOH`p&r4#wp}#a1@^4B_T{Nw@YXlmzlVW&W`%!gs_&cv$-A4 z6(=N@RXJzjb?0_`q5wyKM6WK&aSxR*t!=Ls4oWI5KAg4eA$n%ml7sUq7RX(3O(#>H z>yYAhkCMS~y|EB+#jG7nUvk}d;X#Gkyl?RmUcEp}<4lvuw-XimnC-ay$>2Yz(b6!R zYT3Aa@P$>CXv8Dgra7Ue`Yr9WtP{H)3AbCS@dVnOWanYpA|)1M;r=-(xvkZP;RcZa zWg3`mWb;+L!S#VOrNlP&J(bE&N9mQ3 zr|#+rT`%B}_=mUNxA|h;6&)A79g4T~wn&C5q%9L{53|_EbGk{wOJuhPRWeLNsLkYe ztHO*PPSaaguYO#;DkkvvH~QGaNq=cQVe{wvkla2l9ZREpc4n01Xh}P1_Oigc=^DdM zj{9$0)w8!^R~gX%%)FzRwLC&JN8{rK;U^@j(MRF-+ zjQ94~6t@3fLlGout39+s7g5>4YBSN&pZnA9+jdxQv1yXf%$`#VsM;>qIAL?%-e($o z0ElbaAZXdE)MjQQj)V3*d%kwfoMKx1w@F1vK85Nn`#L)K5oI~g>Nj5Q^mwZ^dO3Fz 
z%f-w>DzvC09&hP2e(8LM_nPVjWtP@mYMTq^W@$QY)AztZEU{7c#>HUV5FPuhj0&4- z*wg#4k2b8H>kACF7P}GEYH#72x?9TVxxj_z7I}5{EPJb+a^>hht`D(r37=uUd;8e) z9lTg3$sOsG^?n`ipNm_Xz2aI=X>|yV#as5AN7ZJ zg+PVSMb%nHl!87)Qx2=5DEKG(`;}+;pT8>N&vM$9!ErdEhQ8tSRY}dM3&Od4>OlB1 zA7@txB%3U&xa`6ij8uYm%eBaq)o-%+0Qd^~@`Sb(b$wDbehS=&J}%lM%W2xZj4@@> zPAuz$ykKqQ)U0+GRdLX~GB|3|pSm+qtF~$ph&>J;=_28RR>mzv9=G2`8QwkSZ*T5? zb?D<1TWYYRcNnX~%AlBC>K_DF-^KSHyu;;UBam13Z1a=R1Y1){Sy7cQn-5Byx0M(A zlVq6QE^)c9JO5t$Prv4b*C>n zat@^d=pQBFynde(A|YYyd+2`V%prN&)I}+mUfwR3Qaf+R^145AvrMIijsF#k%t%^+ zw_liE<=ADUu$1w0`24!{uAWV|Eq(roIU?fbw6)11(w39X!JDXSDn2#2SdK8=ap}=ed;}(K>htV7~!gLwm>pBPOb3E)RIiE(?x2m#{ z&?{>7p$cgWw20l{`f!`>XL{y5z30@H_UB{s^vcH&708FP^C`opW5PDKclDN}8Rto9 zRygm^s0p*6@w6X919;yuIw+*5>bXyx9;|&80+|0JPXy+%Y3NAK|B4-UH5pOOoro54 z&T)~k&sj8}W%c?IC04L0K;(&H(uu&+`yV)rm-_bh7LFyvZm)Jflg)Nao=*hS6D`Ku zxTN=o5QEVZdqHyxi#5Ew!5}wA+H>!}{S5ZbA7L+q8HP3WB}7}>Vx&$Jy8^6k#99qw zDSRAEdA7mGg;~ZXM9hDvyGdWx2h9vlud|v1GCfDn#7CK#Z*ESoifnb^e#_>|WNhb? z<6ZmOaD4aPXcof-r$ua<;li$MF`e5vTo4SmRLjWjM5(RA@lR!uu~FqnO8K#w;U5|D z|C=Nf_i1So6+=3>2>FF#$!*(_r-B$ktt7?I?<`>!CHf8U)Qc^qUOZ=u=cSPmw(X~9 zaYQEb;oq%e>jca$h?cH6Ec#ZOBhO?{8bizAq1D^3UX9FZJ78LI$U=(UaD?&Dqgr=t7g3vsYQ zW4}(G12GkrI=Agi?4E4uP*VA%XF2h$aKGSa#oRN|vZ{PV$+aAmXEFu@oFx6~ppFhy7mihHPxoc=t?P49Im&Nn`gl0DU`Hz9g--GPz<)}(WXeB7joDELPs8Ta zh?v&IxoH!@x%>~)vwQ4hrq7OcpGll^UcH1N;EOAx%fHb~vWWx6)#}Ly2QQjQPx56< zPo5Nn*QRShTts9pFTX`J(~y~8VE58fM(mMAu00(KH`h`if2$!(sKQ)4d#h1YZ#wEUbOI<_4<==r(! zYFYlybH;TBiiao&YW%Zb)%Ji30XKikTmBP&0s+{RSBsI@)e)~$*$@33!lBEh2Xg3K z8MZhW7_29ulni6pLt&R4f3Lqjx|t4F-j;{6?rT;5gZ|T~EquX85Eq4{qG@Tv{4;(C zX*M#7Z)suu%ZCIBNV$Ecka;x)XJ1Ibs7j3G2tOl{bBB){xd^*4GWWIO{UI;$quoc5 zfQRQAuL2kG@Ssx@ij3o~r_L6&u~L-RD>)P3uTONJQ2RF2umqw;Ih^+aXSxbDA!(dK z5)4RFhk%2`t&UV^K-7Kct|_C>;I6%jT#MG1!2gMx>I~n#eEIU#8;n}?Wq=1{m=^Pr z87N-Hp$=$8>fO5*CEg5(H+BEAS65fpY`8IP%hs)>_m13pc(R(^_ex7pkSo&ELgVAF zai4k2P+D3_03gya-#uMU%(w0vU)fugHL#-*^w$|2&Jr*qu9f$iQ)9WQm-{5A(Ov@A zc*5kftK9XDl4y#F@5Dq-prd;2A|Y50|ISkxmvEF0*uQ@j%73~gD`7mM$~r1q2Iiv@ z=|gfCK~en^soI~NNd41fm|~q=RXgxsXK(u_3@uSW)a){72Lqie|`m_wwiN9wHS$Gbh(7hKW(eO{=&Z= zfAekszxX0&3slqLqw9^e&LRflIs~{vcR2nl8Wamv5RiprJdDTvVH)QEF9At>m8ZFP zF9V6=_V-_k;6i5bpwEF5#k-4GiP9b|g$oTZ#+^BtX1WY<)?lr0a2)Mi{}0884i0I0 z?7tOWs01(~PJNQ?P56-;TXoL$zFc;L|o31tex#J3vh7iIJ3QFOj%eeVa{+%QhPeCfRb~!6+E-aGP zMGHUYnvv6C;RIY{+qP{JPH!7Y$`{GuCLVTpLYqT8zwr~}dNUL>B!&``KK@TE8Ys0Q#BHa8tJwXKbo zd@(Qosr4ERa)w22li8z?P(|eAI=;gXm9guc%Zu+2+d4aNQ(pyKyY>j;NhV%B8eCyH zO~4jDfhQQX1wdlK%C+;A8xq$th=sIy-7%x%L3~Q@IL=mzQ;i%LcdwzoURwmQ9kCL}eIk;Q z&K6BniRi&Co1rv6bV#d6GXf=h45DB4;goe)D86&2d>GhZL~QKv3+*92BO@Dzhk0ka z&lp6Wi~th=sUZ*F{PEr3i&aXGDEg3>B2OkLoQ7A_kc6GtBD3-&F`lPqWH?T-&d|}7 zypNDpy#og>OCc`{?HgI?56M?}v*A=}>KQW@i&}?v{5+y1ot2J(V!FT5>d~X??CtHZU%%e-cMSP>rK1Ry%1m(_Zd^to z>He7c3>7fyIPmNpTO3e^kZc=}2h-B1RknzTyg)yF%xq}B19~HoNDPe`Gc3YQE_{|E z(=)`&{b-;{AiG(9ZX{>4`!Ct5Ao$ufWz6rHS#pjay^(PDG+#|s6`ZL=c1}*hg&m$Y z2)IRj$jb^ouN9Qmr_SO4IxfbYDOg7NWZ;t~TZ@!JlCulz?5`_nE%s7+Hap0D z5CTe*%%3MA*a$0A{DXtb;o#5_A`J4wlGXT?G7ASrIbO-5iJ&E$hXOr*TSj zRHK_gk!{+vsr8wM90^wS_4P$MPOOuSD%sT_?YqMPiNX6Z!+IAKSdBb}>U0TtryXN& z;$<#jqR7d56NCOcujkD1PS+0!NPygk)K0NZuZ4NQ+{`6zk-*PfMgQPM15RM)&fi{K zX5A0^b4~{GKc*`X)^mUX^&|aY@GZo<-U^zkDC^>1K4Q0b7#UcMJ0~~5i#z@9!khCw z;F|UT8TyoPjp;4xu1;_KD>;E_GzsI79Dy)ekzug|>|KFdH78EisUY4y%1t~b+$DOG z6P_mqsjZXozm_XZW6u{7aQs$j>6DH+Ksrbgl!u9e(di99o(i#u-^NP?QRG2k zSXXh-_@wA>*%}%eIRvBpkmU7_Fq9c-I(KOVZmPYJT8{DOp$DWLCO+v;K)gqj;8ZTi ze}aq71nWpct(1$%J=?O13PT8dquniHN9_B8cG1W*4~9Eq%>9F$QA+BX#2jyGpp#7% z0b#+xS-*eo*L;y_Nk){ZXIp^3|3eeEAwlf$*s0kB$DQxvTX+Zs@Kl$5HPs}l9%Q6H 
zA!I+d(q9Zisox`h$o?59J_D3g&f-ite>4b*Y};1l1}yB?ESyzX*h!cwLQ}DL=fZ<; z4GOH8!_@t_L1c_9733(+{>sEnt|pLceSLifR$nouuT;`~Ay&;_)EM3_>zDp(n&rj{ z`#x<>V=Vdf21{aRN4%_4)%T_co+qJ2za>|Hrf5=++@8GMNnSsnI5lYQGQj*#CrcHS zoRW0h_nEGbcAmfH-K2#dccsiqCQqDAZu?qfF5e3{m6c5h^^;51V0b@B!UFL@KsOx65bH(R`%H{$O{ggg!gT!o#mW&y zP3Q=KfBZPW4fDu8;M^!RxvJN!pzhZ1tB98a0s4qvr+7olCu$;` zN_!E0My!`)t(wmV;U-VUQeaeKVlc6WF7RC#kzBzm&*=q8el=oXU%gT#FZ~vAap$u- zsNsoQJ4{1tFQyJe8HBkFAUI;dpPW>Jt_hyY48obL9t+`m!3=G-l7usnj^hh}7BEss zHT?q_`V}Oj7XeO2c4g~O_Pj(17WdmUtPCn}zMFW>JCS53GF!n6)BkfGfn^Up=FXEZ zDuHj}tW8HAE37P|-MCZS80Zzq9=`|^C^<^yF+N#a_{ZiHOEV6MHEC0*LHcmHrMTmV z4GgaK^qAq$AV%M#=XD2RHCzUWQ3W4lnT)EaPO&8@0u35riVA2UUwry)`223c{5rBn$vY zK`_MI@{ltUuq@9_gA0C6@;ZMm5Qu|hqW(Hk5COF~tf2>fU%-sY4&Bmg-l`lY_o6w^ z){xwBVgW)C8VJ>${1T3D`PjYyrJOQya&jvAjQm>LIWLSXBe53dC_1V~5=e*1&^dB#|rQOuIknb@o`TgbqY)_%=d zP1$k0mHD7DwTj>f*3F_BwA|%T$oVX5WBt%}NJhDg90N(h&2H7XLm`e1@JbgEzi$TX z0gO!L0AssTTb^QTA%S!of(ye0bpQp>7g$=6+dumKJE4{_9xfBAf=BK=X#>8ue`3@1 z5*tc$#6?imaeWB2rQ`ZGz~ck|2EmN0s;ecD!&(Sn_G2rZ-#f=*lFZlSD~J+K;=746 zAX?VRD%|8NY$^&`IGQWzN53#Obv(Q-Jq*9>Cf!KCvuS^{EJR_7lWd0YX7%7HmmWQ4 zeDYfn+ZyA08$bPEAUj9p#uU31|24(p|BzH1Twr2bQ#fbsR5GUG$(hE_}C7)N?t_L1kh(}yQHs#MM&ZMsgG;gLIHKb4r(X~K5j@Q5N23Uk7f2aBk*9@yv1#DH z??QF||Cj&2GA)cXJ?G1`vs81wZ*_=V@vHOlI(K2(hvZ33o^IW!h3o#h?vGfWE6&c& z`%770*5^geuH82J`87HEH*dV`RU)E~`b{D0TU(~953keF(cxgz%4=x6X>NWrXeK;N z%y{Q*WF#Z{xpNH__RIBCb$$)KpOqxk)uV5)vpZC}Sy<%mkFB{gjXTZY>Xwc)B%U45 zvxY4G+?Z)Pk2+oV6FHu`;<5i}sso!(VuILKrLyFyKFCb z`BA%*$_XFLGI;Q?uv~=knRTN(F>sQHM@3z~dGk_JDD}ffj~=y&kiw*EHhHQ3jPIq@@sL}RrEbhj}M)S{~yT(6!EhgT+#rv~-V>afd zw4NlQ5`^8z?CtIS{rztVQRCx#!8g};NB8yhW#s1GJ$0KYhTfXQ=eYHEFN@1eDI{^G&(wBF#VgOvWy1R?Udxw*za7rb+d zY?*}F9qeb%p3(STrVPl=zB$8}-tz_?>l+$0$Zp@b(M-D5WbI)XvbnYI@nxmq=%Jis z=!t^D`R={Mw^UL7{>^I6C#0gNN0O5F6cq{d^z?3-4c@w`RdPv1MI~C$nG5x-J4JF~ z{F{KHk`h|4+~)qmr_i7#CfT?eSO%RR?_bdNIN6CJ3ez;){dM1IubKJlmNj;j_MHbN zE0wBWub{Bj@LuD)eNSy<|122FzUn<`xi#ih13TZf=n<+ZnA}e(T~>GetFsj^1?e4Q zJT5M-AV~@zAD=WIiok$?TTbiGX3jhgdlJ{vMNY7W!St+AI8+lEG5q;`H9c*fc15CZ_9rd_$|I zxRN^F85uXUe{T9l1xTt@57<_rqn>+g1aA*YF5BcU7xoMuk4%wv7l!o?jtC3BS*~=b zITN=FCxZ!l={FFHM&I3)+aY@nb$9maJ-hyZ`lA3KR;_~hWl&673!=Wg1^C*)yreA+1% zt1Xz5rUB+ZzvZN5!?Uye^!^1j7%T z5cYAsf|s>Nd*e8V6f%VD(7@nG^ADLEy;JQcPd}xIoyuQ%c;Gzi#!$_UniL=6>+R=D zZ6Xqz)Ru|ghS_qiBDR<1=AmMl*))_Ckw$%!|4EXno_})tt7}Sqj|46C_J(A+T$_p+M~BlquvVgBRw&=JlTI=Q`Ns9S4K&zCvu zR_7nbf9lpJBozHJTQ?znf7bpvbw~qI-$Zp65x?7m->%Qyl_$s*6hGljU}?Dm&l zoF|EtglRo%J|aWqIn0wN&YT`~N}_fMd!M`RPEgNin^Ptnr;o;_etR03oN+8TF5;qT zqMcM_(8Y{GD`C&9!RQRkmx#~B{Q5DUs)N#w+jubeBxS-EA6{VPURh!JImNNc?|X2@ zGULt*R7VBei#**hV!)YDBE994|$F6~um*zV6T%L?T@gW96vo z&F1+e!`evAQ9~fh7&BHinj5c+=&f7W&wL9KeYcEyj82@5-|-m+PR)2aZiDBui3vqH z(rlus{7|nn&f)9e552)=5{Twz^vot9iblD7^o@^~Bm=Bvad?x}00aL#df_=w!682E*Ueke+63I8xI~9~@Zr&MIzmo1 zMZPQd8+U#MdfcbGqDzve2&?THhoE=@tv2Le^?JP0Z z+Fl;GV%ndraDcBVJ@>I5=Qa29#4AHYdbmp6OXYe+{_Jx_LFb9+j`sV%`8>Vb@2jav z?@7*MA^!OB#a|tXp~uHBFH!K~VPj+8U}m+Z1W{{BoTr|mCk2vGWMk!-4N*@mAFr%lJ>-!YZ_{#mi&#V?CK zwIP$hC`5ag(QCa??K~SnO{!Fs-cs*p2+{>ziUYYi0@+%nKUa6#qBxosx;}B5_TSuI z9U;LXVXuEndq2-+?y-LCSf%52W@hZt33q|y#kbFIVnre=$6;N8l*|R z*Rw$D{q(~QA16d;sHyZf+i^i?c%vju$}7MmZf@@8wzm5(Uw++)Yd3^P!GglVu~S(G zhmserD1gvg<{~fmt;~iB&I3ev;-Rlq9kC0aY%7ggH&M1nrdCvt5fKrkQ4^63poO<_ zCom4^7OP)~R8pBcRw}sMy!Cuy(we|u^FLxJnqS@2t|P_eragHPuPpjRL8-_

fiP0p?+VcO4(}3NDP~li3tmUc5ot4qz+d(R+s%k9wa>()&}Hh`4J@4uuz^Dzs^x z9-dE75foKc)kwgK<%eKFM)W%v?z2VevM4>Dy78xmctxyQG#n@5vU|qZ@CNJL^;Z7eq zRV$u=+=eF=51P1Kb17ykQ`Q{gFp|RuN>P}Yn0gDHADdfSxe{#o?iLppXXNDgv>Tr8pPrst@yEXxxz~10K!C|BBURUJ;k_ug-w}<5FQ50^ z^Tb55Y0fjHx@9et*CB(}`}gnh2noG)>wI;G3JfwbGr!NagrT9r7-huveviG2iMfc1 z=G4c05JK^WK`Qduk7&+EQc`tQZbx|ztC}d$M~|?Omoy3uKbnt}Ec9iOjJvK0eLY;t zQm?T4+$3z|-FPh5UW>6VCf%Q}_jIQ8SM<-qoYu6gZL%)9)Q!*W2pQ_8(t;75gFF*owc`s4e?QKFr$mz)u`AAA?Dw3H+ z^IFqAmXhkqQsWTZ8?|*>_~>`@>?O(VMwE%uR9C=xBO-Q;945+8AZ2Z7fu$E(V)EQJzDJiGHm{Hg<(FLJT4;=>}ie5t?gI(6$A89Y`PXQJ>HQx_gpJb zvs<%I3zrt1#(U5p#`oqGZoQUGNSYbDOY*AU=$lxZI_FVbm}t@0+D|UhC^sx7FLk*l zm&kFG7i8B{ni#&1qG#xT5K_HF_u_AqNXf;CmcEmXN2!ISy{O~T9dKUq94wEjT8O9- z2vju2>+E`JK5@@!WGVf{+B)lDSyC~Wndg%zDAQ-%e4>GjwmE=Ul##J>vqZ6p7+vTE zukPb0n{pzV^31`{x8ivR#Sk+0bTnyOiV`ChC-}{Tk<0ooUdd!%`TBM-lnrAR zUmI(ffW>m1Yc@%6Pr=m=vjl+y9g(J0%Rd}3YlvhKx7pp;yXInc zc6R`RIiJ;_%tlIbTJBPN9CQAzIznL$v|D!YD^@{WJ!9#5QS^5F(E4pmWV$!8LZTNqd^czs`Vwjsem~wCON$TsSLmMr#zOJaKc+2DTMgr)gB^%x9_5({a?L8*N^QGIitA#AV?I(Hvu z)M9$#11qaM73wUFR^nH1S0zc_DGtZ!j?DyBK7gB@9Lr$dn$1JmglbK*Gk&-0>hIV? zho)U&$DAe`AJ7uU^a-#?z0eSMxY*Rse!0JV%>7_#QewUEo-cq2fruHd0`TO=KLI?zpJos}zr(cg~ zvo;?3E@!c5=RGiZ{wDWLx29#nnx1sIPr135p)^gKhcbTBig)xhq;9Hgc9(&Hf!Fre zC1N(s*8s#b)n_LhzrXE08Fid+b6Weh_rDG!63#B&}7M5tW`-$`jl0;QH zteTYEym#*&z_dwwlQDjE<2_Hs&t`p{1wJ*ewz66|Ju@>#dBWK&SosoKv6V6xcyZL3 zaaiuH(&Lrqli$$6(hSr0S#oy6`S+E2`AK|P8E?-n>g|UQFU+VRsewe{+QSeEPV+5T zpUG-po&bn1w@}N~62HoN!*2gc-!b!dOE`PsvHcKrE+cID<{|Nax0pxW zHvCc3{O)@G^A>`jYqv39ym&EKYDwLnt7BRp$8Ad72*vq`MJ2AtVOQweckjGP$6as) z99GurAKp|OX*pf9vzs-h;OzRLusy>Q)Xia(iCU@MKv#jF% zMbet5$I~QEdt;7hy&UbEVTs2W<>lp0OP|y8p4Fm9SmqQr5{q)1vY=IHcW*PNtNb9r zl34rL#jlIoD~tY)LK|NwOjXA_E4>XjAq3~Gl29u4vwQc@VAz9wTYct!SUAo)g>Fq8^cFR9T zKzKoeDdJgMLlZ!6AG8c@t-^&b;Pa zWum96dnC&Wi!SURQrw+9%__os2)FrEwW-=dPR6RG&wAx!P^TNA9`#Sp4%J58k5;2^ z>7rv^rociC78nF}aZAfEo1)pdWjB>e9C0$3gfW-r+c)|oaSK&N{IpXdt(;;DmqqXL z?j{%1adaGV?MCamK~_}_f<)n5$pVAK`Ybx<9Vxi+uC9OO9FpJzX0wqQQnL4 z*l^R`-G$xv>!{KH%n%J79r{zZ9ik_0J1W`g`C{;4Il4vzi{C%ZiFFcpT3@1BE77gU zTm&E&+|}mP6yK`TVs_gVu;+1lK)D{5f{YESnuWpq>!`^;LAr8Hc+l+bs7)Ii!rLlW z6^{oIi`w^y+VO3eC`yOQm!t%If2ple*K(crf;h)x z!kXnT)l`d%i@ff~yyNai=17%?@_hfE;Pva*88GFqWW6`~*sd<0=3lLlL+^SW!luNp zSdJjTa=sxMlJ%xyPkwdYTCbcqxvZd|Kzi4Vbb5Lk)mw^rnMzO{E>foNXRbbumm-;} z7o@^kotd?gG$xA$Uo>QzrU>}CZz36XIQzM+t;9dTUIVK-m>Y%mtT#iUY;W8ZxogU7 z7&#@Tawjy_I*s*Ziu{=J7RKkQS(Q5^XZ$0v0tDieG?9<$w$L7~-Hl-604 z&cLN%qO@(>xwSD4T{E<>G^=r5zqnP!XrSsmtvbw*%eknJvX|Dh=P0b8?Px{DaimjM z9**m2a$^Wz0`-JtE;PCW?0ZYB`a?QY`dAB<=(5E8b-P+SLEvMh*^U77hYQ+Rype6I?F9 zPn@`n%KUf-?d4pA+IngA2`->?>BguG&)93=DQ4gqHo_XijNu`b@y{AvuhI$vA7MGa zDQ(@#A~I!Zv!w0(67jIw< zc6TcR&%)y@%+F7jnwrWop=sL4<7Hgd7;dI&kKYos;*#HMO@Ot?r~jnsLmTmEM{iSq zirAgwZi-YMC)|zbv(GS0D3EC&YZ{t8h+tKlPK&irL?TQa2A!*DPMiMccRNon$j5Se z65D-XrYz0K>{E0Q#SLRt(`3wvkw#Y-o=%;)=`qlBvBQx*5r)(dNTLrVtJ8eEV-Pp0 zu&~hjc;6aQQkgler$l92%WphN0@XRml}@yrgxD9OVG~gZza%}|^EexrcKR;~r?ThW2c7q?)OK`S0p7vXx;&+DmYk(m zdLT+>v0S3-d`~7}sj&X*Z7P=}Np4dO!>ndOk;Ny)6%^j0`Z~4*{$!5{MN$4iLG`vB zLVg~nTZ#K1x4bb*bR(W{KVp00vY1NTF!aBl7{6j1se&wgu+ZoV-Sz8QJJ$>#jHmAT z8#}q(3g*aP_<0ndU*4PBjZHKxjc)Cn-I9>5|3RhO=6%#M-jz4HDzfOD-$EIW;=6s4 z$}cA=wi1eJM6&AyKqarYIoq-@@P!&d3<$%zV-@xmrBAC~@;wNp3Py+m%ZVyvTlikO z*8N<1`VAq|`g+}!nVo^B5219(-oF9$4S|0QTJglmLl(9sR7F*2i0M)bt_-`~Novz{a4_eCs0W1b{f#XH%*G39h| zs*~xTEUHW3SghN{V0>>2N7%n~xx@!LI+P&jNOwFG7q6MJDm$U{tK0ZCat7EYE-H$m za-MzH#`5rAxT&J`u%fpQ1geP&FQ-;}`{oGp7fdIoZB~40tNJ)I?89HOzU=OueqDu! 
zrb@Qu<|I@IYwy2AMMQjd8X;~0zG`!>?OkG`$B@7PMD=&?-|HP$Cnt+Z5*W7(WWKPm z==#PgYhY&Hh>B^%THJWp%lRSs*NcJ?4Y71x<_9FVwnP}h={-Mw{5WxRm?9a*F7U+3 ziTC*AL_u5I%%!TVjMSlWY;|pLbyv#b?DS~uQ%VX3aAkL%e96kH(+?H$HE4^RYU8Lj zBLIO33)12!o!|Xjji|>VS)0jeCtUbsj7ljXA#9vb0vDN0mD0SJ+*`{0R(qS#>rwDy zT=?qcfiF>H9Jg-aK?NqEs!9ws8d`7YL>ur~HZ}!+_^kWzwBgUV7lKic6K6l%pY*;` zL3tf{Ko*r^+k_(UDvM#F+Ek`CHrA;Qkx4%c=#+m3*q*8Pxi*KSn`wv5I`PYvbXf;| z0q9`8SxDr;Q(Sn=&=g4#`Xihj0}AlR7ykq0#r3{eGK}$~>)!82GBW2;5HQXmsuRKw z8vY(biIEE2t+F zOJ|pDJc~Fd?=(|O>>j1ADudqjJY98UQ~Qjg(zCCV5siY!fYB}Na-#pAkO8L8n?ya43Mvc3#^!Y>;WMPnbkYZqSV9p+j( z>+0NR7E-;V5vx3h`{X`lo`N_oTcpe zGxM7TtH>e6;6`YY-z&M>u?!2E(y4(gZMf$00+{k)fiWSlY^awNjRO~Kyp7INwso*3 zBZ9))%y^wPV)O_$&s?IMOoPV;thpf`_dAF_N!V#C$uY1tHnZr=hw(VIW_;baMf}PT zGXn#z;W=PU1zr(*I*CuX{XKL|>l7h%?INrWox0mRTSR{Sv3hRwn^G|jK!MgPqD0Rk6I>L@GE%v%2fqzA|N*``Q_Zo+)i_YiJ zCXBziR-=KV6jtw6ocw~{#1i^A92axiyNOjN#X2h@HFR*Qi6paNWGtUY*Zv``IlvSQ zrtNhZ#6=|w)5-iCiOkm{z*;^|R;6mm*J(mMrj%4@<&yLZyZpc?oz*DhDYr~miEa9HvHRy(g3it17$18F{s)_!h=>i;eWWFB(NjK2+ zm;=v6n=9u=YU>+8#Tm^zwGTptW~TKe!W6Z*8`U(P+rM-p@V|RxbsO@fTKGCp9J{Y* z`jB9$urad0%DOwL*(6D`DW*etM_N;bi<wR>5OcJ2Y4=e`s8Ftl`3j^^5!= z_*_vMq1DL--W@~@zjmgkb9t{2r<`~4c2qDMvs}L_gFc0y>#K@AnW>V6M?~Z^Ir*%< zv5}64XYc)p|xXBdANrg6#= zYN=z9WRGx+)-w-MQc|9s9v?J!bTokn;oaM}NWoD$vNryWoRCqvCY)I*&*RLUo{^D` zlN0~^`SU2xLZc2k5fN%Rk&`={TU+_gTZWL!VIaA01a;OmT%3Dt!qF+drl)Rp<$9W; zi{exgIb@HGuuTd~jjJ;X7z5J~ES}k5izSj2{3~expV5EVDDZhm7#cECIZdLCTGio} z%|&V<${ZL=KIY|hwubhz$?p)ACG426uCF6OWqOQqZVy2p&UMR?JGP!{X!3hpA8m1Ozc- zRH)XrwyF7fzr4I#%ySlO`UL*7W&BTid&kWyg;jec=)XzTL&WkB$6XY$9nxq@fm_2k`fbvFs^Yz=g-R#foa|AON_&)W|<+B^~A7h3#l@8C0sAFlGD4Qfn zpp*V5GF?H5dm++9o?$V}O>^lErX2fL$P4#{t#;t#5IFTE1J&WRy(m;7TdMXQ| z9q6UaKYrXXpLlyrDdci>?d#U%3cE#{78{K7=V|Hb>$g9>Vh9tx!$-T|UJ&7-V0!AI zc2zBOtd@(vB<=I_21Vf%CMm!m@+1PyAT$im@bIhj3=Fi4jF@%4xOD{vZSpEAK2Qyx zLov$4Al2P(jKU)i! zW?9`(*b~Ox7(~zTz58D=fZqQ(19%Q48^`O#rd5JM0p&6&I>ep?jq82k8F&c6_kcuQcj;|%}B`HJz?@%@Zxc5?_v0$C{axioBvltZ#G6# z{vgAUxL{RC^zERB@l9TzP;wkMTDb=yb&woO5q%^wjUzr66eQhGqcj7Sg z`bb4TM&~ zDyqJbZCA>L`{~d1T2qPwO&RO_vcWnc@GO69Y! 
z3RI~in5;7>&SlKgSgRCKoI@fGCl!Lvu&ODJDxF|q{{i0;Bs&bOz@W!4eVLKxBl_#J zwhGT>E9)#@MjdhKv|N^oQNfr+>+FnV{SP|-$i_glT69!e`mNo*cAZo8c4%t7sza%a z>dahi#f%n9o5gx>=NyC^Q>YUzOQRqa;MRFx#F*db(;w2BqM~p=%Z{b`X4QreeNzwT z2b+)W^j{3Aq~4g<;!=fL7B`i$&sf?ceiUx@b9M2F-t`;-=C#8<-7l)hmu)CC?_!G^ z-lUrNH7Hm$=4*KFpB9qx>{x~y)?b5tz3U^|XDe?c6{zAykmw27Fq^k7w%Waf(tmbi zY+s!VdG8NY1UOlO!)4GF9KGcG4ASi4)s>8e7`-x*XT)*oHgO?`MW!{yg22B`zx-pY z8})msj(gKfn$F5ywi_lvG9>zHzV8|GtGAAf~51EmGRAFXDIH20SJV>6X8 zHe&l~=H>qP!f!`TW>`bBY)HM^^kK0FhFDTEDX;JU+K`b!+&H<80-06cADJ&F{}52~ zVPcoa5oFN{4cEEFMs_{7-Dfa!Z)#(8H$NOMndrQ!a8c$=Ebvo^$cV_5jU<}=VoBMR z5_4lEAuZnZWd!=%Z`!8_7*%C%DM&;~^r+-7`92^L*+pYTl8bV9OH6McqmuY@Zps1n zq9sf@A{~o$<$`I<(8WX~e~!yM_c+Wp@N?&5t+cCse(XBpkG0r{f<+Yhf3_z}(o|4P zv|6k?y^NjV-r5hC+1zL3L9)GpO3Kd{yt>0Y+w>tzn)NX!&@$0ZCE-@lS-sfA{bJ&L z_N}g2xD27^O_5|#O{IJ#L~NdB=*q7k2jr4Le@CjeDp|85o@*=^&t5o?l*Ya^Q|^(- z8e*zKQk8sfnq{#Ey?LhcV2}X5v8tY2^3ohZAkbjQB`uZ}vYOK7E!58~d)L45{~GBY zCC+9Xfy&h^z6f+Aq%#kI!?m%s?QiU_Yk<7$%+ca0 zG9MuZ8IIRwJEJw!%2~RtrWvQLzWztR(%UEVt(x50Gp!bH6nb4!qy|1nZYijvfVNaZM=_Ci-QXhny+tARc^BznznY1PYZ7(5Tp z>4AeJTELMVm`SQpyB-;*ojx@MW#!k~+jcl28cOcOMW{?6?B=f{s(j-Q+9ehGM1LO+ znuy`}sBu$9VJX@xm~ILSR_Tm;BSYC6L}fhiOvka}GchrF8x`d@RA@xrF=TePzNzWD zfWXM=+H)hL>kJGH+uCAcuilMP3OHN>!3hyYmx1N*4-S@aaJbtTO3iWRt<5+Pe=<_Y zjdgK7il8*}@YTltOLNM{Stcp<=RNt2W1 z%%2WGEWWAj(7eL)+r59G zr*1eW?s}b;?HsZ{)+5BnU!3k=u1wNNic_`W>Pg1o6%?cZYY}*~?+QKD*1l9Z_IuZH z#o>sd)N;SHtZc9sU0qu{=$ru9;G)&3&9CGk14)C-cYI+ht%{t^ng78Bk|l zUS6d9b~g$N3f@LXW1&FlMJ!GKmIAV)g$W~fsHD`;(0K4&O+mp67Wwfno@#6Izgehm z;ek{uKg3wA0^VXY8U}{*l9H1CZ{E~`Q$5dYh)Y>TMHjdul;?ks;qg4gF#Yx8gVXMi zQ5Uc-+wk>B2>}#oFnt)9yBADSWYBpnMtS4-?JsWqYWJ?2s!6)%jcBEi8KXpXb!p8? z8fk;;{H`>Ce0R(7xAu<@w=cLIZr^ip;XmA4vozZRtNj81gCa-n~o1PN{m$=BZX=SU449nyYX~MN3O7SgY>2 zV}{#b$gSSd|4c~hzKYP1N!!zN6Vk4KhDS;$3O9j{x(=gL3JNe?sMaFgcH+M132Ax% z%lon#Bx#Tf6ZU2E{Oj>QG=I2!RLN4M1T2Gox&rKQsWm z_VLjL!wGmHg8HZ%Ou_8`$5{eZX&0DE5t|3_+#eUSzN>ezmDt8>v>Bss}xjcd(egCZgbj0*;742(7Nu*PV+;N44>Zh!_x~A~+T=%{cex zQ-G3#coC-6st+TYTUzj+zTEI}0?D-jBnU&ZLgOwvdGWVec)qo@Ra{>Ft=%_uPe*gX03q?)T_56atw*$0!Ls zeE9H_QAff9AtNIr{Tfd+2qw2mB*8c(A#om7mYMG2w-J!fIf3d{(m~JG|4?E!^eQ@< zG>B4wSf|pVVY1eHV>*z7>2#kYUukyZ&%}wRA#r`^9$M z8u~8XU(!na;wvnqe+=On1eIC7@x{yjh~py!LkcTI(JLtE9f3~}hqj>?BM^DPI;v0Z z=;7x(fCc{4`=F!m{vI2d|J1jL(+=ZD)ybX;8Tccj%A9MD|f zt*=|{A06?0VAl8xg+D4Tvhwj=gVdm*a;+-RG`GCbY22dp6Nq941_o)XI?F>ArIjZF zcfS5w2D%6`%2%}aeQ9WDP-tjqNdA$h*KiR70|VR!uk3G1mRgJYl4RhpAZG5#zpm>YW${lH$M~z zkN$0+k~6GTd@&jrxMkG=V~OR&c#7#Wa5)?Iqy>LY2&LeA1^(Yp2{Y5v>r;NzF|vZN z>ws2zVZ*sA@##VEcWCbNKX>6WsR$=0CpdtUH=^xtHszF#4?n;52>|=*H8lFl^z@oJ%#ekH`&-^fUwgP2SNZ3`q|J&C{*lZ}szhyaAg+5iG0myg;^+Vr4$O;q*7G{@4X*6wq-Ral*gI70hJOXP$V6o{y%o9ic z_-Qc65kqjGgXe?DxF7$O3Ght3yv`^cO~Y@uKm$o|Js8;~3=C2usOz#+l$HG<*8UkN zRCCs=#RCE92`;TgMOe$_bK`D%I;Hb7FhR7hzw!$?ttXLk>b(Zv9|2TeB#@M`Uz0+5a<491Lc;THS&F2SpTzuvTBI95o7%#@;eVh zsS?;RVH%3RLocLtJ2!ZZNvXhVf6;Db5MT7<@>K$Y>8Wl!eEhCC#2gh$B^0z=#-WGq zbe^oj?Y@}UpFZ3djZuD<2Zl{N@Imjc=>LZvT;X5ZGL)@9`{A=8VVk`NW^?Hug&#} z>E%v8ELv?U*E}T4oK^VyyMqS)o1H;H116@Xe`ss~O5=$tAxQi=Uv$&MbbrtbkBrot zu73kz=pF!Hn)HWP&{TTk;N^HtabHJAhwU7MP`ZxI01PYWX@KU+T5#!rU>^!ilXSGS zXyBT)D4l?64%VnAvr^i#v(uAD($eC3dUO{rLU%Y|v;^Q*g8ShM#N7?me&_1$A+qF9``A+lUC+RSjaaMmL0&+40@T1A|KqJ_ zVM;VBH~}FAA&7IdO36CLzypT#UP4+tvnvEx83-L9Q{Z9~>qrpVfOH6PI77<90r-wW zK}SZ}a&F~1cr>naac#P`=|Cyg*(r~-($dqXa8r*dk}SoQoqj$;p`OGN1VZztkTy&P z-SVQtiE8Z(+kf3eWbELI0!ObOB<7Ij29bhy>cQp(6qNIMLuL@Xt;zrC*g!sb%t;VU z(CzTf($W$vHeDBC@XB5Gt6N>;r3fK5_c%RvMsz5!7QTA7C_r)9Zgse?NC#XQ{Eojz z&Eoz=Q7FsqSYXqRb>YK=W!dcLEQqF@Q&K{V6xpxw)B#3_8e)W8?}CgHoH)}ID#en@ 
zEjTeG2ditG2AZU?u3Yh}t`-Rh43z00At3>rtURHt9COzg;i30H%zJ<|7EAM`Bdaou zAGFJ?&>)>|udS^+2!;5hDl0o<1_2tH+JUjpw-qwPqkr61gJGqdcV&A(OaNf?-z$zbF`yr76Dp4+rP38oEt>PSW0j}6*U z{rCfa_J55OK@Bh!Q_$lb&0|SLarpw`=7xe-Kl$Nb#d2dS^nmMwf`L#+PJ0OfI?h|A<(}A(f=ZJW`i?TL^WIe)nn=T}qmtJ>m1Sy4iDx~Ws4Q?0VTPYVC0qbq>cN$P2SoM?R{KD{J? z|9$`I4lkfbqYkeQ9EY~RsRZxV{^ywmhoIX}P#`yJwi12?@2{N)if?*Z$kEK751nYh z_(yQtW;kYlhI|^KXd)f&MdWGZSFJ7~XC0vZ9hblkK|MXZ@E4p^LHo=V{FOZhco)#M z%zwWh8BGs8-cKw>xB`v8-zc3$Py@}(H`Vh{Q0Er3-M?5~atJM%$PzAQK?BbS<6X<~ zAk#13h(!)sqD)I{%=$mWAYojaitMBrP-oiva{Hx{iz8u@1Yo2xzm0>tFE zi^U_B)s*0z{|=I#=K3NzWG^&#?p%e+7fFFtF0sbn6!9zTT}65BR99RR3HZG*?1%hA(OlNE%A94N)S17#Bn5WPb#nA~Gt&~Mim76Q1d#A4Y@&D!= z1SBe7E*%k|2w8IzQLEl>K;Fe6{@?+UvN}z_VX~TS#y1=PI<&eA97@{IPsrEVTtr|W z&{=k7V&s7p4VYa3x3qa%8=J|swNNNenSGSZA?r>X;*T%s@~ocmz8c?^IGy`S5}4Hy4?03T0YF8{O9j^Sj4ThE){GIm^{5+tQ|+g1qFJO5wyYqnc>l#@jU zEcWNbjtqFedWALLyfF!C5EB(?B7?7t5dHl#m_{H|Gzj^mgQP+*hDJ%O6Wdc?eR3jDVwGR1(f z_%A-Huz5O;TKjcT4krQVAf2sp?weu6P0$EUak`x@f}H4pq!dWa`p@_-n;`hY>`!|z zRJ2fJH-LQu43fv^z(Kw+QC*$i9?7PKocMrrqo8iNA3I&55*$=#WCRr*k;i|R&0L1e zAKCtO2(<_Ge-^h&GNn5q780FJ`uIJO6>eTS<_m=+ruT&_nDP^D(DCl|{?jIbWp&)sgjd+_fCDsWM52MJ2cpYW| zcm>Pw@bDy{UU*;b-*Wgp`UY0}%3%I|8+=LP9U?{V?{h2mvYt3`hAP-xUj&3>`#`mU~DdZ3u_?1Z1}G~q)Ao6rb2(c{!xKYG8E zBIs%5Llf-?u8S0;)ecu4uno})?f#UL)BPDsJ*^!qOh`4KMlum1$b{GmfS_*$D5 zZd3|b)SMmBYI(K&>@N*R&QN)1BP@u)Swz=?+X>N%P< z*7BT4RbB;$pFI-mytEj^Uw81I44tIDA4Jvs5DO+|@`TWP4=RQ7{zNfc=OQH=OydNM zWVx;BUbU^xy~Rs$nc2Ir;vT&_@i9nXf2G!ZoI#qH$Un2Fr(JTcsV(m}%yso#-u57z zjS(BnoOCgV?A$)IN$UQLkCsgOYb@F}Wyuh5t*A?erAS z?sc|Od5g)l46<2|+_cLi^m5WP74*0rj;>Yvm`YE_{1l|dx9k+yM?Ng!wCc54V9*w- z2BT?%NGW`@avN|5A+({o`?k0PM>jD0Y^mIZrndLo<3B{aN-q;n<#!M>WO-yfRNz)&&#a^Jo@J#bQmEZ2)dC+z|3 zOAseo8fetq0}Rn{8VWr>GY>K4ZrUzIPO0zJa-wRI&tB4Tqm4e@XG2Z4S^j(+_&H`# zed8J5!o^!JI^pz!4A@jwAfObq-D5AiO!LmKoPw&L@?&d9|8(NP)0y;hl^;!;o_)?I zU&jp1#gqkuW~PchN|Mch&GY+-KAq0$>G2%`H8(hv#-{S8c;Q*OO{h7)|9LHu? 
zbC`@3!+ALBpH&$=A#(i`Zk1)eX~?;aY0jscJGfzM&bgc&cMa2OUSrZH+pbIWQ2=0_4Kx-J!c-2GXp0 zXhY;QXbop&kLFO?QL$pt;jTp=zWS3 zk65s|2JQOi<*`5Oqb$qkh(Z2nxwIIj4E}!BcyK!#Y@Hr{DSbai^zhJDNZ5`~(T(-f)m?pZ@?@>NzvqFcWD2k}t=o)ldCw32^KZ2(FZ@#7<;tMX!uq5jX_4G-c zb{p(~`?3o{ADm>gFj`)?(eqh}31M9z8?J*RvnDcMW&baJ0uG`379epz8y>VC-@?2^ z>IquYhbI$Xoo^!`hv-+JmhysJ5=J`Tk=O*3d{?RziQPgc8dBXrg{h_D{vq2KjXwF; zqSFc_`+_ou-w2?0_%kf-V`zplqh}U9$)2P>p1gqQW{}isQds^?MZ|EM;=zF^Pym9K zhxT`WTdY|%h|wU(puure`-^fO0l4IMX*fCY!GHkZEP_gcd==DW(W1M>r!iR!5%S|RpZK`AUW1A< z-Efk=&cuQR1D5tWA|>^T-PEbL3e(r{=e!9xeWey!)GXmVlgn^)*7LB{D2}Hg$T1(l zP@p`4cBTUkmQ^jc9Yv0a!_^ zHn!47CqZmoWtWpi%e1j#1lyTW9=Jb$<+5F?D9T<`{F#NFH7b4BZSa6P50eQOw>T5W z>Ev){u;_(S*gTK<@N2av&Z!dl|I}Od8^!)@he^A|n(UJ&RKOm|SSNq_1X^5&;mkjK zt;Onsm?Ux*cS4<~^;k15EN~q>YLld4KR>tgUhtknofx!iD%N#7YDS>a($eSz1;4gz zA<7Fi=mf+K{QoJ>|~{s%9v#8yl*|{?0wE} zKfm*wbDi_gxt>3|_U^j6thK(r>wACh&*%Mm-DWa#b5Y@K_vqz>Xhx?_NP)&ldKCLI z|8->HWBx~Q;8bv-o5UXMLoh4T$)ZOFE#=|yPe@3p@eOMkvgO-CP4;N(ukC3{dQs9O zPQ8Ci-{P%2U8{gG=NM`-+S|(-1Hfa@j#v!o+RIX(okwb~pFtP2NW|plyur~-<<-CM zp=3Wj+lVOwepAm;;?}3=3ha*o=8=$_o2!1@$Uv6kdbrAy#~zx7`S%VsB*&O?xr{D4 zUn@T8jxb-ANdZN|qL|al_xt)iAuG<)>EoQ36u^?=IQaKWYL%H?|4`;2iz_cbp9fc7 zL{yaBAijg`tGFK`4;ZqMzz=)}WG00o0`Tb?V#_{05kkrXU&?$#Lie@;!_@;DHKxBl8q7;zAVx5|Af)*`AbIIHIr>V z1-%$4?!A;fHE+?}90ZC;NwcbZfuI2Pn=T6qve##)`ob- zVJ+_(AWuDJg=UVSfq_vBJ8O>vJB>8(P6hC!BLRuahQFbq10B*`AD2G^EA7trtfoGi zzP@XD2oFw^EVJ{-2Nc{&tCoQv$@JkSyN5f@zZc(1J<=3yMu`>Nyq6<)&VaDOXlAMd z?-nek^$aOGd4iI?zO0uoChUwZg(_gyjbOvYyT=K~Q7A-cPZ@cF0L6wpi4-ryHg zm*(!1`83M(sML-H<=luNA~l-ygs;Ht<35Mvmk}S4gEy%50Z(Y?DrY!@T*PbBTUrSM{;^N!?F1X4DVdbdyjv=UlYKNgi5SdM4|P zU8XI178)jPr?17|maw*5UHfoZr7Ml&Y5yB}0lmn>T6P>~N{YRO#7a80Z(3!QuCZ*J zc4S#%-(ht?H&`%Mh`X|RY+p(H-V-_4pe>n*h!trIBh(I+#`6WMucVpHe-*^RbS{aR zt+*>Px}LeS`RhCFNHz016K(Pi)10Fkl3mrdhS@nNERJGhytv8xAb#qTees(M0>`l1 zB&a!}IkY$D`HUi-*&m;upuUX=R;2B;bVfvO$34CJF6&yu(YUR>?x$~*$z0^v)ptu> zoQ`&^-vIF=kV#{H4m~MlJ4f;5hVTSI;}@HCr~S^AJUzPCKoM-HjE+BR!wYpTHc*FjZb(>Od zHJ|HoSU@G>gKw$PMxh3@bFG{q$bX7!%@kVq#=3*Idkw9d1`^8$r37eIfApd;_7)Wr zGmdCX-R8S2#=`6bSNbp6-w&$C&k6GE=pPZw3k7ZiK?n8Yo)3$DKgs4|x{VqU-NA$fFTiCIZiPt_yC6E;2(6ix^Vomho; ze}Vi^=g#zYm^~|v*Z`ZxbXUiTUzB^nT$ow% zYiqKr{~U4p?I^cvBO>{uPtR@0I-1R-olB+t(sl9;C?^tJew&)caiA(HL|#JJ+4xG5 zwoh-=_1*(VBCF!W_5`O{?0!C}^!xI@EB&_5+AX9aG*3UibM9yO*I+ka?#MfjMUv$P zZK?^53ek_=?yAZ$Ze%(c)=1G)u9@gochuUpJ+!MdIapMZ)04)S^CAo%hV~DdgPx6) zaI#|%wwt695UE1+lLgORiVF&iF48eRnD20Tq^`v<1V*OC8)AD){R66Bj(g~qDBxjc zw&9h$9_MXFoZW^EqTlQnU94fm&Z(86Yk8^vx1gd*S+s{M(FV^w@@&Q1w{H_rWI{mn z03C5%CTT#-w2+r5Pv$n%dGBxzx^|}ia(c7j=<*!DZl+oqktk}R@zvqMXXk}H22iMUn!Pk2M zXjZ*_$gru_380>wSMYlF%_^b#E1IiiQ|O|YyJe=Q??SDM&`-#FXITOK_i}9P=h2hU z0_wKUQbGHJImNWCf-p#@o$L{0WeYWH)q{Bx(e&q6plylsoA$yeY*&khlgtf=uU=zSSMc)q;yU^5?9l1`rzE%pXI zaiMCdioTYnrj+1oWSg<2=3W)`U(( zPZLwyqJ|gvpEu9J8WZh|(M9RuDm;SclaFw^&OOF8Fr1oCF?r`u(qu+=~L4$wQ*RgB- zEethdJhootoSl$Z*{Cd|5cdxW2r*iK`cTeg|4c?V=7=pjaRpoFwY`AkswijPzw%A0 zkDq6cd@&5KiEgm7=J6~_)5-VzB;3H{{Db*J|S6RY2tunsx3#tHSzr=`qzgbiz&Y?$&N zYJq?311*|5rp6cpf*e0F@d~ZsXa%_wpAH}lSlC8GgMuhd@T}_^{fZQsHCljKqDa?d zrO)pD^~{z6GqbA)y!K}Xe*j2M#4nZsU!4`Z*KwMfW%oEdUB~(K%mO;!CAM2BEAG;J zDtlHB24CNB=E*&kv>UT;vP>U-81wX%_jN107I*@n(2HZ-0mbD%?nTJFo{^GW1pMLH z%YLnFaRR|?BABRGuH;&5VCOrT%Gi|usqb9e(v%Txvk1LHrZsnfvwR^XT4a2wI~*D_ zhQQKn)^Zhz%lW+G_XcHf&l|)p)SkN95$At9{d(zaQAw9Q|(mK+g`deTp@3{p#($<5K(MDlzvG zbDTtt+5E$mhCAlI`08`3(y&!EficIRXF7p6d#W0v%;UcDOB@Qy*b+zfV3Dc7v01#v zzu%Hmvqk?7*X2sIgmp$QI55FZGsfKUjMz2@U9oH(EKX*4TpV#I*t!?+@?C{%yajRB zs)`z1CogE=VMJcO$&<}|Yqn-*XQ3cf&17@%u|bwtsF6>3 z8r*S9`>6PW$?=jQm3X`F+~4$xzKn4?HSP0IV{+f49ws9^Ya}>2`mLc0$C>DB{1F}& 
[GIT binary patch omitted: PNG image data for baselines/depthfl/outputs/2023-09-05/12-21-24/centralized_metrics_HeteroFL_iid_C=75_B=50_E=5_R=1000.png, Bin 0 -> 31063 bytes]

diff --git a/baselines/depthfl/outputs/2023-09-05/17-39-22/.hydra/config.yaml b/baselines/depthfl/outputs/2023-09-05/17-39-22/.hydra/config.yaml
new file mode 100644
index 000000000000..e79db86678b5
--- /dev/null
+++ b/baselines/depthfl/outputs/2023-09-05/17-39-22/.hydra/config.yaml
@@ -0,0 +1,35 @@
+num_clients: 100
+num_epochs: 5
+batch_size: 50
+num_rounds: 1000
+fraction: 0.1
+learning_rate: 0.1
+learning_rate_decay: 0.998
+static_bn: true
+exclusive_learning: true
+model_size: 3
+client_resources:
+  num_cpus: 1
+  num_gpus: 0.5
+server_device: cuda
+dataset_config:
+  iid: true
+fit_config:
+  feddyn: false
+  kd: false
+  alpha: 0.1
+  extended: false
+  drop_client: false
+model:
+  _target_: depthfl.resnet_hetero.resnet18
+  n_blocks: 4
+  num_classes: 100
+  scale: false
+strategy:
+  _target_: depthfl.strategy_hetero.HeteroFL
+  fraction_fit: 1.0e-05
+  fraction_evaluate: 0.0
+  min_evaluate_clients: 0
+  evaluate_metrics_aggregation_fn:
+    _target_: depthfl.strategy.weighted_average
+    _partial_: true
diff --git a/baselines/depthfl/outputs/2023-09-05/17-39-22/.hydra/hydra.yaml b/baselines/depthfl/outputs/2023-09-05/17-39-22/.hydra/hydra.yaml
new file mode 100644
index 000000000000..2a1ae68a42ab
--- /dev/null
+++ b/baselines/depthfl/outputs/2023-09-05/17-39-22/.hydra/hydra.yaml
@@ -0,0 +1,157 @@
+hydra:
+  run:
+    dir: outputs/${now:%Y-%m-%d}/${now:%H-%M-%S}
+  sweep:
+    dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
+    subdir: ${hydra.job.num}
+  launcher:
+    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+  sweeper:
+    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+    max_batch_size: null
+    params: null
+  help:
+    app_name: ${hydra.job.name}
+    header: '${hydra.help.app_name} is powered by Hydra.
+
+      '
+    footer: 'Powered by Hydra (https://hydra.cc)
+
+      Use --hydra-help to view Hydra specific help
+
+      '
+    template: '${hydra.help.header}
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (group=option)
+
+
+      $APP_CONFIG_GROUPS
+
+
+      == Config ==
+
+      Override anything in the config (foo.bar=value)
+
+
+      $CONFIG
+
+
+      ${hydra.help.footer}
+
+      '
+  hydra_help:
+    template: 'Hydra (${hydra.runtime.version})
+
+      See https://hydra.cc for more info.
+
+
+      == Flags ==
+
+      $FLAGS_HELP
+
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+      to command line)
+
+
+      $HYDRA_CONFIG_GROUPS
+
+
+      Use ''--cfg hydra'' to Show the Hydra config.
+
+      '
+    hydra_help: ???
+  hydra_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][HYDRA] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: simple
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    loggers:
+      logging_example:
+        level: DEBUG
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: simple
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: RUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.mode=RUN
+    task:
+    - exclusive_learning=true
+    - model_size=3
+    - model.scale=false
+  job:
+    name: main
+    chdir: null
+    override_dirname: exclusive_learning=true,model.scale=false,model_size=3
+    id: ???
+    num: ???
+    config_name: heterofl
+    env_set: {}
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.3.2
+    version_base: '1.3'
+    cwd: /home/peterpan/flower/baselines/depthfl
+    config_sources:
+    - path: hydra.conf
+      schema: pkg
+      provider: hydra
+    - path: /home/peterpan/flower/baselines/depthfl/depthfl/conf
+      schema: file
+      provider: main
+    - path: ''
+      schema: structured
+      provider: schema
+    output_dir: /home/peterpan/flower/baselines/depthfl/outputs/2023-09-05/17-39-22
+    choices:
+      hydra/env: default
+      hydra/callbacks: null
+      hydra/job_logging: default
+      hydra/hydra_logging: default
+      hydra/hydra_help: default
+      hydra/help: default
+      hydra/sweeper: basic
+      hydra/launcher: basic
+      hydra/output: default
+  verbose: false
diff --git a/baselines/depthfl/outputs/2023-09-05/17-39-22/.hydra/overrides.yaml b/baselines/depthfl/outputs/2023-09-05/17-39-22/.hydra/overrides.yaml
new file mode 100644
index 000000000000..ee6825129f33
--- /dev/null
+++ b/baselines/depthfl/outputs/2023-09-05/17-39-22/.hydra/overrides.yaml
@@ -0,0 +1,3 @@
+- exclusive_learning=true
+- model_size=3
+- model.scale=false
diff --git a/baselines/depthfl/pyproject.toml b/baselines/depthfl/pyproject.toml
new file mode 100644
index 000000000000..123ae044d980
--- /dev/null
+++ b/baselines/depthfl/pyproject.toml
@@ -0,0 +1,136 @@
+[build-system]
+requires = ["poetry-core>=1.4.0"]
+build-backend = "poetry.masonry.api"
+
+[tool.poetry]
+name = "depthfl" # <----- Ensure it matches the name of your baseline directory containing all the source code
+version = "1.0.0"
+description = "Flower Baselines"
+license = "Apache-2.0"
+authors = ["The Flower Authors "]
+readme = "README.md"
+homepage = "https://flower.dev"
+repository = "https://github.com/adap/flower"
+documentation = "https://flower.dev"
+classifiers = [
+    "Development Status :: 3 - Alpha",
+    "Intended Audience :: Developers",
+    "Intended Audience :: Science/Research",
+    "License :: OSI Approved :: Apache Software License",
+    "Operating System :: MacOS :: MacOS X",
+    "Operating System :: POSIX :: Linux",
+    "Programming Language :: Python",
+    "Programming Language :: Python :: 3",
+    "Programming Language :: Python :: 3 :: Only",
+    "Programming Language :: Python :: 3.8",
+    "Programming Language :: Python :: 3.9",
+    "Programming Language :: Python :: 3.10",
+    "Programming Language :: Python :: 3.11",
+    "Programming Language :: Python :: Implementation :: CPython",
+    "Topic :: Scientific/Engineering",
+    "Topic :: Scientific/Engineering :: Artificial Intelligence",
+    "Topic :: Scientific/Engineering :: Mathematics",
+    "Topic :: Software Development",
+    "Topic :: Software Development :: Libraries",
+    "Topic :: Software Development :: Libraries :: Python Modules",
+    "Typing :: Typed",
+]
+
+[tool.poetry.dependencies]
+python = ">=3.8.15, <3.10.0" # ray 1.11.1 doesn't support python 3.10
+flwr = "1.3.0" # don't change this
+ray = "1.11.1" # don't change this
+hydra-core = "1.3.2" # don't change this
+matplotlib = "3.7.1"
+
+[tool.poetry.dev-dependencies]
+isort = "==5.11.5"
+black = "==23.1.0"
+docformatter = "==1.5.1"
+mypy = "==0.961"
+pylint = "==2.8.2"
+flake8 = "==3.9.2"
+pytest = "==6.2.4"
+pytest-watch = "==4.2.0"
+ruff = "==0.0.272"
+types-requests = "==2.27.7"
+
+[tool.isort]
+line_length = 88
+indent = "    "
+multi_line_output = 3
+include_trailing_comma = true
+force_grid_wrap = 0
+use_parentheses = true
+
+[tool.black]
+line-length = 88
+target-version = ["py38", "py39", "py310", "py311"]
+
+[tool.pytest.ini_options]
+minversion = "6.2"
+addopts = "-qq"
+testpaths = [
+    "flwr_baselines",
+]
+
+[tool.mypy]
+ignore_missing_imports = true
+strict = false
+plugins = "numpy.typing.mypy_plugin"
+
+[tool.pylint."MESSAGES CONTROL"]
+disable = "bad-continuation,duplicate-code,too-few-public-methods,useless-import-alias"
+good-names = "i,j,k,_,x,y,X,Y"
+signature-mutators="hydra.main.main"
+
+[[tool.mypy.overrides]]
+module = [
+    "importlib.metadata.*",
+    "importlib_metadata.*",
+]
+follow_imports = "skip"
+follow_imports_for_stubs = true
+disallow_untyped_calls = false
+
+[[tool.mypy.overrides]]
+module = "torch.*"
+follow_imports = "skip"
+follow_imports_for_stubs = true
+
+[tool.docformatter]
+wrap-summaries = 88
+wrap-descriptions = 88
+
+[tool.ruff]
+target-version = "py38"
+line-length = 88
+select = ["D", "E", "F", "W", "B", "ISC", "C4"]
+fixable = ["D", "E", "F", "W", "B", "ISC", "C4"]
+ignore = ["B024", "B027"]
+exclude = [
+    ".bzr",
+    ".direnv",
+    ".eggs",
+    ".git",
+    ".hg",
+    ".mypy_cache",
+    ".nox",
+    ".pants.d",
+    ".pytype",
+    ".ruff_cache",
+    ".svn",
+    ".tox",
+    ".venv",
+    "__pypackages__",
+    "_build",
+    "buck-out",
+    "build",
+    "dist",
+    "node_modules",
+    "venv",
+    "proto",
+]
+
+[tool.ruff.pydocstyle]
+convention = "numpy"

From 8690fcd6f55fb9df5f78d6c580b3e19718807688 Mon Sep 17 00:00:00 2001
From: Peterpan828
Date: Wed, 6 Sep 2023 13:14:08 +0900
Subject: [PATCH 02/51] update gitignore

---
 baselines/depthfl/.gitignore                  |   1 +
 baselines/depthfl/README.md                   |   2 +-
 .../2023-09-04/22-24-33/.hydra/config.yaml    |  35 ----
 .../2023-09-04/22-24-33/.hydra/hydra.yaml     | 154 -----------------
 .../2023-09-04/22-24-33/.hydra/overrides.yaml |   1 -
 ...ics_HeteroFL_iid_C=100_B=50_E=5_R=1000.png | Bin 27604 -> 0 bytes
 .../2023-09-05/06-03-04/.hydra/config.yaml    |  35 ----
 .../2023-09-05/06-03-04/.hydra/hydra.yaml     | 157 ------------------
 .../2023-09-05/06-03-04/.hydra/overrides.yaml |   3 -
 ...ics_HeteroFL_iid_C=100_B=50_E=5_R=1000.png | Bin 30212 -> 0 bytes
 .../2023-09-05/12-21-24/.hydra/config.yaml    |  35 ----
 .../2023-09-05/12-21-24/.hydra/hydra.yaml     | 157 ------------------
 .../2023-09-05/12-21-24/.hydra/overrides.yaml |   3 -
 ...rics_HeteroFL_iid_C=75_B=50_E=5_R=1000.png | Bin 31063 -> 0 bytes
 .../2023-09-05/17-39-22/.hydra/config.yaml    |  35 ----
 .../2023-09-05/17-39-22/.hydra/hydra.yaml     | 157 ------------------
 .../2023-09-05/17-39-22/.hydra/overrides.yaml |   3 -
 17 files changed, 2 insertions(+), 776 deletions(-)
 delete mode 100644 baselines/depthfl/outputs/2023-09-04/22-24-33/.hydra/config.yaml
 delete mode 100644 baselines/depthfl/outputs/2023-09-04/22-24-33/.hydra/hydra.yaml
 delete mode 100644 baselines/depthfl/outputs/2023-09-04/22-24-33/.hydra/overrides.yaml
 delete mode 100644 baselines/depthfl/outputs/2023-09-04/22-24-33/centralized_metrics_HeteroFL_iid_C=100_B=50_E=5_R=1000.png
 delete mode 100644 baselines/depthfl/outputs/2023-09-05/06-03-04/.hydra/config.yaml
 delete mode 100644 baselines/depthfl/outputs/2023-09-05/06-03-04/.hydra/hydra.yaml
 delete mode 100644 baselines/depthfl/outputs/2023-09-05/06-03-04/.hydra/overrides.yaml
 delete mode 100644 baselines/depthfl/outputs/2023-09-05/06-03-04/centralized_metrics_HeteroFL_iid_C=100_B=50_E=5_R=1000.png
 delete mode 100644 baselines/depthfl/outputs/2023-09-05/12-21-24/.hydra/config.yaml
 delete mode 100644 baselines/depthfl/outputs/2023-09-05/12-21-24/.hydra/hydra.yaml
 delete mode 100644 baselines/depthfl/outputs/2023-09-05/12-21-24/.hydra/overrides.yaml
 delete mode 100644 baselines/depthfl/outputs/2023-09-05/12-21-24/centralized_metrics_HeteroFL_iid_C=75_B=50_E=5_R=1000.png
 delete mode 100644 baselines/depthfl/outputs/2023-09-05/17-39-22/.hydra/config.yaml
 delete mode 100644 baselines/depthfl/outputs/2023-09-05/17-39-22/.hydra/hydra.yaml
 delete mode 100644 baselines/depthfl/outputs/2023-09-05/17-39-22/.hydra/overrides.yaml

diff --git a/baselines/depthfl/.gitignore b/baselines/depthfl/.gitignore
index 93db21baf618..d41f139dfc7e 100644
--- a/baselines/depthfl/.gitignore
+++ b/baselines/depthfl/.gitignore
@@ -1 +1,2 @@
 dataset/
+outputs/
\ No newline at end of file
diff --git a/baselines/depthfl/README.md b/baselines/depthfl/README.md
index 3a996b6deb39..6ad0e85aceb6 100644
--- a/baselines/depthfl/README.md
+++ b/baselines/depthfl/README.md
@@ -1,5 +1,5 @@
 ---
-title: DepthFL: Depthwise Federated Learning for Heterogeneous Clients
+title: DepthFL:Depthwise Federated Learning for Heterogeneous Clients
 url: https://openreview.net/forum?id=pf8RIZTMU58
 labels: [image classification, cross-device, system heterogeneity] # please add between 4 and 10 single-word (maybe two-words) labels (e.g. "system heterogeneity", "image classification", "asynchronous", "weight sharing", "cross-silo")
 dataset: [CIFAR100] # list of datasets you include in your baseline
diff --git a/baselines/depthfl/outputs/2023-09-04/22-24-33/.hydra/config.yaml b/baselines/depthfl/outputs/2023-09-04/22-24-33/.hydra/config.yaml
deleted file mode 100644
index 14257c31c8ac..000000000000
--- a/baselines/depthfl/outputs/2023-09-04/22-24-33/.hydra/config.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
-num_clients: 100
-num_epochs: 5
-batch_size: 50
-num_rounds: 1000
-fraction: 0.1
-learning_rate: 0.1
-learning_rate_decay: 0.998
-static_bn: true
-exclusive_learning: false
-model_size: 1
-client_resources:
-  num_cpus: 1
-  num_gpus: 0.5
-server_device: cuda
-dataset_config:
-  iid: true
-fit_config:
-  feddyn: false
-  kd: false
-  alpha: 0.1
-  extended: false
-  drop_client: false
-model:
-  _target_: depthfl.resnet_hetero.resnet18
-  n_blocks: 4
-  num_classes: 100
-  scale: true
-strategy:
-  _target_: depthfl.strategy_hetero.HeteroFL
-  fraction_fit: 1.0e-05
-  fraction_evaluate: 0.0
-  min_evaluate_clients: 0
-  evaluate_metrics_aggregation_fn:
-    _target_: depthfl.strategy.weighted_average
-    _partial_: true
diff --git a/baselines/depthfl/outputs/2023-09-04/22-24-33/.hydra/hydra.yaml b/baselines/depthfl/outputs/2023-09-04/22-24-33/.hydra/hydra.yaml
deleted file mode 100644
index 7e940d030577..000000000000
--- a/baselines/depthfl/outputs/2023-09-04/22-24-33/.hydra/hydra.yaml
+++ /dev/null
@@ -1,154 +0,0 @@
-hydra:
-  run:
-    dir: outputs/${now:%Y-%m-%d}/${now:%H-%M-%S}
-  sweep:
-    dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
-    subdir: ${hydra.job.num}
-  launcher:
-    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
-  sweeper:
-    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
-    max_batch_size: null
-    params: null
-  help:
-    app_name: ${hydra.job.name}
-    header: '${hydra.help.app_name} is powered by Hydra.
-
-      '
-    footer: 'Powered by Hydra (https://hydra.cc)
-
-      Use --hydra-help to view Hydra specific help
-
-      '
-    template: '${hydra.help.header}
-
-      == Configuration groups ==
-
-      Compose your configuration from those groups (group=option)
-
-
-      $APP_CONFIG_GROUPS
-
-
-      == Config ==
-
-      Override anything in the config (foo.bar=value)
-
-
-      $CONFIG
-
-
-      ${hydra.help.footer}
-
-      '
-  hydra_help:
-    template: 'Hydra (${hydra.runtime.version})
-
-      See https://hydra.cc for more info.
-
-
-      == Flags ==
-
-      $FLAGS_HELP
-
-
-      == Configuration groups ==
-
-      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
-      to command line)
-
-
-      $HYDRA_CONFIG_GROUPS
-
-
-      Use ''--cfg hydra'' to Show the Hydra config.
-
-      '
-    hydra_help: ???
-  hydra_logging:
-    version: 1
-    formatters:
-      simple:
-        format: '[%(asctime)s][HYDRA] %(message)s'
-    handlers:
-      console:
-        class: logging.StreamHandler
-        formatter: simple
-        stream: ext://sys.stdout
-    root:
-      level: INFO
-      handlers:
-      - console
-    loggers:
-      logging_example:
-        level: DEBUG
-    disable_existing_loggers: false
-  job_logging:
-    version: 1
-    formatters:
-      simple:
-        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
-    handlers:
-      console:
-        class: logging.StreamHandler
-        formatter: simple
-        stream: ext://sys.stdout
-      file:
-        class: logging.FileHandler
-        formatter: simple
-        filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
-    root:
-      level: INFO
-      handlers:
-      - console
-      - file
-    disable_existing_loggers: false
-  env: {}
-  mode: RUN
-  searchpath: []
-  callbacks: {}
-  output_subdir: .hydra
-  overrides:
-    hydra:
-    - hydra.mode=RUN
-    task: []
-  job:
-    name: main
-    chdir: null
-    override_dirname: ''
-    id: ???
-    num: ???
-    config_name: heterofl
-    env_set: {}
-    env_copy: []
-    config:
-      override_dirname:
-        kv_sep: '='
-        item_sep: ','
-        exclude_keys: []
-  runtime:
-    version: 1.3.2
-    version_base: '1.3'
-    cwd: /home/peterpan/flower/baselines/depthfl
-    config_sources:
-    - path: hydra.conf
-      schema: pkg
-      provider: hydra
-    - path: /home/peterpan/flower/baselines/depthfl/depthfl/conf
-      schema: file
-      provider: main
-    - path: ''
-      schema: structured
-      provider: schema
-    output_dir: /home/peterpan/flower/baselines/depthfl/outputs/2023-09-04/22-24-33
-    choices:
-      hydra/env: default
-      hydra/callbacks: null
-      hydra/job_logging: default
-      hydra/hydra_logging: default
-      hydra/hydra_help: default
-      hydra/help: default
-      hydra/sweeper: basic
-      hydra/launcher: basic
-      hydra/output: default
-  verbose: false
diff --git a/baselines/depthfl/outputs/2023-09-04/22-24-33/.hydra/overrides.yaml b/baselines/depthfl/outputs/2023-09-04/22-24-33/.hydra/overrides.yaml
deleted file mode 100644
index fe51488c7066..000000000000
--- a/baselines/depthfl/outputs/2023-09-04/22-24-33/.hydra/overrides.yaml
+++ /dev/null
@@ -1 +0,0 @@
-[]
diff --git a/baselines/depthfl/outputs/2023-09-04/22-24-33/centralized_metrics_HeteroFL_iid_C=100_B=50_E=5_R=1000.png b/baselines/depthfl/outputs/2023-09-04/22-24-33/centralized_metrics_HeteroFL_iid_C=100_B=50_E=5_R=1000.png
deleted file mode 100644
index 936af4e679475861e59881fc47044da681ddc45c..0000000000000000000000000000000000000000
[GIT binary patch omitted: PNG image data, Bin 27604 -> 0 bytes]
diff --git a/baselines/depthfl/outputs/2023-09-05/06-03-04/.hydra/config.yaml b/baselines/depthfl/outputs/2023-09-05/06-03-04/.hydra/config.yaml
deleted file mode 100644
index 5e4f045aeddb..000000000000
--- a/baselines/depthfl/outputs/2023-09-05/06-03-04/.hydra/config.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
-num_clients: 100
-num_epochs: 5
-batch_size: 50
-num_rounds: 1000
-fraction: 0.1
-learning_rate: 0.1
-learning_rate_decay: 0.998
-static_bn: true
-exclusive_learning: true
-model_size: 1
-client_resources:
-  num_cpus: 1
-  num_gpus: 0.5
-server_device: cuda
-dataset_config:
-  iid: true
-fit_config:
-  feddyn: false
-  kd: false
-  alpha: 0.1
-  extended: false
-  drop_client: false
-model:
-  _target_: depthfl.resnet_hetero.resnet18
-  n_blocks: 4
-  num_classes: 100
-  scale: false
-strategy:
-  _target_: depthfl.strategy_hetero.HeteroFL
-  fraction_fit: 1.0e-05
-  fraction_evaluate: 0.0
-  min_evaluate_clients: 0
-  evaluate_metrics_aggregation_fn:
-    _target_: depthfl.strategy.weighted_average
-    _partial_: true
diff --git a/baselines/depthfl/outputs/2023-09-05/06-03-04/.hydra/hydra.yaml b/baselines/depthfl/outputs/2023-09-05/06-03-04/.hydra/hydra.yaml
deleted file mode 100644
index ffb228743fe6..000000000000
--- a/baselines/depthfl/outputs/2023-09-05/06-03-04/.hydra/hydra.yaml
+++ /dev/null
@@ -1,157 +0,0 @@
-hydra:
-  run:
-    dir: outputs/${now:%Y-%m-%d}/${now:%H-%M-%S}
-  sweep:
-    dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S}
-    subdir: ${hydra.job.num}
-  launcher:
-    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
-  sweeper:
-    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
-    max_batch_size: null
-    params: null
-  help:
-    app_name: ${hydra.job.name}
-    header: '${hydra.help.app_name} is powered by Hydra.
-
-      '
-    footer: 'Powered by Hydra (https://hydra.cc)
-
-      Use --hydra-help to view Hydra specific help
-
-      '
-    template: '${hydra.help.header}
-
-      == Configuration groups ==
-
-      Compose your configuration from those groups (group=option)
-
-
-      $APP_CONFIG_GROUPS
-
-
-      == Config ==
-
-      Override anything in the config (foo.bar=value)
-
-
-      $CONFIG
-
-
-      ${hydra.help.footer}
-
-      '
-  hydra_help:
-    template: 'Hydra (${hydra.runtime.version})
-
-      See https://hydra.cc for more info.
-
-
-      == Flags ==
-
-      $FLAGS_HELP
-
-
-      == Configuration groups ==
-
-      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
-      to command line)
-
-
-      $HYDRA_CONFIG_GROUPS
-
-
-      Use ''--cfg hydra'' to Show the Hydra config.
-
-      '
-    hydra_help: ???
-  hydra_logging:
-    version: 1
-    formatters:
-      simple:
-        format: '[%(asctime)s][HYDRA] %(message)s'
-    handlers:
-      console:
-        class: logging.StreamHandler
-        formatter: simple
-        stream: ext://sys.stdout
-    root:
-      level: INFO
-      handlers:
-      - console
-    loggers:
-      logging_example:
-        level: DEBUG
-    disable_existing_loggers: false
-  job_logging:
-    version: 1
-    formatters:
-      simple:
-        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
-    handlers:
-      console:
-        class: logging.StreamHandler
-        formatter: simple
-        stream: ext://sys.stdout
-      file:
-        class: logging.FileHandler
-        formatter: simple
-        filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
-    root:
-      level: INFO
-      handlers:
-      - console
-      - file
-    disable_existing_loggers: false
-  env: {}
-  mode: RUN
-  searchpath: []
-  callbacks: {}
-  output_subdir: .hydra
-  overrides:
-    hydra:
-    - hydra.mode=RUN
-    task:
-    - exclusive_learning=true
-    - model_size=1
-    - model.scale=false
-  job:
-    name: main
-    chdir: null
-    override_dirname: exclusive_learning=true,model.scale=false,model_size=1
-    id: ???
-    num: ???
-    config_name: heterofl
-    env_set: {}
-    env_copy: []
-    config:
-      override_dirname:
-        kv_sep: '='
-        item_sep: ','
-        exclude_keys: []
-  runtime:
-    version: 1.3.2
-    version_base: '1.3'
-    cwd: /home/peterpan/flower/baselines/depthfl
-    config_sources:
-    - path: hydra.conf
-      schema: pkg
-      provider: hydra
-    - path: /home/peterpan/flower/baselines/depthfl/depthfl/conf
-      schema: file
-      provider: main
-    - path: ''
-      schema: structured
-      provider: schema
-    output_dir: /home/peterpan/flower/baselines/depthfl/outputs/2023-09-05/06-03-04
-    choices:
-      hydra/env: default
-      hydra/callbacks: null
-      hydra/job_logging: default
-      hydra/hydra_logging: default
-      hydra/hydra_help: default
-      hydra/help: default
-      hydra/sweeper: basic
-      hydra/launcher: basic
-      hydra/output: default
-  verbose: false
diff --git a/baselines/depthfl/outputs/2023-09-05/06-03-04/.hydra/overrides.yaml b/baselines/depthfl/outputs/2023-09-05/06-03-04/.hydra/overrides.yaml
deleted file mode 100644
index 0b957ff61e83..000000000000
--- a/baselines/depthfl/outputs/2023-09-05/06-03-04/.hydra/overrides.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-- exclusive_learning=true
-- model_size=1
-- model.scale=false
diff --git a/baselines/depthfl/outputs/2023-09-05/06-03-04/centralized_metrics_HeteroFL_iid_C=100_B=50_E=5_R=1000.png b/baselines/depthfl/outputs/2023-09-05/06-03-04/centralized_metrics_HeteroFL_iid_C=100_B=50_E=5_R=1000.png
deleted file mode 100644
index 3c190dd6bfd1826e79058b8faf518ad06b94178f..0000000000000000000000000000000000000000
[GIT binary patch omitted: PNG image data, Bin 30212 -> 0 bytes]
zM`<5CNaLZcJ$B)@*7K8aj|dGtPJ-*Bp2x;Mu}JqN<*M7nymh=OJ?=i`o0F5%G~W@2 zfg)yAZdn~Eofy-E`QqZbUclS^kBiz z?QKris)dy{X`0UoR$bC=bzc=~b2sc>*4hop!)5EaV&{efTwy~WQ!!4|gH*?`mU4D9 z=bH#D9JDN<8gb5D)JE0ZMX}w1`$>ZKw78dOBkStKY(`32#lzlaWT50?XkG37=f7Mg zpy0)pen#-A@2%X{z%`26M zKc@E{iaH*BOci=)w&%Uwb`*S&UVYZgR*N6mh7({YcE%|yv74~9v3E}HtHZZu!L9G2 zhU8Ib6qmVg^arkWfi#lz9I>Dy!Fh(A&zUf9VYg0xVSjhKLT(>c$&P*u_pN=#<%Tg` z&!w<6{CMN-{G<4-VJdgJ<}tcty3@ErmLAIE@!3e+=6X-#Cs$1wiio4+C~wDA znJYiD7v~m-_IOFj0*;!E?yo0S^&9yfZ{@3O{!07o;o4*D(_oobT!nJ@9J_`>CE^@6 z3OSo~9~TWzTs5xglF%;3LaBOvq*R&W8P*!dy0eh9Tj`7HO7vWjYxNMWMSZ$0dAyJs z)yBfi5%9?1ohlj0<#I93b6y0># z+WRhaHIyRbJWGJzmoQEv+Nvf$97aZoww;&k@KO#Z*Gy%4=oiCc<9i9M&KcMKzKv9p zwkn5t#_CDV!VI~jHr$Y~T~t+}*^ZjAhCGG*dJv3!E-bFQo%8aBhZnuIr$<>_D|T

4Q#FMQUc3r1x?jVWIcT|a zfOEwRyW60y8)q0pQ)ru^}ihch;X zA_^;X@dw2vy?L}wgBvAlpyjfGJ3+3rxZgXo5ArC@0==-|VXdbjAy+v$FV?!PS-TuX zQ3=ACX*hrBQhQS%ROEhQ>q4Nucbi@b; zv9Nyc^?mylU3oW*ou)et^zn(@Q*XRHBArvVcmh(e8SmY}ta4daA9wB%PZIMiseQ-E z$cXuPs9?9|WP72L$GnTY>oq_6WhTZ{4Y)JXYVCd^WtNq#^%h zE2oCv4#M?)+?h#i-7cEc5N?qhu;VfPUEcKZYYXAS`m2UU=nPIsS{nb$Ojzh27awmn zMvEdMCbk`mp%x~BaZ_HTyG77hvN@=0KQk92(H12*Ic%hTgN;p1L&N;&1w9|0mXanZ zzv7X_HG#Y14q{^-+MkQxS)SREzb_Ku^Lth?L-Z${t``!bnA2d$HLLntfIN-apOT>yb4KEMSS-` z{ra4av9a-aG`e=U@^^k#O+c8>?W5q3w(Xs#@(uL-cBzII6n;~f#Wp2(gJ2zK`j24y=7x;qnv zvIomu;n2@fzA1s*>mYf~lD*WejhHt0NU+h}>(CcX@8H&vARotKe6Tq~&SQd|z@hsr zJRDy!O^gD??KmsrvN1^mQ;Z6rxM^+#=tisL5nfv~O+;B4-|A#N)#JfD3{;u@6zyBN zn6^Ca(%x((s^7nUpvNlh+atIdLIKwOS@j)^t??SFs^VC-SYcG(uJ5lZv{a-GrJS4G zAI{>Cx`XM_PUAkETiDQC=eeV*r4{-q=$vV10>^q7Ee;M&#b6yER1?_WNuutYKf7K9 z-SYiIFD_1lg-hUIAD58u43^Hc`6-@|-?5O_?yulrG}-K=_lE273IEe`F2?w|nsej~ zD#yzA6--39U;z~jrmNdguP-Pp0#Lx9|Cdx);nf4N;^JIe)MIV~MMZ+ad|iu=9Jp=i#=e#~BWFc?nJ;cV-ALMa9+eb#K2fMUq}k+^a6%D;VoJs0V5E1NYVG_Ii}$ z-cm1Zk1zg=vi7s^nBlk z?_@d3a_rl0^Uo=@XNt%{30-wzyi z;R5-*$0~;y8s3LWs={wI=ExriOQ0o_4iVsR~06iBmGTV8Z$EM zbWSlCB-Y9j`*c@iR9f!+YKr^H$ru$!{4!Cy(_L4dN0&s2<5=&!PQq(!hlQ{4p;1wH zJUoPEXJ?BbC&^Y$RXJY$v$qzpx#>VaEku|sXkSqI4as)N_6-zJc3F;9a1>Pkq$D9B zxmzF1((g3;qGe0-LcjV=<;eH_>SO~Ymeg3+My;=tBrm5ET)~@uc7Y11NDg-vuL=s1 z1>s(hGBIHckBF$ZYjj>zj-->Iy`Qc4DNWq_`OBA2v$B}x7Z+D(- zasM4Qm(5iLIA6iS~GK}UiZ8I9o(_R`Xq$6e5 z3V0(OV$vFZW!;w6bN=%2lAJ{IY-|Jx<>rOtnD_Ccl(O_a8yX%TacQdU}yi2paWe-XXtfPH=j%;XelupcTk~XKQ1Xf?5d! z0}o_mWo!JselIz`*yygU3C_6FoZBo?R2p-6+xYhDrnDpnf(()l?WP+c9zv3)<(9&B zC zlZ$QR-xafxAAU(vdV93f{>kd_cIlb5jbU%ohAv84TAIgZm>UZ8nsn!Cy)Ru|GLW-} z8pC2@Ds3;JC@3h_*Wf*l-}r}&&Q4D%Y;T<&m_qXG+!VE}$*8#v89NQ8tv+C18nrl#;{SH3ClMUyF5_{q>0# zuV3S%l6XvUWn*Zg5Y)3eTzpqS;q&@LZL#?`O7|}9QuCQ;(e}vu_rJWE#}=ca4ZMu;H}Q zxQ_=GHvGZg`T0%S*q8xXI)J=YtX^>5>4mBac%0|IywGa&6aU$r8X@fbGeaf))?F2q z_5cj5@v(8Qm3*ywFMjp=_j!E(xafH<#2*|@okgp?l@&fc?SAh_biN%Wuu1f!@Xf-} z8-HIB8nc9~cM2o2GpB1UIBAD5>JAJ`&KUe@27H7{s7o{=Pct&;5r~fp(64nf1_-e< z)hGc-cr;53%HO-n%6orA?epCr;eW1joTH37j4(XLY&>Y?di~%ctGqHM4~@PZjbtz@ z?QP|8wwLF0-w|t)u;e2Fma@_Pr+0q$EQ9>BCGw`T{j*cLNv{AxmJ4I8gz0{qdFc}xc0$9s94<>4JsPXg)V4Wra^jq7VTyuwu>4`p8KYrB&Ea)tkt!j#) zhJ_sqRr~o^?o-OIPnkeg$W%AwtJut?HpvRG#MIK)Uo#L`_$D~j0)GimOJkK>(X`?> z4!?iIQvygqGa%Z>6X5hFszgP(x0tZ3t=9Vaj!Zl1MshMpZhi?`sYN2k#+!##^>y1?y*}p>bZm`NfLL(m@ zEH2tUbrXk{H87M&3FW_EJ2j)?|A)9?zkb8`_+X2i&yo-|T|Q*s?0(_?XC~IboL5mF zd{ViYtAnE2c%1P%(<&s>)9YeaHNQt@?C{eR{)p!&?aIhOb}0_7nth-~N{Ex<_ zpd^aoYB<1zzl>>~KPfsA?JhrJ4gA8v^Oj+KWZLP?1lOCQ1M676+fZq58IH0RSTfc4 zv*l5LplItldlbqNz^ala1C=leb#}Vn=s0Fu$si;|K4jp{(Qq(}?nu;7`dF>IfYCaH zlM(S#rt>Ci;I)ZHegg))?;h{z`sC^HC8U*LKe826VJ3$Y9dtvQ4H1Rn6ccaU+&1|Mjp`?-N#&YEdyQ13D^vPUYV>ADJ-9 zntlK1c$RQ{i>-_D0c_L24H_zx8B7ZbO0}S*Byyj_g+x?<0$2UZ2uVZwKJ6y!XinNo z@h`O%hHRwySn}K2QC_z+wjHg<8$APu9K`t3-!${~YiH^=aVDZw#I~>*uU_o~avZ69 zXhd9^pbCcyj0q0KhX|TA)2B=^94v7H@7v0AtzK>Wbnyh3@U2hPHHmUntQB{ySV94? 
zg8E8mi{Uf>LT(ePd}BzfRqHh?D3qw%YLjV<__O}^DyB2d!LeM03Eni49C9-GV)h@6 zUx{*i`B}%#Ef0)eZ+hEn9jl@H%C%R>dE|P68HIvLIb8yO{8T*w!H&e`OuT6{S6zU_ z*P1cM0?dBTtVAuo@cQz$$7-$H|e!*4r}H=lwqig1d1Yz?V(28I0aFcnHvq8$g;y1vIgKIAYx|Ts%GSzJmw73) z6($5^Ot+%a>0ef6TOD59#@wj$fMneMlg~&E#*=Y7s0gr2b?)ZfD7>BmcrHW+!y2X~q{ybSf3=`!1bGx*opV$JL*$UL64 zp(w+ols?JR^UCNVoN!c(#3^~@q!-)0d-qh;)t`*n)n3?WJiB$eo!H1HJoz=A?J^Y= zf@=}!VyHkr%C6CIP8LC!Gcz;fLJmf6IB|me!GfloM|MQ=j>tKTUcwZi8|njeH($5Xyk4%~y3nmlWnj}IZ$8Uh#RI!Gl;BxHo-S+6Az#{xQvdLe z0;jV*SB&FGVlt2HFivdWbwgP07GlZ!37q=jFphI^tSYjARRD!q13hZAHd<~_Y1rT+ zjBql5BHkD_`e_vzVLZD?qhqf)3#!1@is2b0SK~3SPNm&ii*B`1HZg6-rZ;V)7IvdF`j_1m8h(MS-jysA9|tiWY>N*9>!tPZVQ@+cH4_t4RXY0OMOi5+%#xCl zx>F?$jRc@RU;@f)H=L>+hVAQE&XRD<9-CoGa%abuPdv|nZW@bkAhxE0~oaRJWQ2dsrs#YJR1>UHx zVp+<7NAW=HwZlwEgtSJB}%bCK`nG9LQ%8-q>vLJ z&(^_Q3_7uE&RVWOPUhQfNxqpEuV38IGUqU@ExSKLGFE0ip(REINQZ%O@?c8 zJWeuF_MHx+eiIZ{QKv`%0$5%*?V+ZoX8GgemGJOzn;x<&R~T4W+E%7oT3YyOPW(@I z=zyqvYtfT-9uMy}JQb1!#5S9-ppx6Abp97)z4T;C`u-PJ3~w<3YW;PCri7vPHbc1% zCPLO6>Y+zMXx9PF479W{!JM=sIPB7`yr#kOYO-id+eXb-Z;Z#Qj3wd;ADnmG@rx35&iRrl zQ0j7|W-egHP-brBEu9T81-Oqpo`b{3)|D)H3APZqmI<{tupF%!C0YxEfUH>%s{+<9H65tGuoc^YeaY6vau;{6teTQJZ+rf23xsn6R9MH zH4!|sva(%Kr#m3_-83peJ8IBS-9K$foE`6&Cv=rLqi>@TIICy(>b%7tF(uhSUaJfS z$_Oq`@B19P4AuS$eZvB)VPqZ`T``goW=!iGcke(s>UEmh!q%|mBVpxKdPNr7%mtSv zSH>|;S5U#2jI^*WV0n13I;;18oo<*mx&sWBX)O9Sara{e?%_L+*wyDgmHVVol06wzuS{d=WEa4F z4jB|@--o6;ktZxp@~@5~Q^*;=s{N$$mCO8+NrQq6ryIu(lsJAHTIO=0>r)j&YEFT2 z2989bh_DkHC;3ar)zzvZSn?WoHLqWUIchyW_F#1jXAx{=SAXo_CEKs4mk{5#8xC^X^I zNgAIUM(+H;sVy%l*XC+*tNH0Y+6{5q@-}w8l0g(Xnp#f*}vx^7v^gpG9|48ZK%8~-Xm%Y$(Rep}%(fgHa8Lmh? zx24smG7|FuW&S$04;QI8j>1Wz%qmQAuibu#S>?2FucfUGh%jnLN5|pv$C_`)?8mDv z`yTBfG#>6n>df`N2_s7L{Ee-A$^{3_goB~t?hVYplom6fcBhg+$I)@%`~2NG)&u&rT7J0Yuy)w>6!@F)qUfYSXsJPl$R^Xehz1 zRfGb^3b1%=C={esq}c88+q;R75mkGmkBd!O@#p8~Gcq!oIx00(^wAm1JOQ*Z%z#&I z83T33_jck#_v(}hna6me+5Q#gqcjbm7e9Wy z0oS5uVEA;R=itB%1mE-c_=R12fH^i}SdyS31;Gz1tLsmmJn4NSeeUXl9p^r<=8pUY zwk+P|Ylm8v1Q}bLmwVbpZrzD04RB2ud3BH3s<%CKhEx(xA>l#hM@+HrN5 zXJBA}UPNSc&&S1u@7ABCtA+mewX~L8kD8mM3dhoK`BGjI|4rByLl?8RHs(E%T~b1X zl*ea@@$vi%wtT74Cv*YR`I$_$WYkNqU+S?)sh|V(*kVZDn-9d^#5}n`s3+1*y$6G{ zy?s}PcYdqCKs|f*%w)L87|1#oiAAUC=dWIc9UOQR`V-U9*@O7W%gbvsHpJq8?jm&) z5ZYmY5(IUki;B3R3ZC4%dfS>Pbj>LDQO|e3B7T4M#|QpoS2S1X9T_M)R|ifxD$TQt zi!zI={nN>GH(kfkw@af8pN8}NW4ZaiC&D=h93CyJ_=m$`sXP6%0&vzb=F;lG2VyB{ zY2O`L26rx__c;_ycHY!Wto*kV=HFkFnR$Bs{gQm>3ih#7FOZ{mOvs7|;GF*N7`)s6 zGlPdhgNrEYmqpV4d{! 
zRp$jU0UF)#1hkmi6EY8-XNH#VSTL{yK%Hk|W%WLpsIVRTB`#<;E)+>6*uAn>TwLtu z{T=8A1vjp{oY-sccfD@O;fA4Q2h=)6YVW6xYAQO}oC#1Ucs{O9IMH>R@$VVpq1J6L zc%J}M{pi_OQp~<}>BFG>wz$_;B8~2}F}|9SERK1>Q%kSC#v9j)>m?eNs(woSh*@i5 zB6P#(M1V)jI?n1az2hsbl5@I2tmU$&?tR7YzOMFWVQ^^vj%D?{KHk_9@$F>nv0i`5 z!6amTMBqw_-BA{oE!1PK4W;z(>2rJ-J+}qbRfIT?@dDvtOSOIsC>)TAuI1PfW~fEy z+qUhzBacV7itN^-9Vmw|=5Z!Hi{I_pcYLm&-Y*cK1=<41E3W&P3CY-*@6@DSzHt=W z-Ku6%T9E2Ziz)Zn;ETri?ZYmz)cNQdWbZG?)0D6J+(UAUx~6FK1pDMFMdtb&BXnQ~ zdyII)?F}FPTX0T*tP1~{3cDg;s zds#h>CEo8xi%`6xRn}HjmYHlmq?WxFPNToiyYx^j1QjdIWKU`sv3O3DL;kfXnSxZA z-n4w|c;n+aU||q%jQj*zYtG2$B9R1F1=izaiuao`HWB$u$xb#zp&eStn5+Foue@%; z?&SguF|3TAX(~k!t*<6@SI;H8fOujsmeqt=&V^&)cgx99-SBv0eelC_|Ju1?JXLIB zNq?qN{K!=!{Wm=Km0iQ`UZ@&`&+XN|xmxh|aU__EK&3Dx%*%RXiq@zaQx0sCxn-up3VxPLxO0?|54kF&O|!Ff6s!E5 z&njANr}-mW+>FjHn$Q*fH4*J}7A)Mjdqi)_v8yOUVwrT>0Z+4-Rgg2lBqlYdz%(no zMXAogl9^8_H5Vh(UM}S?a5{4{KQqVr3y8k#)Y|=KV)Y{2^kC zmPGY@IgX6DyPc=s&|)!Ku}2wNx$i9W`Z@Bqj|AIMUsdB!cEd#&h@>Lnf6K|)na8r1 z9^|#ms;c3YJyG}dmV>Q19<#3`V1YabRqfq-_oV#%8oZNRBgp1nS)_gX^&|I1YHAdO zEJhxl%kNdv-G15nvXzRyY&ZSX*J&~*$wzDREm)jPB|BNg4ev5UCUQ;QnaiuM%jz%g z+rMgBY@Iv(c9Q(-ejJKZzlLOg-1!oVN?IhSQl{Xt1@~wZ7-B*B^f`2b`W3g-{F_vx z-${|tCrknw5fV0yJd}f5h*pIS1tPB?`WY~5{02T-cc_J(uz|h3*iicC3AH80OY!qb zb)yN?_E()%Us8VY6{t|ORM^bRb_}Ec5T{c=1cJ&I<#=PYzjr&XBn6_K1%u5K5h2Zc z($@Sqfj|bQ?3b@!xBm!&6bS`)v3O!q(vtG}MOv{?z!L~*<4;FH5eCNGM`mU--J%j@FJz!*#>9ylsQTr05*zm;^gKWD(NnosY`OZf zoqGEBHGh7;|D(+$F|~lr`C-%uuV$)aHCPv8D=hRn zpTG-wA0vFfZ#K+^9SQE+t3Z%whak!OgdOK?Dwvm^fc9ky(nV-YjJw3ybkHReFy4OY z?k?CD1b?l^OfVU@-?7KOv;XlN9m31J1&=PW{H{a#Ay?(1GeGKIxAogAO=q}v?L3u$ zO%7S7a}f zS&_tDC8ax$9%UW9htDz=_6w9`2DlTqqhS!`TtIOp{LY&dGzo65{& z)I}Sqsa*!;ZI%Bg>{Hu`ujL#8+2Ev^d3W8hZI0;jWvkB+umOJd_No$42@t$`^$IpJ zCaUTmH?gqr2#`hX?N@K!B))0U?IMx*;>FCm6wH_)08&6i%gP5Jxjct`ezacO^SBG( z&(q*w3U9-x8L~9kqgj7_HDHg6V^M0}ocYXS(sCXcQGzR1u7H7!7iBct5{8h^z_Sd82=zK{gYFwRJ zUjh3l!|~!Z_9miR!X^*)-1+&YsE9D=+@&_~Fo9@t0d?)#HAx4DiRW$qBiG_!zXNNZ znVE@+NJsMWc(PHH!Jt>tgHK>guPjaO+;N*%0sQ#CiKko=yU=YKJMQN#@yxeeCMkv&GsdG5|%Q0`RF~eQ)jsCOmLgETgY2cm^bz7;H zZ2jlm|Nha0R(X14CNZ9I1pM<8j~*UREx;IY#>rez=8`o&OYt8Z?*q1FfiRGM|Wpqvc3;|X@~~{ z$mzf}4dcAUZ11DB%B{HcV{F(@(yFS2-~0P*8wDS}hco-xz41sN9HiiBqt&YODE_V6 z-^17#n~-20{Op+y9R2c1|1;keAm)ojT$YSrTOZa~T0@%9SsBbP-QL;B2fO)-oW!3O zKT0iBR$9V{*KOD9_YHF4dK%{D+2!8*b}OTg3(IArsh3Yrzy_L!frYCD0d){;EO}6W z>jHs02xr%KE8%z)QeGZdT33GM7FL*dCC&Jyx!SY=;h%3%@1+gi{7UG?u=U*=u^6PX zuQT@D?qFTFg-LyMM^;t`LjK@ny>~v;DkHxpYAaoT4P4(BfO5AOJgblD!3{Zx5W*|r zYz0M7_-#Z~&L@HjVd=U)KH_zT$yG`g7zE9Z_5Eb_J;0M)p_i}bS@maE zpBdi2ZwmD4w|h!TyT%_}*f=>WP+H&{tYq&j0R<)x%y+tQXBYdOnGiZ1R33C=be43} zmO*nyJ|viZ0dP8&z^dCn)X)$)!adgV^sN5zUWJYg2xUrfuQE}LM>NShz0#Mo-^xZ= zZ|-v@c7}(A75kkXP1UBeUB6xo-i|y?1J73IV)_G7fE_hhyV0;R`P1<1PvR94l1DVR ze6$KS!yzgS{%Pg%GxlDu7_%#7QA{$@CS4hMx&yL^>z{Us)4pHB#c$WDwvb!F_1dp9Z@w z0@l7}MVO5Zjc_iYMR+HJr}$-j{LIPzlzY+dQ-C#AFIZjB*Cl!P!mF$PJQ9y0gH$VhiRg@l09{9Q1o z%z|+j7qvQ3ONPjeN`e#E0lVy<9;BK>)&`5MoTX*X-;u0NHPV3|6(zkPO9wJ)47I2m z0Y5)~1u-K^Mow<-ZOpB0wGS>}bV(9+W+|v$7a<_$MxhWpC!`sdrQS~!wm>`JqtO#% z9!J#H{&JE)xdX)7TCd$0JfeY<-*fAT02Wp1-aUN8<_TMy43tk8z%>@7*YgEz_O8p? zL;5u?#V5Czn6OhtTut=TcQ!v?dJ7Fb2!92iMGQ;e|1<$RD5}Ye)aau!8jbF&b>Hyf zvGyMVlfaAZ$L-k9bs$7#Ge}6#!6R?Fs_DE0#>W^mEFgO%Jv^!kBlNM8;crrNiWgarEM! 
zME!A6u>-O)s4oXO54@OJS?9oJ)2?e@9}JT7z=;p|Oh1172!f-@*koy%sfkIQ-^KDq zv$RG(BQF1X5B`klwzD2xLJh0z*sVTD9NFpEnR-CH3|B z-Nr$%xDCSK+P+11$|r~u+Cz(oLHv@$G531ijxyrPbDwY}TW>t`%gWA%#w`WzhwsG_ z_$~pHNc10~&A+;heYJS?p(o7Ai5EHQHWM}F2189n#-Dp@Tp#O-Z@|U?bg}Lg9TU^; zy-{x=x(TFA2CT@ga$DfiGb+0#cr^rK;(iZ;28^4r_e}?jDL}T7=k@Eim6fBtlR>_0UT|W7-5lsXbB2(D(?&-Xf_?|Q*8%cr1&H6-o4WXv-RLkkA%JN=BIOm;LOg4Y%y$p zPkj#d-z_#?-tVIow#lx)m(=!qfF_(68;3z_^rVUB!_9SI2UURWIv=>8{f)K=_Fs+x-@j_#1Y6{7?HS zxs3tDaFE)75{ta{R;_|VLUf@+@YU{@t1-g~q{iZc6L69ic0I(P)uFSb z>&G&Z?Z7XFEVl-D-|}I)g?CARK(A_yS@+-myH>F20!I3Ox)~LUqyTepgdnCUaN7`) zkj$^l5?;RChg09PuoR1i&dAjSn zj`Hx*7fo<@BUqRGzg_a*vElzKzb2Pt@7(rS{rm)G=7kE+fAkS!g<9kqsy^TNiabR_ zD>iq4<=XCvcX#5!x3+qr`*Yi_YA|Y@r(hxvNO|i@FpWjEkBKU%&%5a-vZ_v(_zbEq zi3(IKB{OTTk3KeR0MbA_Bh;47@4x#YvkAH6UAH5Rv=5(hy&Gg4N35#JVD1^OCZTW% z(N3l)O`8+!ef!}Ex-;XDktP_$BWhwvUdFi~MwFzZ=ermMDR^z_8C-Nm1X%^_e`|sk zLVA`g&>7jTNHg>ob|5W7^f=M{3Bl7D^kw+wh8!@>uUbhZCCbEhP=Ca>xy1HHk7Vb( zx-owuTNN2vZ;bY{jmb_s!`i~HY&_;8V;-lC=X~`^V0?0arK5c3Y+`{P2Swb#_fyB@ zbXOAQiv9hjS2AH1g~O4CtL71c=fbb^px$oags$xuDt|R9jsVj3zspQ4+4vwanSvI} zk`J*sTOqtuM>)#NUlE=p5a}Ag7fHn6mA?viLtAz*A%1m4$qmagq53)nV8 z|4=AU>Q|wthQ`uelN8V-`m>d!p;CA3U{0Lg8?%>FQ+v@-`GZ{@Dwe2XcC=x74ir}~ ztr&hU{tw9;=y!>vhBz4CWpr<-&RXgmgJWJITYfO|iMkCp$hD zqA{{|x+Z_qJ`+D=;k~taatEflV><%dVfp*Js>L*KZqMyUzc;4nay1KOVIvF{=&!gQ zf!o8hD~TtG|1s&G-IWj=G^r6(e@Jh&cXI7|4I1&xN=ZE#B8PP4v0lAc1YKXp)8{0R zR9j>>A+kDNO}tUJ6aD3=PW84cMTQz_GRcR_RvWaKD6!P7;=lXvyHDD`HE^{jp7a5Tfydy|NORZ}xc`X1a{OVBMY^PKs|3P*$Pv!tfFhUAF>4%yL?N^_jLxp8og&uY$I&XmuaS4Y#oi*TfSDH8# zJlZ`T$_BCdvV{YJJkJs$|rMPYwEmRW# zmoNtDXGXw}uG^;h{joYeXqRYPtDK5nDQu*L#`<9Z`>hYcF0aeeH*mk?Jj99Ic*ontq4+L-Ek*+`qFD)yJTTs8Jqxgy!$I$PP87W2tr+kQEJdGKM zalC?5Oa(;o-2HKIjdRJ zdL^rfsH}U^Z|+1&MxQ9$xLyP{Jx1V=smyQN*YT*I;OhhE<*^X2NeJ~~7%}VY^80kz zsRmyFqu_8w<}mUK-}Ae!?{%oL@7~%R(PZ81Y=kctxn;;V_!lQ+yuzF2AJPj#zI;Ke z-1-X8GJw%nyvBa7Pf)o}dM@4}<0ky!is4KnS%QAjP@E8=pnO_)wk{j>tZ{(N9a6Tle++C{exd^sA6a^T?o-B@m2cS4Bl@CwBm@ z14*-ghwR35SU%R$R#FGETF}StDsB^F7FqT#nI9C1g%NXP;hoCPC1k?CI_;H_EpzRL z=o*~HyKwm4D<%Kjh#h3H^yL34z86r)rT0 zwk)*6RYOB}w$KS?GY=j{6-M;U=8x`Ow=>u>tSecWwHLX)%MZ^3g*~*y_ImYP0(f1& zPRnmNQ6KQFZGnAy$03EJ4@Z0N=ZNU2%~x@DqN#=Nt}=hVwfQay+N1Z#5{_!FDKS3m z;Y?)yE}}=JHaBc>0aoc*aPW`XD^w_IaW4`E2D0)Jf;a1(1uM_*PFmVnEns6heP31D zm>C^Ve3>@T|NYPYI2Q)0=XCKVUE+Lm{?W#50$YV736!peS+TK1z>=MV@6osvb%j$^9`f;Qs$}RFy1J5FRO>?gaMFE$ zt-e;6$Eb~yQx37CiZmvr?{jCzsD*fo&FQHv03%@|X`}&9BSiQIV>R9C^#O=AaJ`sVCcL3v|8M4ad zo?9)q9U}#1mS3AxB*YZ8vY938VqBHGRC#@0!N#+ZP9c40+d-C(%HXQD0%Pm=v3KvHlNQ?5!r|{;Hwk4!G z1qO0I@O?#kL-wFizH|m1*>APuB!P(w zgj(ZmRTVkFC6BX{{VhNzrqD(^40YrTBW@b~2hl(ePUzJvBj;GiLcirnA5wo5<*G1{ zRMW;xN0t?At$~>oA0Nz`=79-S6!5#U-~mAxXz(X0rr(-?6^MR-rhidrb*SuZz+O#Y zq4EBs_2%g0)r#7vq$Z4=f$v3VEsav5rq{12%N`G1L=j%O5)ASL3WfB+LS>cArXPSG zONq5~sMS#|r69cAY`OeBk(yIfP@Jyca3sj^v~|_DKmGwnAC4(HBhNevgPPk2g)AOC z8qz=y4r^K9Ux1De>g25La-rxT1ar0s>65|9kDua7QWJhX*SlG|$4Ru|<2G)o=Qot| zZbnlF8B(?LBEqYqHd}~Qrhz5nX#jYY>Actt?uwv3OkBol(ZDbHSl8|#qe@y=j%Tbk z&n8(TQ;=EA1@)Ol5)TdoF){H(Okve5K4Qs5`g@@m{X*e~JCKj{1xuuy-8%OAr$MLd zI=?EIjZ?>7r7diLi6Mt21|zSaKo+d6z{T43xc?dm>quuXamT#8Iv5Lm9e1_b9TIyx z%#yKZKR@9Mj5QGnxiWix%(L}iBDZ_i_P`_B@*7{JHy7!wznC#IFE@91 zyQ~X3$wi~^>Jv_6<#|%ya$EFP`Fs2{|7mXkh8g*9lvq_@EAj#A7U7SO#RCO#)Gs|b z`Eiy*VEB})^@jnb`}e*s%bULXVD^Htq6J)^FhB5(0t941VqsAj4}?o!l|TlIXvQt*8Rg8CP?RRz(*?5Q`YTwj(lzo;7?+_tKL zwkDmg@*IhJ=!}sn7md<3RBRso8}Co@-Gi~2SxE!u$H*C&>8@=gPhbi?=i}xnj!VYr z3Qy?GBW;?Vo|ZkD7nVMl9a!+sw#Q)|%4c?CC0wxJUt17VmiNs`;VH~Y8E}$U(tDK2 z@m3oTY4x@~@^^LR2eWYynSloeIPa@oJi+0GJ#no?#8T}yf%vA!YOM)hSev(RggSUC zML*PP;@rd_>C403OwhIaus;qBMECVve>{Z!ga!D;sSgKjqa2Aba9g3dH}-DY!0#09 
zT_wyuA&4iW{d201+h-d4{uL%~x zX3yGq??*K2ep>#X$2lEMBRYzEqMH!Bz3l{55b}i&yc$)K3k}5QV`jx(@HdyZlu24r zCubM)E6ufBQKQCM663_qzX!+WeY)T1ULf|lb916ohZkpOXOYe((_r)i6OI7-t9II{cPO0VJyeI<$?&i$sg zbb0w*u$x8W)Zem<@;K&99gbQ)Uav)K28xBh>{=2VX@sRld4X=_MIhYs_K%@F+iFI0 z>AAeYO!X!TovFG}J4=!l$~;oT;;R+S(l@l3*NE0qQbMZ>;mOR|XdX#zNUfl5 zG8;;P8#g{q?f_H!$sRhGA~;D>-+@E1BW-gjm$0KpwKGzur$naz9r$?vs5$MIMB0G;&%rCz$%DS;AYd@I0SftgdZ~foB&oA zR>FAwrVC;$;tyC@@M{Cat>p7wlEhIOAp;IQYy9z}$H=s`pvG140V-}qEvHxUls zf-sFp+4^k(SNYD#K+C#yQbOE&Tepv~Qjf2q~kt=6)^#}|bhg*!$bd-k z$}!`v?GistS8?Gttzcy>0~P(HKm5>URL;-^O3`K{2vWlU%dav0)q662qvIjQ(A2>l zy4qTY5~(^gp^J}g${LSY3Lfp)*rsN(M}*lmiwI6EYxa}{!j-)D(b(*(86|~>+!aYgYbo5 zvNlf$T8$YKas7$Rq8M}4>z}7-Tdj{9RMUuY5*p_U-#Qked3`wGw@kwUgG*r#5s0H0cwZOT@Yj(ULn+>=H4qy`y8XfHqm7>F3Vf-cAZntmu z=00HK<`iCime#eej<%A)UbOU5VEJ$pK(_Lj&Z!5n!P*)^?OKn%#H|wJoZHYQ>1}DD z5xPO#FwV$|lO^p4o&+F{jXQTfdiNEr=L4`;g!fVZLo>*fS*}gn_L>Aaj{Hu0maJ#r ztqNy)1If;e=$&+!Nv?o^n2N>I>i-^^oPFQ8 zHuRO8*f%lv_jaSEcf7XaO^3l|HSs=7yFsKiMAHLH{#`` zOk5rtC05>8#ZteB>ytsBood+Ywk&(LNe~GcmYR&NC=&?a_`czgQrRlcFHDp@;7pny zn1&D?M>D|b|8IgtYd^{QG&dz$(L)vpQviD4$I*I+U-3;R_dfAhChcq?IAk~SP zh)r{X0i^5=c4DaZW50E~W@?NcteqoOs|5p{s;;n4wFQfp@wtJ|ES~obj7unFoBU(C zcmWAD%!+d(JARCuQhIUL^DFlXuATSP>`uaDVrOSkp*5*PZ~d7uojz^^}gJfE}tgzXywgvBf~yspV(1dzf0rB zKKtc$wHWQZPnf4Tm`WOcY?obXxp+%T+cHGXNPmH_0Vr)3p>LFCN>_a$lj>ITn=!Fn zuBkaV?SNJ~j43eaOn=`sK0coJnPDSAg*>3Yj&BFrIy+FkG$L& z(^Gy*?80vij`hPod3P+Zz3mt4!8S-9^bBdl9Y*s#X@2AsA-~+*+`@&u8`b|i#E2PQ ztre8$e~&WS9k-`8UWN^kc>>?R4PC-o*GJ3>`DeO-?=9*XQ4Sj!v2ll5{X5hE*uwot zW0Lez-XfU6vS^i#??mxeEi2jGO|GXW^glJ)oTOIXD1Z7yHP|{V1!&I1+)ar;4ue>I zwIC)d{Z;9RraHxIB0Q-Ftr0>^(lId*4_PKH_c?59;_0jZCur&AKcJ=Zs{h1Dv!SG| zxe#$d@>8uAV z$7D>phJ`;p(PGsc%{m{T%W|{az=z9_qEKP3c0T{KOlev?a|RZ)OG9g`yOt@NcB%Jn zKlGtYaaz1VX}u;&a*K+UH=?2}P+$`0A%K8$m_E|7ndK|#`m~Q5m`;o8DkN6kj@{dv zNL>W(H8I5Lx7k9v{+*}2zO)&Yl)M$%-Ex&rqf|Y{ux1?yUt5BX8aq^OQ8JsDf-6M| z!+tz);KJ8KLocFQOtW7c-R?wT9Xq)EX>Po^E1;%Fe)sm$~yCXAI3^igBTzSBPJzf2+%^QAJBXsd8=g^EYAMY)j zLGZwMmB@qWEjlIGp7l1MH1FixG!{}@92{IVNDF>l7kyeIi)l;Q6Uk?buefmjVs`@t zOE>Y3*40o{j2s#D??bMI&+IvFg5|aoCHCe`9#Tu97V~O6d#zHjmgoM*Sy@>{9vcJ#;09QN}k$qHonq@(A5f`BmNcXT^%cKJ-P`YeH#@4-7mO@)KTqcT_)RM@;=* z4+ZP3Ir}qT2*Cl)`StvilBGr9nv9V4H`Hk0CooyRiaBYvVhP7)H=i%^_o5F{R17Ht zO1mo!d}uT(m|(4-D5=V{=Hs`3g9)b7EN43|^G0WJ1GcdAUQ1k1?Sp3&gn4DOjMs1* zFD};?5T{G!O}O09sa6Zh9C-aOB-ioIKxdM<)OT6+XRg7H446fT*|{l~vi!5=TDsrv zCXdroi#EA#*4b^H9lB6iSYGDP7N+MhY4cs~o@ZE8n3JNEmZH7+yc&>Oe4%98*2&HE z+zpvrX4&7uGL5*dEZzHFjnxM*&s(|eyALn368i!WTY>M>Os`s&@hg=6kY290a_-@& zVB@}99qm%Gs0nd4Ih?VXxf%`%I|7kbuzmx)wqP~>9}BpU9+b!c2^|O z=%u8$lkzJ6eN>@r7QIj0af9|l8VA95+Wk?uyRMblMl>wHRaS8qcj?9UQMrqpLF&2W zwiE>N#3T%jSUi#3M;qnd=5)`=DrTwR2bTzIj&!4f1MkZ&>g?zGc5QqTq=hSc!?!OS z&WyO$U)J(Q=dLt8*0fRfO1r>)3MJ(-lF83^f?`P+j@_w#&NLL`>|nXY#E##1`aYFR zYn|5*Xlu>=)8c#ud1(o>_I>GcO*6Kaxm*$ggR9vYxzpStQ|T=F*;2Jv^*pE*0&N5L z`b`f^?Q3^@aP%|ho?FLc~tin%ia#&I4#<`^Ix})9BL|Fc{5mB zE;l1I(x8M(X@6ADs4=%(^q+~1sv~;m5<_?j0w(Y32bD?FEZ^RVUTpHg{Dth!OS%T7&T7p|Z^^1FOH`p&r4#wp}#a1@^4B_T{Nw@YXlmzlVW&W`%!gs_&cv$-A4 z6(=N@RXJzjb?0_`q5wyKM6WK&aSxR*t!=Ls4oWI5KAg4eA$n%ml7sUq7RX(3O(#>H z>yYAhkCMS~y|EB+#jG7nUvk}d;X#Gkyl?RmUcEp}<4lvuw-XimnC-ay$>2Yz(b6!R zYT3Aa@P$>CXv8Dgra7Ue`Yr9WtP{H)3AbCS@dVnOWanYpA|)1M;r=-(xvkZP;RcZa zWg3`mWb;+L!S#VOrNlP&J(bE&N9mQ3 zr|#+rT`%B}_=mUNxA|h;6&)A79g4T~wn&C5q%9L{53|_EbGk{wOJuhPRWeLNsLkYe ztHO*PPSaaguYO#;DkkvvH~QGaNq=cQVe{wvkla2l9ZREpc4n01Xh}P1_Oigc=^DdM zj{9$0)w8!^R~gX%%)FzRwLC&JN8{rK;U^@j(MRF-+ zjQ94~6t@3fLlGout39+s7g5>4YBSN&pZnA9+jdxQv1yXf%$`#VsM;>qIAL?%-e($o z0ElbaAZXdE)MjQQj)V3*d%kwfoMKx1w@F1vK85Nn`#L)K5oI~g>Nj5Q^mwZ^dO3Fz 
z%f-w>DzvC09&hP2e(8LM_nPVjWtP@mYMTq^W@$QY)AztZEU{7c#>HUV5FPuhj0&4- z*wg#4k2b8H>kACF7P}GEYH#72x?9TVxxj_z7I}5{EPJb+a^>hht`D(r37=uUd;8e) z9lTg3$sOsG^?n`ipNm_Xz2aI=X>|yV#as5AN7ZJ zg+PVSMb%nHl!87)Qx2=5DEKG(`;}+;pT8>N&vM$9!ErdEhQ8tSRY}dM3&Od4>OlB1 zA7@txB%3U&xa`6ij8uYm%eBaq)o-%+0Qd^~@`Sb(b$wDbehS=&J}%lM%W2xZj4@@> zPAuz$ykKqQ)U0+GRdLX~GB|3|pSm+qtF~$ph&>J;=_28RR>mzv9=G2`8QwkSZ*T5? zb?D<1TWYYRcNnX~%AlBC>K_DF-^KSHyu;;UBam13Z1a=R1Y1){Sy7cQn-5Byx0M(A zlVq6QE^)c9JO5t$Prv4b*C>n zat@^d=pQBFynde(A|YYyd+2`V%prN&)I}+mUfwR3Qaf+R^145AvrMIijsF#k%t%^+ zw_liE<=ADUu$1w0`24!{uAWV|Eq(roIU?fbw6)11(w39X!JDXSDn2#2SdK8=ap}=ed;}(K>htV7~!gLwm>pBPOb3E)RIiE(?x2m#{ z&?{>7p$cgWw20l{`f!`>XL{y5z30@H_UB{s^vcH&708FP^C`opW5PDKclDN}8Rto9 zRygm^s0p*6@w6X919;yuIw+*5>bXyx9;|&80+|0JPXy+%Y3NAK|B4-UH5pOOoro54 z&T)~k&sj8}W%c?IC04L0K;(&H(uu&+`yV)rm-_bh7LFyvZm)Jflg)Nao=*hS6D`Ku zxTN=o5QEVZdqHyxi#5Ew!5}wA+H>!}{S5ZbA7L+q8HP3WB}7}>Vx&$Jy8^6k#99qw zDSRAEdA7mGg;~ZXM9hDvyGdWx2h9vlud|v1GCfDn#7CK#Z*ESoifnb^e#_>|WNhb? z<6ZmOaD4aPXcof-r$ua<;li$MF`e5vTo4SmRLjWjM5(RA@lR!uu~FqnO8K#w;U5|D z|C=Nf_i1So6+=3>2>FF#$!*(_r-B$ktt7?I?<`>!CHf8U)Qc^qUOZ=u=cSPmw(X~9 zaYQEb;oq%e>jca$h?cH6Ec#ZOBhO?{8bizAq1D^3UX9FZJ78LI$U=(UaD?&Dqgr=t7g3vsYQ zW4}(G12GkrI=Agi?4E4uP*VA%XF2h$aKGSa#oRN|vZ{PV$+aAmXEFu@oFx6~ppFhy7mihHPxoc=t?P49Im&Nn`gl0DU`Hz9g--GPz<)}(WXeB7joDELPs8Ta zh?v&IxoH!@x%>~)vwQ4hrq7OcpGll^UcH1N;EOAx%fHb~vWWx6)#}Ly2QQjQPx56< zPo5Nn*QRShTts9pFTX`J(~y~8VE58fM(mMAu00(KH`h`if2$!(sKQ)4d#h1YZ#wEUbOI<_4<==r(! zYFYlybH;TBiiao&YW%Zb)%Ji30XKikTmBP&0s+{RSBsI@)e)~$*$@33!lBEh2Xg3K z8MZhW7_29ulni6pLt&R4f3Lqjx|t4F-j;{6?rT;5gZ|T~EquX85Eq4{qG@Tv{4;(C zX*M#7Z)suu%ZCIBNV$Ecka;x)XJ1Ibs7j3G2tOl{bBB){xd^*4GWWIO{UI;$quoc5 zfQRQAuL2kG@Ssx@ij3o~r_L6&u~L-RD>)P3uTONJQ2RF2umqw;Ih^+aXSxbDA!(dK z5)4RFhk%2`t&UV^K-7Kct|_C>;I6%jT#MG1!2gMx>I~n#eEIU#8;n}?Wq=1{m=^Pr z87N-Hp$=$8>fO5*CEg5(H+BEAS65fpY`8IP%hs)>_m13pc(R(^_ex7pkSo&ELgVAF zai4k2P+D3_03gya-#uMU%(w0vU)fugHL#-*^w$|2&Jr*qu9f$iQ)9WQm-{5A(Ov@A zc*5kftK9XDl4y#F@5Dq-prd;2A|Y50|ISkxmvEF0*uQ@j%73~gD`7mM$~r1q2Iiv@ z=|gfCK~en^soI~NNd41fm|~q=RXgxsXK(u_3@uSW)a){72Lqie|`m_wwiN9wHS$Gbh(7hKW(eO{=&Z= zfAekszxX0&3slqLqw9^e&LRflIs~{vcR2nl8Wamv5RiprJdDTvVH)QEF9At>m8ZFP zF9V6=_V-_k;6i5bpwEF5#k-4GiP9b|g$oTZ#+^BtX1WY<)?lr0a2)Mi{}0884i0I0 z?7tOWs01(~PJNQ?P56-;TXoL$zFc;L|o31tex#J3vh7iIJ3QFOj%eeVa{+%QhPeCfRb~!6+E-aGP zMGHUYnvv6C;RIY{+qP{JPH!7Y$`{GuCLVTpLYqT8zwr~}dNUL>B!&``KK@TE8Ys0Q#BHa8tJwXKbo zd@(Qosr4ERa)w22li8z?P(|eAI=;gXm9guc%Zu+2+d4aNQ(pyKyY>j;NhV%B8eCyH zO~4jDfhQQX1wdlK%C+;A8xq$th=sIy-7%x%L3~Q@IL=mzQ;i%LcdwzoURwmQ9kCL}eIk;Q z&K6BniRi&Co1rv6bV#d6GXf=h45DB4;goe)D86&2d>GhZL~QKv3+*92BO@Dzhk0ka z&lp6Wi~th=sUZ*F{PEr3i&aXGDEg3>B2OkLoQ7A_kc6GtBD3-&F`lPqWH?T-&d|}7 zypNDpy#og>OCc`{?HgI?56M?}v*A=}>KQW@i&}?v{5+y1ot2J(V!FT5>d~X??CtHZU%%e-cMSP>rK1Ry%1m(_Zd^to z>He7c3>7fyIPmNpTO3e^kZc=}2h-B1RknzTyg)yF%xq}B19~HoNDPe`Gc3YQE_{|E z(=)`&{b-;{AiG(9ZX{>4`!Ct5Ao$ufWz6rHS#pjay^(PDG+#|s6`ZL=c1}*hg&m$Y z2)IRj$jb^ouN9Qmr_SO4IxfbYDOg7NWZ;t~TZ@!JlCulz?5`_nE%s7+Hap0D z5CTe*%%3MA*a$0A{DXtb;o#5_A`J4wlGXT?G7ASrIbO-5iJ&E$hXOr*TSj zRHK_gk!{+vsr8wM90^wS_4P$MPOOuSD%sT_?YqMPiNX6Z!+IAKSdBb}>U0TtryXN& z;$<#jqR7d56NCOcujkD1PS+0!NPygk)K0NZuZ4NQ+{`6zk-*PfMgQPM15RM)&fi{K zX5A0^b4~{GKc*`X)^mUX^&|aY@GZo<-U^zkDC^>1K4Q0b7#UcMJ0~~5i#z@9!khCw z;F|UT8TyoPjp;4xu1;_KD>;E_GzsI79Dy)ekzug|>|KFdH78EisUY4y%1t~b+$DOG z6P_mqsjZXozm_XZW6u{7aQs$j>6DH+Ksrbgl!u9e(di99o(i#u-^NP?QRG2k zSXXh-_@wA>*%}%eIRvBpkmU7_Fq9c-I(KOVZmPYJT8{DOp$DWLCO+v;K)gqj;8ZTi ze}aq71nWpct(1$%J=?O13PT8dquniHN9_B8cG1W*4~9Eq%>9F$QA+BX#2jyGpp#7% z0b#+xS-*eo*L;y_Nk){ZXIp^3|3eeEAwlf$*s0kB$DQxvTX+Zs@Kl$5HPs}l9%Q6H 
zA!I+d(q9Zisox`h$o?59J_D3g&f-ite>4b*Y};1l1}yB?ESyzX*h!cwLQ}DL=fZ<; z4GOH8!_@t_L1c_9733(+{>sEnt|pLceSLifR$nouuT;`~Ay&;_)EM3_>zDp(n&rj{ z`#x<>V=Vdf21{aRN4%_4)%T_co+qJ2za>|Hrf5=++@8GMNnSsnI5lYQGQj*#CrcHS zoRW0h_nEGbcAmfH-K2#dccsiqCQqDAZu?qfF5e3{m6c5h^^;51V0b@B!UFL@KsOx65bH(R`%H{$O{ggg!gT!o#mW&y zP3Q=KfBZPW4fDu8;M^!RxvJN!pzhZ1tB98a0s4qvr+7olCu$;` zN_!E0My!`)t(wmV;U-VUQeaeKVlc6WF7RC#kzBzm&*=q8el=oXU%gT#FZ~vAap$u- zsNsoQJ4{1tFQyJe8HBkFAUI;dpPW>Jt_hyY48obL9t+`m!3=G-l7usnj^hh}7BEss zHT?q_`V}Oj7XeO2c4g~O_Pj(17WdmUtPCn}zMFW>JCS53GF!n6)BkfGfn^Up=FXEZ zDuHj}tW8HAE37P|-MCZS80Zzq9=`|^C^<^yF+N#a_{ZiHOEV6MHEC0*LHcmHrMTmV z4GgaK^qAq$AV%M#=XD2RHCzUWQ3W4lnT)EaPO&8@0u35riVA2UUwry)`223c{5rBn$vY zK`_MI@{ltUuq@9_gA0C6@;ZMm5Qu|hqW(Hk5COF~tf2>fU%-sY4&Bmg-l`lY_o6w^ z){xwBVgW)C8VJ>${1T3D`PjYyrJOQya&jvAjQm>LIWLSXBe53dC_1V~5=e*1&^dB#|rQOuIknb@o`TgbqY)_%=d zP1$k0mHD7DwTj>f*3F_BwA|%T$oVX5WBt%}NJhDg90N(h&2H7XLm`e1@JbgEzi$TX z0gO!L0AssTTb^QTA%S!of(ye0bpQp>7g$=6+dumKJE4{_9xfBAf=BK=X#>8ue`3@1 z5*tc$#6?imaeWB2rQ`ZGz~ck|2EmN0s;ecD!&(Sn_G2rZ-#f=*lFZlSD~J+K;=746 zAX?VRD%|8NY$^&`IGQWzN53#Obv(Q-Jq*9>Cf!KCvuS^{EJR_7lWd0YX7%7HmmWQ4 zeDYfn+ZyA08$bPEAUj9p#uU31|24(p|BzH1Twr2bQ#fbsR5GUG$(hE_}C7)N?t_L1kh(}yQHs#MM&ZMsgG;gLIHKb4r(X~K5j@Q5N23Uk7f2aBk*9@yv1#DH z??QF||Cj&2GA)cXJ?G1`vs81wZ*_=V@vHOlI(K2(hvZ33o^IW!h3o#h?vGfWE6&c& z`%770*5^geuH82J`87HEH*dV`RU)E~`b{D0TU(~953keF(cxgz%4=x6X>NWrXeK;N z%y{Q*WF#Z{xpNH__RIBCb$$)KpOqxk)uV5)vpZC}Sy<%mkFB{gjXTZY>Xwc)B%U45 zvxY4G+?Z)Pk2+oV6FHu`;<5i}sso!(VuILKrLyFyKFCb z`BA%*$_XFLGI;Q?uv~=knRTN(F>sQHM@3z~dGk_JDD}ffj~=y&kiw*EHhHQ3jPIq@@sL}RrEbhj}M)S{~yT(6!EhgT+#rv~-V>afd zw4NlQ5`^8z?CtIS{rztVQRCx#!8g};NB8yhW#s1GJ$0KYhTfXQ=eYHEFN@1eDI{^G&(wBF#VgOvWy1R?Udxw*za7rb+d zY?*}F9qeb%p3(STrVPl=zB$8}-tz_?>l+$0$Zp@b(M-D5WbI)XvbnYI@nxmq=%Jis z=!t^D`R={Mw^UL7{>^I6C#0gNN0O5F6cq{d^z?3-4c@w`RdPv1MI~C$nG5x-J4JF~ z{F{KHk`h|4+~)qmr_i7#CfT?eSO%RR?_bdNIN6CJ3ez;){dM1IubKJlmNj;j_MHbN zE0wBWub{Bj@LuD)eNSy<|122FzUn<`xi#ih13TZf=n<+ZnA}e(T~>GetFsj^1?e4Q zJT5M-AV~@zAD=WIiok$?TTbiGX3jhgdlJ{vMNY7W!St+AI8+lEG5q;`H9c*fc15CZ_9rd_$|I zxRN^F85uXUe{T9l1xTt@57<_rqn>+g1aA*YF5BcU7xoMuk4%wv7l!o?jtC3BS*~=b zITN=FCxZ!l={FFHM&I3)+aY@nb$9maJ-hyZ`lA3KR;_~hWl&673!=Wg1^C*)yreA+1% zt1Xz5rUB+ZzvZN5!?Uye^!^1j7%T z5cYAsf|s>Nd*e8V6f%VD(7@nG^ADLEy;JQcPd}xIoyuQ%c;Gzi#!$_UniL=6>+R=D zZ6Xqz)Ru|ghS_qiBDR<1=AmMl*))_Ckw$%!|4EXno_})tt7}Sqj|46C_J(A+T$_p+M~BlquvVgBRw&=JlTI=Q`Ns9S4K&zCvu zR_7nbf9lpJBozHJTQ?znf7bpvbw~qI-$Zp65x?7m->%Qyl_$s*6hGljU}?Dm&l zoF|EtglRo%J|aWqIn0wN&YT`~N}_fMd!M`RPEgNin^Ptnr;o;_etR03oN+8TF5;qT zqMcM_(8Y{GD`C&9!RQRkmx#~B{Q5DUs)N#w+jubeBxS-EA6{VPURh!JImNNc?|X2@ zGULt*R7VBei#**hV!)YDBE994|$F6~um*zV6T%L?T@gW96vo z&F1+e!`evAQ9~fh7&BHinj5c+=&f7W&wL9KeYcEyj82@5-|-m+PR)2aZiDBui3vqH z(rlus{7|nn&f)9e552)=5{Twz^vot9iblD7^o@^~Bm=Bvad?x}00aL#df_=w!682E*Ueke+63I8xI~9~@Zr&MIzmo1 zMZPQd8+U#MdfcbGqDzve2&?THhoE=@tv2Le^?JP0Z z+Fl;GV%ndraDcBVJ@>I5=Qa29#4AHYdbmp6OXYe+{_Jx_LFb9+j`sV%`8>Vb@2jav z?@7*MA^!OB#a|tXp~uHBFH!K~VPj+8U}m+Z1W{{BoTr|mCk2vGWMk!-4N*@mAFr%lJ>-!YZ_{#mi&#V?CK zwIP$hC`5ag(QCa??K~SnO{!Fs-cs*p2+{>ziUYYi0@+%nKUa6#qBxosx;}B5_TSuI z9U;LXVXuEndq2-+?y-LCSf%52W@hZt33q|y#kbFIVnre=$6;N8l*|R z*Rw$D{q(~QA16d;sHyZf+i^i?c%vju$}7MmZf@@8wzm5(Uw++)Yd3^P!GglVu~S(G zhmserD1gvg<{~fmt;~iB&I3ev;-Rlq9kC0aY%7ggH&M1nrdCvt5fKrkQ4^63poO<_ zCom4^7OP)~R8pBcRw}sMy!Cuy(we|u^FLxJnqS@2t|P_eragHPuPpjRL8-_

fiP0p?+VcO4(}3NDP~li3tmUc5ot4qz+d(R+s%k9wa>()&}Hh`4J@4uuz^Dzs^x z9-dE75foKc)kwgK<%eKFM)W%v?z2VevM4>Dy78xmctxyQG#n@5vU|qZ@CNJL^;Z7eq zRV$u=+=eF=51P1Kb17ykQ`Q{gFp|RuN>P}Yn0gDHADdfSxe{#o?iLppXXNDgv>Tr8pPrst@yEXxxz~10K!C|BBURUJ;k_ug-w}<5FQ50^ z^Tb55Y0fjHx@9et*CB(}`}gnh2noG)>wI;G3JfwbGr!NagrT9r7-huveviG2iMfc1 z=G4c05JK^WK`Qduk7&+EQc`tQZbx|ztC}d$M~|?Omoy3uKbnt}Ec9iOjJvK0eLY;t zQm?T4+$3z|-FPh5UW>6VCf%Q}_jIQ8SM<-qoYu6gZL%)9)Q!*W2pQ_8(t;75gFF*owc`s4e?QKFr$mz)u`AAA?Dw3H+ z^IFqAmXhkqQsWTZ8?|*>_~>`@>?O(VMwE%uR9C=xBO-Q;945+8AZ2Z7fu$E(V)EQJzDJiGHm{Hg<(FLJT4;=>}ie5t?gI(6$A89Y`PXQJ>HQx_gpJb zvs<%I3zrt1#(U5p#`oqGZoQUGNSYbDOY*AU=$lxZI_FVbm}t@0+D|UhC^sx7FLk*l zm&kFG7i8B{ni#&1qG#xT5K_HF_u_AqNXf;CmcEmXN2!ISy{O~T9dKUq94wEjT8O9- z2vju2>+E`JK5@@!WGVf{+B)lDSyC~Wndg%zDAQ-%e4>GjwmE=Ul##J>vqZ6p7+vTE zukPb0n{pzV^31`{x8ivR#Sk+0bTnyOiV`ChC-}{Tk<0ooUdd!%`TBM-lnrAR zUmI(ffW>m1Yc@%6Pr=m=vjl+y9g(J0%Rd}3YlvhKx7pp;yXInc zc6R`RIiJ;_%tlIbTJBPN9CQAzIznL$v|D!YD^@{WJ!9#5QS^5F(E4pmWV$!8LZTNqd^czs`Vwjsem~wCON$TsSLmMr#zOJaKc+2DTMgr)gB^%x9_5({a?L8*N^QGIitA#AV?I(Hvu z)M9$#11qaM73wUFR^nH1S0zc_DGtZ!j?DyBK7gB@9Lr$dn$1JmglbK*Gk&-0>hIV? zho)U&$DAe`AJ7uU^a-#?z0eSMxY*Rse!0JV%>7_#QewUEo-cq2fruHd0`TO=KLI?zpJos}zr(cg~ zvo;?3E@!c5=RGiZ{wDWLx29#nnx1sIPr135p)^gKhcbTBig)xhq;9Hgc9(&Hf!Fre zC1N(s*8s#b)n_LhzrXE08Fid+b6Weh_rDG!63#B&}7M5tW`-$`jl0;QH zteTYEym#*&z_dwwlQDjE<2_Hs&t`p{1wJ*ewz66|Ju@>#dBWK&SosoKv6V6xcyZL3 zaaiuH(&Lrqli$$6(hSr0S#oy6`S+E2`AK|P8E?-n>g|UQFU+VRsewe{+QSeEPV+5T zpUG-po&bn1w@}N~62HoN!*2gc-!b!dOE`PsvHcKrE+cID<{|Nax0pxW zHvCc3{O)@G^A>`jYqv39ym&EKYDwLnt7BRp$8Ad72*vq`MJ2AtVOQweckjGP$6as) z99GurAKp|OX*pf9vzs-h;OzRLusy>Q)Xia(iCU@MKv#jF% zMbet5$I~QEdt;7hy&UbEVTs2W<>lp0OP|y8p4Fm9SmqQr5{q)1vY=IHcW*PNtNb9r zl34rL#jlIoD~tY)LK|NwOjXA_E4>XjAq3~Gl29u4vwQc@VAz9wTYct!SUAo)g>Fq8^cFR9T zKzKoeDdJgMLlZ!6AG8c@t-^&b;Pa zWum96dnC&Wi!SURQrw+9%__os2)FrEwW-=dPR6RG&wAx!P^TNA9`#Sp4%J58k5;2^ z>7rv^rociC78nF}aZAfEo1)pdWjB>e9C0$3gfW-r+c)|oaSK&N{IpXdt(;;DmqqXL z?j{%1adaGV?MCamK~_}_f<)n5$pVAK`Ybx<9Vxi+uC9OO9FpJzX0wqQQnL4 z*l^R`-G$xv>!{KH%n%J79r{zZ9ik_0J1W`g`C{;4Il4vzi{C%ZiFFcpT3@1BE77gU zTm&E&+|}mP6yK`TVs_gVu;+1lK)D{5f{YESnuWpq>!`^;LAr8Hc+l+bs7)Ii!rLlW z6^{oIi`w^y+VO3eC`yOQm!t%If2ple*K(crf;h)x z!kXnT)l`d%i@ff~yyNai=17%?@_hfE;Pva*88GFqWW6`~*sd<0=3lLlL+^SW!luNp zSdJjTa=sxMlJ%xyPkwdYTCbcqxvZd|Kzi4Vbb5Lk)mw^rnMzO{E>foNXRbbumm-;} z7o@^kotd?gG$xA$Uo>QzrU>}CZz36XIQzM+t;9dTUIVK-m>Y%mtT#iUY;W8ZxogU7 z7&#@Tawjy_I*s*Ziu{=J7RKkQS(Q5^XZ$0v0tDieG?9<$w$L7~-Hl-604 z&cLN%qO@(>xwSD4T{E<>G^=r5zqnP!XrSsmtvbw*%eknJvX|Dh=P0b8?Px{DaimjM z9**m2a$^Wz0`-JtE;PCW?0ZYB`a?QY`dAB<=(5E8b-P+SLEvMh*^U77hYQ+Rype6I?F9 zPn@`n%KUf-?d4pA+IngA2`->?>BguG&)93=DQ4gqHo_XijNu`b@y{AvuhI$vA7MGa zDQ(@#A~I!Zv!w0(67jIw< zc6TcR&%)y@%+F7jnwrWop=sL4<7Hgd7;dI&kKYos;*#HMO@Ot?r~jnsLmTmEM{iSq zirAgwZi-YMC)|zbv(GS0D3EC&YZ{t8h+tKlPK&irL?TQa2A!*DPMiMccRNon$j5Se z65D-XrYz0K>{E0Q#SLRt(`3wvkw#Y-o=%;)=`qlBvBQx*5r)(dNTLrVtJ8eEV-Pp0 zu&~hjc;6aQQkgler$l92%WphN0@XRml}@yrgxD9OVG~gZza%}|^EexrcKR;~r?ThW2c7q?)OK`S0p7vXx;&+DmYk(m zdLT+>v0S3-d`~7}sj&X*Z7P=}Np4dO!>ndOk;Ny)6%^j0`Z~4*{$!5{MN$4iLG`vB zLVg~nTZ#K1x4bb*bR(W{KVp00vY1NTF!aBl7{6j1se&wgu+ZoV-Sz8QJJ$>#jHmAT z8#}q(3g*aP_<0ndU*4PBjZHKxjc)Cn-I9>5|3RhO=6%#M-jz4HDzfOD-$EIW;=6s4 z$}cA=wi1eJM6&AyKqarYIoq-@@P!&d3<$%zV-@xmrBAC~@;wNp3Py+m%ZVyvTlikO z*8N<1`VAq|`g+}!nVo^B5219(-oF9$4S|0QTJglmLl(9sR7F*2i0M)bt_-`~Novz{a4_eCs0W1b{f#XH%*G39h| zs*~xTEUHW3SghN{V0>>2N7%n~xx@!LI+P&jNOwFG7q6MJDm$U{tK0ZCat7EYE-H$m za-MzH#`5rAxT&J`u%fpQ1geP&FQ-;}`{oGp7fdIoZB~40tNJ)I?89HOzU=OueqDu! 
zrb@Qu<|I@IYwy2AMMQjd8X;~0zG`!>?OkG`$B@7PMD=&?-|HP$Cnt+Z5*W7(WWKPm z==#PgYhY&Hh>B^%THJWp%lRSs*NcJ?4Y71x<_9FVwnP}h={-Mw{5WxRm?9a*F7U+3 ziTC*AL_u5I%%!TVjMSlWY;|pLbyv#b?DS~uQ%VX3aAkL%e96kH(+?H$HE4^RYU8Lj zBLIO33)12!o!|Xjji|>VS)0jeCtUbsj7ljXA#9vb0vDN0mD0SJ+*`{0R(qS#>rwDy zT=?qcfiF>H9Jg-aK?NqEs!9ws8d`7YL>ur~HZ}!+_^kWzwBgUV7lKic6K6l%pY*;` zL3tf{Ko*r^+k_(UDvM#F+Ek`CHrA;Qkx4%c=#+m3*q*8Pxi*KSn`wv5I`PYvbXf;| z0q9`8SxDr;Q(Sn=&=g4#`Xihj0}AlR7ykq0#r3{eGK}$~>)!82GBW2;5HQXmsuRKw z8vY(biIEE2t+F zOJ|pDJc~Fd?=(|O>>j1ADudqjJY98UQ~Qjg(zCCV5siY!fYB}Na-#pAkO8L8n?ya43Mvc3#^!Y>;WMPnbkYZqSV9p+j( z>+0NR7E-;V5vx3h`{X`lo`N_oTcpe zGxM7TtH>e6;6`YY-z&M>u?!2E(y4(gZMf$00+{k)fiWSlY^awNjRO~Kyp7INwso*3 zBZ9))%y^wPV)O_$&s?IMOoPV;thpf`_dAF_N!V#C$uY1tHnZr=hw(VIW_;baMf}PT zGXn#z;W=PU1zr(*I*CuX{XKL|>l7h%?INrWox0mRTSR{Sv3hRwn^G|jK!MgPqD0Rk6I>L@GE%v%2fqzA|N*``Q_Zo+)i_YiJ zCXBziR-=KV6jtw6ocw~{#1i^A92axiyNOjN#X2h@HFR*Qi6paNWGtUY*Zv``IlvSQ zrtNhZ#6=|w)5-iCiOkm{z*;^|R;6mm*J(mMrj%4@<&yLZyZpc?oz*DhDYr~miEa9HvHRy(g3it17$18F{s)_!h=>i;eWWFB(NjK2+ zm;=v6n=9u=YU>+8#Tm^zwGTptW~TKe!W6Z*8`U(P+rM-p@V|RxbsO@fTKGCp9J{Y* z`jB9$urad0%DOwL*(6D`DW*etM_N;bi<wR>5OcJ2Y4=e`s8Ftl`3j^^5!= z_*_vMq1DL--W@~@zjmgkb9t{2r<`~4c2qDMvs}L_gFc0y>#K@AnW>V6M?~Z^Ir*%< zv5}64XYc)p|xXBdANrg6#= zYN=z9WRGx+)-w-MQc|9s9v?J!bTokn;oaM}NWoD$vNryWoRCqvCY)I*&*RLUo{^D` zlN0~^`SU2xLZc2k5fN%Rk&`={TU+_gTZWL!VIaA01a;OmT%3Dt!qF+drl)Rp<$9W; zi{exgIb@HGuuTd~jjJ;X7z5J~ES}k5izSj2{3~expV5EVDDZhm7#cECIZdLCTGio} z%|&V<${ZL=KIY|hwubhz$?p)ACG426uCF6OWqOQqZVy2p&UMR?JGP!{X!3hpA8m1Ozc- zRH)XrwyF7fzr4I#%ySlO`UL*7W&BTid&kWyg;jec=)XzTL&WkB$6XY$9nxq@fm_2k`fbvFs^Yz=g-R#foa|AON_&)W|<+B^~A7h3#l@8C0sAFlGD4Qfn zpp*V5GF?H5dm++9o?$V}O>^lErX2fL$P4#{t#;t#5IFTE1J&WRy(m;7TdMXQ| z9q6UaKYrXXpLlyrDdci>?d#U%3cE#{78{K7=V|Hb>$g9>Vh9tx!$-T|UJ&7-V0!AI zc2zBOtd@(vB<=I_21Vf%CMm!m@+1PyAT$im@bIhj3=Fi4jF@%4xOD{vZSpEAK2Qyx zLov$4Al2P(jKU)i! zW?9`(*b~Ox7(~zTz58D=fZqQ(19%Q48^`O#rd5JM0p&6&I>ep?jq82k8F&c6_kcuQcj;|%}B`HJz?@%@Zxc5?_v0$C{axioBvltZ#G6# z{vgAUxL{RC^zERB@l9TzP;wkMTDb=yb&woO5q%^wjUzr66eQhGqcj7Sg z`bb4TM&~ zDyqJbZCA>L`{~d1T2qPwO&RO_vcWnc@GO69Y! 
z3RI~in5;7>&SlKgSgRCKoI@fGCl!Lvu&ODJDxF|q{{i0;Bs&bOz@W!4eVLKxBl_#J zwhGT>E9)#@MjdhKv|N^oQNfr+>+FnV{SP|-$i_glT69!e`mNo*cAZo8c4%t7sza%a z>dahi#f%n9o5gx>=NyC^Q>YUzOQRqa;MRFx#F*db(;w2BqM~p=%Z{b`X4QreeNzwT z2b+)W^j{3Aq~4g<;!=fL7B`i$&sf?ceiUx@b9M2F-t`;-=C#8<-7l)hmu)CC?_!G^ z-lUrNH7Hm$=4*KFpB9qx>{x~y)?b5tz3U^|XDe?c6{zAykmw27Fq^k7w%Waf(tmbi zY+s!VdG8NY1UOlO!)4GF9KGcG4ASi4)s>8e7`-x*XT)*oHgO?`MW!{yg22B`zx-pY z8})msj(gKfn$F5ywi_lvG9>zHzV8|GtGAAf~51EmGRAFXDIH20SJV>6X8 zHe&l~=H>qP!f!`TW>`bBY)HM^^kK0FhFDTEDX;JU+K`b!+&H<80-06cADJ&F{}52~ zVPcoa5oFN{4cEEFMs_{7-Dfa!Z)#(8H$NOMndrQ!a8c$=Ebvo^$cV_5jU<}=VoBMR z5_4lEAuZnZWd!=%Z`!8_7*%C%DM&;~^r+-7`92^L*+pYTl8bV9OH6McqmuY@Zps1n zq9sf@A{~o$<$`I<(8WX~e~!yM_c+Wp@N?&5t+cCse(XBpkG0r{f<+Yhf3_z}(o|4P zv|6k?y^NjV-r5hC+1zL3L9)GpO3Kd{yt>0Y+w>tzn)NX!&@$0ZCE-@lS-sfA{bJ&L z_N}g2xD27^O_5|#O{IJ#L~NdB=*q7k2jr4Le@CjeDp|85o@*=^&t5o?l*Ya^Q|^(- z8e*zKQk8sfnq{#Ey?LhcV2}X5v8tY2^3ohZAkbjQB`uZ}vYOK7E!58~d)L45{~GBY zCC+9Xfy&h^z6f+Aq%#kI!?m%s?QiU_Yk<7$%+ca0 zG9MuZ8IIRwJEJw!%2~RtrWvQLzWztR(%UEVt(x50Gp!bH6nb4!qy|1nZYijvfVNaZM=_Ci-QXhny+tARc^BznznY1PYZ7(5Tp z>4AeJTELMVm`SQpyB-;*ojx@MW#!k~+jcl28cOcOMW{?6?B=f{s(j-Q+9ehGM1LO+ znuy`}sBu$9VJX@xm~ILSR_Tm;BSYC6L}fhiOvka}GchrF8x`d@RA@xrF=TePzNzWD zfWXM=+H)hL>kJGH+uCAcuilMP3OHN>!3hyYmx1N*4-S@aaJbtTO3iWRt<5+Pe=<_Y zjdgK7il8*}@YTltOLNM{Stcp<=RNt2W1 z%%2WGEWWAj(7eL)+r59G zr*1eW?s}b;?HsZ{)+5BnU!3k=u1wNNic_`W>Pg1o6%?cZYY}*~?+QKD*1l9Z_IuZH z#o>sd)N;SHtZc9sU0qu{=$ru9;G)&3&9CGk14)C-cYI+ht%{t^ng78Bk|l zUS6d9b~g$N3f@LXW1&FlMJ!GKmIAV)g$W~fsHD`;(0K4&O+mp67Wwfno@#6Izgehm z;ek{uKg3wA0^VXY8U}{*l9H1CZ{E~`Q$5dYh)Y>TMHjdul;?ks;qg4gF#Yx8gVXMi zQ5Uc-+wk>B2>}#oFnt)9yBADSWYBpnMtS4-?JsWqYWJ?2s!6)%jcBEi8KXpXb!p8? z8fk;;{H`>Ce0R(7xAu<@w=cLIZr^ip;XmA4vozZRtNj81gCa-n~o1PN{m$=BZX=SU449nyYX~MN3O7SgY>2 zV}{#b$gSSd|4c~hzKYP1N!!zN6Vk4KhDS;$3O9j{x(=gL3JNe?sMaFgcH+M132Ax% z%lon#Bx#Tf6ZU2E{Oj>QG=I2!RLN4M1T2Gox&rKQsWm z_VLjL!wGmHg8HZ%Ou_8`$5{eZX&0DE5t|3_+#eUSzN>ezmDt8>v>Bss}xjcd(egCZgbj0*;742(7Nu*PV+;N44>Zh!_x~A~+T=%{cex zQ-G3#coC-6st+TYTUzj+zTEI}0?D-jBnU&ZLgOwvdGWVec)qo@Ra{>Ft=%_uPe*gX03q?)T_56atw*$0!Ls zeE9H_QAff9AtNIr{Tfd+2qw2mB*8c(A#om7mYMG2w-J!fIf3d{(m~JG|4?E!^eQ@< zG>B4wSf|pVVY1eHV>*z7>2#kYUukyZ&%}wRA#r`^9$M z8u~8XU(!na;wvnqe+=On1eIC7@x{yjh~py!LkcTI(JLtE9f3~}hqj>?BM^DPI;v0Z z=;7x(fCc{4`=F!m{vI2d|J1jL(+=ZD)ybX;8Tccj%A9MD|f zt*=|{A06?0VAl8xg+D4Tvhwj=gVdm*a;+-RG`GCbY22dp6Nq941_o)XI?F>ArIjZF zcfS5w2D%6`%2%}aeQ9WDP-tjqNdA$h*KiR70|VR!uk3G1mRgJYl4RhpAZG5#zpm>YW${lH$M~z zkN$0+k~6GTd@&jrxMkG=V~OR&c#7#Wa5)?Iqy>LY2&LeA1^(Yp2{Y5v>r;NzF|vZN z>ws2zVZ*sA@##VEcWCbNKX>6WsR$=0CpdtUH=^xtHszF#4?n;52>|=*H8lFl^z@oJ%#ekH`&-^fUwgP2SNZ3`q|J&C{*lZ}szhyaAg+5iG0myg;^+Vr4$O;q*7G{@4X*6wq-Ral*gI70hJOXP$V6o{y%o9ic z_-Qc65kqjGgXe?DxF7$O3Ght3yv`^cO~Y@uKm$o|Js8;~3=C2usOz#+l$HG<*8UkN zRCCs=#RCE92`;TgMOe$_bK`D%I;Hb7FhR7hzw!$?ttXLk>b(Zv9|2TeB#@M`Uz0+5a<491Lc;THS&F2SpTzuvTBI95o7%#@;eVh zsS?;RVH%3RLocLtJ2!ZZNvXhVf6;Db5MT7<@>K$Y>8Wl!eEhCC#2gh$B^0z=#-WGq zbe^oj?Y@}UpFZ3djZuD<2Zl{N@Imjc=>LZvT;X5ZGL)@9`{A=8VVk`NW^?Hug&#} z>E%v8ELv?U*E}T4oK^VyyMqS)o1H;H116@Xe`ss~O5=$tAxQi=Uv$&MbbrtbkBrot zu73kz=pF!Hn)HWP&{TTk;N^HtabHJAhwU7MP`ZxI01PYWX@KU+T5#!rU>^!ilXSGS zXyBT)D4l?64%VnAvr^i#v(uAD($eC3dUO{rLU%Y|v;^Q*g8ShM#N7?me&_1$A+qF9``A+lUC+RSjaaMmL0&+40@T1A|KqJ_ zVM;VBH~}FAA&7IdO36CLzypT#UP4+tvnvEx83-L9Q{Z9~>qrpVfOH6PI77<90r-wW zK}SZ}a&F~1cr>naac#P`=|Cyg*(r~-($dqXa8r*dk}SoQoqj$;p`OGN1VZztkTy&P z-SVQtiE8Z(+kf3eWbELI0!ObOB<7Ij29bhy>cQp(6qNIMLuL@Xt;zrC*g!sb%t;VU z(CzTf($W$vHeDBC@XB5Gt6N>;r3fK5_c%RvMsz5!7QTA7C_r)9Zgse?NC#XQ{Eojz z&Eoz=Q7FsqSYXqRb>YK=W!dcLEQqF@Q&K{V6xpxw)B#3_8e)W8?}CgHoH)}ID#en@ 
zEjTeG2ditG2AZU?u3Yh}t`-Rh43z00At3>rtURHt9COzg;i30H%zJ<|7EAM`Bdaou zAGFJ?&>)>|udS^+2!;5hDl0o<1_2tH+JUjpw-qwPqkr61gJGqdcV&A(OaNf?-z$zbF`yr76Dp4+rP38oEt>PSW0j}6*U z{rCfa_J55OK@Bh!Q_$lb&0|SLarpw`=7xe-Kl$Nb#d2dS^nmMwf`L#+PJ0OfI?h|A<(}A(f=ZJW`i?TL^WIe)nn=T}qmtJ>m1Sy4iDx~Ws4Q?0VTPYVC0qbq>cN$P2SoM?R{KD{J? z|9$`I4lkfbqYkeQ9EY~RsRZxV{^ywmhoIX}P#`yJwi12?@2{N)if?*Z$kEK751nYh z_(yQtW;kYlhI|^KXd)f&MdWGZSFJ7~XC0vZ9hblkK|MXZ@E4p^LHo=V{FOZhco)#M z%zwWh8BGs8-cKw>xB`v8-zc3$Py@}(H`Vh{Q0Er3-M?5~atJM%$PzAQK?BbS<6X<~ zAk#13h(!)sqD)I{%=$mWAYojaitMBrP-oiva{Hx{iz8u@1Yo2xzm0>tFE zi^U_B)s*0z{|=I#=K3NzWG^&#?p%e+7fFFtF0sbn6!9zTT}65BR99RR3HZG*?1%hA(OlNE%A94N)S17#Bn5WPb#nA~Gt&~Mim76Q1d#A4Y@&D!= z1SBe7E*%k|2w8IzQLEl>K;Fe6{@?+UvN}z_VX~TS#y1=PI<&eA97@{IPsrEVTtr|W z&{=k7V&s7p4VYa3x3qa%8=J|swNNNenSGSZA?r>X;*T%s@~ocmz8c?^IGy`S5}4Hy4?03T0YF8{O9j^Sj4ThE){GIm^{5+tQ|+g1qFJO5wyYqnc>l#@jU zEcWNbjtqFedWALLyfF!C5EB(?B7?7t5dHl#m_{H|Gzj^mgQP+*hDJ%O6Wdc?eR3jDVwGR1(f z_%A-Huz5O;TKjcT4krQVAf2sp?weu6P0$EUak`x@f}H4pq!dWa`p@_-n;`hY>`!|z zRJ2fJH-LQu43fv^z(Kw+QC*$i9?7PKocMrrqo8iNA3I&55*$=#WCRr*k;i|R&0L1e zAKCtO2(<_Ge-^h&GNn5q780FJ`uIJO6>eTS<_m=+ruT&_nDP^D(DCl|{?jIbWp&)sgjd+_fCDsWM52MJ2cpYW| zcm>Pw@bDy{UU*;b-*Wgp`UY0}%3%I|8+=LP9U?{V?{h2mvYt3`hAP-xUj&3>`#`mU~DdZ3u_?1Z1}G~q)Ao6rb2(c{!xKYG8E zBIs%5Llf-?u8S0;)ecu4uno})?f#UL)BPDsJ*^!qOh`4KMlum1$b{GmfS_*$D5 zZd3|b)SMmBYI(K&>@N*R&QN)1BP@u)Swz=?+X>N%P< z*7BT4RbB;$pFI-mytEj^Uw81I44tIDA4Jvs5DO+|@`TWP4=RQ7{zNfc=OQH=OydNM zWVx;BUbU^xy~Rs$nc2Ir;vT&_@i9nXf2G!ZoI#qH$Un2Fr(JTcsV(m}%yso#-u57z zjS(BnoOCgV?A$)IN$UQLkCsgOYb@F}Wyuh5t*A?erAS z?sc|Od5g)l46<2|+_cLi^m5WP74*0rj;>Yvm`YE_{1l|dx9k+yM?Ng!wCc54V9*w- z2BT?%NGW`@avN|5A+({o`?k0PM>jD0Y^mIZrndLo<3B{aN-q;n<#!M>WO-yfRNz)&&#a^Jo@J#bQmEZ2)dC+z|3 zOAseo8fetq0}Rn{8VWr>GY>K4ZrUzIPO0zJa-wRI&tB4Tqm4e@XG2Z4S^j(+_&H`# zed8J5!o^!JI^pz!4A@jwAfObq-D5AiO!LmKoPw&L@?&d9|8(NP)0y;hl^;!;o_)?I zU&jp1#gqkuW~PchN|Mch&GY+-KAq0$>G2%`H8(hv#-{S8c;Q*OO{h7)|9LHu? 
zbC`@3!+ALBpH&$=A#(i`Zk1)eX~?;aY0jscJGfzM&bgc&cMa2OUSrZH+pbIWQ2=0_4Kx-J!c-2GXp0 zXhY;QXbop&kLFO?QL$pt;jTp=zWS3 zk65s|2JQOi<*`5Oqb$qkh(Z2nxwIIj4E}!BcyK!#Y@Hr{DSbai^zhJDNZ5`~(T(-f)m?pZ@?@>NzvqFcWD2k}t=o)ldCw32^KZ2(FZ@#7<;tMX!uq5jX_4G-c zb{p(~`?3o{ADm>gFj`)?(eqh}31M9z8?J*RvnDcMW&baJ0uG`379epz8y>VC-@?2^ z>IquYhbI$Xoo^!`hv-+JmhysJ5=J`Tk=O*3d{?RziQPgc8dBXrg{h_D{vq2KjXwF; zqSFc_`+_ou-w2?0_%kf-V`zplqh}U9$)2P>p1gqQW{}isQds^?MZ|EM;=zF^Pym9K zhxT`WTdY|%h|wU(puure`-^fO0l4IMX*fCY!GHkZEP_gcd==DW(W1M>r!iR!5%S|RpZK`AUW1A< z-Efk=&cuQR1D5tWA|>^T-PEbL3e(r{=e!9xeWey!)GXmVlgn^)*7LB{D2}Hg$T1(l zP@p`4cBTUkmQ^jc9Yv0a!_^ zHn!47CqZmoWtWpi%e1j#1lyTW9=Jb$<+5F?D9T<`{F#NFH7b4BZSa6P50eQOw>T5W z>Ev){u;_(S*gTK<@N2av&Z!dl|I}Od8^!)@he^A|n(UJ&RKOm|SSNq_1X^5&;mkjK zt;Onsm?Ux*cS4<~^;k15EN~q>YLld4KR>tgUhtknofx!iD%N#7YDS>a($eSz1;4gz zA<7Fi=mf+K{QoJ>|~{s%9v#8yl*|{?0wE} zKfm*wbDi_gxt>3|_U^j6thK(r>wACh&*%Mm-DWa#b5Y@K_vqz>Xhx?_NP)&ldKCLI z|8->HWBx~Q;8bv-o5UXMLoh4T$)ZOFE#=|yPe@3p@eOMkvgO-CP4;N(ukC3{dQs9O zPQ8Ci-{P%2U8{gG=NM`-+S|(-1Hfa@j#v!o+RIX(okwb~pFtP2NW|plyur~-<<-CM zp=3Wj+lVOwepAm;;?}3=3ha*o=8=$_o2!1@$Uv6kdbrAy#~zx7`S%VsB*&O?xr{D4 zUn@T8jxb-ANdZN|qL|al_xt)iAuG<)>EoQ36u^?=IQaKWYL%H?|4`;2iz_cbp9fc7 zL{yaBAijg`tGFK`4;ZqMzz=)}WG00o0`Tb?V#_{05kkrXU&?$#Lie@;!_@;DHKxBl8q7;zAVx5|Af)*`AbIIHIr>V z1-%$4?!A;fHE+?}90ZC;NwcbZfuI2Pn=T6qve##)`ob- zVJ+_(AWuDJg=UVSfq_vBJ8O>vJB>8(P6hC!BLRuahQFbq10B*`AD2G^EA7trtfoGi zzP@XD2oFw^EVJ{-2Nc{&tCoQv$@JkSyN5f@zZc(1J<=3yMu`>Nyq6<)&VaDOXlAMd z?-nek^$aOGd4iI?zO0uoChUwZg(_gyjbOvYyT=K~Q7A-cPZ@cF0L6wpi4-ryHg zm*(!1`83M(sML-H<=luNA~l-ygs;Ht<35Mvmk}S4gEy%50Z(Y?DrY!@T*PbBTUrSM{;^N!?F1X4DVdbdyjv=UlYKNgi5SdM4|P zU8XI178)jPr?17|maw*5UHfoZr7Ml&Y5yB}0lmn>T6P>~N{YRO#7a80Z(3!QuCZ*J zc4S#%-(ht?H&`%Mh`X|RY+p(H-V-_4pe>n*h!trIBh(I+#`6WMucVpHe-*^RbS{aR zt+*>Px}LeS`RhCFNHz016K(Pi)10Fkl3mrdhS@nNERJGhytv8xAb#qTees(M0>`l1 zB&a!}IkY$D`HUi-*&m;upuUX=R;2B;bVfvO$34CJF6&yu(YUR>?x$~*$z0^v)ptu> zoQ`&^-vIF=kV#{H4m~MlJ4f;5hVTSI;}@HCr~S^AJUzPCKoM-HjE+BR!wYpTHc*FjZb(>Od zHJ|HoSU@G>gKw$PMxh3@bFG{q$bX7!%@kVq#=3*Idkw9d1`^8$r37eIfApd;_7)Wr zGmdCX-R8S2#=`6bSNbp6-w&$C&k6GE=pPZw3k7ZiK?n8Yo)3$DKgs4|x{VqU-NA$fFTiCIZiPt_yC6E;2(6ix^Vomho; ze}Vi^=g#zYm^~|v*Z`ZxbXUiTUzB^nT$ow% zYiqKr{~U4p?I^cvBO>{uPtR@0I-1R-olB+t(sl9;C?^tJew&)caiA(HL|#JJ+4xG5 zwoh-=_1*(VBCF!W_5`O{?0!C}^!xI@EB&_5+AX9aG*3UibM9yO*I+ka?#MfjMUv$P zZK?^53ek_=?yAZ$Ze%(c)=1G)u9@gochuUpJ+!MdIapMZ)04)S^CAo%hV~DdgPx6) zaI#|%wwt695UE1+lLgORiVF&iF48eRnD20Tq^`v<1V*OC8)AD){R66Bj(g~qDBxjc zw&9h$9_MXFoZW^EqTlQnU94fm&Z(86Yk8^vx1gd*S+s{M(FV^w@@&Q1w{H_rWI{mn z03C5%CTT#-w2+r5Pv$n%dGBxzx^|}ia(c7j=<*!DZl+oqktk}R@zvqMXXk}H22iMUn!Pk2M zXjZ*_$gru_380>wSMYlF%_^b#E1IiiQ|O|YyJe=Q??SDM&`-#FXITOK_i}9P=h2hU z0_wKUQbGHJImNWCf-p#@o$L{0WeYWH)q{Bx(e&q6plylsoA$yeY*&khlgtf=uU=zSSMc)q;yU^5?9l1`rzE%pXI zaiMCdioTYnrj+1oWSg<2=3W)`U(( zPZLwyqJ|gvpEu9J8WZh|(M9RuDm;SclaFw^&OOF8Fr1oCF?r`u(qu+=~L4$wQ*RgB- zEethdJhootoSl$Z*{Cd|5cdxW2r*iK`cTeg|4c?V=7=pjaRpoFwY`AkswijPzw%A0 zkDq6cd@&5KiEgm7=J6~_)5-VzB;3H{{Db*J|S6RY2tunsx3#tHSzr=`qzgbiz&Y?$&N zYJq?311*|5rp6cpf*e0F@d~ZsXa%_wpAH}lSlC8GgMuhd@T}_^{fZQsHCljKqDa?d zrO)pD^~{z6GqbA)y!K}Xe*j2M#4nZsU!4`Z*KwMfW%oEdUB~(K%mO;!CAM2BEAG;J zDtlHB24CNB=E*&kv>UT;vP>U-81wX%_jN107I*@n(2HZ-0mbD%?nTJFo{^GW1pMLH z%YLnFaRR|?BABRGuH;&5VCOrT%Gi|usqb9e(v%Txvk1LHrZsnfvwR^XT4a2wI~*D_ zhQQKn)^Zhz%lW+G_XcHf&l|)p)SkN95$At9{d(zaQAw9Q|(mK+g`deTp@3{p#($<5K(MDlzvG zbDTtt+5E$mhCAlI`08`3(y&!EficIRXF7p6d#W0v%;UcDOB@Qy*b+zfV3Dc7v01#v zzu%Hmvqk?7*X2sIgmp$QI55FZGsfKUjMz2@U9oH(EKX*4TpV#I*t!?+@?C{%yajRB zs)`z1CogE=VMJcO$&<}|Yqn-*XQ3cf&17@%u|bwtsF6>3 z8r*S9`>6PW$?=jQm3X`F+~4$xzKn4?HSP0IV{+f49ws9^Ya}>2`mLc0$C>DB{1F}& 
zp}XxGKE7|5m-;N^;>~OJt<0dIa1&3tFowwehZ7QULKF0enTv43)AQMf63c+*4k{y{ zQ)N7}_=up@af^&fio51AyUYWjyJaDn)a_4vI`~X;_1hT-0q##hzPR8I4eGZX@9+-S z?g>028Jg82&E@^V_r7|v)s3{OL6-^|9>2U`pdn8r)RL#w9Q$Vx2VCzodkQhk&Z_pD zKhVjq;>10d>guSI+;pop1(-I=+%7Ve!7`|0nXiIXV3Fqo!5KJTDtRMc`ejLPp+IJ4 zuO-C@-Y>py-8A>I`)X!s8;nAAof1Yfx<;&7z1)k!n>)RPE$n zVhoz9Zctq6Wd(CtX&6w|Uknxy+PvA6&Ags_?aspL{$}H5Rcncqx6(VQLQW0rX@{Jm zL+uKkbrsdA;f`(tP36r!V=Ys6=|KB`iUF4KQzaCSRF{ew)jyr{Tc>0CfPY%rd6k~* zlnH(n-5}>Ti{b>UiWoOER)x9sZSYE8UP!({1VTgI-c#8OI57EKxo&@wXm<3{?uxLS z?b+2ii+}aW)Rs=RZKwuz*gm4f=w^J1FlclUzk$W}^;FHz>$rw=w)6rby@jHfMxn!o zGF@izySf`x?*wMqO80!=&W|y;s$g*`T*c^wSIM-7r3JO@<7I4(3BqUO!xiu-#oLro znyC6&n(KF=BBeNngs|?H`I~J@)4Xz5i(9uz>vP)<-lc;h+=b#+T5MIy{uyn@%&k^J z_q;P;q2tPTxzF!y#hlE89>#Wjx$v^- zuR@rBV`pmZlAIc9sR}jY!fm!z_$#>EzaADluI^{t$I()?f2sKgeie1`*^ZIfaPEAf zHhHpYEK4e*-*9_(&>Xx@i#aEwAs&|9lP@)E28J^nt?1+JJml1|Tqb z90?OurdlpJ1v1nJ3~rm`&ePH?Swb0}*9_w_eateCl;l4aQZb^xC`XfGiiQfJB;Zfh z%F6O04-(no`xkv`K2>YXcY9m11V}3hf%BD$-c9+se+fg7iags%aFf8Xtf!r_JaOgJ zeiLi~zEcw!64$k=21P#l8p`K(-juL5_S;Q0YrHD>I>@ZiL!mT6&w|-Cb_9J8te37G z#axIlU03!~0>Mu>f_3ZGB_M_Tc@+K1O8*5*x7bzlAhRkI7#K)`bh~z)VLpRnlZikx z303ddv4ceT(2~M1#mn}_-wV-ADP_;H{L1g+aol~AWHyE0gw zxvrh^MeUMSr7M#(!Yc8uUKF`mZAK@NBx2M_rl~V99MLFek`^mV_?k0bEPA0q*a0m(_@5)Ws8$g`56b< zg39<$IKkbiSj?AUUbr5o4axVFyC{iZt68n2^gB%vaTFqaMTLYw3CjNv2nNc_%lX@} zMSFZCZ_UPy^N{n+Dc5h4&Za)dIC(mh;u;;7aOQ$`pt(^C|F=H3I!*E6`YT$j!Is?{ z2LqFaxXrCfhkh-nWVr$5yY{`SwWpGm8K!+~T<6*P!qU?Po*`3isw$3A-&Rgk(qg76 ze0w)%;Rx%$kL)R7PP0X z{l~UnY88mP+a!zriOhXLPEI=dfdcIonXpJ<3sLz)OjNYApH-kG;3vTD>|5N$Gd8?4 zUeqfL!%m$Ai8@XveG}JxNv*ISm~7X$TJEcuYM|M%)b9P&i0{v)HeM~C8g2&t>$L>W3Lf=D5!o4xfhG=-_$VnK+dl86i-2GH@$;0@?bIJ~SF?({6<*Dmn_!ze zx&Jx&c|NA!?WR*0rR$TlCsaP*y@D=Zj<={viH|qTd!4RO>UO7)UFgC*H7-H6 z;VnPg%zKa-%O$LBg$G-eop5rWVV zV*KY{NSaZ1VEprXae9g4lUQT%dAku?8IrLm^0~9<(t@sKX_=W;xAr&dkbh{HiY_BY z7;zF8G;%q{7XEQAP(#_KghPQ*_?Q#U!}-g8(u99Q1dUqqIN2j2BG5qG_DpJEy)*Dd z%&1!}ehq2(*5$sHfQNt3ga*cSrLTrsAILRY;uVF3D*p&y8e+>$77m1L6{uG;R{R-t zoZEL$T#p@Jb7a$<|Fewxi0Xe7{8L^bS!Js#n!@|{{2lffR=fn#qT%@I=$l7&Qhwz9G^*rrBWsORQ7EK&nC zR@2;=ob3VuM-vX*c;WxlNGn)%w69TnO{S2|rhNLV5@L>|>~vILyn zWiP?|Ax_PQuEl7%#*iaQ5n)kLEdr9;AfH@0oN2oQqJQ-q%lqXyzF);Ihlj5%9qZ;8 z9Bp?Cu2Y(*eR%{uVvw&!SUH1{1PcNWWex_&JZ9&~=P!>S4Q2ynxDEEK-H4L$wOaYz zCjs$|N>@z1fvl^c=|ZI1mA?-Xb=IFK(I3$lTM+r7aH`1NFs^;!LJY_4kJM$NtzPz(W7SwAqT1juEG$F_H_ zk+;~UOCvlt4)h9MbhNp!A-c))=F>PS^=MP$n4*6?=00V7=+1}BH>1nTg8 z3PhlgG`#1p@h4|F&89F<_FLNCfci!GQC*!rLe{F(H!{+=LkIEBUyXG5eCyl@$W_GO zTCwKXadwdmy&pr7WdU&3MLieLR~YmsBMcrt;rrOr1vmYbl6=(q>4=G?6c;7i3KBu6dg}Ad`K595s1k(_acBYifK2Rm?f@4L% zc=1DCN3dVugsciat8JFbCqD77TzoM-d-v|$IO@fT(f9vUAXOzxSQQtR2!-NktkdHk zaDEYP3LDH6iie-?WXp{+{>?ZQ+H`V=2+kC7n0Xr*2zMcUThQCsyAkpJggyVH5&uae z{*y-hCyn@jlSbTh3?#oTI^qN*ylf7-WUqPIcykVhYx7W*U`?7!paIf;Mrm`Z!~vj^ zIG&5Eg-f_e=?X4lACji`?kQmKP87!^6`)AnCM=v&!K)(NU#03&=u&-jv78Mo9vM!wKjNb6!K??-Rz3f*?`_5Ddgrd_CTtC z0w;waHAQ(qJ{0jO(;&wF4R{mrnxm2t5f^va*Fi$@=)S)!@f0g`?F_2kv!3?WT^J zf>eG;hGCa{2&Yw&6PG?VXArAD3ptb76f#UFoS(MjeSg@JsiUV*JKvies`=@0e>hKw zDCNNU?f?QaSMvS{x8~6|nTb9CcOYy^XXpl$UWc$BfGsD* z)kauYZ03H}0?~-$EXXUZgq#71G!l$N=zGz|(0VA~ zcJB=S?E7fHQMkmyx4WBd3t_c9jDON^ME(NY;eVR#t^F}?iqd~penp1@4mqZ1s_Us` It6HD^7v^)PWB>pF diff --git a/baselines/depthfl/outputs/2023-09-05/17-39-22/.hydra/config.yaml b/baselines/depthfl/outputs/2023-09-05/17-39-22/.hydra/config.yaml deleted file mode 100644 index e79db86678b5..000000000000 --- a/baselines/depthfl/outputs/2023-09-05/17-39-22/.hydra/config.yaml +++ /dev/null @@ -1,35 +0,0 @@ -num_clients: 100 -num_epochs: 5 -batch_size: 50 -num_rounds: 1000 -fraction: 0.1 
-learning_rate: 0.1 -learning_rate_decay: 0.998 -static_bn: true -exclusive_learning: true -model_size: 3 -client_resources: - num_cpus: 1 - num_gpus: 0.5 -server_device: cuda -dataset_config: - iid: true -fit_config: - feddyn: false - kd: false - alpha: 0.1 - extended: false - drop_client: false -model: - _target_: depthfl.resnet_hetero.resnet18 - n_blocks: 4 - num_classes: 100 - scale: false -strategy: - _target_: depthfl.strategy_hetero.HeteroFL - fraction_fit: 1.0e-05 - fraction_evaluate: 0.0 - min_evaluate_clients: 0 - evaluate_metrics_aggregation_fn: - _target_: depthfl.strategy.weighted_average - _partial_: true diff --git a/baselines/depthfl/outputs/2023-09-05/17-39-22/.hydra/hydra.yaml b/baselines/depthfl/outputs/2023-09-05/17-39-22/.hydra/hydra.yaml deleted file mode 100644 index 2a1ae68a42ab..000000000000 --- a/baselines/depthfl/outputs/2023-09-05/17-39-22/.hydra/hydra.yaml +++ /dev/null @@ -1,157 +0,0 @@ -hydra: - run: - dir: outputs/${now:%Y-%m-%d}/${now:%H-%M-%S} - sweep: - dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][HYDRA] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: simple - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - loggers: - logging_example: - level: DEBUG - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: simple - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: RUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=RUN - task: - - exclusive_learning=true - - model_size=3 - - model.scale=false - job: - name: main - chdir: null - override_dirname: exclusive_learning=true,model.scale=false,model_size=3 - id: ??? - num: ??? 
- config_name: heterofl - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/peterpan/flower/baselines/depthfl - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: /home/peterpan/flower/baselines/depthfl/depthfl/conf - schema: file - provider: main - - path: '' - schema: structured - provider: schema - output_dir: /home/peterpan/flower/baselines/depthfl/outputs/2023-09-05/17-39-22 - choices: - hydra/env: default - hydra/callbacks: null - hydra/job_logging: default - hydra/hydra_logging: default - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/baselines/depthfl/outputs/2023-09-05/17-39-22/.hydra/overrides.yaml b/baselines/depthfl/outputs/2023-09-05/17-39-22/.hydra/overrides.yaml deleted file mode 100644 index ee6825129f33..000000000000 --- a/baselines/depthfl/outputs/2023-09-05/17-39-22/.hydra/overrides.yaml +++ /dev/null @@ -1,3 +0,0 @@ -- exclusive_learning=true -- model_size=3 -- model.scale=false From 92a10c6b3ca8feb2490c5d100438c644857b4a29 Mon Sep 17 00:00:00 2001 From: Peterpan828 Date: Wed, 6 Sep 2023 15:15:35 +0900 Subject: [PATCH 03/51] update readme --- baselines/depthfl/README.md | 137 ++++++++++++++++++++++++++---------- 1 file changed, 101 insertions(+), 36 deletions(-) diff --git a/baselines/depthfl/README.md b/baselines/depthfl/README.md index 6ad0e85aceb6..0036b0355ff3 100644 --- a/baselines/depthfl/README.md +++ b/baselines/depthfl/README.md @@ -1,7 +1,7 @@ --- title: DepthFL:Depthwise Federated Learning for Heterogeneous Clients url: https://openreview.net/forum?id=pf8RIZTMU58 -labels: [image classification, cross-device, system heterogeneity] # please add between 4 and 10 single-word (maybe two-words) labels (e.g. "system heterogeneity", "image classification", "asynchronous", "weight sharing", "cross-silo") +labels: [image classification, system heterogeneity] # please add between 4 and 10 single-word (maybe two-words) labels (e.g. "system heterogeneity", "image classification", "asynchronous", "weight sharing", "cross-silo") dataset: [CIFAR100] # list of datasets you include in your baseline --- @@ -18,20 +18,20 @@ dataset: [CIFAR100] # list of datasets you include in your baseline ## About this baseline -****What’s implemented:**** : The code in this directory replicates the experiments in DepthFL: Depthwise Federated Learning for Heterogeneous Clients (Kim et al., 2023) for CIFAR100, which proposed the DepthFL algorithm. Concretely, it replicates the results for CIFAR100 in Table 2,3 and 4. +****What’s implemented:**** The code in this directory replicates the experiments in DepthFL: Depthwise Federated Learning for Heterogeneous Clients (Kim et al., 2023) for CIFAR100, which proposed the DepthFL algorithm. Concretely, it replicates the results for CIFAR100 dataset in Table 2,3 and 4. -****Datasets:**** : CIFAR100 from PyTorch's Torchvision +****Datasets:**** CIFAR100 from PyTorch's Torchvision -****Hardware Setup:**** : These experiments were run on a server with Nvidia 3090 GPUs. Any machine with 1x 8GB GPU or more would be able to run it in a reasonable amount of time. +****Hardware Setup:**** These experiments were run on a server with Nvidia 3090 GPUs. Any machine with 1x 8GB GPU or more would be able to run it in a reasonable amount of time. 
-****Contributors:**** : Minjae Kim +****Contributors:**** Minjae Kim ## Experimental Setup -****Task:**** : Image Classification +****Task:**** Image Classification -****Model:**** : ResNet18 with additional bottleneck layers +****Model:**** ResNet18 **Dataset:** This baseline only includes the CIFAR100 dataset. By default it will be partitioned into 100 clients following IID distribution. The settings are as follow: @@ -44,54 +44,119 @@ The following table shows the main hyperparameters for this baseline with their | Description | Default Value | | ----------- | ----- | -| total clients | 1000 | -| clients per round | 10 | -| number of rounds | 100 | -| client resources | {'num_cpus': 2.0, 'num_gpus': 0.0 }| -| data partition | pathological with power law (2 classes per client) | -| optimizer | SGD with proximal term | -| proximal mu | 1.0 | -| stragglers_fraction | 0.9 | +| total clients | 100 | +| local epoch | 5 | +| batch size | 50 | +| number of rounds | 1000 | +| participation ratio | 10% | +| learning rate | 0.1 | +| learning rate decay | 0.998 | +| client resources | {'num_cpus': 1.0, 'num_gpus': 0.5 }| +| data partition | IID | +| optimizer | SGD with dynamic regularization | +| alpha | 0.1 | ## Environment Setup -:warning: _The Python environment for all baselines should follow these guidelines in the `EXTENDED_README`. Specify the steps to create and activate your environment. If there are any external system-wide requirements, please include instructions for them too. These instructions should be comprehensive enough so anyone can run them (if non standard, describe them step-by-step)._ +To construct the Python environment follow these steps: +```bash +# install the base Poetry environment +poetry install -## Running the Experiments +# activate the environment +poetry shell -:warning: _Provide instructions on the steps to follow to run all the experiments._ -```bash -# The main experiment implemented in your baseline using default hyperparameters (that should be setup in the Hydra configs) should run (including dataset download and necessary partitioning) by executing the command: +# install PyTorch with GPU support. +pip install torch==1.13.1+cu116 torchvision==0.14.1+cu116 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu116 +``` -poetry run -m .main # where is the name of this directory and that of the only sub-directory in this directory (i.e. where all your source code is) -# If you are using a dataset that requires a complicated download (i.e. not using one natively supported by TF/PyTorch) + preprocessing logic, you might want to tell people to run one script first that will do all that. Please ensure the download + preprocessing can be configured to suit (at least!) a different download directory (and use as default the current directory). The expected command to run to do this is: +## Running the Experiments + +To run this DepthFL, first ensure you have activated your Poetry environment (execute `poetry shell` from this directory), then: -poetry run -m .dataset_preparation +```bash +python -m depthfl.main # this will run using the default settings in the `conf/config.yaml` -# It is expected that you baseline supports more than one dataset and different FL settings (e.g. different number of clients, dataset partitioning methods, etc). Please provide a list of commands showing how these experiments are run. Include also a short explanation of what each one does. Here it is expected you'll be using the Hydra syntax to override the default config. 
+python -m depthfl.main exclusive_learning=true model_size=1 # exclusive learning - 100% (a)
+python -m depthfl.main exclusive_learning=true model_size=4 # exclusive learning - 25% (d)
+python -m depthfl.main fit_config.feddyn=false fit_config.kd=false # DepthFL (FedAvg)
+python -m depthfl.main fit_config.feddyn=false fit_config.kd=false fit_config.extended=false # InclusiveFL
+```
+
+To run using HeteroFL:
+```bash
+# since static batch normalization (sbn) takes too long, we test the global model every 50 rounds.
+python -m depthfl.main --config-name="heterofl" # HeteroFL
+python -m depthfl.main --config-name="heterofl" exclusive_learning=true model_size=1 # exclusive learning - 100% (a)

-poetry run -m .main
-.
-.
-.
-poetry run -m .main
 ```
 
 ## Expected Results
 
-:warning: _Your baseline implementation should replicate several of the experiments in the original paper. Please include here the exact command(s) needed to run each of those experiments followed by a figure (e.g. a line plot) or table showing the results you obtained when you ran the code. Below is an example of how you can present this. Please add command followed by results for all your experiments._
+With the following commands we run DepthFL (FedDyn / FedAvg), InclusiveFL, and HeteroFL to replicate the results of Tables 2, 3 and 4 in the DepthFL paper.
 
 ```bash
-# it is likely that for one experiment you need to sweep over different hyperparameters. You are encouraged to use Hydra's multirun functionality for this. This is an example of how you could achieve this for some typical FL hyperparameteres
+python -m depthfl.main --config-name="heterofl"
+python -m depthfl.main --config-name="heterofl" exclusive_learning=true model_size=1 model.scale=false
+python -m depthfl.main --config-name="heterofl" exclusive_learning=true model_size=2 model.scale=false
+python -m depthfl.main --config-name="heterofl" exclusive_learning=true model_size=3 model.scale=false
+python -m depthfl.main --config-name="heterofl" exclusive_learning=true model_size=4 model.scale=false
+
+python -m depthfl.main fit_config.feddyn=false fit_config.kd=false
+python -m depthfl.main fit_config.feddyn=false fit_config.kd=false exclusive_learning=true model_size=1
+python -m depthfl.main fit_config.feddyn=false fit_config.kd=false exclusive_learning=true model_size=2
+python -m depthfl.main fit_config.feddyn=false fit_config.kd=false exclusive_learning=true model_size=3
+python -m depthfl.main fit_config.feddyn=false fit_config.kd=false exclusive_learning=true model_size=4
+
+python -m depthfl.main
+python -m depthfl.main exclusive_learning=true model_size=1
+python -m depthfl.main exclusive_learning=true model_size=2
+python -m depthfl.main exclusive_learning=true model_size=3
+python -m depthfl.main exclusive_learning=true model_size=4
+
+python -m depthfl.main fit_config.feddyn=false fit_config.kd=false fit_config.extended=false
+
+python -m depthfl.main fit_config.kd=false
+```

-poetry run -m .main --multirun num_client_per_round=5,10,50 dataset=femnist,cifar10
-# the above command will run a total of 6 individual experiments (because 3client_configs x 2datasets = 6 -- you can think of it as a grid).
+The above commands would generate results in DepthFL paper. The numbers below are the results of a single run, and although they do not perfectly match the numbers recorded in the paper, they are very close.
+
+**Table 2**
+
+The 100% (a), 75% (b), 50% (c), and 25% (d) cases are exclusive learning scenarios; the sketch below shows how each scenario is derived.
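+A minimal sketch of that derivation, mirroring the federation-resizing logic in `depthfl/main.py`; the loop and variable names here are illustrative only, and `num_clients: 100` is the default from `conf/config.yaml`:
+
+```python
+# Exclusive learning keeps only the clients of one capacity tier and gives
+# every remaining client the same model depth (n_blocks == model_size).
+for model_size in (1, 2, 3, 4):
+    num_clients = 100 - (model_size - 1) * (100 // 4)  # 100, 75, 50, 25 clients
+    print(f"model_size={model_size}: {num_clients} clients, n_blocks={model_size}")
+```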
+100% (a) exclusive learning means that the global model and every local model are equal to the smallest local model, and 100% of clients participate in learning. Likewise, 25% (d) exclusive learning means that the global model and every local model are equal to the largest local model, and only 25% of clients participate in learning.
+
+| Scaling Method | Dataset | Global Model | 100% (a) | 75% (b) | 50% (c) | 25% (d) |
+| :---: | :---: | :---: | :---: | :---: | :---: | :---: |
+| HeterFL | CIFAR100 | 57.61 | 64.39 | 66.08 | 62.03 | 51.99 |
+| DepthFL (FedAvg) | CIFAR100 | 72.67 | 67.08 | a | a | a |
+| DepthFL | CIFAR100 | a | a | a | a | a |
+
+**Table 3**
+
+Accuracy of global sub-models compared to exclusive learning on CIFAR-100.
+
+| Method | Algorithm | Classifier 1/4 | Classifier 2/4 | Classifier 3/4 | Classifier 4/4 |
+| :---: | :---: | :---: | :---: | :---: | :---: |
+| Width Scaling | Exclusive Learning | a | a | a | a |
+| Width Scaling | HeteroFL | a | a | a | a |
+
+| Method | Algorithm | Classifier 1/4 | Classifier 2/4 | Classifier 3/4 | Classifier 4/4 |
+| :---: | :---: | :---: | :---: | :---: | :---: |
+| Depth Scaling | Exclusive Learning | a | a | a | a |
+| Depth Scaling | InclusiveFL | a | a | a | a |
+| Depth Scaling | DepthFL (FedAvg)| a | a | a | a |
+
+**Table 4**
+
+Accuracy of the global model with/without self distillation on CIFAR-100.
+
+| Distribution | Dataset | KD | Classifier 1/4 | Classifier 2/4 | Classifier 3/4 | Classifier 4/4 |
+| :---: | :---: | :---: | :---: | :---: | :---: | :---: |
+| IID | CIFAR-100 | ✗ | a | a | a | a |
+| IID | CIFAR-100 | ✓ | a | a | a | a |

-[Now show a figure/table displaying the results of the above command]

-# add more commands + plots for additional experiments.
-```

From 3ea5aedbe62440dc13ceaa77cb600b91e2e700ff Mon Sep 17 00:00:00 2001
From: Peterpan828
Date: Fri, 8 Sep 2023 11:59:11 +0900
Subject: [PATCH 04/51] before rollback

---
 baselines/depthfl/README.md | 22 +-
 baselines/depthfl/depthfl/__init__.py | 2 +-
 baselines/depthfl/depthfl/client.py | 80 +++----
 baselines/depthfl/depthfl/dataset.py | 4 +-
 .../depthfl/dataset_preparation.py | 55 +++--
 baselines/depthfl/depthfl/main.py | 43 ++--
 baselines/depthfl/depthfl/models.py | 81 ++++---
 baselines/depthfl/depthfl/ray_client_proxy.py | 21 +-
 baselines/depthfl/depthfl/resnet.py | 224 ++++++++++++------
 baselines/depthfl/depthfl/resnet_hetero.py | 179 ++++++++++----
 baselines/depthfl/depthfl/server.py | 57 +++--
 baselines/depthfl/depthfl/simulation.py | 2 +-
 baselines/depthfl/depthfl/strategy.py | 80 ++++---
 baselines/depthfl/depthfl/strategy_hetero.py | 72 +++---
 baselines/depthfl/depthfl/typing.py | 9 +-
 baselines/depthfl/depthfl/utils.py | 6 +-
 16 files changed, 578 insertions(+), 359 deletions(-)

diff --git a/baselines/depthfl/README.md b/baselines/depthfl/README.md
index 0036b0355ff3..f00a98981199 100644
--- a/baselines/depthfl/README.md
+++ b/baselines/depthfl/README.md
@@ -132,8 +132,8 @@ The above commands would generate results in DepthFL paper.
The numbers below ar | Scaling Method | Dataset | Global Model | 100% (a) | 75% (b) | 50% (c) | 25% (d) | | :---: | :---: | :---: | :---: | :---: | :---: | :---: | | HeterFL | CIFAR100 | 57.61 | 64.39 | 66.08 | 62.03 | 51.99 | -| DepthFL (FedAvg) | CIFAR100 | 72.67 | 67.08 | a | a | a | -| DepthFL | CIFAR100 | a | a | a | a | a | +| DepthFL (FedAvg) | CIFAR100 | 72.67 | 67.08 | 70.78 | 68.41 | 59.17 | +| DepthFL | CIFAR100 | 76.06 | 69.68 | 73.21 | 70.29 | 60.32 | **Table 3** @@ -141,22 +141,22 @@ Accuracy of global sub-models compared to exclusive learning on CIFAR-100. | Method | Algorithm | Classifier 1/4 | Classifier 2/4 | Classifier 3/4 | Classifier 4/4 | | :---: | :---: | :---: | :---: | :---: | :---: | -| Width Scaling | Exclusive Learning | a | a | a | a | -| Width Scaling | HeteroFL | a | a | a | a | +| Width Scaling | Exclusive Learning | 64.39 | 66.08 | 62.03 | 51.99 | +| Width Scaling | HeteroFL | 51.08 | 55.89 | 58.29 | 57.61 | | Method | Algorithm | Classifier 1/4 | Classifier 2/4 | Classifier 3/4 | Classifier 4/4 | | :---: | :---: | :---: | :---: | :---: | :---: | -| Depth Scaling | Exclusive Learning | a | a | a | a | -| Depth Scaling | InclusiveFL | a | a | a | a | -| Depth Scaling | DepthFL (FedAvg)| a | a | a | a | +| Depth Scaling | Exclusive Learning | 67.08 | 68.00 | 66.19 | 56.78 | +| Depth Scaling | InclusiveFL | 47.61 | 53.88 | 59.48 | 60.46 | +| Depth Scaling | DepthFL (FedAvg)| 66.18 | 67.56 | 67.97 | 68.01 | **Table 4** Accuracy of the global model with/without self distillation on CIFAR-100. -| Distribution | Dataset | KD | Classifier 1/4 | Classifier 2/4 | Classifier 3/4 | Classifier 4/4 | -| :---: | :---: | :---: | :---: | :---: | :---: | :---: | -| IID | CIFAR-100 | ✗ | a | a | a | a | -| IID | CIFAR-100 | ✓ | a | a | a | a | +| Distribution | Dataset | KD | Classifier 1/4 | Classifier 2/4 | Classifier 3/4 | Classifier 4/4 | Ensemble | +| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | +| IID | CIFAR-100 | ✗ | 70.13 | 69.63 | 68.92 | 68.92 | 74.48 | +| IID | CIFAR-100 | ✓ | 71.74 | 73.35 | 73.57 | 73.55 | 76.06 | diff --git a/baselines/depthfl/depthfl/__init__.py b/baselines/depthfl/depthfl/__init__.py index bd0e1cd09cc0..5455ba674372 100644 --- a/baselines/depthfl/depthfl/__init__.py +++ b/baselines/depthfl/depthfl/__init__.py @@ -1,4 +1,4 @@ """Template baseline package.""" from .typing import FitIns as FitIns -from .typing import FitRes as FitRes \ No newline at end of file +from .typing import FitRes as FitRes diff --git a/baselines/depthfl/depthfl/client.py b/baselines/depthfl/depthfl/client.py index 2d0185dacc2a..ccc0c6d75d09 100644 --- a/baselines/depthfl/depthfl/client.py +++ b/baselines/depthfl/depthfl/client.py @@ -1,36 +1,32 @@ """Defines the DepthFL Flower Client and a function to instantiate it.""" import copy -import torch -import numpy as np -import flwr as fl from collections import OrderedDict from typing import Callable, Dict, List, Tuple, Union -from hydra.utils import instantiate -from omegaconf import DictConfig -from torch.utils.data import DataLoader -from flwr.common import ( - ndarrays_to_parameters, - parameters_to_ndarrays, -) - -from flwr.common.typing import NDArrays, Scalar, Status, Code +import flwr as fl +import numpy as np +import torch from flwr.client import Client from flwr.client.app import ( - numpyclient_has_get_properties, - numpyclient_has_get_parameters, - numpyclient_has_fit, - numpyclient_has_evaluate, - _get_properties, - _get_parameters, - _evaluate, _constructor, + _evaluate, + _get_parameters, + 
_get_properties, + numpyclient_has_evaluate, + numpyclient_has_fit, + numpyclient_has_get_parameters, + numpyclient_has_get_properties, ) from flwr.client.numpy_client import NumPyClient +from flwr.common import ndarrays_to_parameters, parameters_to_ndarrays +from flwr.common.typing import Code, NDArrays, Scalar, Status +from hydra.utils import instantiate +from omegaconf import DictConfig +from torch.utils.data import DataLoader -from depthfl.models import test, train from depthfl import FitIns, FitRes +from depthfl.models import test, train EXCEPTION_MESSAGE_WRONG_RETURN_TYPE_FIT = """ NumPyClient.fit did not return a tuple with 3 elements. @@ -41,17 +37,18 @@ ClientLike = Union[Client, NumPyClient] -def prune(state_dict, param_idx): - """prune width of DNN (for HeteroFL)""" +def prune(state_dict, param_idx): + """Prune width of DNN (for HeteroFL).""" ret_dict = {} for k in state_dict.keys(): - if 'num' not in k: + if "num" not in k: ret_dict[k] = state_dict[k][torch.meshgrid(param_idx[k])] else: ret_dict[k] = state_dict[k] return copy.deepcopy(ret_dict) + class FlowerClient( fl.client.NumPyClient ): # pylint: disable=too-many-instance-attributes @@ -79,18 +76,19 @@ def __init__( # for HeteroFL for k in state_dict.keys(): - self.param_idx[k] = [torch.arange(size) for size in state_dict[k].shape] # store client's weights' shape (for HeteroFL) - + self.param_idx[k] = [ + torch.arange(size) for size in state_dict[k].shape + ] # store client's weights' shape (for HeteroFL) def get_parameters(self, config: Dict[str, Scalar]) -> NDArrays: """Returns the parameters of the current net.""" return [val.cpu().numpy() for _, val in self.net.state_dict().items()] - + def set_parameters(self, parameters: NDArrays) -> None: """Changes the parameters of the model using the given ones.""" params_dict = zip(self.net.state_dict().keys(), parameters) state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) - self.net.load_state_dict(prune(state_dict, self.param_idx), strict=True) + self.net.load_state_dict(prune(state_dict, self.param_idx), strict=True) def fit( self, parameters: NDArrays, prev_grads: Dict, config: Dict[str, Scalar] @@ -112,11 +110,11 @@ def fit( self.trainloader, self.device, epochs=num_epochs, - learning_rate=self.learning_rate * self.learning_rate_decay ** curr_round, + learning_rate=self.learning_rate * self.learning_rate_decay**curr_round, feddyn=config["feddyn"], kd=config["kd"], consistency_weight=consistency_weight, - prev_grads = prev_grads, + prev_grads=prev_grads, alpha=config["alpha"], extended=config["extended"], ) @@ -129,7 +127,11 @@ def evaluate( """Implements distributed evaluation for a given client.""" self.set_parameters(parameters) loss, accuracy, accuracy_single = test(self.net, self.valloader, self.device) - return float(loss), len(self.valloader), {"accuracy": float(accuracy), "accuracy_single":accuracy_single} + return ( + float(loss), + len(self.valloader), + {"accuracy": float(accuracy), "accuracy_single": accuracy_single}, + ) def gen_client_fn( @@ -141,7 +143,7 @@ def gen_client_fn( learning_rate: float, learning_rate_decay: float, models: List[DictConfig], - cfg: DictConfig + cfg: DictConfig, ) -> Tuple[ Callable[[str], FlowerClient], DataLoader ]: # pylint: disable=too-many-arguments @@ -179,7 +181,6 @@ def gen_client_fn( def client_fn(cid: str) -> FlowerClient: """Create a Flower client representing a single organization.""" - # Load model device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") @@ -190,7 +191,7 @@ def 
client_fn(cid: str) -> FlowerClient: # will train and evaluate on their own unique data trainloader = trainloaders[int(cid)] valloader = valloaders[int(cid)] - + return FlowerClient( net, trainloader, @@ -204,14 +205,12 @@ def client_fn(cid: str) -> FlowerClient: return client_fn - def _fit(self: Client, ins: FitIns) -> FitRes: - """Refine the provided parameters using the locally held dataset. - FitIns & FitRes were modified for FedDyn. Fit function gets prev_grads - as input and return the updated prev_grads with updated parameters - """ + FitIns & FitRes were modified for FedDyn. Fit function gets prev_grads as input and + return the updated prev_grads with updated parameters + """ # Deconstruct FitIns parameters: NDArrays = parameters_to_ndarrays(ins.parameters) @@ -233,7 +232,7 @@ def _fit(self: Client, ins: FitIns) -> FitRes: parameters=parameters_prime_proto, prev_grads=prev_grads, num_examples=num_examples, - cid = -1, + cid=-1, ) @@ -262,8 +261,9 @@ def _wrap_numpy_client(client: NumPyClient) -> Client: # Create and return an instance of the newly created class return wrapper_class(numpy_client=client) # type: ignore + def to_client(client_like: ClientLike) -> Client: """Take any Client-like object and return it as a Client.""" if isinstance(client_like, NumPyClient): return _wrap_numpy_client(client=client_like) - return client_like \ No newline at end of file + return client_like diff --git a/baselines/depthfl/depthfl/dataset.py b/baselines/depthfl/depthfl/dataset.py index d1a8cbcd6488..3c3c988bcdbc 100644 --- a/baselines/depthfl/depthfl/dataset.py +++ b/baselines/depthfl/depthfl/dataset.py @@ -47,7 +47,7 @@ def load_datasets( # pylint: disable=too-many-arguments trainloaders = [] valloaders = [] for dataset in datasets: - len_val = 0 + len_val = 0 if val_ratio > 0: len_val = int(len(dataset) / (1 / val_ratio)) lengths = [len(dataset) - len_val, len_val] @@ -56,4 +56,4 @@ def load_datasets( # pylint: disable=too-many-arguments ) trainloaders.append(DataLoader(ds_train, batch_size=batch_size, shuffle=True)) valloaders.append(DataLoader(ds_val, batch_size=batch_size)) - return trainloaders, valloaders, DataLoader(testset, batch_size=batch_size) \ No newline at end of file + return trainloaders, valloaders, DataLoader(testset, batch_size=batch_size) diff --git a/baselines/depthfl/depthfl/dataset_preparation.py b/baselines/depthfl/depthfl/dataset_preparation.py index be12b2829e7d..5b73f531507c 100644 --- a/baselines/depthfl/depthfl/dataset_preparation.py +++ b/baselines/depthfl/depthfl/dataset_preparation.py @@ -1,9 +1,8 @@ from typing import List, Optional, Tuple import numpy as np -import torch import torchvision.transforms as transforms -from torch.utils.data import ConcatDataset, Dataset, Subset, random_split +from torch.utils.data import Dataset, Subset from torchvision.datasets import CIFAR100 @@ -15,20 +14,28 @@ def _download_data() -> Tuple[Dataset, Dataset]: Tuple[CIFAR100, CIFAR100] The dataset for training and the dataset for testing CIFAR100. 
""" - transform_train = transforms.Compose([ - transforms.ToTensor(), - transforms.RandomCrop(32, padding=4), - transforms.RandomHorizontalFlip(), - transforms.Normalize((0.5071, 0.4867, 0.4408), - (0.2675, 0.2565, 0.2761))]) + transform_train = transforms.Compose( + [ + transforms.ToTensor(), + transforms.RandomCrop(32, padding=4), + transforms.RandomHorizontalFlip(), + transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)), + ] + ) - transform_test = transforms.Compose([ - transforms.ToTensor(), - transforms.Normalize((0.5071, 0.4867, 0.4408), - (0.2675, 0.2565, 0.2761))]) + transform_test = transforms.Compose( + [ + transforms.ToTensor(), + transforms.Normalize((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)), + ] + ) - trainset = CIFAR100("./dataset", train=True, download=True, transform=transform_train) - testset = CIFAR100("./dataset", train=False, download=True, transform=transform_test) + trainset = CIFAR100( + "./dataset", train=True, download=True, transform=transform_train + ) + testset = CIFAR100( + "./dataset", train=False, download=True, transform=transform_test + ) return trainset, testset @@ -37,8 +44,8 @@ def _partition_data( iid: Optional[bool] = True, seed: Optional[int] = 41, ) -> Tuple[List[Dataset], Dataset]: - """Split training set into iid or non iid partitions to simulate the - federated setting. + """Split training set into iid or non iid partitions to simulate the federated + setting. Parameters ---------- @@ -58,20 +65,18 @@ def _partition_data( """ trainset, testset = _download_data() - datasets = list() + datasets = [] if iid: - np.random.seed(seed) - num_sample = int(len(trainset)/(num_clients)) - index = [i for i in range(len(trainset))] - for i in range(num_clients): - sample_idx = np.random.choice(index, num_sample, - replace=False) - index = list(set(index)-set(sample_idx)) + num_sample = int(len(trainset) / (num_clients)) + index = list(range(len(trainset))) + for _i in range(num_clients): + sample_idx = np.random.choice(index, num_sample, replace=False) + index = list(set(index) - set(sample_idx)) datasets.append(Subset(trainset, sample_idx)) else: pass - return datasets, testset \ No newline at end of file + return datasets, testset diff --git a/baselines/depthfl/depthfl/main.py b/baselines/depthfl/depthfl/main.py index e6ddcb86a8f1..a3a35acfe30e 100644 --- a/baselines/depthfl/depthfl/main.py +++ b/baselines/depthfl/depthfl/main.py @@ -1,17 +1,19 @@ import copy -import hydra + import flwr as fl +import hydra +from flwr.common import ndarrays_to_parameters +from flwr.server.client_manager import SimpleClientManager from hydra.core.hydra_config import HydraConfig from hydra.utils import instantiate from omegaconf import DictConfig, OmegaConf -from flwr.server.client_manager import ClientManager, SimpleClientManager -from flwr.common import ndarrays_to_parameters from depthfl import client, server, utils -from depthfl.simulation import start_simulation from depthfl.dataset import load_datasets +from depthfl.simulation import start_simulation from depthfl.utils import save_results_as_pickle + @hydra.main(config_path="conf", config_name="config", version_base=None) def main(cfg: DictConfig) -> None: """Run the baseline. @@ -21,7 +23,6 @@ def main(cfg: DictConfig) -> None: cfg : DictConfig An omegaconf object that stores the hydra config. 
""" - print(OmegaConf.to_yaml(cfg)) # partition dataset and get dataloaders @@ -34,16 +35,18 @@ def main(cfg: DictConfig) -> None: # exclusive learning baseline in DepthFL paper # (model_size, % of clients) = (a,100), (b,75), (c,50), (d,25) if cfg.exclusive_learning: - cfg.num_clients = int(cfg.num_clients - (cfg.model_size-1) * (cfg.num_clients // 4)) + cfg.num_clients = int( + cfg.num_clients - (cfg.model_size - 1) * (cfg.num_clients // 4) + ) models = [] for i in range(cfg.num_clients): model = copy.deepcopy(cfg.model) - # each client gets different model depth / width + # each client gets different model depth / width model.n_blocks = i // (cfg.num_clients // 4) + 1 - # In exclusive learning, every client has same model depth / width + # In exclusive learning, every client has same model depth / width if cfg.exclusive_learning: model.n_blocks = cfg.model_size @@ -65,10 +68,12 @@ def main(cfg: DictConfig) -> None: # get function that will executed by the strategy's evaluate() method # Set server's device device = cfg.server_device - + # Static Batch Normalization for HeteroFL if cfg.static_bn: - evaluate_fn = server.gen_evaluate_fn_hetero(trainloaders, testloader, device=device, model_cfg=model) + evaluate_fn = server.gen_evaluate_fn_hetero( + trainloaders, testloader, device=device, model_cfg=model + ) else: evaluate_fn = server.gen_evaluate_fn(testloader, device=device, model=model) @@ -89,14 +94,16 @@ def fit_config_fn(server_round: int): strategy = instantiate( cfg.strategy, cfg, - net, + net, evaluate_fn=evaluate_fn, on_fit_config_fn=get_on_fit_config(), - initial_parameters=ndarrays_to_parameters([val.cpu().numpy() for _, val in net.state_dict().items()]), - min_fit_clients= int(cfg.num_clients * cfg.fraction), - min_available_clients= int(cfg.num_clients * cfg.fraction), + initial_parameters=ndarrays_to_parameters( + [val.cpu().numpy() for _, val in net.state_dict().items()] + ), + min_fit_clients=int(cfg.num_clients * cfg.fraction), + min_available_clients=int(cfg.num_clients * cfg.fraction), ) - + # Start simulation history = start_simulation( client_fn=client_fn, @@ -107,7 +114,9 @@ def fit_config_fn(server_round: int): "num_gpus": cfg.client_resources.num_gpus, }, strategy=strategy, - server=server.Server_FedDyn(client_manager=SimpleClientManager(), strategy=strategy), + server=server.Server_FedDyn( + client_manager=SimpleClientManager(), strategy=strategy + ), ) # Experiment completed. 
Now we save the results and @@ -142,4 +151,4 @@ def fit_config_fn(server_round: int): if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/baselines/depthfl/depthfl/models.py b/baselines/depthfl/depthfl/models.py index 1c98309a98f0..ede83ca14694 100644 --- a/baselines/depthfl/depthfl/models.py +++ b/baselines/depthfl/depthfl/models.py @@ -2,30 +2,36 @@ from typing import List, Tuple -from omegaconf import DictConfig import torch import torch.nn as nn import torch.nn.functional as F -from torch.nn.parameter import Parameter +from omegaconf import DictConfig from torch.utils.data import DataLoader + class KLLoss(nn.Module): - """KL divergence loss for self distillation""" + """KL divergence loss for self distillation.""" + def __init__(self): super(KLLoss, self).__init__() def forward(self, pred, label): T = 1 - predict = F.log_softmax(pred/T,dim=1) - target_data = F.softmax(label/T,dim=1) - target_data =target_data+10**(-7) + predict = F.log_softmax(pred / T, dim=1) + target_data = F.softmax(label / T, dim=1) + target_data = target_data + 10 ** (-7) with torch.no_grad(): target = target_data.detach().clone() - loss=T*T*((target*(target.log()-predict)).sum(1).sum()/target.size()[0]) + loss = ( + T + * T + * ((target * (target.log() - predict)).sum(1).sum() / target.size()[0]) + ) return loss + def train( # pylint: disable=too-many-arguments net: nn.Module, trainloader: DataLoader, @@ -33,7 +39,7 @@ def train( # pylint: disable=too-many-arguments epochs: int, learning_rate: float, feddyn: bool, - kd: bool, + kd: bool, consistency_weight: float, prev_grads: dict, alpha: float, @@ -59,7 +65,9 @@ def train( # pylint: disable=too-many-arguments criterion = torch.nn.CrossEntropyLoss() criterion_kl = KLLoss().cuda() optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate, weight_decay=1e-3) - global_params = {k:val.detach().clone().flatten() for (k,val) in net.named_parameters()} + global_params = { + k: val.detach().clone().flatten() for (k, val) in net.named_parameters() + } for k, _ in net.named_parameters(): prev_grads[k] = prev_grads[k].to(device) @@ -67,7 +75,19 @@ def train( # pylint: disable=too-many-arguments net.train() for _ in range(epochs): _train_one_epoch( - net, global_params, trainloader, device, criterion, criterion_kl, optimizer, feddyn, kd, consistency_weight, prev_grads, alpha, extended, + net, + global_params, + trainloader, + device, + criterion, + criterion_kl, + optimizer, + feddyn, + kd, + consistency_weight, + prev_grads, + alpha, + extended, ) # update prev_grads for FedDyn @@ -75,7 +95,7 @@ def train( # pylint: disable=too-many-arguments for k, param in net.named_parameters(): curr_param = param.detach().clone().flatten() prev_grads[k] = prev_grads[k] - alpha * (curr_param - global_params[k]) - prev_grads[k] = prev_grads[k].to(torch.device('cpu')) + prev_grads[k] = prev_grads[k].to(torch.device("cpu")) def _train_one_epoch( # pylint: disable=too-many-arguments @@ -114,12 +134,11 @@ def _train_one_epoch( # pylint: disable=too-many-arguments """ for images, labels in trainloader: images, labels = images.to(device), labels.to(device) - loss = 0. 
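The `KLLoss` defined above is the temperature-scaled KL divergence used for self-distillation between classifier branches: with temperature `T` it computes `T^2 * KL(softmax(label/T) || softmax(pred/T))`, averaged over the batch. A quick sanity check against `torch.nn.functional.kl_div`, using random logits and `T = 1` as in the patch (shapes are arbitrary):

```python
import torch
import torch.nn.functional as F

T = 1
pred, label = torch.randn(8, 100), torch.randn(8, 100)

target = F.softmax(label / T, dim=1) + 1e-7          # epsilon as in the patch
log_pred = F.log_softmax(pred / T, dim=1)
manual = T * T * (target * (target.log() - log_pred)).sum(1).mean()

# same quantity via the built-in, batch-mean KL divergence
builtin = T * T * F.kl_div(log_pred, target, reduction="batchmean")
assert torch.allclose(manual, builtin, atol=1e-4)
```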
+ loss = 0.0 optimizer.zero_grad() output_lst = net(images) for i, branch_output in enumerate(output_lst): - # only trains last classifier in InclusiveFL if not extended and i != len(output_lst) - 1: continue @@ -128,24 +147,27 @@ def _train_one_epoch( # pylint: disable=too-many-arguments # self distillation term if kd and len(output_lst) > 1: - for j in range(len(output_lst)): if j == i: continue else: - loss += consistency_weight * \ - criterion_kl(branch_output, output_lst[j].detach()) / (len(output_lst) - 1) + loss += ( + consistency_weight + * criterion_kl(branch_output, output_lst[j].detach()) + / (len(output_lst) - 1) + ) # Dynamic regularization in FedDyn if feddyn: for k, param in net.named_parameters(): - curr_param = param.flatten() - + lin_penalty = torch.dot(curr_param, prev_grads[k]) loss -= lin_penalty - quad_penalty = alpha/2.0 * torch.sum(torch.square(curr_param - global_params[k])) + quad_penalty = ( + alpha / 2.0 * torch.sum(torch.square(curr_param - global_params[k])) + ) loss += quad_penalty loss.backward() @@ -173,7 +195,7 @@ def test( """ criterion = torch.nn.CrossEntropyLoss() correct, total, loss = 0, 0, 0.0 - correct_single = [0] * 4 # accuracy of each classifier within model + correct_single = [0] * 4 # accuracy of each classifier within model net.eval() with torch.no_grad(): for images, labels in testloader: @@ -192,7 +214,7 @@ def test( for i, single in enumerate(output_lst): _, predicted = torch.max(single, 1) correct_single[i] += (predicted == labels).sum().item() - + if len(testloader.dataset) == 0: raise ValueError("Testloader can't be 0, exiting...") loss /= len(testloader.dataset) @@ -200,8 +222,12 @@ def test( accuracy_single = [correct / total for correct in correct_single] return loss, accuracy, accuracy_single + def test_sbn( - nets: List[nn.Module], trainloaders:List[DictConfig], testloader: DataLoader, device: torch.device + nets: List[nn.Module], + trainloaders: List[DictConfig], + testloader: DataLoader, + device: torch.device, ) -> Tuple[float, float, List[float]]: """Evaluate the networks on the entire test set. @@ -210,7 +236,7 @@ def test_sbn( nets : List[nn.Module] The neural networks to test. Each neural network has different width trainloaders : List[DataLoader] - The List of dataloaders containing the data to train the network on + The List of dataloaders containing the data to train the network on testloader : DataLoader The DataLoader containing the data to test the network on. device : torch.device @@ -221,13 +247,12 @@ def test_sbn( Tuple[float, float, List[float]] The loss and the accuracy of the input model on the given data. 
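For reference, the FedDyn branch of `_train_one_epoch` above implements the local objective `L(theta) = CE(theta) - <prev_grads, theta> + (alpha/2) * ||theta - theta_g||^2`, where `theta_g` are the weights received from the server; after local training, `train` then updates the state as `prev_grads <- prev_grads - alpha * (theta - theta_g)`. A toy evaluation of the two penalty terms (tensor sizes are arbitrary, not the model's real shapes):

```python
import torch

alpha = 0.1
curr = torch.randn(10)       # flattened current parameter (theta)
glob = torch.randn(10)       # flattened global parameter (theta_g)
prev_grad = torch.randn(10)  # stored FedDyn state for this tensor

lin_penalty = torch.dot(curr, prev_grad)                     # subtracted from the loss
quad_penalty = alpha / 2.0 * torch.sum((curr - glob) ** 2)   # added to the loss
loss = -lin_penalty + quad_penalty  # plus the task loss in the real client
```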
""" - # static batch normalization for trainloader in trainloaders: with torch.no_grad(): for model in nets: model.train() - for batch_idx, (images, labels) in enumerate(trainloader): + for _batch_idx, (images, labels) in enumerate(trainloader): images, labels = images.to(device), labels.to(device) output = model(images) @@ -236,7 +261,7 @@ def test_sbn( criterion = torch.nn.CrossEntropyLoss() correct, total, loss = 0, 0, 0.0 correct_single = [0] * 4 - + # test each network of different width with torch.no_grad(): for images, labels in testloader: @@ -257,10 +282,10 @@ def test_sbn( for i, single in enumerate(output_lst): _, predicted = torch.max(single, 1) correct_single[i] += (predicted == labels).sum().item() - + if len(testloader.dataset) == 0: raise ValueError("Testloader can't be 0, exiting...") loss /= len(testloader.dataset) accuracy = correct / total accuracy_single = [correct / total for correct in correct_single] - return loss, accuracy, accuracy_single \ No newline at end of file + return loss, accuracy, accuracy_single diff --git a/baselines/depthfl/depthfl/ray_client_proxy.py b/baselines/depthfl/depthfl/ray_client_proxy.py index afeb1d90aa67..4ca3a48203fc 100644 --- a/baselines/depthfl/depthfl/ray_client_proxy.py +++ b/baselines/depthfl/depthfl/ray_client_proxy.py @@ -1,22 +1,20 @@ -from typing import Callable, Dict, Optional, cast from logging import ERROR -import ray +from typing import Callable, Optional, cast +import ray from flwr import common from flwr.client import Client, ClientLike -from depthfl.client import to_client -from flwr.client.client import ( - maybe_call_fit, -) -from flwr.simulation.ray_transport.ray_client_proxy import RayClientProxy +from flwr.client.client import maybe_call_fit from flwr.common.logger import log +from flwr.simulation.ray_transport.ray_client_proxy import RayClientProxy + +from depthfl.client import to_client ClientFn = Callable[[str], ClientLike] + class RayClientProxy_FedDyn(RayClientProxy): - def fit(self, ins: common.FitIns, timeout: Optional[float]) -> common.FitRes: - """Train model parameters on the locally held dataset.""" future_fit_res = launch_and_fit.options( # type: ignore **self.resources, @@ -30,7 +28,7 @@ def fit(self, ins: common.FitIns, timeout: Optional[float]) -> common.FitRes: common.FitRes, res, ) - + @ray.remote def launch_and_fit( @@ -43,7 +41,8 @@ def launch_and_fit( fit_ins=fit_ins, ) + def _create_client(client_fn: ClientFn, cid: str) -> Client: """Create a client instance.""" client_like: ClientLike = client_fn(cid) - return to_client(client_like=client_like) \ No newline at end of file + return to_client(client_like=client_like) diff --git a/baselines/depthfl/depthfl/resnet.py b/baselines/depthfl/depthfl/resnet.py index 7daa9d61b0e9..aeb7a7e079f7 100644 --- a/baselines/depthfl/depthfl/resnet.py +++ b/baselines/depthfl/depthfl/resnet.py @@ -1,45 +1,76 @@ +from typing import Callable, Optional + import torch import torch.nn as nn -from typing import Type, Any, Callable, Union, List, Optional + class MyGroupNorm(nn.Module): def __init__(self, num_channels): super(MyGroupNorm, self).__init__() ## change num_groups to 32 - self.norm = nn.GroupNorm(num_groups=16, num_channels=num_channels, eps=1e-5, affine=True) - + self.norm = nn.GroupNorm( + num_groups=16, num_channels=num_channels, eps=1e-5, affine=True + ) + def forward(self, x): x = self.norm(x) return x + class MyBatchNorm(nn.Module): def __init__(self, num_channels): super(MyBatchNorm, self).__init__() self.norm = nn.BatchNorm2d(num_channels, 
track_running_stats=True) - + def forward(self, x): x = self.norm(x) return x def conv3x3(in_planes, out_planes, stride=1): - return nn.Conv2d(in_planes, out_planes, kernel_size=3, - stride=stride, padding=1, bias=False) + return nn.Conv2d( + in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False + ) + def conv1x1(in_planes, planes, stride=1): return nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False) class SepConv(nn.Module): - - def __init__(self, channel_in, channel_out, kernel_size=3, stride=2, padding=1, affine=True, norm_layer=MyGroupNorm): + def __init__( + self, + channel_in, + channel_out, + kernel_size=3, + stride=2, + padding=1, + affine=True, + norm_layer=MyGroupNorm, + ): super(SepConv, self).__init__() self.op = nn.Sequential( - nn.Conv2d(channel_in, channel_in, kernel_size=kernel_size, stride=stride, padding=padding, groups=channel_in, bias=False), + nn.Conv2d( + channel_in, + channel_in, + kernel_size=kernel_size, + stride=stride, + padding=padding, + groups=channel_in, + bias=False, + ), nn.Conv2d(channel_in, channel_in, kernel_size=1, padding=0, bias=False), norm_layer(channel_in), nn.ReLU(inplace=False), - nn.Conv2d(channel_in, channel_in, kernel_size=kernel_size, stride=1, padding=padding, groups=channel_in, bias=False), + nn.Conv2d( + channel_in, + channel_in, + kernel_size=kernel_size, + stride=1, + padding=padding, + groups=channel_in, + bias=False, + ), nn.Conv2d(channel_in, channel_out, kernel_size=1, padding=0, bias=False), norm_layer(channel_out), nn.ReLU(inplace=False), @@ -51,6 +82,7 @@ def forward(self, x): class BasicBlock(nn.Module): expansion = 1 + def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) @@ -60,7 +92,7 @@ def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None) self.bn2 = norm_layer(planes) self.downsample = downsample self.stride = stride - + def forward(self, x): residual = x @@ -73,13 +105,15 @@ def forward(self, x): if self.downsample is not None: residual = self.downsample(x) - + output += residual output = self.relu(output) return output + class BottleneckBlock(nn.Module): expansion = 4 + def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None): super(BottleneckBlock, self).__init__() self.conv1 = conv1x1(inplanes, planes) @@ -89,12 +123,12 @@ def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None) self.conv2 = conv3x3(planes, planes, stride) self.bn2 = norm_layer(planes) - self.conv3 = conv1x1(planes, planes*self.expansion) - self.bn3 = norm_layer(planes*self.expansion) + self.conv3 = conv1x1(planes, planes * self.expansion) + self.bn3 = norm_layer(planes * self.expansion) self.downsample = downsample self.stride = stride - + def forward(self, x): residual = x @@ -108,7 +142,7 @@ def forward(self, x): output = self.conv3(output) output = self.bn3(output) - + if self.downsample is not None: residual = self.downsample(x) @@ -117,119 +151,124 @@ def forward(self, x): return output + class Multi_ResNet(nn.Module): """Resnet model Args: block (class): block type, BasicBlock or BottleneckBlock layers (int list): layer num in each block - num_classes (int): class num + num_classes (int): class num. 
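`Multi_ResNet` is a multi-exit network: each instantiated block group gets its own attention/bottleneck pair (`attention*`, `scala*`) and auxiliary classifier (`middle_fc*`), and `forward` returns one logits tensor per exit. This is what lets DepthFL hand shallower sub-models to weaker clients. A quick check of that contract, assuming the `multi_resnet18` constructor and package layout from this patch:

```python
import torch
from depthfl.resnet import multi_resnet18  # module path as in this baseline

for n_blocks in range(1, 5):
    net = multi_resnet18(n_blocks=n_blocks, norm="gn", num_classes=100)
    outputs = net(torch.randn(2, 3, 32, 32))
    # one classifier head per instantiated block group
    assert len(outputs) == n_blocks and outputs[0].shape == (2, 100)
```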
""" - def __init__(self, block, layers, n_blocks, num_classes=1000, \ - norm_layer: Optional[Callable[..., nn.Module]] = None): - + def __init__( + self, + block, + layers, + n_blocks, + num_classes=1000, + norm_layer: Optional[Callable[..., nn.Module]] = None, + ): super(Multi_ResNet, self).__init__() self.n_blocks = n_blocks self.inplanes = 64 self.norm_layer = norm_layer - #self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False) - self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) + # self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False) + self.conv1 = nn.Conv2d( + 3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False + ) self.bn1 = norm_layer(self.inplanes) self.relu = nn.ReLU(inplace=True) - #self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + # self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) self.layer1 = self._make_layer(block, 64, layers[0]) - + self.middle_fc1 = nn.Linear(512 * block.expansion, num_classes) - #self.feature_fc1 = nn.Linear(512 * block.expansion, 512 * block.expansion) + # self.feature_fc1 = nn.Linear(512 * block.expansion, 512 * block.expansion) self.scala1 = nn.Sequential( SepConv( channel_in=64 * block.expansion, channel_out=128 * block.expansion, - norm_layer=norm_layer + norm_layer=norm_layer, ), SepConv( channel_in=128 * block.expansion, channel_out=256 * block.expansion, - norm_layer=norm_layer + norm_layer=norm_layer, ), SepConv( channel_in=256 * block.expansion, channel_out=512 * block.expansion, - norm_layer=norm_layer - + norm_layer=norm_layer, ), - nn.AdaptiveAvgPool2d(1) + nn.AdaptiveAvgPool2d(1), ) self.attention1 = nn.Sequential( SepConv( channel_in=64 * block.expansion, channel_out=64 * block.expansion, - norm_layer=norm_layer + norm_layer=norm_layer, ), norm_layer(64 * block.expansion), nn.ReLU(), - nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False), - nn.Sigmoid() + nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False), + nn.Sigmoid(), ) if n_blocks > 1: self.layer2 = self._make_layer(block, 128, layers[1], stride=2) self.middle_fc2 = nn.Linear(512 * block.expansion, num_classes) - #self.feature_fc2 = nn.Linear(512 * block.expansion, 512 * block.expansion) + # self.feature_fc2 = nn.Linear(512 * block.expansion, 512 * block.expansion) self.scala2 = nn.Sequential( SepConv( channel_in=128 * block.expansion, channel_out=256 * block.expansion, - norm_layer=norm_layer + norm_layer=norm_layer, ), SepConv( channel_in=256 * block.expansion, channel_out=512 * block.expansion, - norm_layer=norm_layer + norm_layer=norm_layer, ), - nn.AdaptiveAvgPool2d(1) + nn.AdaptiveAvgPool2d(1), ) self.attention2 = nn.Sequential( SepConv( channel_in=128 * block.expansion, channel_out=128 * block.expansion, - norm_layer=norm_layer + norm_layer=norm_layer, ), norm_layer(128 * block.expansion), nn.ReLU(), - nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False), - nn.Sigmoid() + nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False), + nn.Sigmoid(), ) - if n_blocks > 2: self.layer3 = self._make_layer(block, 256, layers[2], stride=2) self.middle_fc3 = nn.Linear(512 * block.expansion, num_classes) - #self.feature_fc3 = nn.Linear(512 * block.expansion, 512 * block.expansion) + # self.feature_fc3 = nn.Linear(512 * block.expansion, 512 * block.expansion) self.scala3 = nn.Sequential( SepConv( channel_in=256 * block.expansion, channel_out=512 * block.expansion, - norm_layer=norm_layer + 
norm_layer=norm_layer, ), - nn.AdaptiveAvgPool2d(1) + nn.AdaptiveAvgPool2d(1), ) self.attention3 = nn.Sequential( SepConv( channel_in=256 * block.expansion, channel_out=256 * block.expansion, - norm_layer=norm_layer + norm_layer=norm_layer, ), norm_layer(256 * block.expansion), nn.ReLU(), - nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False), - nn.Sigmoid() + nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False), + nn.Sigmoid(), ) - if n_blocks > 3: self.layer4 = self._make_layer(block, 512, layers[3], stride=2) self.fc = nn.Linear(512 * block.expansion, num_classes) @@ -237,46 +276,54 @@ def __init__(self, block, layers, n_blocks, num_classes=1000, \ for m in self.modules(): if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, nn.GroupNorm) or isinstance(m, nn.BatchNorm2d): + nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu") + elif isinstance(m, nn.GroupNorm) or isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) - + def _make_layer(self, block, planes, layers, stride=1, norm_layer=None): """A block with 'layers' layers Args: block (class): block type planes (int): output channels = planes * expansion layers (int): layer num in the block - stride (int): the first layer stride in the block + stride (int): the first layer stride in the block. """ norm_layer = self.norm_layer downsample = None - if stride !=1 or self.inplanes != planes * block.expansion: + if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( conv1x1(self.inplanes, planes * block.expansion, stride), norm_layer(planes * block.expansion), ) layer = [] - layer.append(block(self.inplanes, planes, stride=stride, downsample=downsample, norm_layer=norm_layer)) + layer.append( + block( + self.inplanes, + planes, + stride=stride, + downsample=downsample, + norm_layer=norm_layer, + ) + ) self.inplanes = planes * block.expansion - for i in range(1, layers): + for _i in range(1, layers): layer.append(block(self.inplanes, planes, norm_layer=norm_layer)) - + return nn.Sequential(*layer) - + def forward(self, x): x = self.conv1(x) x = self.bn1(x) x = self.relu(x) # x = self.maxpool(x) - + x = self.layer1(x) fea1 = self.attention1(x) fea1 = fea1 * x out1_feature = self.scala1(fea1).view(x.size(0), -1) middle_output1 = self.middle_fc1(out1_feature) - #out1_feature = self.feature_fc1(out1_feature) + # out1_feature = self.feature_fc1(out1_feature) if self.n_blocks == 1: return [middle_output1] @@ -286,7 +333,7 @@ def forward(self, x): fea2 = fea2 * x out2_feature = self.scala2(fea2).view(x.size(0), -1) middle_output2 = self.middle_fc2(out2_feature) - #out2_feature = self.feature_fc2(out2_feature) + # out2_feature = self.feature_fc2(out2_feature) if self.n_blocks == 2: return [middle_output1, middle_output2] @@ -295,7 +342,7 @@ def forward(self, x): fea3 = fea3 * x out3_feature = self.scala3(fea3).view(x.size(0), -1) middle_output3 = self.middle_fc3(out3_feature) - #out3_feature = self.feature_fc3(out3_feature) + # out3_feature = self.feature_fc3(out3_feature) if self.n_blocks == 3: return [middle_output1, middle_output2, middle_output3] @@ -306,34 +353,53 @@ def forward(self, x): return [middle_output1, middle_output2, middle_output3, output4] -def multi_resnet18(n_blocks=1, norm='bn', num_classes=100): - if norm == 'gn': + +def multi_resnet18(n_blocks=1, norm="bn", num_classes=100): + if norm == "gn": norm_layer = MyGroupNorm - - elif norm == 'bn': + + 
elif norm == "bn": norm_layer = MyBatchNorm - return Multi_ResNet(BasicBlock, [2,2,2,2], n_blocks, num_classes=num_classes, norm_layer=norm_layer) - -def multi_resnet34(n_blocks=4, norm='bn', num_classes=100): - if norm == 'gn': + return Multi_ResNet( + BasicBlock, + [2, 2, 2, 2], + n_blocks, + num_classes=num_classes, + norm_layer=norm_layer, + ) + + +def multi_resnet34(n_blocks=4, norm="bn", num_classes=100): + if norm == "gn": norm_layer = MyGroupNorm - - elif norm == 'bn': + + elif norm == "bn": norm_layer = MyBatchNorm - return Multi_ResNet(BasicBlock, [3,4,6,3], n_blocks, num_classes=num_classes, norm_layer=norm_layer) + return Multi_ResNet( + BasicBlock, + [3, 4, 6, 3], + n_blocks, + num_classes=num_classes, + norm_layer=norm_layer, + ) + if __name__ == "__main__": - from ptflops import get_model_complexity_info model = multi_resnet18(n_blocks=4, num_classes=100) - - with torch.cuda.device(0): - macs, params = get_model_complexity_info(model, (3, 32, 32), as_strings=True, - print_per_layer_stat=False, verbose=True, units='MMac') - print('{:<30} {:<8}'.format('Computational complexity: ', macs)) - print('{:<30} {:<8}'.format('Number of parameters: ', params)) + with torch.cuda.device(0): + macs, params = get_model_complexity_info( + model, + (3, 32, 32), + as_strings=True, + print_per_layer_stat=False, + verbose=True, + units="MMac", + ) + print("{:<30} {:<8}".format("Computational complexity: ", macs)) + print("{:<30} {:<8}".format("Number of parameters: ", params)) diff --git a/baselines/depthfl/depthfl/resnet_hetero.py b/baselines/depthfl/depthfl/resnet_hetero.py index 094a84d130af..5602d8a0761b 100644 --- a/baselines/depthfl/depthfl/resnet_hetero.py +++ b/baselines/depthfl/depthfl/resnet_hetero.py @@ -1,7 +1,7 @@ +import numpy as np import torch import torch.nn as nn -from typing import Type, Any, Callable, Union, List, Optional -import numpy as np + class Scaler(nn.Module): def __init__(self, rate, scale): @@ -21,15 +21,17 @@ def __init__(self, num_channels, track=True): super(MyBatchNorm, self).__init__() ## change num_groups to 32 self.norm = nn.BatchNorm2d(num_channels, track_running_stats=track) - + def forward(self, x): x = self.norm(x) return x def conv3x3(in_planes, out_planes, stride=1): - return nn.Conv2d(in_planes, out_planes, kernel_size=3, - stride=stride, padding=1, bias=False) + return nn.Conv2d( + in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False + ) + def conv1x1(in_planes, planes, stride=1): return nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False) @@ -37,7 +39,17 @@ def conv1x1(in_planes, planes, stride=1): class BasicBlock(nn.Module): expansion = 1 - def __init__(self, inplanes, planes, stride=1, scaler_rate=1, downsample=None, track=True, scale=True): + + def __init__( + self, + inplanes, + planes, + stride=1, + scaler_rate=1, + downsample=None, + track=True, + scale=True, + ): super(BasicBlock, self).__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.scaler = Scaler(scaler_rate, scale) @@ -47,7 +59,7 @@ def __init__(self, inplanes, planes, stride=1, scaler_rate=1, downsample=None, self.bn2 = MyBatchNorm(planes, track) self.downsample = downsample self.stride = stride - + def forward(self, x): residual = x @@ -62,7 +74,7 @@ def forward(self, x): if self.downsample is not None: residual = self.downsample(x) - + output += residual output = self.relu(output) return output @@ -70,7 +82,17 @@ def forward(self, x): class BottleneckBlock(nn.Module): expansion = 4 - def __init__(self, inplanes, planes, 
stride=1, scaler_rate=1, downsample=None, track=True, scale=True): + + def __init__( + self, + inplanes, + planes, + stride=1, + scaler_rate=1, + downsample=None, + track=True, + scale=True, + ): super(BottleneckBlock, self).__init__() self.conv1 = conv1x1(inplanes, planes) self.bn1 = MyBatchNorm(planes) @@ -79,12 +101,12 @@ def __init__(self, inplanes, planes, stride=1, scaler_rate=1, downsample=None, self.conv2 = conv3x3(planes, planes, stride) self.bn2 = MyBatchNorm(planes) - self.conv3 = conv1x1(planes, planes*self.expansion) - self.bn3 = MyBatchNorm(planes*self.expansion) + self.conv3 = conv1x1(planes, planes * self.expansion) + self.bn3 = MyBatchNorm(planes * self.expansion) self.downsample = downsample self.stride = stride - + def forward(self, x): residual = x @@ -98,7 +120,7 @@ def forward(self, x): output = self.conv3(output) output = self.bn3(output) - + if self.downsample is not None: residual = self.downsample(x) @@ -109,64 +131,117 @@ def forward(self, x): class Multi_ResNet(nn.Module): - - def __init__(self, hidden_size, block, layers, num_classes, scaler_rate, track, scale): - + def __init__( + self, hidden_size, block, layers, num_classes, scaler_rate, track, scale + ): super(Multi_ResNet, self).__init__() - + self.inplanes = hidden_size[0] self.norm_layer = MyBatchNorm - self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False) + self.conv1 = nn.Conv2d( + 3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False + ) self.scaler = Scaler(scaler_rate, scale) self.bn1 = self.norm_layer(self.inplanes, track) self.relu = nn.ReLU(inplace=True) - #self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) - - self.layer1 = self._make_layer(block, hidden_size[0], layers[0], scaler_rate = scaler_rate, track=track, scale=scale) - self.layer2 = self._make_layer(block, hidden_size[1], layers[1], stride=2, scaler_rate = scaler_rate, track=track, scale=scale) - self.layer3 = self._make_layer(block, hidden_size[2], layers[2], stride=2, scaler_rate = scaler_rate, track=track, scale=scale) - self.layer4 = self._make_layer(block, hidden_size[3], layers[3], stride=2, scaler_rate = scaler_rate, track=track, scale=scale) + # self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + self.layer1 = self._make_layer( + block, + hidden_size[0], + layers[0], + scaler_rate=scaler_rate, + track=track, + scale=scale, + ) + self.layer2 = self._make_layer( + block, + hidden_size[1], + layers[1], + stride=2, + scaler_rate=scaler_rate, + track=track, + scale=scale, + ) + self.layer3 = self._make_layer( + block, + hidden_size[2], + layers[2], + stride=2, + scaler_rate=scaler_rate, + track=track, + scale=scale, + ) + self.layer4 = self._make_layer( + block, + hidden_size[3], + layers[3], + stride=2, + scaler_rate=scaler_rate, + track=track, + scale=scale, + ) self.fc = nn.Linear(hidden_size[3] * block.expansion, num_classes) self.scala = nn.AdaptiveAvgPool2d(1) for m in self.modules(): if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') - elif isinstance(m, nn.GroupNorm) or isinstance(m, nn.BatchNorm2d): + nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu") + elif isinstance(m, nn.GroupNorm) or isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) - - def _make_layer(self, block, planes, layers, stride=1, scaler_rate = 1, track=True, scale=True): + + def _make_layer( + self, block, planes, layers, stride=1, scaler_rate=1, track=True, scale=True + ): 
"""A block with 'layers' layers Args: block (class): block type planes (int): output channels = planes * expansion layers (int): layer num in the block - stride (int): the first layer stride in the block + stride (int): the first layer stride in the block. """ norm_layer = self.norm_layer downsample = None - if stride !=1 or self.inplanes != planes * block.expansion: + if stride != 1 or self.inplanes != planes * block.expansion: downsample = nn.Sequential( conv1x1(self.inplanes, planes * block.expansion, stride), norm_layer(planes * block.expansion, track), ) layer = [] - layer.append(block(self.inplanes, planes, stride=stride, scaler_rate = scaler_rate, downsample=downsample, track=track, scale=scale)) + layer.append( + block( + self.inplanes, + planes, + stride=stride, + scaler_rate=scaler_rate, + downsample=downsample, + track=track, + scale=scale, + ) + ) self.inplanes = planes * block.expansion - for i in range(1, layers): - layer.append(block(self.inplanes, planes, scaler_rate = scaler_rate, track=track, scale=scale)) - + for _i in range(1, layers): + layer.append( + block( + self.inplanes, + planes, + scaler_rate=scaler_rate, + track=track, + scale=scale, + ) + ) + return nn.Sequential(*layer) - + def forward(self, x): x = self.conv1(x) x = self.scaler(x) x = self.bn1(x) x = self.relu(x) - #x = self.maxpool(x) - + # x = self.maxpool(x) + x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) @@ -178,7 +253,6 @@ def forward(self, x): def resnet18(n_blocks=4, track=False, scale=True, num_classes=100): - # width pruning ratio : (0.25, 0.50, 0.75, 0.10) model_rate = n_blocks / 4 classes_size = num_classes @@ -188,18 +262,31 @@ def resnet18(n_blocks=4, track=False, scale=True, num_classes=100): scaler_rate = model_rate - return Multi_ResNet(hidden_size, BasicBlock, [2,2,2,2], num_classes=classes_size, scaler_rate=scaler_rate, track=track, scale=scale) + return Multi_ResNet( + hidden_size, + BasicBlock, + [2, 2, 2, 2], + num_classes=classes_size, + scaler_rate=scaler_rate, + track=track, + scale=scale, + ) if __name__ == "__main__": from ptflops import get_model_complexity_info model = resnet18(100, 1.0) - - with torch.cuda.device(0): - macs, params = get_model_complexity_info(model, (3, 32, 32), as_strings=True, - print_per_layer_stat=False, verbose=True, units='MMac') - - print('{:<30} {:<8}'.format('Computational complexity: ', macs)) - print('{:<30} {:<8}'.format('Number of parameters: ', params)) + with torch.cuda.device(0): + macs, params = get_model_complexity_info( + model, + (3, 32, 32), + as_strings=True, + print_per_layer_stat=False, + verbose=True, + units="MMac", + ) + + print("{:<30} {:<8}".format("Computational complexity: ", macs)) + print("{:<30} {:<8}".format("Number of parameters: ", params)) diff --git a/baselines/depthfl/depthfl/server.py b/baselines/depthfl/depthfl/server.py index 0ef5f638b14e..2406d49000d5 100644 --- a/baselines/depthfl/depthfl/server.py +++ b/baselines/depthfl/depthfl/server.py @@ -1,29 +1,23 @@ import concurrent.futures import copy -import torch from collections import OrderedDict -from typing import Callable, Dict, Optional, Tuple, List, Union from logging import DEBUG, INFO +from typing import Callable, Dict, List, Optional, Tuple, Union -from flwr.common.typing import NDArrays, Scalar +import torch +from flwr.common import Code, Parameters, Scalar, parameters_to_ndarrays from flwr.common.logger import log +from flwr.common.typing import NDArrays, Scalar from flwr.server import Server -from flwr.server.server import fit_clients from 
flwr.server.client_proxy import ClientProxy +from flwr.server.server import fit_clients from hydra.utils import instantiate from omegaconf import DictConfig from torch.utils.data import DataLoader from depthfl import FitIns, FitRes -from depthfl.models import test, test_sbn from depthfl.client import prune - -from flwr.common import ( - Code, - Parameters, - Scalar, - parameters_to_ndarrays, -) +from depthfl.models import test, test_sbn FitResultsAndFailures = Tuple[ List[Tuple[ClientProxy, FitRes]], @@ -58,7 +52,6 @@ def evaluate( ) -> Optional[Tuple[float, Dict[str, Scalar]]]: # pylint: disable=unused-argument """Use the entire CIFAR-100 test set for evaluation.""" - net = instantiate(model) params_dict = zip(net.state_dict().keys(), parameters_ndarrays) state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) @@ -67,10 +60,11 @@ def evaluate( loss, accuracy, accuracy_single = test(net, testloader, device=device) # return statistics - return loss, {"accuracy": accuracy, "accuracy_single":accuracy_single} + return loss, {"accuracy": accuracy, "accuracy_single": accuracy_single} return evaluate + def gen_evaluate_fn_hetero( trainloaders: List[DataLoader], testloader: DataLoader, @@ -99,11 +93,10 @@ def evaluate( ) -> Optional[Tuple[float, Dict[str, Scalar]]]: # pylint: disable=unused-argument """Use the entire CIFAR-100 test set for evaluation.""" - # test per 50 rounds (sbn takes a long time) if server_round % 50 != 0: - return 0., {"accuracy": 0., "accuracy_single":[0]*4} - + return 0.0, {"accuracy": 0.0, "accuracy_single": [0] * 4} + # models with different width models = [] for i in range(4): @@ -120,7 +113,9 @@ def evaluate( nets.append(net) param_idx = {} for k in net_tmp.state_dict().keys(): - param_idx[k] = [torch.arange(size) for size in net.state_dict()[k].shape] + param_idx[k] = [ + torch.arange(size) for size in net.state_dict()[k].shape + ] param_idx_lst.append(param_idx) params_dict = zip(net_tmp.state_dict().keys(), parameters_ndarrays) @@ -131,20 +126,22 @@ def evaluate( net.to(device) net.train() - loss, accuracy, accuracy_single = test_sbn(nets, trainloaders, testloader, device=device) + loss, accuracy, accuracy_single = test_sbn( + nets, trainloaders, testloader, device=device + ) # return statistics - return loss, {"accuracy": accuracy, "accuracy_single":accuracy_single} + return loss, {"accuracy": accuracy, "accuracy_single": accuracy_single} return evaluate -class Server_FedDyn(Server): +class Server_FedDyn(Server): def fit_round( - self, - server_round: int, - timeout: Optional[float], + self, + server_round: int, + timeout: Optional[float], ) -> Optional[ - Tuple[Optional[Parameters], Dict[str, Scalar], FitResultsAndFailures] + Tuple[Optional[Parameters], Dict[str, Scalar], FitResultsAndFailures] ]: """Perform a single round of federated averaging.""" # Get clients and their respective instructions from strategy @@ -183,7 +180,9 @@ def fit_round( aggregated_result: Tuple[ Optional[Parameters], Dict[str, Scalar], - ] = self.strategy.aggregate_fit(server_round, results, failures, parameters_to_ndarrays(self.parameters)) + ] = self.strategy.aggregate_fit( + server_round, results, failures, parameters_to_ndarrays(self.parameters) + ) # ] = self.strategy.aggregate_fit(server_round, results, failures) parameters_aggregated, metrics_aggregated = aggregated_result @@ -220,12 +219,12 @@ def fit_client( client: ClientProxy, ins: FitIns, timeout: Optional[float] ) -> Tuple[ClientProxy, FitRes]: """Refine parameters on a single client.""" - fit_res = 
client.fit(ins, timeout=timeout) - # tag client id + # tag client id fit_res.cid = int(client.cid) return client, fit_res + def _handle_finished_future_after_fit( future: concurrent.futures.Future, # type: ignore results: List[Tuple[ClientProxy, FitRes]], @@ -248,4 +247,4 @@ def _handle_finished_future_after_fit( return # Not successful, client returned a result where the status code is not OK - failures.append(result) \ No newline at end of file + failures.append(result) diff --git a/baselines/depthfl/depthfl/simulation.py b/baselines/depthfl/depthfl/simulation.py index e317b02e85a4..39bf530f7812 100644 --- a/baselines/depthfl/depthfl/simulation.py +++ b/baselines/depthfl/depthfl/simulation.py @@ -20,7 +20,6 @@ from typing import Any, Callable, Dict, List, Optional import ray - from flwr.client.client import Client from flwr.common import EventType, event from flwr.common.logger import log @@ -29,6 +28,7 @@ from flwr.server.client_manager import ClientManager from flwr.server.history import History from flwr.server.strategy import Strategy + from depthfl.ray_client_proxy import RayClientProxy_FedDyn INVALID_ARGUMENTS_START_SIMULATION = """ diff --git a/baselines/depthfl/depthfl/strategy.py b/baselines/depthfl/depthfl/strategy.py index cdb674ed0c49..02c83f69a1f7 100644 --- a/baselines/depthfl/depthfl/strategy.py +++ b/baselines/depthfl/depthfl/strategy.py @@ -1,28 +1,25 @@ -from typing import List, Tuple, Union, Optional, Dict -from functools import reduce -from logging import DEBUG, INFO, WARNING -from hydra.utils import instantiate -from omegaconf import DictConfig +from logging import WARNING +from typing import Dict, List, Optional, Tuple, Union +import numpy as np +import torch +import torch.nn as nn from flwr.common import ( + Metrics, NDArrays, Parameters, Scalar, ndarrays_to_parameters, parameters_to_ndarrays, - Metrics, ) - -from flwr.common.typing import FitRes from flwr.common.logger import log -from flwr.server.client_proxy import ClientProxy +from flwr.common.typing import FitRes from flwr.server.client_manager import ClientManager +from flwr.server.client_proxy import ClientProxy from flwr.server.strategy import FedAvg -from depthfl import FitIns, FitRes +from omegaconf import DictConfig -import numpy as np -import torch -import torch.nn as nn +from depthfl import FitIns, FitRes def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: @@ -48,16 +45,19 @@ def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: class FedDyn(FedAvg): - """Applying dynamic regularization in FedDyn paper""" - def __init__(self, cfg:DictConfig, net: nn.Module, *args, **kwargs): + """Applying dynamic regularization in FedDyn paper.""" + + def __init__(self, cfg: DictConfig, net: nn.Module, *args, **kwargs): self.cfg = cfg self.h = [np.zeros(v.shape) for (k, v) in net.state_dict().items()] - self.prev_grads = [{k: torch.zeros(v.numel()) for (k, v) in net.named_parameters()}]*100 + self.prev_grads = [ + {k: torch.zeros(v.numel()) for (k, v) in net.named_parameters()} + ] * 100 self.is_weight = [] # tagging real weights / biases for k in net.state_dict().keys(): - if 'weight' not in k and 'bias' not in k: + if "weight" not in k and "bias" not in k: self.is_weight.append(False) else: self.is_weight.append(True) @@ -66,13 +66,13 @@ def __init__(self, cfg:DictConfig, net: nn.Module, *args, **kwargs): def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> List[Tuple[ClientProxy, FitIns]]: 
"""Configure the next round of training.""" config = {} if self.on_fit_config_fn is not None: # Custom fit config function provided config = self.on_fit_config_fn(server_round) - + # Sample clients sample_size, min_num_clients = self.num_fit_clients( client_manager.num_available() @@ -82,14 +82,17 @@ def configure_fit( ) # Return client/config pairs - return [(client, FitIns(parameters, self.prev_grads[int(client.cid)], config)) for client in clients] + return [ + (client, FitIns(parameters, self.prev_grads[int(client.cid)], config)) + for client in clients + ] def aggregate_fit( self, server_round: int, results: List[Tuple[ClientProxy, FitRes]], failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - origin: NDArrays + origin: NDArrays, ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: """Aggregate fit results using weighted average.""" if not results: @@ -97,7 +100,7 @@ def aggregate_fit( # Do not aggregate if there are failures and failures are not accepted if not self.accept_failures and failures: return None, {} - + for _, fit_res in results: self.prev_grads[fit_res.cid] = fit_res.prev_grads @@ -106,7 +109,9 @@ def aggregate_fit( (parameters_to_ndarrays(fit_res.parameters), fit_res.num_examples) for _, fit_res in results ] - parameters_aggregated = ndarrays_to_parameters(aggregate(weights_results, origin, self.h, self.is_weight, self.cfg)) + parameters_aggregated = ndarrays_to_parameters( + aggregate(weights_results, origin, self.h, self.is_weight, self.cfg) + ) # Aggregate custom metrics if aggregation fn was provided metrics_aggregated = {} @@ -118,13 +123,17 @@ def aggregate_fit( return parameters_aggregated, metrics_aggregated - - -def aggregate(results: List[Tuple[NDArrays, int]], origin: NDArrays, h:List, is_weight:List, cfg:DictConfig) -> NDArrays: +def aggregate( + results: List[Tuple[NDArrays, int]], + origin: NDArrays, + h: List, + is_weight: List, + cfg: DictConfig, +) -> NDArrays: param_count = [0] * len(origin) weights_sum = [np.zeros(v.shape) for v in origin] - + # summation & counting of parameters for weight, _ in results: for i, layer in enumerate(weight): @@ -133,23 +142,26 @@ def aggregate(results: List[Tuple[NDArrays, int]], origin: NDArrays, h:List, is_ # update parameters for i, weight in enumerate(weights_sum): - if param_count[i] > 0: weight = weight / param_count[i] # print(np.isscalar(weight)) # update h variable for FedDyn - h[i] = h[i] - cfg.fit_config.alpha * param_count[i] * (weight - origin[i]) / cfg.num_clients - + h[i] = ( + h[i] + - cfg.fit_config.alpha + * param_count[i] + * (weight - origin[i]) + / cfg.num_clients + ) + # applying h only for weights / biases if is_weight[i] and cfg.fit_config.feddyn: weights_sum[i] = weight - h[i] / cfg.fit_config.alpha else: weights_sum[i] = weight - + else: weights_sum[i] = origin[i] - - return weights_sum - + return weights_sum diff --git a/baselines/depthfl/depthfl/strategy_hetero.py b/baselines/depthfl/depthfl/strategy_hetero.py index 65f0f1ced715..b32a4a6675f6 100644 --- a/baselines/depthfl/depthfl/strategy_hetero.py +++ b/baselines/depthfl/depthfl/strategy_hetero.py @@ -1,9 +1,9 @@ -from typing import List, Tuple, Union, Optional, Dict -from functools import reduce -from logging import DEBUG, INFO, WARNING -from hydra.utils import instantiate -from omegaconf import DictConfig +from logging import WARNING +from typing import Dict, List, Optional, Tuple, Union +import numpy as np +import torch +import torch.nn as nn from flwr.common import ( NDArrays, Parameters, @@ -11,44 +11,47 @@ 
ndarrays_to_parameters, parameters_to_ndarrays, ) - -from flwr.common.typing import FitRes from flwr.common.logger import log -from flwr.server.client_proxy import ClientProxy +from flwr.common.typing import FitRes from flwr.server.client_manager import ClientManager +from flwr.server.client_proxy import ClientProxy from flwr.server.strategy import FedAvg -from depthfl import FitIns, FitRes +from hydra.utils import instantiate +from omegaconf import DictConfig -import numpy as np -import torch -import torch.nn as nn +from depthfl import FitIns, FitRes class HeteroFL(FedAvg): - """Custom FedAvg for HeteroFL""" - def __init__(self, cfg:DictConfig, net: nn.Module, *args, **kwargs): + """Custom FedAvg for HeteroFL.""" + + def __init__(self, cfg: DictConfig, net: nn.Module, *args, **kwargs): self.cfg = cfg self.parameters = [np.zeros(v.shape) for (k, v) in net.state_dict().items()] - self.prev_grads = [{k: torch.zeros(v.numel()) for (k, v) in net.named_parameters()}]*100 + self.prev_grads = [ + {k: torch.zeros(v.numel()) for (k, v) in net.named_parameters()} + ] * 100 self.param_idx_lst = [] model = cfg.model # store parameter shapes of different width for i in range(4): - model.n_blocks=i+1 + model.n_blocks = i + 1 net_tmp = instantiate(model) param_idx = [] for k in net_tmp.state_dict().keys(): - param_idx.append([torch.arange(size) for size in net_tmp.state_dict()[k].shape]) - + param_idx.append( + [torch.arange(size) for size in net_tmp.state_dict()[k].shape] + ) + # print(net_tmp.state_dict()['conv1.weight'].shape[0]) self.param_idx_lst.append(param_idx) - + self.is_weight = [] # tagging real weights / biases for k in net.state_dict().keys(): - if 'num' in k: + if "num" in k: self.is_weight.append(False) else: self.is_weight.append(True) @@ -57,13 +60,13 @@ def __init__(self, cfg:DictConfig, net: nn.Module, *args, **kwargs): def configure_fit( self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: + ) -> List[Tuple[ClientProxy, FitIns]]: """Configure the next round of training.""" config = {} if self.on_fit_config_fn is not None: # Custom fit config function provided config = self.on_fit_config_fn(server_round) - + # Sample clients sample_size, min_num_clients = self.num_fit_clients( client_manager.num_available() @@ -73,14 +76,17 @@ def configure_fit( ) # Return client/config pairs - return [(client, FitIns(parameters, self.prev_grads[int(client.cid)], config)) for client in clients] + return [ + (client, FitIns(parameters, self.prev_grads[int(client.cid)], config)) + for client in clients + ] def aggregate_fit( self, server_round: int, results: List[Tuple[ClientProxy, FitRes]], failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - origin: NDArrays + origin: NDArrays, ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: """Aggregate fit results using weighted average.""" if not results: @@ -88,7 +94,7 @@ def aggregate_fit( # Do not aggregate if there are failures and failures are not accepted if not self.accept_failures and failures: return None, {} - + for _, fit_res in results: self.prev_grads[fit_res.cid] = fit_res.prev_grads @@ -112,9 +118,7 @@ def aggregate_fit( return parameters_aggregated, metrics_aggregated - def aggregate_hetero(self, results: List[Tuple[NDArrays, int]]) -> NDArrays: - for i, v in enumerate(self.parameters): count = np.zeros(v.shape) tmp_v = np.zeros(v.shape) @@ -123,8 +127,16 @@ def aggregate_hetero(self, results: List[Tuple[NDArrays, int]]) -> NDArrays: if 
self.cfg.exclusive_learning: cid = self.cfg.model_size * (self.cfg.num_clients // 4) - 1 - tmp_v[torch.meshgrid(self.param_idx_lst[cid // (self.cfg.num_clients // 4)][i])] += weights[i] - count[torch.meshgrid(self.param_idx_lst[cid // (self.cfg.num_clients // 4)][i])] += 1 + tmp_v[ + torch.meshgrid( + self.param_idx_lst[cid // (self.cfg.num_clients // 4)][i] + ) + ] += weights[i] + count[ + torch.meshgrid( + self.param_idx_lst[cid // (self.cfg.num_clients // 4)][i] + ) + ] += 1 tmp_v[count > 0] = np.divide(tmp_v[count > 0], count[count > 0]) v[count > 0] = tmp_v[count > 0] @@ -133,4 +145,4 @@ def aggregate_hetero(self, results: List[Tuple[NDArrays, int]]) -> NDArrays: tmp_v += weights[i] count += 1 tmp_v = np.divide(tmp_v, count) - v = tmp_v \ No newline at end of file + v = tmp_v diff --git a/baselines/depthfl/depthfl/typing.py b/baselines/depthfl/depthfl/typing.py index 1fc8e7f9020c..c80805eca594 100644 --- a/baselines/depthfl/depthfl/typing.py +++ b/baselines/depthfl/depthfl/typing.py @@ -1,9 +1,10 @@ -from enum import Enum from dataclasses import dataclass -from typing import Any, Callable, Dict, List, Optional, Tuple, Union +from enum import Enum +from typing import Dict, List, Union Scalar = Union[bool, bytes, float, int, str] + class Code(Enum): """Client status codes.""" @@ -13,6 +14,7 @@ class Code(Enum): FIT_NOT_IMPLEMENTED = 3 EVALUATE_NOT_IMPLEMENTED = 4 + @dataclass class Status: """Client status.""" @@ -20,6 +22,7 @@ class Status: code: Code message: str + @dataclass class Parameters: """Model parameters.""" @@ -27,6 +30,7 @@ class Parameters: tensors: List[bytes] tensor_type: str + @dataclass class FitIns: """Fit instructions for a client.""" @@ -35,6 +39,7 @@ class FitIns: prev_grads: Dict config: Dict[str, Scalar] + @dataclass class FitRes: """Fit response from a client.""" diff --git a/baselines/depthfl/depthfl/utils.py b/baselines/depthfl/depthfl/utils.py index 8578027acbf3..695dedf2b5d3 100644 --- a/baselines/depthfl/depthfl/utils.py +++ b/baselines/depthfl/depthfl/utils.py @@ -76,15 +76,15 @@ def save_results_as_pickle( File used by default if file_path points to a directory instead to a file. Default: "results.pkl" """ - path = Path(file_path) # ensure path exists path.mkdir(exist_ok=True, parents=True) def _add_random_suffix(path_: Path): - """Adds a randomly generated suffix to the file name (so it doesn't - overwrite the file).""" + """Adds a randomly generated suffix to the file name (so it doesn't overwrite + the file). + """ print(f"File `{path_}` exists! 
") suffix = token_hex(4) print(f"New results to be saved with suffix: {suffix}") From 14781b2d1505d63dc2cc639d6f93acd90cb7d9a5 Mon Sep 17 00:00:00 2001 From: Peterpan828 Date: Fri, 8 Sep 2023 15:59:12 +0900 Subject: [PATCH 05/51] client with state --- baselines/depthfl/depthfl/__init__.py | 4 - baselines/depthfl/depthfl/client.py | 105 ++------- baselines/depthfl/depthfl/main.py | 7 +- baselines/depthfl/depthfl/ray_client_proxy.py | 48 ----- baselines/depthfl/depthfl/server.py | 91 ++------ baselines/depthfl/depthfl/simulation.py | 203 ------------------ baselines/depthfl/depthfl/strategy.py | 44 ++-- baselines/depthfl/depthfl/strategy_hetero.py | 29 +-- baselines/depthfl/depthfl/typing.py | 51 ----- 9 files changed, 50 insertions(+), 532 deletions(-) delete mode 100644 baselines/depthfl/depthfl/ray_client_proxy.py delete mode 100644 baselines/depthfl/depthfl/simulation.py delete mode 100644 baselines/depthfl/depthfl/typing.py diff --git a/baselines/depthfl/depthfl/__init__.py b/baselines/depthfl/depthfl/__init__.py index 5455ba674372..e69de29bb2d1 100644 --- a/baselines/depthfl/depthfl/__init__.py +++ b/baselines/depthfl/depthfl/__init__.py @@ -1,4 +0,0 @@ -"""Template baseline package.""" - -from .typing import FitIns as FitIns -from .typing import FitRes as FitRes diff --git a/baselines/depthfl/depthfl/client.py b/baselines/depthfl/depthfl/client.py index ccc0c6d75d09..23dc56ee8db9 100644 --- a/baselines/depthfl/depthfl/client.py +++ b/baselines/depthfl/depthfl/client.py @@ -1,23 +1,14 @@ """Defines the DepthFL Flower Client and a function to instantiate it.""" import copy +import pickle from collections import OrderedDict -from typing import Callable, Dict, List, Tuple, Union +from typing import Callable, Dict, List, Tuple import flwr as fl import numpy as np import torch from flwr.client import Client -from flwr.client.app import ( - _constructor, - _evaluate, - _get_parameters, - _get_properties, - numpyclient_has_evaluate, - numpyclient_has_fit, - numpyclient_has_get_parameters, - numpyclient_has_get_properties, -) from flwr.client.numpy_client import NumPyClient from flwr.common import ndarrays_to_parameters, parameters_to_ndarrays from flwr.common.typing import Code, NDArrays, Scalar, Status @@ -25,18 +16,8 @@ from omegaconf import DictConfig from torch.utils.data import DataLoader -from depthfl import FitIns, FitRes from depthfl.models import test, train -EXCEPTION_MESSAGE_WRONG_RETURN_TYPE_FIT = """ -NumPyClient.fit did not return a tuple with 3 elements. 
-The returned values should have the following type signature: - - Tuple[NDArrays, Dict, int] -""" - -ClientLike = Union[Client, NumPyClient] - def prune(state_dict, param_idx): """Prune width of DNN (for HeteroFL).""" @@ -63,6 +44,8 @@ def __init__( num_epochs: int, learning_rate: float, learning_rate_decay: float, + prev_grads: Dict, + cid: int, ): # pylint: disable=too-many-arguments self.net = net self.trainloader = trainloader @@ -71,6 +54,8 @@ def __init__( self.num_epochs = num_epochs self.learning_rate = learning_rate self.learning_rate_decay = learning_rate_decay + self.prev_grads = prev_grads + self.cid = cid self.param_idx = {} state_dict = net.state_dict() @@ -91,7 +76,7 @@ def set_parameters(self, parameters: NDArrays) -> None: self.net.load_state_dict(prune(state_dict, self.param_idx), strict=True) def fit( - self, parameters: NDArrays, prev_grads: Dict, config: Dict[str, Scalar] + self, parameters: NDArrays, config: Dict[str, Scalar] ) -> Tuple[NDArrays, Dict, int]: """Implements distributed fit function for a given client.""" self.set_parameters(parameters) @@ -114,12 +99,15 @@ def fit( feddyn=config["feddyn"], kd=config["kd"], consistency_weight=consistency_weight, - prev_grads=prev_grads, + prev_grads=self.prev_grads, alpha=config["alpha"], extended=config["extended"], - ) + ) + + with open(f'prev_grads/client_{self.cid}', 'wb') as f: + pickle.dump(self.prev_grads, f) - return self.get_parameters({}), prev_grads, len(self.trainloader) + return self.get_parameters({}), len(self.trainloader), {"cid": self.cid} def evaluate( self, parameters: NDArrays, config: Dict[str, Scalar] @@ -192,6 +180,9 @@ def client_fn(cid: str) -> FlowerClient: trainloader = trainloaders[int(cid)] valloader = valloaders[int(cid)] + with open(f'prev_grads/client_{int(cid)}', 'rb') as f: + prev_grads = pickle.load(f) + return FlowerClient( net, trainloader, @@ -200,70 +191,10 @@ def client_fn(cid: str) -> FlowerClient: num_epochs, learning_rate, learning_rate_decay, + prev_grads, + int(cid), ) return client_fn -def _fit(self: Client, ins: FitIns) -> FitRes: - """Refine the provided parameters using the locally held dataset. - - FitIns & FitRes were modified for FedDyn. 
Fit function gets prev_grads as input and - return the updated prev_grads with updated parameters - """ - # Deconstruct FitIns - parameters: NDArrays = parameters_to_ndarrays(ins.parameters) - - # Train - results = self.numpy_client.fit(parameters, ins.prev_grads, ins.config) # type: ignore - if not ( - len(results) == 3 - and isinstance(results[0], list) - and isinstance(results[1], Dict) - and isinstance(results[2], int) - ): - raise Exception(EXCEPTION_MESSAGE_WRONG_RETURN_TYPE_FIT) - - # Return FitRes - parameters_prime, prev_grads, num_examples = results - parameters_prime_proto = ndarrays_to_parameters(parameters_prime) - return FitRes( - status=Status(code=Code.OK, message="Success"), - parameters=parameters_prime_proto, - prev_grads=prev_grads, - num_examples=num_examples, - cid=-1, - ) - - -def _wrap_numpy_client(client: NumPyClient) -> Client: - member_dict: Dict[str, Callable] = { # type: ignore - "__init__": _constructor, - } - - # Add wrapper type methods (if overridden) - - if numpyclient_has_get_properties(client=client): - member_dict["get_properties"] = _get_properties - - if numpyclient_has_get_parameters(client=client): - member_dict["get_parameters"] = _get_parameters - - if numpyclient_has_fit(client=client): - member_dict["fit"] = _fit - - if numpyclient_has_evaluate(client=client): - member_dict["evaluate"] = _evaluate - - # Create wrapper class - wrapper_class = type("NumPyClientWrapper", (Client,), member_dict) - - # Create and return an instance of the newly created class - return wrapper_class(numpy_client=client) # type: ignore - - -def to_client(client_like: ClientLike) -> Client: - """Take any Client-like object and return it as a Client.""" - if isinstance(client_like, NumPyClient): - return _wrap_numpy_client(client=client_like) - return client_like diff --git a/baselines/depthfl/depthfl/main.py b/baselines/depthfl/depthfl/main.py index a3a35acfe30e..423448092f07 100644 --- a/baselines/depthfl/depthfl/main.py +++ b/baselines/depthfl/depthfl/main.py @@ -10,7 +10,6 @@ from depthfl import client, server, utils from depthfl.dataset import load_datasets -from depthfl.simulation import start_simulation from depthfl.utils import save_results_as_pickle @@ -105,7 +104,7 @@ def fit_config_fn(server_round: int): ) # Start simulation - history = start_simulation( + history = fl.simulation.start_simulation( client_fn=client_fn, num_clients=cfg.num_clients, config=fl.server.ServerConfig(num_rounds=cfg.num_rounds), @@ -114,9 +113,7 @@ def fit_config_fn(server_round: int): "num_gpus": cfg.client_resources.num_gpus, }, strategy=strategy, - server=server.Server_FedDyn( - client_manager=SimpleClientManager(), strategy=strategy - ), + server=server.Server_FedDyn(client_manager=SimpleClientManager(), strategy=strategy), ) # Experiment completed. 
Now we save the results and diff --git a/baselines/depthfl/depthfl/ray_client_proxy.py b/baselines/depthfl/depthfl/ray_client_proxy.py deleted file mode 100644 index 4ca3a48203fc..000000000000 --- a/baselines/depthfl/depthfl/ray_client_proxy.py +++ /dev/null @@ -1,48 +0,0 @@ -from logging import ERROR -from typing import Callable, Optional, cast - -import ray -from flwr import common -from flwr.client import Client, ClientLike -from flwr.client.client import maybe_call_fit -from flwr.common.logger import log -from flwr.simulation.ray_transport.ray_client_proxy import RayClientProxy - -from depthfl.client import to_client - -ClientFn = Callable[[str], ClientLike] - - -class RayClientProxy_FedDyn(RayClientProxy): - def fit(self, ins: common.FitIns, timeout: Optional[float]) -> common.FitRes: - """Train model parameters on the locally held dataset.""" - future_fit_res = launch_and_fit.options( # type: ignore - **self.resources, - ).remote(self.client_fn, self.cid, ins) - try: - res = ray.get(future_fit_res, timeout=timeout) - except Exception as ex: - log(ERROR, ex) - raise ex - return cast( - common.FitRes, - res, - ) - - -@ray.remote -def launch_and_fit( - client_fn: ClientFn, cid: str, fit_ins: common.FitIns -) -> common.FitRes: - """Exectue fit remotely.""" - client: Client = _create_client(client_fn, cid) - return maybe_call_fit( - client=client, - fit_ins=fit_ins, - ) - - -def _create_client(client_fn: ClientFn, cid: str) -> Client: - """Create a client instance.""" - client_like: ClientLike = client_fn(cid) - return to_client(client_like=client_like) diff --git a/baselines/depthfl/depthfl/server.py b/baselines/depthfl/depthfl/server.py index 2406d49000d5..c832ef9ade0b 100644 --- a/baselines/depthfl/depthfl/server.py +++ b/baselines/depthfl/depthfl/server.py @@ -1,21 +1,24 @@ -import concurrent.futures import copy from collections import OrderedDict from logging import DEBUG, INFO from typing import Callable, Dict, List, Optional, Tuple, Union import torch -from flwr.common import Code, Parameters, Scalar, parameters_to_ndarrays +from flwr.common import ( + Parameters, + Scalar, + parameters_to_ndarrays, + FitRes, +) +from flwr.common import Scalar from flwr.common.logger import log from flwr.common.typing import NDArrays, Scalar -from flwr.server import Server +from flwr.server.server import Server, fit_clients from flwr.server.client_proxy import ClientProxy -from flwr.server.server import fit_clients from hydra.utils import instantiate from omegaconf import DictConfig from torch.utils.data import DataLoader -from depthfl import FitIns, FitRes from depthfl.client import prune from depthfl.models import test, test_sbn @@ -24,7 +27,6 @@ List[Union[Tuple[ClientProxy, FitRes], BaseException]], ] - def gen_evaluate_fn( testloader: DataLoader, device: torch.device, @@ -134,14 +136,14 @@ def evaluate( return evaluate - class Server_FedDyn(Server): + def fit_round( - self, - server_round: int, - timeout: Optional[float], + self, + server_round: int, + timeout: Optional[float], ) -> Optional[ - Tuple[Optional[Parameters], Dict[str, Scalar], FitResultsAndFailures] + Tuple[Optional[Parameters], Dict[str, Scalar], FitResultsAndFailures] ]: """Perform a single round of federated averaging.""" # Get clients and their respective instructions from strategy @@ -180,71 +182,8 @@ def fit_round( aggregated_result: Tuple[ Optional[Parameters], Dict[str, Scalar], - ] = self.strategy.aggregate_fit( - server_round, results, failures, parameters_to_ndarrays(self.parameters) - ) + ] = 
self.strategy.aggregate_fit(server_round, results, failures, parameters_to_ndarrays(self.parameters)) # ] = self.strategy.aggregate_fit(server_round, results, failures) parameters_aggregated, metrics_aggregated = aggregated_result - return parameters_aggregated, metrics_aggregated, (results, failures) - - -def fit_clients( - client_instructions: List[Tuple[ClientProxy, FitIns]], - max_workers: Optional[int], - timeout: Optional[float], -) -> FitResultsAndFailures: - """Refine parameters concurrently on all selected clients.""" - with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor: - submitted_fs = { - executor.submit(fit_client, client_proxy, ins, timeout) - for client_proxy, ins in client_instructions - } - finished_fs, _ = concurrent.futures.wait( - fs=submitted_fs, - timeout=None, # Handled in the respective communication stack - ) - - # Gather results - results: List[Tuple[ClientProxy, FitRes]] = [] - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]] = [] - for future in finished_fs: - _handle_finished_future_after_fit( - future=future, results=results, failures=failures - ) - return results, failures - - -def fit_client( - client: ClientProxy, ins: FitIns, timeout: Optional[float] -) -> Tuple[ClientProxy, FitRes]: - """Refine parameters on a single client.""" - fit_res = client.fit(ins, timeout=timeout) - # tag client id - fit_res.cid = int(client.cid) - return client, fit_res - - -def _handle_finished_future_after_fit( - future: concurrent.futures.Future, # type: ignore - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], -) -> None: - """Convert finished future into either a result or a failure.""" - # Check if there was an exception - failure = future.exception() - if failure is not None: - failures.append(failure) - return - - # Successfully received a result from a client - result: Tuple[ClientProxy, FitRes] = future.result() - _, res = result - - # Check result status code - if res.status.code == Code.OK: - results.append(result) - return - - # Not successful, client returned a result where the status code is not OK - failures.append(result) + return parameters_aggregated, metrics_aggregated, (results, failures) \ No newline at end of file diff --git a/baselines/depthfl/depthfl/simulation.py b/baselines/depthfl/depthfl/simulation.py deleted file mode 100644 index 39bf530f7812..000000000000 --- a/baselines/depthfl/depthfl/simulation.py +++ /dev/null @@ -1,203 +0,0 @@ -# Copyright 2020 Adap GmbH. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Flower simulation app.""" - - -import sys -from logging import ERROR, INFO -from typing import Any, Callable, Dict, List, Optional - -import ray -from flwr.client.client import Client -from flwr.common import EventType, event -from flwr.common.logger import log -from flwr.server import Server -from flwr.server.app import ServerConfig, _fl, _init_defaults -from flwr.server.client_manager import ClientManager -from flwr.server.history import History -from flwr.server.strategy import Strategy - -from depthfl.ray_client_proxy import RayClientProxy_FedDyn - -INVALID_ARGUMENTS_START_SIMULATION = """ -INVALID ARGUMENTS ERROR - -Invalid Arguments in method: - -`start_simulation( - *, - client_fn: Callable[[str], Client], - num_clients: Optional[int] = None, - clients_ids: Optional[List[str]] = None, - client_resources: Optional[Dict[str, float]] = None, - server: Optional[Server] = None, - config: ServerConfig = None, - strategy: Optional[Strategy] = None, - client_manager: Optional[ClientManager] = None, - ray_init_args: Optional[Dict[str, Any]] = None, -) -> None:` - -REASON: - Method requires: - - Either `num_clients`[int] or `clients_ids`[List[str]] - to be set exclusively. - OR - - `len(clients_ids)` == `num_clients` - -""" - - -def start_simulation( # pylint: disable=too-many-arguments - *, - client_fn: Callable[[str], Client], - num_clients: Optional[int] = None, - clients_ids: Optional[List[str]] = None, - client_resources: Optional[Dict[str, float]] = None, - server: Optional[Server] = None, - config: Optional[ServerConfig] = None, - strategy: Optional[Strategy] = None, - client_manager: Optional[ClientManager] = None, - ray_init_args: Optional[Dict[str, Any]] = None, - keep_initialised: Optional[bool] = False, -) -> History: - """Start a Ray-based Flower simulation server. - - Parameters - ---------- - client_fn : Callable[[str], Client] - A function creating client instances. The function must take a single - str argument called `cid`. It should return a single client instance. - Note that the created client instances are ephemeral and will often be - destroyed after a single method invocation. Since client instances are - not long-lived, they should not attempt to carry state over method - invocations. Any state required by the instance (model, dataset, - hyperparameters, ...) should be (re-)created in either the call to - `client_fn` or the call to any of the client methods (e.g., load - evaluation data in the `evaluate` method itself). - num_clients : Optional[int] - The total number of clients in this simulation. This must be set if - `clients_ids` is not set and vice-versa. - clients_ids : Optional[List[str]] - List `client_id`s for each client. This is only required if - `num_clients` is not set. Setting both `num_clients` and `clients_ids` - with `len(clients_ids)` not equal to `num_clients` generates an error. - client_resources : Optional[Dict[str, float]] (default: None) - CPU and GPU resources for a single client. Supported keys are - `num_cpus` and `num_gpus`. Example: `{"num_cpus": 4, "num_gpus": 1}`. - To understand the GPU utilization caused by `num_gpus`, consult the Ray - documentation on GPU support. - server : Optional[flwr.server.Server] (default: None). - An implementation of the abstract base class `flwr.server.Server`. If no - instance is provided, then `start_server` will create one. - config: ServerConfig (default: None). 
- Currently supported values are `num_rounds` (int, default: 1) and - `round_timeout` in seconds (float, default: None). - strategy : Optional[flwr.server.Strategy] (default: None) - An implementation of the abstract base class `flwr.server.Strategy`. If - no strategy is provided, then `start_server` will use - `flwr.server.strategy.FedAvg`. - client_manager : Optional[flwr.server.ClientManager] (default: None) - An implementation of the abstract base class `flwr.server.ClientManager`. - If no implementation is provided, then `start_simulation` will use - `flwr.server.client_manager.SimpleClientManager`. - ray_init_args : Optional[Dict[str, Any]] (default: None) - Optional dictionary containing arguments for the call to `ray.init`. - If ray_init_args is None (the default), Ray will be initialized with - the following default args: - - { "ignore_reinit_error": True, "include_dashboard": False } - - An empty dictionary can be used (ray_init_args={}) to prevent any - arguments from being passed to ray.init. - keep_initialised: Optional[bool] (default: False) - Set to True to prevent `ray.shutdown()` in case `ray.is_initialized()=True`. - - Returns - ------- - hist : flwr.server.history.History. - Object containing metrics from training. - """ - # pylint: disable-msg=too-many-locals - event( - EventType.START_SIMULATION_ENTER, - {"num_clients": len(clients_ids) if clients_ids is not None else num_clients}, - ) - - # Initialize server and server config - initialized_server, initialized_config = _init_defaults( - server=server, - config=config, - strategy=strategy, - client_manager=client_manager, - ) - log( - INFO, - "Starting Flower simulation, config: %s", - initialized_config, - ) - - # clients_ids takes precedence - cids: List[str] - if clients_ids is not None: - if (num_clients is not None) and (len(clients_ids) != num_clients): - log(ERROR, INVALID_ARGUMENTS_START_SIMULATION) - sys.exit() - else: - cids = clients_ids - else: - if num_clients is None: - log(ERROR, INVALID_ARGUMENTS_START_SIMULATION) - sys.exit() - else: - cids = [str(x) for x in range(num_clients)] - - # Default arguments for Ray initialization - if not ray_init_args: - ray_init_args = { - "ignore_reinit_error": True, - "include_dashboard": False, - } - - # Shut down Ray if it has already been initialized (unless asked not to) - if ray.is_initialized() and not keep_initialised: # type: ignore - ray.shutdown() # type: ignore - - # Initialize Ray - ray.init(**ray_init_args) # type: ignore - log( - INFO, - "Flower VCE: Ray initialized with resources: %s", - ray.cluster_resources(), # type: ignore - ) - - # Register one RayClientProxy object for each client with the ClientManager - resources = client_resources if client_resources is not None else {} - for cid in cids: - client_proxy = RayClientProxy_FedDyn( - client_fn=client_fn, - cid=cid, - resources=resources, - ) - initialized_server.client_manager().register(client=client_proxy) - - # Start training - hist = _fl( - server=initialized_server, - config=initialized_config, - ) - - event(EventType.START_SIMULATION_LEAVE) - - return hist diff --git a/baselines/depthfl/depthfl/strategy.py b/baselines/depthfl/depthfl/strategy.py index 02c83f69a1f7..830a157c9d7b 100644 --- a/baselines/depthfl/depthfl/strategy.py +++ b/baselines/depthfl/depthfl/strategy.py @@ -1,3 +1,5 @@ +import os +import pickle from logging import WARNING from typing import Dict, List, Optional, Tuple, Union @@ -14,13 +16,10 @@ ) from flwr.common.logger import log from flwr.common.typing import FitRes -from 
flwr.server.client_manager import ClientManager from flwr.server.client_proxy import ClientProxy from flwr.server.strategy import FedAvg from omegaconf import DictConfig -from depthfl import FitIns, FitRes - def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: """Aggregation function for weighted average during evaluation. @@ -50,9 +49,15 @@ class FedDyn(FedAvg): def __init__(self, cfg: DictConfig, net: nn.Module, *args, **kwargs): self.cfg = cfg self.h = [np.zeros(v.shape) for (k, v) in net.state_dict().items()] - self.prev_grads = [ - {k: torch.zeros(v.numel()) for (k, v) in net.named_parameters()} - ] * 100 + self.prev_grads = [{k: torch.zeros(v.numel()) for (k, v) in net.named_parameters()}] * cfg.num_clients + + if not os.path.exists("prev_grads"): + os.makedirs("prev_grads") + + for idx in range(cfg.num_clients): + with open(f'prev_grads/client_{idx}', 'wb') as f: + pickle.dump(self.prev_grads[idx], f) + self.is_weight = [] # tagging real weights / biases @@ -64,28 +69,6 @@ def __init__(self, cfg: DictConfig, net: nn.Module, *args, **kwargs): super().__init__(*args, **kwargs) - def configure_fit( - self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: - """Configure the next round of training.""" - config = {} - if self.on_fit_config_fn is not None: - # Custom fit config function provided - config = self.on_fit_config_fn(server_round) - - # Sample clients - sample_size, min_num_clients = self.num_fit_clients( - client_manager.num_available() - ) - clients = client_manager.sample( - num_clients=sample_size, min_num_clients=min_num_clients - ) - - # Return client/config pairs - return [ - (client, FitIns(parameters, self.prev_grads[int(client.cid)], config)) - for client in clients - ] def aggregate_fit( self, @@ -101,8 +84,9 @@ def aggregate_fit( if not self.accept_failures and failures: return None, {} - for _, fit_res in results: - self.prev_grads[fit_res.cid] = fit_res.prev_grads + for idx in range(self.cfg.num_clients): + with open(f'prev_grads/client_{idx}', 'rb') as f: + self.prev_grads[idx] = pickle.load(f) # Convert results weights_results = [ diff --git a/baselines/depthfl/depthfl/strategy_hetero.py b/baselines/depthfl/depthfl/strategy_hetero.py index b32a4a6675f6..2d291bd5d460 100644 --- a/baselines/depthfl/depthfl/strategy_hetero.py +++ b/baselines/depthfl/depthfl/strategy_hetero.py @@ -19,8 +19,6 @@ from hydra.utils import instantiate from omegaconf import DictConfig -from depthfl import FitIns, FitRes - class HeteroFL(FedAvg): """Custom FedAvg for HeteroFL.""" @@ -58,28 +56,6 @@ def __init__(self, cfg: DictConfig, net: nn.Module, *args, **kwargs): super().__init__(*args, **kwargs) - def configure_fit( - self, server_round: int, parameters: Parameters, client_manager: ClientManager - ) -> List[Tuple[ClientProxy, FitIns]]: - """Configure the next round of training.""" - config = {} - if self.on_fit_config_fn is not None: - # Custom fit config function provided - config = self.on_fit_config_fn(server_round) - - # Sample clients - sample_size, min_num_clients = self.num_fit_clients( - client_manager.num_available() - ) - clients = client_manager.sample( - num_clients=sample_size, min_num_clients=min_num_clients - ) - - # Return client/config pairs - return [ - (client, FitIns(parameters, self.prev_grads[int(client.cid)], config)) - for client in clients - ] def aggregate_fit( self, @@ -95,12 +71,9 @@ def aggregate_fit( if not self.accept_failures and failures: return None, {} - for _, fit_res in 
results: - self.prev_grads[fit_res.cid] = fit_res.prev_grads - # Convert results weights_results = [ - (parameters_to_ndarrays(fit_res.parameters), fit_res.cid) + (parameters_to_ndarrays(fit_res.parameters), fit_res.metrics["cid"]) for _, fit_res in results ] diff --git a/baselines/depthfl/depthfl/typing.py b/baselines/depthfl/depthfl/typing.py deleted file mode 100644 index c80805eca594..000000000000 --- a/baselines/depthfl/depthfl/typing.py +++ /dev/null @@ -1,51 +0,0 @@ -from dataclasses import dataclass -from enum import Enum -from typing import Dict, List, Union - -Scalar = Union[bool, bytes, float, int, str] - - -class Code(Enum): - """Client status codes.""" - - OK = 0 - GET_PROPERTIES_NOT_IMPLEMENTED = 1 - GET_PARAMETERS_NOT_IMPLEMENTED = 2 - FIT_NOT_IMPLEMENTED = 3 - EVALUATE_NOT_IMPLEMENTED = 4 - - -@dataclass -class Status: - """Client status.""" - - code: Code - message: str - - -@dataclass -class Parameters: - """Model parameters.""" - - tensors: List[bytes] - tensor_type: str - - -@dataclass -class FitIns: - """Fit instructions for a client.""" - - parameters: Parameters - prev_grads: Dict - config: Dict[str, Scalar] - - -@dataclass -class FitRes: - """Fit response from a client.""" - - status: Status - parameters: Parameters - prev_grads: Dict - num_examples: int - cid: int From 336e7af0a373a0e33ce7243e61ed3eacc3365cb3 Mon Sep 17 00:00:00 2001 From: Peterpan828 Date: Fri, 8 Sep 2023 16:01:59 +0900 Subject: [PATCH 06/51] update import --- baselines/depthfl/.gitignore | 3 ++- baselines/depthfl/depthfl/client.py | 13 +++------- baselines/depthfl/depthfl/main.py | 4 ++- baselines/depthfl/depthfl/server.py | 27 +++++++++----------- baselines/depthfl/depthfl/strategy.py | 11 ++++---- baselines/depthfl/depthfl/strategy_hetero.py | 2 -- 6 files changed, 27 insertions(+), 33 deletions(-) diff --git a/baselines/depthfl/.gitignore b/baselines/depthfl/.gitignore index d41f139dfc7e..6e8879866afe 100644 --- a/baselines/depthfl/.gitignore +++ b/baselines/depthfl/.gitignore @@ -1,2 +1,3 @@ dataset/ -outputs/ \ No newline at end of file +outputs/ +prev_grads/ \ No newline at end of file diff --git a/baselines/depthfl/depthfl/client.py b/baselines/depthfl/depthfl/client.py index 23dc56ee8db9..00df46b616c1 100644 --- a/baselines/depthfl/depthfl/client.py +++ b/baselines/depthfl/depthfl/client.py @@ -8,10 +8,7 @@ import flwr as fl import numpy as np import torch -from flwr.client import Client -from flwr.client.numpy_client import NumPyClient -from flwr.common import ndarrays_to_parameters, parameters_to_ndarrays -from flwr.common.typing import Code, NDArrays, Scalar, Status +from flwr.common.typing import NDArrays, Scalar from hydra.utils import instantiate from omegaconf import DictConfig from torch.utils.data import DataLoader @@ -102,9 +99,9 @@ def fit( prev_grads=self.prev_grads, alpha=config["alpha"], extended=config["extended"], - ) + ) - with open(f'prev_grads/client_{self.cid}', 'wb') as f: + with open(f"prev_grads/client_{self.cid}", "wb") as f: pickle.dump(self.prev_grads, f) return self.get_parameters({}), len(self.trainloader), {"cid": self.cid} @@ -180,7 +177,7 @@ def client_fn(cid: str) -> FlowerClient: trainloader = trainloaders[int(cid)] valloader = valloaders[int(cid)] - with open(f'prev_grads/client_{int(cid)}', 'rb') as f: + with open(f"prev_grads/client_{int(cid)}", "rb") as f: prev_grads = pickle.load(f) return FlowerClient( @@ -196,5 +193,3 @@ def client_fn(cid: str) -> FlowerClient: ) return client_fn - - diff --git a/baselines/depthfl/depthfl/main.py 
b/baselines/depthfl/depthfl/main.py index 423448092f07..7cb261d5f9f5 100644 --- a/baselines/depthfl/depthfl/main.py +++ b/baselines/depthfl/depthfl/main.py @@ -113,7 +113,9 @@ def fit_config_fn(server_round: int): "num_gpus": cfg.client_resources.num_gpus, }, strategy=strategy, - server=server.Server_FedDyn(client_manager=SimpleClientManager(), strategy=strategy), + server=server.Server_FedDyn( + client_manager=SimpleClientManager(), strategy=strategy + ), ) # Experiment completed. Now we save the results and diff --git a/baselines/depthfl/depthfl/server.py b/baselines/depthfl/depthfl/server.py index c832ef9ade0b..1e2c600e6dce 100644 --- a/baselines/depthfl/depthfl/server.py +++ b/baselines/depthfl/depthfl/server.py @@ -4,17 +4,11 @@ from typing import Callable, Dict, List, Optional, Tuple, Union import torch -from flwr.common import ( - Parameters, - Scalar, - parameters_to_ndarrays, - FitRes, -) -from flwr.common import Scalar +from flwr.common import FitRes, Parameters, Scalar, parameters_to_ndarrays from flwr.common.logger import log from flwr.common.typing import NDArrays, Scalar -from flwr.server.server import Server, fit_clients from flwr.server.client_proxy import ClientProxy +from flwr.server.server import Server, fit_clients from hydra.utils import instantiate from omegaconf import DictConfig from torch.utils.data import DataLoader @@ -27,6 +21,7 @@ List[Union[Tuple[ClientProxy, FitRes], BaseException]], ] + def gen_evaluate_fn( testloader: DataLoader, device: torch.device, @@ -136,14 +131,14 @@ def evaluate( return evaluate -class Server_FedDyn(Server): +class Server_FedDyn(Server): def fit_round( - self, - server_round: int, - timeout: Optional[float], + self, + server_round: int, + timeout: Optional[float], ) -> Optional[ - Tuple[Optional[Parameters], Dict[str, Scalar], FitResultsAndFailures] + Tuple[Optional[Parameters], Dict[str, Scalar], FitResultsAndFailures] ]: """Perform a single round of federated averaging.""" # Get clients and their respective instructions from strategy @@ -182,8 +177,10 @@ def fit_round( aggregated_result: Tuple[ Optional[Parameters], Dict[str, Scalar], - ] = self.strategy.aggregate_fit(server_round, results, failures, parameters_to_ndarrays(self.parameters)) + ] = self.strategy.aggregate_fit( + server_round, results, failures, parameters_to_ndarrays(self.parameters) + ) # ] = self.strategy.aggregate_fit(server_round, results, failures) parameters_aggregated, metrics_aggregated = aggregated_result - return parameters_aggregated, metrics_aggregated, (results, failures) \ No newline at end of file + return parameters_aggregated, metrics_aggregated, (results, failures) diff --git a/baselines/depthfl/depthfl/strategy.py b/baselines/depthfl/depthfl/strategy.py index 830a157c9d7b..b8aad370cba0 100644 --- a/baselines/depthfl/depthfl/strategy.py +++ b/baselines/depthfl/depthfl/strategy.py @@ -49,15 +49,17 @@ class FedDyn(FedAvg): def __init__(self, cfg: DictConfig, net: nn.Module, *args, **kwargs): self.cfg = cfg self.h = [np.zeros(v.shape) for (k, v) in net.state_dict().items()] - self.prev_grads = [{k: torch.zeros(v.numel()) for (k, v) in net.named_parameters()}] * cfg.num_clients + self.prev_grads = [ + {k: torch.zeros(v.numel()) for (k, v) in net.named_parameters()} + ] * cfg.num_clients if not os.path.exists("prev_grads"): os.makedirs("prev_grads") for idx in range(cfg.num_clients): - with open(f'prev_grads/client_{idx}', 'wb') as f: + with open(f"prev_grads/client_{idx}", "wb") as f: pickle.dump(self.prev_grads[idx], f) - + self.is_weight = [] # 
tagging real weights / biases @@ -69,7 +71,6 @@ def __init__(self, cfg: DictConfig, net: nn.Module, *args, **kwargs): super().__init__(*args, **kwargs) - def aggregate_fit( self, server_round: int, @@ -85,7 +86,7 @@ def aggregate_fit( return None, {} for idx in range(self.cfg.num_clients): - with open(f'prev_grads/client_{idx}', 'rb') as f: + with open(f"prev_grads/client_{idx}", "rb") as f: self.prev_grads[idx] = pickle.load(f) # Convert results diff --git a/baselines/depthfl/depthfl/strategy_hetero.py b/baselines/depthfl/depthfl/strategy_hetero.py index 2d291bd5d460..a4f0ad4e0281 100644 --- a/baselines/depthfl/depthfl/strategy_hetero.py +++ b/baselines/depthfl/depthfl/strategy_hetero.py @@ -13,7 +13,6 @@ ) from flwr.common.logger import log from flwr.common.typing import FitRes -from flwr.server.client_manager import ClientManager from flwr.server.client_proxy import ClientProxy from flwr.server.strategy import FedAvg from hydra.utils import instantiate @@ -56,7 +55,6 @@ def __init__(self, cfg: DictConfig, net: nn.Module, *args, **kwargs): super().__init__(*args, **kwargs) - def aggregate_fit( self, server_round: int, From 213108312cdc284b0748953642caf6c8bc254d39 Mon Sep 17 00:00:00 2001 From: Peterpan828 Date: Fri, 8 Sep 2023 16:33:01 +0900 Subject: [PATCH 07/51] remove extended_readme --- baselines/depthfl/EXTENDED_README.md | 123 --------------------------- 1 file changed, 123 deletions(-) delete mode 100644 baselines/depthfl/EXTENDED_README.md diff --git a/baselines/depthfl/EXTENDED_README.md b/baselines/depthfl/EXTENDED_README.md deleted file mode 100644 index 9c8f5bc72fa9..000000000000 --- a/baselines/depthfl/EXTENDED_README.md +++ /dev/null @@ -1,123 +0,0 @@ - -# Extended Readme - -> The baselines are expected to run in a machine running Ubuntu 22.04 - -While `README.md` should include information about the baseline you implement and how to run it, this _extended_ readme provides info on what's the expected directory structure for a new baseline and more generally the instructions to follow before your baseline can be merged into the Flower repository. Please follow closely these instructions. It is likely that you have already completed steps 1-2. - -1. Fork the Flower repository and clone it. -2. Navigate to the `baselines/` directory and from there run: - ```bash - # This will create a new directory with the same structure as this `baseline_template` directory. - ./dev/create-baseline.sh - ``` -3. All your code and configs should go into a sub-directory with the same name as the name of your baseline. - * The sub-directory contains a series of Python scripts that you can edit. Please stick to these files and consult with us if you need additional ones. - * There is also a basic config structure in `/conf` ready be parsed by [Hydra](https://hydra.cc/) when executing your `main.py`. -4. Therefore, the directory structure in your baseline should look like: - ```bash - baselines/ - ├── README.md # describes your baseline and everything needed to use it - ├── EXTENDED_README.md # to remove before creating your PR - ├── pyproject.toml # details your Python environment - └── - ├── *.py # several .py files including main.py and __init__.py - └── conf - └── *.yaml # one or more Hydra config files - - ``` -> :warning: Make sure the variable `name` in `pyproject.toml` is set to the name of the sub-directory containing all your code. - -5. Add your dependencies to the `pyproject.toml` (see below a few examples on how to do it). Read more about Poetry below in this `EXTENDED_README.md`. -6. 
Regularly check that your coding style and the documentation you add follow good coding practices. To test whether your code meets the requirements, please run the following: - ```bash - # After activating your environment and from your baseline's directory - cd .. # to go to the top-level directory of all baselines - ./dev/test-baseline.sh - ./dev/test-baseline-structure.sh - ``` - Both `test-baseline.sh` and `test-baseline-structure.sh` will also be automatically run when you create a PR, and both tests need to pass for the baseline to be merged. - To automatically solve some formatting issues and apply easy fixes, please run the formatting script: - ```bash - # After activating your environment and from your baseline's directory - cd .. # to go to the top-level directory of all baselines - ./dev/format-baseline.sh - ``` -7. Ensure that the Python environment for your baseline can be created without errors by simply running `poetry install` and that this is properly described later when you complete the `Environment Setup` section in `README.md`. This is specially important if your environment requires additional steps after doing `poetry install`. -8. Ensure that your baseline runs with default arguments by running `poetry run python -m .main`. Then, describe this and other forms of running your code in the `Running the Experiments` section in `README.md`. -9. Once your code is ready and you have checked: - * that following the instructions in your `README.md` the Python environment can be created correctly - - * that running the code following your instructions can reproduce the experiments in the paper - - , then you just need to create a Pull Request (PR) to kickstart the process of merging your baseline into the Flower repository. - -> Once you are happy to merge your baseline contribution, please delete this `EXTENDED_README.md` file. - - -## About Poetry - -We use Poetry to manage the Python environment for each individual baseline. You can follow the instructions [here](https://python-poetry.org/docs/) to install Poetry in your machine. - - -### Specifying a Python Version (optional) -By default, Poetry will use the Python version in your system. In some settings, you might want to specify a particular version of Python to use inside your Poetry environment. You can do so with [`pyenv`](https://github.com/pyenv/pyenv). Check the documentation for the different ways of installing `pyenv`, but one easy way is using the [automatic installer](https://github.com/pyenv/pyenv-installer): -```bash -curl https://pyenv.run | bash # then, don't forget links to your .bashrc/.zshrc -``` - -You can then install any Python version with `pyenv install ` (e.g. `pyenv install 3.9.17`). Then, in order to use that version for your baseline, you'd do the following: - -```bash -# cd to your baseline directory (i.e. where the `pyproject.toml` is) -pyenv local - -# set that version for poetry -poetry env use - -# then you can install your Poetry environment (see the next setp) -``` - -### Installing Your Environment -With the Poetry tool already installed, you can create an environment for this baseline with commands: -```bash -# run this from the same directory as the `pyproject.toml` file is -poetry install -``` - -This will create a basic Python environment with just Flower and additional packages, including those needed for simulation. Next, you should add the dependencies for your code. It is **critical** that you fix the version of the packages you use using a `=` not a `=^`. 
You can do so via [`poetry add`](https://python-poetry.org/docs/cli/#add). Below are some examples: - -```bash -# For instance, if you want to install tqdm -poetry add tqdm==4.65.0 - -# If you already have a requirements.txt, you can add all those packages (but ensure you have fixed the version) in one go as follows: -poetry add $( cat requirements.txt ) -``` -With each `poetry add` command, the `pyproject.toml` gets automatically updated so you don't need to keep that `requirements.txt` as part of this baseline. - - -More critically however, is adding your ML framework of choice to the list of dependencies. For some frameworks you might be able to do so with the `poetry add` command. Check [the Poetry documentation](https://python-poetry.org/docs/cli/#add) for how to add packages in various ways. For instance, let's say you want to use PyTorch: - -```bash -# with plain `pip` you'd run a command such as: -pip install torch==1.13.1+cu117 torchvision==0.14.1+cu117 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu117 - -# to add the same 3 dependencies to your Poetry environment you'd need to add the URL to the wheel that the above pip command auto-resolves for you. -# You can find those wheels in `https://download.pytorch.org/whl/cu117`. Copy the link and paste it after the `poetry add` command. -# For instance to add `torch==1.13.1+cu117` and a x86 Linux system with Python3.8 you'd: -poetry add https://download.pytorch.org/whl/cu117/torch-1.13.1%2Bcu117-cp38-cp38-linux_x86_64.whl -# you'll need to repeat this for both `torchvision` and `torchaudio` -``` -The above is just an example of how you can add these dependencies. Please refer to the Poetry documentation to extra reference. - -If all attempts fail, you can still install packages via standard `pip`. You'd first need to source/activate your Poetry environment. -```bash -# first ensure you have created your environment -# and installed the base packages provided in the template -poetry install - -# then activate it -poetry shell -``` -Now you are inside your environment (pretty much as when you use `virtualenv` or `conda`) so you can install further packages with `pip`. Please note that, unlike with `poetry add`, these extra requirements won't be captured by `pyproject.toml`. Therefore, please ensure that you provide all instructions needed to: (1) create the base environment with Poetry and (2) install any additional dependencies via `pip` when you complete your `README.md`. \ No newline at end of file From f5e6e9cde0c72403bb99068a5eedad19871daf5c Mon Sep 17 00:00:00 2001 From: Peterpan828 Date: Fri, 8 Sep 2023 16:58:26 +0900 Subject: [PATCH 08/51] rm unnecessary files --- baselines/depthfl/depthfl.sh | 23 ----------------------- 1 file changed, 23 deletions(-) delete mode 100755 baselines/depthfl/depthfl.sh diff --git a/baselines/depthfl/depthfl.sh b/baselines/depthfl/depthfl.sh deleted file mode 100755 index ed6982fef9f1..000000000000 --- a/baselines/depthfl/depthfl.sh +++ /dev/null @@ -1,23 +0,0 @@ -#! 
/bin/bash -  -python -m depthfl.main --config-name="heterofl" -python -m depthfl.main --config-name="heterofl" exclusive_learning=true model_size=1 model.scale=false -python -m depthfl.main --config-name="heterofl" exclusive_learning=true model_size=2 model.scale=false -python -m depthfl.main --config-name="heterofl" exclusive_learning=true model_size=3 model.scale=false -python -m depthfl.main --config-name="heterofl" exclusive_learning=true model_size=4 model.scale=false - -python -m depthfl.main fit_config.feddyn=false fit_config.kd=false -python -m depthfl.main fit_config.feddyn=false fit_config.kd=false exclusive_learning=true model_size=1 -python -m depthfl.main fit_config.feddyn=false fit_config.kd=false exclusive_learning=true model_size=2 -python -m depthfl.main fit_config.feddyn=false fit_config.kd=false exclusive_learning=true model_size=3 -python -m depthfl.main fit_config.feddyn=false fit_config.kd=false exclusive_learning=true model_size=4 - -python -m depthfl.main -python -m depthfl.main exclusive_learning=true model_size=1 -python -m depthfl.main exclusive_learning=true model_size=2 -python -m depthfl.main exclusive_learning=true model_size=3 -python -m depthfl.main exclusive_learning=true model_size=4 - -python -m depthfl.main fit_config.feddyn=false fit_config.kd=false fit_config.extended=false - -python -m depthfl.main fit_config.kd=false \ No newline at end of file From 1b52c4e44b92f13a39b09ee40d409a94667a6caf Mon Sep 17 00:00:00 2001 From: Peterpan828 Date: Mon, 11 Sep 2023 15:23:21 +0900 Subject: [PATCH 09/51] comment reflected --- baselines/depthfl/README.md | 26 +++++---- baselines/depthfl/depthfl/main.py | 10 ++-- baselines/depthfl/depthfl/strategy.py | 28 +++++----- baselines/depthfl/depthfl/utils.py | 78 +++++++++++++-------------- baselines/depthfl/pyproject.toml | 5 +- 5 files changed, 78 insertions(+), 69 deletions(-) diff --git a/baselines/depthfl/README.md b/baselines/depthfl/README.md index f00a98981199..8c82d5cd5c0c 100644 --- a/baselines/depthfl/README.md +++ b/baselines/depthfl/README.md @@ -67,9 +67,6 @@ poetry install # activate the environment poetry shell - -# install PyTorch with GPU support. -pip install torch==1.13.1+cu116 torchvision==0.14.1+cu116 torchaudio==0.13.1 --extra-index-url https://download.pytorch.org/whl/cu116 ``` @@ -94,32 +91,39 @@ python -m fedprox.main --config-name="heterofl" # HeteroFL python -m fedprox.main --config-name="heterofl" exclusive_learning=true model_size=1 # exclusive learning - 100% (a) ``` +### Stateful clients comment + +To implement feddyn, stateful clients that store prev_grads information are needed. Since flwr does not yet officially support stateful clients, it was implemented as a temporary measure by loading prev_grads from disk when creating a client, and then storing it again on disk after learning. Specifically, there are files that store the state of each client in the prev_grads folder. + ## Expected Results -With the following command we run DepthFL (FedDyn / FedAvg), InclusiveFL, and HeteroFL to replicate the results of table 2,3,4 in DepthFL paper. +With the following commands we run DepthFL (FedDyn / FedAvg), InclusiveFL, and HeteroFL to replicate the results of Tables 2, 3, and 4 in the DepthFL paper. Note that some experiments contribute results to more than one table. 
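The FedDyn runs below rely on the stateful-client workaround described in the note above: each client round-trips its `prev_grads` through a per-client pickle file. A minimal sketch of that pattern (the helper and constant names here are illustrative; the baseline inlines these calls in `strategy.py` and `client.py`):

```python
import pickle
from pathlib import Path

STATE_DIR = Path("prev_grads")  # created once by the strategy before the first round

def load_state(cid: int):
    # Restore this client's FedDyn state when the (ephemeral) client is created.
    with open(STATE_DIR / f"client_{cid}", "rb") as f:
        return pickle.load(f)

def save_state(cid: int, prev_grads) -> None:
    # Persist the updated state right after local training, before the client is destroyed.
    with open(STATE_DIR / f"client_{cid}", "wb") as f:
        pickle.dump(prev_grads, f)
```

The full list of commands: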
```bash +# table 2 +python -m depthfl.main # table 2 & 4 +python -m depthfl.main exclusive_learning=true model_size=1 +python -m depthfl.main exclusive_learning=true model_size=2 +python -m depthfl.main exclusive_learning=true model_size=3 +python -m depthfl.main exclusive_learning=true model_size=4 + +# table 2 & 3 python -m depthfl.main --config-name="heterofl" python -m depthfl.main --config-name="heterofl" exclusive_learning=true model_size=1 model.scale=false python -m depthfl.main --config-name="heterofl" exclusive_learning=true model_size=2 model.scale=false python -m depthfl.main --config-name="heterofl" exclusive_learning=true model_size=3 model.scale=false python -m depthfl.main --config-name="heterofl" exclusive_learning=true model_size=4 model.scale=false - python -m depthfl.main fit_config.feddyn=false fit_config.kd=false python -m depthfl.main fit_config.feddyn=false fit_config.kd=false exclusive_learning=true model_size=1 python -m depthfl.main fit_config.feddyn=false fit_config.kd=false exclusive_learning=true model_size=2 python -m depthfl.main fit_config.feddyn=false fit_config.kd=false exclusive_learning=true model_size=3 python -m depthfl.main fit_config.feddyn=false fit_config.kd=false exclusive_learning=true model_size=4 -python -m depthfl.main -python -m depthfl.main exclusive_learning=true model_size=1 -python -m depthfl.main exclusive_learning=true model_size=2 -python -m depthfl.main exclusive_learning=true model_size=3 -python -m depthfl.main exclusive_learning=true model_size=4 - +# table 3 python -m depthfl.main fit_config.feddyn=false fit_config.kd=false fit_config.extended=false +# table 4 python -m depthfl.main fit_config.kd=false ``` diff --git a/baselines/depthfl/depthfl/main.py b/baselines/depthfl/depthfl/main.py index 7cb261d5f9f5..cbb18476d68b 100644 --- a/baselines/depthfl/depthfl/main.py +++ b/baselines/depthfl/depthfl/main.py @@ -142,11 +142,11 @@ def fit_config_fn(server_round: int): f"_R={cfg.num_rounds}" ) - utils.plot_metric_from_history( - history, - save_path, - (file_suffix), - ) + # utils.plot_metric_from_history( + # history, + # save_path, + # (file_suffix), + # ) if __name__ == "__main__": diff --git a/baselines/depthfl/depthfl/strategy.py b/baselines/depthfl/depthfl/strategy.py index b8aad370cba0..05e7dae3bca2 100644 --- a/baselines/depthfl/depthfl/strategy.py +++ b/baselines/depthfl/depthfl/strategy.py @@ -49,7 +49,17 @@ class FedDyn(FedAvg): def __init__(self, cfg: DictConfig, net: nn.Module, *args, **kwargs): self.cfg = cfg self.h = [np.zeros(v.shape) for (k, v) in net.state_dict().items()] - self.prev_grads = [ + + # tagging real weights / biases + self.is_weight = [] + for k in net.state_dict().keys(): + if "weight" not in k and "bias" not in k: + self.is_weight.append(False) + else: + self.is_weight.append(True) + + # prev_grads file for each client + prev_grads = [ {k: torch.zeros(v.numel()) for (k, v) in net.named_parameters()} ] * cfg.num_clients @@ -58,16 +68,8 @@ def __init__(self, cfg: DictConfig, net: nn.Module, *args, **kwargs): for idx in range(cfg.num_clients): with open(f"prev_grads/client_{idx}", "wb") as f: - pickle.dump(self.prev_grads[idx], f) + pickle.dump(prev_grads[idx], f) - self.is_weight = [] - - # tagging real weights / biases - for k in net.state_dict().keys(): - if "weight" not in k and "bias" not in k: - self.is_weight.append(False) - else: - self.is_weight.append(True) super().__init__(*args, **kwargs) @@ -85,9 +87,9 @@ def aggregate_fit( if not self.accept_failures and failures: return None, {} - for idx 
in range(self.cfg.num_clients): - with open(f"prev_grads/client_{idx}", "rb") as f: - self.prev_grads[idx] = pickle.load(f) + # for idx in range(self.cfg.num_clients): + # with open(f"prev_grads/client_{idx}", "rb") as f: + # self.prev_grads[idx] = pickle.load(f) # Convert results weights_results = [ diff --git a/baselines/depthfl/depthfl/utils.py b/baselines/depthfl/depthfl/utils.py index 695dedf2b5d3..3677582db47c 100644 --- a/baselines/depthfl/depthfl/utils.py +++ b/baselines/depthfl/depthfl/utils.py @@ -10,45 +10,45 @@ from flwr.server.history import History -def plot_metric_from_history( - hist: History, - save_plot_path: Path, - suffix: Optional[str] = "", -) -> None: - """Function to plot from Flower server History. - - Parameters - ---------- - hist : History - Object containing evaluation for all rounds. - save_plot_path : Path - Folder to save the plot to. - suffix: Optional[str] - Optional string to add at the end of the filename for the plot. - """ - metric_type = "centralized" - metric_dict = ( - hist.metrics_centralized - if metric_type == "centralized" - else hist.metrics_distributed - ) - rounds, values = zip(*metric_dict["accuracy"]) - - rounds_loss, values_loss = zip(*hist.losses_centralized) - - fig, axs = plt.subplots(nrows=2, ncols=1, sharex="row") - axs[0].plot(np.asarray(rounds_loss), np.asarray(values_loss)) - axs[1].plot(np.asarray(rounds_loss), np.asarray(values)) - - axs[0].set_ylabel("Loss") - axs[1].set_ylabel("Accuracy") - - # plt.title(f"{metric_type.capitalize()} Validation - MNIST") - plt.xlabel("Rounds") - # plt.legend(loc="lower right") - - plt.savefig(Path(save_plot_path) / Path(f"{metric_type}_metrics{suffix}.png")) - plt.close() +# def plot_metric_from_history( +# hist: History, +# save_plot_path: Path, +# suffix: Optional[str] = "", +# ) -> None: +# """Function to plot from Flower server History. + +# Parameters +# ---------- +# hist : History +# Object containing evaluation for all rounds. +# save_plot_path : Path +# Folder to save the plot to. +# suffix: Optional[str] +# Optional string to add at the end of the filename for the plot. 
+# """ +# metric_type = "centralized" +# metric_dict = ( +# hist.metrics_centralized +# if metric_type == "centralized" +# else hist.metrics_distributed +# ) +# rounds, values = zip(*metric_dict["accuracy"]) + +# rounds_loss, values_loss = zip(*hist.losses_centralized) + +# fig, axs = plt.subplots(nrows=2, ncols=1, sharex="row") +# axs[0].plot(np.asarray(rounds_loss), np.asarray(values_loss)) +# axs[1].plot(np.asarray(rounds_loss), np.asarray(values)) + +# axs[0].set_ylabel("Loss") +# axs[1].set_ylabel("Accuracy") + +# # plt.title(f"{metric_type.capitalize()} Validation - MNIST") +# plt.xlabel("Rounds") +# # plt.legend(loc="lower right") + +# plt.savefig(Path(save_plot_path) / Path(f"{metric_type}_metrics{suffix}.png")) +# plt.close() def save_results_as_pickle( diff --git a/baselines/depthfl/pyproject.toml b/baselines/depthfl/pyproject.toml index 123ae044d980..e9feac971bd0 100644 --- a/baselines/depthfl/pyproject.toml +++ b/baselines/depthfl/pyproject.toml @@ -38,10 +38,13 @@ classifiers = [ [tool.poetry.dependencies] python = ">=3.8.15, <3.10.0" # ray 1.11.1 doesn't support python 3.10 -flwr = "1.3.0" # don't change this +flwr = { extras = ["simulation"], version = "1.5.0" } ray = "1.11.1" # don't change this hydra-core = "1.3.2" # don't change this matplotlib = "3.7.1" +torch = { url = "https://download.pytorch.org/whl/cu116/torch-1.13.1%2Bcu116-cp39-cp39-linux_x86_64.whl"} +torchvision = { url = "https://download.pytorch.org/whl/cu116/torchvision-0.14.1%2Bcu116-cp39-cp39-linux_x86_64.whl"} + [tool.poetry.dev-dependencies] isort = "==5.11.5" From 1f0e335a33641cd406761c6485dd37a8ad43e4c1 Mon Sep 17 00:00:00 2001 From: Peterpan828 Date: Mon, 11 Sep 2023 16:29:22 +0900 Subject: [PATCH 10/51] update pyproject.toml --- baselines/depthfl/pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/baselines/depthfl/pyproject.toml b/baselines/depthfl/pyproject.toml index e9feac971bd0..720e7f9b4b5b 100644 --- a/baselines/depthfl/pyproject.toml +++ b/baselines/depthfl/pyproject.toml @@ -37,9 +37,9 @@ classifiers = [ ] [tool.poetry.dependencies] -python = ">=3.8.15, <3.10.0" # ray 1.11.1 doesn't support python 3.10 +python = ">=3.8.15, <3.10.0" flwr = { extras = ["simulation"], version = "1.5.0" } -ray = "1.11.1" # don't change this +ray = "2.6.3" # don't change this hydra-core = "1.3.2" # don't change this matplotlib = "3.7.1" torch = { url = "https://download.pytorch.org/whl/cu116/torch-1.13.1%2Bcu116-cp39-cp39-linux_x86_64.whl"} From cdbc87fc296d25b8c218f688c9cba93dd83c6875 Mon Sep 17 00:00:00 2001 From: Peterpan828 Date: Mon, 11 Sep 2023 17:13:10 +0900 Subject: [PATCH 11/51] minor changes --- baselines/depthfl/depthfl/main.py | 17 +------- baselines/depthfl/depthfl/strategy.py | 5 --- baselines/depthfl/depthfl/strategy_hetero.py | 5 +-- baselines/depthfl/depthfl/utils.py | 43 -------------------- 4 files changed, 2 insertions(+), 68 deletions(-) diff --git a/baselines/depthfl/depthfl/main.py b/baselines/depthfl/depthfl/main.py index cbb18476d68b..f72fb62a90c9 100644 --- a/baselines/depthfl/depthfl/main.py +++ b/baselines/depthfl/depthfl/main.py @@ -8,7 +8,7 @@ from hydra.utils import instantiate from omegaconf import DictConfig, OmegaConf -from depthfl import client, server, utils +from depthfl import client, server from depthfl.dataset import load_datasets from depthfl.utils import save_results_as_pickle @@ -132,21 +132,6 @@ def fit_config_fn(server_round: int): save_results_as_pickle(history, file_path=save_path, extra_results={}) # plot results and include them in 
the readme - strategy_name = strategy.__class__.__name__ - file_suffix: str = ( - f"_{strategy_name}" - f"{'_iid' if cfg.dataset_config.iid else ''}" - f"_C={cfg.num_clients}" - f"_B={cfg.batch_size}" - f"_E={cfg.num_epochs}" - f"_R={cfg.num_rounds}" - ) - - # utils.plot_metric_from_history( - # history, - # save_path, - # (file_suffix), - # ) if __name__ == "__main__": diff --git a/baselines/depthfl/depthfl/strategy.py b/baselines/depthfl/depthfl/strategy.py index 05e7dae3bca2..3a19e1da842a 100644 --- a/baselines/depthfl/depthfl/strategy.py +++ b/baselines/depthfl/depthfl/strategy.py @@ -70,7 +70,6 @@ def __init__(self, cfg: DictConfig, net: nn.Module, *args, **kwargs): with open(f"prev_grads/client_{idx}", "wb") as f: pickle.dump(prev_grads[idx], f) - super().__init__(*args, **kwargs) def aggregate_fit( @@ -87,10 +86,6 @@ def aggregate_fit( if not self.accept_failures and failures: return None, {} - # for idx in range(self.cfg.num_clients): - # with open(f"prev_grads/client_{idx}", "rb") as f: - # self.prev_grads[idx] = pickle.load(f) - # Convert results weights_results = [ (parameters_to_ndarrays(fit_res.parameters), fit_res.num_examples) diff --git a/baselines/depthfl/depthfl/strategy_hetero.py b/baselines/depthfl/depthfl/strategy_hetero.py index a4f0ad4e0281..03a2432a15ad 100644 --- a/baselines/depthfl/depthfl/strategy_hetero.py +++ b/baselines/depthfl/depthfl/strategy_hetero.py @@ -25,12 +25,9 @@ class HeteroFL(FedAvg): def __init__(self, cfg: DictConfig, net: nn.Module, *args, **kwargs): self.cfg = cfg self.parameters = [np.zeros(v.shape) for (k, v) in net.state_dict().items()] - self.prev_grads = [ - {k: torch.zeros(v.numel()) for (k, v) in net.named_parameters()} - ] * 100 self.param_idx_lst = [] - model = cfg.model + model = cfg.model # store parameter shapes of different width for i in range(4): model.n_blocks = i + 1 diff --git a/baselines/depthfl/depthfl/utils.py b/baselines/depthfl/depthfl/utils.py index 3677582db47c..69ddc1db2a6e 100644 --- a/baselines/depthfl/depthfl/utils.py +++ b/baselines/depthfl/depthfl/utils.py @@ -5,52 +5,9 @@ from secrets import token_hex from typing import Dict, Optional, Union -import matplotlib.pyplot as plt -import numpy as np from flwr.server.history import History -# def plot_metric_from_history( -# hist: History, -# save_plot_path: Path, -# suffix: Optional[str] = "", -# ) -> None: -# """Function to plot from Flower server History. - -# Parameters -# ---------- -# hist : History -# Object containing evaluation for all rounds. -# save_plot_path : Path -# Folder to save the plot to. -# suffix: Optional[str] -# Optional string to add at the end of the filename for the plot. 
-# """ -# metric_type = "centralized" -# metric_dict = ( -# hist.metrics_centralized -# if metric_type == "centralized" -# else hist.metrics_distributed -# ) -# rounds, values = zip(*metric_dict["accuracy"]) - -# rounds_loss, values_loss = zip(*hist.losses_centralized) - -# fig, axs = plt.subplots(nrows=2, ncols=1, sharex="row") -# axs[0].plot(np.asarray(rounds_loss), np.asarray(values_loss)) -# axs[1].plot(np.asarray(rounds_loss), np.asarray(values)) - -# axs[0].set_ylabel("Loss") -# axs[1].set_ylabel("Accuracy") - -# # plt.title(f"{metric_type.capitalize()} Validation - MNIST") -# plt.xlabel("Rounds") -# # plt.legend(loc="lower right") - -# plt.savefig(Path(save_plot_path) / Path(f"{metric_type}_metrics{suffix}.png")) -# plt.close() - - def save_results_as_pickle( history: History, file_path: Union[str, Path], From 2030b28b0760b7c321f363863f54417d52b42faf Mon Sep 17 00:00:00 2001 From: Peterpan828 Date: Mon, 11 Sep 2023 17:17:59 +0900 Subject: [PATCH 12/51] rm some comments --- baselines/depthfl/depthfl/main.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/baselines/depthfl/depthfl/main.py b/baselines/depthfl/depthfl/main.py index f72fb62a90c9..0dd9df784b3a 100644 --- a/baselines/depthfl/depthfl/main.py +++ b/baselines/depthfl/depthfl/main.py @@ -131,8 +131,6 @@ def fit_config_fn(server_round: int): # the directory created by Hydra for each run save_results_as_pickle(history, file_path=save_path, extra_results={}) - # plot results and include them in the readme - if __name__ == "__main__": main() From 39449fdc552db3c9c7f475877cb3bc0c99b06fae Mon Sep 17 00:00:00 2001 From: Peterpan828 Date: Tue, 12 Sep 2023 19:38:49 +0900 Subject: [PATCH 13/51] fix typo --- baselines/depthfl/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/baselines/depthfl/README.md b/baselines/depthfl/README.md index 8c82d5cd5c0c..c84599cef1e5 100644 --- a/baselines/depthfl/README.md +++ b/baselines/depthfl/README.md @@ -135,7 +135,7 @@ The above commands would generate results in DepthFL paper. The numbers below ar | Scaling Method | Dataset | Global Model | 100% (a) | 75% (b) | 50% (c) | 25% (d) | | :---: | :---: | :---: | :---: | :---: | :---: | :---: | -| HeterFL | CIFAR100 | 57.61 | 64.39 | 66.08 | 62.03 | 51.99 | +| HeteroFL | CIFAR100 | 57.61 | 64.39 | 66.08 | 62.03 | 51.99 | | DepthFL (FedAvg) | CIFAR100 | 72.67 | 67.08 | 70.78 | 68.41 | 59.17 | | DepthFL | CIFAR100 | 76.06 | 69.68 | 73.21 | 70.29 | 60.32 | From 13c9443cb29d41180156c62afa7788c98a14414b Mon Sep 17 00:00:00 2001 From: Peterpan828 Date: Wed, 20 Sep 2023 16:44:37 +0900 Subject: [PATCH 14/51] formatting --- baselines/depthfl/depthfl/dataset.py | 2 +- baselines/depthfl/depthfl/server.py | 2 +- baselines/depthfl/depthfl/strategy_hetero.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/baselines/depthfl/depthfl/dataset.py b/baselines/depthfl/depthfl/dataset.py index 3c3c988bcdbc..8a09826d934b 100644 --- a/baselines/depthfl/depthfl/dataset.py +++ b/baselines/depthfl/depthfl/dataset.py @@ -35,7 +35,7 @@ def load_datasets( # pylint: disable=too-many-arguments Returns ------- Tuple[DataLoader, DataLoader, DataLoader] - The DataLoader for training, the DataLoader for validation, the DataLoader for testing. + The DataLoader for training, validation, and testing. 
""" print(f"Dataset partitioning config: {config}") datasets, testset = _partition_data( diff --git a/baselines/depthfl/depthfl/server.py b/baselines/depthfl/depthfl/server.py index 1e2c600e6dce..358f8b5a353d 100644 --- a/baselines/depthfl/depthfl/server.py +++ b/baselines/depthfl/depthfl/server.py @@ -6,7 +6,7 @@ import torch from flwr.common import FitRes, Parameters, Scalar, parameters_to_ndarrays from flwr.common.logger import log -from flwr.common.typing import NDArrays, Scalar +from flwr.common.typing import NDArrays from flwr.server.client_proxy import ClientProxy from flwr.server.server import Server, fit_clients from hydra.utils import instantiate diff --git a/baselines/depthfl/depthfl/strategy_hetero.py b/baselines/depthfl/depthfl/strategy_hetero.py index 03a2432a15ad..5ada0b308ea6 100644 --- a/baselines/depthfl/depthfl/strategy_hetero.py +++ b/baselines/depthfl/depthfl/strategy_hetero.py @@ -109,7 +109,7 @@ def aggregate_hetero(self, results: List[Tuple[NDArrays, int]]) -> NDArrays: v[count > 0] = tmp_v[count > 0] else: - for weights, cid in results: + for weights, _ in results: tmp_v += weights[i] count += 1 tmp_v = np.divide(tmp_v, count) From 4b0b6b0dff6bf23eb7f8b64493aba09287255b88 Mon Sep 17 00:00:00 2001 From: Peterpan828 <59055419+Peterpan828@users.noreply.github.com> Date: Wed, 20 Sep 2023 16:46:02 +0900 Subject: [PATCH 15/51] Update baselines/depthfl/README.md Co-authored-by: Javier --- baselines/depthfl/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/baselines/depthfl/README.md b/baselines/depthfl/README.md index c84599cef1e5..61add92a5442 100644 --- a/baselines/depthfl/README.md +++ b/baselines/depthfl/README.md @@ -9,7 +9,7 @@ dataset: [CIFAR100] # list of datasets you include in your baseline > Note: If you use this baseline in your work, please remember to cite the original authors of the paper as well as the Flower paper. 
-****Paper:**** : https://openreview.net/forum?id=pf8RIZTMU58 +****Paper:**** : [openreview.net/forum?id=pf8RIZTMU58](https://openreview.net/forum?id=pf8RIZTMU58) ****Authors:**** : Minjae Kim, Sangyoon Yu, Suhyun Kim, Soo-Mook Moon From eac35f34671f9fe47dcb203b180bd1afc205e913 Mon Sep 17 00:00:00 2001 From: Peterpan828 <59055419+Peterpan828@users.noreply.github.com> Date: Wed, 20 Sep 2023 16:54:53 +0900 Subject: [PATCH 16/51] Update README.md --- baselines/depthfl/README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/baselines/depthfl/README.md b/baselines/depthfl/README.md index 61add92a5442..4b266c550c07 100644 --- a/baselines/depthfl/README.md +++ b/baselines/depthfl/README.md @@ -62,6 +62,12 @@ The following table shows the main hyperparameters for this baseline with their To construct the Python environment follow these steps: ```bash +# Set python version +pyenv local 3.10.6 + +# Tell poetry to use python 3.10 +poetry env use 3.10.6 + # install the base Poetry environment poetry install From aca8892c4380b7b45f4a2cd5183743c5c5e0231d Mon Sep 17 00:00:00 2001 From: Peterpan828 <59055419+Peterpan828@users.noreply.github.com> Date: Wed, 20 Sep 2023 16:55:36 +0900 Subject: [PATCH 17/51] Update baselines/depthfl/pyproject.toml Co-authored-by: Javier --- baselines/depthfl/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/baselines/depthfl/pyproject.toml b/baselines/depthfl/pyproject.toml index 720e7f9b4b5b..e1c384e37e93 100644 --- a/baselines/depthfl/pyproject.toml +++ b/baselines/depthfl/pyproject.toml @@ -37,7 +37,7 @@ classifiers = [ ] [tool.poetry.dependencies] -python = ">=3.8.15, <3.10.0" +python = ">=3.10.0, <3.11.0" flwr = { extras = ["simulation"], version = "1.5.0" } ray = "2.6.3" # don't change this hydra-core = "1.3.2" # don't change this From 8d71765d8b0d5b2443ebbae3b364131ea5bd5030 Mon Sep 17 00:00:00 2001 From: Peterpan828 <59055419+Peterpan828@users.noreply.github.com> Date: Wed, 20 Sep 2023 16:55:58 +0900 Subject: [PATCH 18/51] Update baselines/depthfl/pyproject.toml Co-authored-by: Javier --- baselines/depthfl/pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/baselines/depthfl/pyproject.toml b/baselines/depthfl/pyproject.toml index e1c384e37e93..5e5a834a1325 100644 --- a/baselines/depthfl/pyproject.toml +++ b/baselines/depthfl/pyproject.toml @@ -42,8 +42,8 @@ flwr = { extras = ["simulation"], version = "1.5.0" } ray = "2.6.3" # don't change this hydra-core = "1.3.2" # don't change this matplotlib = "3.7.1" -torch = { url = "https://download.pytorch.org/whl/cu116/torch-1.13.1%2Bcu116-cp39-cp39-linux_x86_64.whl"} -torchvision = { url = "https://download.pytorch.org/whl/cu116/torchvision-0.14.1%2Bcu116-cp39-cp39-linux_x86_64.whl"} +torch = { url = "https://download.pytorch.org/whl/cu116/torch-1.13.1%2Bcu116-cp310-cp310-linux_x86_64.whl"} +torchvision = { url = "https://download.pytorch.org/whl/cu116/torchvision-0.14.1%2Bcu116-cp310-cp310-linux_x86_64.whl"} [tool.poetry.dev-dependencies] From 3307e38e5fc4438b67a51351740a00f9f76012a9 Mon Sep 17 00:00:00 2001 From: Peterpan828 Date: Wed, 20 Sep 2023 18:02:47 +0900 Subject: [PATCH 19/51] updated README --- baselines/depthfl/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/baselines/depthfl/README.md b/baselines/depthfl/README.md index 4b266c550c07..f034a2949f4d 100644 --- a/baselines/depthfl/README.md +++ b/baselines/depthfl/README.md @@ -63,6 +63,7 @@ To construct the Python environment follow these steps: ```bash # Set python version +pyenv 
install 3.10.6 pyenv local 3.10.6 # Tell poetry to use python 3.10 From 8f525a7f747ad1a1607852ad099595822c4b5dd9 Mon Sep 17 00:00:00 2001 From: Peterpan828 <59055419+Peterpan828@users.noreply.github.com> Date: Fri, 29 Sep 2023 19:19:07 +0900 Subject: [PATCH 20/51] Update baselines/depthfl/README.md Co-authored-by: Javier --- baselines/depthfl/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/baselines/depthfl/README.md b/baselines/depthfl/README.md index f034a2949f4d..a8794132cc09 100644 --- a/baselines/depthfl/README.md +++ b/baselines/depthfl/README.md @@ -1,8 +1,8 @@ --- title: DepthFL:Depthwise Federated Learning for Heterogeneous Clients url: https://openreview.net/forum?id=pf8RIZTMU58 -labels: [image classification, system heterogeneity] # please add between 4 and 10 single-word (maybe two-words) labels (e.g. "system heterogeneity", "image classification", "asynchronous", "weight sharing", "cross-silo") -dataset: [CIFAR100] # list of datasets you include in your baseline +labels: [image classification, system heterogeneity, cross-device] +dataset: [CIFAR100] --- # DepthFL: Depthwise Federated Learning for Heterogeneous Clients From deb28447f4dab9b77b12038670444af7273f2b12 Mon Sep 17 00:00:00 2001 From: Peterpan828 <59055419+Peterpan828@users.noreply.github.com> Date: Fri, 29 Sep 2023 19:28:45 +0900 Subject: [PATCH 21/51] Update baselines/depthfl/README.md Co-authored-by: Javier --- baselines/depthfl/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/baselines/depthfl/README.md b/baselines/depthfl/README.md index a8794132cc09..222a47008e11 100644 --- a/baselines/depthfl/README.md +++ b/baselines/depthfl/README.md @@ -100,7 +100,7 @@ python -m fedprox.main --config-name="heterofl" exclusive_learning=true model_si ### Stateful clients comment -To implement feddyn, stateful clients that store prev_grads information are needed. Since flwr does not yet officially support stateful clients, it was implemented as a temporary measure by loading prev_grads from disk when creating a client, and then storing it again on disk after learning. Specifically, there are files that store the state of each client in the prev_grads folder. +To implement `feddyn`, stateful clients that store prev_grads information are needed. Since flwr does not yet officially support stateful clients, it was implemented as a temporary measure by loading `prev_grads` from disk when creating a client, and then storing it again on disk after learning. Specifically, there are files that store the state of each client in the `prev_grads` folder. ## Expected Results From 3c6c6117bcee23ddbfcde51b4abae3903365b35e Mon Sep 17 00:00:00 2001 From: Peterpan828 <59055419+Peterpan828@users.noreply.github.com> Date: Fri, 29 Sep 2023 23:34:55 +0900 Subject: [PATCH 22/51] Update README.md --- baselines/depthfl/README.md | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) diff --git a/baselines/depthfl/README.md b/baselines/depthfl/README.md index 222a47008e11..6ae0671fc6e6 100644 --- a/baselines/depthfl/README.md +++ b/baselines/depthfl/README.md @@ -142,9 +142,7 @@ The above commands would generate results in DepthFL paper. 
The numbers below ar

| Scaling Method | Dataset | Global Model | 100% (a) | 75% (b) | 50% (c) | 25% (d) |
| :---: | :---: | :---: | :---: | :---: | :---: | :---: |
-| HeteroFL | CIFAR100 | 57.61 | 64.39 | 66.08 | 62.03 | 51.99 |
-| DepthFL (FedAvg) | CIFAR100 | 72.67 | 67.08 | 70.78 | 68.41 | 59.17 |
-| DepthFL | CIFAR100 | 76.06 | 69.68 | 73.21 | 70.29 | 60.32 |
+| HeteroFL<br>DepthF (FedAvg)<br>DepthFL | CIFAR100 | 57.61<br>72.67<br>76.06 | 64.39<br>67.08<br>69.68 | 66.08<br>70.78<br>73.21 | 62.03<br>68.41<br>70.29 | 51.99<br>59.17<br>60.32 |

**Table 3**

@@ -152,14 +150,11 @@ Accuracy of global sub-models compared to exclusive learning on CIFAR-100.

| Method | Algorithm | Classifier 1/4 | Classifier 2/4 | Classifier 3/4 | Classifier 4/4 |
| :---: | :---: | :---: | :---: | :---: | :---: |
-| Width Scaling | Exclusive Learning | 64.39 | 66.08 | 62.03 | 51.99 |
-| Width Scaling | HeteroFL | 51.08 | 55.89 | 58.29 | 57.61 |
+| Width Scaling | Exclusive Learning<br>HeteroFL | 64.39<br>51.08 | 66.08<br>55.89 | 62.03<br>58.29 | 51.99<br>57.61 |

| Method | Algorithm | Classifier 1/4 | Classifier 2/4 | Classifier 3/4 | Classifier 4/4 |
| :---: | :---: | :---: | :---: | :---: | :---: |
-| Depth Scaling | Exclusive Learning | 67.08 | 68.00 | 66.19 | 56.78 |
-| Depth Scaling | InclusiveFL | 47.61 | 53.88 | 59.48 | 60.46 |
-| Depth Scaling | DepthFL (FedAvg)| 66.18 | 67.56 | 67.97 | 68.01 |
+| Depth Scaling | Exclusive Learning<br>InclusiveFL<br>DepthFL (FedAvg) | 67.08<br>47.61<br>66.18 | 68.00<br>53.88<br>67.56 | 66.19<br>59.48<br>67.97 | 56.78<br>60.46<br>68.01 |

**Table 4**

@@ -167,7 +162,5 @@ Accuracy of the global model with/without self distillation on CIFAR-100.

| Distribution | Dataset | KD | Classifier 1/4 | Classifier 2/4 | Classifier 3/4 | Classifier 4/4 | Ensemble |
| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
-| IID | CIFAR-100 | ✗ | 70.13 | 69.63 | 68.92 | 68.92 | 74.48 |
-| IID | CIFAR-100 | ✓ | 71.74 | 73.35 | 73.57 | 73.55 | 76.06 |
-
+
+| IID | CIFAR-100 | ✗<br>✓ | 70.13<br>71.74 | 69.63<br>73.35 | 68.92<br>73.57 | 68.92<br>73.55 | 74.48<br>76.06 |

From 58234160e0cf8a1588d05bc5e16cd99a0129360d Mon Sep 17 00:00:00 2001
From: Peterpan828 <59055419+Peterpan828@users.noreply.github.com>
Date: Fri, 29 Sep 2023 23:40:50 +0900
Subject: [PATCH 23/51] Update README.md

---
 baselines/depthfl/README.md | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/baselines/depthfl/README.md b/baselines/depthfl/README.md
index 6ae0671fc6e6..afd110861f3b 100644
--- a/baselines/depthfl/README.md
+++ b/baselines/depthfl/README.md
@@ -116,11 +116,14 @@ python -m depthfl.main exclusive_learning=true model_size=3
 python -m depthfl.main exclusive_learning=true model_size=4
 
 # table 2 & 3
+
+# HeteroFL & corresponding exclusive learning
 python -m depthfl.main --config-name="heterofl"
 python -m depthfl.main --config-name="heterofl" exclusive_learning=true model_size=1 model.scale=false
 python -m depthfl.main --config-name="heterofl" exclusive_learning=true model_size=2 model.scale=false
 python -m depthfl.main --config-name="heterofl" exclusive_learning=true model_size=3 model.scale=false
 python -m depthfl.main --config-name="heterofl" exclusive_learning=true model_size=4 model.scale=false
+# DepthFL (FedAvg) & corresponding exclusive learning
 python -m depthfl.main fit_config.feddyn=false fit_config.kd=false
 python -m depthfl.main fit_config.feddyn=false fit_config.kd=false exclusive_learning=true model_size=1
 python -m depthfl.main fit_config.feddyn=false fit_config.kd=false exclusive_learning=true model_size=2
@@ -142,7 +145,7 @@ The above commands would generate results in DepthFL paper. The numbers below ar

| Scaling Method | Dataset | Global Model | 100% (a) | 75% (b) | 50% (c) | 25% (d) |
| :---: | :---: | :---: | :---: | :---: | :---: | :---: |
-| HeteroFL<br>DepthF (FedAvg)<br>DepthFL | CIFAR100 | 57.61<br>72.67<br>76.06 | 64.39<br>67.08<br>69.68 | 66.08<br>70.78<br>73.21 | 62.03<br>68.41<br>70.29 | 51.99<br>59.17<br>60.32 |
+| HeteroFL<br>DepthFL (FedAvg)<br>DepthFL | CIFAR100 | 57.61<br>72.67<br>76.06 | 64.39<br>67.08<br>69.68 | 66.08<br>70.78<br>73.21 | 62.03<br>68.41<br>70.29 | 51.99<br>59.17<br>
60.32 | **Table 3** From 249178eb5933500f4a9f76c7b012cbd861c009df Mon Sep 17 00:00:00 2001 From: Peterpan828 <59055419+Peterpan828@users.noreply.github.com> Date: Fri, 29 Sep 2023 23:42:18 +0900 Subject: [PATCH 24/51] Update README.md --- baselines/depthfl/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/baselines/depthfl/README.md b/baselines/depthfl/README.md index afd110861f3b..44ba82b69541 100644 --- a/baselines/depthfl/README.md +++ b/baselines/depthfl/README.md @@ -94,8 +94,8 @@ python -m depthfl.main fit_config.feddyn=false fit_config.kd=false fit_config.ex To run using HeteroFL: ```bash # since sbn takes too long, we test global model every 50 rounds. -python -m fedprox.main --config-name="heterofl" # HeteroFL -python -m fedprox.main --config-name="heterofl" exclusive_learning=true model_size=1 # exclusive learning - 100% (a) +python -m depthfl.main --config-name="heterofl" # HeteroFL +python -m depthfl.main --config-name="heterofl" exclusive_learning=true model_size=1 # exclusive learning - 100% (a) ``` ### Stateful clients comment From edb4d889bb11b3a885bb4c0339f057c0c57cdee2 Mon Sep 17 00:00:00 2001 From: Peterpan828 <59055419+Peterpan828@users.noreply.github.com> Date: Sun, 1 Oct 2023 18:02:07 +0900 Subject: [PATCH 25/51] Update baselines/depthfl/README.md Co-authored-by: Javier --- baselines/depthfl/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/baselines/depthfl/README.md b/baselines/depthfl/README.md index 44ba82b69541..48f111b39765 100644 --- a/baselines/depthfl/README.md +++ b/baselines/depthfl/README.md @@ -22,7 +22,7 @@ dataset: [CIFAR100] ****Datasets:**** CIFAR100 from PyTorch's Torchvision -****Hardware Setup:**** These experiments were run on a server with Nvidia 3090 GPUs. Any machine with 1x 8GB GPU or more would be able to run it in a reasonable amount of time. +****Hardware Setup:**** These experiments were run on a server with Nvidia 3090 GPUs. Any machine with 1x 8GB GPU or more would be able to run it in a reasonable amount of time. With the default settings, clients make use of 1.3GB of VRAM. Lower `num_gpus` in `client_resources` to train more clients in parallel on your GPU(s). ****Contributors:**** Minjae Kim From afc70c0ef6cfa3d91631bf3643c14e3985e9b30a Mon Sep 17 00:00:00 2001 From: Peterpan828 Date: Wed, 4 Oct 2023 10:55:36 +0900 Subject: [PATCH 26/51] non-IID data distribution --- baselines/depthfl/README.md | 5 +++- baselines/depthfl/depthfl/conf/config.yaml | 1 + baselines/depthfl/depthfl/dataset.py | 1 + .../depthfl/depthfl/dataset_preparation.py | 30 +++++++++++++++++-- 4 files changed, 34 insertions(+), 3 deletions(-) diff --git a/baselines/depthfl/README.md b/baselines/depthfl/README.md index 48f111b39765..0ad8cff703c6 100644 --- a/baselines/depthfl/README.md +++ b/baselines/depthfl/README.md @@ -135,6 +135,8 @@ python -m depthfl.main fit_config.feddyn=false fit_config.kd=false fit_config.ex # table 4 python -m depthfl.main fit_config.kd=false +python -m depthfl.main dataset_config.iid=false +python -m depthfl.main dataset_config.iid=false fit_config.kd=false ``` The above commands would generate results in DepthFL paper. The numbers below are the results of a single run, and although they do not perfectly match the numbers recorded in the paper, they are very close. @@ -165,5 +167,6 @@ Accuracy of the global model with/without self distillation on CIFAR-100. 
| Distribution | Dataset | KD | Classifier 1/4 | Classifier 2/4 | Classifier 3/4 | Classifier 4/4 | Ensemble |
| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
-| IID | CIFAR-100 | ✗<br>✓ | 70.13<br>71.74 | 69.63<br>73.35 | 68.92<br>73.57 | 68.92<br>73.55 | 74.48<br>76.06 |
+| IID | CIFAR100 | ✗<br>✓ | 70.13<br>71.74 | 69.63<br>73.35 | 68.92<br>73.57 | 68.92<br>73.55 | 74.48<br>76.06 |
+| non-IID | CIFAR100 | ✗<br>✓ | 67.94<br>70.33 | 68.68<br>71.88 | 68.46<br>72.43 | 67.78<br>72.34 | 73.18<br>
74.92 | diff --git a/baselines/depthfl/depthfl/conf/config.yaml b/baselines/depthfl/depthfl/conf/config.yaml index ce319f95322f..e5f5f92d824e 100644 --- a/baselines/depthfl/depthfl/conf/config.yaml +++ b/baselines/depthfl/depthfl/conf/config.yaml @@ -19,6 +19,7 @@ server_device: cuda dataset_config: iid: true + beta: 0.5 fit_config: feddyn: true diff --git a/baselines/depthfl/depthfl/dataset.py b/baselines/depthfl/depthfl/dataset.py index 8a09826d934b..fedb524a03b6 100644 --- a/baselines/depthfl/depthfl/dataset.py +++ b/baselines/depthfl/depthfl/dataset.py @@ -41,6 +41,7 @@ def load_datasets( # pylint: disable=too-many-arguments datasets, testset = _partition_data( num_clients, iid=config.iid, + beta=config.beta, seed=seed, ) # Split each partition into train/val and create DataLoader diff --git a/baselines/depthfl/depthfl/dataset_preparation.py b/baselines/depthfl/depthfl/dataset_preparation.py index 5b73f531507c..c0cb4b84297c 100644 --- a/baselines/depthfl/depthfl/dataset_preparation.py +++ b/baselines/depthfl/depthfl/dataset_preparation.py @@ -42,6 +42,7 @@ def _download_data() -> Tuple[Dataset, Dataset]: def _partition_data( num_clients, iid: Optional[bool] = True, + beta: Optional[float] = 0.5, seed: Optional[int] = 41, ) -> Tuple[List[Dataset], Dataset]: """Split training set into iid or non iid partitions to simulate the federated @@ -71,12 +72,37 @@ def _partition_data( np.random.seed(seed) num_sample = int(len(trainset) / (num_clients)) index = list(range(len(trainset))) - for _i in range(num_clients): + for _ in range(num_clients): sample_idx = np.random.choice(index, num_sample, replace=False) index = list(set(index) - set(sample_idx)) datasets.append(Subset(trainset, sample_idx)) else: - pass + labels = np.array([label for _, label in trainset]) + min_size = 0 + K = np.max(labels) + 1 + N = labels.shape[0] + # net_dataidx_map = {} + n_nets = num_clients + np.random.seed(seed) + + while min_size < 10: + idx_batch = [[] for _ in range(n_nets)] + # for each class in the dataset + for k in range(K): + idx_k = np.where(labels == k)[0] + np.random.shuffle(idx_k) + proportions = np.random.dirichlet(np.repeat(beta, n_nets)) + ## Balance + proportions = np.array([p*(len(idx_j) Date: Wed, 4 Oct 2023 11:04:36 +0900 Subject: [PATCH 27/51] Update Readme --- baselines/depthfl/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/baselines/depthfl/README.md b/baselines/depthfl/README.md index 0ad8cff703c6..fef8eccfd92d 100644 --- a/baselines/depthfl/README.md +++ b/baselines/depthfl/README.md @@ -37,7 +37,7 @@ dataset: [CIFAR100] | Dataset | #classes | #partitions | partitioning method | | :------ | :---: | :---: | :---: | -| CIFAR100 | 100 | 100 | IID | +| CIFAR100 | 100 | 100 | IID or Non-IID | **Training Hyperparameters:** The following table shows the main hyperparameters for this baseline with their default value (i.e. 
the value used if you run `python main.py` directly) From 3492c22041d72ffd6ed9bf27ad4aac56b6ddb621 Mon Sep 17 00:00:00 2001 From: Peterpan828 <59055419+Peterpan828@users.noreply.github.com> Date: Wed, 4 Oct 2023 11:10:24 +0900 Subject: [PATCH 28/51] Update README.md --- baselines/depthfl/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/baselines/depthfl/README.md b/baselines/depthfl/README.md index fef8eccfd92d..5e905b921d45 100644 --- a/baselines/depthfl/README.md +++ b/baselines/depthfl/README.md @@ -1,7 +1,7 @@ --- title: DepthFL:Depthwise Federated Learning for Heterogeneous Clients url: https://openreview.net/forum?id=pf8RIZTMU58 -labels: [image classification, system heterogeneity, cross-device] +labels: [image classification, system heterogeneity, cross-device, knowledge distillation] dataset: [CIFAR100] --- From 20169301ee51f3625b757d57a415f124aa790b77 Mon Sep 17 00:00:00 2001 From: Peterpan828 Date: Wed, 4 Oct 2023 11:16:06 +0900 Subject: [PATCH 29/51] formatting --- baselines/depthfl/depthfl/dataset_preparation.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/baselines/depthfl/depthfl/dataset_preparation.py b/baselines/depthfl/depthfl/dataset_preparation.py index c0cb4b84297c..ebbfed68959d 100644 --- a/baselines/depthfl/depthfl/dataset_preparation.py +++ b/baselines/depthfl/depthfl/dataset_preparation.py @@ -94,10 +94,18 @@ def _partition_data( np.random.shuffle(idx_k) proportions = np.random.dirichlet(np.repeat(beta, n_nets)) ## Balance - proportions = np.array([p*(len(idx_j) Date: Fri, 6 Oct 2023 14:41:10 +0900 Subject: [PATCH 30/51] formatting --- baselines/depthfl/depthfl/__init__.py | 1 + baselines/depthfl/depthfl/client.py | 21 ++- baselines/depthfl/depthfl/dataset.py | 2 +- .../depthfl/depthfl/dataset_preparation.py | 17 ++- baselines/depthfl/depthfl/main.py | 2 + baselines/depthfl/depthfl/models.py | 29 +++- baselines/depthfl/depthfl/resnet.py | 130 ++++++++---------- baselines/depthfl/depthfl/resnet_hetero.py | 118 +++++++--------- baselines/depthfl/depthfl/server.py | 22 ++- baselines/depthfl/depthfl/strategy.py | 5 +- baselines/depthfl/depthfl/strategy_hetero.py | 3 + baselines/depthfl/depthfl/utils.py | 12 +- 12 files changed, 186 insertions(+), 176 deletions(-) diff --git a/baselines/depthfl/depthfl/__init__.py b/baselines/depthfl/depthfl/__init__.py index e69de29bb2d1..3343905e1879 100644 --- a/baselines/depthfl/depthfl/__init__.py +++ b/baselines/depthfl/depthfl/__init__.py @@ -0,0 +1 @@ +"""Flower summer of reproducibility : DepthFL (ICLR' 23).""" diff --git a/baselines/depthfl/depthfl/client.py b/baselines/depthfl/depthfl/client.py index 00df46b616c1..a4884dcf215c 100644 --- a/baselines/depthfl/depthfl/client.py +++ b/baselines/depthfl/depthfl/client.py @@ -63,11 +63,11 @@ def __init__( ] # store client's weights' shape (for HeteroFL) def get_parameters(self, config: Dict[str, Scalar]) -> NDArrays: - """Returns the parameters of the current net.""" + """Return the parameters of the current net.""" return [val.cpu().numpy() for _, val in self.net.state_dict().items()] def set_parameters(self, parameters: NDArrays) -> None: - """Changes the parameters of the model using the given ones.""" + """Change the parameters of the model using the given ones.""" params_dict = zip(self.net.state_dict().keys(), parameters) state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict}) self.net.load_state_dict(prune(state_dict, self.param_idx), strict=True) @@ -75,7 +75,7 @@ def set_parameters(self, 
parameters: NDArrays) -> None: def fit( self, parameters: NDArrays, config: Dict[str, Scalar] ) -> Tuple[NDArrays, Dict, int]: - """Implements distributed fit function for a given client.""" + """Implement distributed fit function for a given client.""" self.set_parameters(parameters) num_epochs = self.num_epochs @@ -109,7 +109,7 @@ def fit( def evaluate( self, parameters: NDArrays, config: Dict[str, Scalar] ) -> Tuple[float, int, Dict]: - """Implements distributed evaluation for a given client.""" + """Implement distributed evaluation for a given client.""" self.set_parameters(parameters) loss, accuracy, accuracy_single = test(self.net, self.valloader, self.device) return ( @@ -129,10 +129,8 @@ def gen_client_fn( learning_rate_decay: float, models: List[DictConfig], cfg: DictConfig, -) -> Tuple[ - Callable[[str], FlowerClient], DataLoader -]: # pylint: disable=too-many-arguments - """Generates the client function that creates the Flower Clients. +) -> Callable[[str], FlowerClient]: + """Generate the client function that creates the Flower Clients. Parameters ---------- @@ -156,12 +154,13 @@ def gen_client_fn( The learning rate decay ratio per round for the SGD optimizer of clients. models : List[DictConfig] A list of DictConfigs, each pointing to the model config of client's local model + cfg : DictConfig + Configuration Returns ------- - Tuple[Callable[[str], FlowerClient], DataLoader] - A tuple containing the client function that creates Flower Clients and - the DataLoader that will be used for testing + Callable[[str], FlowerClient] + client function that creates Flower Clients """ def client_fn(cid: str) -> FlowerClient: diff --git a/baselines/depthfl/depthfl/dataset.py b/baselines/depthfl/depthfl/dataset.py index fedb524a03b6..c2024fe068a0 100644 --- a/baselines/depthfl/depthfl/dataset.py +++ b/baselines/depthfl/depthfl/dataset.py @@ -16,7 +16,7 @@ def load_datasets( # pylint: disable=too-many-arguments batch_size: Optional[int] = 32, seed: Optional[int] = 41, ) -> Tuple[DataLoader, DataLoader, DataLoader]: - """Creates the dataloaders to be fed into the model. + """Create the dataloaders to be fed into the model. Parameters ---------- diff --git a/baselines/depthfl/depthfl/dataset_preparation.py b/baselines/depthfl/depthfl/dataset_preparation.py index ebbfed68959d..778695063f5d 100644 --- a/baselines/depthfl/depthfl/dataset_preparation.py +++ b/baselines/depthfl/depthfl/dataset_preparation.py @@ -1,3 +1,5 @@ +"""Dataset(CIFAR100) preparation for DepthFL.""" + from typing import List, Optional, Tuple import numpy as np @@ -7,7 +9,7 @@ def _download_data() -> Tuple[Dataset, Dataset]: - """Downloads (if necessary) and returns the CIFAR-100 dataset. + """Download (if necessary) and returns the CIFAR-100 dataset. Returns ------- @@ -45,24 +47,25 @@ def _partition_data( beta: Optional[float] = 0.5, seed: Optional[int] = 41, ) -> Tuple[List[Dataset], Dataset]: - """Split training set into iid or non iid partitions to simulate the federated - setting. + """Split training set to simulate the federated setting. 
Parameters ---------- num_clients : int The number of clients that hold a part of the data iid : bool, optional - Whether the data should be independent and identically distributed between - the clients or if the data should first be sorted by labels and distributed by chunks - to each client (used to test the convergence in a worst case scenario), by default False + Whether the data should be independent and identically distributed + or if the data should first be sorted by labels and distributed by + noniid manner to each client, by default true + beta : hyperparameter for dirichlet distribution seed : int, optional Used to set a fix seed to replicate experiments, by default 42 Returns ------- Tuple[List[Dataset], Dataset] - A list of dataset for each client and a single dataset to be use for testing the model. + A list of dataset for each client and a + single dataset to be use for testing the model. """ trainset, testset = _download_data() diff --git a/baselines/depthfl/depthfl/main.py b/baselines/depthfl/depthfl/main.py index 0dd9df784b3a..6a908ca5b0fb 100644 --- a/baselines/depthfl/depthfl/main.py +++ b/baselines/depthfl/depthfl/main.py @@ -1,3 +1,5 @@ +"""DepthFL main.""" + import copy import flwr as fl diff --git a/baselines/depthfl/depthfl/models.py b/baselines/depthfl/depthfl/models.py index ede83ca14694..a8fb251b078f 100644 --- a/baselines/depthfl/depthfl/models.py +++ b/baselines/depthfl/depthfl/models.py @@ -17,6 +17,7 @@ def __init__(self): super(KLLoss, self).__init__() def forward(self, pred, label): + """KL loss forward.""" T = 1 predict = F.log_softmax(pred / T, dim=1) target_data = F.softmax(label / T, dim=1) @@ -59,8 +60,18 @@ def train( # pylint: disable=too-many-arguments The number of epochs the model should be trained for. learning_rate : float The learning rate for the SGD optimizer. + feddyn : bool + whether using feddyn or fedavg + kd : bool + whether using self distillation + consistency_weight : float + hyperparameter for self distillation + prev_grads : dict + control variate for feddyn alpha : float Hyperparameter for the FedDyn. + extended : bool + if extended, train all sub-classifiers within local model """ criterion = torch.nn.CrossEntropyLoss() criterion_kl = KLLoss().cuda() @@ -127,10 +138,22 @@ def _train_one_epoch( # pylint: disable=too-many-arguments The device on which the model should be trained, either 'cpu' or 'cuda'. criterion : torch.nn.CrossEntropyLoss The loss function to use for training + criterion_kl : nn.Module + The loss function for self distillation optimizer : torch.optim.Adam The optimizer to use for training + feddyn : bool + whether using feddyn or fedavg + kd : bool + whether using self distillation + consistency_weight : float + hyperparameter for self distillation + prev_grads : dict + control variate for feddyn alpha : float Hyperparameter for the FedDyn. + extended : bool + if extended, train all sub-classifiers within local model """ for images, labels in trainloader: images, labels = images.to(device), labels.to(device) @@ -191,7 +214,8 @@ def test( Returns ------- Tuple[float, float, List[float]] - The loss and the accuracy of the input model on the given data. + The loss and the accuracy of the global model + and the list of accuracy for each classifier on the given data. """ criterion = torch.nn.CrossEntropyLoss() correct, total, loss = 0, 0, 0.0 @@ -245,7 +269,8 @@ def test_sbn( Returns ------- Tuple[float, float, List[float]] - The loss and the accuracy of the input model on the given data. 
+ The loss and the accuracy of the global model + and the list of accuracy for each classifier on the given data. """ # static batch normalization for trainloader in trainloaders: diff --git a/baselines/depthfl/depthfl/resnet.py b/baselines/depthfl/depthfl/resnet.py index aeb7a7e079f7..b8dfe50da272 100644 --- a/baselines/depthfl/depthfl/resnet.py +++ b/baselines/depthfl/depthfl/resnet.py @@ -1,10 +1,13 @@ +"""ResNet18 for DepthFL.""" + from typing import Callable, Optional -import torch import torch.nn as nn class MyGroupNorm(nn.Module): + """Group Normalization layer.""" + def __init__(self, num_channels): super(MyGroupNorm, self).__init__() ## change num_groups to 32 @@ -13,31 +16,39 @@ def __init__(self, num_channels): ) def forward(self, x): + """GN forward.""" x = self.norm(x) return x class MyBatchNorm(nn.Module): + """Batch Normalization layer.""" + def __init__(self, num_channels): super(MyBatchNorm, self).__init__() self.norm = nn.BatchNorm2d(num_channels, track_running_stats=True) def forward(self, x): + """BN forward.""" x = self.norm(x) return x def conv3x3(in_planes, out_planes, stride=1): + """Convolution layer 3x3.""" return nn.Conv2d( in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False ) def conv1x1(in_planes, planes, stride=1): + """Convolution layer 1x1.""" return nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False) class SepConv(nn.Module): + """Bottleneck layer module.""" + def __init__( self, channel_in, @@ -77,10 +88,13 @@ def __init__( ) def forward(self, x): + """SepConv forward.""" return self.op(x) class BasicBlock(nn.Module): + """Basic Block for ResNet18.""" + expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None): @@ -94,6 +108,7 @@ def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None) self.stride = stride def forward(self, x): + """BasicBlock forward.""" residual = x output = self.conv1(x) @@ -111,53 +126,15 @@ def forward(self, x): return output -class BottleneckBlock(nn.Module): - expansion = 4 - - def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None): - super(BottleneckBlock, self).__init__() - self.conv1 = conv1x1(inplanes, planes) - self.bn1 = norm_layer(planes) - self.relu = nn.ReLU(inplace=True) - - self.conv2 = conv3x3(planes, planes, stride) - self.bn2 = norm_layer(planes) - - self.conv3 = conv1x1(planes, planes * self.expansion) - self.bn3 = norm_layer(planes * self.expansion) - - self.downsample = downsample - self.stride = stride - - def forward(self, x): - residual = x - - output = self.conv1(x) - output = self.bn1(output) - output = self.relu(output) - - output = self.conv2(output) - output = self.bn2(output) - output = self.relu(output) - - output = self.conv3(output) - output = self.bn3(output) - - if self.downsample is not None: - residual = self.downsample(x) - - output += residual - output = self.relu(output) - - return output - - class Multi_ResNet(nn.Module): - """Resnet model + """Resnet model. + Args: block (class): block type, BasicBlock or BottleneckBlock layers (int list): layer num in each block + n_blocks (int) : Depth of network num_classes (int): class num. + norm_layer (class): type of normalization layer. 
""" def __init__( @@ -166,13 +143,12 @@ def __init__( layers, n_blocks, num_classes=1000, - norm_layer: Optional[Callable[..., nn.Module]] = None, + norm_layer: Optional[Callable[..., nn.Module]] = MyBatchNorm, ): super(Multi_ResNet, self).__init__() self.n_blocks = n_blocks self.inplanes = 64 self.norm_layer = norm_layer - # self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False) self.conv1 = nn.Conv2d( 3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False ) @@ -282,12 +258,14 @@ def __init__( nn.init.constant_(m.bias, 0) def _make_layer(self, block, planes, layers, stride=1, norm_layer=None): - """A block with 'layers' layers + """Create a block with layers. + Args: block (class): block type planes (int): output channels = planes * expansion layers (int): layer num in the block stride (int): the first layer stride in the block. + norm_layer (class): type of normalization layer. """ norm_layer = self.norm_layer downsample = None @@ -313,6 +291,7 @@ def _make_layer(self, block, planes, layers, stride=1, norm_layer=None): return nn.Sequential(*layer) def forward(self, x): + """Resnet forward.""" x = self.conv1(x) x = self.bn1(x) x = self.relu(x) @@ -355,6 +334,21 @@ def forward(self, x): def multi_resnet18(n_blocks=1, norm="bn", num_classes=100): + """Create resnet18 for HeteroFL. + + Parameters + ---------- + n_blocks: int + depth of network + norm: str + normalization layer type + num_classes: int + # of labels + + Returns + ------- + Callable [ [nn.Module,List[int],int,int,nn.Module], nn.Module] + """ if norm == "gn": norm_layer = MyGroupNorm @@ -370,36 +364,20 @@ def multi_resnet18(n_blocks=1, norm="bn", num_classes=100): ) -def multi_resnet34(n_blocks=4, norm="bn", num_classes=100): - if norm == "gn": - norm_layer = MyGroupNorm +# if __name__ == "__main__": +# from ptflops import get_model_complexity_info - elif norm == "bn": - norm_layer = MyBatchNorm +# model = multi_resnet18(n_blocks=4, num_classes=100) - return Multi_ResNet( - BasicBlock, - [3, 4, 6, 3], - n_blocks, - num_classes=num_classes, - norm_layer=norm_layer, - ) - - -if __name__ == "__main__": - from ptflops import get_model_complexity_info - - model = multi_resnet18(n_blocks=4, num_classes=100) - - with torch.cuda.device(0): - macs, params = get_model_complexity_info( - model, - (3, 32, 32), - as_strings=True, - print_per_layer_stat=False, - verbose=True, - units="MMac", - ) +# with torch.cuda.device(0): +# macs, params = get_model_complexity_info( +# model, +# (3, 32, 32), +# as_strings=True, +# print_per_layer_stat=False, +# verbose=True, +# units="MMac", +# ) - print("{:<30} {:<8}".format("Computational complexity: ", macs)) - print("{:<30} {:<8}".format("Number of parameters: ", params)) +# print("{:<30} {:<8}".format("Computational complexity: ", macs)) +# print("{:<30} {:<8}".format("Number of parameters: ", params)) diff --git a/baselines/depthfl/depthfl/resnet_hetero.py b/baselines/depthfl/depthfl/resnet_hetero.py index 5602d8a0761b..b466a97c2f99 100644 --- a/baselines/depthfl/depthfl/resnet_hetero.py +++ b/baselines/depthfl/depthfl/resnet_hetero.py @@ -1,9 +1,12 @@ +"""ResNet18 for HeteroFL.""" + import numpy as np -import torch import torch.nn as nn class Scaler(nn.Module): + """Scaler module for HeteroFL.""" + def __init__(self, rate, scale): super().__init__() if scale: @@ -12,32 +15,40 @@ def __init__(self, rate, scale): self.rate = 1 def forward(self, input): + """Scaler forward.""" output = input / self.rate if self.training else input return output class 
MyBatchNorm(nn.Module): + """Static Batch Normalization for HeteroFL.""" + def __init__(self, num_channels, track=True): super(MyBatchNorm, self).__init__() ## change num_groups to 32 self.norm = nn.BatchNorm2d(num_channels, track_running_stats=track) def forward(self, x): + """BatchNorm forward.""" x = self.norm(x) return x def conv3x3(in_planes, out_planes, stride=1): + """Convolution layer 3x3.""" return nn.Conv2d( in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False ) def conv1x1(in_planes, planes, stride=1): + """Convolution layer 1x1.""" return nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False) class BasicBlock(nn.Module): + """Basic Block for ResNet18.""" + expansion = 1 def __init__( @@ -61,6 +72,7 @@ def __init__( self.stride = stride def forward(self, x): + """BasicBlock forward.""" residual = x output = self.conv1(x) @@ -80,57 +92,9 @@ def forward(self, x): return output -class BottleneckBlock(nn.Module): - expansion = 4 - - def __init__( - self, - inplanes, - planes, - stride=1, - scaler_rate=1, - downsample=None, - track=True, - scale=True, - ): - super(BottleneckBlock, self).__init__() - self.conv1 = conv1x1(inplanes, planes) - self.bn1 = MyBatchNorm(planes) - self.relu = nn.ReLU(inplace=True) - - self.conv2 = conv3x3(planes, planes, stride) - self.bn2 = MyBatchNorm(planes) - - self.conv3 = conv1x1(planes, planes * self.expansion) - self.bn3 = MyBatchNorm(planes * self.expansion) - - self.downsample = downsample - self.stride = stride - - def forward(self, x): - residual = x - - output = self.conv1(x) - output = self.bn1(output) - output = self.relu(output) - - output = self.conv2(output) - output = self.bn2(output) - output = self.relu(output) - - output = self.conv3(output) - output = self.bn3(output) - - if self.downsample is not None: - residual = self.downsample(x) - - output += residual - output = self.relu(output) - - return output - - class Multi_ResNet(nn.Module): + """Resnet model.""" + def __init__( self, hidden_size, block, layers, num_classes, scaler_rate, track, scale ): @@ -195,12 +159,16 @@ def __init__( def _make_layer( self, block, planes, layers, stride=1, scaler_rate=1, track=True, scale=True ): - """A block with 'layers' layers + """Create a block with layers. + Args: block (class): block type planes (int): output channels = planes * expansion layers (int): layer num in the block stride (int): the first layer stride in the block. + scaler_rate (float): for scaler module + track (bool): static batch normalization + scale (bool): for scaler module. """ norm_layer = self.norm_layer downsample = None @@ -236,6 +204,7 @@ def _make_layer( return nn.Sequential(*layer) def forward(self, x): + """Resnet forward.""" x = self.conv1(x) x = self.scaler(x) x = self.bn1(x) @@ -253,6 +222,23 @@ def forward(self, x): def resnet18(n_blocks=4, track=False, scale=True, num_classes=100): + """Create resnet18 for HeteroFL. 
+ + Parameters + ---------- + n_blocks: int + corresponds to width (divided by 4) + track: bool + static batch normalization + scale: bool + scaler module + num_classes: int + # of labels + + Returns + ------- + Callable [ [List[int],nn.Module,List[int],int,float,bool,bool], nn.Module] + """ # width pruning ratio : (0.25, 0.50, 0.75, 0.10) model_rate = n_blocks / 4 classes_size = num_classes @@ -273,20 +259,20 @@ def resnet18(n_blocks=4, track=False, scale=True, num_classes=100): ) -if __name__ == "__main__": - from ptflops import get_model_complexity_info +# if __name__ == "__main__": +# from ptflops import get_model_complexity_info - model = resnet18(100, 1.0) +# model = resnet18(100, 1.0) - with torch.cuda.device(0): - macs, params = get_model_complexity_info( - model, - (3, 32, 32), - as_strings=True, - print_per_layer_stat=False, - verbose=True, - units="MMac", - ) +# with torch.cuda.device(0): +# macs, params = get_model_complexity_info( +# model, +# (3, 32, 32), +# as_strings=True, +# print_per_layer_stat=False, +# verbose=True, +# units="MMac", +# ) - print("{:<30} {:<8}".format("Computational complexity: ", macs)) - print("{:<30} {:<8}".format("Number of parameters: ", params)) +# print("{:<30} {:<8}".format("Computational complexity: ", macs)) +# print("{:<30} {:<8}".format("Number of parameters: ", params)) diff --git a/baselines/depthfl/depthfl/server.py b/baselines/depthfl/depthfl/server.py index 358f8b5a353d..a205bc5271fb 100644 --- a/baselines/depthfl/depthfl/server.py +++ b/baselines/depthfl/depthfl/server.py @@ -1,3 +1,5 @@ +"""Server for DepthFL baseline.""" + import copy from collections import OrderedDict from logging import DEBUG, INFO @@ -29,7 +31,7 @@ def gen_evaluate_fn( ) -> Callable[ [int, NDArrays, Dict[str, Scalar]], Optional[Tuple[float, Dict[str, Scalar]]] ]: - """Generates the function for centralized evaluation. + """Generate the function for centralized evaluation. Parameters ---------- @@ -37,10 +39,13 @@ def gen_evaluate_fn( The dataloader to test the model with. device : torch.device The device to test the model on. + model : DictConfig + model configuration for instantiating Returns ------- - Callable[ [int, NDArrays, Dict[str, Scalar]], Optional[Tuple[float, Dict[str, Scalar]]] ] + Callable[ [int, NDArrays, Dict[str, Scalar]], + Optional[Tuple[float, Dict[str, Scalar]]] ] The centralized evaluation function. """ @@ -70,18 +75,23 @@ def gen_evaluate_fn_hetero( ) -> Callable[ [int, NDArrays, Dict[str, Scalar]], Optional[Tuple[float, Dict[str, Scalar]]] ]: - """Generates the function for centralized evaluation. + """Generate the function for centralized evaluation. Parameters ---------- + trainloaders : List[DataLoader] + The list of dataloaders to calculate statistics for BN testloader : DataLoader The dataloader to test the model with. device : torch.device The device to test the model on. + model_cfg : DictConfig + model configuration for instantiating Returns ------- - Callable[ [int, NDArrays, Dict[str, Scalar]], Optional[Tuple[float, Dict[str, Scalar]]] ] + Callable[ [int, NDArrays, Dict[str, Scalar]], + Optional[Tuple[float, Dict[str, Scalar]]] ] The centralized evaluation function. 
""" @@ -133,6 +143,8 @@ def evaluate( class Server_FedDyn(Server): + """Sever for FedDyn.""" + def fit_round( self, server_round: int, @@ -140,7 +152,7 @@ def fit_round( ) -> Optional[ Tuple[Optional[Parameters], Dict[str, Scalar], FitResultsAndFailures] ]: - """Perform a single round of federated averaging.""" + """Perform a single round.""" # Get clients and their respective instructions from strategy client_instructions = self.strategy.configure_fit( server_round=server_round, diff --git a/baselines/depthfl/depthfl/strategy.py b/baselines/depthfl/depthfl/strategy.py index 3a19e1da842a..40695ba74a4b 100644 --- a/baselines/depthfl/depthfl/strategy.py +++ b/baselines/depthfl/depthfl/strategy.py @@ -1,3 +1,5 @@ +"""Strategy for DepthFL.""" + import os import pickle from logging import WARNING @@ -22,7 +24,7 @@ def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: - """Aggregation function for weighted average during evaluation. + """Weighted average during evaluation. Parameters ---------- @@ -113,6 +115,7 @@ def aggregate( is_weight: List, cfg: DictConfig, ) -> NDArrays: + """Aggregate model parameters with different depths.""" param_count = [0] * len(origin) weights_sum = [np.zeros(v.shape) for v in origin] diff --git a/baselines/depthfl/depthfl/strategy_hetero.py b/baselines/depthfl/depthfl/strategy_hetero.py index 5ada0b308ea6..269c41a2682c 100644 --- a/baselines/depthfl/depthfl/strategy_hetero.py +++ b/baselines/depthfl/depthfl/strategy_hetero.py @@ -1,3 +1,5 @@ +"""Strategy for HeteroFL.""" + from logging import WARNING from typing import Dict, List, Optional, Tuple, Union @@ -87,6 +89,7 @@ def aggregate_fit( return parameters_aggregated, metrics_aggregated def aggregate_hetero(self, results: List[Tuple[NDArrays, int]]) -> NDArrays: + """Aggregate function for HeteroFL.""" for i, v in enumerate(self.parameters): count = np.zeros(v.shape) tmp_v = np.zeros(v.shape) diff --git a/baselines/depthfl/depthfl/utils.py b/baselines/depthfl/depthfl/utils.py index 69ddc1db2a6e..15461d97c588 100644 --- a/baselines/depthfl/depthfl/utils.py +++ b/baselines/depthfl/depthfl/utils.py @@ -11,10 +11,10 @@ def save_results_as_pickle( history: History, file_path: Union[str, Path], - extra_results: Optional[Dict] = {}, + extra_results: Dict, default_filename: Optional[str] = "results.pkl", ) -> None: - """Saves results from simulation to pickle. + """Save results from simulation to pickle. Parameters ---------- @@ -26,7 +26,7 @@ def save_results_as_pickle( path doesn't exist, it will be created. If file exists, a randomly generated suffix will be added to the file name. This is done to avoid overwritting results. - extra_results : Optional[Dict] + extra_results : Dict A dictionary containing additional results you would like to be saved to disk. Default: {} (an empty dictionary) default_filename: Optional[str] @@ -39,16 +39,14 @@ def save_results_as_pickle( path.mkdir(exist_ok=True, parents=True) def _add_random_suffix(path_: Path): - """Adds a randomly generated suffix to the file name (so it doesn't overwrite - the file). - """ + """Add a randomly generated suffix to the file name.""" print(f"File `{path_}` exists! 
") suffix = token_hex(4) print(f"New results to be saved with suffix: {suffix}") return path_.parent / (path_.stem + "_" + suffix + ".pkl") def _complete_path_with_default_name(path_: Path): - """Appends the default file name to the path.""" + """Append the default file name to the path.""" print("Using default filename") return path_ / default_filename From f414f7b9415f2bdcae7091e012553db6173e677f Mon Sep 17 00:00:00 2001 From: Peterpan828 Date: Sun, 8 Oct 2023 16:34:37 +0900 Subject: [PATCH 31/51] formatting --- baselines/depthfl/depthfl/client.py | 25 +-- .../depthfl/depthfl/dataset_preparation.py | 94 ++++++------ baselines/depthfl/depthfl/main.py | 7 +- baselines/depthfl/depthfl/models.py | 143 +++++++++--------- baselines/depthfl/depthfl/resnet.py | 15 +- baselines/depthfl/depthfl/resnet_hetero.py | 6 +- baselines/depthfl/depthfl/server.py | 14 +- baselines/depthfl/depthfl/strategy.py | 6 +- baselines/depthfl/depthfl/strategy_hetero.py | 6 +- baselines/depthfl/depthfl/utils.py | 4 +- 10 files changed, 158 insertions(+), 162 deletions(-) diff --git a/baselines/depthfl/depthfl/client.py b/baselines/depthfl/depthfl/client.py index a4884dcf215c..4bed018c60f1 100644 --- a/baselines/depthfl/depthfl/client.py +++ b/baselines/depthfl/depthfl/client.py @@ -74,17 +74,17 @@ def set_parameters(self, parameters: NDArrays) -> None: def fit( self, parameters: NDArrays, config: Dict[str, Scalar] - ) -> Tuple[NDArrays, Dict, int]: + ) -> Tuple[NDArrays, int, Dict]: """Implement distributed fit function for a given client.""" self.set_parameters(parameters) num_epochs = self.num_epochs - curr_round = config["curr_round"] - 1 + curr_round = int(config["curr_round"]) - 1 # consistency weight for self distillation in DepthFL - CONSISTENCY_WEIGHT = 300 - current = np.clip(curr_round, 0.0, CONSISTENCY_WEIGHT) - phase = 1.0 - current / CONSISTENCY_WEIGHT + consistency_weight_constant = 300 + current = np.clip(curr_round, 0.0, consistency_weight_constant) + phase = 1.0 - current / consistency_weight_constant consistency_weight = float(np.exp(-5.0 * phase * phase)) train( @@ -93,12 +93,9 @@ def fit( self.device, epochs=num_epochs, learning_rate=self.learning_rate * self.learning_rate_decay**curr_round, - feddyn=config["feddyn"], - kd=config["kd"], + config=config, consistency_weight=consistency_weight, prev_grads=self.prev_grads, - alpha=config["alpha"], - extended=config["extended"], ) with open(f"prev_grads/client_{self.cid}", "wb") as f: @@ -120,25 +117,17 @@ def evaluate( def gen_client_fn( - num_clients: int, - num_rounds: int, num_epochs: int, trainloaders: List[DataLoader], valloaders: List[DataLoader], learning_rate: float, learning_rate_decay: float, models: List[DictConfig], - cfg: DictConfig, ) -> Callable[[str], FlowerClient]: """Generate the client function that creates the Flower Clients. Parameters ---------- - num_clients : int - The number of clients present in the setup - num_rounds: int - The number of rounds in the experiment. This is used to construct - the scheduling for stragglers num_epochs : int The number of local epochs each client should run the training for before sending it to the server. @@ -154,8 +143,6 @@ def gen_client_fn( The learning rate decay ratio per round for the SGD optimizer of clients. 
models : List[DictConfig] A list of DictConfigs, each pointing to the model config of client's local model - cfg : DictConfig - Configuration Returns ------- diff --git a/baselines/depthfl/depthfl/dataset_preparation.py b/baselines/depthfl/depthfl/dataset_preparation.py index 778695063f5d..7e07c15efe54 100644 --- a/baselines/depthfl/depthfl/dataset_preparation.py +++ b/baselines/depthfl/depthfl/dataset_preparation.py @@ -44,8 +44,8 @@ def _download_data() -> Tuple[Dataset, Dataset]: def _partition_data( num_clients, iid: Optional[bool] = True, - beta: Optional[float] = 0.5, - seed: Optional[int] = 41, + beta=0.5, + seed=41, ) -> Tuple[List[Dataset], Dataset]: """Split training set to simulate the federated setting. @@ -69,51 +69,57 @@ def _partition_data( """ trainset, testset = _download_data() - datasets = [] + datasets: List[Subset] = [] if iid: - np.random.seed(seed) - num_sample = int(len(trainset) / (num_clients)) - index = list(range(len(trainset))) - for _ in range(num_clients): - sample_idx = np.random.choice(index, num_sample, replace=False) - index = list(set(index) - set(sample_idx)) - datasets.append(Subset(trainset, sample_idx)) + distribute_iid(num_clients, seed, trainset, datasets) else: - labels = np.array([label for _, label in trainset]) - min_size = 0 - K = np.max(labels) + 1 - N = labels.shape[0] - # net_dataidx_map = {} - n_nets = num_clients - np.random.seed(seed) - - while min_size < 10: - idx_batch = [[] for _ in range(n_nets)] - # for each class in the dataset - for k in range(K): - idx_k = np.where(labels == k)[0] - np.random.shuffle(idx_k) - proportions = np.random.dirichlet(np.repeat(beta, n_nets)) - ## Balance - proportions = np.array( - [ - p * (len(idx_j) < N / n_nets) - for p, idx_j in zip(proportions, idx_batch) - ] - ) - proportions = proportions / proportions.sum() - proportions = (np.cumsum(proportions) * len(idx_k)).astype(int)[:-1] - idx_batch = [ - idx_j + idx.tolist() - for idx_j, idx in zip(idx_batch, np.split(idx_k, proportions)) - ] - min_size = min([len(idx_j) for idx_j in idx_batch]) - - for j in range(n_nets): - np.random.shuffle(idx_batch[j]) - # net_dataidx_map[j] = np.array(idx_batch[j]) - datasets.append(Subset(trainset, np.array(idx_batch[j]))) + distribute_noniid(num_clients, beta, seed, trainset, datasets) return datasets, testset + + +def distribute_iid(num_clients, seed, trainset, datasets): + """Distribute dataset in iid manner.""" + np.random.seed(seed) + num_sample = int(len(trainset) / (num_clients)) + index = list(range(len(trainset))) + for _ in range(num_clients): + sample_idx = np.random.choice(index, num_sample, replace=False) + index = list(set(index) - set(sample_idx)) + datasets.append(Subset(trainset, sample_idx)) + + +def distribute_noniid(num_clients, beta, seed, trainset, datasets): + """Distribute dataset in non-iid manner.""" + labels = np.array([label for _, label in trainset]) + min_size = 0 + np.random.seed(seed) + + while min_size < 10: + idx_batch: list = [[] for _ in range(num_clients)] + # for each class in the dataset + for k in range(np.max(labels) + 1): + idx_k = np.where(labels == k)[0] + np.random.shuffle(idx_k) + proportions = np.random.dirichlet(np.repeat(beta, num_clients)) + ## Balance + proportions = np.array( + [ + p * (len(idx_j) < labels.shape[0] / num_clients) + for p, idx_j in zip(proportions, idx_batch) + ] + ) + proportions = proportions / proportions.sum() + proportions = (np.cumsum(proportions) * len(idx_k)).astype(int)[:-1] + idx_batch = [ + idx_j + idx.tolist() + for idx_j, idx in 
zip(idx_batch, np.split(idx_k, proportions)) + ] + min_size = min([len(idx_j) for idx_j in idx_batch]) + + for j in range(num_clients): + np.random.shuffle(idx_batch[j]) + # net_dataidx_map[j] = np.array(idx_batch[j]) + datasets.append(Subset(trainset, np.array(idx_batch[j]))) diff --git a/baselines/depthfl/depthfl/main.py b/baselines/depthfl/depthfl/main.py index 6a908ca5b0fb..7bf1d9563eae 100644 --- a/baselines/depthfl/depthfl/main.py +++ b/baselines/depthfl/depthfl/main.py @@ -55,15 +55,12 @@ def main(cfg: DictConfig) -> None: # prepare function that will be used to spawn each client client_fn = client.gen_client_fn( - num_clients=cfg.num_clients, num_epochs=cfg.num_epochs, trainloaders=trainloaders, valloaders=valloaders, - num_rounds=cfg.num_rounds, learning_rate=cfg.learning_rate, learning_rate_decay=cfg.learning_rate_decay, models=models, - cfg=cfg, ) # get function that will executed by the strategy's evaluate() method @@ -81,7 +78,7 @@ def main(cfg: DictConfig) -> None: # get a function that will be used to construct the config that the client's # fit() method will received def get_on_fit_config(): - def fit_config_fn(server_round: int): + def fit_config_fn(server_round): # resolve and convert to python dict fit_config = OmegaConf.to_container(cfg.fit_config, resolve=True) fit_config["curr_round"] = server_round # add round info @@ -115,7 +112,7 @@ def fit_config_fn(server_round: int): "num_gpus": cfg.client_resources.num_gpus, }, strategy=strategy, - server=server.Server_FedDyn( + server=server.ServerFedDyn( client_manager=SimpleClientManager(), strategy=strategy ), ) diff --git a/baselines/depthfl/depthfl/models.py b/baselines/depthfl/depthfl/models.py index a8fb251b078f..cdcf46b5649c 100644 --- a/baselines/depthfl/depthfl/models.py +++ b/baselines/depthfl/depthfl/models.py @@ -14,20 +14,20 @@ class KLLoss(nn.Module): """KL divergence loss for self distillation.""" def __init__(self): - super(KLLoss, self).__init__() + super().__init__() + self.temperature = 1 def forward(self, pred, label): """KL loss forward.""" - T = 1 - predict = F.log_softmax(pred / T, dim=1) - target_data = F.softmax(label / T, dim=1) + predict = F.log_softmax(pred / self.temperature, dim=1) + target_data = F.softmax(label / self.temperature, dim=1) target_data = target_data + 10 ** (-7) with torch.no_grad(): target = target_data.detach().clone() loss = ( - T - * T + self.temperature + * self.temperature * ((target * (target.log() - predict)).sum(1).sum() / target.size()[0]) ) return loss @@ -39,12 +39,9 @@ def train( # pylint: disable=too-many-arguments device: torch.device, epochs: int, learning_rate: float, - feddyn: bool, - kd: bool, + config: dict, consistency_weight: float, prev_grads: dict, - alpha: float, - extended: bool, ) -> None: """Train the network on the training set. @@ -60,21 +57,14 @@ def train( # pylint: disable=too-many-arguments The number of epochs the model should be trained for. learning_rate : float The learning rate for the SGD optimizer. - feddyn : bool - whether using feddyn or fedavg - kd : bool - whether using self distillation + config : dict + training configuration consistency_weight : float hyperparameter for self distillation prev_grads : dict control variate for feddyn - alpha : float - Hyperparameter for the FedDyn. 
- extended : bool - if extended, train all sub-classifiers within local model """ criterion = torch.nn.CrossEntropyLoss() - criterion_kl = KLLoss().cuda() optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate, weight_decay=1e-3) global_params = { k: val.detach().clone().flatten() for (k, val) in net.named_parameters() @@ -91,22 +81,25 @@ def train( # pylint: disable=too-many-arguments trainloader, device, criterion, - criterion_kl, optimizer, - feddyn, - kd, + config, consistency_weight, prev_grads, - alpha, - extended, ) # update prev_grads for FedDyn - if feddyn: - for k, param in net.named_parameters(): - curr_param = param.detach().clone().flatten() - prev_grads[k] = prev_grads[k] - alpha * (curr_param - global_params[k]) - prev_grads[k] = prev_grads[k].to(torch.device("cpu")) + if config["feddyn"]: + update_prev_grads(config, net, prev_grads, global_params) + + +def update_prev_grads(config, net, prev_grads, global_params): + """Update prev_grads for FedDyn.""" + for k, param in net.named_parameters(): + curr_param = param.detach().clone().flatten() + prev_grads[k] = prev_grads[k] - config["alpha"] * ( + curr_param - global_params[k] + ) + prev_grads[k] = prev_grads[k].to(torch.device(torch.device("cpu"))) def _train_one_epoch( # pylint: disable=too-many-arguments @@ -115,14 +108,10 @@ def _train_one_epoch( # pylint: disable=too-many-arguments trainloader: DataLoader, device: torch.device, criterion: torch.nn.CrossEntropyLoss, - criterion_kl: nn.Module, - optimizer: torch.optim.Adam, - feddyn: bool, - kd: bool, + optimizer: torch.optim.SGD, + config: dict, consistency_weight: float, prev_grads: dict, - alpha: float, - extended: bool, ): """Train for one epoch. @@ -138,65 +127,81 @@ def _train_one_epoch( # pylint: disable=too-many-arguments The device on which the model should be trained, either 'cpu' or 'cuda'. criterion : torch.nn.CrossEntropyLoss The loss function to use for training - criterion_kl : nn.Module - The loss function for self distillation optimizer : torch.optim.Adam The optimizer to use for training - feddyn : bool - whether using feddyn or fedavg - kd : bool - whether using self distillation + config : dict + training configuration consistency_weight : float hyperparameter for self distillation prev_grads : dict control variate for feddyn - alpha : float - Hyperparameter for the FedDyn. 
- extended : bool - if extended, train all sub-classifiers within local model """ for images, labels in trainloader: images, labels = images.to(device), labels.to(device) - loss = 0.0 + loss = torch.zeros(1).to(device) optimizer.zero_grad() output_lst = net(images) for i, branch_output in enumerate(output_lst): # only trains last classifier in InclusiveFL - if not extended and i != len(output_lst) - 1: + if not config["extended"] and i != len(output_lst) - 1: continue loss += criterion(branch_output, labels) - # self distillation term - if kd and len(output_lst) > 1: - for j in range(len(output_lst)): - if j == i: - continue - else: - loss += ( - consistency_weight - * criterion_kl(branch_output, output_lst[j].detach()) - / (len(output_lst) - 1) - ) + if config["kd"] and len(output_lst) > 1: + # self distillation term + loss = self_distillation( + output_lst, + i, + loss, + consistency_weight, + KLLoss().cuda(), + branch_output, + ) # Dynamic regularization in FedDyn - if feddyn: - for k, param in net.named_parameters(): - curr_param = param.flatten() - - lin_penalty = torch.dot(curr_param, prev_grads[k]) - loss -= lin_penalty - - quad_penalty = ( - alpha / 2.0 * torch.sum(torch.square(curr_param - global_params[k])) - ) - loss += quad_penalty + if config["feddyn"]: + loss = dynamic_regularization(config, net, prev_grads, global_params, loss) loss.backward() optimizer.step() +def self_distillation( + output_lst, i, loss, consistency_weight, criterion_kl, branch_output +): + """'self distillation term.""" + for j, output in enumerate(output_lst): + if j == i: + continue + + loss += ( + consistency_weight + * criterion_kl(branch_output, output.detach()) + / (len(output_lst) - 1) + ) + + return loss + + +def dynamic_regularization(config, net, prev_grads, global_params, loss): + """Dynamic regularization for FedDyn.""" + for k, param in net.named_parameters(): + curr_param = param.flatten() + + lin_penalty = torch.dot(curr_param, prev_grads[k]) + loss -= lin_penalty + quad_penalty = ( + config["alpha"] + / 2.0 + * torch.sum(torch.square(curr_param - global_params[k])) + ) + loss += quad_penalty + + return loss + + def test( net: nn.Module, testloader: DataLoader, device: torch.device ) -> Tuple[float, float, List[float]]: diff --git a/baselines/depthfl/depthfl/resnet.py b/baselines/depthfl/depthfl/resnet.py index b8dfe50da272..f48741580740 100644 --- a/baselines/depthfl/depthfl/resnet.py +++ b/baselines/depthfl/depthfl/resnet.py @@ -1,7 +1,5 @@ """ResNet18 for DepthFL.""" -from typing import Callable, Optional - import torch.nn as nn @@ -9,7 +7,7 @@ class MyGroupNorm(nn.Module): """Group Normalization layer.""" def __init__(self, num_channels): - super(MyGroupNorm, self).__init__() + super().__init__() ## change num_groups to 32 self.norm = nn.GroupNorm( num_groups=16, num_channels=num_channels, eps=1e-5, affine=True @@ -25,7 +23,7 @@ class MyBatchNorm(nn.Module): """Batch Normalization layer.""" def __init__(self, num_channels): - super(MyBatchNorm, self).__init__() + super().__init__() self.norm = nn.BatchNorm2d(num_channels, track_running_stats=True) def forward(self, x): @@ -56,10 +54,9 @@ def __init__( kernel_size=3, stride=2, padding=1, - affine=True, norm_layer=MyGroupNorm, ): - super(SepConv, self).__init__() + super().__init__() self.op = nn.Sequential( nn.Conv2d( channel_in, @@ -98,7 +95,7 @@ class BasicBlock(nn.Module): expansion = 1 def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None): - super(BasicBlock, self).__init__() + super().__init__() self.conv1 
= conv3x3(inplanes, planes, stride) self.bn1 = norm_layer(planes) self.relu = nn.ReLU(inplace=True) @@ -143,9 +140,9 @@ def __init__( layers, n_blocks, num_classes=1000, - norm_layer: Optional[Callable[..., nn.Module]] = MyBatchNorm, + norm_layer=MyBatchNorm, ): - super(Multi_ResNet, self).__init__() + super().__init__() self.n_blocks = n_blocks self.inplanes = 64 self.norm_layer = norm_layer diff --git a/baselines/depthfl/depthfl/resnet_hetero.py b/baselines/depthfl/depthfl/resnet_hetero.py index b466a97c2f99..283a05c6ca99 100644 --- a/baselines/depthfl/depthfl/resnet_hetero.py +++ b/baselines/depthfl/depthfl/resnet_hetero.py @@ -24,7 +24,7 @@ class MyBatchNorm(nn.Module): """Static Batch Normalization for HeteroFL.""" def __init__(self, num_channels, track=True): - super(MyBatchNorm, self).__init__() + super().__init__() ## change num_groups to 32 self.norm = nn.BatchNorm2d(num_channels, track_running_stats=track) @@ -61,7 +61,7 @@ def __init__( track=True, scale=True, ): - super(BasicBlock, self).__init__() + super().__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.scaler = Scaler(scaler_rate, scale) self.bn1 = MyBatchNorm(planes, track) @@ -98,7 +98,7 @@ class Multi_ResNet(nn.Module): def __init__( self, hidden_size, block, layers, num_classes, scaler_rate, track, scale ): - super(Multi_ResNet, self).__init__() + super().__init__() self.inplanes = hidden_size[0] self.norm_layer = MyBatchNorm diff --git a/baselines/depthfl/depthfl/server.py b/baselines/depthfl/depthfl/server.py index a205bc5271fb..f9903632f934 100644 --- a/baselines/depthfl/depthfl/server.py +++ b/baselines/depthfl/depthfl/server.py @@ -29,7 +29,8 @@ def gen_evaluate_fn( device: torch.device, model: DictConfig, ) -> Callable[ - [int, NDArrays, Dict[str, Scalar]], Optional[Tuple[float, Dict[str, Scalar]]] + [int, NDArrays, Dict[str, Scalar]], + Tuple[float, Dict[str, Union[Scalar, List[float]]]], ]: """Generate the function for centralized evaluation. @@ -51,7 +52,7 @@ def gen_evaluate_fn( def evaluate( server_round: int, parameters_ndarrays: NDArrays, config: Dict[str, Scalar] - ) -> Optional[Tuple[float, Dict[str, Scalar]]]: + ) -> Tuple[float, Dict[str, Union[Scalar, List[float]]]]: # pylint: disable=unused-argument """Use the entire CIFAR-100 test set for evaluation.""" net = instantiate(model) @@ -73,7 +74,8 @@ def gen_evaluate_fn_hetero( device: torch.device, model_cfg: DictConfig, ) -> Callable[ - [int, NDArrays, Dict[str, Scalar]], Optional[Tuple[float, Dict[str, Scalar]]] + [int, NDArrays, Dict[str, Scalar]], + Tuple[float, Dict[str, Union[Scalar, List[float]]]], ]: """Generate the function for centralized evaluation. 
@@ -97,7 +99,7 @@ def gen_evaluate_fn_hetero(
 
     def evaluate(
         server_round: int, parameters_ndarrays: NDArrays, config: Dict[str, Scalar]
-    ) -> Optional[Tuple[float, Dict[str, Scalar]]]:
+    ) -> Tuple[float, Dict[str, Union[Scalar, List[float]]]]:
         # pylint: disable=unused-argument
         """Use the entire CIFAR-100 test set for evaluation."""
         # test per 50 rounds (sbn takes a long time)
@@ -142,7 +144,7 @@ def evaluate(
     return evaluate
 
 
-class Server_FedDyn(Server):
+class ServerFedDyn(Server):
     """Server for FedDyn."""
 
     def fit_round(
@@ -189,7 +191,7 @@ def fit_round(
         aggregated_result: Tuple[
             Optional[Parameters],
             Dict[str, Scalar],
-        ] = self.strategy.aggregate_fit(
+        ] = self.strategy.aggregate_fit(  # type: ignore [call-arg]
             server_round, results, failures, parameters_to_ndarrays(self.parameters)
         )
         # ] = self.strategy.aggregate_fit(server_round, results, failures)
diff --git a/baselines/depthfl/depthfl/strategy.py b/baselines/depthfl/depthfl/strategy.py
index 40695ba74a4b..95a0a9c362b0 100644
--- a/baselines/depthfl/depthfl/strategy.py
+++ b/baselines/depthfl/depthfl/strategy.py
@@ -74,7 +74,7 @@ def __init__(self, cfg: DictConfig, net: nn.Module, *args, **kwargs):
 
         super().__init__(*args, **kwargs)
 
-    def aggregate_fit(
+    def aggregate_fit(  # type: ignore[override]
         self,
         server_round: int,
         results: List[Tuple[ClientProxy, FitRes]],
@@ -120,8 +120,8 @@ def aggregate(
     weights_sum = [np.zeros(v.shape) for v in origin]
 
     # summation & counting of parameters
-    for weight, _ in results:
-        for i, layer in enumerate(weight):
+    for parameters, _ in results:
+        for i, layer in enumerate(parameters):
             weights_sum[i] += layer
             param_count[i] += 1
 
diff --git a/baselines/depthfl/depthfl/strategy_hetero.py b/baselines/depthfl/depthfl/strategy_hetero.py
index 269c41a2682c..f92353e15330 100644
--- a/baselines/depthfl/depthfl/strategy_hetero.py
+++ b/baselines/depthfl/depthfl/strategy_hetero.py
@@ -54,7 +54,7 @@ def __init__(self, cfg: DictConfig, net: nn.Module, *args, **kwargs):
 
         super().__init__(*args, **kwargs)
 
-    def aggregate_fit(
+    def aggregate_fit(  # type: ignore[override]
         self,
         server_round: int,
         results: List[Tuple[ClientProxy, FitRes]],
@@ -88,7 +88,9 @@ def aggregate_fit(
 
         return parameters_aggregated, metrics_aggregated
 
-    def aggregate_hetero(self, results: List[Tuple[NDArrays, int]]) -> NDArrays:
+    def aggregate_hetero(
+        self, results: List[Tuple[NDArrays, Union[bool, bytes, float, int, str]]]
+    ):
         """Aggregate function for HeteroFL."""
         for i, v in enumerate(self.parameters):
             count = np.zeros(v.shape)
diff --git a/baselines/depthfl/depthfl/utils.py b/baselines/depthfl/depthfl/utils.py
index 15461d97c588..fad2afcad4be 100644
--- a/baselines/depthfl/depthfl/utils.py
+++ b/baselines/depthfl/depthfl/utils.py
@@ -3,7 +3,7 @@
 import pickle
 from pathlib import Path
 from secrets import token_hex
-from typing import Dict, Optional, Union
+from typing import Dict, Union
 
 from flwr.server.history import History
 
@@ -12,7 +12,7 @@ def save_results_as_pickle(
     history: History,
     file_path: Union[str, Path],
     extra_results: Dict,
-    default_filename: Optional[str] = "results.pkl",
+    default_filename: str = "results.pkl",
 ) -> None:
     """Save results from simulation to pickle.
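The `self_distillation` helper factored out above (and re-inlined by a later formatting commit) is a pairwise KL term between the classifier branches of one local model. A minimal standalone sketch of that term, assuming a temperature-scaled KL criterion similar to the baseline's `KLLoss` (the temperature of 3 and the function names here are illustrative assumptions, not the baseline's exact API):

```python
import torch
import torch.nn.functional as F


def kl_loss(student_logits, teacher_logits, temperature=3.0):
    """Temperature-scaled KL divergence between two branch outputs."""
    log_p = F.log_softmax(student_logits / temperature, dim=1)
    q = F.softmax(teacher_logits / temperature, dim=1)
    return F.kl_div(log_p, q, reduction="batchmean") * temperature**2


def self_distillation_term(output_lst, i, consistency_weight):
    """KL from branch i to every other branch, averaged over the teachers."""
    teachers = [out.detach() for j, out in enumerate(output_lst) if j != i]
    kd_loss = sum(kl_loss(output_lst[i], t) for t in teachers)
    return consistency_weight * kd_loss / len(teachers)
```

Each active classifier adds this term on top of its cross-entropy loss, which is how the deep, data-starved classifiers learn from the shallow, fully trained ones.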
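Likewise, the `dynamic_regularization` helper implements FedDyn's linear and quadratic penalties. A sketch under the same caveats (`alpha` corresponds to `fit_config.alpha`; `prev_grads` and `global_params` are assumed to hold one flattened tensor per parameter name, already on the right device):

```python
import torch
import torch.nn as nn


def feddyn_penalty(net: nn.Module, prev_grads: dict, global_params: dict, alpha: float):
    """FedDyn terms: -<theta, g_prev> + (alpha / 2) * ||theta - theta_global||^2."""
    penalty = torch.zeros(1, device=next(net.parameters()).device)
    for name, param in net.named_parameters():
        flat = param.flatten()
        # linear term: steer training with the accumulated gradient state
        penalty = penalty - torch.dot(flat, prev_grads[name])
        # quadratic (proximal) term: stay close to the current global model
        penalty = penalty + alpha / 2.0 * torch.sum(torch.square(flat - global_params[name]))
    return penalty
```

Adding this penalty to the local loss is what aligns each client's stationary point with the global objective, at the cost of the per-client `prev_grads` state handled by the stateful-client machinery in the later commits.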
From c39e949aa75e4e41689857ce25122820a04dec38 Mon Sep 17 00:00:00 2001 From: Peterpan828 <59055419+Peterpan828@users.noreply.github.com> Date: Sun, 8 Oct 2023 16:48:01 +0900 Subject: [PATCH 32/51] Update baselines/depthfl/pyproject.toml Co-authored-by: Javier --- baselines/depthfl/pyproject.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/baselines/depthfl/pyproject.toml b/baselines/depthfl/pyproject.toml index 5e5a834a1325..d0a56094cdf7 100644 --- a/baselines/depthfl/pyproject.toml +++ b/baselines/depthfl/pyproject.toml @@ -39,7 +39,6 @@ classifiers = [ [tool.poetry.dependencies] python = ">=3.10.0, <3.11.0" flwr = { extras = ["simulation"], version = "1.5.0" } -ray = "2.6.3" # don't change this hydra-core = "1.3.2" # don't change this matplotlib = "3.7.1" torch = { url = "https://download.pytorch.org/whl/cu116/torch-1.13.1%2Bcu116-cp310-cp310-linux_x86_64.whl"} From 3462c1bed07f9f274e0c749e7a905fd110fa1b5b Mon Sep 17 00:00:00 2001 From: Peterpan828 <59055419+Peterpan828@users.noreply.github.com> Date: Sun, 8 Oct 2023 16:48:19 +0900 Subject: [PATCH 33/51] Update baselines/depthfl/pyproject.toml Co-authored-by: Javier --- baselines/depthfl/pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/baselines/depthfl/pyproject.toml b/baselines/depthfl/pyproject.toml index d0a56094cdf7..468dfd91b00d 100644 --- a/baselines/depthfl/pyproject.toml +++ b/baselines/depthfl/pyproject.toml @@ -49,7 +49,7 @@ torchvision = { url = "https://download.pytorch.org/whl/cu116/torchvision-0.14.1 isort = "==5.11.5" black = "==23.1.0" docformatter = "==1.5.1" -mypy = "==0.961" +mypy = "==1.4.1" pylint = "==2.8.2" flake8 = "==3.9.2" pytest = "==6.2.4" From 4a67e89861231011adb862f8aaa4b0fe5e4e894f Mon Sep 17 00:00:00 2001 From: Peterpan828 Date: Sun, 8 Oct 2023 17:10:53 +0900 Subject: [PATCH 34/51] updated pyproject.toml --- baselines/depthfl/pyproject.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/baselines/depthfl/pyproject.toml b/baselines/depthfl/pyproject.toml index 468dfd91b00d..45b853520120 100644 --- a/baselines/depthfl/pyproject.toml +++ b/baselines/depthfl/pyproject.toml @@ -5,9 +5,9 @@ build-backend = "poetry.masonry.api" [tool.poetry] name = "depthfl" # <----- Ensure it matches the name of your baseline directory containing all the source code version = "1.0.0" -description = "Flower Baselines" +description = "DepthFL: Depthwise Federated Learning for Heterogeneous Clients" license = "Apache-2.0" -authors = ["The Flower Authors "] +authors = ["Minjae Kim "] readme = "README.md" homepage = "https://flower.dev" repository = "https://github.com/adap/flower" From ce55d7afb465835dd005f75dd0f24ebeeba3a160 Mon Sep 17 00:00:00 2001 From: Peterpan828 <59055419+Peterpan828@users.noreply.github.com> Date: Mon, 9 Oct 2023 17:12:50 +0900 Subject: [PATCH 35/51] Update baselines/depthfl/pyproject.toml Co-authored-by: Javier --- baselines/depthfl/pyproject.toml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/baselines/depthfl/pyproject.toml b/baselines/depthfl/pyproject.toml index 45b853520120..2f928c2d3553 100644 --- a/baselines/depthfl/pyproject.toml +++ b/baselines/depthfl/pyproject.toml @@ -86,6 +86,9 @@ disable = "bad-continuation,duplicate-code,too-few-public-methods,useless-import good-names = "i,j,k,_,x,y,X,Y" signature-mutators="hydra.main.main" +[tool.pylint.typecheck] +generated-members="numpy.*, torch.*, tensorflow.*" + [[tool.mypy.overrides]] module = [ "importlib.metadata.*", From d8ef21f51efa21ee3a27b6f7e9b827a0b24b9c0c Mon Sep 17 00:00:00 
2001 From: Peterpan828 Date: Mon, 9 Oct 2023 17:13:07 +0900 Subject: [PATCH 36/51] formatting --- .../depthfl/depthfl/dataset_preparation.py | 2 +- baselines/depthfl/depthfl/models.py | 68 +++++++------------ baselines/depthfl/depthfl/strategy.py | 4 +- 3 files changed, 28 insertions(+), 46 deletions(-) diff --git a/baselines/depthfl/depthfl/dataset_preparation.py b/baselines/depthfl/depthfl/dataset_preparation.py index 7e07c15efe54..9680dfc1f4b0 100644 --- a/baselines/depthfl/depthfl/dataset_preparation.py +++ b/baselines/depthfl/depthfl/dataset_preparation.py @@ -98,7 +98,7 @@ def distribute_noniid(num_clients, beta, seed, trainset, datasets): np.random.seed(seed) while min_size < 10: - idx_batch: list = [[] for _ in range(num_clients)] + idx_batch = [[] for _ in range(num_clients)] # for each class in the dataset for k in range(np.max(labels) + 1): idx_k = np.where(labels == k)[0] diff --git a/baselines/depthfl/depthfl/models.py b/baselines/depthfl/depthfl/models.py index cdcf46b5649c..4015415590b7 100644 --- a/baselines/depthfl/depthfl/models.py +++ b/baselines/depthfl/depthfl/models.py @@ -136,6 +136,8 @@ def _train_one_epoch( # pylint: disable=too-many-arguments prev_grads : dict control variate for feddyn """ + criterion_kl = KLLoss().cuda() + for images, labels in trainloader: images, labels = images.to(device), labels.to(device) loss = torch.zeros(1).to(device) @@ -149,57 +151,35 @@ def _train_one_epoch( # pylint: disable=too-many-arguments loss += criterion(branch_output, labels) + # self distillation term if config["kd"] and len(output_lst) > 1: - # self distillation term - loss = self_distillation( - output_lst, - i, - loss, - consistency_weight, - KLLoss().cuda(), - branch_output, - ) + for j in range(len(output_lst)): + if j == i: + continue + else: + loss += ( + consistency_weight + * criterion_kl(branch_output, output_lst[j].detach()) + / (len(output_lst) - 1) + ) # Dynamic regularization in FedDyn if config["feddyn"]: - loss = dynamic_regularization(config, net, prev_grads, global_params, loss) - - loss.backward() - optimizer.step() - - -def self_distillation( - output_lst, i, loss, consistency_weight, criterion_kl, branch_output -): - """'self distillation term.""" - for j, output in enumerate(output_lst): - if j == i: - continue - - loss += ( - consistency_weight - * criterion_kl(branch_output, output.detach()) - / (len(output_lst) - 1) - ) - - return loss + for k, param in net.named_parameters(): + curr_param = param.flatten() + lin_penalty = torch.dot(curr_param, prev_grads[k]) + loss -= lin_penalty -def dynamic_regularization(config, net, prev_grads, global_params, loss): - """Dynamic regularization for FedDyn.""" - for k, param in net.named_parameters(): - curr_param = param.flatten() - - lin_penalty = torch.dot(curr_param, prev_grads[k]) - loss -= lin_penalty - quad_penalty = ( - config["alpha"] - / 2.0 - * torch.sum(torch.square(curr_param - global_params[k])) - ) - loss += quad_penalty + quad_penalty = ( + config["alpha"] + / 2.0 + * torch.sum(torch.square(curr_param - global_params[k])) + ) + loss += quad_penalty - return loss + loss.backward() + optimizer.step() def test( diff --git a/baselines/depthfl/depthfl/strategy.py b/baselines/depthfl/depthfl/strategy.py index 95a0a9c362b0..2ec9d876fd72 100644 --- a/baselines/depthfl/depthfl/strategy.py +++ b/baselines/depthfl/depthfl/strategy.py @@ -42,7 +42,9 @@ def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: # Aggregate and return custom metric (weighted average) print("here and nothing is 
breaking!!!") - return {"accuracy": int(sum(accuracies)) / int(sum(examples))} + return { + "accuracy": int(sum(accuracies)) / int(sum(examples)) # type:ignore[arg-type] + } class FedDyn(FedAvg): From a568a60d99cf05f31c1bbd334f65ddce8e3a95c1 Mon Sep 17 00:00:00 2001 From: Peterpan828 Date: Mon, 9 Oct 2023 17:55:35 +0900 Subject: [PATCH 37/51] formatting --- baselines/depthfl/depthfl/client.py | 10 +-- .../depthfl/depthfl/dataset_preparation.py | 2 +- baselines/depthfl/depthfl/models.py | 20 ++--- baselines/depthfl/depthfl/resnet.py | 44 ++++++----- baselines/depthfl/depthfl/resnet_hetero.py | 36 +++++---- baselines/depthfl/depthfl/server.py | 12 ++- baselines/depthfl/depthfl/strategy.py | 79 ++++++++++--------- baselines/depthfl/depthfl/strategy_hetero.py | 79 ++++++++++--------- 8 files changed, 151 insertions(+), 131 deletions(-) diff --git a/baselines/depthfl/depthfl/client.py b/baselines/depthfl/depthfl/client.py index 4bed018c60f1..481ac90f1c79 100644 --- a/baselines/depthfl/depthfl/client.py +++ b/baselines/depthfl/depthfl/client.py @@ -98,8 +98,8 @@ def fit( prev_grads=self.prev_grads, ) - with open(f"prev_grads/client_{self.cid}", "wb") as f: - pickle.dump(self.prev_grads, f) + with open(f"prev_grads/client_{self.cid}", "wb") as prev_grads_file: + pickle.dump(self.prev_grads, prev_grads_file) return self.get_parameters({}), len(self.trainloader), {"cid": self.cid} @@ -116,7 +116,7 @@ def evaluate( ) -def gen_client_fn( +def gen_client_fn( # pylint: disable=too-many-arguments num_epochs: int, trainloaders: List[DataLoader], valloaders: List[DataLoader], @@ -163,8 +163,8 @@ def client_fn(cid: str) -> FlowerClient: trainloader = trainloaders[int(cid)] valloader = valloaders[int(cid)] - with open(f"prev_grads/client_{int(cid)}", "rb") as f: - prev_grads = pickle.load(f) + with open(f"prev_grads/client_{int(cid)}", "rb") as prev_grads_file: + prev_grads = pickle.load(prev_grads_file) return FlowerClient( net, diff --git a/baselines/depthfl/depthfl/dataset_preparation.py b/baselines/depthfl/depthfl/dataset_preparation.py index 9680dfc1f4b0..006491c7679e 100644 --- a/baselines/depthfl/depthfl/dataset_preparation.py +++ b/baselines/depthfl/depthfl/dataset_preparation.py @@ -104,7 +104,7 @@ def distribute_noniid(num_clients, beta, seed, trainset, datasets): idx_k = np.where(labels == k)[0] np.random.shuffle(idx_k) proportions = np.random.dirichlet(np.repeat(beta, num_clients)) - ## Balance + # Balance proportions = np.array( [ p * (len(idx_j) < labels.shape[0] / num_clients) diff --git a/baselines/depthfl/depthfl/models.py b/baselines/depthfl/depthfl/models.py index 4015415590b7..df3eebf9f9ce 100644 --- a/baselines/depthfl/depthfl/models.py +++ b/baselines/depthfl/depthfl/models.py @@ -102,7 +102,7 @@ def update_prev_grads(config, net, prev_grads, global_params): prev_grads[k] = prev_grads[k].to(torch.device(torch.device("cpu"))) -def _train_one_epoch( # pylint: disable=too-many-arguments +def _train_one_epoch( # pylint: disable=too-many-locals, too-many-arguments net: nn.Module, global_params: dict, trainloader: DataLoader, @@ -153,15 +153,15 @@ def _train_one_epoch( # pylint: disable=too-many-arguments # self distillation term if config["kd"] and len(output_lst) > 1: - for j in range(len(output_lst)): + for j, output in enumerate(output_lst): if j == i: continue - else: - loss += ( - consistency_weight - * criterion_kl(branch_output, output_lst[j].detach()) - / (len(output_lst) - 1) - ) + + loss += ( + consistency_weight + * criterion_kl(branch_output, output.detach()) + / 
(len(output_lst) - 1) + ) # Dynamic regularization in FedDyn if config["feddyn"]: @@ -182,7 +182,7 @@ def _train_one_epoch( # pylint: disable=too-many-arguments optimizer.step() -def test( +def test( # pylint: disable=too-many-locals net: nn.Module, testloader: DataLoader, device: torch.device ) -> Tuple[float, float, List[float]]: """Evaluate the network on the entire test set. @@ -232,7 +232,7 @@ def test( return loss, accuracy, accuracy_single -def test_sbn( +def test_sbn( # pylint: disable=too-many-locals nets: List[nn.Module], trainloaders: List[DictConfig], testloader: DataLoader, diff --git a/baselines/depthfl/depthfl/resnet.py b/baselines/depthfl/depthfl/resnet.py index f48741580740..04348ae17441 100644 --- a/baselines/depthfl/depthfl/resnet.py +++ b/baselines/depthfl/depthfl/resnet.py @@ -8,7 +8,7 @@ class MyGroupNorm(nn.Module): def __init__(self, num_channels): super().__init__() - ## change num_groups to 32 + # change num_groups to 32 self.norm = nn.GroupNorm( num_groups=16, num_channels=num_channels, eps=1e-5, affine=True ) @@ -47,7 +47,7 @@ def conv1x1(in_planes, planes, stride=1): class SepConv(nn.Module): """Bottleneck layer module.""" - def __init__( + def __init__( # pylint: disable=too-many-arguments self, channel_in, channel_out, @@ -57,7 +57,7 @@ def __init__( norm_layer=MyGroupNorm, ): super().__init__() - self.op = nn.Sequential( + self.operations = nn.Sequential( nn.Conv2d( channel_in, channel_in, @@ -86,7 +86,7 @@ def __init__( def forward(self, x): """SepConv forward.""" - return self.op(x) + return self.operations(x) class BasicBlock(nn.Module): @@ -94,7 +94,9 @@ class BasicBlock(nn.Module): expansion = 1 - def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None): + def __init__( + self, inplanes, planes, stride=1, downsample=None, norm_layer=None + ): # pylint: disable=too-many-arguments super().__init__() self.conv1 = conv3x3(inplanes, planes, stride) self.bn1 = norm_layer(planes) @@ -123,7 +125,7 @@ def forward(self, x): return output -class Multi_ResNet(nn.Module): +class MultiResnet(nn.Module): # pylint: disable=too-many-instance-attributes """Resnet model. Args: @@ -134,7 +136,7 @@ class Multi_ResNet(nn.Module): norm_layer (class): type of normalization layer. """ - def __init__( + def __init__( # pylint: disable=too-many-arguments self, block, layers, @@ -244,17 +246,21 @@ def __init__( if n_blocks > 3: self.layer4 = self._make_layer(block, 512, layers[3], stride=2) - self.fc = nn.Linear(512 * block.expansion, num_classes) + self.fc_layer = nn.Linear(512 * block.expansion, num_classes) self.scala4 = nn.AdaptiveAvgPool2d(1) - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu") - elif isinstance(m, nn.GroupNorm) or isinstance(m, nn.BatchNorm2d): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) - - def _make_layer(self, block, planes, layers, stride=1, norm_layer=None): + for module in self.modules(): + if isinstance(module, nn.Conv2d): + nn.init.kaiming_normal_( + module.weight, mode="fan_out", nonlinearity="relu" + ) + elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.constant_(module.weight, 1) + nn.init.constant_(module.bias, 0) + + def _make_layer( + self, block, planes, layers, stride=1, norm_layer=None + ): # pylint: disable=too-many-arguments """Create a block with layers. 
Args: @@ -325,7 +331,7 @@ def forward(self, x): x = self.layer4(x) out4_feature = self.scala4(x).view(x.size(0), -1) - output4 = self.fc(out4_feature) + output4 = self.fc_layer(out4_feature) return [middle_output1, middle_output2, middle_output3, output4] @@ -352,7 +358,7 @@ def multi_resnet18(n_blocks=1, norm="bn", num_classes=100): elif norm == "bn": norm_layer = MyBatchNorm - return Multi_ResNet( + return MultiResnet( BasicBlock, [2, 2, 2, 2], n_blocks, @@ -364,7 +370,7 @@ def multi_resnet18(n_blocks=1, norm="bn", num_classes=100): # if __name__ == "__main__": # from ptflops import get_model_complexity_info -# model = multi_resnet18(n_blocks=4, num_classes=100) +# model = MultiResnet18(n_blocks=4, num_classes=100) # with torch.cuda.device(0): # macs, params = get_model_complexity_info( diff --git a/baselines/depthfl/depthfl/resnet_hetero.py b/baselines/depthfl/depthfl/resnet_hetero.py index 283a05c6ca99..a84c07b881b2 100644 --- a/baselines/depthfl/depthfl/resnet_hetero.py +++ b/baselines/depthfl/depthfl/resnet_hetero.py @@ -14,9 +14,9 @@ def __init__(self, rate, scale): else: self.rate = 1 - def forward(self, input): + def forward(self, x): """Scaler forward.""" - output = input / self.rate if self.training else input + output = x / self.rate if self.training else x return output @@ -25,7 +25,7 @@ class MyBatchNorm(nn.Module): def __init__(self, num_channels, track=True): super().__init__() - ## change num_groups to 32 + # change num_groups to 32 self.norm = nn.BatchNorm2d(num_channels, track_running_stats=track) def forward(self, x): @@ -46,12 +46,12 @@ def conv1x1(in_planes, planes, stride=1): return nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=False) -class BasicBlock(nn.Module): +class BasicBlock(nn.Module): # pylint: disable=too-many-instance-attributes """Basic Block for ResNet18.""" expansion = 1 - def __init__( + def __init__( # pylint: disable=too-many-arguments self, inplanes, planes, @@ -92,10 +92,10 @@ def forward(self, x): return output -class Multi_ResNet(nn.Module): +class Resnet(nn.Module): # pylint: disable=too-many-instance-attributes """Resnet model.""" - def __init__( + def __init__( # pylint: disable=too-many-arguments self, hidden_size, block, layers, num_classes, scaler_rate, track, scale ): super().__init__() @@ -146,17 +146,19 @@ def __init__( track=track, scale=scale, ) - self.fc = nn.Linear(hidden_size[3] * block.expansion, num_classes) + self.fc_layer = nn.Linear(hidden_size[3] * block.expansion, num_classes) self.scala = nn.AdaptiveAvgPool2d(1) - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu") - elif isinstance(m, nn.GroupNorm) or isinstance(m, nn.BatchNorm2d): - nn.init.constant_(m.weight, 1) - nn.init.constant_(m.bias, 0) + for module in self.modules(): + if isinstance(module, nn.Conv2d): + nn.init.kaiming_normal_( + module.weight, mode="fan_out", nonlinearity="relu" + ) + elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.constant_(module.weight, 1) + nn.init.constant_(module.bias, 0) - def _make_layer( + def _make_layer( # pylint: disable=too-many-arguments self, block, planes, layers, stride=1, scaler_rate=1, track=True, scale=True ): """Create a block with layers. 
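A note on the `Scaler` module touched in the `resnet_hetero.py` hunks above: it is the core HeteroFL width-scaling trick. During training, activations of a slimmed sub-model are divided by the channel keep-rate (much like inverted dropout), so clients training different widths produce activations on a comparable scale; at evaluation time it is the identity. A minimal sketch (the class and argument names mirror the diff; the rest is illustrative):

```python
import torch.nn as nn


class Scaler(nn.Module):
    """Divide activations by the width keep-rate during training only."""

    def __init__(self, rate, scale=True):
        super().__init__()
        # e.g. a client that keeps 50% of the channels uses rate = 0.5
        self.rate = rate if scale else 1

    def forward(self, x):
        return x / self.rate if self.training else x
```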
@@ -216,7 +218,7 @@ def forward(self, x): x = self.layer3(x) x = self.layer4(x) out = self.scala(x).view(x.size(0), -1) - out = self.fc(out) + out = self.fc_layer(out) return [out] @@ -248,7 +250,7 @@ def resnet18(n_blocks=4, track=False, scale=True, num_classes=100): scaler_rate = model_rate - return Multi_ResNet( + return Resnet( hidden_size, BasicBlock, [2, 2, 2, 2], diff --git a/baselines/depthfl/depthfl/server.py b/baselines/depthfl/depthfl/server.py index f9903632f934..1b2c526b8a7f 100644 --- a/baselines/depthfl/depthfl/server.py +++ b/baselines/depthfl/depthfl/server.py @@ -17,6 +17,7 @@ from depthfl.client import prune from depthfl.models import test, test_sbn +from depthfl.strategy import aggregate_fit FitResultsAndFailures = Tuple[ List[Tuple[ClientProxy, FitRes]], @@ -97,7 +98,7 @@ def gen_evaluate_fn_hetero( The centralized evaluation function. """ - def evaluate( + def evaluate( # pylint: disable=too-many-locals server_round: int, parameters_ndarrays: NDArrays, config: Dict[str, Scalar] ) -> Tuple[float, Dict[str, Union[Scalar, List[float]]]]: # pylint: disable=unused-argument @@ -191,10 +192,13 @@ def fit_round( aggregated_result: Tuple[ Optional[Parameters], Dict[str, Scalar], - ] = self.strategy.aggregate_fit( # type: ignore [call-arg] - server_round, results, failures, parameters_to_ndarrays(self.parameters) + ] = aggregate_fit( + self.strategy, + server_round, + results, + failures, + parameters_to_ndarrays(self.parameters), ) - # ] = self.strategy.aggregate_fit(server_round, results, failures) parameters_aggregated, metrics_aggregated = aggregated_result return parameters_aggregated, metrics_aggregated, (results, failures) diff --git a/baselines/depthfl/depthfl/strategy.py b/baselines/depthfl/depthfl/strategy.py index 2ec9d876fd72..974edd876d66 100644 --- a/baselines/depthfl/depthfl/strategy.py +++ b/baselines/depthfl/depthfl/strategy.py @@ -52,7 +52,7 @@ class FedDyn(FedAvg): def __init__(self, cfg: DictConfig, net: nn.Module, *args, **kwargs): self.cfg = cfg - self.h = [np.zeros(v.shape) for (k, v) in net.state_dict().items()] + self.h_variate = [np.zeros(v.shape) for (k, v) in net.state_dict().items()] # tagging real weights / biases self.is_weight = [] @@ -71,49 +71,56 @@ def __init__(self, cfg: DictConfig, net: nn.Module, *args, **kwargs): os.makedirs("prev_grads") for idx in range(cfg.num_clients): - with open(f"prev_grads/client_{idx}", "wb") as f: - pickle.dump(prev_grads[idx], f) + with open(f"prev_grads/client_{idx}", "wb") as prev_grads_file: + pickle.dump(prev_grads[idx], prev_grads_file) super().__init__(*args, **kwargs) - def aggregate_fit( # type: ignore[override] - self, - server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - origin: NDArrays, - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: - """Aggregate fit results using weighted average.""" - if not results: - return None, {} - # Do not aggregate if there are failures and failures are not accepted - if not self.accept_failures and failures: - return None, {} - - # Convert results - weights_results = [ - (parameters_to_ndarrays(fit_res.parameters), fit_res.num_examples) - for _, fit_res in results - ] - parameters_aggregated = ndarrays_to_parameters( - aggregate(weights_results, origin, self.h, self.is_weight, self.cfg) + +def aggregate_fit( + strategy, + server_round: int, + results: List[Tuple[ClientProxy, FitRes]], + failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], + origin: NDArrays, +) -> 
Tuple[Optional[Parameters], Dict[str, Scalar]]: + """Aggregate fit results using weighted average.""" + if not results: + return None, {} + # Do not aggregate if there are failures and failures are not accepted + if not strategy.accept_failures and failures: + return None, {} + + # Convert results + weights_results = [ + (parameters_to_ndarrays(fit_res.parameters), fit_res.num_examples) + for _, fit_res in results + ] + parameters_aggregated = ndarrays_to_parameters( + aggregate( + weights_results, + origin, + strategy.h_variate, + strategy.is_weight, + strategy.cfg, ) + ) - # Aggregate custom metrics if aggregation fn was provided - metrics_aggregated = {} - if self.fit_metrics_aggregation_fn: - fit_metrics = [(res.num_examples, res.metrics) for _, res in results] - metrics_aggregated = self.fit_metrics_aggregation_fn(fit_metrics) - elif server_round == 1: # Only log this warning once - log(WARNING, "No fit_metrics_aggregation_fn provided") + # Aggregate custom metrics if aggregation fn was provided + metrics_aggregated = {} + if strategy.fit_metrics_aggregation_fn: + fit_metrics = [(res.num_examples, res.metrics) for _, res in results] + metrics_aggregated = strategy.fit_metrics_aggregation_fn(fit_metrics) + elif server_round == 1: # Only log this warning once + log(WARNING, "No fit_metrics_aggregation_fn provided") - return parameters_aggregated, metrics_aggregated + return parameters_aggregated, metrics_aggregated def aggregate( results: List[Tuple[NDArrays, int]], origin: NDArrays, - h: List, + h_list: List, is_weight: List, cfg: DictConfig, ) -> NDArrays: @@ -134,8 +141,8 @@ def aggregate( # print(np.isscalar(weight)) # update h variable for FedDyn - h[i] = ( - h[i] + h_list[i] = ( + h_list[i] - cfg.fit_config.alpha * param_count[i] * (weight - origin[i]) @@ -144,7 +151,7 @@ def aggregate( # applying h only for weights / biases if is_weight[i] and cfg.fit_config.feddyn: - weights_sum[i] = weight - h[i] / cfg.fit_config.alpha + weights_sum[i] = weight - h_list[i] / cfg.fit_config.alpha else: weights_sum[i] = weight diff --git a/baselines/depthfl/depthfl/strategy_hetero.py b/baselines/depthfl/depthfl/strategy_hetero.py index f92353e15330..73c742452275 100644 --- a/baselines/depthfl/depthfl/strategy_hetero.py +++ b/baselines/depthfl/depthfl/strategy_hetero.py @@ -54,47 +54,13 @@ def __init__(self, cfg: DictConfig, net: nn.Module, *args, **kwargs): super().__init__(*args, **kwargs) - def aggregate_fit( # type: ignore[override] - self, - server_round: int, - results: List[Tuple[ClientProxy, FitRes]], - failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], - origin: NDArrays, - ) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: - """Aggregate fit results using weighted average.""" - if not results: - return None, {} - # Do not aggregate if there are failures and failures are not accepted - if not self.accept_failures and failures: - return None, {} - - # Convert results - weights_results = [ - (parameters_to_ndarrays(fit_res.parameters), fit_res.metrics["cid"]) - for _, fit_res in results - ] - - self.parameters = origin - self.aggregate_hetero(weights_results) - parameters_aggregated = ndarrays_to_parameters(self.parameters) - - # Aggregate custom metrics if aggregation fn was provided - metrics_aggregated = {} - if self.fit_metrics_aggregation_fn: - fit_metrics = [(res.num_examples, res.metrics) for _, res in results] - metrics_aggregated = self.fit_metrics_aggregation_fn(fit_metrics) - elif server_round == 1: # Only log this warning once - log(WARNING, "No 
fit_metrics_aggregation_fn provided") - - return parameters_aggregated, metrics_aggregated - def aggregate_hetero( self, results: List[Tuple[NDArrays, Union[bool, bytes, float, int, str]]] ): """Aggregate function for HeteroFL.""" - for i, v in enumerate(self.parameters): - count = np.zeros(v.shape) - tmp_v = np.zeros(v.shape) + for i, params in enumerate(self.parameters): + count = np.zeros(params.shape) + tmp_v = np.zeros(params.shape) if self.is_weight[i]: for weights, cid in results: if self.cfg.exclusive_learning: @@ -111,11 +77,46 @@ def aggregate_hetero( ) ] += 1 tmp_v[count > 0] = np.divide(tmp_v[count > 0], count[count > 0]) - v[count > 0] = tmp_v[count > 0] + params[count > 0] = tmp_v[count > 0] else: for weights, _ in results: tmp_v += weights[i] count += 1 tmp_v = np.divide(tmp_v, count) - v = tmp_v + params = tmp_v + + +def aggregate_fit( + strategy, + server_round: int, + results: List[Tuple[ClientProxy, FitRes]], + failures: List[Union[Tuple[ClientProxy, FitRes], BaseException]], + origin: NDArrays, +) -> Tuple[Optional[Parameters], Dict[str, Scalar]]: + """Aggregate fit results using weighted average.""" + if not results: + return None, {} + # Do not aggregate if there are failures and failures are not accepted + if not strategy.accept_failures and failures: + return None, {} + + # Convert results + weights_results = [ + (parameters_to_ndarrays(fit_res.parameters), fit_res.metrics["cid"]) + for _, fit_res in results + ] + + strategy.parameters = origin + strategy.aggregate_hetero(weights_results) + parameters_aggregated = ndarrays_to_parameters(strategy.parameters) + + # Aggregate custom metrics if aggregation fn was provided + metrics_aggregated = {} + if strategy.fit_metrics_aggregation_fn: + fit_metrics = [(res.num_examples, res.metrics) for _, res in results] + metrics_aggregated = strategy.fit_metrics_aggregation_fn(fit_metrics) + elif server_round == 1: # Only log this warning once + log(WARNING, "No fit_metrics_aggregation_fn provided") + + return parameters_aggregated, metrics_aggregated From ebbbb0c24cd9ec87641ad54f3a385460bd6eefd2 Mon Sep 17 00:00:00 2001 From: Peterpan828 Date: Mon, 9 Oct 2023 18:00:17 +0900 Subject: [PATCH 38/51] formatting --- baselines/depthfl/depthfl/conf/config.yaml | 7 +----- baselines/depthfl/depthfl/conf/heterofl.yaml | 6 +---- baselines/depthfl/depthfl/strategy.py | 25 -------------------- 3 files changed, 2 insertions(+), 36 deletions(-) diff --git a/baselines/depthfl/depthfl/conf/config.yaml b/baselines/depthfl/depthfl/conf/config.yaml index e5f5f92d824e..5a126229956e 100644 --- a/baselines/depthfl/depthfl/conf/config.yaml +++ b/baselines/depthfl/depthfl/conf/config.yaml @@ -39,9 +39,4 @@ strategy: fraction_evaluate: 0.0 # min_fit_clients: ${clients_per_round} min_evaluate_clients: 0 - # min_available_clients: ${clients_per_round} - evaluate_metrics_aggregation_fn: - _target_: depthfl.strategy.weighted_average - _partial_: true # we dont' want this function to be evaluated when instantiating the strategy, we treat it as a partial and evaluate it when the strategy actuallly calls the function (in aggregate_evaluate()) - - + # min_available_clients: ${clients_per_round} \ No newline at end of file diff --git a/baselines/depthfl/depthfl/conf/heterofl.yaml b/baselines/depthfl/depthfl/conf/heterofl.yaml index 2ad8fa576c4a..0afe61ee95b4 100644 --- a/baselines/depthfl/depthfl/conf/heterofl.yaml +++ b/baselines/depthfl/depthfl/conf/heterofl.yaml @@ -39,8 +39,4 @@ strategy: fraction_evaluate: 0.0 # min_fit_clients: ${clients_per_round} 
min_evaluate_clients: 0 - # min_available_clients: ${clients_per_round} - evaluate_metrics_aggregation_fn: - _target_: depthfl.strategy.weighted_average - _partial_: true # we dont' want this function to be evaluated when instantiating the strategy, we treat it as a partial and evaluate it when the strategy actuallly calls the function (in aggregate_evaluate()) - + # min_available_clients: ${clients_per_round} \ No newline at end of file diff --git a/baselines/depthfl/depthfl/strategy.py b/baselines/depthfl/depthfl/strategy.py index 974edd876d66..ceaff50d57ee 100644 --- a/baselines/depthfl/depthfl/strategy.py +++ b/baselines/depthfl/depthfl/strategy.py @@ -9,7 +9,6 @@ import torch import torch.nn as nn from flwr.common import ( - Metrics, NDArrays, Parameters, Scalar, @@ -23,30 +22,6 @@ from omegaconf import DictConfig -def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics: - """Weighted average during evaluation. - - Parameters - ---------- - metrics : List[Tuple[int, Metrics]] - The list of metrics to aggregate. - - Returns - ------- - Metrics - The weighted average metric. - """ - # Multiply accuracy of each client by number of examples used - accuracies = [num_examples * m["accuracy"] for num_examples, m in metrics] - examples = [num_examples for num_examples, _ in metrics] - - # Aggregate and return custom metric (weighted average) - print("here and nothing is breaking!!!") - return { - "accuracy": int(sum(accuracies)) / int(sum(examples)) # type:ignore[arg-type] - } - - class FedDyn(FedAvg): """Applying dynamic regularization in FedDyn paper.""" From 0f6b9b8b03d4541d660e095f3591ed176020b63c Mon Sep 17 00:00:00 2001 From: Peterpan828 Date: Tue, 10 Oct 2023 19:06:39 +0900 Subject: [PATCH 39/51] debug heterofl --- baselines/depthfl/depthfl/conf/heterofl.yaml | 1 + baselines/depthfl/depthfl/server.py | 9 +++++++-- baselines/depthfl/depthfl/strategy.py | 2 +- baselines/depthfl/depthfl/strategy_hetero.py | 2 +- 4 files changed, 10 insertions(+), 4 deletions(-) diff --git a/baselines/depthfl/depthfl/conf/heterofl.yaml b/baselines/depthfl/depthfl/conf/heterofl.yaml index 0afe61ee95b4..ad0bb8c8f8b8 100644 --- a/baselines/depthfl/depthfl/conf/heterofl.yaml +++ b/baselines/depthfl/depthfl/conf/heterofl.yaml @@ -19,6 +19,7 @@ server_device: cuda dataset_config: iid: true + beta: 0.5 fit_config: feddyn: false diff --git a/baselines/depthfl/depthfl/server.py b/baselines/depthfl/depthfl/server.py index 1b2c526b8a7f..dc99ae2fc5de 100644 --- a/baselines/depthfl/depthfl/server.py +++ b/baselines/depthfl/depthfl/server.py @@ -17,7 +17,8 @@ from depthfl.client import prune from depthfl.models import test, test_sbn -from depthfl.strategy import aggregate_fit +from depthfl.strategy import aggregate_fit_depthfl +from depthfl.strategy_hetero import aggregate_fit_hetero FitResultsAndFailures = Tuple[ List[Tuple[ClientProxy, FitRes]], @@ -188,7 +189,11 @@ def fit_round( len(failures), ) - # Aggregate training results + if "HeteroFL" in str(type(self.strategy)): + aggregate_fit = aggregate_fit_hetero + else: + aggregate_fit = aggregate_fit_depthfl + aggregated_result: Tuple[ Optional[Parameters], Dict[str, Scalar], diff --git a/baselines/depthfl/depthfl/strategy.py b/baselines/depthfl/depthfl/strategy.py index ceaff50d57ee..3414c28c4518 100644 --- a/baselines/depthfl/depthfl/strategy.py +++ b/baselines/depthfl/depthfl/strategy.py @@ -52,7 +52,7 @@ def __init__(self, cfg: DictConfig, net: nn.Module, *args, **kwargs): super().__init__(*args, **kwargs) -def aggregate_fit( +def aggregate_fit_depthfl( 
strategy, server_round: int, results: List[Tuple[ClientProxy, FitRes]], diff --git a/baselines/depthfl/depthfl/strategy_hetero.py b/baselines/depthfl/depthfl/strategy_hetero.py index 73c742452275..3c4ea662873a 100644 --- a/baselines/depthfl/depthfl/strategy_hetero.py +++ b/baselines/depthfl/depthfl/strategy_hetero.py @@ -87,7 +87,7 @@ def aggregate_hetero( params = tmp_v -def aggregate_fit( +def aggregate_fit_hetero( strategy, server_round: int, results: List[Tuple[ClientProxy, FitRes]], From 7894f9134f68d88e7bcd430664695ab5a05a73cd Mon Sep 17 00:00:00 2001 From: Peterpan828 Date: Tue, 10 Oct 2023 20:59:39 +0900 Subject: [PATCH 40/51] debug heterofl --- baselines/depthfl/depthfl/strategy_hetero.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/baselines/depthfl/depthfl/strategy_hetero.py b/baselines/depthfl/depthfl/strategy_hetero.py index 3c4ea662873a..7544204cde2f 100644 --- a/baselines/depthfl/depthfl/strategy_hetero.py +++ b/baselines/depthfl/depthfl/strategy_hetero.py @@ -1,5 +1,7 @@ """Strategy for HeteroFL.""" +import os +import pickle from logging import WARNING from typing import Dict, List, Optional, Tuple, Union @@ -52,6 +54,18 @@ def __init__(self, cfg: DictConfig, net: nn.Module, *args, **kwargs): else: self.is_weight.append(True) + # prev_grads file for each client + prev_grads = [ + {k: torch.zeros(v.numel()) for (k, v) in net.named_parameters()} + ] * cfg.num_clients + + if not os.path.exists("prev_grads"): + os.makedirs("prev_grads") + + for idx in range(cfg.num_clients): + with open(f"prev_grads/client_{idx}", "wb") as prev_grads_file: + pickle.dump(prev_grads[idx], prev_grads_file) + super().__init__(*args, **kwargs) def aggregate_hetero( From 0d60b24d511f0880770ffa3cb10c75842ea8d283 Mon Sep 17 00:00:00 2001 From: jafermarq Date: Wed, 18 Oct 2023 22:47:42 +0000 Subject: [PATCH 41/51] minor formatting --- baselines/depthfl/README.md | 38 ++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/baselines/depthfl/README.md b/baselines/depthfl/README.md index 5e905b921d45..54b26ec32647 100644 --- a/baselines/depthfl/README.md +++ b/baselines/depthfl/README.md @@ -1,37 +1,37 @@ --- -title: DepthFL:Depthwise Federated Learning for Heterogeneous Clients +title: DepthFL: Depthwise Federated Learning for Heterogeneous Clients url: https://openreview.net/forum?id=pf8RIZTMU58 labels: [image classification, system heterogeneity, cross-device, knowledge distillation] -dataset: [CIFAR100] +dataset: [CIFAR-100] --- # DepthFL: Depthwise Federated Learning for Heterogeneous Clients > Note: If you use this baseline in your work, please remember to cite the original authors of the paper as well as the Flower paper. -****Paper:**** : [openreview.net/forum?id=pf8RIZTMU58](https://openreview.net/forum?id=pf8RIZTMU58) +**Paper:** [openreview.net/forum?id=pf8RIZTMU58](https://openreview.net/forum?id=pf8RIZTMU58) -****Authors:**** : Minjae Kim, Sangyoon Yu, Suhyun Kim, Soo-Mook Moon +**Authors:** Minjae Kim, Sangyoon Yu, Suhyun Kim, Soo-Mook Moon -****Abstract:**** : Federated learning is for training a global model without collecting private local data from clients. As they repeatedly need to upload locally-updated weights or gradients instead, clients require both computation and communication resources enough to participate in learning, but in reality their resources are heterogeneous. 
To enable resource-constrained clients to train smaller local models, width scaling techniques have been used, which reduces the channels of a global model. Unfortunately, width scaling suffers from heterogeneity of local models when averaging them, leading to a lower accuracy than when simply excluding resource-constrained clients from training. This paper proposes a new approach based on depth scaling called DepthFL. DepthFL defines local models of different depths by pruning the deepest layers off the global model, and allocates them to clients depending on their available resources. Since many clients do not have enough resources to train deep local models, this would make deep layers partially-trained with insufficient data, unlike shallow layers that are fully trained. DepthFL alleviates this problem by mutual self-distillation of knowledge among the classifiers of various depths within a local model. Our experiments show that depth-scaled local models build a global model better than width-scaled ones, and that self-distillation is highly effective in training data-insufficient deep layers. +**Abstract:** Federated learning is for training a global model without collecting private local data from clients. As they repeatedly need to upload locally-updated weights or gradients instead, clients require both computation and communication resources enough to participate in learning, but in reality their resources are heterogeneous. To enable resource-constrained clients to train smaller local models, width scaling techniques have been used, which reduces the channels of a global model. Unfortunately, width scaling suffers from heterogeneity of local models when averaging them, leading to a lower accuracy than when simply excluding resource-constrained clients from training. This paper proposes a new approach based on depth scaling called DepthFL. DepthFL defines local models of different depths by pruning the deepest layers off the global model, and allocates them to clients depending on their available resources. Since many clients do not have enough resources to train deep local models, this would make deep layers partially-trained with insufficient data, unlike shallow layers that are fully trained. DepthFL alleviates this problem by mutual self-distillation of knowledge among the classifiers of various depths within a local model. Our experiments show that depth-scaled local models build a global model better than width-scaled ones, and that self-distillation is highly effective in training data-insufficient deep layers. ## About this baseline -****What’s implemented:**** The code in this directory replicates the experiments in DepthFL: Depthwise Federated Learning for Heterogeneous Clients (Kim et al., 2023) for CIFAR100, which proposed the DepthFL algorithm. Concretely, it replicates the results for CIFAR100 dataset in Table 2,3 and 4. +**What’s implemented:** The code in this directory replicates the experiments in DepthFL: Depthwise Federated Learning for Heterogeneous Clients (Kim et al., 2023) for CIFAR100, which proposed the DepthFL algorithm. Concretely, it replicates the results for CIFAR100 dataset in Table 2, 3 and 4. -****Datasets:**** CIFAR100 from PyTorch's Torchvision +**Datasets:** CIFAR100 from PyTorch's Torchvision -****Hardware Setup:**** These experiments were run on a server with Nvidia 3090 GPUs. Any machine with 1x 8GB GPU or more would be able to run it in a reasonable amount of time. With the default settings, clients make use of 1.3GB of VRAM. 
Lower `num_gpus` in `client_resources` to train more clients in parallel on your GPU(s). +**Hardware Setup:** These experiments were run on a server with Nvidia 3090 GPUs. Any machine with 1x 8GB GPU or more would be able to run it in a reasonable amount of time. With the default settings, clients make use of 1.3GB of VRAM. Lower `num_gpus` in `client_resources` to train more clients in parallel on your GPU(s). -****Contributors:**** Minjae Kim +**Contributors:** Minjae Kim ## Experimental Setup -****Task:**** Image Classification +**Task:** Image Classification -****Model:**** ResNet18 +**Model:** ResNet18 **Dataset:** This baseline only includes the CIFAR100 dataset. By default it will be partitioned into 100 clients following IID distribution. The settings are as follow: @@ -40,7 +40,7 @@ dataset: [CIFAR100] | CIFAR100 | 100 | 100 | IID or Non-IID | **Training Hyperparameters:** -The following table shows the main hyperparameters for this baseline with their default value (i.e. the value used if you run `python main.py` directly) +The following table shows the main hyperparameters for this baseline with their default value (i.e. the value used if you run `python -m depthfl.main` directly) | Description | Default Value | | ----------- | ----- | @@ -69,10 +69,10 @@ pyenv local 3.10.6 # Tell poetry to use python 3.10 poetry env use 3.10.6 -# install the base Poetry environment +# Install the base Poetry environment poetry install -# activate the environment +# Activate the environment poetry shell ``` @@ -100,7 +100,7 @@ python -m depthfl.main --config-name="heterofl" exclusive_learning=true model_si ### Stateful clients comment -To implement `feddyn`, stateful clients that store prev_grads information are needed. Since flwr does not yet officially support stateful clients, it was implemented as a temporary measure by loading `prev_grads` from disk when creating a client, and then storing it again on disk after learning. Specifically, there are files that store the state of each client in the `prev_grads` folder. +To implement `feddyn`, stateful clients that store prev_grads information are needed. Since flwr does not yet officially support stateful clients, it was implemented as a temporary measure by loading `prev_grads` from disk when creating a client, and then storing it again on disk after learning. Specifically, there are files that store the state of each client in the `prev_grads` folder. When the strategy is instantiated (for both `FedDyn` and `HeteroFL`) the content of `prev_grads` is reset. ## Expected Results @@ -108,14 +108,14 @@ To implement `feddyn`, stateful clients that store prev_grads information are ne With the following command we run DepthFL (FedDyn / FedAvg), InclusiveFL, and HeteroFL to replicate the results of table 2,3,4 in DepthFL paper. Tables 2, 3, and 4 may contain results from the same experiment in multiple tables. 
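To make the "Stateful clients comment" above concrete before the command list: each client's FedDyn control variate survives between rounds only because it is pickled to disk. A minimal sketch of that load-train-save round trip (the `prev_grads/client_<cid>` layout matches the baseline; the helper names are illustrative):

```python
import pickle
from pathlib import Path

STATE_DIR = Path("prev_grads")  # one pickle per client id


def load_prev_grads(cid: int):
    with open(STATE_DIR / f"client_{cid}", "rb") as f:
        return pickle.load(f)


def save_prev_grads(cid: int, prev_grads) -> None:
    with open(STATE_DIR / f"client_{cid}", "wb") as f:
        pickle.dump(prev_grads, f)


# Inside the Flower client's fit():
#   prev_grads = load_prev_grads(cid)   # restore state when the client is created
#   ...local training updates prev_grads...
#   save_prev_grads(cid, prev_grads)    # persist state after training
```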
```bash -# table 2 +## table 2 python -m depthfl.main # table 2 & 4 python -m depthfl.main exclusive_learning=true model_size=1 python -m depthfl.main exclusive_learning=true model_size=2 python -m depthfl.main exclusive_learning=true model_size=3 python -m depthfl.main exclusive_learning=true model_size=4 -# table 2 & 3 +## table 2 & 3 # HeteroFL & corresponding excluive learning python -m depthfl.main --config-name="heterofl" @@ -130,10 +130,10 @@ python -m depthfl.main fit_config.feddyn=false fit_config.kd=false exclusive_le python -m depthfl.main fit_config.feddyn=false fit_config.kd=false exclusive_learning=true model_size=3 python -m depthfl.main fit_config.feddyn=false fit_config.kd=false exclusive_learning=true model_size=4 -# table 3 +## table 3 python -m depthfl.main fit_config.feddyn=false fit_config.kd=false fit_config.extended=false -# table 4 +## table 4 python -m depthfl.main fit_config.kd=false python -m depthfl.main dataset_config.iid=false python -m depthfl.main dataset_config.iid=false fit_config.kd=false From aa3e3fc34d9132932fc1ee66a0b88c483a11193f Mon Sep 17 00:00:00 2001 From: Peterpan828 <59055419+Peterpan828@users.noreply.github.com> Date: Fri, 20 Oct 2023 18:08:49 +0900 Subject: [PATCH 42/51] Update README.md --- baselines/depthfl/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/baselines/depthfl/README.md b/baselines/depthfl/README.md index 54b26ec32647..aa0917f077e9 100644 --- a/baselines/depthfl/README.md +++ b/baselines/depthfl/README.md @@ -1,5 +1,5 @@ --- -title: DepthFL: Depthwise Federated Learning for Heterogeneous Clients +title: DepthFL:Depthwise Federated Learning for Heterogeneous Clients url: https://openreview.net/forum?id=pf8RIZTMU58 labels: [image classification, system heterogeneity, cross-device, knowledge distillation] dataset: [CIFAR-100] From ed21edacaedf970af69156532da25729c2a78b47 Mon Sep 17 00:00:00 2001 From: Peterpan828 Date: Sat, 21 Oct 2023 15:18:29 +0900 Subject: [PATCH 43/51] update readme --- baselines/depthfl/README.md | 56 +++--- .../2023-10-21/15-08-08/0/.hydra/config.yaml | 33 +++ .../2023-10-21/15-08-08/0/.hydra/hydra.yaml | 157 +++++++++++++++ .../15-08-08/0/.hydra/overrides.yaml | 3 + .../2023-10-21/15-08-08/multirun.yaml | 190 ++++++++++++++++++ .../2023-10-21/15-09-27/0/.hydra/config.yaml | 32 +++ .../2023-10-21/15-09-27/0/.hydra/hydra.yaml | 157 +++++++++++++++ .../15-09-27/0/.hydra/overrides.yaml | 3 + .../2023-10-21/15-09-27/1/.hydra/config.yaml | 32 +++ .../2023-10-21/15-09-27/1/.hydra/hydra.yaml | 157 +++++++++++++++ .../15-09-27/1/.hydra/overrides.yaml | 3 + .../2023-10-21/15-09-27/multirun.yaml | 189 +++++++++++++++++ 12 files changed, 983 insertions(+), 29 deletions(-) create mode 100644 baselines/depthfl/multirun/2023-10-21/15-08-08/0/.hydra/config.yaml create mode 100644 baselines/depthfl/multirun/2023-10-21/15-08-08/0/.hydra/hydra.yaml create mode 100644 baselines/depthfl/multirun/2023-10-21/15-08-08/0/.hydra/overrides.yaml create mode 100644 baselines/depthfl/multirun/2023-10-21/15-08-08/multirun.yaml create mode 100644 baselines/depthfl/multirun/2023-10-21/15-09-27/0/.hydra/config.yaml create mode 100644 baselines/depthfl/multirun/2023-10-21/15-09-27/0/.hydra/hydra.yaml create mode 100644 baselines/depthfl/multirun/2023-10-21/15-09-27/0/.hydra/overrides.yaml create mode 100644 baselines/depthfl/multirun/2023-10-21/15-09-27/1/.hydra/config.yaml create mode 100644 baselines/depthfl/multirun/2023-10-21/15-09-27/1/.hydra/hydra.yaml create mode 100644 
baselines/depthfl/multirun/2023-10-21/15-09-27/1/.hydra/overrides.yaml create mode 100644 baselines/depthfl/multirun/2023-10-21/15-09-27/multirun.yaml diff --git a/baselines/depthfl/README.md b/baselines/depthfl/README.md index aa0917f077e9..0166c94c1397 100644 --- a/baselines/depthfl/README.md +++ b/baselines/depthfl/README.md @@ -108,38 +108,18 @@ To implement `feddyn`, stateful clients that store prev_grads information are ne With the following command we run DepthFL (FedDyn / FedAvg), InclusiveFL, and HeteroFL to replicate the results of table 2,3,4 in DepthFL paper. Tables 2, 3, and 4 may contain results from the same experiment in multiple tables. ```bash -## table 2 -python -m depthfl.main # table 2 & 4 -python -m depthfl.main exclusive_learning=true model_size=1 -python -m depthfl.main exclusive_learning=true model_size=2 -python -m depthfl.main exclusive_learning=true model_size=3 -python -m depthfl.main exclusive_learning=true model_size=4 - -## table 2 & 3 - -# HeteroFL & corresponding excluive learning +## table 2 (HeteroFL row) python -m depthfl.main --config-name="heterofl" -python -m depthfl.main --config-name="heterofl" exclusive_learning=true model_size=1 model.scale=false -python -m depthfl.main --config-name="heterofl" exclusive_learning=true model_size=2 model.scale=false -python -m depthfl.main --config-name="heterofl" exclusive_learning=true model_size=3 model.scale=false -python -m depthfl.main --config-name="heterofl" exclusive_learning=true model_size=4 model.scale=false -# DepthFL (FedAvg) & corresponding exclusive learning -python -m depthfl.main fit_config.feddyn=false fit_config.kd=false -python -m depthfl.main fit_config.feddyn=false fit_config.kd=false exclusive_learning=true model_size=1 -python -m depthfl.main fit_config.feddyn=false fit_config.kd=false exclusive_learning=true model_size=2 -python -m depthfl.main fit_config.feddyn=false fit_config.kd=false exclusive_learning=true model_size=3 -python -m depthfl.main fit_config.feddyn=false fit_config.kd=false exclusive_learning=true model_size=4 - -## table 3 -python -m depthfl.main fit_config.feddyn=false fit_config.kd=false fit_config.extended=false +python -m depthfl.main --config-name="heterofl" --multirun exclusive_learning=true model.scale=false model_size=1,2,3,4 -## table 4 -python -m depthfl.main fit_config.kd=false -python -m depthfl.main dataset_config.iid=false -python -m depthfl.main dataset_config.iid=false fit_config.kd=false -``` +## table 2 (DepthFL(FedAvg) row) +python -m depthfl.main fit_config.feddyn=false fit_config.kd=false +python -m depthfl.main --multirun fit_config.feddyn=false fit_config.kd=false exclusive_learning=true model_size=1,2,3,4 -The above commands would generate results in DepthFL paper. The numbers below are the results of a single run, and although they do not perfectly match the numbers recorded in the paper, they are very close. +## table 2 (DepthFL row) +python -m depthfl.main +python -m depthfl.main --multirun exclusive_learning=true model_size=1,2,3,4 +``` **Table 2** @@ -149,6 +129,19 @@ The above commands would generate results in DepthFL paper. The numbers below ar | :---: | :---: | :---: | :---: | :---: | :---: | :---: | | HeteroFL
DepthFL (FedAvg)
DepthFL | CIFAR100 | 57.61
72.67
76.06 | 64.39
67.08
69.68 | 66.08
70.78
73.21 | 62.03
68.41
70.29 | 51.99
59.17
60.32 | +```bash +## table 3 (Width Scaling - Duplicate results from table 2) +python -m depthfl.main --config-name="heterofl" --multirun exclusive_learning=true model.scale=false model_size=1,2,3,4 +python -m depthfl.main --config-name="heterofl" + +## table 3 (Depth Scaling : Exclusive Learning, DepthFL(FedAvg) rows - Duplicate results from table 2) +python -m depthfl.main fit_config.feddyn=false fit_config.kd=false +python -m depthfl.main --multirun fit_config.feddyn=false fit_config.kd=false exclusive_learning=true model_size=1,2,3,4 + +## table 3 (Depth Scaling - InclusiveFL row) +python -m depthfl.main fit_config.feddyn=false fit_config.kd=false fit_config.extended=false +``` + **Table 3** Accuracy of global sub-models compared to exclusive learning on CIFAR-100. @@ -161,6 +154,11 @@ Accuracy of global sub-models compared to exclusive learning on CIFAR-100. | :---: | :---: | :---: | :---: | :---: | :---: | | Depth Scaling | Exclusive Learning
InclusiveFL
DepthFL (FedAvg) | 67.08
47.61
66.18 | 68.00
53.88
67.56 | 66.19
59.48
67.97 | 56.78
60.46
68.01 | +```bash +## table 4 +python -m depthfl.main --multirun fit_config.kd=true,false dataset_config.iid=true,false +``` + **Table 4** Accuracy of the global model with/without self distillation on CIFAR-100. diff --git a/baselines/depthfl/multirun/2023-10-21/15-08-08/0/.hydra/config.yaml b/baselines/depthfl/multirun/2023-10-21/15-08-08/0/.hydra/config.yaml new file mode 100644 index 000000000000..339d6db9ee33 --- /dev/null +++ b/baselines/depthfl/multirun/2023-10-21/15-08-08/0/.hydra/config.yaml @@ -0,0 +1,33 @@ +num_clients: 100 +num_epochs: 5 +batch_size: 50 +num_rounds: 1000 +fraction: 0.1 +learning_rate: 0.1 +learning_rate_decay: 0.998 +static_bn: true +exclusive_learning: true +model_size: 1 +client_resources: + num_cpus: 1 + num_gpus: 0.5 +server_device: cuda +dataset_config: + iid: true + beta: 0.5 +fit_config: + feddyn: false + kd: false + alpha: 0.1 + extended: false + drop_client: false +model: + _target_: depthfl.resnet_hetero.resnet18 + n_blocks: 4 + num_classes: 100 + scale: false +strategy: + _target_: depthfl.strategy_hetero.HeteroFL + fraction_fit: 1.0e-05 + fraction_evaluate: 0.0 + min_evaluate_clients: 0 diff --git a/baselines/depthfl/multirun/2023-10-21/15-08-08/0/.hydra/hydra.yaml b/baselines/depthfl/multirun/2023-10-21/15-08-08/0/.hydra/hydra.yaml new file mode 100644 index 000000000000..85af5b25ec5d --- /dev/null +++ b/baselines/depthfl/multirun/2023-10-21/15-08-08/0/.hydra/hydra.yaml @@ -0,0 +1,157 @@ +hydra: + run: + dir: outputs/${now:%Y-%m-%d}/${now:%H-%M-%S} + sweep: + dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S} + subdir: ${hydra.job.num} + launcher: + _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher + sweeper: + _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper + max_batch_size: null + params: null + help: + app_name: ${hydra.job.name} + header: '${hydra.help.app_name} is powered by Hydra. + + ' + footer: 'Powered by Hydra (https://hydra.cc) + + Use --hydra-help to view Hydra specific help + + ' + template: '${hydra.help.header} + + == Configuration groups == + + Compose your configuration from those groups (group=option) + + + $APP_CONFIG_GROUPS + + + == Config == + + Override anything in the config (foo.bar=value) + + + $CONFIG + + + ${hydra.help.footer} + + ' + hydra_help: + template: 'Hydra (${hydra.runtime.version}) + + See https://hydra.cc for more info. + + + == Flags == + + $FLAGS_HELP + + + == Configuration groups == + + Compose your configuration from those groups (For example, append hydra/job_logging=disabled + to command line) + + + $HYDRA_CONFIG_GROUPS + + + Use ''--cfg hydra'' to Show the Hydra config. + + ' + hydra_help: ??? 
+ hydra_logging: + version: 1 + formatters: + simple: + format: '[%(asctime)s][HYDRA] %(message)s' + handlers: + console: + class: logging.StreamHandler + formatter: simple + stream: ext://sys.stdout + root: + level: INFO + handlers: + - console + loggers: + logging_example: + level: DEBUG + disable_existing_loggers: false + job_logging: + version: 1 + formatters: + simple: + format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' + handlers: + console: + class: logging.StreamHandler + formatter: simple + stream: ext://sys.stdout + file: + class: logging.FileHandler + formatter: simple + filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log + root: + level: INFO + handlers: + - console + - file + disable_existing_loggers: false + env: {} + mode: MULTIRUN + searchpath: [] + callbacks: {} + output_subdir: .hydra + overrides: + hydra: + - hydra.mode=MULTIRUN + task: + - exclusive_learning=True + - model.scale=False + - model_size=1 + job: + name: main + chdir: null + override_dirname: exclusive_learning=True,model.scale=False,model_size=1 + id: '0' + num: 0 + config_name: heterofl + env_set: {} + env_copy: [] + config: + override_dirname: + kv_sep: '=' + item_sep: ',' + exclude_keys: [] + runtime: + version: 1.3.2 + version_base: '1.3' + cwd: /home/peterpan/flower/baselines/depthfl + config_sources: + - path: hydra.conf + schema: pkg + provider: hydra + - path: /home/peterpan/flower/baselines/depthfl/depthfl/conf + schema: file + provider: main + - path: '' + schema: structured + provider: schema + output_dir: /home/peterpan/flower/baselines/depthfl/multirun/2023-10-21/15-08-08/0 + choices: + hydra/env: default + hydra/callbacks: null + hydra/job_logging: default + hydra/hydra_logging: default + hydra/hydra_help: default + hydra/help: default + hydra/sweeper: basic + hydra/launcher: basic + hydra/output: default + verbose: false diff --git a/baselines/depthfl/multirun/2023-10-21/15-08-08/0/.hydra/overrides.yaml b/baselines/depthfl/multirun/2023-10-21/15-08-08/0/.hydra/overrides.yaml new file mode 100644 index 000000000000..0cb14a15733d --- /dev/null +++ b/baselines/depthfl/multirun/2023-10-21/15-08-08/0/.hydra/overrides.yaml @@ -0,0 +1,3 @@ +- exclusive_learning=True +- model.scale=False +- model_size=1 diff --git a/baselines/depthfl/multirun/2023-10-21/15-08-08/multirun.yaml b/baselines/depthfl/multirun/2023-10-21/15-08-08/multirun.yaml new file mode 100644 index 000000000000..225bdb2176fd --- /dev/null +++ b/baselines/depthfl/multirun/2023-10-21/15-08-08/multirun.yaml @@ -0,0 +1,190 @@ +hydra: + run: + dir: outputs/${now:%Y-%m-%d}/${now:%H-%M-%S} + sweep: + dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S} + subdir: ${hydra.job.num} + launcher: + _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher + sweeper: + _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper + max_batch_size: null + params: null + help: + app_name: ${hydra.job.name} + header: '${hydra.help.app_name} is powered by Hydra. + + ' + footer: 'Powered by Hydra (https://hydra.cc) + + Use --hydra-help to view Hydra specific help + + ' + template: '${hydra.help.header} + + == Configuration groups == + + Compose your configuration from those groups (group=option) + + + $APP_CONFIG_GROUPS + + + == Config == + + Override anything in the config (foo.bar=value) + + + $CONFIG + + + ${hydra.help.footer} + + ' + hydra_help: + template: 'Hydra (${hydra.runtime.version}) + + See https://hydra.cc for more info. 
+ + + == Flags == + + $FLAGS_HELP + + + == Configuration groups == + + Compose your configuration from those groups (For example, append hydra/job_logging=disabled + to command line) + + + $HYDRA_CONFIG_GROUPS + + + Use ''--cfg hydra'' to Show the Hydra config. + + ' + hydra_help: ??? + hydra_logging: + version: 1 + formatters: + simple: + format: '[%(asctime)s][HYDRA] %(message)s' + handlers: + console: + class: logging.StreamHandler + formatter: simple + stream: ext://sys.stdout + root: + level: INFO + handlers: + - console + loggers: + logging_example: + level: DEBUG + disable_existing_loggers: false + job_logging: + version: 1 + formatters: + simple: + format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' + handlers: + console: + class: logging.StreamHandler + formatter: simple + stream: ext://sys.stdout + file: + class: logging.FileHandler + formatter: simple + filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log + root: + level: INFO + handlers: + - console + - file + disable_existing_loggers: false + env: {} + mode: MULTIRUN + searchpath: [] + callbacks: {} + output_subdir: .hydra + overrides: + hydra: + - hydra.mode=MULTIRUN + task: + - exclusive_learning=true + - model.scale=false + - model_size=1,2,3,4 + job: + name: main + chdir: null + override_dirname: exclusive_learning=true,model.scale=false,model_size=1,2,3,4 + id: ??? + num: ??? + config_name: heterofl + env_set: {} + env_copy: [] + config: + override_dirname: + kv_sep: '=' + item_sep: ',' + exclude_keys: [] + runtime: + version: 1.3.2 + version_base: '1.3' + cwd: /home/peterpan/flower/baselines/depthfl + config_sources: + - path: hydra.conf + schema: pkg + provider: hydra + - path: /home/peterpan/flower/baselines/depthfl/depthfl/conf + schema: file + provider: main + - path: '' + schema: structured + provider: schema + output_dir: ??? 
+ choices: + hydra/env: default + hydra/callbacks: null + hydra/job_logging: default + hydra/hydra_logging: default + hydra/hydra_help: default + hydra/help: default + hydra/sweeper: basic + hydra/launcher: basic + hydra/output: default + verbose: false +num_clients: 100 +num_epochs: 5 +batch_size: 50 +num_rounds: 1000 +fraction: 0.1 +learning_rate: 0.1 +learning_rate_decay: 0.998 +static_bn: true +exclusive_learning: true +model_size: 1 +client_resources: + num_cpus: 1 + num_gpus: 0.5 +server_device: cuda +dataset_config: + iid: true + beta: 0.5 +fit_config: + feddyn: false + kd: false + alpha: 0.1 + extended: false + drop_client: false +model: + _target_: depthfl.resnet_hetero.resnet18 + n_blocks: 4 + num_classes: 100 + scale: false +strategy: + _target_: depthfl.strategy_hetero.HeteroFL + fraction_fit: 1.0e-05 + fraction_evaluate: 0.0 + min_evaluate_clients: 0 diff --git a/baselines/depthfl/multirun/2023-10-21/15-09-27/0/.hydra/config.yaml b/baselines/depthfl/multirun/2023-10-21/15-09-27/0/.hydra/config.yaml new file mode 100644 index 000000000000..3ec047eeefd7 --- /dev/null +++ b/baselines/depthfl/multirun/2023-10-21/15-09-27/0/.hydra/config.yaml @@ -0,0 +1,32 @@ +num_clients: 100 +num_epochs: 5 +batch_size: 50 +num_rounds: 20 +fraction: 0.1 +learning_rate: 0.1 +learning_rate_decay: 0.998 +static_bn: false +exclusive_learning: false +model_size: 1 +client_resources: + num_cpus: 1 + num_gpus: 0.5 +server_device: cuda +dataset_config: + iid: true + beta: 0.5 +fit_config: + feddyn: true + kd: true + alpha: 0.1 + extended: true + drop_client: false +model: + _target_: depthfl.resnet.multi_resnet18 + n_blocks: 4 + num_classes: 100 +strategy: + _target_: depthfl.strategy.FedDyn + fraction_fit: 1.0e-05 + fraction_evaluate: 0.0 + min_evaluate_clients: 0 diff --git a/baselines/depthfl/multirun/2023-10-21/15-09-27/0/.hydra/hydra.yaml b/baselines/depthfl/multirun/2023-10-21/15-09-27/0/.hydra/hydra.yaml new file mode 100644 index 000000000000..b7821154a442 --- /dev/null +++ b/baselines/depthfl/multirun/2023-10-21/15-09-27/0/.hydra/hydra.yaml @@ -0,0 +1,157 @@ +hydra: + run: + dir: outputs/${now:%Y-%m-%d}/${now:%H-%M-%S} + sweep: + dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S} + subdir: ${hydra.job.num} + launcher: + _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher + sweeper: + _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper + max_batch_size: null + params: null + help: + app_name: ${hydra.job.name} + header: '${hydra.help.app_name} is powered by Hydra. + + ' + footer: 'Powered by Hydra (https://hydra.cc) + + Use --hydra-help to view Hydra specific help + + ' + template: '${hydra.help.header} + + == Configuration groups == + + Compose your configuration from those groups (group=option) + + + $APP_CONFIG_GROUPS + + + == Config == + + Override anything in the config (foo.bar=value) + + + $CONFIG + + + ${hydra.help.footer} + + ' + hydra_help: + template: 'Hydra (${hydra.runtime.version}) + + See https://hydra.cc for more info. + + + == Flags == + + $FLAGS_HELP + + + == Configuration groups == + + Compose your configuration from those groups (For example, append hydra/job_logging=disabled + to command line) + + + $HYDRA_CONFIG_GROUPS + + + Use ''--cfg hydra'' to Show the Hydra config. + + ' + hydra_help: ??? 
+ hydra_logging: + version: 1 + formatters: + simple: + format: '[%(asctime)s][HYDRA] %(message)s' + handlers: + console: + class: logging.StreamHandler + formatter: simple + stream: ext://sys.stdout + root: + level: INFO + handlers: + - console + loggers: + logging_example: + level: DEBUG + disable_existing_loggers: false + job_logging: + version: 1 + formatters: + simple: + format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' + handlers: + console: + class: logging.StreamHandler + formatter: simple + stream: ext://sys.stdout + file: + class: logging.FileHandler + formatter: simple + filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log + root: + level: INFO + handlers: + - console + - file + disable_existing_loggers: false + env: {} + mode: MULTIRUN + searchpath: [] + callbacks: {} + output_subdir: .hydra + overrides: + hydra: + - hydra.mode=MULTIRUN + task: + - fit_config.kd=True + - dataset_config.iid=True + - num_rounds=20 + job: + name: main + chdir: null + override_dirname: dataset_config.iid=True,fit_config.kd=True,num_rounds=20 + id: '0' + num: 0 + config_name: config + env_set: {} + env_copy: [] + config: + override_dirname: + kv_sep: '=' + item_sep: ',' + exclude_keys: [] + runtime: + version: 1.3.2 + version_base: '1.3' + cwd: /home/peterpan/flower/baselines/depthfl + config_sources: + - path: hydra.conf + schema: pkg + provider: hydra + - path: /home/peterpan/flower/baselines/depthfl/depthfl/conf + schema: file + provider: main + - path: '' + schema: structured + provider: schema + output_dir: /home/peterpan/flower/baselines/depthfl/multirun/2023-10-21/15-09-27/0 + choices: + hydra/env: default + hydra/callbacks: null + hydra/job_logging: default + hydra/hydra_logging: default + hydra/hydra_help: default + hydra/help: default + hydra/sweeper: basic + hydra/launcher: basic + hydra/output: default + verbose: false diff --git a/baselines/depthfl/multirun/2023-10-21/15-09-27/0/.hydra/overrides.yaml b/baselines/depthfl/multirun/2023-10-21/15-09-27/0/.hydra/overrides.yaml new file mode 100644 index 000000000000..ae12b21b6f86 --- /dev/null +++ b/baselines/depthfl/multirun/2023-10-21/15-09-27/0/.hydra/overrides.yaml @@ -0,0 +1,3 @@ +- fit_config.kd=True +- dataset_config.iid=True +- num_rounds=20 diff --git a/baselines/depthfl/multirun/2023-10-21/15-09-27/1/.hydra/config.yaml b/baselines/depthfl/multirun/2023-10-21/15-09-27/1/.hydra/config.yaml new file mode 100644 index 000000000000..a8187fd74873 --- /dev/null +++ b/baselines/depthfl/multirun/2023-10-21/15-09-27/1/.hydra/config.yaml @@ -0,0 +1,32 @@ +num_clients: 100 +num_epochs: 5 +batch_size: 50 +num_rounds: 20 +fraction: 0.1 +learning_rate: 0.1 +learning_rate_decay: 0.998 +static_bn: false +exclusive_learning: false +model_size: 1 +client_resources: + num_cpus: 1 + num_gpus: 0.5 +server_device: cuda +dataset_config: + iid: false + beta: 0.5 +fit_config: + feddyn: true + kd: true + alpha: 0.1 + extended: true + drop_client: false +model: + _target_: depthfl.resnet.multi_resnet18 + n_blocks: 4 + num_classes: 100 +strategy: + _target_: depthfl.strategy.FedDyn + fraction_fit: 1.0e-05 + fraction_evaluate: 0.0 + min_evaluate_clients: 0 diff --git a/baselines/depthfl/multirun/2023-10-21/15-09-27/1/.hydra/hydra.yaml b/baselines/depthfl/multirun/2023-10-21/15-09-27/1/.hydra/hydra.yaml new file mode 100644 index 000000000000..fc2b6726c347 --- /dev/null +++ b/baselines/depthfl/multirun/2023-10-21/15-09-27/1/.hydra/hydra.yaml @@ -0,0 +1,157 @@ +hydra: + run: + dir: outputs/${now:%Y-%m-%d}/${now:%H-%M-%S} + sweep: + dir: 
multirun/${now:%Y-%m-%d}/${now:%H-%M-%S} + subdir: ${hydra.job.num} + launcher: + _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher + sweeper: + _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper + max_batch_size: null + params: null + help: + app_name: ${hydra.job.name} + header: '${hydra.help.app_name} is powered by Hydra. + + ' + footer: 'Powered by Hydra (https://hydra.cc) + + Use --hydra-help to view Hydra specific help + + ' + template: '${hydra.help.header} + + == Configuration groups == + + Compose your configuration from those groups (group=option) + + + $APP_CONFIG_GROUPS + + + == Config == + + Override anything in the config (foo.bar=value) + + + $CONFIG + + + ${hydra.help.footer} + + ' + hydra_help: + template: 'Hydra (${hydra.runtime.version}) + + See https://hydra.cc for more info. + + + == Flags == + + $FLAGS_HELP + + + == Configuration groups == + + Compose your configuration from those groups (For example, append hydra/job_logging=disabled + to command line) + + + $HYDRA_CONFIG_GROUPS + + + Use ''--cfg hydra'' to Show the Hydra config. + + ' + hydra_help: ??? + hydra_logging: + version: 1 + formatters: + simple: + format: '[%(asctime)s][HYDRA] %(message)s' + handlers: + console: + class: logging.StreamHandler + formatter: simple + stream: ext://sys.stdout + root: + level: INFO + handlers: + - console + loggers: + logging_example: + level: DEBUG + disable_existing_loggers: false + job_logging: + version: 1 + formatters: + simple: + format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' + handlers: + console: + class: logging.StreamHandler + formatter: simple + stream: ext://sys.stdout + file: + class: logging.FileHandler + formatter: simple + filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log + root: + level: INFO + handlers: + - console + - file + disable_existing_loggers: false + env: {} + mode: MULTIRUN + searchpath: [] + callbacks: {} + output_subdir: .hydra + overrides: + hydra: + - hydra.mode=MULTIRUN + task: + - fit_config.kd=True + - dataset_config.iid=False + - num_rounds=20 + job: + name: main + chdir: null + override_dirname: dataset_config.iid=False,fit_config.kd=True,num_rounds=20 + id: '1' + num: 1 + config_name: config + env_set: {} + env_copy: [] + config: + override_dirname: + kv_sep: '=' + item_sep: ',' + exclude_keys: [] + runtime: + version: 1.3.2 + version_base: '1.3' + cwd: /home/peterpan/flower/baselines/depthfl + config_sources: + - path: hydra.conf + schema: pkg + provider: hydra + - path: /home/peterpan/flower/baselines/depthfl/depthfl/conf + schema: file + provider: main + - path: '' + schema: structured + provider: schema + output_dir: /home/peterpan/flower/baselines/depthfl/multirun/2023-10-21/15-09-27/1 + choices: + hydra/env: default + hydra/callbacks: null + hydra/job_logging: default + hydra/hydra_logging: default + hydra/hydra_help: default + hydra/help: default + hydra/sweeper: basic + hydra/launcher: basic + hydra/output: default + verbose: false diff --git a/baselines/depthfl/multirun/2023-10-21/15-09-27/1/.hydra/overrides.yaml b/baselines/depthfl/multirun/2023-10-21/15-09-27/1/.hydra/overrides.yaml new file mode 100644 index 000000000000..0c1956022f7a --- /dev/null +++ b/baselines/depthfl/multirun/2023-10-21/15-09-27/1/.hydra/overrides.yaml @@ -0,0 +1,3 @@ +- fit_config.kd=True +- dataset_config.iid=False +- num_rounds=20 diff --git a/baselines/depthfl/multirun/2023-10-21/15-09-27/multirun.yaml b/baselines/depthfl/multirun/2023-10-21/15-09-27/multirun.yaml new file mode 100644 index 
000000000000..a21f6d32b46e --- /dev/null +++ b/baselines/depthfl/multirun/2023-10-21/15-09-27/multirun.yaml @@ -0,0 +1,189 @@ +hydra: + run: + dir: outputs/${now:%Y-%m-%d}/${now:%H-%M-%S} + sweep: + dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S} + subdir: ${hydra.job.num} + launcher: + _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher + sweeper: + _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper + max_batch_size: null + params: null + help: + app_name: ${hydra.job.name} + header: '${hydra.help.app_name} is powered by Hydra. + + ' + footer: 'Powered by Hydra (https://hydra.cc) + + Use --hydra-help to view Hydra specific help + + ' + template: '${hydra.help.header} + + == Configuration groups == + + Compose your configuration from those groups (group=option) + + + $APP_CONFIG_GROUPS + + + == Config == + + Override anything in the config (foo.bar=value) + + + $CONFIG + + + ${hydra.help.footer} + + ' + hydra_help: + template: 'Hydra (${hydra.runtime.version}) + + See https://hydra.cc for more info. + + + == Flags == + + $FLAGS_HELP + + + == Configuration groups == + + Compose your configuration from those groups (For example, append hydra/job_logging=disabled + to command line) + + + $HYDRA_CONFIG_GROUPS + + + Use ''--cfg hydra'' to Show the Hydra config. + + ' + hydra_help: ??? + hydra_logging: + version: 1 + formatters: + simple: + format: '[%(asctime)s][HYDRA] %(message)s' + handlers: + console: + class: logging.StreamHandler + formatter: simple + stream: ext://sys.stdout + root: + level: INFO + handlers: + - console + loggers: + logging_example: + level: DEBUG + disable_existing_loggers: false + job_logging: + version: 1 + formatters: + simple: + format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' + handlers: + console: + class: logging.StreamHandler + formatter: simple + stream: ext://sys.stdout + file: + class: logging.FileHandler + formatter: simple + filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log + root: + level: INFO + handlers: + - console + - file + disable_existing_loggers: false + env: {} + mode: MULTIRUN + searchpath: [] + callbacks: {} + output_subdir: .hydra + overrides: + hydra: + - hydra.mode=MULTIRUN + task: + - fit_config.kd=true,false + - dataset_config.iid=true,false + - num_rounds=20 + job: + name: main + chdir: null + override_dirname: dataset_config.iid=true,false,fit_config.kd=true,false,num_rounds=20 + id: ??? + num: ??? + config_name: config + env_set: {} + env_copy: [] + config: + override_dirname: + kv_sep: '=' + item_sep: ',' + exclude_keys: [] + runtime: + version: 1.3.2 + version_base: '1.3' + cwd: /home/peterpan/flower/baselines/depthfl + config_sources: + - path: hydra.conf + schema: pkg + provider: hydra + - path: /home/peterpan/flower/baselines/depthfl/depthfl/conf + schema: file + provider: main + - path: '' + schema: structured + provider: schema + output_dir: ??? 
+ choices: + hydra/env: default + hydra/callbacks: null + hydra/job_logging: default + hydra/hydra_logging: default + hydra/hydra_help: default + hydra/help: default + hydra/sweeper: basic + hydra/launcher: basic + hydra/output: default + verbose: false +num_clients: 100 +num_epochs: 5 +batch_size: 50 +num_rounds: 20 +fraction: 0.1 +learning_rate: 0.1 +learning_rate_decay: 0.998 +static_bn: false +exclusive_learning: false +model_size: 1 +client_resources: + num_cpus: 1 + num_gpus: 0.5 +server_device: cuda +dataset_config: + iid: true + beta: 0.5 +fit_config: + feddyn: true + kd: true + alpha: 0.1 + extended: true + drop_client: false +model: + _target_: depthfl.resnet.multi_resnet18 + n_blocks: 4 + num_classes: 100 +strategy: + _target_: depthfl.strategy.FedDyn + fraction_fit: 1.0e-05 + fraction_evaluate: 0.0 + min_evaluate_clients: 0 From d63e743791211d0a0adeef375936545a76eccb84 Mon Sep 17 00:00:00 2001 From: Peterpan828 Date: Sat, 21 Oct 2023 15:19:10 +0900 Subject: [PATCH 44/51] update gitignore --- baselines/depthfl/.gitignore | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/baselines/depthfl/.gitignore b/baselines/depthfl/.gitignore index 6e8879866afe..fb7448bbcb01 100644 --- a/baselines/depthfl/.gitignore +++ b/baselines/depthfl/.gitignore @@ -1,3 +1,4 @@ dataset/ outputs/ -prev_grads/ \ No newline at end of file +prev_grads/ +multirun/ \ No newline at end of file From 46acafc883ef50a9752c5c1a26b9d6f3c083f3ea Mon Sep 17 00:00:00 2001 From: Peterpan828 Date: Sat, 21 Oct 2023 15:22:02 +0900 Subject: [PATCH 45/51] remove multirun directory --- .../2023-10-21/15-08-08/0/.hydra/config.yaml | 33 --- .../2023-10-21/15-08-08/0/.hydra/hydra.yaml | 157 --------------- .../15-08-08/0/.hydra/overrides.yaml | 3 - .../2023-10-21/15-08-08/multirun.yaml | 190 ------------------ .../2023-10-21/15-09-27/0/.hydra/config.yaml | 32 --- .../2023-10-21/15-09-27/0/.hydra/hydra.yaml | 157 --------------- .../15-09-27/0/.hydra/overrides.yaml | 3 - .../2023-10-21/15-09-27/1/.hydra/config.yaml | 32 --- .../2023-10-21/15-09-27/1/.hydra/hydra.yaml | 157 --------------- .../15-09-27/1/.hydra/overrides.yaml | 3 - .../2023-10-21/15-09-27/multirun.yaml | 189 ----------------- 11 files changed, 956 deletions(-) delete mode 100644 baselines/depthfl/multirun/2023-10-21/15-08-08/0/.hydra/config.yaml delete mode 100644 baselines/depthfl/multirun/2023-10-21/15-08-08/0/.hydra/hydra.yaml delete mode 100644 baselines/depthfl/multirun/2023-10-21/15-08-08/0/.hydra/overrides.yaml delete mode 100644 baselines/depthfl/multirun/2023-10-21/15-08-08/multirun.yaml delete mode 100644 baselines/depthfl/multirun/2023-10-21/15-09-27/0/.hydra/config.yaml delete mode 100644 baselines/depthfl/multirun/2023-10-21/15-09-27/0/.hydra/hydra.yaml delete mode 100644 baselines/depthfl/multirun/2023-10-21/15-09-27/0/.hydra/overrides.yaml delete mode 100644 baselines/depthfl/multirun/2023-10-21/15-09-27/1/.hydra/config.yaml delete mode 100644 baselines/depthfl/multirun/2023-10-21/15-09-27/1/.hydra/hydra.yaml delete mode 100644 baselines/depthfl/multirun/2023-10-21/15-09-27/1/.hydra/overrides.yaml delete mode 100644 baselines/depthfl/multirun/2023-10-21/15-09-27/multirun.yaml diff --git a/baselines/depthfl/multirun/2023-10-21/15-08-08/0/.hydra/config.yaml b/baselines/depthfl/multirun/2023-10-21/15-08-08/0/.hydra/config.yaml deleted file mode 100644 index 339d6db9ee33..000000000000 --- a/baselines/depthfl/multirun/2023-10-21/15-08-08/0/.hydra/config.yaml +++ /dev/null @@ -1,33 +0,0 @@ -num_clients: 100 -num_epochs: 5 -batch_size: 50 
-num_rounds: 1000 -fraction: 0.1 -learning_rate: 0.1 -learning_rate_decay: 0.998 -static_bn: true -exclusive_learning: true -model_size: 1 -client_resources: - num_cpus: 1 - num_gpus: 0.5 -server_device: cuda -dataset_config: - iid: true - beta: 0.5 -fit_config: - feddyn: false - kd: false - alpha: 0.1 - extended: false - drop_client: false -model: - _target_: depthfl.resnet_hetero.resnet18 - n_blocks: 4 - num_classes: 100 - scale: false -strategy: - _target_: depthfl.strategy_hetero.HeteroFL - fraction_fit: 1.0e-05 - fraction_evaluate: 0.0 - min_evaluate_clients: 0 diff --git a/baselines/depthfl/multirun/2023-10-21/15-08-08/0/.hydra/hydra.yaml b/baselines/depthfl/multirun/2023-10-21/15-08-08/0/.hydra/hydra.yaml deleted file mode 100644 index 85af5b25ec5d..000000000000 --- a/baselines/depthfl/multirun/2023-10-21/15-08-08/0/.hydra/hydra.yaml +++ /dev/null @@ -1,157 +0,0 @@ -hydra: - run: - dir: outputs/${now:%Y-%m-%d}/${now:%H-%M-%S} - sweep: - dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][HYDRA] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: simple - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - loggers: - logging_example: - level: DEBUG - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: simple - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - exclusive_learning=True - - model.scale=False - - model_size=1 - job: - name: main - chdir: null - override_dirname: exclusive_learning=True,model.scale=False,model_size=1 - id: '0' - num: 0 - config_name: heterofl - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/peterpan/flower/baselines/depthfl - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: /home/peterpan/flower/baselines/depthfl/depthfl/conf - schema: file - provider: main - - path: '' - schema: structured - provider: schema - output_dir: /home/peterpan/flower/baselines/depthfl/multirun/2023-10-21/15-08-08/0 - choices: - hydra/env: default - hydra/callbacks: null - hydra/job_logging: default - hydra/hydra_logging: default - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/baselines/depthfl/multirun/2023-10-21/15-08-08/0/.hydra/overrides.yaml b/baselines/depthfl/multirun/2023-10-21/15-08-08/0/.hydra/overrides.yaml deleted file mode 100644 index 0cb14a15733d..000000000000 --- a/baselines/depthfl/multirun/2023-10-21/15-08-08/0/.hydra/overrides.yaml +++ /dev/null @@ -1,3 +0,0 @@ -- exclusive_learning=True -- model.scale=False -- model_size=1 diff --git a/baselines/depthfl/multirun/2023-10-21/15-08-08/multirun.yaml b/baselines/depthfl/multirun/2023-10-21/15-08-08/multirun.yaml deleted file mode 100644 index 225bdb2176fd..000000000000 --- a/baselines/depthfl/multirun/2023-10-21/15-08-08/multirun.yaml +++ /dev/null @@ -1,190 +0,0 @@ -hydra: - run: - dir: outputs/${now:%Y-%m-%d}/${now:%H-%M-%S} - sweep: - dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. 
- - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][HYDRA] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: simple - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - loggers: - logging_example: - level: DEBUG - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: simple - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - exclusive_learning=true - - model.scale=false - - model_size=1,2,3,4 - job: - name: main - chdir: null - override_dirname: exclusive_learning=true,model.scale=false,model_size=1,2,3,4 - id: ??? - num: ??? - config_name: heterofl - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/peterpan/flower/baselines/depthfl - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: /home/peterpan/flower/baselines/depthfl/depthfl/conf - schema: file - provider: main - - path: '' - schema: structured - provider: schema - output_dir: ??? 
- choices: - hydra/env: default - hydra/callbacks: null - hydra/job_logging: default - hydra/hydra_logging: default - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false -num_clients: 100 -num_epochs: 5 -batch_size: 50 -num_rounds: 1000 -fraction: 0.1 -learning_rate: 0.1 -learning_rate_decay: 0.998 -static_bn: true -exclusive_learning: true -model_size: 1 -client_resources: - num_cpus: 1 - num_gpus: 0.5 -server_device: cuda -dataset_config: - iid: true - beta: 0.5 -fit_config: - feddyn: false - kd: false - alpha: 0.1 - extended: false - drop_client: false -model: - _target_: depthfl.resnet_hetero.resnet18 - n_blocks: 4 - num_classes: 100 - scale: false -strategy: - _target_: depthfl.strategy_hetero.HeteroFL - fraction_fit: 1.0e-05 - fraction_evaluate: 0.0 - min_evaluate_clients: 0 diff --git a/baselines/depthfl/multirun/2023-10-21/15-09-27/0/.hydra/config.yaml b/baselines/depthfl/multirun/2023-10-21/15-09-27/0/.hydra/config.yaml deleted file mode 100644 index 3ec047eeefd7..000000000000 --- a/baselines/depthfl/multirun/2023-10-21/15-09-27/0/.hydra/config.yaml +++ /dev/null @@ -1,32 +0,0 @@ -num_clients: 100 -num_epochs: 5 -batch_size: 50 -num_rounds: 20 -fraction: 0.1 -learning_rate: 0.1 -learning_rate_decay: 0.998 -static_bn: false -exclusive_learning: false -model_size: 1 -client_resources: - num_cpus: 1 - num_gpus: 0.5 -server_device: cuda -dataset_config: - iid: true - beta: 0.5 -fit_config: - feddyn: true - kd: true - alpha: 0.1 - extended: true - drop_client: false -model: - _target_: depthfl.resnet.multi_resnet18 - n_blocks: 4 - num_classes: 100 -strategy: - _target_: depthfl.strategy.FedDyn - fraction_fit: 1.0e-05 - fraction_evaluate: 0.0 - min_evaluate_clients: 0 diff --git a/baselines/depthfl/multirun/2023-10-21/15-09-27/0/.hydra/hydra.yaml b/baselines/depthfl/multirun/2023-10-21/15-09-27/0/.hydra/hydra.yaml deleted file mode 100644 index b7821154a442..000000000000 --- a/baselines/depthfl/multirun/2023-10-21/15-09-27/0/.hydra/hydra.yaml +++ /dev/null @@ -1,157 +0,0 @@ -hydra: - run: - dir: outputs/${now:%Y-%m-%d}/${now:%H-%M-%S} - sweep: - dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][HYDRA] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: simple - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - loggers: - logging_example: - level: DEBUG - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: simple - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - fit_config.kd=True - - dataset_config.iid=True - - num_rounds=20 - job: - name: main - chdir: null - override_dirname: dataset_config.iid=True,fit_config.kd=True,num_rounds=20 - id: '0' - num: 0 - config_name: config - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/peterpan/flower/baselines/depthfl - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: /home/peterpan/flower/baselines/depthfl/depthfl/conf - schema: file - provider: main - - path: '' - schema: structured - provider: schema - output_dir: /home/peterpan/flower/baselines/depthfl/multirun/2023-10-21/15-09-27/0 - choices: - hydra/env: default - hydra/callbacks: null - hydra/job_logging: default - hydra/hydra_logging: default - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/baselines/depthfl/multirun/2023-10-21/15-09-27/0/.hydra/overrides.yaml b/baselines/depthfl/multirun/2023-10-21/15-09-27/0/.hydra/overrides.yaml deleted file mode 100644 index ae12b21b6f86..000000000000 --- a/baselines/depthfl/multirun/2023-10-21/15-09-27/0/.hydra/overrides.yaml +++ /dev/null @@ -1,3 +0,0 @@ -- fit_config.kd=True -- dataset_config.iid=True -- num_rounds=20 diff --git a/baselines/depthfl/multirun/2023-10-21/15-09-27/1/.hydra/config.yaml b/baselines/depthfl/multirun/2023-10-21/15-09-27/1/.hydra/config.yaml deleted file mode 100644 index a8187fd74873..000000000000 --- a/baselines/depthfl/multirun/2023-10-21/15-09-27/1/.hydra/config.yaml +++ /dev/null @@ -1,32 +0,0 @@ -num_clients: 100 -num_epochs: 5 -batch_size: 50 -num_rounds: 20 -fraction: 0.1 -learning_rate: 0.1 -learning_rate_decay: 0.998 -static_bn: false -exclusive_learning: false -model_size: 1 -client_resources: - num_cpus: 1 - num_gpus: 0.5 -server_device: cuda -dataset_config: - iid: false - beta: 0.5 -fit_config: - feddyn: true - kd: true - alpha: 0.1 - extended: true - drop_client: false -model: - _target_: depthfl.resnet.multi_resnet18 - n_blocks: 4 - num_classes: 100 -strategy: - _target_: depthfl.strategy.FedDyn - fraction_fit: 1.0e-05 - fraction_evaluate: 0.0 - min_evaluate_clients: 0 diff --git a/baselines/depthfl/multirun/2023-10-21/15-09-27/1/.hydra/hydra.yaml b/baselines/depthfl/multirun/2023-10-21/15-09-27/1/.hydra/hydra.yaml deleted file mode 100644 index fc2b6726c347..000000000000 --- a/baselines/depthfl/multirun/2023-10-21/15-09-27/1/.hydra/hydra.yaml +++ /dev/null @@ -1,157 +0,0 @@ -hydra: - run: - dir: outputs/${now:%Y-%m-%d}/${now:%H-%M-%S} - sweep: - 
dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][HYDRA] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: simple - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - loggers: - logging_example: - level: DEBUG - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: simple - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - fit_config.kd=True - - dataset_config.iid=False - - num_rounds=20 - job: - name: main - chdir: null - override_dirname: dataset_config.iid=False,fit_config.kd=True,num_rounds=20 - id: '1' - num: 1 - config_name: config - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/peterpan/flower/baselines/depthfl - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: /home/peterpan/flower/baselines/depthfl/depthfl/conf - schema: file - provider: main - - path: '' - schema: structured - provider: schema - output_dir: /home/peterpan/flower/baselines/depthfl/multirun/2023-10-21/15-09-27/1 - choices: - hydra/env: default - hydra/callbacks: null - hydra/job_logging: default - hydra/hydra_logging: default - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/baselines/depthfl/multirun/2023-10-21/15-09-27/1/.hydra/overrides.yaml b/baselines/depthfl/multirun/2023-10-21/15-09-27/1/.hydra/overrides.yaml deleted file mode 100644 index 0c1956022f7a..000000000000 --- a/baselines/depthfl/multirun/2023-10-21/15-09-27/1/.hydra/overrides.yaml +++ /dev/null @@ -1,3 +0,0 @@ -- fit_config.kd=True -- dataset_config.iid=False -- num_rounds=20 diff --git a/baselines/depthfl/multirun/2023-10-21/15-09-27/multirun.yaml b/baselines/depthfl/multirun/2023-10-21/15-09-27/multirun.yaml deleted file mode 
100644 index a21f6d32b46e..000000000000 --- a/baselines/depthfl/multirun/2023-10-21/15-09-27/multirun.yaml +++ /dev/null @@ -1,189 +0,0 @@ -hydra: - run: - dir: outputs/${now:%Y-%m-%d}/${now:%H-%M-%S} - sweep: - dir: multirun/${now:%Y-%m-%d}/${now:%H-%M-%S} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: null - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][HYDRA] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: simple - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - loggers: - logging_example: - level: DEBUG - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: simple - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - fit_config.kd=true,false - - dataset_config.iid=true,false - - num_rounds=20 - job: - name: main - chdir: null - override_dirname: dataset_config.iid=true,false,fit_config.kd=true,false,num_rounds=20 - id: ??? - num: ??? - config_name: config - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/peterpan/flower/baselines/depthfl - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: /home/peterpan/flower/baselines/depthfl/depthfl/conf - schema: file - provider: main - - path: '' - schema: structured - provider: schema - output_dir: ??? 
- choices: - hydra/env: default - hydra/callbacks: null - hydra/job_logging: default - hydra/hydra_logging: default - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false -num_clients: 100 -num_epochs: 5 -batch_size: 50 -num_rounds: 20 -fraction: 0.1 -learning_rate: 0.1 -learning_rate_decay: 0.998 -static_bn: false -exclusive_learning: false -model_size: 1 -client_resources: - num_cpus: 1 - num_gpus: 0.5 -server_device: cuda -dataset_config: - iid: true - beta: 0.5 -fit_config: - feddyn: true - kd: true - alpha: 0.1 - extended: true - drop_client: false -model: - _target_: depthfl.resnet.multi_resnet18 - n_blocks: 4 - num_classes: 100 -strategy: - _target_: depthfl.strategy.FedDyn - fraction_fit: 1.0e-05 - fraction_evaluate: 0.0 - min_evaluate_clients: 0 From bf711613e7a4c987deb977fff027e03dabda13e3 Mon Sep 17 00:00:00 2001 From: Peterpan828 Date: Sat, 21 Oct 2023 15:31:14 +0900 Subject: [PATCH 46/51] update readme --- baselines/depthfl/README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/baselines/depthfl/README.md b/baselines/depthfl/README.md index 0166c94c1397..cfc6bcf08d2e 100644 --- a/baselines/depthfl/README.md +++ b/baselines/depthfl/README.md @@ -131,8 +131,9 @@ python -m depthfl.main --multirun exclusive_learning=true model_size=1,2,3,4 ```bash ## table 3 (Width Scaling - Duplicate results from table 2) -python -m depthfl.main --config-name="heterofl" --multirun exclusive_learning=true model.scale=false model_size=1,2,3,4 python -m depthfl.main --config-name="heterofl" +python -m depthfl.main --config-name="heterofl" --multirun exclusive_learning=true model.scale=false model_size=1,2,3,4 + ## table 3 (Depth Scaling : Exclusive Learning, DepthFL(FedAvg) rows - Duplicate results from table 2) python -m depthfl.main fit_config.feddyn=false fit_config.kd=false From 4b8a4740862eb76355da912374f3a2092300f3a9 Mon Sep 17 00:00:00 2001 From: Peterpan828 <59055419+Peterpan828@users.noreply.github.com> Date: Sat, 21 Oct 2023 15:32:26 +0900 Subject: [PATCH 47/51] Update README.md --- baselines/depthfl/README.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/baselines/depthfl/README.md b/baselines/depthfl/README.md index cfc6bcf08d2e..a7c6265173bf 100644 --- a/baselines/depthfl/README.md +++ b/baselines/depthfl/README.md @@ -108,15 +108,15 @@ To implement `feddyn`, stateful clients that store prev_grads information are ne With the following command we run DepthFL (FedDyn / FedAvg), InclusiveFL, and HeteroFL to replicate the results of table 2,3,4 in DepthFL paper. Tables 2, 3, and 4 may contain results from the same experiment in multiple tables. ```bash -## table 2 (HeteroFL row) +# table 2 (HeteroFL row) python -m depthfl.main --config-name="heterofl" python -m depthfl.main --config-name="heterofl" --multirun exclusive_learning=true model.scale=false model_size=1,2,3,4 -## table 2 (DepthFL(FedAvg) row) +# table 2 (DepthFL(FedAvg) row) python -m depthfl.main fit_config.feddyn=false fit_config.kd=false python -m depthfl.main --multirun fit_config.feddyn=false fit_config.kd=false exclusive_learning=true model_size=1,2,3,4 -## table 2 (DepthFL row) +# table 2 (DepthFL row) python -m depthfl.main python -m depthfl.main --multirun exclusive_learning=true model_size=1,2,3,4 ``` @@ -130,12 +130,12 @@ python -m depthfl.main --multirun exclusive_learning=true model_size=1,2,3,4 | HeteroFL
DepthFL (FedAvg)<br>DepthFL | CIFAR100 | 57.61<br>72.67<br>76.06 | 64.39<br>67.08<br>69.68 | 66.08<br>70.78<br>73.21 | 62.03<br>68.41<br>70.29 | 51.99<br>59.17<br>
60.32 | ```bash -## table 3 (Width Scaling - Duplicate results from table 2) +# table 3 (Width Scaling - Duplicate results from table 2) python -m depthfl.main --config-name="heterofl" python -m depthfl.main --config-name="heterofl" --multirun exclusive_learning=true model.scale=false model_size=1,2,3,4 -## table 3 (Depth Scaling : Exclusive Learning, DepthFL(FedAvg) rows - Duplicate results from table 2) +# table 3 (Depth Scaling : Exclusive Learning, DepthFL(FedAvg) rows - Duplicate results from table 2) python -m depthfl.main fit_config.feddyn=false fit_config.kd=false python -m depthfl.main --multirun fit_config.feddyn=false fit_config.kd=false exclusive_learning=true model_size=1,2,3,4 @@ -156,7 +156,7 @@ Accuracy of global sub-models compared to exclusive learning on CIFAR-100. | Depth Scaling | Exclusive Learning
InclusiveFL<br>DepthFL (FedAvg) | 67.08<br>47.61<br>66.18 | 68.00<br>53.88<br>67.56 | 66.19<br>59.48<br>67.97 | 56.78<br>60.46<br>
68.01 | ```bash -## table 4 +# table 4 python -m depthfl.main --multirun fit_config.kd=true,false dataset_config.iid=true,false ``` From 03e422c3ecbd78ac55b04225188ed65107137a7a Mon Sep 17 00:00:00 2001 From: Peterpan828 <59055419+Peterpan828@users.noreply.github.com> Date: Sat, 21 Oct 2023 15:36:29 +0900 Subject: [PATCH 48/51] Update README.md --- baselines/depthfl/README.md | 1 - 1 file changed, 1 deletion(-) diff --git a/baselines/depthfl/README.md b/baselines/depthfl/README.md index a7c6265173bf..9abd47000d04 100644 --- a/baselines/depthfl/README.md +++ b/baselines/depthfl/README.md @@ -134,7 +134,6 @@ python -m depthfl.main --multirun exclusive_learning=true model_size=1,2,3,4 python -m depthfl.main --config-name="heterofl" python -m depthfl.main --config-name="heterofl" --multirun exclusive_learning=true model.scale=false model_size=1,2,3,4 - # table 3 (Depth Scaling : Exclusive Learning, DepthFL(FedAvg) rows - Duplicate results from table 2) python -m depthfl.main fit_config.feddyn=false fit_config.kd=false python -m depthfl.main --multirun fit_config.feddyn=false fit_config.kd=false exclusive_learning=true model_size=1,2,3,4 From b96e6bcfa25743b2ad3e255956961c7bbe2b6590 Mon Sep 17 00:00:00 2001 From: jafermarq Date: Sun, 22 Oct 2023 02:06:43 +0000 Subject: [PATCH 49/51] reflected in changelog --- doc/source/ref-changelog.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/source/ref-changelog.md b/doc/source/ref-changelog.md index 891632edaaf5..29ca58989915 100644 --- a/doc/source/ref-changelog.md +++ b/doc/source/ref-changelog.md @@ -28,6 +28,8 @@ - FedMeta [#2438](https://github.com/adap/flower/pull/2438) + - DepthFL [#2295](https://github.com/adap/flower/pull/2295) + - **Update Flower Examples** ([#2384](https://github.com/adap/flower/pull/2384)), ([#2425](https://github.com/adap/flower/pull/2425)) - **General updates to baselines** ([#2301](https://github.com/adap/flower/pull/2301), [#2305](https://github.com/adap/flower/pull/2305), [#2307](https://github.com/adap/flower/pull/2307), [#2327](https://github.com/adap/flower/pull/2327), [#2435](https://github.com/adap/flower/pull/2435)) From 827c7d2e44abc56228d151df2ec74bfe60ee94a4 Mon Sep 17 00:00:00 2001 From: Peterpan828 <59055419+Peterpan828@users.noreply.github.com> Date: Thu, 26 Oct 2023 18:15:11 +0900 Subject: [PATCH 50/51] Update README.md --- baselines/depthfl/README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/baselines/depthfl/README.md b/baselines/depthfl/README.md index 9abd47000d04..b8ab7ed18571 100644 --- a/baselines/depthfl/README.md +++ b/baselines/depthfl/README.md @@ -82,7 +82,8 @@ poetry shell To run this DepthFL, first ensure you have activated your Poetry environment (execute `poetry shell` from this directory), then: ```bash -python -m depthfl.main # this will run using the default settings in the `conf/config.yaml` +# this will run using the default settings in the `conf/config.yaml` +python -m depthfl.main # 'accuracy' : accuracy of the ensemble model, 'accuracy_single' : accuracy of each classifier. 
# you can override settings directly from the command line python -m depthfl.main exclusive_learning=true model_size=1 # exclusive learning - 100% (a) From 6a1f1667cff5237daeb57f9f9e8bfc2c3861f81c Mon Sep 17 00:00:00 2001 From: jafermarq Date: Mon, 30 Oct 2023 07:53:54 +0000 Subject: [PATCH 51/51] format --- doc/source/ref-changelog.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/ref-changelog.md b/doc/source/ref-changelog.md index fb200b8cd5ec..f137cb28fea6 100644 --- a/doc/source/ref-changelog.md +++ b/doc/source/ref-changelog.md @@ -31,7 +31,7 @@ - FjORD [#2431](https://github.com/adap/flower/pull/2431) - MOON [#2421](https://github.com/adap/flower/pull/2421) - + - DepthFL [#2295](https://github.com/adap/flower/pull/2295) - **Update Flower Examples** ([#2384](https://github.com/adap/flower/pull/2384),[#2425](https://github.com/adap/flower/pull/2425), [#2526](https://github.com/adap/flower/pull/2526))
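The hunks above add `prev_grads/` to `.gitignore` and toggle `fit_config.feddyn` and `fit_config.alpha` in the configs: the README notes that FedDyn requires stateful clients that persist `prev_grads` between rounds. As a minimal sketch only — the function name, the flat-tensor state layout, and the load/save convention are assumptions, not this baseline's actual code — a FedDyn-style local update could look like:

```python
import torch

def feddyn_local_update(net, server_params, prev_grads, trainloader,
                        alpha=0.1, epochs=5, lr=0.1, device="cuda"):
    """One client round of FedDyn-style local training (illustrative sketch).

    `server_params` and `prev_grads` are assumed to be flat 1-D tensors with
    one entry per model parameter; persisting `prev_grads` between rounds is
    what makes the client stateful (stored under `prev_grads/` here).
    """
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(net.parameters(), lr=lr)
    net.to(device).train()
    server_params = server_params.to(device)
    prev_grads = prev_grads.to(device)

    for _ in range(epochs):
        for images, labels in trainloader:
            images, labels = images.to(device), labels.to(device)
            optimizer.zero_grad()
            loss = criterion(net(images), labels)

            # Flatten the live weights so both FedDyn terms are simple
            # vector operations over the whole parameter set.
            cur = torch.cat([p.reshape(-1) for p in net.parameters()])
            # (1) linear penalty from the persisted gradient state
            loss -= torch.dot(prev_grads, cur)
            # (2) proximal term pulling the weights toward the server model
            loss += 0.5 * alpha * torch.sum((cur - server_params) ** 2)

            loss.backward()
            optimizer.step()

    # State update: h_k <- h_k - alpha * (w_k - w_server), reused the next
    # time this client is sampled.
    with torch.no_grad():
        cur = torch.cat([p.reshape(-1) for p in net.parameters()])
        prev_grads -= alpha * (cur - server_params)
    return prev_grads
```

After training, the updated state would be written back to `prev_grads/` so the same client can pick it up again when sampled in a later round.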
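Table 4 isolates the effect of self distillation (`fit_config.kd`) on the global model. In DepthFL, the sub-classifiers at different depths distill knowledge into one another via KL divergence on softened predictions; the sketch below only illustrates that loss shape — the temperature, the mutual every-exit-to-every-exit pairing, and the averaging constant are assumptions, not the repository's exact formulation:

```python
import torch
import torch.nn.functional as F

def self_distillation_loss(logits_list, labels, temperature=3.0):
    """Cross-entropy on every exit plus mutual KL distillation between exits.

    `logits_list` holds the outputs of each sub-classifier (one per depth)
    from a single forward pass of the multi-exit network.
    """
    # Supervised term: every exit is trained on the ground-truth labels.
    ce = sum(F.cross_entropy(logits, labels) for logits in logits_list)
    if len(logits_list) < 2:
        return ce

    kd = 0.0
    t = temperature
    for i, student in enumerate(logits_list):
        for j, teacher in enumerate(logits_list):
            if i == j:
                continue
            # Each exit distills from every other exit's softened,
            # detached prediction (no gradient flows into the teacher).
            kd += F.kl_div(
                F.log_softmax(student / t, dim=1),
                F.softmax(teacher.detach() / t, dim=1),
                reduction="batchmean",
            ) * (t * t)

    return ce + kd / (len(logits_list) - 1)
```

Setting `kd=false` would drop the KL term and train each exit with cross-entropy alone, which is the ablation the "w/o self distillation" column of Table 4 reports.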