diff --git a/examples/doc/source/conf.py b/examples/doc/source/conf.py
index fd93502cf5df..b9c18fba2e18 100644
--- a/examples/doc/source/conf.py
+++ b/examples/doc/source/conf.py
@@ -24,7 +24,6 @@
 import datetime
-
 project = "Flower"
 copyright = f"{datetime.date.today().year} Flower Labs GmbH"
 author = "The Flower Authors"
@@ -63,8 +62,10 @@
 # Sphinx redirects, implemented after the doc filename changes.
 # To prevent 404 errors and redirect to the new pages.
-# redirects = {
-# }
+redirects = {
+    "quickstart-mxnet": "index.html",
+    "mxnet-from-centralized-to-federated": "index.html",
+}
 
 # -- Options for HTML output -------------------------------------------------
diff --git a/examples/mxnet-from-centralized-to-federated/.gitignore b/examples/mxnet-from-centralized-to-federated/.gitignore
deleted file mode 100644
index 10d00b5797e2..000000000000
--- a/examples/mxnet-from-centralized-to-federated/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-*.gz
diff --git a/examples/mxnet-from-centralized-to-federated/README.md b/examples/mxnet-from-centralized-to-federated/README.md
deleted file mode 100644
index 2c3f240d8978..000000000000
--- a/examples/mxnet-from-centralized-to-federated/README.md
+++ /dev/null
@@ -1,81 +0,0 @@
-# MXNet: From Centralized To Federated
-
-> Note: the MXNet project has ended and is now in [Attic](https://attic.apache.org/projects/mxnet.html). The MXNet GitHub repository has also [been archived](https://github.com/apache/mxnet). As a result, this example won't receive further updates. Using MXNet is no longer recommended.
-
-This example demonstrates how an existing centralized MXNet-based machine learning project can be federated with Flower.
-
-This introductory example for Flower uses MXNet, but you're not required to be an MXNet expert to run it. The example will help you understand how Flower can be used to build federated learning use cases based on an existing MXNet project.
-
-## Project Setup
-
-Start by cloning the example project. We prepared a single-line command that you can copy into your shell; it will check out the example for you:
-
-```shell
-git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/mxnet-from-centralized-to-federated . && rm -rf flower && cd mxnet-from-centralized-to-federated
-```
-
-This will create a new directory called `mxnet-from-centralized-to-federated` containing the following files:
-
-```shell
--- pyproject.toml
--- requirements.txt
--- mxnet_mnist.py
--- client.py
--- server.py
--- README.md
-```
-
-### Installing Dependencies
-
-Project dependencies (such as `mxnet` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences.
-
-#### Poetry
-
-```shell
-poetry install
-poetry shell
-```
-
-Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly, you can run the following command:
-
-```shell
-poetry run python3 -c "import flwr"
-```
-
-If you don't see any errors, you're good to go!
-
-#### pip
-
-Run the command below in your terminal to install the dependencies listed in `requirements.txt`.
-
-```shell
-pip install -r requirements.txt
-```
-
-## Run MXNet Federated
-
-This MXNet example is based on the [Handwritten Digit Recognition](https://mxnet.apache.org/versions/1.7.0/api/python/docs/tutorials/packages/gluon/image/mnist.html) tutorial and uses the MNIST dataset (hand-written digits, 28x28 pixels in greyscale, 10 classes). Feel free to consult the tutorial if you want to get a better understanding of MXNet. The file `mxnet_mnist.py` contains all the steps that are described in the tutorial. It loads the dataset and a sequential model, trains the model with the training set, and evaluates the trained model on the test set.
-
-The only things we need are a simple Flower server (in `server.py`) and a Flower client (in `client.py`). The Flower client basically takes the model and training code and tells Flower how to call it.
-
-Start the server in a terminal as follows:
-
-```shell
-python3 server.py
-```
-
-Now that the server is running and waiting for clients, we can start two clients that will participate in the federated learning process. To do so, simply open two more terminal windows and run the following commands.
-
-Start client 1 in the first terminal:
-
-```shell
-python3 client.py
-```
-
-Start client 2 in the second terminal:
-
-```shell
-python3 client.py
-```
-
-You are now training an MXNet-based classifier on MNIST, federated across two clients. The setup is of course simplified since both clients hold the same dataset, but you can now continue with your own explorations. How about changing from a sequential model to a CNN (see the sketch right after this README)? How about adding more clients?
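The README's closing question, swapping the sequential model for a CNN, is easy to try. Below is a minimal sketch of what such a model function could look like in Gluon; the layer and channel sizes are illustrative assumptions and were never part of the removed example:

```python
from mxnet import nd
from mxnet.gluon import nn


def cnn_model():
    # Hypothetical CNN replacement for the example's MLP model().
    # Channel counts and layer sizes here are illustrative, not prescribed.
    net = nn.Sequential()
    net.add(nn.Conv2D(channels=32, kernel_size=3, activation="relu"))
    net.add(nn.MaxPool2D(pool_size=2))
    net.add(nn.Conv2D(channels=64, kernel_size=3, activation="relu"))
    net.add(nn.MaxPool2D(pool_size=2))
    net.add(nn.Flatten())
    net.add(nn.Dense(128, activation="relu"))
    net.add(nn.Dense(10))
    net.collect_params().initialize()
    return net


if __name__ == "__main__":
    net = cnn_model()
    # A CNN expects image-shaped NCHW input, so the dummy forward pass that
    # triggers MXNet's deferred parameter initialization uses
    # (batch, channels, height, width) rather than the flat (2, 784) tensor
    # used for the dense model.
    net(nd.random.uniform(shape=(2, 1, 28, 28)))
    print(net)
```

Note that the dummy initialization input in the clients (`nd.random.uniform(shape=(2, 784))` in the `client.py` files below) would have to change accordingly.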
diff --git a/examples/mxnet-from-centralized-to-federated/client.py b/examples/mxnet-from-centralized-to-federated/client.py
deleted file mode 100644
index bb666a26508e..000000000000
--- a/examples/mxnet-from-centralized-to-federated/client.py
+++ /dev/null
@@ -1,93 +0,0 @@
-"""Flower client example using MXNet for MNIST classification."""
-
-from typing import Dict, List, Tuple
-
-import flwr as fl
-import numpy as np
-import mxnet as mx
-from mxnet import nd
-
-import mxnet_mnist
-
-
-# Flower Client
-class MNISTClient(fl.client.NumPyClient):
-    """Flower client implementing MNIST classification using MXNet."""
-
-    def __init__(
-        self,
-        model: mx.gluon.nn.Sequential,
-        train_data: mx.io.NDArrayIter,
-        val_data: mx.io.NDArrayIter,
-        device: mx.context,
-    ) -> None:
-        self.model = model
-        self.train_data = train_data
-        self.val_data = val_data
-        self.device = device
-
-    def get_parameters(self, config: Dict) -> List[np.ndarray]:
-        # Return model parameters as a list of NumPy arrays
-        param = []
-        for val in self.model.collect_params(".*weight").values():
-            p = val.data()
-            # Convert parameters from NDArray to NumPy arrays, as required by the Flower NumPyClient
-            param.append(p.asnumpy())
-        return param
-
-    def set_parameters(self, parameters: List[np.ndarray]) -> None:
-        # Collect model parameters and set new weight values
-        params = zip(self.model.collect_params(".*weight").keys(), parameters)
-        for key, value in params:
-            self.model.collect_params().setattr(key, value)
-
-    def fit(
-        self, parameters: List[np.ndarray], config: Dict
-    ) -> Tuple[List[np.ndarray], int, Dict]:
-        # Set model parameters, train the model, return the updated model parameters
-        self.set_parameters(parameters)
-        [accuracy, loss], num_examples = mxnet_mnist.train(
-            self.model, self.train_data, epoch=2, device=self.device
-        )
-        results = {"accuracy": accuracy[1], "loss": loss[1]}
-        return self.get_parameters(config={}), num_examples, results
-
-    def evaluate(
-        self, parameters: List[np.ndarray], config: Dict
-    ) -> Tuple[float, int, Dict]:
-        # Set model parameters, evaluate the model on the local test dataset, return the result
-        self.set_parameters(parameters)
-        [accuracy, loss], num_examples = mxnet_mnist.test(
-            self.model, self.val_data, device=self.device
-        )
-        print("Evaluation accuracy & loss", accuracy, loss)
-        return (
-            float(loss[1]),
-            num_examples,
-            {"accuracy": float(accuracy[1])},
-        )
-
-
-def main() -> None:
-    """Load data, start MNISTClient."""
-
-    # Set context to GPU or - if not available - to CPU
-    DEVICE = [mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()]
-
-    # Load data
-    train_data, val_data = mxnet_mnist.load_data()
-
-    # Load model (from centralized training)
-    model = mxnet_mnist.model()
-
-    # Do one forward propagation to initialize parameters
-    init = nd.random.uniform(shape=(2, 784))
-    model(init)
-
-    # Start Flower client
-    client = MNISTClient(model, train_data, val_data, DEVICE)
-    fl.client.start_numpy_client(server_address="0.0.0.0:8080", client=client)
-
-
-if __name__ == "__main__":
-    main()
diff --git a/examples/mxnet-from-centralized-to-federated/mxnet_mnist.py b/examples/mxnet-from-centralized-to-federated/mxnet_mnist.py
deleted file mode 100644
index 5cf39da7c9ca..000000000000
--- a/examples/mxnet-from-centralized-to-federated/mxnet_mnist.py
+++ /dev/null
@@ -1,144 +0,0 @@
-"""MXNet MNIST image classification.
-
-The code is generally adapted from:
-
-https://mxnet.apache.org/api/python/docs/tutorials/packages/gluon/image/mnist.html
-"""
-
-from typing import List, Tuple
-import mxnet as mx
-from mxnet import gluon
-from mxnet.gluon import nn
-from mxnet import autograd as ag
-import mxnet.ndarray as F
-from mxnet import nd
-
-# Fixing the random seed
-mx.random.seed(42)
-
-
-def load_data() -> Tuple[mx.io.NDArrayIter, mx.io.NDArrayIter]:
-    print("Download Dataset")
-    # Download MNIST data
-    mnist = mx.test_utils.get_mnist()
-    batch_size = 100
-    train_data = mx.io.NDArrayIter(
-        mnist["train_data"], mnist["train_label"], batch_size, shuffle=True
-    )
-    val_data = mx.io.NDArrayIter(mnist["test_data"], mnist["test_label"], batch_size)
-    return train_data, val_data
-
-
-def model():
-    # Define simple Sequential model
-    net = nn.Sequential()
-    net.add(nn.Dense(256, activation="relu"))
-    net.add(nn.Dense(64, activation="relu"))
-    net.add(nn.Dense(10))
-    net.collect_params().initialize()
-    return net
-
-
-def train(
-    net: mx.gluon.nn, train_data: mx.io.NDArrayIter, epoch: int, device: mx.context
-) -> Tuple[List[float], int]:
-    trainer = gluon.Trainer(net.collect_params(), "sgd", {"learning_rate": 0.01})
-    # Use Accuracy and Cross Entropy Loss as the evaluation metric.
-    accuracy_metric = mx.metric.Accuracy()
-    loss_metric = mx.metric.CrossEntropy()
-    metrics = mx.metric.CompositeEvalMetric()
-    for child_metric in [accuracy_metric, loss_metric]:
-        metrics.add(child_metric)
-    softmax_cross_entropy_loss = gluon.loss.SoftmaxCrossEntropyLoss()
-    for i in range(epoch):
-        # Reset the train data iterator.
-        train_data.reset()
-        # Calculate number of samples
-        num_examples = 0
-        # Loop over the train data iterator.
-        for batch in train_data:
-            # Splits train data into multiple slices along batch_axis
-            # and copy each slice into a context.
-            data = gluon.utils.split_and_load(
-                batch.data[0], ctx_list=device, batch_axis=0
-            )
-            # Splits train labels into multiple slices along batch_axis
-            # and copy each slice into a context.
-            label = gluon.utils.split_and_load(
-                batch.label[0], ctx_list=device, batch_axis=0
-            )
-            outputs = []
-            # Inside training scope
-            with ag.record():
-                for x, y in zip(data, label):
-                    z = net(x)
-                    # Computes softmax cross entropy loss.
-                    loss = softmax_cross_entropy_loss(z, y)
-                    # Backpropagate the error for one iteration.
-                    loss.backward()
-                    outputs.append(z.softmax())
-                    num_examples += len(x)
-            # Updates internal evaluation
-            metrics.update(label, outputs)
-            # Make one step of parameter update. Trainer needs to know the
-            # batch size of data to normalize the gradient by 1/batch_size.
-            trainer.step(batch.data[0].shape[0])
-        # Gets the evaluation result.
-        trainings_metric = metrics.get_name_value()
-        print("Accuracy & loss at epoch %d: %s" % (i, trainings_metric))
-    return trainings_metric, num_examples
-
-
-def test(
-    net: mx.gluon.nn, val_data: mx.io.NDArrayIter, device: mx.context
-) -> Tuple[List[float], int]:
-    # Use Accuracy and Cross Entropy Loss as the evaluation metric.
-    accuracy_metric = mx.metric.Accuracy()
-    loss_metric = mx.metric.CrossEntropy()
-    metrics = mx.metric.CompositeEvalMetric()
-    for child_metric in [accuracy_metric, loss_metric]:
-        metrics.add(child_metric)
-    # Reset the validation data iterator.
-    val_data.reset()
-    # Get number of samples for val_data
-    num_examples = 0
-    # Loop over the validation data iterator.
-    for batch in val_data:
-        # Splits validation data into multiple slices along batch_axis
-        # and copy each slice into a context.
-        data = gluon.utils.split_and_load(batch.data[0], ctx_list=device, batch_axis=0)
-        # Splits validation label into multiple slices along batch_axis
-        # and copy each slice into a context.
-        label = gluon.utils.split_and_load(
-            batch.label[0], ctx_list=device, batch_axis=0
-        )
-        outputs = []
-        for x in data:
-            outputs.append(net(x).softmax())
-            num_examples += len(x)
-        # Updates internal evaluation
-        metrics.update(label, outputs)
-    return metrics.get_name_value(), num_examples
-
-
-def main():
-    # Set context to GPU or - if not available - to CPU
-    DEVICE = [mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()]
-    # Load train and validation data
-    train_data, val_data = load_data()
-    # Define sequential model
-    net = model()
-    init = nd.random.uniform(shape=(2, 784))
-    net(init)
-    # Start model training based on training set
-    train(net=net, train_data=train_data, epoch=2, device=DEVICE)
-    # Evaluate model using loss and accuracy
-    eval_metric, _ = test(net=net, val_data=val_data, device=DEVICE)
-    acc = eval_metric[0]
-    loss = eval_metric[1]
-    print("Evaluation Loss: ", loss)
-    print("Evaluation Accuracy: ", acc)
-
-
-if __name__ == "__main__":
-    main()
diff --git a/examples/mxnet-from-centralized-to-federated/pyproject.toml b/examples/mxnet-from-centralized-to-federated/pyproject.toml
deleted file mode 100644
index b00b3ddfe412..000000000000
--- a/examples/mxnet-from-centralized-to-federated/pyproject.toml
+++ /dev/null
@@ -1,15 +0,0 @@
-[build-system]
-requires = ["poetry-core>=1.4.0"]
-build-backend = "poetry.core.masonry.api"
-
-[tool.poetry]
-name = "mxnet_example"
-version = "0.1.0"
-description = "MXNet example with MNIST and CNN"
-authors = ["The Flower Authors"]
-
-[tool.poetry.dependencies]
-python = ">=3.8,<3.11"
-flwr = "1.6.0"
-mxnet = "1.9.1"
-numpy = "1.23.1"
diff --git a/examples/mxnet-from-centralized-to-federated/requirements.txt b/examples/mxnet-from-centralized-to-federated/requirements.txt
deleted file mode 100644
index 8dd6f7150dfd..000000000000
--- a/examples/mxnet-from-centralized-to-federated/requirements.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-flwr==1.6.0
-mxnet==1.9.1
-numpy==1.23.1
diff --git a/examples/mxnet-from-centralized-to-federated/server.py b/examples/mxnet-from-centralized-to-federated/server.py
deleted file mode 100644
index 871aa4e8ec99..000000000000
--- a/examples/mxnet-from-centralized-to-federated/server.py
+++ /dev/null
@@ -1,9 +0,0 @@
-"""Flower server example."""
-
-import flwr as fl
-
-if __name__ == "__main__":
-    fl.server.start_server(
-        server_address="0.0.0.0:8080",
-        config=fl.server.ServerConfig(num_rounds=3),
-    )
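Both examples ship the same nine-line `server.py` (the quickstart copy is deleted further below), which relies on Flower's default strategy. For anyone porting these examples, here is a minimal sketch that makes the implicit FedAvg strategy explicit; the `min_*` thresholds are illustrative assumptions, not values taken from the removed files:

```python
import flwr as fl

# Hypothetical variant of the removed server.py: the same server, but with
# the otherwise implicit FedAvg strategy spelled out. The client-count
# thresholds below are illustrative assumptions.
strategy = fl.server.strategy.FedAvg(
    min_fit_clients=2,
    min_evaluate_clients=2,
    min_available_clients=2,
)

if __name__ == "__main__":
    fl.server.start_server(
        server_address="0.0.0.0:8080",
        config=fl.server.ServerConfig(num_rounds=3),
        strategy=strategy,
    )
```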
diff --git a/examples/quickstart-mxnet/.gitignore b/examples/quickstart-mxnet/.gitignore
deleted file mode 100644
index 10d00b5797e2..000000000000
--- a/examples/quickstart-mxnet/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-*.gz
diff --git a/examples/quickstart-mxnet/README.md b/examples/quickstart-mxnet/README.md
deleted file mode 100644
index 37e01ef2707c..000000000000
--- a/examples/quickstart-mxnet/README.md
+++ /dev/null
@@ -1,78 +0,0 @@
-# Flower Example using MXNet
-
-> Note: the MXNet project has ended and is now in [Attic](https://attic.apache.org/projects/mxnet.html). The MXNet GitHub repository has also [been archived](https://github.com/apache/mxnet). As a result, this example won't receive further updates. Using MXNet is no longer recommended.
-
-This example demonstrates how to run an MXNet machine learning project federated with Flower.
-
-This introductory example for Flower uses MXNet, but you're not required to be an MXNet expert to run it. The example will help you understand how Flower can be used to build federated learning use cases based on an existing MXNet project.
-
-## Project Setup
-
-Start by cloning the example project. We prepared a single-line command that you can copy into your shell; it will check out the example for you:
-
-```shell
-git clone --depth=1 https://github.com/adap/flower.git && mv flower/examples/quickstart-mxnet . && rm -rf flower && cd quickstart-mxnet
-```
-
-This will create a new directory called `quickstart-mxnet` containing the following files:
-
-```shell
--- pyproject.toml
--- requirements.txt
--- client.py
--- server.py
--- README.md
-```
-
-### Installing Dependencies
-
-Project dependencies (such as `mxnet` and `flwr`) are defined in `pyproject.toml` and `requirements.txt`. We recommend [Poetry](https://python-poetry.org/docs/) to install those dependencies and manage your virtual environment ([Poetry installation](https://python-poetry.org/docs/#installation)) or [pip](https://pip.pypa.io/en/latest/development/), but feel free to use a different way of installing dependencies and managing virtual environments if you have other preferences.
-
-#### Poetry
-
-```shell
-poetry install
-poetry shell
-```
-
-Poetry will install all your dependencies in a newly created virtual environment. To verify that everything works correctly, you can run the following command:
-
-```shell
-poetry run python3 -c "import flwr"
-```
-
-If you don't see any errors, you're good to go!
-
-#### pip
-
-Run the command below in your terminal to install the dependencies listed in `requirements.txt`.
-
-```shell
-pip install -r requirements.txt
-```
-
-## Run MXNet Federated
-
-This MXNet example is based on the [Handwritten Digit Recognition](https://mxnet.apache.org/versions/1.7.0/api/python/docs/tutorials/packages/gluon/image/mnist.html) tutorial and uses the MNIST dataset (hand-written digits, 28x28 pixels in greyscale, 10 classes). Feel free to consult the tutorial if you want to get a better understanding of MXNet. The file `client.py` contains all the steps that are described in the tutorial. It loads the dataset and a sequential model, trains the model with the training set, and evaluates the trained model on the test set.
-
-You are ready to start the Flower server as well as the clients. You can simply start the server in a terminal as follows:
-
-```shell
-python3 server.py
-```
-
-Now you are ready to start the Flower clients, which will participate in the federated learning process. To do so, simply open two more terminal windows and run the following commands.
-
-Start client 1 in the first terminal:
-
-```shell
-python3 client.py
-```
-
-Start client 2 in the second terminal:
-
-```shell
-python3 client.py
-```
-
-You are now training an MXNet-based classifier on MNIST, federated across two clients. The setup is of course simplified since both clients hold the same dataset, but you can now continue with your own explorations. How about changing from a sequential model to a CNN (see the sketch earlier in this diff)? How about adding more clients?
diff --git a/examples/quickstart-mxnet/client.py b/examples/quickstart-mxnet/client.py
deleted file mode 100644
index 6c2b2e99775d..000000000000
--- a/examples/quickstart-mxnet/client.py
+++ /dev/null
@@ -1,136 +0,0 @@
-"""Flower client example using MXNet for MNIST classification.
-
-The code is generally adapted from:
-
-https://mxnet.apache.org/api/python/docs/tutorials/packages/gluon/image/mnist.html
-"""
-
-import flwr as fl
-import numpy as np
-import mxnet as mx
-from mxnet import nd
-from mxnet import gluon
-from mxnet.gluon import nn
-from mxnet import autograd as ag
-import mxnet.ndarray as F
-
-# Fixing the random seed
-mx.random.seed(42)
-
-# Set up context to GPU or CPU
-DEVICE = [mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()]
-
-
-def main():
-    def model():
-        net = nn.Sequential()
-        net.add(nn.Dense(256, activation="relu"))
-        net.add(nn.Dense(64, activation="relu"))
-        net.add(nn.Dense(10))
-        net.collect_params().initialize()
-        return net
-
-    train_data, val_data = load_data()
-
-    model = model()
-    init = nd.random.uniform(shape=(2, 784))
-    model(init)
-
-    # Flower Client
-    class MNISTClient(fl.client.NumPyClient):
-        def get_parameters(self, config):
-            param = []
-            for val in model.collect_params(".*weight").values():
-                p = val.data()
-                param.append(p.asnumpy())
-            return param
-
-        def set_parameters(self, parameters):
-            params = zip(model.collect_params(".*weight").keys(), parameters)
-            for key, value in params:
-                model.collect_params().setattr(key, value)
-
-        def fit(self, parameters, config):
-            self.set_parameters(parameters)
-            [accuracy, loss], num_examples = train(model, train_data, epoch=2)
-            results = {"accuracy": float(accuracy[1]), "loss": float(loss[1])}
-            return self.get_parameters(config={}), num_examples, results
-
-        def evaluate(self, parameters, config):
-            self.set_parameters(parameters)
-            [accuracy, loss], num_examples = test(model, val_data)
-            print("Evaluation accuracy & loss", accuracy, loss)
-            return float(loss[1]), num_examples, {"accuracy": float(accuracy[1])}
-
-    # Start Flower client
-    fl.client.start_numpy_client(server_address="0.0.0.0:8080", client=MNISTClient())
-
-
-def load_data():
-    print("Download Dataset")
-    mnist = mx.test_utils.get_mnist()
-    batch_size = 100
-    train_data = mx.io.NDArrayIter(
-        mnist["train_data"], mnist["train_label"], batch_size, shuffle=True
-    )
-    val_data = mx.io.NDArrayIter(mnist["test_data"], mnist["test_label"], batch_size)
-    return train_data, val_data
-
-
-def train(net, train_data, epoch):
-    trainer = gluon.Trainer(net.collect_params(), "sgd", {"learning_rate": 0.01})
-    accuracy_metric = mx.metric.Accuracy()
-    loss_metric = mx.metric.CrossEntropy()
-    metrics = mx.metric.CompositeEvalMetric()
-    for child_metric in [accuracy_metric, loss_metric]:
-        metrics.add(child_metric)
-    softmax_cross_entropy_loss = gluon.loss.SoftmaxCrossEntropyLoss()
-    for i in range(epoch):
-        train_data.reset()
-        num_examples = 0
-        for batch in train_data:
-            data = gluon.utils.split_and_load(
-                batch.data[0], ctx_list=DEVICE, batch_axis=0
-            )
-            label = gluon.utils.split_and_load(
-                batch.label[0], ctx_list=DEVICE, batch_axis=0
-            )
-            outputs = []
-            with ag.record():
-                for x, y in zip(data, label):
-                    z = net(x)
-                    loss = softmax_cross_entropy_loss(z, y)
-                    loss.backward()
-                    outputs.append(z.softmax())
-                    num_examples += len(x)
-            metrics.update(label, outputs)
-            trainer.step(batch.data[0].shape[0])
-        trainings_metric = metrics.get_name_value()
-        print("Accuracy & loss at epoch %d: %s" % (i, trainings_metric))
-    return trainings_metric, num_examples
-
-
-def test(net, val_data):
-    accuracy_metric = mx.metric.Accuracy()
-    loss_metric = mx.metric.CrossEntropy()
-    metrics = mx.metric.CompositeEvalMetric()
-    for child_metric in [accuracy_metric, loss_metric]:
-        metrics.add(child_metric)
-    val_data.reset()
-    num_examples = 0
-    for batch in val_data:
-        data = gluon.utils.split_and_load(batch.data[0], ctx_list=DEVICE, batch_axis=0)
-        label = gluon.utils.split_and_load(
-            batch.label[0], ctx_list=DEVICE, batch_axis=0
-        )
-        outputs = []
-        for x in data:
-            outputs.append(net(x).softmax())
-            num_examples += len(x)
-        metrics.update(label, outputs)
-    return metrics.get_name_value(), num_examples
-
-
-if __name__ == "__main__":
-    main()
diff --git a/examples/quickstart-mxnet/pyproject.toml b/examples/quickstart-mxnet/pyproject.toml
deleted file mode 100644
index b00b3ddfe412..000000000000
--- a/examples/quickstart-mxnet/pyproject.toml
+++ /dev/null
@@ -1,15 +0,0 @@
-[build-system]
-requires = ["poetry-core>=1.4.0"]
-build-backend = "poetry.core.masonry.api"
-
-[tool.poetry]
-name = "mxnet_example"
-version = "0.1.0"
-description = "MXNet example with MNIST and CNN"
-authors = ["The Flower Authors"]
-
-[tool.poetry.dependencies]
-python = ">=3.8,<3.11"
-flwr = "1.6.0"
-mxnet = "1.9.1"
-numpy = "1.23.1"
diff --git a/examples/quickstart-mxnet/requirements.txt b/examples/quickstart-mxnet/requirements.txt
deleted file mode 100644
index 8dd6f7150dfd..000000000000
--- a/examples/quickstart-mxnet/requirements.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-flwr==1.6.0
-mxnet==1.9.1
-numpy==1.23.1
diff --git a/examples/quickstart-mxnet/server.py b/examples/quickstart-mxnet/server.py
deleted file mode 100644
index 871aa4e8ec99..000000000000
--- a/examples/quickstart-mxnet/server.py
+++ /dev/null
@@ -1,9 +0,0 @@
-"""Flower server example."""
-
-import flwr as fl
-
-if __name__ == "__main__":
-    fl.server.start_server(
-        server_address="0.0.0.0:8080",
-        config=fl.server.ServerConfig(num_rounds=3),
-    )
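Finally, back to the one file this diff modifies rather than deletes: the `redirects` mapping in `conf.py` points the two retired documentation pages at `index.html`. The diff does not show which Sphinx machinery consumes that dict (Flower's docs wire it up elsewhere, for example via an extension such as sphinx-reredirects), so the following is only a minimal sketch, under that assumption, of how such a mapping can be turned into static redirect stubs:

```python
import os

# The mapping added to conf.py above: retired page name -> redirect target.
redirects = {
    "quickstart-mxnet": "index.html",
    "mxnet-from-centralized-to-federated": "index.html",
}

# Hypothetical stub template; redirect extensions generate something similar.
REDIRECT_TEMPLATE = """<!DOCTYPE html>
<html>
  <head>
    <meta http-equiv="refresh" content="0; url={target}">
    <link rel="canonical" href="{target}">
  </head>
  <body>
    <p>This page has moved to <a href="{target}">{target}</a>.</p>
  </body>
</html>
"""


def write_redirect_stubs(outdir: str) -> None:
    """Write one tiny HTML file per retired page into the build output."""
    os.makedirs(outdir, exist_ok=True)
    for old_name, target in redirects.items():
        path = os.path.join(outdir, f"{old_name}.html")
        with open(path, "w", encoding="utf-8") as f:
            f.write(REDIRECT_TEMPLATE.format(target=target))


if __name__ == "__main__":
    write_redirect_stubs("_build/html")
```

With stubs like these in the build output, a request for the old `quickstart-mxnet.html` page lands on `index.html` instead of returning the 404 that the comment in `conf.py` warns about.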