From 7d0620c0e337a2d6e00fccd3eb38fd1aa951f3e0 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 24 Sep 2024 00:32:12 +0000 Subject: [PATCH] Update text and language files --- doc/locales/fr/LC_MESSAGES/framework-docs.po | 2194 +++++++++++------ doc/locales/ko/LC_MESSAGES/framework-docs.po | 1989 ++++++++++----- .../pt_BR/LC_MESSAGES/framework-docs.po | 1939 ++++++++++----- .../zh_Hans/LC_MESSAGES/framework-docs.po | 2179 ++++++++++------ 4 files changed, 5574 insertions(+), 2727 deletions(-) diff --git a/doc/locales/fr/LC_MESSAGES/framework-docs.po b/doc/locales/fr/LC_MESSAGES/framework-docs.po index bee09019489f..681916e78ed5 100644 --- a/doc/locales/fr/LC_MESSAGES/framework-docs.po +++ b/doc/locales/fr/LC_MESSAGES/framework-docs.po @@ -3,7 +3,7 @@ msgid "" msgstr "" "Project-Id-Version: Flower Docs\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" -"POT-Creation-Date: 2024-09-15 09:09+0200\n" +"POT-Creation-Date: 2024-09-24 00:29+0000\n" "PO-Revision-Date: 2023-09-05 17:54+0000\n" "Last-Translator: Charles Beauville \n" "Language: fr\n" @@ -13,7 +13,7 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.15.0\n" +"Generated-By: Babel 2.16.0\n" #: ../../source/contributor-explanation-public-and-private-apis.rst:2 msgid "Public and private APIs" @@ -1522,7 +1522,7 @@ msgstr "" msgid "Setting up the repository" msgstr "Mise en place du référentiel" -#: ../../source/contributor-tutorial-contribute-on-github.rst:12 +#: ../../source/contributor-tutorial-contribute-on-github.rst:21 msgid "**Create a GitHub account and setup Git**" msgstr "**Créer un compte GitHub et configurer Git**" @@ -1571,7 +1571,7 @@ msgstr "" " des modifications localement et tu en gardes une trace à l'aide de Git, " "puis tu télécharges ton nouvel historique à nouveau sur GitHub." -#: ../../source/contributor-tutorial-contribute-on-github.rst:23 +#: ../../source/contributor-tutorial-contribute-on-github.rst:32 msgid "**Forking the Flower repository**" msgstr "**Fourche le dépôt de Flower**" @@ -1601,7 +1601,7 @@ msgstr "" " devrais voir dans le coin supérieur gauche que tu es en train de " "regarder ta propre version de Flower." -#: ../../source/contributor-tutorial-contribute-on-github.rst:34 +#: ../../source/contributor-tutorial-contribute-on-github.rst:47 msgid "**Cloning your forked repository**" msgstr "**Clonage de ton dépôt forké**" @@ -1635,7 +1635,7 @@ msgstr "" "Cela créera un dossier `flower/` (ou le nom de ta fourche si tu l'as " "renommée) dans le répertoire de travail actuel." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:49 +#: ../../source/contributor-tutorial-contribute-on-github.rst:66 msgid "**Add origin**" msgstr "**Ajouter l'origine**" @@ -1663,7 +1663,7 @@ msgstr "" "Une fois que le \\ est copié, nous pouvons taper la commande " "suivante dans notre terminal :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:68 +#: ../../source/contributor-tutorial-contribute-on-github.rst:90 msgid "**Add upstream**" msgstr "**Ajouter en amont**" @@ -1739,7 +1739,7 @@ msgstr "" msgid "And with Flower's repository:" msgstr "Et avec le référentiel de Flower :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:114 +#: ../../source/contributor-tutorial-contribute-on-github.rst:122 msgid "**Create a new branch**" msgstr "**Créer une nouvelle branche**" @@ -1761,7 +1761,7 @@ msgstr "" "Pour ce faire, il suffit d'exécuter la commande suivante dans le " "répertoire du référentiel :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:124 +#: ../../source/contributor-tutorial-contribute-on-github.rst:125 msgid "**Make changes**" msgstr "**Apporter des modifications**" @@ -1771,7 +1771,7 @@ msgstr "" "Écris du bon code et crée de merveilleuses modifications à l'aide de ton " "éditeur préféré !" -#: ../../source/contributor-tutorial-contribute-on-github.rst:127 +#: ../../source/contributor-tutorial-contribute-on-github.rst:138 msgid "**Test and format your code**" msgstr "**Teste et mets en forme ton code**" @@ -1789,7 +1789,7 @@ msgstr "" msgid "To do so, we have written a few scripts that you can execute:" msgstr "Pour ce faire, nous avons écrit quelques scripts que tu peux exécuter :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:140 +#: ../../source/contributor-tutorial-contribute-on-github.rst:150 msgid "**Stage changes**" msgstr "**Changements de scène**" @@ -1815,7 +1815,7 @@ msgstr "" "version (last commit) et pour voir quels fichiers sont mis à disposition " "pour le commit, tu peux utiliser la commande :code:`git status`." -#: ../../source/contributor-tutorial-contribute-on-github.rst:152 +#: ../../source/contributor-tutorial-contribute-on-github.rst:160 msgid "**Commit changes**" msgstr "**Commit changes**" @@ -1838,7 +1838,7 @@ msgstr "" "commit. Il doit être écrit dans un style impératif et être concis. Un " "exemple serait :code:`git commit -m \"Ajouter des images au README\"`." -#: ../../source/contributor-tutorial-contribute-on-github.rst:162 +#: ../../source/contributor-tutorial-contribute-on-github.rst:171 msgid "**Push the changes to the fork**" msgstr "**Pousser les changements vers la fourche**" @@ -1865,7 +1865,7 @@ msgstr "" msgid "Creating and merging a pull request (PR)" msgstr "Créer et fusionner une pull request (PR)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:176 +#: ../../source/contributor-tutorial-contribute-on-github.rst:206 msgid "**Create the PR**" msgstr "**Créer le PR**" @@ -1949,7 +1949,7 @@ msgstr "" " personne, tu as la possibilité de créer un brouillon de demande de " "traction :" -#: ../../source/contributor-tutorial-contribute-on-github.rst:208 +#: ../../source/contributor-tutorial-contribute-on-github.rst:209 msgid "**Making new changes**" msgstr "**Faire de nouveaux changements**" @@ -1963,7 +1963,7 @@ msgstr "" "toujours y pousser de nouveaux commits de la même manière qu'auparavant, " "en apportant des modifications à la branche associée au PR." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:211 +#: ../../source/contributor-tutorial-contribute-on-github.rst:231 msgid "**Review the PR**" msgstr "**Review the PR**" @@ -2008,7 +2008,7 @@ msgstr "" "Une fois que toutes les conversations ont été résolues, tu peux " "redemander un examen." -#: ../../source/contributor-tutorial-contribute-on-github.rst:233 +#: ../../source/contributor-tutorial-contribute-on-github.rst:251 msgid "**Once the PR is merged**" msgstr "**Une fois que le PR est fusionné**" @@ -2340,6 +2340,7 @@ msgstr "Devenez un·e contributeur·ice" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:5 #: ../../source/docker/run-as-subprocess.rst:11 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:15 #: ../../source/docker/tutorial-quickstart-docker-compose.rst:12 #: ../../source/docker/tutorial-quickstart-docker.rst:11 msgid "Prerequisites" @@ -3098,6 +3099,241 @@ msgid "" " the SuperNode to execute the ClientApp as a subprocess:" msgstr "" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:2 +#, fuzzy +msgid "Run Flower Quickstart Examples with Docker Compose" +msgstr "Démarrage rapide XGBoost" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:4 +msgid "" +"Flower provides a set of `quickstart examples " +"`_ to help you get " +"started with the framework. These examples are designed to demonstrate " +"the capabilities of Flower and by default run using the Simulation " +"Engine. This guide demonstrates how to run them using Flower's Deployment" +" Engine via Docker Compose." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:11 +msgid "" +"Some quickstart examples may have limitations or requirements that " +"prevent them from running on every environment. For more information, " +"please see `Limitations`_." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:17 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:14 +#: ../../source/docker/tutorial-quickstart-docker.rst:13 +msgid "Before you start, make sure that:" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:19 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:16 +#: ../../source/docker/tutorial-quickstart-docker.rst:15 +msgid "The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:20 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:17 +#: ../../source/docker/tutorial-quickstart-docker.rst:16 +msgid "The Docker daemon is running." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:21 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:18 +msgid "Docker Compose is `installed `_." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:24 +#, fuzzy +msgid "Run the Quickstart Example" +msgstr "Demande pour un nouveau Flower Example" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:26 +msgid "" +"Clone the quickstart example you like to run. 
For example, ``quickstart-" +"pytorch``:" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:34 +msgid "" +"Download the `compose.yml " +"`_" +" file into the example directory:" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:41 +#, fuzzy +msgid "Build and start the services using the following command:" +msgstr "Active la virtualenv en exécutant la commande suivante :" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:47 +#, fuzzy +msgid "" +"Append the following lines to the end of the ``pyproject.toml`` file and " +"save it:" +msgstr "Augmente la version mineure de ``pyproject.toml`` d'une unité." + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:49 +#: ../../source/docker/tutorial-quickstart-docker.rst:319 +msgid "pyproject.toml" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:58 +msgid "" +"You can customize the string that follows ``tool.flwr.federations.`` to " +"fit your needs. However, please note that the string cannot contain a dot" +" (``.``)." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:61 +msgid "" +"In this example, ``local-deployment`` has been used. Just remember to " +"replace ``local-deployment`` with your chosen name in both the " +"``tool.flwr.federations.`` string and the corresponding ``flwr run .`` " +"command." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:65 +#, fuzzy +msgid "Run the example:" +msgstr "Fédérer l'exemple" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:71 +msgid "Follow the logs of the SuperExec service:" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:77 +msgid "" +"That is all it takes! You can monitor the progress of the run through the" +" logs of the SuperExec." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:80 +msgid "Run a Different Quickstart Example" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:82 +msgid "" +"To run a different quickstart example, such as ``quickstart-tensorflow``," +" first, shut down the Docker Compose services of the current example:" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:89 +msgid "After that, you can repeat the steps above." 
+msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:92 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:98 +#, fuzzy +msgid "Limitations" +msgstr "Simulation de moniteur" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:97 +#, fuzzy +msgid "Quickstart Example" +msgstr "Démarrage rapide de JAX" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:99 +#, fuzzy +msgid "quickstart-fastai" +msgstr "Démarrage rapide fastai" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:100 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:102 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:110 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:112 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:116 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:118 +#: ../../source/ref-changelog.md:33 ../../source/ref-changelog.md:399 +#: ../../source/ref-changelog.md:676 ../../source/ref-changelog.md:740 +#: ../../source/ref-changelog.md:798 ../../source/ref-changelog.md:867 +#: ../../source/ref-changelog.md:929 +msgid "None" +msgstr "Aucun" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:101 +#, fuzzy +msgid "quickstart-huggingface" +msgstr "Quickstart tutorials" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:103 +#, fuzzy +msgid "quickstart-jax" +msgstr "Démarrage rapide de JAX" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:104 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:106 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:120 +#, fuzzy +msgid "" +"The example has not yet been updated to work with the latest ``flwr`` " +"version." +msgstr "" +"Les exemples de code couvrant scikit-learn et PyTorch Lightning ont été " +"mis à jour pour fonctionner avec la dernière version de Flower." + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:105 +#, fuzzy +msgid "quickstart-mlcube" +msgstr "Démarrage rapide de JAX" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:107 +#, fuzzy +msgid "quickstart-mlx" +msgstr "Démarrage rapide de JAX" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:108 +msgid "" +"`Requires to run on macOS with Apple Silicon `_." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:109 +#, fuzzy +msgid "quickstart-monai" +msgstr "Démarrage rapide de JAX" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:111 +#, fuzzy +msgid "quickstart-pandas" +msgstr "Démarrage rapide des Pandas" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:113 +#, fuzzy +msgid "quickstart-pytorch-lightning" +msgstr "Démarrage rapide de PyTorch Lightning" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:114 +msgid "" +"Requires an older pip version that is not supported by the Flower Docker " +"images." 
+msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:115 +#, fuzzy +msgid "quickstart-pytorch" +msgstr "Démarrage rapide de PyTorch" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:117 +#, fuzzy +msgid "quickstart-sklearn-tabular" +msgstr "Démarrage rapide de scikit-learn" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:119 +#, fuzzy +msgid "quickstart-tabnet" +msgstr "Démarrage rapide de JAX" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:121 +#, fuzzy +msgid "quickstart-tensorflow" +msgstr "Démarrage rapide de TensorFlow" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:122 +msgid "Only runs on AMD64." +msgstr "" + #: ../../source/docker/set-environment-variables.rst:2 #, fuzzy msgid "Set Environment Variables" @@ -3128,21 +3364,6 @@ msgid "" " understanding the basic workflow that uses the minimum configurations." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:14 -#: ../../source/docker/tutorial-quickstart-docker.rst:13 -msgid "Before you start, make sure that:" -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:16 -#: ../../source/docker/tutorial-quickstart-docker.rst:15 -msgid "The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally." -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:17 -#: ../../source/docker/tutorial-quickstart-docker.rst:16 -msgid "The Docker daemon is running." -msgstr "" - #: ../../source/docker/tutorial-quickstart-docker-compose.rst:21 #: ../../source/docker/tutorial-quickstart-docker.rst:19 msgid "Step 1: Set Up" @@ -3568,10 +3789,6 @@ msgstr "" msgid "Add the following lines to the ``pyproject.toml``:" msgstr "Augmente la version mineure de ``pyproject.toml`` d'une unité." -#: ../../source/docker/tutorial-quickstart-docker.rst:319 -msgid "pyproject.toml" -msgstr "" - #: ../../source/docker/tutorial-quickstart-docker.rst:326 msgid "Run the ``quickstart-docker`` project by executing the command:" msgstr "" @@ -3621,6 +3838,7 @@ msgstr "" msgid "Remove the containers and the bridge network:" msgstr "" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:401 #: ../../source/docker/tutorial-quickstart-docker.rst:399 #, fuzzy msgid "Where to Go Next" @@ -3657,10 +3875,6 @@ msgid "" "configuration that best suits your project's needs." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:18 -msgid "Docker Compose is `installed `_." -msgstr "" - #: ../../source/docker/tutorial-quickstart-docker-compose.rst:23 msgid "Clone the Docker Compose ``complete`` directory:" msgstr "" @@ -3856,7 +4070,7 @@ msgstr "" #: ../../source/docker/tutorial-quickstart-docker-compose.rst:188 #: ../../source/docker/tutorial-quickstart-docker-compose.rst:241 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:362 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:369 msgid "Rerun the ``quickstart-compose`` project:" msgstr "" @@ -3920,75 +4134,80 @@ msgstr "" msgid "compose.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:303 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:310 msgid "" "If you also want to enable TLS for the new SuperNodes, duplicate the " "SuperNode definition for each new SuperNode service in the ``with-" "tls.yml`` file." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:306 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:313 msgid "" "Make sure that the names of the services match with the one in the " "``compose.yml`` file." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:308 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:315 msgid "In ``with-tls.yml``, add the following:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:310 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:317 msgid "with-tls.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:332 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:339 msgid "Step 8: Persisting the SuperLink State and Enabling TLS" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:334 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:341 msgid "" "To run Flower with persisted SuperLink state and enabled TLS, a slight " "change in the ``with-state.yml`` file is required:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:337 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:344 msgid "Comment out the lines 2-4 and uncomment the lines 5-9:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:339 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:346 msgid "with-state.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:356 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:363 #, fuzzy msgid "Restart the services:" msgstr "Démarrer le serveur" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:370 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:377 msgid "Step 9: Merge Multiple Compose Files" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:372 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:379 msgid "" "You can merge multiple Compose files into a single file. For instance, if" " you wish to combine the basic configuration with the TLS configuration, " "execute the following command:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:380 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:387 msgid "" "This will merge the contents of ``compose.yml`` and ``with-tls.yml`` into" " a new file called ``my_compose.yml``." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:384 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:391 msgid "Step 10: Clean Up" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:386 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:393 msgid "Remove all services and volumes:" msgstr "" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:403 +#, fuzzy +msgid ":doc:`run-quickstart-examples-docker-compose`" +msgstr "Démarrage rapide XGBoost" + #: ../../source/docker/use-a-different-version.rst:2 msgid "Use a Different Flower Version" msgstr "" @@ -4389,7 +4608,7 @@ msgstr "" "getting/setting model parameters, one method for training the model, and " "one method for testing the model:" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:218 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 msgid ":code:`set_parameters`" msgstr ":code:`set_parameters`" @@ -4425,9 +4644,9 @@ msgstr "" ":code:`ndarray` NumPy (ce qui correspond à ce que " ":code:`flwr.client.NumPyClient` attend)" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:223 -#: ../../source/tutorial-quickstart-jax.rst:171 -#: ../../source/tutorial-quickstart-scikitlearn.rst:123 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 +#: ../../source/tutorial-quickstart-jax.rst:173 +#: ../../source/tutorial-quickstart-scikitlearn.rst:125 msgid ":code:`fit`" msgstr ":code:`fit`" @@ -4451,9 +4670,9 @@ msgstr "entraîne le modèle sur l'ensemble d'apprentissage local" msgid "get the updated local model weights and return them to the server" msgstr "récupère les poids du modèle local mis à jour et les renvoie au serveur" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:227 -#: ../../source/tutorial-quickstart-jax.rst:175 -#: ../../source/tutorial-quickstart-scikitlearn.rst:127 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 +#: ../../source/tutorial-quickstart-jax.rst:178 +#: ../../source/tutorial-quickstart-scikitlearn.rst:128 msgid ":code:`evaluate`" msgstr ":code:`évaluer`" @@ -4572,7 +4791,7 @@ msgid "" " individual's information remains hidden in the crowd." msgstr "" -#: ../../source/explanation-differential-privacy.rst:16 +#: ../../source/explanation-differential-privacy.rst:-1 msgid "DP Intro" msgstr "" @@ -4685,8 +4904,8 @@ msgid "" "the client's data." msgstr "" +#: ../../source/explanation-differential-privacy.rst:-1 #: ../../source/explanation-differential-privacy.rst:68 -#: ../../source/explanation-differential-privacy.rst:71 #: ../../source/how-to-use-differential-privacy.rst:11 #, fuzzy msgid "Central Differential Privacy" @@ -4714,7 +4933,7 @@ msgid "" "that larger updates are scaled down to fit within the norm `S`." msgstr "" -#: ../../source/explanation-differential-privacy.rst:84 +#: ../../source/explanation-differential-privacy.rst:-1 msgid "clipping" msgstr "" @@ -4759,8 +4978,8 @@ msgid "" "others." 
msgstr "" +#: ../../source/explanation-differential-privacy.rst:-1 #: ../../source/explanation-differential-privacy.rst:105 -#: ../../source/explanation-differential-privacy.rst:110 #: ../../source/how-to-use-differential-privacy.rst:96 #, fuzzy msgid "Local Differential Privacy" @@ -5047,7 +5266,7 @@ msgstr "" msgid "This is sometimes called a hub-and-spoke topology:" msgstr "" -#: ../../source/explanation-flower-architecture.rst:18 +#: ../../source/explanation-flower-architecture.rst:24 #, fuzzy msgid "Hub-and-spoke topology in federated learning" msgstr "Qu'est-ce que l'apprentissage fédéré ?" @@ -5120,7 +5339,7 @@ msgid "" "`missing link` between all those SuperNodes." msgstr "" -#: ../../source/explanation-flower-architecture.rst:65 +#: ../../source/explanation-flower-architecture.rst:71 #, fuzzy msgid "Basic Flower architecture" msgstr "Architecture florale" @@ -5158,7 +5377,7 @@ msgid "" "SuperNodes." msgstr "" -#: ../../source/explanation-flower-architecture.rst:91 +#: ../../source/explanation-flower-architecture.rst:97 #, fuzzy msgid "Multi-tenancy federated learning architecture" msgstr "Stratégie de moyenne fédérée." @@ -5182,7 +5401,7 @@ msgid "" "their corresponding ``ClientApp``\\s:" msgstr "" -#: ../../source/explanation-flower-architecture.rst:107 +#: ../../source/explanation-flower-architecture.rst:113 #, fuzzy msgid "Multi-tenancy federated learning architecture - Run 1" msgstr "Stratégie de moyenne fédérée." @@ -5199,7 +5418,7 @@ msgid "" " to participate in the training:" msgstr "" -#: ../../source/explanation-flower-architecture.rst:119 +#: ../../source/explanation-flower-architecture.rst:125 #, fuzzy msgid "Multi-tenancy federated learning architecture - Run 2" msgstr "Stratégie de moyenne fédérée." @@ -5236,7 +5455,7 @@ msgid "" "developer machine." msgstr "" -#: ../../source/explanation-flower-architecture.rst:145 +#: ../../source/explanation-flower-architecture.rst:151 msgid "Flower Deployment Engine with SuperExec" msgstr "" @@ -8526,7 +8745,7 @@ msgid "" "adaptive clipping." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:25 +#: ../../source/how-to-use-differential-privacy.rst:-1 #, fuzzy msgid "server side clipping" msgstr "Logique côté serveur" @@ -8557,7 +8776,7 @@ msgid "" ":code:`DifferentialPrivacyClientSideAdaptiveClipping`." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:57 +#: ../../source/how-to-use-differential-privacy.rst:-1 #, fuzzy msgid "client side clipping" msgstr "Logique côté client" @@ -8585,7 +8804,7 @@ msgid "" "clipping norm value, sensitivity, epsilon, and delta." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:99 +#: ../../source/how-to-use-differential-privacy.rst:-1 msgid "local DP mod" msgstr "" @@ -9047,11 +9266,33 @@ msgstr "" msgid "Arguments" msgstr "Amélioration de la documentation" -#: ../../flwr install:1 new:1 run:1 +#: ../../flwr install:1 log:1 new:1 run:1 #, fuzzy msgid "Optional argument" msgstr "Améliorations facultatives" +#: ../../flwr log:1 +msgid "Get logs from a Flower project run." +msgstr "" + +#: ../../flwr log:1 +msgid "Flag to stream or print logs from the Flower run" +msgstr "" + +#: ../../flwr log +#, fuzzy +msgid "default" +msgstr "Flux de travail" + +#: ../../flwr log:1 +msgid "``True``" +msgstr "" + +#: ../../flwr log:1 +#, fuzzy +msgid "Required argument" +msgstr "Amélioration de la documentation" + #: ../../flwr new:1 #, fuzzy msgid "Create new Flower App." 
@@ -9143,7 +9384,7 @@ msgstr "" #: ../../source/ref-api/flwr.rst:35::1 #, fuzzy -msgid ":py:obj:`client `\\" +msgid ":py:obj:`flwr.client `\\" msgstr "serveur.stratégie.Stratégie" #: ../../source/ref-api/flwr.rst:35::1 flwr.client:1 of @@ -9153,7 +9394,7 @@ msgstr "Client de Flower" #: ../../source/ref-api/flwr.rst:35::1 #, fuzzy -msgid ":py:obj:`common `\\" +msgid ":py:obj:`flwr.common `\\" msgstr "serveur.stratégie.Stratégie" #: ../../source/ref-api/flwr.rst:35::1 flwr.common:1 of @@ -9162,7 +9403,7 @@ msgstr "Composants communs partagés entre le serveur et le client." #: ../../source/ref-api/flwr.rst:35::1 #, fuzzy -msgid ":py:obj:`server `\\" +msgid ":py:obj:`flwr.server `\\" msgstr "serveur.stratégie.Stratégie" #: ../../source/ref-api/flwr.rst:35::1 @@ -9174,7 +9415,7 @@ msgstr "Serveur de Flower" #: ../../source/ref-api/flwr.rst:35::1 #, fuzzy -msgid ":py:obj:`simulation `\\" +msgid ":py:obj:`flwr.simulation `\\" msgstr "serveur.stratégie.Stratégie" #: ../../source/ref-api/flwr.rst:35::1 flwr.simulation:1 of @@ -9258,7 +9499,7 @@ msgstr "" #: ../../source/ref-api/flwr.client.rst:50::1 #, fuzzy -msgid ":py:obj:`mod `\\" +msgid ":py:obj:`flwr.client.mod `\\" msgstr "serveur.stratégie.Stratégie" #: ../../source/ref-api/flwr.client.rst:50::1 flwr.client.mod:1 of @@ -9459,48 +9700,57 @@ msgstr "" msgid "Getter for `Context` client attribute." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst -#: ../../source/ref-api/flwr.client.NumPyClient.rst -#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst -#: ../../source/ref-api/flwr.common.Array.rst -#: ../../source/ref-api/flwr.common.ConfigsRecord.rst -#: ../../source/ref-api/flwr.common.Context.rst -#: ../../source/ref-api/flwr.common.Error.rst -#: ../../source/ref-api/flwr.common.Message.rst -#: ../../source/ref-api/flwr.common.Metadata.rst -#: ../../source/ref-api/flwr.common.MetricsRecord.rst #: ../../source/ref-api/flwr.common.Parameters.rst:2 -#: ../../source/ref-api/flwr.common.ParametersRecord.rst -#: ../../source/ref-api/flwr.common.RecordSet.rst -#: ../../source/ref-api/flwr.server.ClientManager.rst -#: ../../source/ref-api/flwr.server.Driver.rst -#: ../../source/ref-api/flwr.server.ServerAppComponents.rst -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst -#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst -#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst -#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst -#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst -#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst -#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst -#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst -#: ../../source/ref-api/flwr.server.strategy.FedProx.rst -#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst -#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst -#: ../../source/ref-api/flwr.server.strategy.Krum.rst -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst -#: 
../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst -#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst -#: ../../source/ref-api/flwr.simulation.run_simulation.rst -#: ../../source/ref-api/flwr.simulation.start_simulation.rst #: flwr.client.app.start_client flwr.client.app.start_numpy_client -#: flwr.server.app.start_server -#: flwr.server.driver.driver.Driver.send_and_receive of +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.mod.localdp_mod.LocalDpMod +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.context.Context flwr.common.message.Error +#: flwr.common.message.Message flwr.common.message.Message.create_error_reply +#: flwr.common.message.Message.create_reply flwr.common.message.Metadata +#: flwr.common.record.configsrecord.ConfigsRecord +#: flwr.common.record.metricsrecord.MetricsRecord +#: flwr.common.record.parametersrecord.Array +#: flwr.common.record.parametersrecord.ParametersRecord +#: flwr.common.record.recordset.RecordSet flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.ClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.serverapp_components.ServerAppComponents +#: flwr.server.strategy.bulyan.Bulyan +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.fedadagrad.FedAdagrad +#: flwr.server.strategy.fedadam.FedAdam flwr.server.strategy.fedavg.FedAvg +#: flwr.server.strategy.fedavg_android.FedAvgAndroid +#: flwr.server.strategy.fedavgm.FedAvgM flwr.server.strategy.fedopt.FedOpt +#: flwr.server.strategy.fedprox.FedProx +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg +#: flwr.server.strategy.fedyogi.FedYogi flwr.server.strategy.krum.Krum +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow +#: flwr.simulation.run_simulation.run_simulation of #, fuzzy msgid "Parameters" msgstr "Paramètres du modèle." @@ -9512,21 +9762,31 @@ msgid "" "customize the local evaluation process." 
msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst -#: ../../source/ref-api/flwr.client.NumPyClient.rst -#: ../../source/ref-api/flwr.common.ConfigsRecord.rst -#: ../../source/ref-api/flwr.common.Message.rst -#: ../../source/ref-api/flwr.common.MetricsRecord.rst -#: ../../source/ref-api/flwr.common.ParametersRecord.rst -#: ../../source/ref-api/flwr.server.ClientManager.rst -#: ../../source/ref-api/flwr.server.Driver.rst -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst -#: ../../source/ref-api/flwr.simulation.start_simulation.rst -#: flwr.server.app.start_server -#: flwr.server.driver.driver.Driver.send_and_receive of +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters of #, fuzzy msgid "Returns" msgstr "Ressources" @@ -9537,18 +9797,29 @@ msgid "" "details such as the number of local data examples used for evaluation." 
msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst -#: ../../source/ref-api/flwr.client.NumPyClient.rst -#: ../../source/ref-api/flwr.common.Message.rst -#: ../../source/ref-api/flwr.server.ClientManager.rst -#: ../../source/ref-api/flwr.server.Driver.rst -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst -#: ../../source/ref-api/flwr.simulation.start_simulation.rst -#: flwr.server.app.start_server -#: flwr.server.driver.driver.Driver.send_and_receive of +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters of msgid "Return type" msgstr "" @@ -9879,6 +10150,11 @@ msgstr "Logique côté client" msgid ":py:obj:`make_ffn `\\ \\(ffn\\, mods\\)" msgstr "serveur.stratégie.Stratégie" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.utils.make_ffn:1 of +msgid "." +msgstr "" + #: ../../source/ref-api/flwr.client.mod.rst:28::1 msgid "" ":py:obj:`message_size_mod `\\ \\(msg\\," @@ -10046,10 +10322,6 @@ msgstr "" msgid "make\\_ffn" msgstr "" -#: flwr.client.mod.utils.make_ffn:1 of -msgid "." -msgstr "" - #: ../../source/ref-api/flwr.client.mod.message_size_mod.rst:2 msgid "message\\_size\\_mod" msgstr "" @@ -10078,15 +10350,6 @@ msgstr "" msgid "secaggplus\\_mod" msgstr "Flux de travail" -#: ../../source/ref-api/flwr.client.run_client_app.rst:2 -msgid "run\\_client\\_app" -msgstr "" - -#: ../../source/ref-api/flwr.client.run_supernode.rst:2 -#, fuzzy -msgid "run\\_supernode" -msgstr "flower-superlink" - #: ../../source/ref-api/flwr.client.start_client.rst:2 #, fuzzy msgid "start\\_client" @@ -10822,17 +11085,12 @@ msgstr "" #: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid ":py:obj:`get `\\ \\(key\\[\\, default\\]\\)" +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" msgstr "" "Flower 1.0 : ``start_server(..., " "config=flwr.server.ServerConfig(num_rounds=3, round_timeout=600.0), " "...)``" -#: collections.abc.Mapping.get:1 -#: collections.abc.MutableMapping.clear:1::1 of -msgid "Retrieve the corresponding layout by the string key." 
-msgstr "" - #: collections.abc.MutableMapping.clear:1::1 of msgid ":py:obj:`items `\\ \\(\\)" msgstr "" @@ -10889,22 +11147,6 @@ msgstr "" msgid "This function counts booleans as occupying 1 Byte." msgstr "" -#: collections.abc.Mapping.get:3 of -msgid "" -"When there isn't an exact match, all the existing keys in the layout map " -"will be treated as a regex and map against the input key again. The first" -" match will be returned, based on the key insertion order. Return None if" -" there isn't any match found." -msgstr "" - -#: collections.abc.Mapping.get:8 of -msgid "the string key as the query for the layout." -msgstr "" - -#: collections.abc.Mapping.get:10 of -msgid "Corresponding layout based on the query." -msgstr "" - #: ../../source/ref-api/flwr.common.Context.rst:2 msgid "Context" msgstr "" @@ -11663,7 +11905,7 @@ msgstr "" msgid "The encoding in which to encode the string." msgstr "" -#: flwr.common.EventType.encode:5 of +#: flwr.common.EventType.encode:9 of msgid "errors" msgstr "" @@ -11839,7 +12081,7 @@ msgid "" "string." msgstr "" -#: flwr.common.EventType.replace:3 of +#: flwr.common.EventType.replace:5 of msgid "count" msgstr "" @@ -11875,7 +12117,7 @@ msgid "" "strings and the original string." msgstr "" -#: flwr.common.EventType.rsplit:3 flwr.common.EventType.split:3 of +#: flwr.common.EventType.rsplit:7 flwr.common.EventType.split:7 of msgid "sep" msgstr "" @@ -11890,7 +12132,7 @@ msgid "" " empty strings from the result." msgstr "" -#: flwr.common.EventType.rsplit:9 flwr.common.EventType.split:9 of +#: flwr.common.EventType.rsplit:11 flwr.common.EventType.split:11 of msgid "maxsplit" msgstr "" @@ -11931,7 +12173,7 @@ msgid "" "remaining cased characters have lower case." msgstr "" -#: flwr.common.EventType.translate:3 of +#: flwr.common.EventType.translate:5 of #, fuzzy msgid "table" msgstr "Database" @@ -12354,7 +12596,7 @@ msgstr "" #: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid ":py:obj:`get `\\ \\(key\\[\\, default\\]\\)" +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" msgstr "serveur.stratégie.Stratégie" #: collections.abc.MutableMapping.clear:1::1 of @@ -12490,9 +12732,7 @@ msgstr "" #: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "" -":py:obj:`get `\\ \\(key\\[\\, " -"default\\]\\)" +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" msgstr "serveur.stratégie.Stratégie" #: collections.abc.MutableMapping.clear:1::1 of @@ -12839,7 +13079,7 @@ msgstr "" #: ../../source/ref-api/flwr.server.rst:56::1 #, fuzzy -msgid ":py:obj:`strategy `\\" +msgid ":py:obj:`flwr.server.strategy `\\" msgstr "serveur.stratégie.Stratégie" #: ../../source/ref-api/flwr.server.rst:56::1 @@ -12849,7 +13089,7 @@ msgstr "" #: ../../source/ref-api/flwr.server.rst:56::1 #, fuzzy -msgid ":py:obj:`workflow `\\" +msgid ":py:obj:`flwr.server.workflow `\\" msgstr "serveur.stratégie.Stratégie" #: ../../source/ref-api/flwr.server.rst:56::1 @@ -13353,8 +13593,7 @@ msgid "" msgstr "" #: flwr.server.app.start_server:9 -#: flwr.server.serverapp_components.ServerAppComponents:6 -#: flwr.simulation.app.start_simulation:29 of +#: flwr.server.serverapp_components.ServerAppComponents:6 of msgid "" "Currently supported values are `num_rounds` (int, default: 1) and " "`round_timeout` in seconds (float, default: None)." 
@@ -13478,15 +13717,6 @@ msgstr "" msgid "**success**" msgstr "" -#: ../../source/ref-api/flwr.server.run_server_app.rst:2 -msgid "run\\_server\\_app" -msgstr "" - -#: ../../source/ref-api/flwr.server.run_superlink.rst:2 -#, fuzzy -msgid "run\\_superlink" -msgstr "flower-superlink" - #: ../../source/ref-api/flwr.server.start_server.rst:2 #, fuzzy msgid "start\\_server" @@ -16567,16 +16797,16 @@ msgid "Run a Flower App using the Simulation Engine." msgstr "" #: ../../source/ref-api/flwr.simulation.rst:18::1 +#, fuzzy msgid "" -":py:obj:`start_simulation `\\ \\(\\*\\," -" client\\_fn\\, num\\_clients\\)" -msgstr "" +":py:obj:`start_simulation `\\ " +"\\(\\*args\\, \\*\\*kwargs\\)" +msgstr "serveur.stratégie.Stratégie" #: ../../source/ref-api/flwr.simulation.rst:18::1 -#: flwr.simulation.app.start_simulation:1 of -#, fuzzy -msgid "Start a Ray-based Flower simulation server." -msgstr "Simulation de moniteur" +#: flwr.simulation.start_simulation:1 of +msgid "Log error stating that module `ray` could not be imported." +msgstr "" #: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 #, fuzzy @@ -16638,120 +16868,6 @@ msgstr "" msgid "start\\_simulation" msgstr "démarrer_simulation" -#: flwr.simulation.app.start_simulation:3 of -msgid "" -"A function creating `Client` instances. The function must have the " -"signature `client_fn(context: Context). It should return a single client " -"instance of type `Client`. Note that the created client instances are " -"ephemeral and will often be destroyed after a single method invocation. " -"Since client instances are not long-lived, they should not attempt to " -"carry state over method invocations. Any state required by the instance " -"(model, dataset, hyperparameters, ...) should be (re-)created in either " -"the call to `client_fn` or the call to any of the client methods (e.g., " -"load evaluation data in the `evaluate` method itself)." -msgstr "" - -#: flwr.simulation.app.start_simulation:13 of -msgid "The total number of clients in this simulation." -msgstr "" - -#: flwr.simulation.app.start_simulation:15 of -msgid "" -"UNSUPPORTED, WILL BE REMOVED. USE `num_clients` INSTEAD. List " -"`client_id`s for each client. This is only required if `num_clients` is " -"not set. Setting both `num_clients` and `clients_ids` with " -"`len(clients_ids)` not equal to `num_clients` generates an error. Using " -"this argument will raise an error." -msgstr "" - -#: flwr.simulation.app.start_simulation:21 of -msgid "" -"CPU and GPU resources for a single client. Supported keys are `num_cpus` " -"and `num_gpus`. To understand the GPU utilization caused by `num_gpus`, " -"as well as using custom resources, please consult the Ray documentation." -msgstr "" - -#: flwr.simulation.app.start_simulation:26 of -msgid "" -"An implementation of the abstract base class `flwr.server.Server`. If no " -"instance is provided, then `start_server` will create one." -msgstr "" - -#: flwr.simulation.app.start_simulation:32 of -msgid "" -"An implementation of the abstract base class `flwr.server.Strategy`. If " -"no strategy is provided, then `start_server` will use " -"`flwr.server.strategy.FedAvg`." -msgstr "" - -#: flwr.simulation.app.start_simulation:36 of -msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." -" If no implementation is provided, then `start_simulation` will use " -"`flwr.server.client_manager.SimpleClientManager`." 
-msgstr "" - -#: flwr.simulation.app.start_simulation:40 of -msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args: { \"ignore_reinit_error\": True, " -"\"include_dashboard\": False } An empty dictionary can be used " -"(ray_init_args={}) to prevent any arguments from being passed to " -"ray.init." -msgstr "" - -#: flwr.simulation.app.start_simulation:40 of -msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args:" -msgstr "" - -#: flwr.simulation.app.start_simulation:44 of -msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" -msgstr "" - -#: flwr.simulation.app.start_simulation:46 of -msgid "" -"An empty dictionary can be used (ray_init_args={}) to prevent any " -"arguments from being passed to ray.init." -msgstr "" - -#: flwr.simulation.app.start_simulation:49 of -msgid "" -"Set to True to prevent `ray.shutdown()` in case " -"`ray.is_initialized()=True`." -msgstr "" - -#: flwr.simulation.app.start_simulation:51 of -msgid "" -"Optionally specify the type of actor to use. The actor object, which " -"persists throughout the simulation, will be the process in charge of " -"executing a ClientApp wrapping input argument `client_fn`." -msgstr "" - -#: flwr.simulation.app.start_simulation:55 of -msgid "" -"If you want to create your own Actor classes, you might need to pass some" -" input argument. You can use this dictionary for such purpose." -msgstr "" - -#: flwr.simulation.app.start_simulation:58 of -msgid "" -"(default: \"DEFAULT\") Optional string (\"DEFAULT\" or \"SPREAD\") for " -"the VCE to choose in which node the actor is placed. If you are an " -"advanced user needed more control you can use lower-level scheduling " -"strategies to pin actors to specific compute nodes (e.g. via " -"NodeAffinitySchedulingStrategy). Please note this is an advanced feature." -" For all details, please refer to the Ray documentation: " -"https://docs.ray.io/en/latest/ray-core/scheduling/index.html" -msgstr "" - -#: flwr.simulation.app.start_simulation:67 of -msgid "**hist** -- Object containing metrics from training." -msgstr "" - #: ../../source/ref-changelog.md:1 msgid "Changelog" msgstr "Changelog" @@ -16889,13 +17005,6 @@ msgstr "" msgid "Incompatible changes" msgstr "Changements incompatibles" -#: ../../source/ref-changelog.md:33 ../../source/ref-changelog.md:399 -#: ../../source/ref-changelog.md:676 ../../source/ref-changelog.md:740 -#: ../../source/ref-changelog.md:798 ../../source/ref-changelog.md:867 -#: ../../source/ref-changelog.md:929 -msgid "None" -msgstr "Aucun" - #: ../../source/ref-changelog.md:35 #, fuzzy msgid "v1.11.0 (2024-08-30)" @@ -23786,7 +23895,20 @@ msgstr "" "Oui, bien sûr, une liste d'exemples disponibles utilisant Flower dans un " "environnement blockchain est disponible ici :" -#: ../../source/ref-faq.rst:28 +#: ../../source/ref-faq.rst:29 +msgid "`FLock: A Decentralised AI Training Platform `_." +msgstr "" + +#: ../../source/ref-faq.rst:29 +msgid "Contribute to on-chain training the model and earn rewards." +msgstr "" + +#: ../../source/ref-faq.rst:30 +#, fuzzy +msgid "Local blockchain with federated learning simulation." +msgstr "Mise à l'échelle de l'apprentissage fédéré" + +#: ../../source/ref-faq.rst:31 msgid "" "`Flower meets Nevermined GitHub Repository `_." 
@@ -23794,7 +23916,7 @@ msgstr "" "`Flower meets Nevermined GitHub Repository `_." -#: ../../source/ref-faq.rst:29 +#: ../../source/ref-faq.rst:32 msgid "" "`Flower meets Nevermined YouTube video " "`_." @@ -23802,7 +23924,7 @@ msgstr "" "`Flower rencontre Nevermined vidéo YouTube " "`_." -#: ../../source/ref-faq.rst:30 +#: ../../source/ref-faq.rst:33 #, fuzzy msgid "" "`Flower meets KOSMoS `_." -#: ../../source/ref-faq.rst:31 +#: ../../source/ref-faq.rst:34 msgid "" "`Flower meets Talan blog post `_ ." -#: ../../source/ref-faq.rst:32 +#: ../../source/ref-faq.rst:35 msgid "" "`Flower meets Talan GitHub Repository " "`_ ." @@ -24147,239 +24269,325 @@ msgstr "" "`_ " "pour en savoir plus." -#: ../../source/tutorial-quickstart-fastai.rst:-1 -msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with FastAI to train a vision model on CIFAR-10." -msgstr "" - #: ../../source/tutorial-quickstart-fastai.rst:5 msgid "Quickstart fastai" msgstr "Démarrage rapide fastai" -#: ../../source/tutorial-quickstart-fastai.rst:10 -msgid "Let's build a federated learning system using fastai and Flower!" +#: ../../source/tutorial-quickstart-fastai.rst:7 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train a " +"SqueezeNet model on MNIST using Flower and fastai. It is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `." msgstr "" -"Construisons un système d'apprentissage fédéré en utilisant fastai et " -"Flower !" +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." #: ../../source/tutorial-quickstart-fastai.rst:12 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 +msgid "Then, clone the code example directly from GitHub:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:20 +msgid "" +"This will create a new directory called `quickstart-fastai` containing " +"the following files:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:33 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:33 #, fuzzy +msgid "Next, activate your environment, then run:" +msgstr "et active l'environnement virtuel avec :" + +#: ../../source/tutorial-quickstart-fastai.rst:43 msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." +"This example by default runs the Flower Simulation Engine, creating a " +"federation of 10 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." +" Let's run the project:" msgstr "" -"Réfère-toi à l'exemple de code complet " -"`_ " -"pour en savoir plus." 
+ +#: ../../source/tutorial-quickstart-fastai.rst:56 +#: ../../source/tutorial-quickstart-huggingface.rst:65 +#: ../../source/tutorial-quickstart-mlx.rst:64 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:56 +#: ../../source/tutorial-quickstart-pytorch.rst:64 +#: ../../source/tutorial-quickstart-tensorflow.rst:65 +msgid "With default arguments you will see an output like this one:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:100 +#: ../../source/tutorial-quickstart-huggingface.rst:116 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:106 +#: ../../source/tutorial-quickstart-pytorch.rst:105 +#: ../../source/tutorial-quickstart-tensorflow.rst:106 +msgid "" +"You can also override the parameters defined in the " +"``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:110 +#, fuzzy +msgid "" +"Check the `source code `_ of this tutorial in ``examples/quickstart-fasai`` " +"in the Flower GitHub repository." +msgstr "" +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." #: ../../source/tutorial-quickstart-huggingface.rst:-1 msgid "" "Check out this Federating Learning quickstart tutorial for using Flower " -"with HuggingFace Transformers in order to fine-tune an LLM." +"with 🤗 HuggingFace Transformers in order to fine-tune an LLM." msgstr "" #: ../../source/tutorial-quickstart-huggingface.rst:5 msgid "Quickstart 🤗 Transformers" msgstr "Démarrage rapide 🤗 Transformateurs" -#: ../../source/tutorial-quickstart-huggingface.rst:10 +#: ../../source/tutorial-quickstart-huggingface.rst:7 +#, fuzzy msgid "" -"Let's build a federated learning system using Hugging Face Transformers " -"and Flower!" +"In this federated learning tutorial we will learn how to train a large " +"language model (LLM) on the `IMDB " +"`_ dataset using Flower" +" and the 🤗 Hugging Face Transformers library. It is recommended to create" +" a virtual environment and run everything within a :doc:`virtualenv " +"`." msgstr "" -"Construisons un système d'apprentissage fédéré à l'aide des " -"transformateurs Hugging Face et de Flower !" +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." -#: ../../source/tutorial-quickstart-huggingface.rst:12 +#: ../../source/tutorial-quickstart-huggingface.rst:14 msgid "" -"We will leverage Hugging Face to federate the training of language models" -" over multiple clients using Flower. More specifically, we will fine-tune" -" a pre-trained Transformer model (distilBERT) for sequence classification" -" over a dataset of IMDB ratings. The end goal is to detect if a movie " -"rating is positive or negative." +"Let's use ``flwr new`` to create a complete Flower+🤗 Hugging Face " +"project. It will generate all the files needed to run, by default with " +"the Flower Simulation Engine, a federation of 10 nodes using |fedavg|_ " +"The dataset will be partitioned using |flowerdatasets|_'s " +"|iidpartitioner|_." msgstr "" -"Nous nous appuierons sur Hugging Face pour fédérer l'entraînement de " -"modèles de langage sur plusieurs clients à l'aide de Flower. Plus " -"précisément, nous mettrons au point un modèle Transformer pré-entraîné " -"(distilBERT) pour la classification de séquences sur un ensemble de " -"données d'évaluations IMDB. 
L'objectif final est de détecter si " -"l'évaluation d'un film est positive ou négative." - -#: ../../source/tutorial-quickstart-huggingface.rst:18 -msgid "Dependencies" -msgstr "Dépendances" #: ../../source/tutorial-quickstart-huggingface.rst:20 +#: ../../source/tutorial-quickstart-mlx.rst:19 +#: ../../source/tutorial-quickstart-pytorch.rst:19 +#: ../../source/tutorial-quickstart-tensorflow.rst:20 +#, fuzzy msgid "" -"To follow along this tutorial you will need to install the following " -"packages: :code:`datasets`, :code:`evaluate`, :code:`flwr`, " -":code:`torch`, and :code:`transformers`. This can be done using " -":code:`pip`:" +"Now that we have a rough idea of what this example is about, let's get " +"started. First, install Flower in your new environment:" msgstr "" -"Pour suivre ce tutoriel, tu devras installer les paquets suivants : " -":code:`datasets`, :code:`evaluate`, :code:`flwr`, :code:`torch`, et " -":code:`transformers`. Cela peut être fait en utilisant :code:`pip` :" +"Maintenant que nous avons une idée approximative de ce qui se passe, " +"commençons. Nous devons d'abord installer Flower. Tu peux le faire en " +"lançant :" -#: ../../source/tutorial-quickstart-huggingface.rst:30 -msgid "Standard Hugging Face workflow" -msgstr "Flux de travail standard pour le visage" +#: ../../source/tutorial-quickstart-huggingface.rst:28 +msgid "" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``HuggingFace``), give a name to your " +"project, and type in your developer name:" +msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:33 -msgid "Handling the data" -msgstr "Traitement des données" +#: ../../source/tutorial-quickstart-huggingface.rst:36 +#: ../../source/tutorial-quickstart-mlx.rst:35 +#: ../../source/tutorial-quickstart-pytorch.rst:35 +#: ../../source/tutorial-quickstart-tensorflow.rst:36 +msgid "" +"After running it you'll notice a new directory with your project name has" +" been created. It should have the following structure:" +msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:35 +#: ../../source/tutorial-quickstart-huggingface.rst:50 +#: ../../source/tutorial-quickstart-mlx.rst:49 +#: ../../source/tutorial-quickstart-pytorch.rst:49 +#: ../../source/tutorial-quickstart-tensorflow.rst:50 msgid "" -"To fetch the IMDB dataset, we will use Hugging Face's :code:`datasets` " -"library. We then need to tokenize the data and create :code:`PyTorch` " -"dataloaders, this is all done in the :code:`load_data` function:" +"If you haven't yet installed the project and its dependencies, you can do" +" so by:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:58 +#: ../../source/tutorial-quickstart-pytorch.rst:57 +#: ../../source/tutorial-quickstart-tensorflow.rst:58 +msgid "To run the project, do:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:106 +msgid "You can also run the project with GPU as follows:" msgstr "" -"Pour récupérer le jeu de données IMDB, nous utiliserons la bibliothèque " -":code:`datasets` de Hugging Face. 
Nous devons ensuite tokeniser les " -"données et créer des :code:`PyTorch` dataloaders, ce qui est fait dans la" -" fonction :code:`load_data` :" -#: ../../source/tutorial-quickstart-huggingface.rst:81 -msgid "Training and testing the model" -msgstr "Former et tester le modèle" +#: ../../source/tutorial-quickstart-huggingface.rst:113 +msgid "" +"This will use the default arguments where each ``ClientApp`` will use 2 " +"CPUs and at most 4 ``ClientApp``\\s will run in a given GPU." +msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:83 +#: ../../source/tutorial-quickstart-huggingface.rst:124 +#: ../../source/tutorial-quickstart-mlx.rst:114 +#: ../../source/tutorial-quickstart-pytorch.rst:113 msgid "" -"Once we have a way of creating our trainloader and testloader, we can " -"take care of the training and testing. This is very similar to any " -":code:`PyTorch` training or testing loop:" +"What follows is an explanation of each component in the project you just " +"created: dataset partition, the model, defining the ``ClientApp`` and " +"defining the ``ServerApp``." msgstr "" -"Une fois que nous avons trouvé un moyen de créer notre trainloader et " -"notre testloader, nous pouvons nous occuper de l'entraînement et du test." -" C'est très similaire à n'importe quelle boucle d'entraînement ou de test" -" :code:`PyTorch` :" -#: ../../source/tutorial-quickstart-huggingface.rst:121 -msgid "Creating the model itself" -msgstr "Créer le modèle lui-même" +#: ../../source/tutorial-quickstart-huggingface.rst:130 +#: ../../source/tutorial-quickstart-mlx.rst:120 +#: ../../source/tutorial-quickstart-pytorch.rst:119 +#: ../../source/tutorial-quickstart-tensorflow.rst:116 +#, fuzzy +msgid "The Data" +msgstr "Chargement des données" -#: ../../source/tutorial-quickstart-huggingface.rst:123 +#: ../../source/tutorial-quickstart-huggingface.rst:132 msgid "" -"To create the model itself, we will just load the pre-trained distillBERT" -" model using Hugging Face’s :code:`AutoModelForSequenceClassification` :" +"This tutorial uses |flowerdatasets|_ to easily download and partition the" +" `IMDB `_ dataset. In " +"this example you'll make use of the |iidpartitioner|_ to generate " +"``num_partitions`` partitions. You can choose |otherpartitioners|_ " +"available in Flower Datasets. To tokenize the text, we will also load the" +" tokenizer from the pre-trained Transformer model that we'll use during " +"training - more on that in the next section. Each ``ClientApp`` will call" +" this function to create dataloaders with the data that correspond to " +"their data partition." msgstr "" -"Pour créer le modèle lui-même, nous allons simplement charger le modèle " -"distillBERT pré-entraîné en utilisant le " -":code:`AutoModelForSequenceClassification` de Hugging Face :" -#: ../../source/tutorial-quickstart-huggingface.rst:136 -msgid "Federating the example" -msgstr "Fédérer l'exemple" +#: ../../source/tutorial-quickstart-huggingface.rst:178 +#: ../../source/tutorial-quickstart-mlx.rst:164 +#: ../../source/tutorial-quickstart-pytorch.rst:157 +#: ../../source/tutorial-quickstart-tensorflow.rst:145 +#, fuzzy +msgid "The Model" +msgstr "Entraîne le modèle" -#: ../../source/tutorial-quickstart-huggingface.rst:139 -msgid "Creating the IMDBClient" -msgstr "Création du client IMDBC" +#: ../../source/tutorial-quickstart-huggingface.rst:180 +#, fuzzy +msgid "" +"We will leverage 🤗 Hugging Face to federate the training of language " +"models over multiple clients using Flower. 
More specifically, we will " +"fine-tune a pre-trained Transformer model (|berttiny|_) for sequence " +"classification over the dataset of IMDB ratings. The end goal is to " +"detect if a movie rating is positive or negative. If you have access to " +"larger GPUs, feel free to use larger models!" +msgstr "" +"Nous nous appuierons sur Hugging Face pour fédérer l'entraînement de " +"modèles de langage sur plusieurs clients à l'aide de Flower. Plus " +"précisément, nous mettrons au point un modèle Transformer pré-entraîné " +"(distilBERT) pour la classification de séquences sur un ensemble de " +"données d'évaluations IMDB. L'objectif final est de détecter si " +"l'évaluation d'un film est positive ou négative." -#: ../../source/tutorial-quickstart-huggingface.rst:141 +#: ../../source/tutorial-quickstart-huggingface.rst:193 msgid "" -"To federate our example to multiple clients, we first need to write our " -"Flower client class (inheriting from :code:`flwr.client.NumPyClient`). " -"This is very easy, as our model is a standard :code:`PyTorch` model:" +"Note that here, ``model_name`` is a string that will be loaded from the " +"``Context`` in the ClientApp and ServerApp." msgstr "" -"Pour fédérer notre exemple à plusieurs clients, nous devons d'abord " -"écrire notre classe de client Flower (héritant de " -":code:`flwr.client.NumPyClient`). C'est très facile, car notre modèle est" -" un modèle :code:`PyTorch` standard :" -#: ../../source/tutorial-quickstart-huggingface.rst:169 +#: ../../source/tutorial-quickstart-huggingface.rst:196 msgid "" -"The :code:`get_parameters` function lets the server get the client's " -"parameters. Inversely, the :code:`set_parameters` function allows the " -"server to send its parameters to the client. Finally, the :code:`fit` " -"function trains the model locally for the client, and the " -":code:`evaluate` function tests the model locally and returns the " -"relevant metrics." +"In addition to loading the pretrained model weights and architecture, we " +"also include two utility functions to perform both training (i.e. " +"``train()``) and evaluation (i.e. ``test()``) using the above model. " +"These functions should look fairly familiar if you have some prior " +"experience with PyTorch. Note these functions do not have anything " +"specific to Flower. That being said, the training function will normally " +"be called, as we'll see later, from a Flower client passing its own data." +" In summary, your clients can use standard training/testing functions to " +"perform local training or evaluation:" msgstr "" -"La fonction :code:`get_parameters` permet au serveur d'obtenir les " -"paramètres du client. Inversement, la fonction :code:`set_parameters` " -"permet au serveur d'envoyer ses paramètres au client. Enfin, la fonction " -":code:`fit` forme le modèle localement pour le client, et la fonction " -":code:`evaluate` teste le modèle localement et renvoie les mesures " -"correspondantes." 
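The msgid strings above describe ``train()`` and ``test()`` utility functions for the 🤗 Transformers model, but the patch itself carries no code. Below is a minimal sketch of what such functions typically look like; it assumes the dataloaders yield tokenized batches (dicts of tensors including ``labels``) as produced by the data pipeline described earlier, and the exact signatures used in ``examples/quickstart-huggingface`` may differ:

    import torch
    from torch.optim import AdamW


    def train(net, trainloader, epochs, device):
        # Standard PyTorch training loop over tokenized IMDB batches.
        optimizer = AdamW(net.parameters(), lr=5e-5)
        net.to(device)
        net.train()
        for _ in range(epochs):
            for batch in trainloader:
                batch = {k: v.to(device) for k, v in batch.items()}
                outputs = net(**batch)  # the model returns loss when labels are present
                outputs.loss.backward()
                optimizer.step()
                optimizer.zero_grad()


    def test(net, testloader, device):
        # Evaluate the model and report average loss and accuracy.
        net.to(device)
        net.eval()
        loss, correct, total = 0.0, 0, 0
        with torch.no_grad():
            for batch in testloader:
                batch = {k: v.to(device) for k, v in batch.items()}
                outputs = net(**batch)
                loss += outputs.loss.item()
                predictions = torch.argmax(outputs.logits, dim=-1)
                correct += (predictions == batch["labels"]).sum().item()
                total += len(batch["labels"])
        return loss / len(testloader), correct / total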
-#: ../../source/tutorial-quickstart-huggingface.rst:175 -msgid "Starting the server" -msgstr "Démarrer le serveur" +#: ../../source/tutorial-quickstart-huggingface.rst:239 +#: ../../source/tutorial-quickstart-mlx.rst:210 +#: ../../source/tutorial-quickstart-pytorch.rst:234 +#: ../../source/tutorial-quickstart-tensorflow.rst:176 +#, fuzzy +msgid "The ClientApp" +msgstr "client" -#: ../../source/tutorial-quickstart-huggingface.rst:177 +#: ../../source/tutorial-quickstart-huggingface.rst:241 msgid "" -"Now that we have a way to instantiate clients, we need to create our " -"server in order to aggregate the results. Using Flower, this can be done " -"very easily by first choosing a strategy (here, we are using " -":code:`FedAvg`, which will define the global weights as the average of " -"all the clients' weights at each round) and then using the " -":code:`flwr.server.start_server` function:" +"The main changes we have to make to use 🤗 Hugging Face with Flower will " +"be found in the ``get_weights()`` and ``set_weights()`` functions. Under " +"the hood, the ``transformers`` library uses PyTorch, which means we can " +"reuse the ``get_weights()`` and ``set_weights()`` code that we defined in" +" the :doc:`Quickstart PyTorch ` tutorial. As" +" a reminder, in ``get_weights()``, PyTorch model parameters are extracted" +" and represented as a list of NumPy arrays. The ``set_weights()`` " +"function that's the opposite: given a list of NumPy arrays it applies " +"them to an existing PyTorch model. Doing this in fairly easy in PyTorch." msgstr "" -"Maintenant que nous avons un moyen d'instancier les clients, nous devons " -"créer notre serveur afin d'agréger les résultats. Avec Flower, cela peut " -"être fait très facilement en choisissant d'abord une stratégie (ici, nous" -" utilisons :code:`FedAvg`, qui définira les poids globaux comme la " -"moyenne des poids de tous les clients à chaque tour) et en utilisant " -"ensuite la fonction :code:`flwr.server.start_server` :" -#: ../../source/tutorial-quickstart-huggingface.rst:205 +#: ../../source/tutorial-quickstart-huggingface.rst:254 +#: ../../source/tutorial-quickstart-pytorch.rst:245 msgid "" -"The :code:`weighted_average` function is there to provide a way to " -"aggregate the metrics distributed amongst the clients (basically this " -"allows us to display a nice average accuracy and loss for every round)." +"The specific implementation of ``get_weights()`` and ``set_weights()`` " +"depends on the type of models you use. The ones shown below work for a " +"wide range of PyTorch models but you might need to adjust them if you " +"have more exotic model architectures." msgstr "" -"La fonction :code:`weighted_average` est là pour fournir un moyen " -"d'agréger les mesures réparties entre les clients (en gros, cela nous " -"permet d'afficher une belle moyenne de précision et de perte pour chaque " -"tour)." -#: ../../source/tutorial-quickstart-huggingface.rst:209 -msgid "Putting everything together" -msgstr "Tout assembler" - -#: ../../source/tutorial-quickstart-huggingface.rst:211 -msgid "We can now start client instances using:" -msgstr "Nous pouvons maintenant démarrer des instances de clients en utilisant :" +#: ../../source/tutorial-quickstart-huggingface.rst:269 +#: ../../source/tutorial-quickstart-pytorch.rst:261 +msgid "" +"The rest of the functionality is directly inspired by the centralized " +"case. The ``fit()`` method in the client trains the model using the local" +" dataset. 
Similarly, the ``evaluate()`` method is used to evaluate the " +"model received on a held-out validation set that the client might have:" +msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:221 +#: ../../source/tutorial-quickstart-huggingface.rst:296 msgid "" -"And they will be able to connect to the server and start the federated " -"training." -msgstr "Et ils pourront se connecter au serveur et démarrer la formation fédérée." +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"``local-epochs`` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additional hyperparameters in ``pyproject.toml`` and access them here." +msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:223 +#: ../../source/tutorial-quickstart-huggingface.rst:330 +#: ../../source/tutorial-quickstart-mlx.rst:376 +#: ../../source/tutorial-quickstart-pytorch.rst:321 +#: ../../source/tutorial-quickstart-tensorflow.rst:245 #, fuzzy +msgid "The ServerApp" +msgstr "serveur" + +#: ../../source/tutorial-quickstart-huggingface.rst:332 msgid "" -"If you want to check out everything put together, you should check out " -"the `full code example `_ ." +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"|serverappcomponents|_ as opposed to a |client|_ In this example we use " +"the `FedAvg` strategy. To it we pass a randomly initialized model that " +"will server as the global model to federated. Note that the value of " +"``fraction_fit`` is read from the run config. You can find the default " +"value defined in the ``pyproject.toml``." msgstr "" -"Si tu veux voir tout ce qui est mis ensemble, tu devrais consulter " -"l'exemple de code complet : " -"[https://github.com/adap/flower/tree/main/examples/quickstart-" -"huggingface](https://github.com/adap/flower/tree/main/examples" -"/quickstart-huggingface)." -#: ../../source/tutorial-quickstart-huggingface.rst:226 +#: ../../source/tutorial-quickstart-huggingface.rst:371 msgid "" -"Of course, this is a very basic example, and a lot can be added or " -"modified, it was just to showcase how simply we could federate a Hugging " -"Face workflow using Flower." +"Congratulations! You've successfully built and run your first federated " +"learning system for an LLM." msgstr "" -"Bien sûr, c'est un exemple très basique, et beaucoup de choses peuvent " -"être ajoutées ou modifiées, il s'agissait juste de montrer avec quelle " -"simplicité on pouvait fédérer un flux de travail Hugging Face à l'aide de" -" Flower." -#: ../../source/tutorial-quickstart-huggingface.rst:229 +#: ../../source/tutorial-quickstart-huggingface.rst:376 msgid "" -"Note that in this example we used :code:`PyTorch`, but we could have very" -" well used :code:`TensorFlow`." +"Check the source code of the extended version of this tutorial in " +"|quickstart_hf_link|_ in the Flower GitHub repository. For a " +"comprehensive example of a federated fine-tuning of an LLM with Flower, " +"refer to the |flowertune|_ example in the Flower GitHub repository." msgstr "" -"Notez que dans cet exemple, nous avons utilisé :code:`PyTorch`, mais nous" -" aurions très bien pu utiliser :code:`TensorFlow`." 
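The ``get_weights()`` and ``set_weights()`` helpers referenced in the msgid strings above follow the standard Flower/PyTorch pattern: serialize the model's ``state_dict`` into NumPy arrays and load them back. A minimal sketch is given below for reference; the function names match the tutorial text, but the exact code shipped in the quickstart examples may differ slightly:

    from collections import OrderedDict

    import torch


    def get_weights(net):
        # Extract model parameters as a list of NumPy arrays (one per tensor).
        return [val.cpu().numpy() for _, val in net.state_dict().items()]


    def set_weights(net, parameters):
        # Apply a list of NumPy arrays back onto an existing PyTorch model.
        params_dict = zip(net.state_dict().keys(), parameters)
        state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})
        net.load_state_dict(state_dict, strict=True)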
#: ../../source/tutorial-quickstart-ios.rst:-1 msgid "" @@ -24455,7 +24663,6 @@ msgstr "" #: ../../source/tutorial-quickstart-ios.rst:34 #: ../../source/tutorial-quickstart-scikitlearn.rst:40 -#: ../../source/tutorial-quickstart-tensorflow.rst:29 #: ../../source/tutorial-quickstart-xgboost.rst:55 msgid "Flower Client" msgstr "Client de la fleur" @@ -24529,13 +24736,11 @@ msgstr "" #: ../../source/tutorial-quickstart-ios.rst:129 #: ../../source/tutorial-quickstart-scikitlearn.rst:167 -#: ../../source/tutorial-quickstart-tensorflow.rst:98 #: ../../source/tutorial-quickstart-xgboost.rst:341 msgid "Flower Server" msgstr "Serveur de Flower" #: ../../source/tutorial-quickstart-ios.rst:131 -#: ../../source/tutorial-quickstart-tensorflow.rst:100 msgid "" "For simple workloads we can start a Flower server and leave all the " "configuration possibilities at their default values. In a file named " @@ -24548,12 +24753,10 @@ msgstr "" #: ../../source/tutorial-quickstart-ios.rst:142 #: ../../source/tutorial-quickstart-scikitlearn.rst:230 -#: ../../source/tutorial-quickstart-tensorflow.rst:112 msgid "Train the model, federated!" msgstr "Entraîne le modèle, fédéré !" #: ../../source/tutorial-quickstart-ios.rst:144 -#: ../../source/tutorial-quickstart-tensorflow.rst:114 #: ../../source/tutorial-quickstart-xgboost.rst:567 msgid "" "With both client and server ready, we can now run everything and see " @@ -24806,7 +25009,7 @@ msgstr "" "paramètres du modèle, une méthode pour former le modèle, et une méthode " "pour tester le modèle :" -#: ../../source/tutorial-quickstart-jax.rst:165 +#: ../../source/tutorial-quickstart-jax.rst:167 msgid ":code:`set_parameters (optional)`" msgstr ":code:`set_parameters (optional)`" @@ -24920,17 +25123,6 @@ msgid "" "api/flwr_datasets.partitioner.IidPartitioner.html#flwr_datasets.partitioner.IidPartitioner>`_." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:19 -#: ../../source/tutorial-quickstart-pytorch.rst:19 -#, fuzzy -msgid "" -"Now that we have a rough idea of what this example is about, let's get " -"started. First, install Flower in your new environment:" -msgstr "" -"Maintenant que nous avons une idée approximative de ce qui se passe, " -"commençons. Nous devons d'abord installer Flower. Tu peux le faire en " -"lançant :" - #: ../../source/tutorial-quickstart-mlx.rst:27 msgid "" "Then, run the command below. You will be prompted to select of the " @@ -24938,49 +25130,16 @@ msgid "" "type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:35 -#: ../../source/tutorial-quickstart-pytorch.rst:35 -msgid "" -"After running it you'll notice a new directory with your project name has" -" been created. 
It should have the following structure:" -msgstr "" - -#: ../../source/tutorial-quickstart-mlx.rst:49 -#: ../../source/tutorial-quickstart-pytorch.rst:49 -msgid "" -"If you haven't yet installed the project and its dependencies, you can do" -" so by:" -msgstr "" - #: ../../source/tutorial-quickstart-mlx.rst:57 msgid "To run the project do:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:64 -#: ../../source/tutorial-quickstart-pytorch.rst:64 -msgid "With default arguments you will see an output like this one:" -msgstr "" - #: ../../source/tutorial-quickstart-mlx.rst:106 msgid "" "You can also override the parameters defined in " "``[tool.flwr.app.config]`` section in the ``pyproject.toml`` like this:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:114 -#: ../../source/tutorial-quickstart-pytorch.rst:113 -msgid "" -"What follows is an explanation of each component in the project you just " -"created: dataset partition, the model, defining the ``ClientApp`` and " -"defining the ``ServerApp``." -msgstr "" - -#: ../../source/tutorial-quickstart-mlx.rst:120 -#: ../../source/tutorial-quickstart-pytorch.rst:119 -#, fuzzy -msgid "The Data" -msgstr "Chargement des données" - #: ../../source/tutorial-quickstart-mlx.rst:122 msgid "" "We will use `Flower Datasets `_ to " @@ -24992,12 +25151,6 @@ msgid "" "api/flwr_datasets.partitioner.html>`_ available in Flower Datasets:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:164 -#: ../../source/tutorial-quickstart-pytorch.rst:157 -#, fuzzy -msgid "The Model" -msgstr "Entraîne le modèle" - #: ../../source/tutorial-quickstart-mlx.rst:166 msgid "" "We define the model as in the `centralized MLX example " @@ -25011,12 +25164,6 @@ msgid "" "over batches." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:210 -#: ../../source/tutorial-quickstart-pytorch.rst:234 -#, fuzzy -msgid "The ClientApp" -msgstr "client" - #: ../../source/tutorial-quickstart-mlx.rst:212 msgid "" "The main changes we have to make to use `MLX` with `Flower` will be found" @@ -25085,12 +25232,6 @@ msgid "" "method." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:376 -#: ../../source/tutorial-quickstart-pytorch.rst:321 -#, fuzzy -msgid "The ServerApp" -msgstr "serveur" - #: ../../source/tutorial-quickstart-mlx.rst:378 msgid "" "To construct a ``ServerApp``, we define a ``server_fn()`` callback with " @@ -25104,6 +25245,7 @@ msgstr "" #: ../../source/tutorial-quickstart-mlx.rst:402 #: ../../source/tutorial-quickstart-pytorch.rst:360 +#: ../../source/tutorial-quickstart-tensorflow.rst:279 msgid "" "Congratulations! You've successfully built and run your first federated " "learning system." @@ -25184,16 +25326,6 @@ msgid "" "and type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:57 -msgid "To run the project, do:" -msgstr "" - -#: ../../source/tutorial-quickstart-pytorch.rst:105 -msgid "" -"You can also override the parameters defined in the " -"``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this:" -msgstr "" - #: ../../source/tutorial-quickstart-pytorch.rst:121 msgid "" "This tutorial uses `Flower Datasets `_ " @@ -25237,22 +25369,6 @@ msgid "" "PyTorch model. Doing this in fairly easy in PyTorch." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:245 -msgid "" -"The specific implementation of ``get_weights()`` and ``set_weights()`` " -"depends on the type of models you use. 
The ones shown below work for a " -"wide range of PyTorch models but you might need to adjust them if you " -"have more exotic model architectures." -msgstr "" - -#: ../../source/tutorial-quickstart-pytorch.rst:261 -msgid "" -"The rest of the functionality is directly inspired by the centralized " -"case. The ``fit()`` method in the client trains the model using the local" -" dataset. Similarly, the ``evaluate()`` method is used to evaluate the " -"model received on a held-out validation set that the client might have:" -msgstr "" - #: ../../source/tutorial-quickstart-pytorch.rst:294 msgid "" "Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " @@ -25292,6 +25408,7 @@ msgstr "" "/quickstart-mxnet`." #: ../../source/tutorial-quickstart-pytorch.rst:372 +#: ../../source/tutorial-quickstart-tensorflow.rst:295 #, fuzzy msgid "Video tutorial" msgstr "Tutoriel" @@ -25303,35 +25420,57 @@ msgid "" "that shows the new APIs (as the content above does)" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:-1 -msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with PyTorch Lightning to train an Auto Encoder model on MNIST." -msgstr "" - #: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 msgid "Quickstart PyTorch Lightning" msgstr "Démarrage rapide de PyTorch Lightning" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:10 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:7 #, fuzzy msgid "" -"Let's build a horizontal federated learning system using PyTorch " -"Lightning and Flower!" +"In this federated learning tutorial we will learn how to train an " +"AutoEncoder model on MNIST using Flower and PyTorch Lightning. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." msgstr "" -"Construisons un système d'apprentissage fédéré en utilisant PyTorch " -"Lightning et Flower !" +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:20 +msgid "" +"This will create a new directory called `quickstart-pytorch-lightning` " +"containing the following files:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:43 +msgid "" +"By default, Flower Simulation Engine will be started and it will create a" +" federation of 4 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." +" To run the project, do:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:94 +msgid "" +"Each simulated `ClientApp` (two per round) will also log a summary of " +"their local training process. Expect this output to be similar to:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:116 #, fuzzy msgid "" -"Please refer to the `full code example " -"`_ to learn more." +"Check the `source code `_ of this tutorial in ``examples" +"/quickstart-pytorch-lightning`` in the Flower GitHub repository." msgstr "" -"Réfère-toi à l'exemple de code complet " -"`_ pour en savoir plus." +"Félicitations ! Tu as réussi à construire et à faire fonctionner ton " +"premier système d'apprentissage fédéré. Le code source complet " +"`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." 
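To make the ``ClientApp`` wiring described in the msgid strings above concrete, here is a compact sketch of a ``NumPyClient`` with ``fit()``/``evaluate()`` and a ``client_fn()`` callback. The ``task`` module (providing ``Net``, ``load_data``, ``train``, ``test``, ``get_weights``, ``set_weights``) and the exact config/node keys are assumptions based on the tutorial text, not the literal code generated by ``flwr new``:

    from flwr.client import ClientApp, NumPyClient
    from flwr.common import Context

    # Assumed helpers, mirroring what the tutorial text describes in task.py.
    from task import Net, load_data, train, test, get_weights, set_weights


    class FlowerClient(NumPyClient):
        def __init__(self, net, trainloader, valloader, local_epochs):
            self.net = net
            self.trainloader = trainloader
            self.valloader = valloader
            self.local_epochs = local_epochs

        def fit(self, parameters, config):
            # Train the received global model on this client's local partition.
            set_weights(self.net, parameters)
            train(self.net, self.trainloader, self.local_epochs)  # assumed signature
            return get_weights(self.net), len(self.trainloader.dataset), {}

        def evaluate(self, parameters, config):
            # Evaluate the received model on the client's held-out validation split.
            set_weights(self.net, parameters)
            loss, accuracy = test(self.net, self.valloader)  # assumed return values
            return loss, len(self.valloader.dataset), {"accuracy": accuracy}


    def client_fn(context: Context):
        # Hyperparameters from pyproject.toml are exposed via the run config;
        # partition information comes from the node config.
        partition_id = context.node_config["partition-id"]
        num_partitions = context.node_config["num-partitions"]
        local_epochs = context.run_config["local-epochs"]
        trainloader, valloader = load_data(partition_id, num_partitions)
        return FlowerClient(Net(), trainloader, valloader, local_epochs).to_client()


    app = ClientApp(client_fn=client_fn)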
#: ../../source/tutorial-quickstart-scikitlearn.rst:-1 msgid "" @@ -25439,7 +25578,7 @@ msgstr ":code:`set_model_params()`" msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" msgstr "Définit les paramètres d'un modèle de régression logistique :code:`sklean`" -#: ../../source/tutorial-quickstart-scikitlearn.rst:49 +#: ../../source/tutorial-quickstart-scikitlearn.rst:50 msgid ":code:`set_initial_params()`" msgstr ":code:`set_initial_params()`" @@ -25514,7 +25653,7 @@ msgstr "" msgid "return the model weight as a list of NumPy ndarrays" msgstr "renvoie le poids du modèle sous la forme d'une liste de ndarrays NumPy" -#: ../../source/tutorial-quickstart-scikitlearn.rst:120 +#: ../../source/tutorial-quickstart-scikitlearn.rst:121 msgid ":code:`set_parameters` (optional)" msgstr ":code:`set_parameters` (optionnel)" @@ -25644,7 +25783,6 @@ msgstr "" " commencer par lancer le serveur :" #: ../../source/tutorial-quickstart-scikitlearn.rst:239 -#: ../../source/tutorial-quickstart-tensorflow.rst:122 #: ../../source/tutorial-quickstart-xgboost.rst:575 msgid "" "Once the server is running we can start the clients in different " @@ -25655,7 +25793,6 @@ msgstr "" "premier client :" #: ../../source/tutorial-quickstart-scikitlearn.rst:246 -#: ../../source/tutorial-quickstart-tensorflow.rst:129 #: ../../source/tutorial-quickstart-xgboost.rst:582 msgid "Open another terminal and start the second client:" msgstr "Ouvre un autre terminal et démarre le deuxième client :" @@ -25688,144 +25825,118 @@ msgstr "" #: ../../source/tutorial-quickstart-tensorflow.rst:-1 msgid "" "Check out this Federated Learning quickstart tutorial for using Flower " -"with TensorFlow to train a MobilNetV2 model on CIFAR-10." +"with TensorFlow to train a CNN model on CIFAR-10." msgstr "" #: ../../source/tutorial-quickstart-tensorflow.rst:5 msgid "Quickstart TensorFlow" msgstr "Démarrage rapide de TensorFlow" -#: ../../source/tutorial-quickstart-tensorflow.rst:13 -msgid "Let's build a federated learning system in less than 20 lines of code!" -msgstr "" -"Construisons un système d'apprentissage fédéré en moins de 20 lignes de " -"code !" - -#: ../../source/tutorial-quickstart-tensorflow.rst:15 -msgid "Before Flower can be imported we have to install it:" -msgstr "Avant de pouvoir importer une fleur, nous devons l'installer :" - -#: ../../source/tutorial-quickstart-tensorflow.rst:21 +#: ../../source/tutorial-quickstart-tensorflow.rst:7 +#, fuzzy msgid "" -"Since we want to use the Keras API of TensorFlow (TF), we have to install" -" TF as well:" -msgstr "" -"Comme nous voulons utiliser l'API Keras de TensorFlow (TF), nous devons " -"également installer TF :" - -#: ../../source/tutorial-quickstart-tensorflow.rst:31 -msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" +"In this tutorial we will learn how to train a Convolutional Neural " +"Network on CIFAR-10 using the Flower framework and TensorFlow. First of " +"all, it is recommended to create a virtual environment and run everything" +" within a :doc:`virtualenv `." msgstr "" -"Ensuite, dans un fichier appelé :code:`client.py`, importe Flower et " -"TensorFlow :" +"Tout d'abord, il est recommandé de créer un environnement virtuel et de " +"tout exécuter au sein d'un `virtualenv `_." -#: ../../source/tutorial-quickstart-tensorflow.rst:38 +#: ../../source/tutorial-quickstart-tensorflow.rst:13 msgid "" -"We use the Keras utilities of TF to load CIFAR10, a popular colored image" -" classification dataset for machine learning. 
The call to " -":code:`tf.keras.datasets.cifar10.load_data()` downloads CIFAR10, caches " -"it locally, and then returns the entire training and test set as NumPy " -"ndarrays." +"Let's use `flwr new` to create a complete Flower+TensorFlow project. It " +"will generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." msgstr "" -"Nous utilisons les utilitaires Keras de TF pour charger CIFAR10, un " -"ensemble de données de classification d'images colorées populaire pour " -"l'apprentissage automatique. L'appel à " -":code:`tf.keras.datasets.cifar10.load_data()` télécharge CIFAR10, le met " -"en cache localement, puis renvoie l'ensemble d'entraînement et de test " -"sous forme de NumPy ndarrays." -#: ../../source/tutorial-quickstart-tensorflow.rst:47 +#: ../../source/tutorial-quickstart-tensorflow.rst:28 msgid "" -"Next, we need a model. For the purpose of this tutorial, we use " -"MobilNetV2 with 10 output classes:" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``TensorFlow``), give a name to your project," +" and type in your developer name:" msgstr "" -"Ensuite, nous avons besoin d'un modèle. Pour les besoins de ce tutoriel, " -"nous utilisons MobilNetV2 avec 10 classes de sortie :" -#: ../../source/tutorial-quickstart-tensorflow.rst:54 +#: ../../source/tutorial-quickstart-tensorflow.rst:118 msgid "" -"The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to train the neural network we defined earlier)." +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create the ``NumPy`` arrays that" +" correspond to their data partition." msgstr "" -"Le serveur Flower interagit avec les clients par le biais d'une interface" -" appelée :code:`Client`. Lorsque le serveur sélectionne un client " -"particulier pour la formation, il envoie des instructions de formation " -"sur le réseau. Le client reçoit ces instructions et appelle l'une des " -"méthodes :code:`Client` pour exécuter ton code (c'est-à-dire pour former " -"le réseau neuronal que nous avons défini plus tôt)." -#: ../../source/tutorial-quickstart-tensorflow.rst:60 +#: ../../source/tutorial-quickstart-tensorflow.rst:147 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses Keras. The :code:`NumPyClient` interface defines three " -"methods which can be implemented in the following way:" +"Next, we need a model. We defined a simple Convolutional Neural Network " +"(CNN), but feel free to replace it with a more sophisticated model if " +"you'd like:" msgstr "" -"Flower fournit une classe de commodité appelée :code:`NumPyClient` qui " -"facilite la mise en œuvre de l'interface :code:`Client` lorsque ta charge" -" de travail utilise Keras. 
L'interface :code:`NumPyClient` définit trois " -"méthodes qui peuvent être mises en œuvre de la manière suivante :" -#: ../../source/tutorial-quickstart-tensorflow.rst:82 +#: ../../source/tutorial-quickstart-tensorflow.rst:178 msgid "" -"We can now create an instance of our class :code:`CifarClient` and add " -"one line to actually run this client:" +"With `TensorFlow`, we can use the built-in ``get_weights()`` and " +"``set_weights()`` functions, which simplifies the implementation with " +"`Flower`. The rest of the functionality in the ClientApp is directly " +"inspired by the centralized case. The ``fit()`` method in the client " +"trains the model using the local dataset. Similarly, the ``evaluate()`` " +"method is used to evaluate the model received on a held-out validation " +"set that the client might have:" msgstr "" -"Nous pouvons maintenant créer une instance de notre classe " -":code:`CifarClient` et ajouter une ligne pour exécuter ce client :" -#: ../../source/tutorial-quickstart-tensorflow.rst:90 -#, fuzzy +#: ../../source/tutorial-quickstart-tensorflow.rst:212 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " -"implement a client of type :code:`NumPyClient` you'll need to first call " -"its :code:`to_client()` method. The string :code:`\"[::]:8080\"` tells " -"the client which server to connect to. In our case we can run the server " -"and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparameters defined in your " +"``pyproject.toml`` to configure the run. For example, in this tutorial we" +" access the `local-epochs` setting to control the number of epochs a " +"``ClientApp`` will perform when running the ``fit()`` method, in addition" +" to `batch-size`. You could define additional hyperparameters in " +"``pyproject.toml`` and access them here." msgstr "" -"C'est tout pour le client. Il nous suffit d'implémenter :code:`Client` ou" -" :code:`NumPyClient` et d'appeler :code:`fl.client.start_client()`. La " -"chaîne :code:`\"[: :]:8080\"` indique au client à quel serveur se " -"connecter. Dans notre cas, nous pouvons exécuter le serveur et le client " -"sur la même machine, c'est pourquoi nous utilisons :code:`\"[: " -":]:8080\"`. Si nous exécutons une charge de travail véritablement fédérée" -" avec le serveur et les clients fonctionnant sur des machines " -"différentes, tout ce qui doit changer est l'adresse " -":code:`server_address` vers laquelle nous dirigeons le client." - -#: ../../source/tutorial-quickstart-tensorflow.rst:135 -msgid "Each client will have its own dataset." -msgstr "Chaque client aura son propre ensemble de données." -#: ../../source/tutorial-quickstart-tensorflow.rst:137 +#: ../../source/tutorial-quickstart-tensorflow.rst:247 msgid "" -"You should now see how the training does in the very first terminal (the " -"one that started the server):" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. 
In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will serve as " +"the global model to federate." msgstr "" -"Tu devrais maintenant voir comment la formation se déroule dans le tout " -"premier terminal (celui qui a démarré le serveur) :" -#: ../../source/tutorial-quickstart-tensorflow.rst:169 +#: ../../source/tutorial-quickstart-tensorflow.rst:284 #, fuzzy msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this can be found in :code:`examples" -"/quickstart-tensorflow/client.py`." +"Check the source code of the extended version of this tutorial in " +"|quickstart_tf_link|_ in the Flower GitHub repository." msgstr "" "Félicitations ! Tu as réussi à construire et à faire fonctionner ton " -"premier système d'apprentissage fédéré. Le `code source complet " +"premier système d'apprentissage fédéré. Le code source complet " "`_ pour cela se trouve dans :code:`examples" -"/quickstart-tensorflow/client.py`." +"mxnet/client.py>`_ de cet exemple se trouve dans :code:`examples" +"/quickstart-mxnet`." + +#: ../../source/tutorial-quickstart-tensorflow.rst:299 +msgid "" +"The video shown below shows how to setup a TensorFlow + Flower project " +"using our previously recommended APIs. A new video tutorial will be " +"released that shows the new APIs (as the content above does)" +msgstr "" #: ../../source/tutorial-quickstart-xgboost.rst:-1 msgid "" @@ -28377,7 +28488,7 @@ msgstr "" "chose d'autre, comme la régression linéaire classique." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 -msgid "|e5918c1c06a4434bbe4bf49235e40059|" +msgid "|e87b69b2ada74ea49412df16f4a0b9cc|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 @@ -28396,7 +28507,7 @@ msgstr "" " Go." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 -msgid "|c0165741bd1944f09ec55ce49032377d|" +msgid "|33cacb7d985c4906b348515c1a5cd993|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 @@ -28427,7 +28538,7 @@ msgstr "" "chanson." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 -msgid "|0a0ac9427ac7487b8e52d75ed514f04e|" +msgid "|cc080a555947492fa66131dc3a967603|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 @@ -28448,7 +28559,7 @@ msgstr "" " données pour la même tâche." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 -msgid "|5defee3ea4ca40d99fcd3e4ea045be25|" +msgid "|085c3e0fb8664c6aa06246636524b20b|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 @@ -28469,7 +28580,7 @@ msgstr "" "cloud." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 -msgid "|74f26ca701254d3db57d7899bd91eb55|" +msgid "|bfe69c74e48c45d49b50251c38c2a019|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 @@ -28490,7 +28601,7 @@ msgstr "" "appuyés." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 -msgid "|bda79f21f8154258a40e5766b2634ad7|" +msgid "|ebbecd651f0348d99c6511ea859bf4ca|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 @@ -28515,7 +28626,7 @@ msgstr "" " sur un serveur centralisé." 
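Similarly, the ``server_fn()``/``ServerApp`` construction described above can be sketched as follows. ``load_model()`` and the run-config key names are illustrative assumptions, not the exact helpers generated by ``flwr new``; the key point is that a freshly initialized model is serialized and handed to ``FedAvg`` as the starting global model:

    from flwr.common import Context, ndarrays_to_parameters
    from flwr.server import ServerApp, ServerAppComponents, ServerConfig
    from flwr.server.strategy import FedAvg

    # Assumed helper returning the Keras model described in the tutorial text.
    from task import load_model


    def server_fn(context: Context):
        # Read run-time options defined under [tool.flwr.app.config] in pyproject.toml.
        num_rounds = context.run_config["num-server-rounds"]

        # Use a freshly initialized model as the starting global model.
        parameters = ndarrays_to_parameters(load_model().get_weights())

        strategy = FedAvg(initial_parameters=parameters)
        config = ServerConfig(num_rounds=num_rounds)
        return ServerAppComponents(strategy=strategy, config=config)


    app = ServerApp(server_fn=server_fn)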
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 -msgid "|89d30862e62e4f9989e193483a08680a|" +msgid "|163117eb654a4273babba413cf8065f5|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 @@ -28534,7 +28645,7 @@ msgstr "" "suffisantes pour former un bon modèle." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 -msgid "|77e9918671c54b4f86e01369c0785ce8|" +msgid "|452ac3ba453b4cd1be27be1ba7560d64|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 @@ -28756,7 +28867,7 @@ msgstr "" "partir d'un point de contrôle précédemment sauvegardé." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 -msgid "|7e4ccef37cc94148a067107b34eb7447|" +msgid "|f403fcd69e4e44409627e748b404c086|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 @@ -28791,7 +28902,7 @@ msgstr "" "rendements décroissants." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 -msgid "|28e47e4cded14479a0846c8e5f22c872|" +msgid "|4b00fe63870145968f8443619a792a42|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 @@ -28824,7 +28935,7 @@ msgstr "" "données locales, ou même de quelques étapes (mini-batchs)." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 -msgid "|4b8c5d1afa144294b76ffc76e4658a38|" +msgid "|368378731066486fa4397e89bc6b870c|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 @@ -28855,7 +28966,7 @@ msgstr "" " l'entraînement local." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 -msgid "|9dbdb3a0f6cb4a129fac863eaa414c17|" +msgid "|a66aa83d85bf4ffba7ed660b718066da|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 @@ -28914,7 +29025,7 @@ msgstr "" "times as much as each of the 100 examples." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 -msgid "|81749d0ac0834c36a83bd38f433fea31|" +msgid "|82324b9af72a4582a81839d55caab767|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 @@ -29057,7 +29168,7 @@ msgstr "" "quel cadre de ML et n'importe quel langage de programmation." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 -msgid "|ed9aae51da70428eab7eef32f21e819e|" +msgid "|fbf2da0da3cc4f8ab3b3eff852d80c41|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 @@ -36805,3 +36916,618 @@ msgstr "" #~ msgid "|c00bf2750bc24d229737a0fe1395f0fc|" #~ msgstr "" +#~ msgid "run\\_client\\_app" +#~ msgstr "" + +#~ msgid "run\\_supernode" +#~ msgstr "flower-superlink" + +#~ msgid "Retrieve the corresponding layout by the string key." +#~ msgstr "" + +#~ msgid "" +#~ "When there isn't an exact match, " +#~ "all the existing keys in the " +#~ "layout map will be treated as a" +#~ " regex and map against the input " +#~ "key again. The first match will be" +#~ " returned, based on the key insertion" +#~ " order. Return None if there isn't" +#~ " any match found." +#~ msgstr "" + +#~ msgid "the string key as the query for the layout." +#~ msgstr "" + +#~ msgid "Corresponding layout based on the query." +#~ msgstr "" + +#~ msgid "run\\_server\\_app" +#~ msgstr "" + +#~ msgid "run\\_superlink" +#~ msgstr "flower-superlink" + +#~ msgid "" +#~ ":py:obj:`start_simulation `\\" +#~ " \\(\\*\\, client\\_fn\\, num\\_clients\\)" +#~ msgstr "" + +#~ msgid "" +#~ "A function creating `Client` instances. 
" +#~ "The function must have the signature " +#~ "`client_fn(context: Context). It should return" +#~ " a single client instance of type " +#~ "`Client`. Note that the created client" +#~ " instances are ephemeral and will " +#~ "often be destroyed after a single " +#~ "method invocation. Since client instances " +#~ "are not long-lived, they should " +#~ "not attempt to carry state over " +#~ "method invocations. Any state required " +#~ "by the instance (model, dataset, " +#~ "hyperparameters, ...) should be (re-)created" +#~ " in either the call to `client_fn`" +#~ " or the call to any of the " +#~ "client methods (e.g., load evaluation " +#~ "data in the `evaluate` method itself)." +#~ msgstr "" + +#~ msgid "The total number of clients in this simulation." +#~ msgstr "" + +#~ msgid "" +#~ "UNSUPPORTED, WILL BE REMOVED. USE " +#~ "`num_clients` INSTEAD. List `client_id`s for" +#~ " each client. This is only required" +#~ " if `num_clients` is not set. Setting" +#~ " both `num_clients` and `clients_ids` with" +#~ " `len(clients_ids)` not equal to " +#~ "`num_clients` generates an error. Using " +#~ "this argument will raise an error." +#~ msgstr "" + +#~ msgid "" +#~ "CPU and GPU resources for a single" +#~ " client. Supported keys are `num_cpus` " +#~ "and `num_gpus`. To understand the GPU" +#~ " utilization caused by `num_gpus`, as " +#~ "well as using custom resources, please" +#~ " consult the Ray documentation." +#~ msgstr "" + +#~ msgid "" +#~ "Optionally specify the type of actor " +#~ "to use. The actor object, which " +#~ "persists throughout the simulation, will " +#~ "be the process in charge of " +#~ "executing a ClientApp wrapping input " +#~ "argument `client_fn`." +#~ msgstr "" + +#~ msgid "" +#~ "If you want to create your own " +#~ "Actor classes, you might need to " +#~ "pass some input argument. You can " +#~ "use this dictionary for such purpose." +#~ msgstr "" + +#~ msgid "" +#~ "(default: \"DEFAULT\") Optional string " +#~ "(\"DEFAULT\" or \"SPREAD\") for the VCE" +#~ " to choose in which node the " +#~ "actor is placed. If you are an " +#~ "advanced user needed more control you" +#~ " can use lower-level scheduling " +#~ "strategies to pin actors to specific " +#~ "compute nodes (e.g. via " +#~ "NodeAffinitySchedulingStrategy). Please note this" +#~ " is an advanced feature. For all " +#~ "details, please refer to the Ray " +#~ "documentation: https://docs.ray.io/en/latest/ray-" +#~ "core/scheduling/index.html" +#~ msgstr "" + +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with FastAI to train a vision " +#~ "model on CIFAR-10." +#~ msgstr "" + +#~ msgid "Let's build a federated learning system using fastai and Flower!" +#~ msgstr "" +#~ "Construisons un système d'apprentissage fédéré" +#~ " en utilisant fastai et Flower !" + +#~ msgid "" +#~ "Please refer to the `full code " +#~ "example `_ to learn more." +#~ msgstr "" +#~ "Réfère-toi à l'exemple de code " +#~ "complet `_ pour en savoir plus." + +#~ msgid "" +#~ "Check out this Federating Learning " +#~ "quickstart tutorial for using Flower " +#~ "with HuggingFace Transformers in order " +#~ "to fine-tune an LLM." +#~ msgstr "" + +#~ msgid "" +#~ "Let's build a federated learning system" +#~ " using Hugging Face Transformers and " +#~ "Flower!" +#~ msgstr "" +#~ "Construisons un système d'apprentissage fédéré" +#~ " à l'aide des transformateurs Hugging " +#~ "Face et de Flower !" 
+ +#~ msgid "Dependencies" +#~ msgstr "Dépendances" + +#~ msgid "" +#~ "To follow along this tutorial you " +#~ "will need to install the following " +#~ "packages: :code:`datasets`, :code:`evaluate`, " +#~ ":code:`flwr`, :code:`torch`, and " +#~ ":code:`transformers`. This can be done " +#~ "using :code:`pip`:" +#~ msgstr "" +#~ "Pour suivre ce tutoriel, tu devras " +#~ "installer les paquets suivants : " +#~ ":code:`datasets`, :code:`evaluate`, :code:`flwr`, " +#~ ":code:`torch`, et :code:`transformers`. Cela " +#~ "peut être fait en utilisant :code:`pip`" +#~ " :" + +#~ msgid "Standard Hugging Face workflow" +#~ msgstr "Flux de travail standard pour le visage" + +#~ msgid "Handling the data" +#~ msgstr "Traitement des données" + +#~ msgid "" +#~ "To fetch the IMDB dataset, we will" +#~ " use Hugging Face's :code:`datasets` " +#~ "library. We then need to tokenize " +#~ "the data and create :code:`PyTorch` " +#~ "dataloaders, this is all done in " +#~ "the :code:`load_data` function:" +#~ msgstr "" +#~ "Pour récupérer le jeu de données " +#~ "IMDB, nous utiliserons la bibliothèque " +#~ ":code:`datasets` de Hugging Face. Nous " +#~ "devons ensuite tokeniser les données et" +#~ " créer des :code:`PyTorch` dataloaders, ce" +#~ " qui est fait dans la fonction " +#~ ":code:`load_data` :" + +#~ msgid "Training and testing the model" +#~ msgstr "Former et tester le modèle" + +#~ msgid "" +#~ "Once we have a way of creating " +#~ "our trainloader and testloader, we can" +#~ " take care of the training and " +#~ "testing. This is very similar to " +#~ "any :code:`PyTorch` training or testing " +#~ "loop:" +#~ msgstr "" +#~ "Une fois que nous avons trouvé un" +#~ " moyen de créer notre trainloader et" +#~ " notre testloader, nous pouvons nous " +#~ "occuper de l'entraînement et du test." +#~ " C'est très similaire à n'importe " +#~ "quelle boucle d'entraînement ou de test" +#~ " :code:`PyTorch` :" + +#~ msgid "Creating the model itself" +#~ msgstr "Créer le modèle lui-même" + +#~ msgid "" +#~ "To create the model itself, we " +#~ "will just load the pre-trained " +#~ "distillBERT model using Hugging Face’s " +#~ ":code:`AutoModelForSequenceClassification` :" +#~ msgstr "" +#~ "Pour créer le modèle lui-même, " +#~ "nous allons simplement charger le modèle" +#~ " distillBERT pré-entraîné en utilisant le" +#~ " :code:`AutoModelForSequenceClassification` de Hugging" +#~ " Face :" + +#~ msgid "Creating the IMDBClient" +#~ msgstr "Création du client IMDBC" + +#~ msgid "" +#~ "To federate our example to multiple " +#~ "clients, we first need to write " +#~ "our Flower client class (inheriting from" +#~ " :code:`flwr.client.NumPyClient`). This is very" +#~ " easy, as our model is a " +#~ "standard :code:`PyTorch` model:" +#~ msgstr "" +#~ "Pour fédérer notre exemple à plusieurs" +#~ " clients, nous devons d'abord écrire " +#~ "notre classe de client Flower (héritant" +#~ " de :code:`flwr.client.NumPyClient`). C'est très" +#~ " facile, car notre modèle est un " +#~ "modèle :code:`PyTorch` standard :" + +#~ msgid "" +#~ "The :code:`get_parameters` function lets the" +#~ " server get the client's parameters. " +#~ "Inversely, the :code:`set_parameters` function " +#~ "allows the server to send its " +#~ "parameters to the client. Finally, the" +#~ " :code:`fit` function trains the model " +#~ "locally for the client, and the " +#~ ":code:`evaluate` function tests the model " +#~ "locally and returns the relevant " +#~ "metrics." 
+#~ msgstr "" +#~ "La fonction :code:`get_parameters` permet au" +#~ " serveur d'obtenir les paramètres du " +#~ "client. Inversement, la fonction " +#~ ":code:`set_parameters` permet au serveur " +#~ "d'envoyer ses paramètres au client. " +#~ "Enfin, la fonction :code:`fit` forme le" +#~ " modèle localement pour le client, et" +#~ " la fonction :code:`evaluate` teste le " +#~ "modèle localement et renvoie les mesures" +#~ " correspondantes." + +#~ msgid "Starting the server" +#~ msgstr "Démarrer le serveur" + +#~ msgid "" +#~ "Now that we have a way to " +#~ "instantiate clients, we need to create" +#~ " our server in order to aggregate " +#~ "the results. Using Flower, this can " +#~ "be done very easily by first " +#~ "choosing a strategy (here, we are " +#~ "using :code:`FedAvg`, which will define " +#~ "the global weights as the average " +#~ "of all the clients' weights at " +#~ "each round) and then using the " +#~ ":code:`flwr.server.start_server` function:" +#~ msgstr "" +#~ "Maintenant que nous avons un moyen " +#~ "d'instancier les clients, nous devons " +#~ "créer notre serveur afin d'agréger les" +#~ " résultats. Avec Flower, cela peut " +#~ "être fait très facilement en choisissant" +#~ " d'abord une stratégie (ici, nous " +#~ "utilisons :code:`FedAvg`, qui définira les " +#~ "poids globaux comme la moyenne des " +#~ "poids de tous les clients à chaque" +#~ " tour) et en utilisant ensuite la " +#~ "fonction :code:`flwr.server.start_server` :" + +#~ msgid "" +#~ "The :code:`weighted_average` function is there" +#~ " to provide a way to aggregate " +#~ "the metrics distributed amongst the " +#~ "clients (basically this allows us to " +#~ "display a nice average accuracy and " +#~ "loss for every round)." +#~ msgstr "" +#~ "La fonction :code:`weighted_average` est là" +#~ " pour fournir un moyen d'agréger les" +#~ " mesures réparties entre les clients " +#~ "(en gros, cela nous permet d'afficher" +#~ " une belle moyenne de précision et" +#~ " de perte pour chaque tour)." + +#~ msgid "Putting everything together" +#~ msgstr "Tout assembler" + +#~ msgid "We can now start client instances using:" +#~ msgstr "" +#~ "Nous pouvons maintenant démarrer des " +#~ "instances de clients en utilisant :" + +#~ msgid "" +#~ "And they will be able to connect" +#~ " to the server and start the " +#~ "federated training." +#~ msgstr "" +#~ "Et ils pourront se connecter au " +#~ "serveur et démarrer la formation " +#~ "fédérée." + +#~ msgid "" +#~ "If you want to check out " +#~ "everything put together, you should " +#~ "check out the `full code example " +#~ "`_ ." +#~ msgstr "" +#~ "Si tu veux voir tout ce qui " +#~ "est mis ensemble, tu devrais consulter" +#~ " l'exemple de code complet : " +#~ "[https://github.com/adap/flower/tree/main/examples/quickstart-" +#~ "huggingface](https://github.com/adap/flower/tree/main/examples" +#~ "/quickstart-huggingface)." + +#~ msgid "" +#~ "Of course, this is a very basic" +#~ " example, and a lot can be " +#~ "added or modified, it was just to" +#~ " showcase how simply we could " +#~ "federate a Hugging Face workflow using" +#~ " Flower." +#~ msgstr "" +#~ "Bien sûr, c'est un exemple très " +#~ "basique, et beaucoup de choses peuvent" +#~ " être ajoutées ou modifiées, il " +#~ "s'agissait juste de montrer avec quelle" +#~ " simplicité on pouvait fédérer un " +#~ "flux de travail Hugging Face à " +#~ "l'aide de Flower." 
+ +#~ msgid "" +#~ "Note that in this example we used" +#~ " :code:`PyTorch`, but we could have " +#~ "very well used :code:`TensorFlow`." +#~ msgstr "" +#~ "Notez que dans cet exemple, nous " +#~ "avons utilisé :code:`PyTorch`, mais nous " +#~ "aurions très bien pu utiliser " +#~ ":code:`TensorFlow`." + +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with PyTorch Lightning to train an " +#~ "Auto Encoder model on MNIST." +#~ msgstr "" + +#~ msgid "" +#~ "Let's build a horizontal federated " +#~ "learning system using PyTorch Lightning " +#~ "and Flower!" +#~ msgstr "" +#~ "Construisons un système d'apprentissage fédéré" +#~ " en utilisant PyTorch Lightning et " +#~ "Flower !" + +#~ msgid "" +#~ "Please refer to the `full code " +#~ "example `_ to learn " +#~ "more." +#~ msgstr "" +#~ "Réfère-toi à l'exemple de code " +#~ "complet `_ pour en " +#~ "savoir plus." + +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with TensorFlow to train a MobilNetV2" +#~ " model on CIFAR-10." +#~ msgstr "" + +#~ msgid "Let's build a federated learning system in less than 20 lines of code!" +#~ msgstr "" +#~ "Construisons un système d'apprentissage fédéré" +#~ " en moins de 20 lignes de code" +#~ " !" + +#~ msgid "Before Flower can be imported we have to install it:" +#~ msgstr "Avant de pouvoir importer une fleur, nous devons l'installer :" + +#~ msgid "" +#~ "Since we want to use the Keras " +#~ "API of TensorFlow (TF), we have to" +#~ " install TF as well:" +#~ msgstr "" +#~ "Comme nous voulons utiliser l'API Keras" +#~ " de TensorFlow (TF), nous devons " +#~ "également installer TF :" + +#~ msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" +#~ msgstr "" +#~ "Ensuite, dans un fichier appelé " +#~ ":code:`client.py`, importe Flower et " +#~ "TensorFlow :" + +#~ msgid "" +#~ "We use the Keras utilities of TF" +#~ " to load CIFAR10, a popular colored" +#~ " image classification dataset for machine" +#~ " learning. The call to " +#~ ":code:`tf.keras.datasets.cifar10.load_data()` downloads " +#~ "CIFAR10, caches it locally, and then " +#~ "returns the entire training and test " +#~ "set as NumPy ndarrays." +#~ msgstr "" +#~ "Nous utilisons les utilitaires Keras de" +#~ " TF pour charger CIFAR10, un ensemble" +#~ " de données de classification d'images " +#~ "colorées populaire pour l'apprentissage " +#~ "automatique. L'appel à " +#~ ":code:`tf.keras.datasets.cifar10.load_data()` télécharge " +#~ "CIFAR10, le met en cache localement, " +#~ "puis renvoie l'ensemble d'entraînement et " +#~ "de test sous forme de NumPy " +#~ "ndarrays." + +#~ msgid "" +#~ "Next, we need a model. For the " +#~ "purpose of this tutorial, we use " +#~ "MobilNetV2 with 10 output classes:" +#~ msgstr "" +#~ "Ensuite, nous avons besoin d'un modèle." +#~ " Pour les besoins de ce tutoriel, " +#~ "nous utilisons MobilNetV2 avec 10 " +#~ "classes de sortie :" + +#~ msgid "" +#~ "The Flower server interacts with clients" +#~ " through an interface called " +#~ ":code:`Client`. When the server selects " +#~ "a particular client for training, it " +#~ "sends training instructions over the " +#~ "network. The client receives those " +#~ "instructions and calls one of the " +#~ ":code:`Client` methods to run your code" +#~ " (i.e., to train the neural network" +#~ " we defined earlier)." 
+#~ msgstr "" +#~ "Le serveur Flower interagit avec les " +#~ "clients par le biais d'une interface " +#~ "appelée :code:`Client`. Lorsque le serveur " +#~ "sélectionne un client particulier pour " +#~ "la formation, il envoie des instructions" +#~ " de formation sur le réseau. Le " +#~ "client reçoit ces instructions et " +#~ "appelle l'une des méthodes :code:`Client` " +#~ "pour exécuter ton code (c'est-à-dire " +#~ "pour former le réseau neuronal que " +#~ "nous avons défini plus tôt)." + +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses Keras." +#~ " The :code:`NumPyClient` interface defines " +#~ "three methods which can be implemented" +#~ " in the following way:" +#~ msgstr "" +#~ "Flower fournit une classe de commodité" +#~ " appelée :code:`NumPyClient` qui facilite " +#~ "la mise en œuvre de l'interface " +#~ ":code:`Client` lorsque ta charge de " +#~ "travail utilise Keras. L'interface " +#~ ":code:`NumPyClient` définit trois méthodes qui" +#~ " peuvent être mises en œuvre de " +#~ "la manière suivante :" + +#~ msgid "" +#~ "We can now create an instance of" +#~ " our class :code:`CifarClient` and add " +#~ "one line to actually run this " +#~ "client:" +#~ msgstr "" +#~ "Nous pouvons maintenant créer une " +#~ "instance de notre classe :code:`CifarClient`" +#~ " et ajouter une ligne pour exécuter" +#~ " ce client :" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. If you implement" +#~ " a client of type :code:`NumPyClient` " +#~ "you'll need to first call its " +#~ ":code:`to_client()` method. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." +#~ msgstr "" +#~ "C'est tout pour le client. Il nous" +#~ " suffit d'implémenter :code:`Client` ou " +#~ ":code:`NumPyClient` et d'appeler " +#~ ":code:`fl.client.start_client()`. La chaîne " +#~ ":code:`\"[: :]:8080\"` indique au client " +#~ "à quel serveur se connecter. Dans " +#~ "notre cas, nous pouvons exécuter le " +#~ "serveur et le client sur la même" +#~ " machine, c'est pourquoi nous utilisons " +#~ ":code:`\"[: :]:8080\"`. Si nous exécutons " +#~ "une charge de travail véritablement " +#~ "fédérée avec le serveur et les " +#~ "clients fonctionnant sur des machines " +#~ "différentes, tout ce qui doit changer" +#~ " est l'adresse :code:`server_address` vers " +#~ "laquelle nous dirigeons le client." + +#~ msgid "Each client will have its own dataset." +#~ msgstr "Chaque client aura son propre ensemble de données." + +#~ msgid "" +#~ "You should now see how the " +#~ "training does in the very first " +#~ "terminal (the one that started the " +#~ "server):" +#~ msgstr "" +#~ "Tu devrais maintenant voir comment la" +#~ " formation se déroule dans le tout" +#~ " premier terminal (celui qui a " +#~ "démarré le serveur) :" + +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. 
The full `source code " +#~ "`_ for this can be " +#~ "found in :code:`examples/quickstart-" +#~ "tensorflow/client.py`." +#~ msgstr "" +#~ "Félicitations ! Tu as réussi à " +#~ "construire et à faire fonctionner ton" +#~ " premier système d'apprentissage fédéré. Le" +#~ " `code source complet " +#~ "`_ pour cela se trouve" +#~ " dans :code:`examples/quickstart-tensorflow/client.py`." + +#~ msgid "|e5918c1c06a4434bbe4bf49235e40059|" +#~ msgstr "" + +#~ msgid "|c0165741bd1944f09ec55ce49032377d|" +#~ msgstr "" + +#~ msgid "|0a0ac9427ac7487b8e52d75ed514f04e|" +#~ msgstr "" + +#~ msgid "|5defee3ea4ca40d99fcd3e4ea045be25|" +#~ msgstr "" + +#~ msgid "|74f26ca701254d3db57d7899bd91eb55|" +#~ msgstr "" + +#~ msgid "|bda79f21f8154258a40e5766b2634ad7|" +#~ msgstr "" + +#~ msgid "|89d30862e62e4f9989e193483a08680a|" +#~ msgstr "" + +#~ msgid "|77e9918671c54b4f86e01369c0785ce8|" +#~ msgstr "" + +#~ msgid "|7e4ccef37cc94148a067107b34eb7447|" +#~ msgstr "" + +#~ msgid "|28e47e4cded14479a0846c8e5f22c872|" +#~ msgstr "" + +#~ msgid "|4b8c5d1afa144294b76ffc76e4658a38|" +#~ msgstr "" + +#~ msgid "|9dbdb3a0f6cb4a129fac863eaa414c17|" +#~ msgstr "" + +#~ msgid "|81749d0ac0834c36a83bd38f433fea31|" +#~ msgstr "" + +#~ msgid "|ed9aae51da70428eab7eef32f21e819e|" +#~ msgstr "" + diff --git a/doc/locales/ko/LC_MESSAGES/framework-docs.po b/doc/locales/ko/LC_MESSAGES/framework-docs.po index db201f613126..4c738e16b434 100644 --- a/doc/locales/ko/LC_MESSAGES/framework-docs.po +++ b/doc/locales/ko/LC_MESSAGES/framework-docs.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: Flower main\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2024-09-15 09:09+0200\n" +"POT-Creation-Date: 2024-09-24 00:29+0000\n" "PO-Revision-Date: 2024-08-23 13:09+0000\n" "Last-Translator: Seulki Yun \n" "Language: ko\n" @@ -17,7 +17,7 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.15.0\n" +"Generated-By: Babel 2.16.0\n" #: ../../source/contributor-explanation-public-and-private-apis.rst:2 msgid "Public and private APIs" @@ -1468,7 +1468,7 @@ msgstr "" msgid "Setting up the repository" msgstr "레포지토리 설정하기" -#: ../../source/contributor-tutorial-contribute-on-github.rst:12 +#: ../../source/contributor-tutorial-contribute-on-github.rst:21 msgid "**Create a GitHub account and setup Git**" msgstr "**GitHub 계정을 만들고 Git을 설정합니다**" @@ -1509,7 +1509,7 @@ msgstr "" "일반적인 Git 및 GitHub 워크플로우의 기본 개념은 다음과 같이 요약됩니다. GitHub의 원격 레포지토리에서 코드를 " "다운로드하고 로컬에서 변경한 후 Git을 사용하여 추적한 다음 새 기록을 다시 GitHub에 업로드하는 것입니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:23 +#: ../../source/contributor-tutorial-contribute-on-github.rst:32 msgid "**Forking the Flower repository**" msgstr "**Flower 레포지토리 포크하기**" @@ -1534,7 +1534,7 @@ msgstr "" "원하는 경우 이름을 변경할 수 있지만, 이 버전의 Flower는 자신의 계정(즉, 자신의 리포지토리 목록)에 위치하게 되므로 변경할" " 필요는 없습니다. 만들기가 완료되면 왼쪽 상단에Flower 버전이 표시되는 것을 볼 수 있습니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:34 +#: ../../source/contributor-tutorial-contribute-on-github.rst:47 msgid "**Cloning your forked repository**" msgstr "**포크된 레포지토리 클론하기**" @@ -1560,7 +1560,7 @@ msgid "" "it) folder in the current working directory." msgstr "현재 작업 디렉터리에``flower/``(또는 포크 이름을 변경한 경우 포크 이름) 폴더가 생성됩니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:49 +#: ../../source/contributor-tutorial-contribute-on-github.rst:66 msgid "**Add origin**" msgstr "**origin 추가**" @@ -1584,7 +1584,7 @@ msgid "" "terminal:" msgstr "\\ 이 복사되면 터미널에 다음 명령을 입력하면 됩니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:68 +#: ../../source/contributor-tutorial-contribute-on-github.rst:90 msgid "**Add upstream**" msgstr "**Upstream 추가하기**" @@ -1645,7 +1645,7 @@ msgstr "변경하기 전에 레포지토리를 최신 상태로 유지하세요: msgid "And with Flower's repository:" msgstr "Flower의 레포지토리도 있습니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:114 +#: ../../source/contributor-tutorial-contribute-on-github.rst:122 msgid "**Create a new branch**" msgstr "**새 브랜치 만들기**" @@ -1662,7 +1662,7 @@ msgid "" "directory:" msgstr "이렇게 하려면 레포지토리 디렉토리에서 다음 명령을 실행하면 됩니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:124 +#: ../../source/contributor-tutorial-contribute-on-github.rst:125 msgid "**Make changes**" msgstr "**변경하기**" @@ -1670,7 +1670,7 @@ msgstr "**변경하기**" msgid "Write great code and create wonderful changes using your favorite editor!" msgstr "선호하는 편집기를 사용하여 멋진 코드를 작성하고 훌륭한 변화를 만들어 보세요!" -#: ../../source/contributor-tutorial-contribute-on-github.rst:127 +#: ../../source/contributor-tutorial-contribute-on-github.rst:138 msgid "**Test and format your code**" msgstr "**코드 테스트 및 서식 지정**" @@ -1687,7 +1687,7 @@ msgstr "" msgid "To do so, we have written a few scripts that you can execute:" msgstr "이를 위해 실행할 수 있는 몇 가지 스크립트를 작성했습니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:140 +#: ../../source/contributor-tutorial-contribute-on-github.rst:150 msgid "**Stage changes**" msgstr "**변경사항 스테이징**" @@ -1710,7 +1710,7 @@ msgstr "" "마지막 버전(마지막 커밋)과 비교하여 수정된 파일을 확인하고 커밋을 위해 스테이징된 파일을 확인하려면 :code:`git " "status` 명령을 사용하면 됩니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:152 +#: ../../source/contributor-tutorial-contribute-on-github.rst:160 msgid "**Commit changes**" msgstr "**변경사항 커밋**" @@ -1729,7 +1729,7 @@ msgstr "" "커밋의 내용을 다른 사람에게 설명하기 위해 \\가 있습니다. 명령형 스타일로 작성해야 하며 간결해야" " 합니다. 예를 들면 :code:`git commit -m \"Add images to README\"`." -#: ../../source/contributor-tutorial-contribute-on-github.rst:162 +#: ../../source/contributor-tutorial-contribute-on-github.rst:171 msgid "**Push the changes to the fork**" msgstr "**변경 사항을 포크에 푸시**" @@ -1752,7 +1752,7 @@ msgstr "이 작업이 완료되면 변경한 내용으로 포크된 레포지토 msgid "Creating and merging a pull request (PR)" msgstr "pull request(PR) 만들기 및 병합하기" -#: ../../source/contributor-tutorial-contribute-on-github.rst:176 +#: ../../source/contributor-tutorial-contribute-on-github.rst:206 msgid "**Create the PR**" msgstr "**PR 만들기**" @@ -1823,7 +1823,7 @@ msgid "" "anyone, you have the option to create a draft pull request:" msgstr "PR이 아직 검토할 준비가 되지 않았고 다른 사람에게 알리고 싶지 않은 경우 pull request 초안을 만드는 옵션이 있습니다:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:208 +#: ../../source/contributor-tutorial-contribute-on-github.rst:209 msgid "**Making new changes**" msgstr "**new changes 만들기**" @@ -1834,7 +1834,7 @@ msgid "" " associated with the PR." msgstr "PR이 초안으로 열렸든 아니든, PR과 연결된 브랜치를 변경하여 이전과 같은 방식으로 새 커밋을 푸시할 수 있습니다." -#: ../../source/contributor-tutorial-contribute-on-github.rst:211 +#: ../../source/contributor-tutorial-contribute-on-github.rst:231 msgid "**Review the PR**" msgstr "**PR 검토하기**" @@ -1870,7 +1870,7 @@ msgid "" "review." msgstr "모든 대화가 해결되면 검토를 다시 요청할 수 있습니다." 
-#: ../../source/contributor-tutorial-contribute-on-github.rst:233 +#: ../../source/contributor-tutorial-contribute-on-github.rst:251 msgid "**Once the PR is merged**" msgstr "**PR이 병합되면**" @@ -2157,6 +2157,7 @@ msgstr "기여자로 시작하기" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:5 #: ../../source/docker/run-as-subprocess.rst:11 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:15 #: ../../source/docker/tutorial-quickstart-docker-compose.rst:12 #: ../../source/docker/tutorial-quickstart-docker.rst:11 msgid "Prerequisites" @@ -2941,6 +2942,239 @@ msgid "" " the SuperNode to execute the ClientApp as a subprocess:" msgstr "" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:2 +#, fuzzy +msgid "Run Flower Quickstart Examples with Docker Compose" +msgstr "빠른 시작 튜토리얼" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:4 +msgid "" +"Flower provides a set of `quickstart examples " +"`_ to help you get " +"started with the framework. These examples are designed to demonstrate " +"the capabilities of Flower and by default run using the Simulation " +"Engine. This guide demonstrates how to run them using Flower's Deployment" +" Engine via Docker Compose." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:11 +msgid "" +"Some quickstart examples may have limitations or requirements that " +"prevent them from running on every environment. For more information, " +"please see `Limitations`_." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:17 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:14 +#: ../../source/docker/tutorial-quickstart-docker.rst:13 +#, fuzzy +msgid "Before you start, make sure that:" +msgstr "시작하기 전에 Docker daemon이 실행 중인지 확인하세요:" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:19 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:16 +#: ../../source/docker/tutorial-quickstart-docker.rst:15 +msgid "The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:20 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:17 +#: ../../source/docker/tutorial-quickstart-docker.rst:16 +#, fuzzy +msgid "The Docker daemon is running." +msgstr "Docker 데몬이 실행 중인지 확인하십시오." + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:21 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:18 +msgid "Docker Compose is `installed `_." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:24 +#, fuzzy +msgid "Run the Quickstart Example" +msgstr "예시 요청" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:26 +msgid "" +"Clone the quickstart example you like to run. 
For example, ``quickstart-" +"pytorch``:" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:34 +msgid "" +"Download the `compose.yml " +"`_" +" file into the example directory:" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:41 +#, fuzzy +msgid "Build and start the services using the following command:" +msgstr "다음 명령을 실행하여 가상 환경을 활성화합니다:" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:47 +#, fuzzy +msgid "" +"Append the following lines to the end of the ``pyproject.toml`` file and " +"save it:" +msgstr "``pyproject.toml``에 다음 버전 제약 조건을 설정했는지 확인하세요:" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:49 +#: ../../source/docker/tutorial-quickstart-docker.rst:319 +#, fuzzy +msgid "pyproject.toml" +msgstr "또는 ``pyproject.toml``:" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:58 +msgid "" +"You can customize the string that follows ``tool.flwr.federations.`` to " +"fit your needs. However, please note that the string cannot contain a dot" +" (``.``)." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:61 +msgid "" +"In this example, ``local-deployment`` has been used. Just remember to " +"replace ``local-deployment`` with your chosen name in both the " +"``tool.flwr.federations.`` string and the corresponding ``flwr run .`` " +"command." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:65 +#, fuzzy +msgid "Run the example:" +msgstr "전체 코드 예제" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:71 +msgid "Follow the logs of the SuperExec service:" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:77 +msgid "" +"That is all it takes! You can monitor the progress of the run through the" +" logs of the SuperExec." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:80 +msgid "Run a Different Quickstart Example" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:82 +msgid "" +"To run a different quickstart example, such as ``quickstart-tensorflow``," +" first, shut down the Docker Compose services of the current example:" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:89 +msgid "After that, you can repeat the steps above." 
+msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:92 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:98 +#, fuzzy +msgid "Limitations" +msgstr "동기" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:97 +#, fuzzy +msgid "Quickstart Example" +msgstr "빠른 시작" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:99 +#, fuzzy +msgid "quickstart-fastai" +msgstr "빠른 시작 튜토리얼" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:100 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:102 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:110 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:112 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:116 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:118 +#: ../../source/ref-changelog.md:33 ../../source/ref-changelog.md:399 +#: ../../source/ref-changelog.md:676 ../../source/ref-changelog.md:740 +#: ../../source/ref-changelog.md:798 ../../source/ref-changelog.md:867 +#: ../../source/ref-changelog.md:929 +msgid "None" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:101 +#, fuzzy +msgid "quickstart-huggingface" +msgstr "빠른 시작 튜토리얼" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:103 +#, fuzzy +msgid "quickstart-jax" +msgstr "빠른 시작" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:104 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:106 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:120 +msgid "" +"The example has not yet been updated to work with the latest ``flwr`` " +"version." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:105 +#, fuzzy +msgid "quickstart-mlcube" +msgstr "빠른 시작" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:107 +#, fuzzy +msgid "quickstart-mlx" +msgstr "빠른 시작" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:108 +msgid "" +"`Requires to run on macOS with Apple Silicon `_." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:109 +#, fuzzy +msgid "quickstart-monai" +msgstr "빠른 시작" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:111 +#, fuzzy +msgid "quickstart-pandas" +msgstr "빠른 시작 튜토리얼" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:113 +msgid "quickstart-pytorch-lightning" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:114 +msgid "" +"Requires an older pip version that is not supported by the Flower Docker " +"images." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:115 +#, fuzzy +msgid "quickstart-pytorch" +msgstr "빠른 시작 튜토리얼" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:117 +msgid "quickstart-sklearn-tabular" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:119 +#, fuzzy +msgid "quickstart-tabnet" +msgstr "빠른 시작 튜토리얼" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:121 +#, fuzzy +msgid "quickstart-tensorflow" +msgstr "빠른 시작 튜토리얼" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:122 +msgid "Only runs on AMD64." +msgstr "" + #: ../../source/docker/set-environment-variables.rst:2 #, fuzzy msgid "Set Environment Variables" @@ -2972,23 +3206,6 @@ msgid "" " understanding the basic workflow that uses the minimum configurations." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:14 -#: ../../source/docker/tutorial-quickstart-docker.rst:13 -#, fuzzy -msgid "Before you start, make sure that:" -msgstr "시작하기 전에 Docker daemon이 실행 중인지 확인하세요:" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:16 -#: ../../source/docker/tutorial-quickstart-docker.rst:15 -msgid "The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally." -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:17 -#: ../../source/docker/tutorial-quickstart-docker.rst:16 -#, fuzzy -msgid "The Docker daemon is running." -msgstr "Docker 데몬이 실행 중인지 확인하십시오." - #: ../../source/docker/tutorial-quickstart-docker-compose.rst:21 #: ../../source/docker/tutorial-quickstart-docker.rst:19 msgid "Step 1: Set Up" @@ -3419,11 +3636,6 @@ msgstr "" msgid "Add the following lines to the ``pyproject.toml``:" msgstr "``pyproject.toml``에 다음 버전 제약 조건을 설정했는지 확인하세요:" -#: ../../source/docker/tutorial-quickstart-docker.rst:319 -#, fuzzy -msgid "pyproject.toml" -msgstr "또는 ``pyproject.toml``:" - #: ../../source/docker/tutorial-quickstart-docker.rst:326 msgid "Run the ``quickstart-docker`` project by executing the command:" msgstr "" @@ -3472,6 +3684,7 @@ msgstr "" msgid "Remove the containers and the bridge network:" msgstr "" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:401 #: ../../source/docker/tutorial-quickstart-docker.rst:399 #, fuzzy msgid "Where to Go Next" @@ -3508,10 +3721,6 @@ msgid "" "configuration that best suits your project's needs." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:18 -msgid "Docker Compose is `installed `_." -msgstr "" - #: ../../source/docker/tutorial-quickstart-docker-compose.rst:23 msgid "Clone the Docker Compose ``complete`` directory:" msgstr "" @@ -3707,7 +3916,7 @@ msgstr "" #: ../../source/docker/tutorial-quickstart-docker-compose.rst:188 #: ../../source/docker/tutorial-quickstart-docker-compose.rst:241 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:362 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:369 msgid "Rerun the ``quickstart-compose`` project:" msgstr "" @@ -3771,76 +3980,81 @@ msgstr "" msgid "compose.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:303 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:310 msgid "" "If you also want to enable TLS for the new SuperNodes, duplicate the " "SuperNode definition for each new SuperNode service in the ``with-" "tls.yml`` file." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:306 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:313 msgid "" "Make sure that the names of the services match with the one in the " "``compose.yml`` file." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:308 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:315 msgid "In ``with-tls.yml``, add the following:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:310 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:317 msgid "with-tls.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:332 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:339 msgid "Step 8: Persisting the SuperLink State and Enabling TLS" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:334 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:341 msgid "" "To run Flower with persisted SuperLink state and enabled TLS, a slight " "change in the ``with-state.yml`` file is required:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:337 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:344 msgid "Comment out the lines 2-4 and uncomment the lines 5-9:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:339 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:346 msgid "with-state.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:356 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:363 #, fuzzy msgid "Restart the services:" msgstr "이미 *서버*를 시작할 수 있습니다:" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:370 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:377 msgid "Step 9: Merge Multiple Compose Files" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:372 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:379 msgid "" "You can merge multiple Compose files into a single file. For instance, if" " you wish to combine the basic configuration with the TLS configuration, " "execute the following command:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:380 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:387 msgid "" "This will merge the contents of ``compose.yml`` and ``with-tls.yml`` into" " a new file called ``my_compose.yml``." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:384 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:391 msgid "Step 10: Clean Up" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:386 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:393 #, fuzzy msgid "Remove all services and volumes:" msgstr "R에서 모든 항목을 제거합니다." 
+#: ../../source/docker/tutorial-quickstart-docker-compose.rst:403 +#, fuzzy +msgid ":doc:`run-quickstart-examples-docker-compose`" +msgstr "빠른 시작 튜토리얼" + #: ../../source/docker/use-a-different-version.rst:2 #, fuzzy msgid "Use a Different Flower Version" @@ -4184,7 +4398,7 @@ msgstr "" "code:`CifarClient`는 모델 파라미터를 가져오거나 설정하는 메서드 2개, 모델 학습을 위한 메서드 1개, 모델 테스트를" " 위한 메서드 1개 등 네 가지 메서드를 구현해야 합니다:" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:218 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 msgid ":code:`set_parameters`" msgstr ":code:`set_parameters`" @@ -4217,9 +4431,9 @@ msgstr "" "모델 매개변수를 가져와서 NumPy :code:`ndarray`의 목록으로 반환합니다(이는 " ":code:`flwr.client.NumPyClient`가 기대하는 바와 같습니다)" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:223 -#: ../../source/tutorial-quickstart-jax.rst:171 -#: ../../source/tutorial-quickstart-scikitlearn.rst:123 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 +#: ../../source/tutorial-quickstart-jax.rst:173 +#: ../../source/tutorial-quickstart-scikitlearn.rst:125 msgid ":code:`fit`" msgstr ":code:`fit`" @@ -4241,9 +4455,9 @@ msgstr "로컬 훈련 세트에서 모델을 훈련합니다" msgid "get the updated local model weights and return them to the server" msgstr "업데이트된 로컬 모델 가중치를 가져와 서버로 반환합니다" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:227 -#: ../../source/tutorial-quickstart-jax.rst:175 -#: ../../source/tutorial-quickstart-scikitlearn.rst:127 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 +#: ../../source/tutorial-quickstart-jax.rst:178 +#: ../../source/tutorial-quickstart-scikitlearn.rst:128 msgid ":code:`evaluate`" msgstr ":code:`evaluate`" @@ -4354,7 +4568,7 @@ msgstr "" "평균 소득 계산과 같은 모든 분석(M)이 두 데이터 세트에 대해 거의 동일한 결과를 산출하도록 보장합니다(O와 O' 는 비슷할 " "것입니다). 이렇게 하면 그룹 패턴은 보존하면서 개별 세부 정보는 가려져 개인의 정보가 군중 속에 숨겨집니다." -#: ../../source/explanation-differential-privacy.rst:16 +#: ../../source/explanation-differential-privacy.rst:-1 msgid "DP Intro" msgstr "DP 소개" @@ -4493,8 +4707,8 @@ msgstr "" "**로컬 차등 프라이버시**: DP는 정보를 서버로 보내기 전에 클라이언트 측에서 적용되며, 서버로 전송되는 업데이트가 클라이언트 " "데이터에 대한 정보를 유출하는 것을 방지하는 것이 목표입니다." +#: ../../source/explanation-differential-privacy.rst:-1 #: ../../source/explanation-differential-privacy.rst:68 -#: ../../source/explanation-differential-privacy.rst:71 #: ../../source/how-to-use-differential-privacy.rst:11 msgid "Central Differential Privacy" msgstr "중앙 차등 프라이버시" @@ -4529,7 +4743,7 @@ msgstr "" "개인정보 보호에 중요하고 견고성에 도움이 되는 경우가 많습니다. 이를 달성하기 위한 일반적인 접근 방식은 클라이언트 모델 업데이트의" " `L2` 규범을 제한하여 더 큰 업데이트가 규범 `S`에 맞도록 축소되도록 하는 것입니다." -#: ../../source/explanation-differential-privacy.rst:84 +#: ../../source/explanation-differential-privacy.rst:-1 msgid "clipping" msgstr "클리핑" @@ -4581,8 +4795,8 @@ msgid "" "others." msgstr "고정 클리핑과 조정 클리핑 중 선택은 개인정보 보호 요구 사항, 데이터 배포, 모델 복잡성 등 다양한 요인에 따라 달라집니다." +#: ../../source/explanation-differential-privacy.rst:-1 #: ../../source/explanation-differential-privacy.rst:105 -#: ../../source/explanation-differential-privacy.rst:110 #: ../../source/how-to-use-differential-privacy.rst:96 msgid "Local Differential Privacy" msgstr "로컬 차등 프라이버시" @@ -4850,7 +5064,7 @@ msgstr "" msgid "This is sometimes called a hub-and-spoke topology:" msgstr "" -#: ../../source/explanation-flower-architecture.rst:18 +#: ../../source/explanation-flower-architecture.rst:24 #, fuzzy msgid "Hub-and-spoke topology in federated learning" msgstr "연합 학습이란 무엇입니까?" @@ -4923,7 +5137,7 @@ msgid "" "`missing link` between all those SuperNodes." 
msgstr "" -#: ../../source/explanation-flower-architecture.rst:65 +#: ../../source/explanation-flower-architecture.rst:71 #, fuzzy msgid "Basic Flower architecture" msgstr "Flower 아키텍처" @@ -4960,7 +5174,7 @@ msgid "" "SuperNodes." msgstr "" -#: ../../source/explanation-flower-architecture.rst:91 +#: ../../source/explanation-flower-architecture.rst:97 msgid "Multi-tenancy federated learning architecture" msgstr "" @@ -4982,7 +5196,7 @@ msgid "" "their corresponding ``ClientApp``\\s:" msgstr "" -#: ../../source/explanation-flower-architecture.rst:107 +#: ../../source/explanation-flower-architecture.rst:113 msgid "Multi-tenancy federated learning architecture - Run 1" msgstr "" @@ -4998,7 +5212,7 @@ msgid "" " to participate in the training:" msgstr "" -#: ../../source/explanation-flower-architecture.rst:119 +#: ../../source/explanation-flower-architecture.rst:125 msgid "Multi-tenancy federated learning architecture - Run 2" msgstr "" @@ -5034,7 +5248,7 @@ msgid "" "developer machine." msgstr "" -#: ../../source/explanation-flower-architecture.rst:145 +#: ../../source/explanation-flower-architecture.rst:151 msgid "Flower Deployment Engine with SuperExec" msgstr "" @@ -8205,7 +8419,7 @@ msgstr "" " 위한 :code:`DifferentialPrivacyServerSideFixedClipping`과 " ":code:`DifferentialPrivacyServerSideAdaptiveClipping`입니다." -#: ../../source/how-to-use-differential-privacy.rst:25 +#: ../../source/how-to-use-differential-privacy.rst:-1 msgid "server side clipping" msgstr "서버 측 클리핑" @@ -8244,7 +8458,7 @@ msgstr "" " 해당 서버 측 래퍼 :code:`DifferentialPrivacyClientSideFixedClipping` 및 " ":code:`DifferentialPrivacyClientSideAdaptiveClipping`이 있습니다." -#: ../../source/how-to-use-differential-privacy.rst:57 +#: ../../source/how-to-use-differential-privacy.rst:-1 msgid "client side clipping" msgstr "클라이언트 측 클리핑" @@ -8278,7 +8492,7 @@ msgstr "" "로컬 차분 프라이버시(DP)를 활용하고 클라이언트 모델 파라미터를 서버로 전송하기 전에 노이즈를 추가하려면 `LocalDpMod`를" " 사용하면 됩니다. 클리핑 노멀 값, 감도, 엡실론, 델타 등의 하이퍼파라미터를 설정해야 합니다." -#: ../../source/how-to-use-differential-privacy.rst:99 +#: ../../source/how-to-use-differential-privacy.rst:-1 msgid "local DP mod" msgstr "로컬 DP mod" @@ -8704,11 +8918,33 @@ msgstr "" msgid "Arguments" msgstr "빌드 전달인자" -#: ../../flwr install:1 new:1 run:1 +#: ../../flwr install:1 log:1 new:1 run:1 #, fuzzy msgid "Optional argument" msgstr "선택적 개선 사항" +#: ../../flwr log:1 +msgid "Get logs from a Flower project run." +msgstr "" + +#: ../../flwr log:1 +msgid "Flag to stream or print logs from the Flower run" +msgstr "" + +#: ../../flwr log +msgid "default" +msgstr "" + +#: ../../flwr log:1 +#, fuzzy +msgid "``True``" +msgstr "``DISTRO``" + +#: ../../flwr log:1 +#, fuzzy +msgid "Required argument" +msgstr "빌드 전달인자" + #: ../../flwr new:1 #, fuzzy msgid "Create new Flower App." @@ -8797,7 +9033,7 @@ msgstr "Modules" #: ../../source/ref-api/flwr.rst:35::1 #, fuzzy -msgid ":py:obj:`client `\\" +msgid ":py:obj:`flwr.client `\\" msgstr ":py:obj:`flwr.client `\\" #: ../../source/ref-api/flwr.rst:35::1 flwr.client:1 of @@ -8806,7 +9042,7 @@ msgstr "Flower 클라이언트." #: ../../source/ref-api/flwr.rst:35::1 #, fuzzy -msgid ":py:obj:`common `\\" +msgid ":py:obj:`flwr.common `\\" msgstr ":py:obj:`flwr.common `\\" #: ../../source/ref-api/flwr.rst:35::1 flwr.common:1 of @@ -8815,7 +9051,7 @@ msgstr "서버와 클라이언트 간에 공유되는 공통 구성 요소입니 #: ../../source/ref-api/flwr.rst:35::1 #, fuzzy -msgid ":py:obj:`server `\\" +msgid ":py:obj:`flwr.server `\\" msgstr ":py:obj:`flwr.server `\\" #: ../../source/ref-api/flwr.rst:35::1 @@ -8826,7 +9062,7 @@ msgstr "Flower 서버." 
#: ../../source/ref-api/flwr.rst:35::1 #, fuzzy -msgid ":py:obj:`simulation `\\" +msgid ":py:obj:`flwr.simulation `\\" msgstr ":py:obj:`flwr.simulation `\\" #: ../../source/ref-api/flwr.rst:35::1 flwr.simulation:1 of @@ -8913,7 +9149,7 @@ msgstr "NumPy를 사용하는 Flower 클라이언트를 위한 추상 베이스 #: ../../source/ref-api/flwr.client.rst:50::1 #, fuzzy -msgid ":py:obj:`mod `\\" +msgid ":py:obj:`flwr.client.mod `\\" msgstr ":py:obj:`flwr.client.mod `\\" #: ../../source/ref-api/flwr.client.rst:50::1 flwr.client.mod:1 of @@ -9110,48 +9346,57 @@ msgstr ":py:obj:`context `\\" msgid "Getter for `Context` client attribute." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst -#: ../../source/ref-api/flwr.client.NumPyClient.rst -#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst -#: ../../source/ref-api/flwr.common.Array.rst -#: ../../source/ref-api/flwr.common.ConfigsRecord.rst -#: ../../source/ref-api/flwr.common.Context.rst -#: ../../source/ref-api/flwr.common.Error.rst -#: ../../source/ref-api/flwr.common.Message.rst -#: ../../source/ref-api/flwr.common.Metadata.rst -#: ../../source/ref-api/flwr.common.MetricsRecord.rst #: ../../source/ref-api/flwr.common.Parameters.rst:2 -#: ../../source/ref-api/flwr.common.ParametersRecord.rst -#: ../../source/ref-api/flwr.common.RecordSet.rst -#: ../../source/ref-api/flwr.server.ClientManager.rst -#: ../../source/ref-api/flwr.server.Driver.rst -#: ../../source/ref-api/flwr.server.ServerAppComponents.rst -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst -#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst -#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst -#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst -#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst -#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst -#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst -#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst -#: ../../source/ref-api/flwr.server.strategy.FedProx.rst -#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst -#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst -#: ../../source/ref-api/flwr.server.strategy.Krum.rst -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst -#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst -#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst -#: ../../source/ref-api/flwr.simulation.run_simulation.rst -#: ../../source/ref-api/flwr.simulation.start_simulation.rst #: flwr.client.app.start_client flwr.client.app.start_numpy_client -#: flwr.server.app.start_server -#: flwr.server.driver.driver.Driver.send_and_receive of +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.mod.localdp_mod.LocalDpMod +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: 
flwr.common.context.Context flwr.common.message.Error +#: flwr.common.message.Message flwr.common.message.Message.create_error_reply +#: flwr.common.message.Message.create_reply flwr.common.message.Metadata +#: flwr.common.record.configsrecord.ConfigsRecord +#: flwr.common.record.metricsrecord.MetricsRecord +#: flwr.common.record.parametersrecord.Array +#: flwr.common.record.parametersrecord.ParametersRecord +#: flwr.common.record.recordset.RecordSet flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.ClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.serverapp_components.ServerAppComponents +#: flwr.server.strategy.bulyan.Bulyan +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.fedadagrad.FedAdagrad +#: flwr.server.strategy.fedadam.FedAdam flwr.server.strategy.fedavg.FedAvg +#: flwr.server.strategy.fedavg_android.FedAvgAndroid +#: flwr.server.strategy.fedavgm.FedAvgM flwr.server.strategy.fedopt.FedOpt +#: flwr.server.strategy.fedprox.FedProx +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg +#: flwr.server.strategy.fedyogi.FedYogi flwr.server.strategy.krum.Krum +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow +#: flwr.simulation.run_simulation.run_simulation of msgid "Parameters" msgstr "파라미터" @@ -9162,21 +9407,31 @@ msgid "" "customize the local evaluation process." msgstr "서버에서 받은 (전역) 모델 파라미터와 로컬 평가 프로세스를 사용자 지정하는 데 사용되는 구성 값 사전이 포함된 평가 지침입니다." 
-#: ../../source/ref-api/flwr.client.Client.rst -#: ../../source/ref-api/flwr.client.NumPyClient.rst -#: ../../source/ref-api/flwr.common.ConfigsRecord.rst -#: ../../source/ref-api/flwr.common.Message.rst -#: ../../source/ref-api/flwr.common.MetricsRecord.rst -#: ../../source/ref-api/flwr.common.ParametersRecord.rst -#: ../../source/ref-api/flwr.server.ClientManager.rst -#: ../../source/ref-api/flwr.server.Driver.rst -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst -#: ../../source/ref-api/flwr.simulation.start_simulation.rst -#: flwr.server.app.start_server -#: flwr.server.driver.driver.Driver.send_and_receive of +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters of msgid "Returns" msgstr "반환" @@ -9186,18 +9441,29 @@ msgid "" "details such as the number of local data examples used for evaluation." msgstr "로컬 데이터 세트의 손실 및 평가에 사용된 로컬 데이터 예제 수와 같은 기타 세부 정보가 포함된 평가 결과입니다." 
-#: ../../source/ref-api/flwr.client.Client.rst -#: ../../source/ref-api/flwr.client.NumPyClient.rst -#: ../../source/ref-api/flwr.common.Message.rst -#: ../../source/ref-api/flwr.server.ClientManager.rst -#: ../../source/ref-api/flwr.server.Driver.rst -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst -#: ../../source/ref-api/flwr.simulation.start_simulation.rst -#: flwr.server.app.start_server -#: flwr.server.driver.driver.Driver.send_and_receive of +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters of msgid "Return type" msgstr "반환 타입" @@ -9554,6 +9820,11 @@ msgstr "클라이언트 측 고정 클리핑 수정자." msgid ":py:obj:`make_ffn `\\ \\(ffn\\, mods\\)" msgstr ":py:obj:`make_ffn `\\ \\(ffn\\, mods\\)" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.utils.make_ffn:1 of +msgid "." +msgstr "." + #: ../../source/ref-api/flwr.client.mod.rst:28::1 msgid "" ":py:obj:`message_size_mod `\\ \\(msg\\," @@ -9730,10 +10001,6 @@ msgstr "일반적으로 fixedclipping_mod는 매개변수에서 가장 마지막 msgid "make\\_ffn" msgstr "make\\_ffn" -#: flwr.client.mod.utils.make_ffn:1 of -msgid "." -msgstr "." - #: ../../source/ref-api/flwr.client.mod.message_size_mod.rst:2 msgid "message\\_size\\_mod" msgstr "message\\_size\\_mod" @@ -9760,14 +10027,6 @@ msgstr "secagg\\_mod" msgid "secaggplus\\_mod" msgstr "secaggplus\\_mod" -#: ../../source/ref-api/flwr.client.run_client_app.rst:2 -msgid "run\\_client\\_app" -msgstr "run\\_client\\_app" - -#: ../../source/ref-api/flwr.client.run_supernode.rst:2 -msgid "run\\_supernode" -msgstr "run\\_supernode" - #: ../../source/ref-api/flwr.client.start_client.rst:2 msgid "start\\_client" msgstr "start\\_client" @@ -10569,14 +10828,9 @@ msgstr "이 객체에 저장된 바이트 수를 반환합니다." #: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid ":py:obj:`get `\\ \\(key\\[\\, default\\]\\)" +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -#: collections.abc.Mapping.get:1 -#: collections.abc.MutableMapping.clear:1::1 of -msgid "Retrieve the corresponding layout by the string key." 
-msgstr "" - #: collections.abc.MutableMapping.clear:1::1 of msgid ":py:obj:`items `\\ \\(\\)" msgstr ":py:obj:`items `\\ \\(\\)" @@ -10635,22 +10889,6 @@ msgstr ":py:obj:`values `\\ \\(\\)" msgid "This function counts booleans as occupying 1 Byte." msgstr "이 함수는 booleans을 1바이트를 차지하는 것으로 계산합니다." -#: collections.abc.Mapping.get:3 of -msgid "" -"When there isn't an exact match, all the existing keys in the layout map " -"will be treated as a regex and map against the input key again. The first" -" match will be returned, based on the key insertion order. Return None if" -" there isn't any match found." -msgstr "" - -#: collections.abc.Mapping.get:8 of -msgid "the string key as the query for the layout." -msgstr "" - -#: collections.abc.Mapping.get:10 of -msgid "Corresponding layout based on the query." -msgstr "" - #: ../../source/ref-api/flwr.common.Context.rst:2 msgid "Context" msgstr "컨텍스트" @@ -11446,7 +11684,7 @@ msgstr "인코딩" msgid "The encoding in which to encode the string." msgstr "문자열을 인코딩합니다." -#: flwr.common.EventType.encode:5 of +#: flwr.common.EventType.encode:9 of msgid "errors" msgstr "오류" @@ -11640,7 +11878,7 @@ msgstr "" "문자열이 접미사 문자열로 끝나고 해당 접미사가 비어 있지 않으면 문자열[:-len(suffix)]을 반환합니다. 그렇지 않으면 원본" " 문자열의 복사본을 반환합니다." -#: flwr.common.EventType.replace:3 of +#: flwr.common.EventType.replace:5 of msgid "count" msgstr "카운트" @@ -11680,7 +11918,7 @@ msgid "" "strings and the original string." msgstr "구분 기호를 찾을 수 없는 경우 빈 문자열 2개와 원래 문자열을 포함하는 3-tuple을 반환합니다." -#: flwr.common.EventType.rsplit:3 flwr.common.EventType.split:3 of +#: flwr.common.EventType.rsplit:7 flwr.common.EventType.split:7 of msgid "sep" msgstr "sep" @@ -11697,7 +11935,7 @@ msgstr "" "None(기본값)으로 설정하면 모든 공백 문자(\\\\n \\\\r \\\\t \\\\f 및 공백 포함)를 분할하고 결과에서 빈 " "문자열을 삭제합니다." -#: flwr.common.EventType.rsplit:9 flwr.common.EventType.split:9 of +#: flwr.common.EventType.rsplit:11 flwr.common.EventType.split:11 of msgid "maxsplit" msgstr "maxsplit" @@ -11743,7 +11981,7 @@ msgid "" "remaining cased characters have lower case." msgstr "보다 구체적으로, 단어는 대문자로 시작하고 나머지 모든 대소문자는 소문자로 표기합니다." -#: flwr.common.EventType.translate:3 of +#: flwr.common.EventType.translate:5 of msgid "table" msgstr "table" @@ -12178,7 +12416,7 @@ msgstr ":py:obj:`count_bytes `\\ \\(\\)" #: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid ":py:obj:`get `\\ \\(key\\[\\, default\\]\\)" +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" #: collections.abc.MutableMapping.clear:1::1 of @@ -12325,9 +12563,7 @@ msgstr ":py:obj:`count_bytes `\\ \\(\\ #: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "" -":py:obj:`get `\\ \\(key\\[\\, " -"default\\]\\)" +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" #: collections.abc.MutableMapping.clear:1::1 of @@ -12669,7 +12905,7 @@ msgstr "사용 가능한 클라이언트 그룹 제공." 
#: ../../source/ref-api/flwr.server.rst:56::1 #, fuzzy -msgid ":py:obj:`strategy `\\" +msgid ":py:obj:`flwr.server.strategy `\\" msgstr ":py:obj:`state `\\" #: ../../source/ref-api/flwr.server.rst:56::1 @@ -12679,7 +12915,7 @@ msgstr "" #: ../../source/ref-api/flwr.server.rst:56::1 #, fuzzy -msgid ":py:obj:`workflow `\\" +msgid ":py:obj:`flwr.server.workflow `\\" msgstr ":py:obj:`flwr.server `\\" #: ../../source/ref-api/flwr.server.rst:56::1 @@ -13161,8 +13397,7 @@ msgid "" msgstr "" #: flwr.server.app.start_server:9 -#: flwr.server.serverapp_components.ServerAppComponents:6 -#: flwr.simulation.app.start_simulation:29 of +#: flwr.server.serverapp_components.ServerAppComponents:6 of msgid "" "Currently supported values are `num_rounds` (int, default: 1) and " "`round_timeout` in seconds (float, default: None)." @@ -13284,14 +13519,6 @@ msgstr "" msgid "**success**" msgstr "" -#: ../../source/ref-api/flwr.server.run_server_app.rst:2 -msgid "run\\_server\\_app" -msgstr "" - -#: ../../source/ref-api/flwr.server.run_superlink.rst:2 -msgid "run\\_superlink" -msgstr "" - #: ../../source/ref-api/flwr.server.start_server.rst:2 msgid "start\\_server" msgstr "" @@ -16319,15 +16546,15 @@ msgstr "" #: ../../source/ref-api/flwr.simulation.rst:18::1 #, fuzzy msgid "" -":py:obj:`start_simulation `\\ \\(\\*\\," -" client\\_fn\\, num\\_clients\\)" +":py:obj:`start_simulation `\\ " +"\\(\\*args\\, \\*\\*kwargs\\)" msgstr "" ":py:obj:`start_client `\\ \\(\\*\\, " "server\\_address\\[\\, client\\_fn\\, ...\\]\\)" #: ../../source/ref-api/flwr.simulation.rst:18::1 -#: flwr.simulation.app.start_simulation:1 of -msgid "Start a Ray-based Flower simulation server." +#: flwr.simulation.start_simulation:1 of +msgid "Log error stating that module `ray` could not be imported." msgstr "" #: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 @@ -16388,170 +16615,56 @@ msgstr "" msgid "start\\_simulation" msgstr "" -#: flwr.simulation.app.start_simulation:3 of -msgid "" -"A function creating `Client` instances. The function must have the " -"signature `client_fn(context: Context). It should return a single client " -"instance of type `Client`. Note that the created client instances are " -"ephemeral and will often be destroyed after a single method invocation. " -"Since client instances are not long-lived, they should not attempt to " -"carry state over method invocations. Any state required by the instance " -"(model, dataset, hyperparameters, ...) should be (re-)created in either " -"the call to `client_fn` or the call to any of the client methods (e.g., " -"load evaluation data in the `evaluate` method itself)." +#: ../../source/ref-changelog.md:1 +msgid "Changelog" msgstr "" -#: flwr.simulation.app.start_simulation:13 of -msgid "The total number of clients in this simulation." +#: ../../source/ref-changelog.md:3 +msgid "v1.11.1 (2024-09-11)" msgstr "" -#: flwr.simulation.app.start_simulation:15 of -msgid "" -"UNSUPPORTED, WILL BE REMOVED. USE `num_clients` INSTEAD. List " -"`client_id`s for each client. This is only required if `num_clients` is " -"not set. Setting both `num_clients` and `clients_ids` with " -"`len(clients_ids)` not equal to `num_clients` generates an error. Using " -"this argument will raise an error." 
+#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:37 +#: ../../source/ref-changelog.md:141 ../../source/ref-changelog.md:239 +#: ../../source/ref-changelog.md:339 ../../source/ref-changelog.md:403 +#: ../../source/ref-changelog.md:496 ../../source/ref-changelog.md:596 +#: ../../source/ref-changelog.md:680 ../../source/ref-changelog.md:744 +#: ../../source/ref-changelog.md:802 ../../source/ref-changelog.md:871 +#: ../../source/ref-changelog.md:940 +msgid "Thanks to our contributors" msgstr "" -#: flwr.simulation.app.start_simulation:21 of +#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:39 +#: ../../source/ref-changelog.md:143 ../../source/ref-changelog.md:241 +#: ../../source/ref-changelog.md:341 ../../source/ref-changelog.md:405 +#: ../../source/ref-changelog.md:498 ../../source/ref-changelog.md:598 +#: ../../source/ref-changelog.md:682 ../../source/ref-changelog.md:746 +#: ../../source/ref-changelog.md:804 msgid "" -"CPU and GPU resources for a single client. Supported keys are `num_cpus` " -"and `num_gpus`. To understand the GPU utilization caused by `num_gpus`, " -"as well as using custom resources, please consult the Ray documentation." +"We would like to give our special thanks to all the contributors who made" +" the new version of Flower possible (in `git shortlog` order):" msgstr "" -#: flwr.simulation.app.start_simulation:26 of +#: ../../source/ref-changelog.md:9 msgid "" -"An implementation of the abstract base class `flwr.server.Server`. If no " -"instance is provided, then `start_server` will create one." +"`Charles Beauville`, `Chong Shen Ng`, `Daniel J. Beutel`, `Heng Pan`, " +"`Javier`, `Robert Steiner`, `Yan Gao` " msgstr "" -#: flwr.simulation.app.start_simulation:32 of +#: ../../source/ref-changelog.md:11 +#, fuzzy +msgid "Improvements" +msgstr "선택적 개선 사항" + +#: ../../source/ref-changelog.md:13 msgid "" -"An implementation of the abstract base class `flwr.server.Strategy`. If " -"no strategy is provided, then `start_server` will use " -"`flwr.server.strategy.FedAvg`." +"**Implement** `keys/values/items` **methods for** `TypedDict` " +"([#4146](https://github.com/adap/flower/pull/4146))" msgstr "" -#: flwr.simulation.app.start_simulation:36 of +#: ../../source/ref-changelog.md:15 msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." -" If no implementation is provided, then `start_simulation` will use " -"`flwr.server.client_manager.SimpleClientManager`." -msgstr "" - -#: flwr.simulation.app.start_simulation:40 of -msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args: { \"ignore_reinit_error\": True, " -"\"include_dashboard\": False } An empty dictionary can be used " -"(ray_init_args={}) to prevent any arguments from being passed to " -"ray.init." -msgstr "" - -#: flwr.simulation.app.start_simulation:40 of -msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args:" -msgstr "" - -#: flwr.simulation.app.start_simulation:44 of -msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" -msgstr "" - -#: flwr.simulation.app.start_simulation:46 of -msgid "" -"An empty dictionary can be used (ray_init_args={}) to prevent any " -"arguments from being passed to ray.init." 
-msgstr "" - -#: flwr.simulation.app.start_simulation:49 of -msgid "" -"Set to True to prevent `ray.shutdown()` in case " -"`ray.is_initialized()=True`." -msgstr "" - -#: flwr.simulation.app.start_simulation:51 of -msgid "" -"Optionally specify the type of actor to use. The actor object, which " -"persists throughout the simulation, will be the process in charge of " -"executing a ClientApp wrapping input argument `client_fn`." -msgstr "" - -#: flwr.simulation.app.start_simulation:55 of -msgid "" -"If you want to create your own Actor classes, you might need to pass some" -" input argument. You can use this dictionary for such purpose." -msgstr "" - -#: flwr.simulation.app.start_simulation:58 of -msgid "" -"(default: \"DEFAULT\") Optional string (\"DEFAULT\" or \"SPREAD\") for " -"the VCE to choose in which node the actor is placed. If you are an " -"advanced user needed more control you can use lower-level scheduling " -"strategies to pin actors to specific compute nodes (e.g. via " -"NodeAffinitySchedulingStrategy). Please note this is an advanced feature." -" For all details, please refer to the Ray documentation: " -"https://docs.ray.io/en/latest/ray-core/scheduling/index.html" -msgstr "" - -#: flwr.simulation.app.start_simulation:67 of -msgid "**hist** -- Object containing metrics from training." -msgstr "" - -#: ../../source/ref-changelog.md:1 -msgid "Changelog" -msgstr "" - -#: ../../source/ref-changelog.md:3 -msgid "v1.11.1 (2024-09-11)" -msgstr "" - -#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:37 -#: ../../source/ref-changelog.md:141 ../../source/ref-changelog.md:239 -#: ../../source/ref-changelog.md:339 ../../source/ref-changelog.md:403 -#: ../../source/ref-changelog.md:496 ../../source/ref-changelog.md:596 -#: ../../source/ref-changelog.md:680 ../../source/ref-changelog.md:744 -#: ../../source/ref-changelog.md:802 ../../source/ref-changelog.md:871 -#: ../../source/ref-changelog.md:940 -msgid "Thanks to our contributors" -msgstr "" - -#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:39 -#: ../../source/ref-changelog.md:143 ../../source/ref-changelog.md:241 -#: ../../source/ref-changelog.md:341 ../../source/ref-changelog.md:405 -#: ../../source/ref-changelog.md:498 ../../source/ref-changelog.md:598 -#: ../../source/ref-changelog.md:682 ../../source/ref-changelog.md:746 -#: ../../source/ref-changelog.md:804 -msgid "" -"We would like to give our special thanks to all the contributors who made" -" the new version of Flower possible (in `git shortlog` order):" -msgstr "" - -#: ../../source/ref-changelog.md:9 -msgid "" -"`Charles Beauville`, `Chong Shen Ng`, `Daniel J. 
Beutel`, `Heng Pan`, " -"`Javier`, `Robert Steiner`, `Yan Gao` " -msgstr "" - -#: ../../source/ref-changelog.md:11 -#, fuzzy -msgid "Improvements" -msgstr "선택적 개선 사항" - -#: ../../source/ref-changelog.md:13 -msgid "" -"**Implement** `keys/values/items` **methods for** `TypedDict` " -"([#4146](https://github.com/adap/flower/pull/4146))" -msgstr "" - -#: ../../source/ref-changelog.md:15 -msgid "" -"**Fix parsing of** `--executor-config` **if present** " -"([#4125](https://github.com/adap/flower/pull/4125))" +"**Fix parsing of** `--executor-config` **if present** " +"([#4125](https://github.com/adap/flower/pull/4125))" msgstr "" #: ../../source/ref-changelog.md:17 @@ -16608,13 +16721,6 @@ msgstr "" msgid "Incompatible changes" msgstr "" -#: ../../source/ref-changelog.md:33 ../../source/ref-changelog.md:399 -#: ../../source/ref-changelog.md:676 ../../source/ref-changelog.md:740 -#: ../../source/ref-changelog.md:798 ../../source/ref-changelog.md:867 -#: ../../source/ref-changelog.md:929 -msgid "None" -msgstr "" - #: ../../source/ref-changelog.md:35 msgid "v1.11.0 (2024-08-30)" msgstr "" @@ -21742,32 +21848,44 @@ msgid "" "blockchain environment is available here:" msgstr "" -#: ../../source/ref-faq.rst:28 +#: ../../source/ref-faq.rst:29 +msgid "`FLock: A Decentralised AI Training Platform `_." +msgstr "" + +#: ../../source/ref-faq.rst:29 +msgid "Contribute to on-chain training the model and earn rewards." +msgstr "" + +#: ../../source/ref-faq.rst:30 +msgid "Local blockchain with federated learning simulation." +msgstr "" + +#: ../../source/ref-faq.rst:31 msgid "" "`Flower meets Nevermined GitHub Repository `_." msgstr "" -#: ../../source/ref-faq.rst:29 +#: ../../source/ref-faq.rst:32 msgid "" "`Flower meets Nevermined YouTube video " "`_." msgstr "" -#: ../../source/ref-faq.rst:30 +#: ../../source/ref-faq.rst:33 msgid "" "`Flower meets KOSMoS `_." msgstr "" -#: ../../source/ref-faq.rst:31 +#: ../../source/ref-faq.rst:34 msgid "" "`Flower meets Talan blog post `_ ." msgstr "" -#: ../../source/ref-faq.rst:32 +#: ../../source/ref-faq.rst:35 msgid "" "`Flower meets Talan GitHub Repository " "`_ ." @@ -21990,178 +22108,298 @@ msgid "" "more." msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:-1 -msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with FastAI to train a vision model on CIFAR-10." -msgstr "" - #: ../../source/tutorial-quickstart-fastai.rst:5 msgid "Quickstart fastai" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:10 -msgid "Let's build a federated learning system using fastai and Flower!" +#: ../../source/tutorial-quickstart-fastai.rst:7 +msgid "" +"In this federated learning tutorial we will learn how to train a " +"SqueezeNet model on MNIST using Flower and fastai. It is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `." msgstr "" #: ../../source/tutorial-quickstart-fastai.rst:12 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 +msgid "Then, clone the code example directly from GitHub:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:20 msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." 
+"This will create a new directory called `quickstart-fastai` containing " +"the following files:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:33 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:33 +#, fuzzy +msgid "Next, activate your environment, then run:" +msgstr "그 후 가상 환경을 활성화합니다:" + +#: ../../source/tutorial-quickstart-fastai.rst:43 +msgid "" +"This example by default runs the Flower Simulation Engine, creating a " +"federation of 10 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." +" Let's run the project:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:56 +#: ../../source/tutorial-quickstart-huggingface.rst:65 +#: ../../source/tutorial-quickstart-mlx.rst:64 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:56 +#: ../../source/tutorial-quickstart-pytorch.rst:64 +#: ../../source/tutorial-quickstart-tensorflow.rst:65 +msgid "With default arguments you will see an output like this one:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:100 +#: ../../source/tutorial-quickstart-huggingface.rst:116 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:106 +#: ../../source/tutorial-quickstart-pytorch.rst:105 +#: ../../source/tutorial-quickstart-tensorflow.rst:106 +msgid "" +"You can also override the parameters defined in the " +"``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:110 +msgid "" +"Check the `source code `_ of this tutorial in ``examples/quickstart-fasai`` " +"in the Flower GitHub repository." msgstr "" #: ../../source/tutorial-quickstart-huggingface.rst:-1 msgid "" "Check out this Federating Learning quickstart tutorial for using Flower " -"with HuggingFace Transformers in order to fine-tune an LLM." +"with 🤗 HuggingFace Transformers in order to fine-tune an LLM." msgstr "" #: ../../source/tutorial-quickstart-huggingface.rst:5 msgid "Quickstart 🤗 Transformers" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:10 +#: ../../source/tutorial-quickstart-huggingface.rst:7 msgid "" -"Let's build a federated learning system using Hugging Face Transformers " -"and Flower!" +"In this federated learning tutorial we will learn how to train a large " +"language model (LLM) on the `IMDB " +"`_ dataset using Flower" +" and the 🤗 Hugging Face Transformers library. It is recommended to create" +" a virtual environment and run everything within a :doc:`virtualenv " +"`." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:12 +#: ../../source/tutorial-quickstart-huggingface.rst:14 msgid "" -"We will leverage Hugging Face to federate the training of language models" -" over multiple clients using Flower. More specifically, we will fine-tune" -" a pre-trained Transformer model (distilBERT) for sequence classification" -" over a dataset of IMDB ratings. The end goal is to detect if a movie " -"rating is positive or negative." -msgstr "" - -#: ../../source/tutorial-quickstart-huggingface.rst:18 -msgid "Dependencies" +"Let's use ``flwr new`` to create a complete Flower+🤗 Hugging Face " +"project. It will generate all the files needed to run, by default with " +"the Flower Simulation Engine, a federation of 10 nodes using |fedavg|_ " +"The dataset will be partitioned using |flowerdatasets|_'s " +"|iidpartitioner|_." 
msgstr "" #: ../../source/tutorial-quickstart-huggingface.rst:20 +#: ../../source/tutorial-quickstart-mlx.rst:19 +#: ../../source/tutorial-quickstart-pytorch.rst:19 +#: ../../source/tutorial-quickstart-tensorflow.rst:20 msgid "" -"To follow along this tutorial you will need to install the following " -"packages: :code:`datasets`, :code:`evaluate`, :code:`flwr`, " -":code:`torch`, and :code:`transformers`. This can be done using " -":code:`pip`:" +"Now that we have a rough idea of what this example is about, let's get " +"started. First, install Flower in your new environment:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:30 -msgid "Standard Hugging Face workflow" +#: ../../source/tutorial-quickstart-huggingface.rst:28 +msgid "" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``HuggingFace``), give a name to your " +"project, and type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:33 -msgid "Handling the data" +#: ../../source/tutorial-quickstart-huggingface.rst:36 +#: ../../source/tutorial-quickstart-mlx.rst:35 +#: ../../source/tutorial-quickstart-pytorch.rst:35 +#: ../../source/tutorial-quickstart-tensorflow.rst:36 +msgid "" +"After running it you'll notice a new directory with your project name has" +" been created. It should have the following structure:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:35 +#: ../../source/tutorial-quickstart-huggingface.rst:50 +#: ../../source/tutorial-quickstart-mlx.rst:49 +#: ../../source/tutorial-quickstart-pytorch.rst:49 +#: ../../source/tutorial-quickstart-tensorflow.rst:50 msgid "" -"To fetch the IMDB dataset, we will use Hugging Face's :code:`datasets` " -"library. We then need to tokenize the data and create :code:`PyTorch` " -"dataloaders, this is all done in the :code:`load_data` function:" +"If you haven't yet installed the project and its dependencies, you can do" +" so by:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:81 -msgid "Training and testing the model" +#: ../../source/tutorial-quickstart-huggingface.rst:58 +#: ../../source/tutorial-quickstart-pytorch.rst:57 +#: ../../source/tutorial-quickstart-tensorflow.rst:58 +msgid "To run the project, do:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:83 -msgid "" -"Once we have a way of creating our trainloader and testloader, we can " -"take care of the training and testing. This is very similar to any " -":code:`PyTorch` training or testing loop:" +#: ../../source/tutorial-quickstart-huggingface.rst:106 +msgid "You can also run the project with GPU as follows:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:121 -msgid "Creating the model itself" +#: ../../source/tutorial-quickstart-huggingface.rst:113 +msgid "" +"This will use the default arguments where each ``ClientApp`` will use 2 " +"CPUs and at most 4 ``ClientApp``\\s will run in a given GPU." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:123 +#: ../../source/tutorial-quickstart-huggingface.rst:124 +#: ../../source/tutorial-quickstart-mlx.rst:114 +#: ../../source/tutorial-quickstart-pytorch.rst:113 msgid "" -"To create the model itself, we will just load the pre-trained distillBERT" -" model using Hugging Face’s :code:`AutoModelForSequenceClassification` :" +"What follows is an explanation of each component in the project you just " +"created: dataset partition, the model, defining the ``ClientApp`` and " +"defining the ``ServerApp``." 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:136 -msgid "Federating the example" -msgstr "" +#: ../../source/tutorial-quickstart-huggingface.rst:130 +#: ../../source/tutorial-quickstart-mlx.rst:120 +#: ../../source/tutorial-quickstart-pytorch.rst:119 +#: ../../source/tutorial-quickstart-tensorflow.rst:116 +#, fuzzy +msgid "The Data" +msgstr "Metadata" -#: ../../source/tutorial-quickstart-huggingface.rst:139 -msgid "Creating the IMDBClient" +#: ../../source/tutorial-quickstart-huggingface.rst:132 +msgid "" +"This tutorial uses |flowerdatasets|_ to easily download and partition the" +" `IMDB `_ dataset. In " +"this example you'll make use of the |iidpartitioner|_ to generate " +"``num_partitions`` partitions. You can choose |otherpartitioners|_ " +"available in Flower Datasets. To tokenize the text, we will also load the" +" tokenizer from the pre-trained Transformer model that we'll use during " +"training - more on that in the next section. Each ``ClientApp`` will call" +" this function to create dataloaders with the data that correspond to " +"their data partition." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:141 -msgid "" -"To federate our example to multiple clients, we first need to write our " -"Flower client class (inheriting from :code:`flwr.client.NumPyClient`). " -"This is very easy, as our model is a standard :code:`PyTorch` model:" +#: ../../source/tutorial-quickstart-huggingface.rst:178 +#: ../../source/tutorial-quickstart-mlx.rst:164 +#: ../../source/tutorial-quickstart-pytorch.rst:157 +#: ../../source/tutorial-quickstart-tensorflow.rst:145 +msgid "The Model" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:169 +#: ../../source/tutorial-quickstart-huggingface.rst:180 msgid "" -"The :code:`get_parameters` function lets the server get the client's " -"parameters. Inversely, the :code:`set_parameters` function allows the " -"server to send its parameters to the client. Finally, the :code:`fit` " -"function trains the model locally for the client, and the " -":code:`evaluate` function tests the model locally and returns the " -"relevant metrics." +"We will leverage 🤗 Hugging Face to federate the training of language " +"models over multiple clients using Flower. More specifically, we will " +"fine-tune a pre-trained Transformer model (|berttiny|_) for sequence " +"classification over the dataset of IMDB ratings. The end goal is to " +"detect if a movie rating is positive or negative. If you have access to " +"larger GPUs, feel free to use larger models!" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:175 -msgid "Starting the server" +#: ../../source/tutorial-quickstart-huggingface.rst:193 +msgid "" +"Note that here, ``model_name`` is a string that will be loaded from the " +"``Context`` in the ClientApp and ServerApp." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:177 +#: ../../source/tutorial-quickstart-huggingface.rst:196 msgid "" -"Now that we have a way to instantiate clients, we need to create our " -"server in order to aggregate the results. Using Flower, this can be done " -"very easily by first choosing a strategy (here, we are using " -":code:`FedAvg`, which will define the global weights as the average of " -"all the clients' weights at each round) and then using the " -":code:`flwr.server.start_server` function:" +"In addition to loading the pretrained model weights and architecture, we " +"also include two utility functions to perform both training (i.e. 
" +"``train()``) and evaluation (i.e. ``test()``) using the above model. " +"These functions should look fairly familiar if you have some prior " +"experience with PyTorch. Note these functions do not have anything " +"specific to Flower. That being said, the training function will normally " +"be called, as we'll see later, from a Flower client passing its own data." +" In summary, your clients can use standard training/testing functions to " +"perform local training or evaluation:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:205 +#: ../../source/tutorial-quickstart-huggingface.rst:239 +#: ../../source/tutorial-quickstart-mlx.rst:210 +#: ../../source/tutorial-quickstart-pytorch.rst:234 +#: ../../source/tutorial-quickstart-tensorflow.rst:176 +#, fuzzy +msgid "The ClientApp" +msgstr "클라이언트앱" + +#: ../../source/tutorial-quickstart-huggingface.rst:241 msgid "" -"The :code:`weighted_average` function is there to provide a way to " -"aggregate the metrics distributed amongst the clients (basically this " -"allows us to display a nice average accuracy and loss for every round)." +"The main changes we have to make to use 🤗 Hugging Face with Flower will " +"be found in the ``get_weights()`` and ``set_weights()`` functions. Under " +"the hood, the ``transformers`` library uses PyTorch, which means we can " +"reuse the ``get_weights()`` and ``set_weights()`` code that we defined in" +" the :doc:`Quickstart PyTorch ` tutorial. As" +" a reminder, in ``get_weights()``, PyTorch model parameters are extracted" +" and represented as a list of NumPy arrays. The ``set_weights()`` " +"function that's the opposite: given a list of NumPy arrays it applies " +"them to an existing PyTorch model. Doing this in fairly easy in PyTorch." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:209 -msgid "Putting everything together" +#: ../../source/tutorial-quickstart-huggingface.rst:254 +#: ../../source/tutorial-quickstart-pytorch.rst:245 +msgid "" +"The specific implementation of ``get_weights()`` and ``set_weights()`` " +"depends on the type of models you use. The ones shown below work for a " +"wide range of PyTorch models but you might need to adjust them if you " +"have more exotic model architectures." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:211 -msgid "We can now start client instances using:" +#: ../../source/tutorial-quickstart-huggingface.rst:269 +#: ../../source/tutorial-quickstart-pytorch.rst:261 +msgid "" +"The rest of the functionality is directly inspired by the centralized " +"case. The ``fit()`` method in the client trains the model using the local" +" dataset. Similarly, the ``evaluate()`` method is used to evaluate the " +"model received on a held-out validation set that the client might have:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:221 +#: ../../source/tutorial-quickstart-huggingface.rst:296 msgid "" -"And they will be able to connect to the server and start the federated " -"training." +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"``local-epochs`` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additional hyperparameters in ``pyproject.toml`` and access them here." 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:223 +#: ../../source/tutorial-quickstart-huggingface.rst:330 +#: ../../source/tutorial-quickstart-mlx.rst:376 +#: ../../source/tutorial-quickstart-pytorch.rst:321 +#: ../../source/tutorial-quickstart-tensorflow.rst:245 +#, fuzzy +msgid "The ServerApp" +msgstr "Flower 서버앱" + +#: ../../source/tutorial-quickstart-huggingface.rst:332 msgid "" -"If you want to check out everything put together, you should check out " -"the `full code example `_ ." +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"|serverappcomponents|_ as opposed to a |client|_ In this example we use " +"the `FedAvg` strategy. To it we pass a randomly initialized model that " +"will server as the global model to federated. Note that the value of " +"``fraction_fit`` is read from the run config. You can find the default " +"value defined in the ``pyproject.toml``." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:226 +#: ../../source/tutorial-quickstart-huggingface.rst:371 msgid "" -"Of course, this is a very basic example, and a lot can be added or " -"modified, it was just to showcase how simply we could federate a Hugging " -"Face workflow using Flower." +"Congratulations! You've successfully built and run your first federated " +"learning system for an LLM." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:229 +#: ../../source/tutorial-quickstart-huggingface.rst:376 msgid "" -"Note that in this example we used :code:`PyTorch`, but we could have very" -" well used :code:`TensorFlow`." +"Check the source code of the extended version of this tutorial in " +"|quickstart_hf_link|_ in the Flower GitHub repository. For a " +"comprehensive example of a federated fine-tuning of an LLM with Flower, " +"refer to the |flowertune|_ example in the Flower GitHub repository." msgstr "" #: ../../source/tutorial-quickstart-ios.rst:-1 @@ -22216,7 +22454,6 @@ msgstr "" #: ../../source/tutorial-quickstart-ios.rst:34 #: ../../source/tutorial-quickstart-scikitlearn.rst:40 -#: ../../source/tutorial-quickstart-tensorflow.rst:29 #: ../../source/tutorial-quickstart-xgboost.rst:55 msgid "Flower Client" msgstr "" @@ -22290,13 +22527,11 @@ msgstr "" #: ../../source/tutorial-quickstart-ios.rst:129 #: ../../source/tutorial-quickstart-scikitlearn.rst:167 -#: ../../source/tutorial-quickstart-tensorflow.rst:98 #: ../../source/tutorial-quickstart-xgboost.rst:341 msgid "Flower Server" msgstr "" #: ../../source/tutorial-quickstart-ios.rst:131 -#: ../../source/tutorial-quickstart-tensorflow.rst:100 msgid "" "For simple workloads we can start a Flower server and leave all the " "configuration possibilities at their default values. In a file named " @@ -22305,12 +22540,10 @@ msgstr "" #: ../../source/tutorial-quickstart-ios.rst:142 #: ../../source/tutorial-quickstart-scikitlearn.rst:230 -#: ../../source/tutorial-quickstart-tensorflow.rst:112 msgid "Train the model, federated!" 
msgstr "" #: ../../source/tutorial-quickstart-ios.rst:144 -#: ../../source/tutorial-quickstart-tensorflow.rst:114 #: ../../source/tutorial-quickstart-xgboost.rst:567 msgid "" "With both client and server ready, we can now run everything and see " @@ -22515,7 +22748,7 @@ msgstr "" "code:`FlowerClient`는 모델 매개변수를 가져오거나 설정하는 메서드 2개, 모델 학습을 위한 메서드 1개, 모델 " "테스트를 위한 메서드 1개 등 총 4개의 메서드를 구현해야 합니다:" -#: ../../source/tutorial-quickstart-jax.rst:165 +#: ../../source/tutorial-quickstart-jax.rst:167 msgid ":code:`set_parameters (optional)`" msgstr ":code:`set_parameters (선택사항)`" @@ -22614,13 +22847,6 @@ msgid "" "api/flwr_datasets.partitioner.IidPartitioner.html#flwr_datasets.partitioner.IidPartitioner>`_." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:19 -#: ../../source/tutorial-quickstart-pytorch.rst:19 -msgid "" -"Now that we have a rough idea of what this example is about, let's get " -"started. First, install Flower in your new environment:" -msgstr "" - #: ../../source/tutorial-quickstart-mlx.rst:27 msgid "" "Then, run the command below. You will be prompted to select of the " @@ -22628,49 +22854,16 @@ msgid "" "type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:35 -#: ../../source/tutorial-quickstart-pytorch.rst:35 -msgid "" -"After running it you'll notice a new directory with your project name has" -" been created. It should have the following structure:" -msgstr "" - -#: ../../source/tutorial-quickstart-mlx.rst:49 -#: ../../source/tutorial-quickstart-pytorch.rst:49 -msgid "" -"If you haven't yet installed the project and its dependencies, you can do" -" so by:" -msgstr "" - #: ../../source/tutorial-quickstart-mlx.rst:57 msgid "To run the project do:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:64 -#: ../../source/tutorial-quickstart-pytorch.rst:64 -msgid "With default arguments you will see an output like this one:" -msgstr "" - #: ../../source/tutorial-quickstart-mlx.rst:106 msgid "" "You can also override the parameters defined in " "``[tool.flwr.app.config]`` section in the ``pyproject.toml`` like this:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:114 -#: ../../source/tutorial-quickstart-pytorch.rst:113 -msgid "" -"What follows is an explanation of each component in the project you just " -"created: dataset partition, the model, defining the ``ClientApp`` and " -"defining the ``ServerApp``." -msgstr "" - -#: ../../source/tutorial-quickstart-mlx.rst:120 -#: ../../source/tutorial-quickstart-pytorch.rst:119 -#, fuzzy -msgid "The Data" -msgstr "Metadata" - #: ../../source/tutorial-quickstart-mlx.rst:122 msgid "" "We will use `Flower Datasets `_ to " @@ -22682,11 +22875,6 @@ msgid "" "api/flwr_datasets.partitioner.html>`_ available in Flower Datasets:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:164 -#: ../../source/tutorial-quickstart-pytorch.rst:157 -msgid "The Model" -msgstr "" - #: ../../source/tutorial-quickstart-mlx.rst:166 msgid "" "We define the model as in the `centralized MLX example " @@ -22700,12 +22888,6 @@ msgid "" "over batches." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:210 -#: ../../source/tutorial-quickstart-pytorch.rst:234 -#, fuzzy -msgid "The ClientApp" -msgstr "클라이언트앱" - #: ../../source/tutorial-quickstart-mlx.rst:212 msgid "" "The main changes we have to make to use `MLX` with `Flower` will be found" @@ -22773,12 +22955,6 @@ msgid "" "method." 
msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:376 -#: ../../source/tutorial-quickstart-pytorch.rst:321 -#, fuzzy -msgid "The ServerApp" -msgstr "Flower 서버앱" - #: ../../source/tutorial-quickstart-mlx.rst:378 msgid "" "To construct a ``ServerApp``, we define a ``server_fn()`` callback with " @@ -22792,6 +22968,7 @@ msgstr "" #: ../../source/tutorial-quickstart-mlx.rst:402 #: ../../source/tutorial-quickstart-pytorch.rst:360 +#: ../../source/tutorial-quickstart-tensorflow.rst:279 msgid "" "Congratulations! You've successfully built and run your first federated " "learning system." @@ -22858,16 +23035,6 @@ msgid "" "and type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:57 -msgid "To run the project, do:" -msgstr "" - -#: ../../source/tutorial-quickstart-pytorch.rst:105 -msgid "" -"You can also override the parameters defined in the " -"``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this:" -msgstr "" - #: ../../source/tutorial-quickstart-pytorch.rst:121 msgid "" "This tutorial uses `Flower Datasets `_ " @@ -22911,22 +23078,6 @@ msgid "" "PyTorch model. Doing this in fairly easy in PyTorch." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:245 -msgid "" -"The specific implementation of ``get_weights()`` and ``set_weights()`` " -"depends on the type of models you use. The ones shown below work for a " -"wide range of PyTorch models but you might need to adjust them if you " -"have more exotic model architectures." -msgstr "" - -#: ../../source/tutorial-quickstart-pytorch.rst:261 -msgid "" -"The rest of the functionality is directly inspired by the centralized " -"case. The ``fit()`` method in the client trains the model using the local" -" dataset. Similarly, the ``evaluate()`` method is used to evaluate the " -"model received on a held-out validation set that the client might have:" -msgstr "" - #: ../../source/tutorial-quickstart-pytorch.rst:294 msgid "" "Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " @@ -22960,6 +23111,7 @@ msgid "" msgstr "" #: ../../source/tutorial-quickstart-pytorch.rst:372 +#: ../../source/tutorial-quickstart-tensorflow.rst:295 #, fuzzy msgid "Video tutorial" msgstr "튜토리얼" @@ -22971,27 +23123,46 @@ msgid "" "that shows the new APIs (as the content above does)" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:-1 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 +msgid "Quickstart PyTorch Lightning" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:7 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with PyTorch Lightning to train an Auto Encoder model on MNIST." +"In this federated learning tutorial we will learn how to train an " +"AutoEncoder model on MNIST using Flower and PyTorch Lightning. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 -msgid "Quickstart PyTorch Lightning" +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:20 +msgid "" +"This will create a new directory called `quickstart-pytorch-lightning` " +"containing the following files:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:10 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:43 msgid "" -"Let's build a horizontal federated learning system using PyTorch " -"Lightning and Flower!" 
+"By default, Flower Simulation Engine will be started and it will create a" +" federation of 4 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." +" To run the project, do:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:94 msgid "" -"Please refer to the `full code example " -"`_ to learn more." +"Each simulated `ClientApp` (two per round) will also log a summary of " +"their local training process. Expect this output to be similar to:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:116 +msgid "" +"Check the `source code `_ of this tutorial in ``examples" +"/quickstart-pytorch-lightning`` in the Flower GitHub repository." msgstr "" #: ../../source/tutorial-quickstart-scikitlearn.rst:-1 @@ -23072,7 +23243,7 @@ msgstr "" msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:49 +#: ../../source/tutorial-quickstart-scikitlearn.rst:50 msgid ":code:`set_initial_params()`" msgstr "" @@ -23128,7 +23299,7 @@ msgstr "" msgid "return the model weight as a list of NumPy ndarrays" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:120 +#: ../../source/tutorial-quickstart-scikitlearn.rst:121 msgid ":code:`set_parameters` (optional)" msgstr "" @@ -23223,7 +23394,6 @@ msgid "" msgstr "" #: ../../source/tutorial-quickstart-scikitlearn.rst:239 -#: ../../source/tutorial-quickstart-tensorflow.rst:122 #: ../../source/tutorial-quickstart-xgboost.rst:575 msgid "" "Once the server is running we can start the clients in different " @@ -23231,7 +23401,6 @@ msgid "" msgstr "" #: ../../source/tutorial-quickstart-scikitlearn.rst:246 -#: ../../source/tutorial-quickstart-tensorflow.rst:129 #: ../../source/tutorial-quickstart-xgboost.rst:582 msgid "Open another terminal and start the second client:" msgstr "" @@ -23256,99 +23425,107 @@ msgstr "" #: ../../source/tutorial-quickstart-tensorflow.rst:-1 msgid "" "Check out this Federated Learning quickstart tutorial for using Flower " -"with TensorFlow to train a MobilNetV2 model on CIFAR-10." +"with TensorFlow to train a CNN model on CIFAR-10." msgstr "" #: ../../source/tutorial-quickstart-tensorflow.rst:5 msgid "Quickstart TensorFlow" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:13 -msgid "Let's build a federated learning system in less than 20 lines of code!" -msgstr "" - -#: ../../source/tutorial-quickstart-tensorflow.rst:15 -msgid "Before Flower can be imported we have to install it:" -msgstr "" - -#: ../../source/tutorial-quickstart-tensorflow.rst:21 +#: ../../source/tutorial-quickstart-tensorflow.rst:7 msgid "" -"Since we want to use the Keras API of TensorFlow (TF), we have to install" -" TF as well:" -msgstr "" - -#: ../../source/tutorial-quickstart-tensorflow.rst:31 -msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" +"In this tutorial we will learn how to train a Convolutional Neural " +"Network on CIFAR-10 using the Flower framework and TensorFlow. First of " +"all, it is recommended to create a virtual environment and run everything" +" within a :doc:`virtualenv `." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:38 +#: ../../source/tutorial-quickstart-tensorflow.rst:13 msgid "" -"We use the Keras utilities of TF to load CIFAR10, a popular colored image" -" classification dataset for machine learning. 
The call to " -":code:`tf.keras.datasets.cifar10.load_data()` downloads CIFAR10, caches " -"it locally, and then returns the entire training and test set as NumPy " -"ndarrays." +"Let's use `flwr new` to create a complete Flower+TensorFlow project. It " +"will generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:47 +#: ../../source/tutorial-quickstart-tensorflow.rst:28 msgid "" -"Next, we need a model. For the purpose of this tutorial, we use " -"MobilNetV2 with 10 output classes:" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``TensorFlow``), give a name to your project," +" and type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:54 +#: ../../source/tutorial-quickstart-tensorflow.rst:118 msgid "" -"The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to train the neural network we defined earlier)." +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create the ``NumPy`` arrays that" +" correspond to their data partition." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:60 +#: ../../source/tutorial-quickstart-tensorflow.rst:147 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses Keras. The :code:`NumPyClient` interface defines three " -"methods which can be implemented in the following way:" +"Next, we need a model. We defined a simple Convolutional Neural Network " +"(CNN), but feel free to replace it with a more sophisticated model if " +"you'd like:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:82 +#: ../../source/tutorial-quickstart-tensorflow.rst:178 msgid "" -"We can now create an instance of our class :code:`CifarClient` and add " -"one line to actually run this client:" +"With `TensorFlow`, we can use the built-in ``get_weights()`` and " +"``set_weights()`` functions, which simplifies the implementation with " +"`Flower`. The rest of the functionality in the ClientApp is directly " +"inspired by the centralized case. The ``fit()`` method in the client " +"trains the model using the local dataset. Similarly, the ``evaluate()`` " +"method is used to evaluate the model received on a held-out validation " +"set that the client might have:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:90 +#: ../../source/tutorial-quickstart-tensorflow.rst:212 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " -"implement a client of type :code:`NumPyClient` you'll need to first call " -"its :code:`to_client()` method. 
The string :code:`\"[::]:8080\"` tells " -"the client which server to connect to. In our case we can run the server " -"and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparameters defined in your " +"``pyproject.toml`` to configure the run. For example, in this tutorial we" +" access the `local-epochs` setting to control the number of epochs a " +"``ClientApp`` will perform when running the ``fit()`` method, in addition" +" to `batch-size`. You could define additional hyperparameters in " +"``pyproject.toml`` and access them here." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:135 -msgid "Each client will have its own dataset." +#: ../../source/tutorial-quickstart-tensorflow.rst:247 +msgid "" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will serve as " +"the global model to federate." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:137 +#: ../../source/tutorial-quickstart-tensorflow.rst:284 msgid "" -"You should now see how the training does in the very first terminal (the " -"one that started the server):" +"Check the source code of the extended version of this tutorial in " +"|quickstart_tf_link|_ in the Flower GitHub repository." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:169 +#: ../../source/tutorial-quickstart-tensorflow.rst:299 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this can be found in :code:`examples" -"/quickstart-tensorflow/client.py`." +"The video shown below shows how to setup a TensorFlow + Flower project " +"using our previously recommended APIs. A new video tutorial will be " +"released that shows the new APIs (as the content above does)" msgstr "" #: ../../source/tutorial-quickstart-xgboost.rst:-1 @@ -25217,7 +25394,7 @@ msgstr "" " 수도 있습니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 -msgid "|e5918c1c06a4434bbe4bf49235e40059|" +msgid "|e87b69b2ada74ea49412df16f4a0b9cc|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 @@ -25234,7 +25411,7 @@ msgstr "" " 바둑과 같은 게임을 하는 것일 수 있습니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 -msgid "|c0165741bd1944f09ec55ce49032377d|" +msgid "|33cacb7d985c4906b348515c1a5cd993|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 @@ -25257,7 +25434,7 @@ msgstr "" "부르리는 것을 듣는 스마트 스피커에서 비롯됩니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 -msgid "|0a0ac9427ac7487b8e52d75ed514f04e|" +msgid "|cc080a555947492fa66131dc3a967603|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 @@ -25275,7 +25452,7 @@ msgstr "" "있습니다. 하지만 여러 조직이 모두 같은 작업을 위해 데이터를 생성하는 것일 수도 있습니다." 
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 -msgid "|5defee3ea4ca40d99fcd3e4ea045be25|" +msgid "|085c3e0fb8664c6aa06246636524b20b|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 @@ -25293,7 +25470,7 @@ msgstr "" "서버는 데이터 센터 어딘가에 있을 수도 있고 클라우드 어딘가에 있을 수도 있습니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 -msgid "|74f26ca701254d3db57d7899bd91eb55|" +msgid "|bfe69c74e48c45d49b50251c38c2a019|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 @@ -25310,7 +25487,7 @@ msgstr "" " 우리가 기본적으로 사용해 온 머신러닝 방법입니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 -msgid "|bda79f21f8154258a40e5766b2634ad7|" +msgid "|ebbecd651f0348d99c6511ea859bf4ca|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 @@ -25332,7 +25509,7 @@ msgstr "" "트래픽을 분석하는 것이 있습니다. 이러한 사례에서 모든 데이터는 자연스럽게 중앙 서버에 존재합니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 -msgid "|89d30862e62e4f9989e193483a08680a|" +msgid "|163117eb654a4273babba413cf8065f5|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 @@ -25349,7 +25526,7 @@ msgstr "" "좋은 모델을 훈련하기에 충분하지 않을 수 있습니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 -msgid "|77e9918671c54b4f86e01369c0785ce8|" +msgid "|452ac3ba453b4cd1be27be1ba7560d64|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 @@ -25516,7 +25693,7 @@ msgstr "" "체크포인트에서 모델 매개변수를 초기화합니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 -msgid "|7e4ccef37cc94148a067107b34eb7447|" +msgid "|f403fcd69e4e44409627e748b404c086|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 @@ -25543,7 +25720,7 @@ msgstr "" "개의 연결 노드만 사용합니다. 그 이유는 점점 더 많은 클라이언트 노드를 선택하면 학습의 효율성이 감소하기 때문입니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 -msgid "|28e47e4cded14479a0846c8e5f22c872|" +msgid "|4b00fe63870145968f8443619a792a42|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 @@ -25570,7 +25747,7 @@ msgstr "" "데이터에서 한 단계 정도로 짧거나 몇 단계(mini-batches)에 불과할 수 있습니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 -msgid "|4b8c5d1afa144294b76ffc76e4658a38|" +msgid "|368378731066486fa4397e89bc6b870c|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 @@ -25596,7 +25773,7 @@ msgstr "" "보냅니다. 보내는 모델 업데이트는 전체 모델 파라미터거나 로컬 교육 중에 누적된 그레디언트(gradient)일 수 있습니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 -msgid "|9dbdb3a0f6cb4a129fac863eaa414c17|" +msgid "|a66aa83d85bf4ffba7ed660b718066da|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 @@ -25645,7 +25822,7 @@ msgstr "" "많은 영향을 미칩니다." #: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 -msgid "|81749d0ac0834c36a83bd38f433fea31|" +msgid "|82324b9af72a4582a81839d55caab767|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 @@ -25764,7 +25941,7 @@ msgstr "" "사용자는 모든 워크로드, 머신러닝 프레임워크 및 모든 프로그래밍 언어를 통합할 수 있습니다." 
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 -msgid "|ed9aae51da70428eab7eef32f21e819e|" +msgid "|fbf2da0da3cc4f8ab3b3eff852d80c41|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 @@ -27424,3 +27601,523 @@ msgstr "" #~ msgid "|c00bf2750bc24d229737a0fe1395f0fc|" #~ msgstr "|c00bf2750bc24d229737a0fe1395f0fc|" +#~ msgid "run\\_client\\_app" +#~ msgstr "run\\_client\\_app" + +#~ msgid "run\\_supernode" +#~ msgstr "run\\_supernode" + +#~ msgid "Retrieve the corresponding layout by the string key." +#~ msgstr "" + +#~ msgid "" +#~ "When there isn't an exact match, " +#~ "all the existing keys in the " +#~ "layout map will be treated as a" +#~ " regex and map against the input " +#~ "key again. The first match will be" +#~ " returned, based on the key insertion" +#~ " order. Return None if there isn't" +#~ " any match found." +#~ msgstr "" + +#~ msgid "the string key as the query for the layout." +#~ msgstr "" + +#~ msgid "Corresponding layout based on the query." +#~ msgstr "" + +#~ msgid "run\\_server\\_app" +#~ msgstr "" + +#~ msgid "run\\_superlink" +#~ msgstr "" + +#~ msgid "Start a Ray-based Flower simulation server." +#~ msgstr "" + +#~ msgid "" +#~ "A function creating `Client` instances. " +#~ "The function must have the signature " +#~ "`client_fn(context: Context). It should return" +#~ " a single client instance of type " +#~ "`Client`. Note that the created client" +#~ " instances are ephemeral and will " +#~ "often be destroyed after a single " +#~ "method invocation. Since client instances " +#~ "are not long-lived, they should " +#~ "not attempt to carry state over " +#~ "method invocations. Any state required " +#~ "by the instance (model, dataset, " +#~ "hyperparameters, ...) should be (re-)created" +#~ " in either the call to `client_fn`" +#~ " or the call to any of the " +#~ "client methods (e.g., load evaluation " +#~ "data in the `evaluate` method itself)." +#~ msgstr "" + +#~ msgid "The total number of clients in this simulation." +#~ msgstr "" + +#~ msgid "" +#~ "UNSUPPORTED, WILL BE REMOVED. USE " +#~ "`num_clients` INSTEAD. List `client_id`s for" +#~ " each client. This is only required" +#~ " if `num_clients` is not set. Setting" +#~ " both `num_clients` and `clients_ids` with" +#~ " `len(clients_ids)` not equal to " +#~ "`num_clients` generates an error. Using " +#~ "this argument will raise an error." +#~ msgstr "" + +#~ msgid "" +#~ "CPU and GPU resources for a single" +#~ " client. Supported keys are `num_cpus` " +#~ "and `num_gpus`. To understand the GPU" +#~ " utilization caused by `num_gpus`, as " +#~ "well as using custom resources, please" +#~ " consult the Ray documentation." +#~ msgstr "" + +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Server`. If no instance" +#~ " is provided, then `start_server` will " +#~ "create one." +#~ msgstr "" + +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Strategy`. If no " +#~ "strategy is provided, then `start_server` " +#~ "will use `flwr.server.strategy.FedAvg`." +#~ msgstr "" + +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.ClientManager`. If no " +#~ "implementation is provided, then " +#~ "`start_simulation` will use " +#~ "`flwr.server.client_manager.SimpleClientManager`." +#~ msgstr "" + +#~ msgid "" +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. 
If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args: { " +#~ "\"ignore_reinit_error\": True, \"include_dashboard\": " +#~ "False } An empty dictionary can " +#~ "be used (ray_init_args={}) to prevent " +#~ "any arguments from being passed to " +#~ "ray.init." +#~ msgstr "" + +#~ msgid "" +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args:" +#~ msgstr "" + +#~ msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#~ msgstr "" + +#~ msgid "" +#~ "An empty dictionary can be used " +#~ "(ray_init_args={}) to prevent any arguments" +#~ " from being passed to ray.init." +#~ msgstr "" + +#~ msgid "" +#~ "Set to True to prevent `ray.shutdown()`" +#~ " in case `ray.is_initialized()=True`." +#~ msgstr "" + +#~ msgid "" +#~ "Optionally specify the type of actor " +#~ "to use. The actor object, which " +#~ "persists throughout the simulation, will " +#~ "be the process in charge of " +#~ "executing a ClientApp wrapping input " +#~ "argument `client_fn`." +#~ msgstr "" + +#~ msgid "" +#~ "If you want to create your own " +#~ "Actor classes, you might need to " +#~ "pass some input argument. You can " +#~ "use this dictionary for such purpose." +#~ msgstr "" + +#~ msgid "" +#~ "(default: \"DEFAULT\") Optional string " +#~ "(\"DEFAULT\" or \"SPREAD\") for the VCE" +#~ " to choose in which node the " +#~ "actor is placed. If you are an " +#~ "advanced user needed more control you" +#~ " can use lower-level scheduling " +#~ "strategies to pin actors to specific " +#~ "compute nodes (e.g. via " +#~ "NodeAffinitySchedulingStrategy). Please note this" +#~ " is an advanced feature. For all " +#~ "details, please refer to the Ray " +#~ "documentation: https://docs.ray.io/en/latest/ray-" +#~ "core/scheduling/index.html" +#~ msgstr "" + +#~ msgid "**hist** -- Object containing metrics from training." +#~ msgstr "" + +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with FastAI to train a vision " +#~ "model on CIFAR-10." +#~ msgstr "" + +#~ msgid "Let's build a federated learning system using fastai and Flower!" +#~ msgstr "" + +#~ msgid "" +#~ "Please refer to the `full code " +#~ "example `_ to learn more." +#~ msgstr "" + +#~ msgid "" +#~ "Check out this Federating Learning " +#~ "quickstart tutorial for using Flower " +#~ "with HuggingFace Transformers in order " +#~ "to fine-tune an LLM." +#~ msgstr "" + +#~ msgid "" +#~ "Let's build a federated learning system" +#~ " using Hugging Face Transformers and " +#~ "Flower!" +#~ msgstr "" + +#~ msgid "" +#~ "We will leverage Hugging Face to " +#~ "federate the training of language models" +#~ " over multiple clients using Flower. " +#~ "More specifically, we will fine-tune " +#~ "a pre-trained Transformer model " +#~ "(distilBERT) for sequence classification over" +#~ " a dataset of IMDB ratings. The " +#~ "end goal is to detect if a " +#~ "movie rating is positive or negative." +#~ msgstr "" + +#~ msgid "Dependencies" +#~ msgstr "" + +#~ msgid "" +#~ "To follow along this tutorial you " +#~ "will need to install the following " +#~ "packages: :code:`datasets`, :code:`evaluate`, " +#~ ":code:`flwr`, :code:`torch`, and " +#~ ":code:`transformers`. 
This can be done " +#~ "using :code:`pip`:" +#~ msgstr "" + +#~ msgid "Standard Hugging Face workflow" +#~ msgstr "" + +#~ msgid "Handling the data" +#~ msgstr "" + +#~ msgid "" +#~ "To fetch the IMDB dataset, we will" +#~ " use Hugging Face's :code:`datasets` " +#~ "library. We then need to tokenize " +#~ "the data and create :code:`PyTorch` " +#~ "dataloaders, this is all done in " +#~ "the :code:`load_data` function:" +#~ msgstr "" + +#~ msgid "Training and testing the model" +#~ msgstr "" + +#~ msgid "" +#~ "Once we have a way of creating " +#~ "our trainloader and testloader, we can" +#~ " take care of the training and " +#~ "testing. This is very similar to " +#~ "any :code:`PyTorch` training or testing " +#~ "loop:" +#~ msgstr "" + +#~ msgid "Creating the model itself" +#~ msgstr "" + +#~ msgid "" +#~ "To create the model itself, we " +#~ "will just load the pre-trained " +#~ "distillBERT model using Hugging Face’s " +#~ ":code:`AutoModelForSequenceClassification` :" +#~ msgstr "" + +#~ msgid "Federating the example" +#~ msgstr "" + +#~ msgid "Creating the IMDBClient" +#~ msgstr "" + +#~ msgid "" +#~ "To federate our example to multiple " +#~ "clients, we first need to write " +#~ "our Flower client class (inheriting from" +#~ " :code:`flwr.client.NumPyClient`). This is very" +#~ " easy, as our model is a " +#~ "standard :code:`PyTorch` model:" +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`get_parameters` function lets the" +#~ " server get the client's parameters. " +#~ "Inversely, the :code:`set_parameters` function " +#~ "allows the server to send its " +#~ "parameters to the client. Finally, the" +#~ " :code:`fit` function trains the model " +#~ "locally for the client, and the " +#~ ":code:`evaluate` function tests the model " +#~ "locally and returns the relevant " +#~ "metrics." +#~ msgstr "" + +#~ msgid "Starting the server" +#~ msgstr "" + +#~ msgid "" +#~ "Now that we have a way to " +#~ "instantiate clients, we need to create" +#~ " our server in order to aggregate " +#~ "the results. Using Flower, this can " +#~ "be done very easily by first " +#~ "choosing a strategy (here, we are " +#~ "using :code:`FedAvg`, which will define " +#~ "the global weights as the average " +#~ "of all the clients' weights at " +#~ "each round) and then using the " +#~ ":code:`flwr.server.start_server` function:" +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`weighted_average` function is there" +#~ " to provide a way to aggregate " +#~ "the metrics distributed amongst the " +#~ "clients (basically this allows us to " +#~ "display a nice average accuracy and " +#~ "loss for every round)." +#~ msgstr "" + +#~ msgid "Putting everything together" +#~ msgstr "" + +#~ msgid "We can now start client instances using:" +#~ msgstr "" + +#~ msgid "" +#~ "And they will be able to connect" +#~ " to the server and start the " +#~ "federated training." +#~ msgstr "" + +#~ msgid "" +#~ "If you want to check out " +#~ "everything put together, you should " +#~ "check out the `full code example " +#~ "`_ ." +#~ msgstr "" + +#~ msgid "" +#~ "Of course, this is a very basic" +#~ " example, and a lot can be " +#~ "added or modified, it was just to" +#~ " showcase how simply we could " +#~ "federate a Hugging Face workflow using" +#~ " Flower." +#~ msgstr "" + +#~ msgid "" +#~ "Note that in this example we used" +#~ " :code:`PyTorch`, but we could have " +#~ "very well used :code:`TensorFlow`." 
+#~ msgstr "" + +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with PyTorch Lightning to train an " +#~ "Auto Encoder model on MNIST." +#~ msgstr "" + +#~ msgid "" +#~ "Let's build a horizontal federated " +#~ "learning system using PyTorch Lightning " +#~ "and Flower!" +#~ msgstr "" + +#~ msgid "" +#~ "Please refer to the `full code " +#~ "example `_ to learn " +#~ "more." +#~ msgstr "" + +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with TensorFlow to train a MobilNetV2" +#~ " model on CIFAR-10." +#~ msgstr "" + +#~ msgid "Let's build a federated learning system in less than 20 lines of code!" +#~ msgstr "" + +#~ msgid "Before Flower can be imported we have to install it:" +#~ msgstr "" + +#~ msgid "" +#~ "Since we want to use the Keras " +#~ "API of TensorFlow (TF), we have to" +#~ " install TF as well:" +#~ msgstr "" + +#~ msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" +#~ msgstr "" + +#~ msgid "" +#~ "We use the Keras utilities of TF" +#~ " to load CIFAR10, a popular colored" +#~ " image classification dataset for machine" +#~ " learning. The call to " +#~ ":code:`tf.keras.datasets.cifar10.load_data()` downloads " +#~ "CIFAR10, caches it locally, and then " +#~ "returns the entire training and test " +#~ "set as NumPy ndarrays." +#~ msgstr "" + +#~ msgid "" +#~ "Next, we need a model. For the " +#~ "purpose of this tutorial, we use " +#~ "MobilNetV2 with 10 output classes:" +#~ msgstr "" + +#~ msgid "" +#~ "The Flower server interacts with clients" +#~ " through an interface called " +#~ ":code:`Client`. When the server selects " +#~ "a particular client for training, it " +#~ "sends training instructions over the " +#~ "network. The client receives those " +#~ "instructions and calls one of the " +#~ ":code:`Client` methods to run your code" +#~ " (i.e., to train the neural network" +#~ " we defined earlier)." +#~ msgstr "" + +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses Keras." +#~ " The :code:`NumPyClient` interface defines " +#~ "three methods which can be implemented" +#~ " in the following way:" +#~ msgstr "" + +#~ msgid "" +#~ "We can now create an instance of" +#~ " our class :code:`CifarClient` and add " +#~ "one line to actually run this " +#~ "client:" +#~ msgstr "" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. If you implement" +#~ " a client of type :code:`NumPyClient` " +#~ "you'll need to first call its " +#~ ":code:`to_client()` method. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." +#~ msgstr "" + +#~ msgid "Each client will have its own dataset." +#~ msgstr "" + +#~ msgid "" +#~ "You should now see how the " +#~ "training does in the very first " +#~ "terminal (the one that started the " +#~ "server):" +#~ msgstr "" + +#~ msgid "" +#~ "Congratulations! 
You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this can be " +#~ "found in :code:`examples/quickstart-" +#~ "tensorflow/client.py`." +#~ msgstr "" + +#~ msgid "|e5918c1c06a4434bbe4bf49235e40059|" +#~ msgstr "" + +#~ msgid "|c0165741bd1944f09ec55ce49032377d|" +#~ msgstr "" + +#~ msgid "|0a0ac9427ac7487b8e52d75ed514f04e|" +#~ msgstr "" + +#~ msgid "|5defee3ea4ca40d99fcd3e4ea045be25|" +#~ msgstr "" + +#~ msgid "|74f26ca701254d3db57d7899bd91eb55|" +#~ msgstr "" + +#~ msgid "|bda79f21f8154258a40e5766b2634ad7|" +#~ msgstr "" + +#~ msgid "|89d30862e62e4f9989e193483a08680a|" +#~ msgstr "" + +#~ msgid "|77e9918671c54b4f86e01369c0785ce8|" +#~ msgstr "" + +#~ msgid "|7e4ccef37cc94148a067107b34eb7447|" +#~ msgstr "" + +#~ msgid "|28e47e4cded14479a0846c8e5f22c872|" +#~ msgstr "" + +#~ msgid "|4b8c5d1afa144294b76ffc76e4658a38|" +#~ msgstr "" + +#~ msgid "|9dbdb3a0f6cb4a129fac863eaa414c17|" +#~ msgstr "" + +#~ msgid "|81749d0ac0834c36a83bd38f433fea31|" +#~ msgstr "" + +#~ msgid "|ed9aae51da70428eab7eef32f21e819e|" +#~ msgstr "" + diff --git a/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po b/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po index d5f52b193e87..44223940cdce 100644 --- a/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po +++ b/doc/locales/pt_BR/LC_MESSAGES/framework-docs.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: Flower main\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2024-09-15 09:09+0200\n" +"POT-Creation-Date: 2024-09-24 00:29+0000\n" "PO-Revision-Date: 2024-05-25 11:09+0000\n" "Last-Translator: Gustavo Bertoli \n" "Language: pt_BR\n" @@ -17,7 +17,7 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.15.0\n" +"Generated-By: Babel 2.16.0\n" #: ../../source/contributor-explanation-public-and-private-apis.rst:2 msgid "Public and private APIs" @@ -1401,7 +1401,7 @@ msgstr "" msgid "Setting up the repository" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:12 +#: ../../source/contributor-tutorial-contribute-on-github.rst:21 msgid "**Create a GitHub account and setup Git**" msgstr "" @@ -1435,7 +1435,7 @@ msgid "" "history back to GitHub." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:23 +#: ../../source/contributor-tutorial-contribute-on-github.rst:32 msgid "**Forking the Flower repository**" msgstr "" @@ -1455,7 +1455,7 @@ msgid "" " the top left corner that you are looking at your own version of Flower." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:34 +#: ../../source/contributor-tutorial-contribute-on-github.rst:47 msgid "**Cloning your forked repository**" msgstr "" @@ -1479,7 +1479,7 @@ msgid "" "it) folder in the current working directory." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:49 +#: ../../source/contributor-tutorial-contribute-on-github.rst:66 msgid "**Add origin**" msgstr "" @@ -1501,7 +1501,7 @@ msgid "" "terminal:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:68 +#: ../../source/contributor-tutorial-contribute-on-github.rst:90 msgid "**Add upstream**" msgstr "" @@ -1556,7 +1556,7 @@ msgstr "" msgid "And with Flower's repository:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:114 +#: ../../source/contributor-tutorial-contribute-on-github.rst:122 msgid "**Create a new branch**" msgstr "" @@ -1573,7 +1573,7 @@ msgid "" "directory:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:124 +#: ../../source/contributor-tutorial-contribute-on-github.rst:125 msgid "**Make changes**" msgstr "" @@ -1581,7 +1581,7 @@ msgstr "" msgid "Write great code and create wonderful changes using your favorite editor!" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:127 +#: ../../source/contributor-tutorial-contribute-on-github.rst:138 msgid "**Test and format your code**" msgstr "" @@ -1596,7 +1596,7 @@ msgstr "" msgid "To do so, we have written a few scripts that you can execute:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:140 +#: ../../source/contributor-tutorial-contribute-on-github.rst:150 msgid "**Stage changes**" msgstr "" @@ -1617,7 +1617,7 @@ msgid "" "the :code:`git status` command." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:152 +#: ../../source/contributor-tutorial-contribute-on-github.rst:160 msgid "**Commit changes**" msgstr "" @@ -1634,7 +1634,7 @@ msgid "" "example would be :code:`git commit -m \"Add images to README\"`." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:162 +#: ../../source/contributor-tutorial-contribute-on-github.rst:171 msgid "**Push the changes to the fork**" msgstr "" @@ -1655,7 +1655,7 @@ msgstr "" msgid "Creating and merging a pull request (PR)" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:176 +#: ../../source/contributor-tutorial-contribute-on-github.rst:206 msgid "**Create the PR**" msgstr "" @@ -1718,7 +1718,7 @@ msgid "" "anyone, you have the option to create a draft pull request:" msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:208 +#: ../../source/contributor-tutorial-contribute-on-github.rst:209 msgid "**Making new changes**" msgstr "" @@ -1729,7 +1729,7 @@ msgid "" " associated with the PR." msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:211 +#: ../../source/contributor-tutorial-contribute-on-github.rst:231 msgid "**Review the PR**" msgstr "" @@ -1765,7 +1765,7 @@ msgid "" "review." 
msgstr "" -#: ../../source/contributor-tutorial-contribute-on-github.rst:233 +#: ../../source/contributor-tutorial-contribute-on-github.rst:251 msgid "**Once the PR is merged**" msgstr "" @@ -2033,6 +2033,7 @@ msgstr "" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:5 #: ../../source/docker/run-as-subprocess.rst:11 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:15 #: ../../source/docker/tutorial-quickstart-docker-compose.rst:12 #: ../../source/docker/tutorial-quickstart-docker.rst:11 msgid "Prerequisites" @@ -2737,6 +2738,221 @@ msgid "" " the SuperNode to execute the ClientApp as a subprocess:" msgstr "" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:2 +msgid "Run Flower Quickstart Examples with Docker Compose" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:4 +msgid "" +"Flower provides a set of `quickstart examples " +"`_ to help you get " +"started with the framework. These examples are designed to demonstrate " +"the capabilities of Flower and by default run using the Simulation " +"Engine. This guide demonstrates how to run them using Flower's Deployment" +" Engine via Docker Compose." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:11 +msgid "" +"Some quickstart examples may have limitations or requirements that " +"prevent them from running on every environment. For more information, " +"please see `Limitations`_." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:17 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:14 +#: ../../source/docker/tutorial-quickstart-docker.rst:13 +msgid "Before you start, make sure that:" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:19 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:16 +#: ../../source/docker/tutorial-quickstart-docker.rst:15 +msgid "The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:20 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:17 +#: ../../source/docker/tutorial-quickstart-docker.rst:16 +#, fuzzy +msgid "The Docker daemon is running." +msgstr "Verifique que o serviço Docker está rodando." + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:21 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:18 +msgid "Docker Compose is `installed `_." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:24 +msgid "Run the Quickstart Example" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:26 +msgid "" +"Clone the quickstart example you like to run. 
For example, ``quickstart-" +"pytorch``:" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:34 +msgid "" +"Download the `compose.yml " +"`_" +" file into the example directory:" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:41 +msgid "Build and start the services using the following command:" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:47 +msgid "" +"Append the following lines to the end of the ``pyproject.toml`` file and " +"save it:" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:49 +#: ../../source/docker/tutorial-quickstart-docker.rst:319 +msgid "pyproject.toml" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:58 +msgid "" +"You can customize the string that follows ``tool.flwr.federations.`` to " +"fit your needs. However, please note that the string cannot contain a dot" +" (``.``)." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:61 +msgid "" +"In this example, ``local-deployment`` has been used. Just remember to " +"replace ``local-deployment`` with your chosen name in both the " +"``tool.flwr.federations.`` string and the corresponding ``flwr run .`` " +"command." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:65 +#, fuzzy +msgid "Run the example:" +msgstr "Exemplo" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:71 +msgid "Follow the logs of the SuperExec service:" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:77 +msgid "" +"That is all it takes! You can monitor the progress of the run through the" +" logs of the SuperExec." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:80 +msgid "Run a Different Quickstart Example" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:82 +msgid "" +"To run a different quickstart example, such as ``quickstart-tensorflow``," +" first, shut down the Docker Compose services of the current example:" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:89 +msgid "After that, you can repeat the steps above." 
+msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:92 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:98 +msgid "Limitations" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:97 +msgid "Quickstart Example" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:99 +msgid "quickstart-fastai" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:100 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:102 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:110 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:112 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:116 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:118 +#: ../../source/ref-changelog.md:33 ../../source/ref-changelog.md:399 +#: ../../source/ref-changelog.md:676 ../../source/ref-changelog.md:740 +#: ../../source/ref-changelog.md:798 ../../source/ref-changelog.md:867 +#: ../../source/ref-changelog.md:929 +msgid "None" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:101 +msgid "quickstart-huggingface" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:103 +msgid "quickstart-jax" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:104 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:106 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:120 +msgid "" +"The example has not yet been updated to work with the latest ``flwr`` " +"version." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:105 +msgid "quickstart-mlcube" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:107 +msgid "quickstart-mlx" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:108 +msgid "" +"`Requires to run on macOS with Apple Silicon `_." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:109 +msgid "quickstart-monai" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:111 +msgid "quickstart-pandas" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:113 +msgid "quickstart-pytorch-lightning" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:114 +msgid "" +"Requires an older pip version that is not supported by the Flower Docker " +"images." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:115 +msgid "quickstart-pytorch" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:117 +msgid "quickstart-sklearn-tabular" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:119 +msgid "quickstart-tabnet" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:121 +msgid "quickstart-tensorflow" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:122 +msgid "Only runs on AMD64." +msgstr "" + #: ../../source/docker/set-environment-variables.rst:2 msgid "Set Environment Variables" msgstr "" @@ -2765,22 +2981,6 @@ msgid "" " understanding the basic workflow that uses the minimum configurations." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:14 -#: ../../source/docker/tutorial-quickstart-docker.rst:13 -msgid "Before you start, make sure that:" -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:16 -#: ../../source/docker/tutorial-quickstart-docker.rst:15 -msgid "The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally." -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:17 -#: ../../source/docker/tutorial-quickstart-docker.rst:16 -#, fuzzy -msgid "The Docker daemon is running." -msgstr "Verifique que o serviço Docker está rodando." - #: ../../source/docker/tutorial-quickstart-docker-compose.rst:21 #: ../../source/docker/tutorial-quickstart-docker.rst:19 msgid "Step 1: Set Up" @@ -3197,10 +3397,6 @@ msgstr "" msgid "Add the following lines to the ``pyproject.toml``:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker.rst:319 -msgid "pyproject.toml" -msgstr "" - #: ../../source/docker/tutorial-quickstart-docker.rst:326 msgid "Run the ``quickstart-docker`` project by executing the command:" msgstr "" @@ -3248,6 +3444,7 @@ msgstr "" msgid "Remove the containers and the bridge network:" msgstr "" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:401 #: ../../source/docker/tutorial-quickstart-docker.rst:399 msgid "Where to Go Next" msgstr "" @@ -3282,10 +3479,6 @@ msgid "" "configuration that best suits your project's needs." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:18 -msgid "Docker Compose is `installed `_." -msgstr "" - #: ../../source/docker/tutorial-quickstart-docker-compose.rst:23 msgid "Clone the Docker Compose ``complete`` directory:" msgstr "" @@ -3478,7 +3671,7 @@ msgstr "" #: ../../source/docker/tutorial-quickstart-docker-compose.rst:188 #: ../../source/docker/tutorial-quickstart-docker-compose.rst:241 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:362 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:369 msgid "Rerun the ``quickstart-compose`` project:" msgstr "" @@ -3542,74 +3735,78 @@ msgstr "" msgid "compose.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:303 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:310 msgid "" "If you also want to enable TLS for the new SuperNodes, duplicate the " "SuperNode definition for each new SuperNode service in the ``with-" "tls.yml`` file." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:306 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:313 msgid "" "Make sure that the names of the services match with the one in the " "``compose.yml`` file." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:308 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:315 msgid "In ``with-tls.yml``, add the following:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:310 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:317 msgid "with-tls.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:332 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:339 msgid "Step 8: Persisting the SuperLink State and Enabling TLS" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:334 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:341 msgid "" "To run Flower with persisted SuperLink state and enabled TLS, a slight " "change in the ``with-state.yml`` file is required:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:337 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:344 msgid "Comment out the lines 2-4 and uncomment the lines 5-9:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:339 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:346 msgid "with-state.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:356 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:363 msgid "Restart the services:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:370 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:377 msgid "Step 9: Merge Multiple Compose Files" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:372 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:379 msgid "" "You can merge multiple Compose files into a single file. For instance, if" " you wish to combine the basic configuration with the TLS configuration, " "execute the following command:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:380 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:387 msgid "" "This will merge the contents of ``compose.yml`` and ``with-tls.yml`` into" " a new file called ``my_compose.yml``." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:384 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:391 msgid "Step 10: Clean Up" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:386 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:393 msgid "Remove all services and volumes:" msgstr "" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:403 +msgid ":doc:`run-quickstart-examples-docker-compose`" +msgstr "" + #: ../../source/docker/use-a-different-version.rst:2 msgid "Use a Different Flower Version" msgstr "" @@ -3873,7 +4070,7 @@ msgid "" "one method for testing the model:" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:218 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 msgid ":code:`set_parameters`" msgstr "" @@ -3904,9 +4101,9 @@ msgid "" ":code:`ndarray`'s (which is what :code:`flwr.client.NumPyClient` expects)" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:223 -#: ../../source/tutorial-quickstart-jax.rst:171 -#: ../../source/tutorial-quickstart-scikitlearn.rst:123 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 +#: ../../source/tutorial-quickstart-jax.rst:173 +#: ../../source/tutorial-quickstart-scikitlearn.rst:125 msgid ":code:`fit`" msgstr "" @@ -3928,9 +4125,9 @@ msgstr "" msgid "get the updated local model weights and return them to the server" msgstr "" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:227 -#: ../../source/tutorial-quickstart-jax.rst:175 -#: ../../source/tutorial-quickstart-scikitlearn.rst:127 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 +#: ../../source/tutorial-quickstart-jax.rst:178 +#: ../../source/tutorial-quickstart-scikitlearn.rst:128 msgid ":code:`evaluate`" msgstr "" @@ -4019,7 +4216,7 @@ msgid "" " individual's information remains hidden in the crowd." msgstr "" -#: ../../source/explanation-differential-privacy.rst:16 +#: ../../source/explanation-differential-privacy.rst:-1 msgid "DP Intro" msgstr "" @@ -4129,8 +4326,8 @@ msgid "" "the client's data." msgstr "" +#: ../../source/explanation-differential-privacy.rst:-1 #: ../../source/explanation-differential-privacy.rst:68 -#: ../../source/explanation-differential-privacy.rst:71 #: ../../source/how-to-use-differential-privacy.rst:11 msgid "Central Differential Privacy" msgstr "" @@ -4157,7 +4354,7 @@ msgid "" "that larger updates are scaled down to fit within the norm `S`." msgstr "" -#: ../../source/explanation-differential-privacy.rst:84 +#: ../../source/explanation-differential-privacy.rst:-1 msgid "clipping" msgstr "" @@ -4202,8 +4399,8 @@ msgid "" "others." msgstr "" +#: ../../source/explanation-differential-privacy.rst:-1 #: ../../source/explanation-differential-privacy.rst:105 -#: ../../source/explanation-differential-privacy.rst:110 #: ../../source/how-to-use-differential-privacy.rst:96 msgid "Local Differential Privacy" msgstr "" @@ -4434,7 +4631,7 @@ msgstr "" msgid "This is sometimes called a hub-and-spoke topology:" msgstr "" -#: ../../source/explanation-flower-architecture.rst:18 +#: ../../source/explanation-flower-architecture.rst:24 msgid "Hub-and-spoke topology in federated learning" msgstr "" @@ -4506,7 +4703,7 @@ msgid "" "`missing link` between all those SuperNodes." 
msgstr "" -#: ../../source/explanation-flower-architecture.rst:65 +#: ../../source/explanation-flower-architecture.rst:71 #, fuzzy msgid "Basic Flower architecture" msgstr "Arquitetura do Flower" @@ -4543,7 +4740,7 @@ msgid "" "SuperNodes." msgstr "" -#: ../../source/explanation-flower-architecture.rst:91 +#: ../../source/explanation-flower-architecture.rst:97 msgid "Multi-tenancy federated learning architecture" msgstr "" @@ -4565,7 +4762,7 @@ msgid "" "their corresponding ``ClientApp``\\s:" msgstr "" -#: ../../source/explanation-flower-architecture.rst:107 +#: ../../source/explanation-flower-architecture.rst:113 msgid "Multi-tenancy federated learning architecture - Run 1" msgstr "" @@ -4581,7 +4778,7 @@ msgid "" " to participate in the training:" msgstr "" -#: ../../source/explanation-flower-architecture.rst:119 +#: ../../source/explanation-flower-architecture.rst:125 msgid "Multi-tenancy federated learning architecture - Run 2" msgstr "" @@ -4617,7 +4814,7 @@ msgid "" "developer machine." msgstr "" -#: ../../source/explanation-flower-architecture.rst:145 +#: ../../source/explanation-flower-architecture.rst:151 msgid "Flower Deployment Engine with SuperExec" msgstr "" @@ -7286,7 +7483,7 @@ msgid "" "adaptive clipping." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:25 +#: ../../source/how-to-use-differential-privacy.rst:-1 msgid "server side clipping" msgstr "" @@ -7315,7 +7512,7 @@ msgid "" ":code:`DifferentialPrivacyClientSideAdaptiveClipping`." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:57 +#: ../../source/how-to-use-differential-privacy.rst:-1 msgid "client side clipping" msgstr "" @@ -7342,7 +7539,7 @@ msgid "" "clipping norm value, sensitivity, epsilon, and delta." msgstr "" -#: ../../source/how-to-use-differential-privacy.rst:99 +#: ../../source/how-to-use-differential-privacy.rst:-1 msgid "local DP mod" msgstr "" @@ -7725,11 +7922,32 @@ msgstr "" msgid "Arguments" msgstr "Argumento de compilação" -#: ../../flwr install:1 new:1 run:1 +#: ../../flwr install:1 log:1 new:1 run:1 #, fuzzy msgid "Optional argument" msgstr "Argumento de compilação" +#: ../../flwr log:1 +msgid "Get logs from a Flower project run." +msgstr "" + +#: ../../flwr log:1 +msgid "Flag to stream or print logs from the Flower run" +msgstr "" + +#: ../../flwr log +msgid "default" +msgstr "" + +#: ../../flwr log:1 +msgid "``True``" +msgstr "" + +#: ../../flwr log:1 +#, fuzzy +msgid "Required argument" +msgstr "Argumento de compilação" + #: ../../flwr new:1 msgid "Create new Flower App." msgstr "" @@ -7812,7 +8030,7 @@ msgid "Modules" msgstr "" #: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`client `\\" +msgid ":py:obj:`flwr.client `\\" msgstr "" #: ../../source/ref-api/flwr.rst:35::1 flwr.client:1 of @@ -7820,7 +8038,7 @@ msgid "Flower client." msgstr "" #: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`common `\\" +msgid ":py:obj:`flwr.common `\\" msgstr "" #: ../../source/ref-api/flwr.rst:35::1 flwr.common:1 of @@ -7828,7 +8046,7 @@ msgid "Common components shared between server and client." msgstr "" #: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`server `\\" +msgid ":py:obj:`flwr.server `\\" msgstr "" #: ../../source/ref-api/flwr.rst:35::1 @@ -7838,7 +8056,7 @@ msgid "Flower server." msgstr "" #: ../../source/ref-api/flwr.rst:35::1 -msgid ":py:obj:`simulation `\\" +msgid ":py:obj:`flwr.simulation `\\" msgstr "" #: ../../source/ref-api/flwr.rst:35::1 flwr.simulation:1 of @@ -7918,7 +8136,7 @@ msgid "Abstract base class for Flower clients using NumPy." 
msgstr "" #: ../../source/ref-api/flwr.client.rst:50::1 -msgid ":py:obj:`mod `\\" +msgid ":py:obj:`flwr.client.mod `\\" msgstr "" #: ../../source/ref-api/flwr.client.rst:50::1 flwr.client.mod:1 of @@ -8115,48 +8333,57 @@ msgstr "" msgid "Getter for `Context` client attribute." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst -#: ../../source/ref-api/flwr.client.NumPyClient.rst -#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst -#: ../../source/ref-api/flwr.common.Array.rst -#: ../../source/ref-api/flwr.common.ConfigsRecord.rst -#: ../../source/ref-api/flwr.common.Context.rst -#: ../../source/ref-api/flwr.common.Error.rst -#: ../../source/ref-api/flwr.common.Message.rst -#: ../../source/ref-api/flwr.common.Metadata.rst -#: ../../source/ref-api/flwr.common.MetricsRecord.rst #: ../../source/ref-api/flwr.common.Parameters.rst:2 -#: ../../source/ref-api/flwr.common.ParametersRecord.rst -#: ../../source/ref-api/flwr.common.RecordSet.rst -#: ../../source/ref-api/flwr.server.ClientManager.rst -#: ../../source/ref-api/flwr.server.Driver.rst -#: ../../source/ref-api/flwr.server.ServerAppComponents.rst -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst -#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst -#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst -#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst -#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst -#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst -#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst -#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst -#: ../../source/ref-api/flwr.server.strategy.FedProx.rst -#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst -#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst -#: ../../source/ref-api/flwr.server.strategy.Krum.rst -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst -#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst -#: ../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst -#: ../../source/ref-api/flwr.simulation.run_simulation.rst -#: ../../source/ref-api/flwr.simulation.start_simulation.rst #: flwr.client.app.start_client flwr.client.app.start_numpy_client -#: flwr.server.app.start_server -#: flwr.server.driver.driver.Driver.send_and_receive of +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.mod.localdp_mod.LocalDpMod +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.context.Context flwr.common.message.Error +#: flwr.common.message.Message flwr.common.message.Message.create_error_reply +#: flwr.common.message.Message.create_reply flwr.common.message.Metadata +#: flwr.common.record.configsrecord.ConfigsRecord +#: flwr.common.record.metricsrecord.MetricsRecord +#: 
flwr.common.record.parametersrecord.Array +#: flwr.common.record.parametersrecord.ParametersRecord +#: flwr.common.record.recordset.RecordSet flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.ClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.serverapp_components.ServerAppComponents +#: flwr.server.strategy.bulyan.Bulyan +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.fedadagrad.FedAdagrad +#: flwr.server.strategy.fedadam.FedAdam flwr.server.strategy.fedavg.FedAvg +#: flwr.server.strategy.fedavg_android.FedAvgAndroid +#: flwr.server.strategy.fedavgm.FedAvgM flwr.server.strategy.fedopt.FedOpt +#: flwr.server.strategy.fedprox.FedProx +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg +#: flwr.server.strategy.fedyogi.FedYogi flwr.server.strategy.krum.Krum +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow +#: flwr.simulation.run_simulation.run_simulation of msgid "Parameters" msgstr "" @@ -8167,21 +8394,31 @@ msgid "" "customize the local evaluation process." 
msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst -#: ../../source/ref-api/flwr.client.NumPyClient.rst -#: ../../source/ref-api/flwr.common.ConfigsRecord.rst -#: ../../source/ref-api/flwr.common.Message.rst -#: ../../source/ref-api/flwr.common.MetricsRecord.rst -#: ../../source/ref-api/flwr.common.ParametersRecord.rst -#: ../../source/ref-api/flwr.server.ClientManager.rst -#: ../../source/ref-api/flwr.server.Driver.rst -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst -#: ../../source/ref-api/flwr.simulation.start_simulation.rst -#: flwr.server.app.start_server -#: flwr.server.driver.driver.Driver.send_and_receive of +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters of msgid "Returns" msgstr "" @@ -8191,18 +8428,29 @@ msgid "" "details such as the number of local data examples used for evaluation." 
msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst -#: ../../source/ref-api/flwr.client.NumPyClient.rst -#: ../../source/ref-api/flwr.common.Message.rst -#: ../../source/ref-api/flwr.server.ClientManager.rst -#: ../../source/ref-api/flwr.server.Driver.rst -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst -#: ../../source/ref-api/flwr.simulation.start_simulation.rst -#: flwr.server.app.start_server -#: flwr.server.driver.driver.Driver.send_and_receive of +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters of msgid "Return type" msgstr "" @@ -8521,6 +8769,11 @@ msgstr "" msgid ":py:obj:`make_ffn `\\ \\(ffn\\, mods\\)" msgstr "" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.utils.make_ffn:1 of +msgid "." +msgstr "" + #: ../../source/ref-api/flwr.client.mod.rst:28::1 msgid "" ":py:obj:`message_size_mod `\\ \\(msg\\," @@ -8685,10 +8938,6 @@ msgstr "" msgid "make\\_ffn" msgstr "" -#: flwr.client.mod.utils.make_ffn:1 of -msgid "." -msgstr "" - #: ../../source/ref-api/flwr.client.mod.message_size_mod.rst:2 msgid "message\\_size\\_mod" msgstr "" @@ -8715,14 +8964,6 @@ msgstr "" msgid "secaggplus\\_mod" msgstr "" -#: ../../source/ref-api/flwr.client.run_client_app.rst:2 -msgid "run\\_client\\_app" -msgstr "" - -#: ../../source/ref-api/flwr.client.run_supernode.rst:2 -msgid "run\\_supernode" -msgstr "" - #: ../../source/ref-api/flwr.client.start_client.rst:2 msgid "start\\_client" msgstr "" @@ -9428,12 +9669,7 @@ msgid "Return number of Bytes stored in this object." msgstr "" #: collections.abc.MutableMapping.clear:1::1 of -msgid ":py:obj:`get `\\ \\(key\\[\\, default\\]\\)" -msgstr "" - -#: collections.abc.Mapping.get:1 -#: collections.abc.MutableMapping.clear:1::1 of -msgid "Retrieve the corresponding layout by the string key." +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" msgstr "" #: collections.abc.MutableMapping.clear:1::1 of @@ -9490,22 +9726,6 @@ msgstr "" msgid "This function counts booleans as occupying 1 Byte." 
msgstr "" -#: collections.abc.Mapping.get:3 of -msgid "" -"When there isn't an exact match, all the existing keys in the layout map " -"will be treated as a regex and map against the input key again. The first" -" match will be returned, based on the key insertion order. Return None if" -" there isn't any match found." -msgstr "" - -#: collections.abc.Mapping.get:8 of -msgid "the string key as the query for the layout." -msgstr "" - -#: collections.abc.Mapping.get:10 of -msgid "Corresponding layout based on the query." -msgstr "" - #: ../../source/ref-api/flwr.common.Context.rst:2 msgid "Context" msgstr "" @@ -10218,7 +10438,7 @@ msgstr "" msgid "The encoding in which to encode the string." msgstr "" -#: flwr.common.EventType.encode:5 of +#: flwr.common.EventType.encode:9 of msgid "errors" msgstr "" @@ -10394,7 +10614,7 @@ msgid "" "string." msgstr "" -#: flwr.common.EventType.replace:3 of +#: flwr.common.EventType.replace:5 of msgid "count" msgstr "" @@ -10430,7 +10650,7 @@ msgid "" "strings and the original string." msgstr "" -#: flwr.common.EventType.rsplit:3 flwr.common.EventType.split:3 of +#: flwr.common.EventType.rsplit:7 flwr.common.EventType.split:7 of msgid "sep" msgstr "" @@ -10445,7 +10665,7 @@ msgid "" " empty strings from the result." msgstr "" -#: flwr.common.EventType.rsplit:9 flwr.common.EventType.split:9 of +#: flwr.common.EventType.rsplit:11 flwr.common.EventType.split:11 of msgid "maxsplit" msgstr "" @@ -10486,7 +10706,7 @@ msgid "" "remaining cased characters have lower case." msgstr "" -#: flwr.common.EventType.translate:3 of +#: flwr.common.EventType.translate:5 of msgid "table" msgstr "" @@ -10901,7 +11121,7 @@ msgid ":py:obj:`count_bytes `\\ \\(\\)" msgstr "" #: collections.abc.MutableMapping.clear:1::1 of -msgid ":py:obj:`get `\\ \\(key\\[\\, default\\]\\)" +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" msgstr "" #: collections.abc.MutableMapping.clear:1::1 of @@ -11033,9 +11253,7 @@ msgid ":py:obj:`count_bytes `\\ \\(\\) msgstr "" #: collections.abc.MutableMapping.clear:1::1 of -msgid "" -":py:obj:`get `\\ \\(key\\[\\, " -"default\\]\\)" +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" msgstr "" #: collections.abc.MutableMapping.clear:1::1 of @@ -11362,7 +11580,7 @@ msgid "Provides a pool of available clients." msgstr "" #: ../../source/ref-api/flwr.server.rst:56::1 -msgid ":py:obj:`strategy `\\" +msgid ":py:obj:`flwr.server.strategy `\\" msgstr "" #: ../../source/ref-api/flwr.server.rst:56::1 @@ -11371,7 +11589,7 @@ msgid "Contains the strategy abstraction and different implementations." msgstr "" #: ../../source/ref-api/flwr.server.rst:56::1 -msgid ":py:obj:`workflow `\\" +msgid ":py:obj:`flwr.server.workflow `\\" msgstr "" #: ../../source/ref-api/flwr.server.rst:56::1 @@ -11848,8 +12066,7 @@ msgid "" msgstr "" #: flwr.server.app.start_server:9 -#: flwr.server.serverapp_components.ServerAppComponents:6 -#: flwr.simulation.app.start_simulation:29 of +#: flwr.server.serverapp_components.ServerAppComponents:6 of msgid "" "Currently supported values are `num_rounds` (int, default: 1) and " "`round_timeout` in seconds (float, default: None)." 
@@ -11965,14 +12182,6 @@ msgstr "" msgid "**success**" msgstr "" -#: ../../source/ref-api/flwr.server.run_server_app.rst:2 -msgid "run\\_server\\_app" -msgstr "" - -#: ../../source/ref-api/flwr.server.run_superlink.rst:2 -msgid "run\\_superlink" -msgstr "" - #: ../../source/ref-api/flwr.server.start_server.rst:2 msgid "start\\_server" msgstr "" @@ -14999,13 +15208,13 @@ msgstr "" #: ../../source/ref-api/flwr.simulation.rst:18::1 msgid "" -":py:obj:`start_simulation `\\ \\(\\*\\," -" client\\_fn\\, num\\_clients\\)" +":py:obj:`start_simulation `\\ " +"\\(\\*args\\, \\*\\*kwargs\\)" msgstr "" #: ../../source/ref-api/flwr.simulation.rst:18::1 -#: flwr.simulation.app.start_simulation:1 of -msgid "Start a Ray-based Flower simulation server." +#: flwr.simulation.start_simulation:1 of +msgid "Log error stating that module `ray` could not be imported." msgstr "" #: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 @@ -15066,120 +15275,6 @@ msgstr "" msgid "start\\_simulation" msgstr "" -#: flwr.simulation.app.start_simulation:3 of -msgid "" -"A function creating `Client` instances. The function must have the " -"signature `client_fn(context: Context). It should return a single client " -"instance of type `Client`. Note that the created client instances are " -"ephemeral and will often be destroyed after a single method invocation. " -"Since client instances are not long-lived, they should not attempt to " -"carry state over method invocations. Any state required by the instance " -"(model, dataset, hyperparameters, ...) should be (re-)created in either " -"the call to `client_fn` or the call to any of the client methods (e.g., " -"load evaluation data in the `evaluate` method itself)." -msgstr "" - -#: flwr.simulation.app.start_simulation:13 of -msgid "The total number of clients in this simulation." -msgstr "" - -#: flwr.simulation.app.start_simulation:15 of -msgid "" -"UNSUPPORTED, WILL BE REMOVED. USE `num_clients` INSTEAD. List " -"`client_id`s for each client. This is only required if `num_clients` is " -"not set. Setting both `num_clients` and `clients_ids` with " -"`len(clients_ids)` not equal to `num_clients` generates an error. Using " -"this argument will raise an error." -msgstr "" - -#: flwr.simulation.app.start_simulation:21 of -msgid "" -"CPU and GPU resources for a single client. Supported keys are `num_cpus` " -"and `num_gpus`. To understand the GPU utilization caused by `num_gpus`, " -"as well as using custom resources, please consult the Ray documentation." -msgstr "" - -#: flwr.simulation.app.start_simulation:26 of -msgid "" -"An implementation of the abstract base class `flwr.server.Server`. If no " -"instance is provided, then `start_server` will create one." -msgstr "" - -#: flwr.simulation.app.start_simulation:32 of -msgid "" -"An implementation of the abstract base class `flwr.server.Strategy`. If " -"no strategy is provided, then `start_server` will use " -"`flwr.server.strategy.FedAvg`." -msgstr "" - -#: flwr.simulation.app.start_simulation:36 of -msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." -" If no implementation is provided, then `start_simulation` will use " -"`flwr.server.client_manager.SimpleClientManager`." -msgstr "" - -#: flwr.simulation.app.start_simulation:40 of -msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. 
If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args: { \"ignore_reinit_error\": True, " -"\"include_dashboard\": False } An empty dictionary can be used " -"(ray_init_args={}) to prevent any arguments from being passed to " -"ray.init." -msgstr "" - -#: flwr.simulation.app.start_simulation:40 of -msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args:" -msgstr "" - -#: flwr.simulation.app.start_simulation:44 of -msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" -msgstr "" - -#: flwr.simulation.app.start_simulation:46 of -msgid "" -"An empty dictionary can be used (ray_init_args={}) to prevent any " -"arguments from being passed to ray.init." -msgstr "" - -#: flwr.simulation.app.start_simulation:49 of -msgid "" -"Set to True to prevent `ray.shutdown()` in case " -"`ray.is_initialized()=True`." -msgstr "" - -#: flwr.simulation.app.start_simulation:51 of -msgid "" -"Optionally specify the type of actor to use. The actor object, which " -"persists throughout the simulation, will be the process in charge of " -"executing a ClientApp wrapping input argument `client_fn`." -msgstr "" - -#: flwr.simulation.app.start_simulation:55 of -msgid "" -"If you want to create your own Actor classes, you might need to pass some" -" input argument. You can use this dictionary for such purpose." -msgstr "" - -#: flwr.simulation.app.start_simulation:58 of -msgid "" -"(default: \"DEFAULT\") Optional string (\"DEFAULT\" or \"SPREAD\") for " -"the VCE to choose in which node the actor is placed. If you are an " -"advanced user needed more control you can use lower-level scheduling " -"strategies to pin actors to specific compute nodes (e.g. via " -"NodeAffinitySchedulingStrategy). Please note this is an advanced feature." -" For all details, please refer to the Ray documentation: " -"https://docs.ray.io/en/latest/ray-core/scheduling/index.html" -msgstr "" - -#: flwr.simulation.app.start_simulation:67 of -msgid "**hist** -- Object containing metrics from training." -msgstr "" - #: ../../source/ref-changelog.md:1 msgid "Changelog" msgstr "" @@ -15285,13 +15380,6 @@ msgstr "" msgid "Incompatible changes" msgstr "" -#: ../../source/ref-changelog.md:33 ../../source/ref-changelog.md:399 -#: ../../source/ref-changelog.md:676 ../../source/ref-changelog.md:740 -#: ../../source/ref-changelog.md:798 ../../source/ref-changelog.md:867 -#: ../../source/ref-changelog.md:929 -msgid "None" -msgstr "" - #: ../../source/ref-changelog.md:35 msgid "v1.11.0 (2024-08-30)" msgstr "" @@ -20418,32 +20506,44 @@ msgid "" "blockchain environment is available here:" msgstr "" -#: ../../source/ref-faq.rst:28 -msgid "" -"`Flower meets Nevermined GitHub Repository `_." +#: ../../source/ref-faq.rst:29 +msgid "`FLock: A Decentralised AI Training Platform `_." msgstr "" #: ../../source/ref-faq.rst:29 -msgid "" +msgid "Contribute to on-chain training the model and earn rewards." +msgstr "" + +#: ../../source/ref-faq.rst:30 +msgid "Local blockchain with federated learning simulation." +msgstr "" + +#: ../../source/ref-faq.rst:31 +msgid "" +"`Flower meets Nevermined GitHub Repository `_." +msgstr "" + +#: ../../source/ref-faq.rst:32 +msgid "" "`Flower meets Nevermined YouTube video " "`_." msgstr "" -#: ../../source/ref-faq.rst:30 +#: ../../source/ref-faq.rst:33 msgid "" "`Flower meets KOSMoS `_." 
msgstr "" -#: ../../source/ref-faq.rst:31 +#: ../../source/ref-faq.rst:34 msgid "" "`Flower meets Talan blog post `_ ." msgstr "" -#: ../../source/ref-faq.rst:32 +#: ../../source/ref-faq.rst:35 msgid "" "`Flower meets Talan GitHub Repository " "`_ ." @@ -20666,178 +20766,294 @@ msgid "" "more." msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:-1 -msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with FastAI to train a vision model on CIFAR-10." -msgstr "" - #: ../../source/tutorial-quickstart-fastai.rst:5 msgid "Quickstart fastai" msgstr "" -#: ../../source/tutorial-quickstart-fastai.rst:10 -msgid "Let's build a federated learning system using fastai and Flower!" +#: ../../source/tutorial-quickstart-fastai.rst:7 +msgid "" +"In this federated learning tutorial we will learn how to train a " +"SqueezeNet model on MNIST using Flower and fastai. It is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `." msgstr "" #: ../../source/tutorial-quickstart-fastai.rst:12 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 +msgid "Then, clone the code example directly from GitHub:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:20 msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." +"This will create a new directory called `quickstart-fastai` containing " +"the following files:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:33 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:33 +msgid "Next, activate your environment, then run:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:43 +msgid "" +"This example by default runs the Flower Simulation Engine, creating a " +"federation of 10 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." +" Let's run the project:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:56 +#: ../../source/tutorial-quickstart-huggingface.rst:65 +#: ../../source/tutorial-quickstart-mlx.rst:64 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:56 +#: ../../source/tutorial-quickstart-pytorch.rst:64 +#: ../../source/tutorial-quickstart-tensorflow.rst:65 +msgid "With default arguments you will see an output like this one:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:100 +#: ../../source/tutorial-quickstart-huggingface.rst:116 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:106 +#: ../../source/tutorial-quickstart-pytorch.rst:105 +#: ../../source/tutorial-quickstart-tensorflow.rst:106 +msgid "" +"You can also override the parameters defined in the " +"``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:110 +msgid "" +"Check the `source code `_ of this tutorial in ``examples/quickstart-fasai`` " +"in the Flower GitHub repository." msgstr "" #: ../../source/tutorial-quickstart-huggingface.rst:-1 msgid "" "Check out this Federating Learning quickstart tutorial for using Flower " -"with HuggingFace Transformers in order to fine-tune an LLM." +"with 🤗 HuggingFace Transformers in order to fine-tune an LLM." 
msgstr "" #: ../../source/tutorial-quickstart-huggingface.rst:5 msgid "Quickstart 🤗 Transformers" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:10 +#: ../../source/tutorial-quickstart-huggingface.rst:7 msgid "" -"Let's build a federated learning system using Hugging Face Transformers " -"and Flower!" +"In this federated learning tutorial we will learn how to train a large " +"language model (LLM) on the `IMDB " +"`_ dataset using Flower" +" and the 🤗 Hugging Face Transformers library. It is recommended to create" +" a virtual environment and run everything within a :doc:`virtualenv " +"`." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:12 +#: ../../source/tutorial-quickstart-huggingface.rst:14 msgid "" -"We will leverage Hugging Face to federate the training of language models" -" over multiple clients using Flower. More specifically, we will fine-tune" -" a pre-trained Transformer model (distilBERT) for sequence classification" -" over a dataset of IMDB ratings. The end goal is to detect if a movie " -"rating is positive or negative." -msgstr "" - -#: ../../source/tutorial-quickstart-huggingface.rst:18 -msgid "Dependencies" +"Let's use ``flwr new`` to create a complete Flower+🤗 Hugging Face " +"project. It will generate all the files needed to run, by default with " +"the Flower Simulation Engine, a federation of 10 nodes using |fedavg|_ " +"The dataset will be partitioned using |flowerdatasets|_'s " +"|iidpartitioner|_." msgstr "" #: ../../source/tutorial-quickstart-huggingface.rst:20 +#: ../../source/tutorial-quickstart-mlx.rst:19 +#: ../../source/tutorial-quickstart-pytorch.rst:19 +#: ../../source/tutorial-quickstart-tensorflow.rst:20 msgid "" -"To follow along this tutorial you will need to install the following " -"packages: :code:`datasets`, :code:`evaluate`, :code:`flwr`, " -":code:`torch`, and :code:`transformers`. This can be done using " -":code:`pip`:" +"Now that we have a rough idea of what this example is about, let's get " +"started. First, install Flower in your new environment:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:30 -msgid "Standard Hugging Face workflow" +#: ../../source/tutorial-quickstart-huggingface.rst:28 +msgid "" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``HuggingFace``), give a name to your " +"project, and type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:33 -msgid "Handling the data" +#: ../../source/tutorial-quickstart-huggingface.rst:36 +#: ../../source/tutorial-quickstart-mlx.rst:35 +#: ../../source/tutorial-quickstart-pytorch.rst:35 +#: ../../source/tutorial-quickstart-tensorflow.rst:36 +msgid "" +"After running it you'll notice a new directory with your project name has" +" been created. It should have the following structure:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:35 +#: ../../source/tutorial-quickstart-huggingface.rst:50 +#: ../../source/tutorial-quickstart-mlx.rst:49 +#: ../../source/tutorial-quickstart-pytorch.rst:49 +#: ../../source/tutorial-quickstart-tensorflow.rst:50 msgid "" -"To fetch the IMDB dataset, we will use Hugging Face's :code:`datasets` " -"library. 
We then need to tokenize the data and create :code:`PyTorch` " -"dataloaders, this is all done in the :code:`load_data` function:" +"If you haven't yet installed the project and its dependencies, you can do" +" so by:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:58 +#: ../../source/tutorial-quickstart-pytorch.rst:57 +#: ../../source/tutorial-quickstart-tensorflow.rst:58 +msgid "To run the project, do:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:106 +msgid "You can also run the project with GPU as follows:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:81 -msgid "Training and testing the model" +#: ../../source/tutorial-quickstart-huggingface.rst:113 +msgid "" +"This will use the default arguments where each ``ClientApp`` will use 2 " +"CPUs and at most 4 ``ClientApp``\\s will run in a given GPU." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:83 +#: ../../source/tutorial-quickstart-huggingface.rst:124 +#: ../../source/tutorial-quickstart-mlx.rst:114 +#: ../../source/tutorial-quickstart-pytorch.rst:113 msgid "" -"Once we have a way of creating our trainloader and testloader, we can " -"take care of the training and testing. This is very similar to any " -":code:`PyTorch` training or testing loop:" +"What follows is an explanation of each component in the project you just " +"created: dataset partition, the model, defining the ``ClientApp`` and " +"defining the ``ServerApp``." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:121 -msgid "Creating the model itself" +#: ../../source/tutorial-quickstart-huggingface.rst:130 +#: ../../source/tutorial-quickstart-mlx.rst:120 +#: ../../source/tutorial-quickstart-pytorch.rst:119 +#: ../../source/tutorial-quickstart-tensorflow.rst:116 +msgid "The Data" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:123 +#: ../../source/tutorial-quickstart-huggingface.rst:132 msgid "" -"To create the model itself, we will just load the pre-trained distillBERT" -" model using Hugging Face’s :code:`AutoModelForSequenceClassification` :" +"This tutorial uses |flowerdatasets|_ to easily download and partition the" +" `IMDB `_ dataset. In " +"this example you'll make use of the |iidpartitioner|_ to generate " +"``num_partitions`` partitions. You can choose |otherpartitioners|_ " +"available in Flower Datasets. To tokenize the text, we will also load the" +" tokenizer from the pre-trained Transformer model that we'll use during " +"training - more on that in the next section. Each ``ClientApp`` will call" +" this function to create dataloaders with the data that correspond to " +"their data partition." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:136 -msgid "Federating the example" +#: ../../source/tutorial-quickstart-huggingface.rst:178 +#: ../../source/tutorial-quickstart-mlx.rst:164 +#: ../../source/tutorial-quickstart-pytorch.rst:157 +#: ../../source/tutorial-quickstart-tensorflow.rst:145 +msgid "The Model" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:139 -msgid "Creating the IMDBClient" +#: ../../source/tutorial-quickstart-huggingface.rst:180 +msgid "" +"We will leverage 🤗 Hugging Face to federate the training of language " +"models over multiple clients using Flower. More specifically, we will " +"fine-tune a pre-trained Transformer model (|berttiny|_) for sequence " +"classification over the dataset of IMDB ratings. The end goal is to " +"detect if a movie rating is positive or negative. 
If you have access to " +"larger GPUs, feel free to use larger models!" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:141 +#: ../../source/tutorial-quickstart-huggingface.rst:193 msgid "" -"To federate our example to multiple clients, we first need to write our " -"Flower client class (inheriting from :code:`flwr.client.NumPyClient`). " -"This is very easy, as our model is a standard :code:`PyTorch` model:" +"Note that here, ``model_name`` is a string that will be loaded from the " +"``Context`` in the ClientApp and ServerApp." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:169 +#: ../../source/tutorial-quickstart-huggingface.rst:196 msgid "" -"The :code:`get_parameters` function lets the server get the client's " -"parameters. Inversely, the :code:`set_parameters` function allows the " -"server to send its parameters to the client. Finally, the :code:`fit` " -"function trains the model locally for the client, and the " -":code:`evaluate` function tests the model locally and returns the " -"relevant metrics." +"In addition to loading the pretrained model weights and architecture, we " +"also include two utility functions to perform both training (i.e. " +"``train()``) and evaluation (i.e. ``test()``) using the above model. " +"These functions should look fairly familiar if you have some prior " +"experience with PyTorch. Note these functions do not have anything " +"specific to Flower. That being said, the training function will normally " +"be called, as we'll see later, from a Flower client passing its own data." +" In summary, your clients can use standard training/testing functions to " +"perform local training or evaluation:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:175 -msgid "Starting the server" +#: ../../source/tutorial-quickstart-huggingface.rst:239 +#: ../../source/tutorial-quickstart-mlx.rst:210 +#: ../../source/tutorial-quickstart-pytorch.rst:234 +#: ../../source/tutorial-quickstart-tensorflow.rst:176 +msgid "The ClientApp" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:177 +#: ../../source/tutorial-quickstart-huggingface.rst:241 msgid "" -"Now that we have a way to instantiate clients, we need to create our " -"server in order to aggregate the results. Using Flower, this can be done " -"very easily by first choosing a strategy (here, we are using " -":code:`FedAvg`, which will define the global weights as the average of " -"all the clients' weights at each round) and then using the " -":code:`flwr.server.start_server` function:" +"The main changes we have to make to use 🤗 Hugging Face with Flower will " +"be found in the ``get_weights()`` and ``set_weights()`` functions. Under " +"the hood, the ``transformers`` library uses PyTorch, which means we can " +"reuse the ``get_weights()`` and ``set_weights()`` code that we defined in" +" the :doc:`Quickstart PyTorch ` tutorial. As" +" a reminder, in ``get_weights()``, PyTorch model parameters are extracted" +" and represented as a list of NumPy arrays. The ``set_weights()`` " +"function that's the opposite: given a list of NumPy arrays it applies " +"them to an existing PyTorch model. Doing this in fairly easy in PyTorch." 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:205 +#: ../../source/tutorial-quickstart-huggingface.rst:254 +#: ../../source/tutorial-quickstart-pytorch.rst:245 msgid "" -"The :code:`weighted_average` function is there to provide a way to " -"aggregate the metrics distributed amongst the clients (basically this " -"allows us to display a nice average accuracy and loss for every round)." +"The specific implementation of ``get_weights()`` and ``set_weights()`` " +"depends on the type of models you use. The ones shown below work for a " +"wide range of PyTorch models but you might need to adjust them if you " +"have more exotic model architectures." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:209 -msgid "Putting everything together" +#: ../../source/tutorial-quickstart-huggingface.rst:269 +#: ../../source/tutorial-quickstart-pytorch.rst:261 +msgid "" +"The rest of the functionality is directly inspired by the centralized " +"case. The ``fit()`` method in the client trains the model using the local" +" dataset. Similarly, the ``evaluate()`` method is used to evaluate the " +"model received on a held-out validation set that the client might have:" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:211 -msgid "We can now start client instances using:" +#: ../../source/tutorial-quickstart-huggingface.rst:296 +msgid "" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"``local-epochs`` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additional hyperparameters in ``pyproject.toml`` and access them here." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:221 -msgid "" -"And they will be able to connect to the server and start the federated " -"training." +#: ../../source/tutorial-quickstart-huggingface.rst:330 +#: ../../source/tutorial-quickstart-mlx.rst:376 +#: ../../source/tutorial-quickstart-pytorch.rst:321 +#: ../../source/tutorial-quickstart-tensorflow.rst:245 +msgid "The ServerApp" msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:223 +#: ../../source/tutorial-quickstart-huggingface.rst:332 msgid "" -"If you want to check out everything put together, you should check out " -"the `full code example `_ ." +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"|serverappcomponents|_ as opposed to a |client|_ In this example we use " +"the `FedAvg` strategy. To it we pass a randomly initialized model that " +"will server as the global model to federated. Note that the value of " +"``fraction_fit`` is read from the run config. You can find the default " +"value defined in the ``pyproject.toml``." msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:226 +#: ../../source/tutorial-quickstart-huggingface.rst:371 msgid "" -"Of course, this is a very basic example, and a lot can be added or " -"modified, it was just to showcase how simply we could federate a Hugging " -"Face workflow using Flower." +"Congratulations! You've successfully built and run your first federated " +"learning system for an LLM." 
msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:229 +#: ../../source/tutorial-quickstart-huggingface.rst:376 msgid "" -"Note that in this example we used :code:`PyTorch`, but we could have very" -" well used :code:`TensorFlow`." +"Check the source code of the extended version of this tutorial in " +"|quickstart_hf_link|_ in the Flower GitHub repository. For a " +"comprehensive example of a federated fine-tuning of an LLM with Flower, " +"refer to the |flowertune|_ example in the Flower GitHub repository." msgstr "" #: ../../source/tutorial-quickstart-ios.rst:-1 @@ -20892,7 +21108,6 @@ msgstr "" #: ../../source/tutorial-quickstart-ios.rst:34 #: ../../source/tutorial-quickstart-scikitlearn.rst:40 -#: ../../source/tutorial-quickstart-tensorflow.rst:29 #: ../../source/tutorial-quickstart-xgboost.rst:55 msgid "Flower Client" msgstr "" @@ -20966,13 +21181,11 @@ msgstr "" #: ../../source/tutorial-quickstart-ios.rst:129 #: ../../source/tutorial-quickstart-scikitlearn.rst:167 -#: ../../source/tutorial-quickstart-tensorflow.rst:98 #: ../../source/tutorial-quickstart-xgboost.rst:341 msgid "Flower Server" msgstr "" #: ../../source/tutorial-quickstart-ios.rst:131 -#: ../../source/tutorial-quickstart-tensorflow.rst:100 msgid "" "For simple workloads we can start a Flower server and leave all the " "configuration possibilities at their default values. In a file named " @@ -20981,12 +21194,10 @@ msgstr "" #: ../../source/tutorial-quickstart-ios.rst:142 #: ../../source/tutorial-quickstart-scikitlearn.rst:230 -#: ../../source/tutorial-quickstart-tensorflow.rst:112 msgid "Train the model, federated!" msgstr "" #: ../../source/tutorial-quickstart-ios.rst:144 -#: ../../source/tutorial-quickstart-tensorflow.rst:114 #: ../../source/tutorial-quickstart-xgboost.rst:567 msgid "" "With both client and server ready, we can now run everything and see " @@ -21151,7 +21362,7 @@ msgid "" " the model:" msgstr "" -#: ../../source/tutorial-quickstart-jax.rst:165 +#: ../../source/tutorial-quickstart-jax.rst:167 msgid ":code:`set_parameters (optional)`" msgstr "" @@ -21235,13 +21446,6 @@ msgid "" "api/flwr_datasets.partitioner.IidPartitioner.html#flwr_datasets.partitioner.IidPartitioner>`_." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:19 -#: ../../source/tutorial-quickstart-pytorch.rst:19 -msgid "" -"Now that we have a rough idea of what this example is about, let's get " -"started. First, install Flower in your new environment:" -msgstr "" - #: ../../source/tutorial-quickstart-mlx.rst:27 msgid "" "Then, run the command below. You will be prompted to select of the " @@ -21249,48 +21453,16 @@ msgid "" "type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:35 -#: ../../source/tutorial-quickstart-pytorch.rst:35 -msgid "" -"After running it you'll notice a new directory with your project name has" -" been created. 
It should have the following structure:" -msgstr "" - -#: ../../source/tutorial-quickstart-mlx.rst:49 -#: ../../source/tutorial-quickstart-pytorch.rst:49 -msgid "" -"If you haven't yet installed the project and its dependencies, you can do" -" so by:" -msgstr "" - #: ../../source/tutorial-quickstart-mlx.rst:57 msgid "To run the project do:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:64 -#: ../../source/tutorial-quickstart-pytorch.rst:64 -msgid "With default arguments you will see an output like this one:" -msgstr "" - #: ../../source/tutorial-quickstart-mlx.rst:106 msgid "" "You can also override the parameters defined in " "``[tool.flwr.app.config]`` section in the ``pyproject.toml`` like this:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:114 -#: ../../source/tutorial-quickstart-pytorch.rst:113 -msgid "" -"What follows is an explanation of each component in the project you just " -"created: dataset partition, the model, defining the ``ClientApp`` and " -"defining the ``ServerApp``." -msgstr "" - -#: ../../source/tutorial-quickstart-mlx.rst:120 -#: ../../source/tutorial-quickstart-pytorch.rst:119 -msgid "The Data" -msgstr "" - #: ../../source/tutorial-quickstart-mlx.rst:122 msgid "" "We will use `Flower Datasets `_ to " @@ -21302,11 +21474,6 @@ msgid "" "api/flwr_datasets.partitioner.html>`_ available in Flower Datasets:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:164 -#: ../../source/tutorial-quickstart-pytorch.rst:157 -msgid "The Model" -msgstr "" - #: ../../source/tutorial-quickstart-mlx.rst:166 msgid "" "We define the model as in the `centralized MLX example " @@ -21320,11 +21487,6 @@ msgid "" "over batches." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:210 -#: ../../source/tutorial-quickstart-pytorch.rst:234 -msgid "The ClientApp" -msgstr "" - #: ../../source/tutorial-quickstart-mlx.rst:212 msgid "" "The main changes we have to make to use `MLX` with `Flower` will be found" @@ -21392,11 +21554,6 @@ msgid "" "method." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:376 -#: ../../source/tutorial-quickstart-pytorch.rst:321 -msgid "The ServerApp" -msgstr "" - #: ../../source/tutorial-quickstart-mlx.rst:378 msgid "" "To construct a ``ServerApp``, we define a ``server_fn()`` callback with " @@ -21410,6 +21567,7 @@ msgstr "" #: ../../source/tutorial-quickstart-mlx.rst:402 #: ../../source/tutorial-quickstart-pytorch.rst:360 +#: ../../source/tutorial-quickstart-tensorflow.rst:279 msgid "" "Congratulations! You've successfully built and run your first federated " "learning system." @@ -21476,16 +21634,6 @@ msgid "" "and type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:57 -msgid "To run the project, do:" -msgstr "" - -#: ../../source/tutorial-quickstart-pytorch.rst:105 -msgid "" -"You can also override the parameters defined in the " -"``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this:" -msgstr "" - #: ../../source/tutorial-quickstart-pytorch.rst:121 msgid "" "This tutorial uses `Flower Datasets `_ " @@ -21529,22 +21677,6 @@ msgid "" "PyTorch model. Doing this in fairly easy in PyTorch." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:245 -msgid "" -"The specific implementation of ``get_weights()`` and ``set_weights()`` " -"depends on the type of models you use. The ones shown below work for a " -"wide range of PyTorch models but you might need to adjust them if you " -"have more exotic model architectures." 
-msgstr "" - -#: ../../source/tutorial-quickstart-pytorch.rst:261 -msgid "" -"The rest of the functionality is directly inspired by the centralized " -"case. The ``fit()`` method in the client trains the model using the local" -" dataset. Similarly, the ``evaluate()`` method is used to evaluate the " -"model received on a held-out validation set that the client might have:" -msgstr "" - #: ../../source/tutorial-quickstart-pytorch.rst:294 msgid "" "Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " @@ -21578,6 +21710,7 @@ msgid "" msgstr "" #: ../../source/tutorial-quickstart-pytorch.rst:372 +#: ../../source/tutorial-quickstart-tensorflow.rst:295 msgid "Video tutorial" msgstr "" @@ -21588,27 +21721,46 @@ msgid "" "that shows the new APIs (as the content above does)" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:-1 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 +msgid "Quickstart PyTorch Lightning" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:7 msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with PyTorch Lightning to train an Auto Encoder model on MNIST." +"In this federated learning tutorial we will learn how to train an " +"AutoEncoder model on MNIST using Flower and PyTorch Lightning. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 -msgid "Quickstart PyTorch Lightning" +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:20 +msgid "" +"This will create a new directory called `quickstart-pytorch-lightning` " +"containing the following files:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:10 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:43 msgid "" -"Let's build a horizontal federated learning system using PyTorch " -"Lightning and Flower!" +"By default, Flower Simulation Engine will be started and it will create a" +" federation of 4 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." +" To run the project, do:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:94 msgid "" -"Please refer to the `full code example " -"`_ to learn more." +"Each simulated `ClientApp` (two per round) will also log a summary of " +"their local training process. Expect this output to be similar to:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:116 +msgid "" +"Check the `source code `_ of this tutorial in ``examples" +"/quickstart-pytorch-lightning`` in the Flower GitHub repository." 
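The strings above walk through building a ``ClientApp`` from a ``client_fn()`` callback and a ``ServerApp`` from a ``server_fn()`` callback. A condensed sketch of that wiring is shown below; the ``FlowerClient`` body is a stub, and the ``local-epochs``/``num-server-rounds`` keys are assumed to be defined under ``[tool.flwr.app.config]`` in ``pyproject.toml``, as in the generated templates:

    from flwr.client import ClientApp, NumPyClient
    from flwr.common import Context
    from flwr.server import ServerApp, ServerAppComponents, ServerConfig
    from flwr.server.strategy import FedAvg


    class FlowerClient(NumPyClient):
        """Stub client; a real one wraps the local train/test code."""

        def __init__(self, local_epochs: int) -> None:
            self.local_epochs = local_epochs

        def fit(self, parameters, config):
            # Train locally for `self.local_epochs` epochs, then return the updated weights.
            return parameters, 1, {}

        def evaluate(self, parameters, config):
            # Evaluate the received model on a held-out local validation set.
            return 0.0, 1, {"accuracy": 0.0}


    def client_fn(context: Context):
        # `context` exposes the values from [tool.flwr.app.config] in pyproject.toml.
        local_epochs = int(context.run_config["local-epochs"])
        return FlowerClient(local_epochs).to_client()


    app = ClientApp(client_fn=client_fn)


    def server_fn(context: Context):
        # FedAvg aggregates client updates; ServerAppComponents bundles strategy and config.
        strategy = FedAvg()
        num_rounds = int(context.run_config["num-server-rounds"])
        return ServerAppComponents(strategy=strategy, config=ServerConfig(num_rounds=num_rounds))


    server_app = ServerApp(server_fn=server_fn)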
msgstr "" #: ../../source/tutorial-quickstart-scikitlearn.rst:-1 @@ -21689,7 +21841,7 @@ msgstr "" msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:49 +#: ../../source/tutorial-quickstart-scikitlearn.rst:50 msgid ":code:`set_initial_params()`" msgstr "" @@ -21745,7 +21897,7 @@ msgstr "" msgid "return the model weight as a list of NumPy ndarrays" msgstr "" -#: ../../source/tutorial-quickstart-scikitlearn.rst:120 +#: ../../source/tutorial-quickstart-scikitlearn.rst:121 msgid ":code:`set_parameters` (optional)" msgstr "" @@ -21839,7 +21991,6 @@ msgid "" msgstr "" #: ../../source/tutorial-quickstart-scikitlearn.rst:239 -#: ../../source/tutorial-quickstart-tensorflow.rst:122 #: ../../source/tutorial-quickstart-xgboost.rst:575 msgid "" "Once the server is running we can start the clients in different " @@ -21847,7 +21998,6 @@ msgid "" msgstr "" #: ../../source/tutorial-quickstart-scikitlearn.rst:246 -#: ../../source/tutorial-quickstart-tensorflow.rst:129 #: ../../source/tutorial-quickstart-xgboost.rst:582 msgid "Open another terminal and start the second client:" msgstr "" @@ -21872,102 +22022,110 @@ msgstr "" #: ../../source/tutorial-quickstart-tensorflow.rst:-1 msgid "" "Check out this Federated Learning quickstart tutorial for using Flower " -"with TensorFlow to train a MobilNetV2 model on CIFAR-10." +"with TensorFlow to train a CNN model on CIFAR-10." msgstr "" #: ../../source/tutorial-quickstart-tensorflow.rst:5 msgid "Quickstart TensorFlow" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:13 -msgid "Let's build a federated learning system in less than 20 lines of code!" -msgstr "" - -#: ../../source/tutorial-quickstart-tensorflow.rst:15 -msgid "Before Flower can be imported we have to install it:" -msgstr "" - -#: ../../source/tutorial-quickstart-tensorflow.rst:21 +#: ../../source/tutorial-quickstart-tensorflow.rst:7 msgid "" -"Since we want to use the Keras API of TensorFlow (TF), we have to install" -" TF as well:" +"In this tutorial we will learn how to train a Convolutional Neural " +"Network on CIFAR-10 using the Flower framework and TensorFlow. First of " +"all, it is recommended to create a virtual environment and run everything" +" within a :doc:`virtualenv `." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:31 -msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" +#: ../../source/tutorial-quickstart-tensorflow.rst:13 +msgid "" +"Let's use `flwr new` to create a complete Flower+TensorFlow project. It " +"will generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:38 +#: ../../source/tutorial-quickstart-tensorflow.rst:28 msgid "" -"We use the Keras utilities of TF to load CIFAR10, a popular colored image" -" classification dataset for machine learning. The call to " -":code:`tf.keras.datasets.cifar10.load_data()` downloads CIFAR10, caches " -"it locally, and then returns the entire training and test set as NumPy " -"ndarrays." +"Then, run the command below. 
You will be prompted to select one of the " +"available templates (choose ``TensorFlow``), give a name to your project," +" and type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:47 +#: ../../source/tutorial-quickstart-tensorflow.rst:118 msgid "" -"Next, we need a model. For the purpose of this tutorial, we use " -"MobilNetV2 with 10 output classes:" +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create the ``NumPy`` arrays that" +" correspond to their data partition." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:54 +#: ../../source/tutorial-quickstart-tensorflow.rst:147 msgid "" -"The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to train the neural network we defined earlier)." +"Next, we need a model. We defined a simple Convolutional Neural Network " +"(CNN), but feel free to replace it with a more sophisticated model if " +"you'd like:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:60 +#: ../../source/tutorial-quickstart-tensorflow.rst:178 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses Keras. The :code:`NumPyClient` interface defines three " -"methods which can be implemented in the following way:" +"With `TensorFlow`, we can use the built-in ``get_weights()`` and " +"``set_weights()`` functions, which simplifies the implementation with " +"`Flower`. The rest of the functionality in the ClientApp is directly " +"inspired by the centralized case. The ``fit()`` method in the client " +"trains the model using the local dataset. Similarly, the ``evaluate()`` " +"method is used to evaluate the model received on a held-out validation " +"set that the client might have:" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:82 +#: ../../source/tutorial-quickstart-tensorflow.rst:212 msgid "" -"We can now create an instance of our class :code:`CifarClient` and add " -"one line to actually run this client:" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparameters defined in your " +"``pyproject.toml`` to configure the run. For example, in this tutorial we" +" access the `local-epochs` setting to control the number of epochs a " +"``ClientApp`` will perform when running the ``fit()`` method, in addition" +" to `batch-size`. You could define additional hyperparameters in " +"``pyproject.toml`` and access them here." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:90 +#: ../../source/tutorial-quickstart-tensorflow.rst:247 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " -"implement a client of type :code:`NumPyClient` you'll need to first call " -"its :code:`to_client()` method. 
The string :code:`\"[::]:8080\"` tells " -"the client which server to connect to. In our case we can run the server " -"and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will serve as " +"the global model to federate." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:135 -msgid "Each client will have its own dataset." +#: ../../source/tutorial-quickstart-tensorflow.rst:284 +msgid "" +"Check the source code of the extended version of this tutorial in " +"|quickstart_tf_link|_ in the Flower GitHub repository." msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:137 +#: ../../source/tutorial-quickstart-tensorflow.rst:299 msgid "" -"You should now see how the training does in the very first terminal (the " -"one that started the server):" +"The video shown below shows how to setup a TensorFlow + Flower project " +"using our previously recommended APIs. A new video tutorial will be " +"released that shows the new APIs (as the content above does)" msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:169 -msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this can be found in :code:`examples" -"/quickstart-tensorflow/client.py`." -msgstr "" - -#: ../../source/tutorial-quickstart-xgboost.rst:-1 +#: ../../source/tutorial-quickstart-xgboost.rst:-1 msgid "" "Check out this Federated Learning quickstart tutorial for using Flower " "with XGBoost to train classification models on trees." 
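The TensorFlow quickstart strings above note that Keras' built-in ``get_weights()``/``set_weights()`` keep the ``NumPyClient`` implementation short. A sketch of such a client, assuming the model and the ``x_train``/``y_train``/``x_val``/``y_val`` partitions are created elsewhere (for example via Flower Datasets, as the tutorial describes):

    from flwr.client import NumPyClient


    class FlowerClient(NumPyClient):
        """Keras-based client sketch; model and data loading are assumed to exist."""

        def __init__(self, model, data, epochs, batch_size):
            self.model = model
            self.x_train, self.y_train, self.x_val, self.y_val = data
            self.epochs = epochs
            self.batch_size = batch_size

        def fit(self, parameters, config):
            # Keras' set_weights()/get_weights() handle parameter (de)serialisation.
            self.model.set_weights(parameters)
            self.model.fit(
                self.x_train, self.y_train,
                epochs=self.epochs, batch_size=self.batch_size, verbose=0,
            )
            return self.model.get_weights(), len(self.x_train), {}

        def evaluate(self, parameters, config):
            self.model.set_weights(parameters)
            loss, accuracy = self.model.evaluate(self.x_val, self.y_val, verbose=0)
            return loss, len(self.x_val), {"accuracy": accuracy}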
@@ -23811,7 +23969,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 -msgid "|e5918c1c06a4434bbe4bf49235e40059|" +msgid "|e87b69b2ada74ea49412df16f4a0b9cc|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 @@ -23826,7 +23984,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 -msgid "|c0165741bd1944f09ec55ce49032377d|" +msgid "|33cacb7d985c4906b348515c1a5cd993|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 @@ -23847,7 +24005,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 -msgid "|0a0ac9427ac7487b8e52d75ed514f04e|" +msgid "|cc080a555947492fa66131dc3a967603|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 @@ -23863,7 +24021,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 -msgid "|5defee3ea4ca40d99fcd3e4ea045be25|" +msgid "|085c3e0fb8664c6aa06246636524b20b|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 @@ -23879,7 +24037,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 -msgid "|74f26ca701254d3db57d7899bd91eb55|" +msgid "|bfe69c74e48c45d49b50251c38c2a019|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 @@ -23894,7 +24052,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 -msgid "|bda79f21f8154258a40e5766b2634ad7|" +msgid "|ebbecd651f0348d99c6511ea859bf4ca|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 @@ -23914,7 +24072,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 -msgid "|89d30862e62e4f9989e193483a08680a|" +msgid "|163117eb654a4273babba413cf8065f5|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:173 @@ -23929,7 +24087,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 -msgid "|77e9918671c54b4f86e01369c0785ce8|" +msgid "|452ac3ba453b4cd1be27be1ba7560d64|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 @@ -24069,7 +24227,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 -msgid "|7e4ccef37cc94148a067107b34eb7447|" +msgid "|f403fcd69e4e44409627e748b404c086|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 @@ -24093,7 +24251,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 -msgid "|28e47e4cded14479a0846c8e5f22c872|" +msgid "|4b00fe63870145968f8443619a792a42|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 @@ -24117,7 +24275,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 -msgid "|4b8c5d1afa144294b76ffc76e4658a38|" +msgid "|368378731066486fa4397e89bc6b870c|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 @@ -24140,7 +24298,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 -msgid "|9dbdb3a0f6cb4a129fac863eaa414c17|" +msgid "|a66aa83d85bf4ffba7ed660b718066da|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 @@ -24178,7 +24336,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 -msgid "|81749d0ac0834c36a83bd38f433fea31|" +msgid "|82324b9af72a4582a81839d55caab767|" msgstr "" 
#: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 @@ -24272,7 +24430,7 @@ msgid "" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 -msgid "|ed9aae51da70428eab7eef32f21e819e|" +msgid "|fbf2da0da3cc4f8ab3b3eff852d80c41|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 @@ -29467,3 +29625,564 @@ msgstr "" #~ msgid "|c00bf2750bc24d229737a0fe1395f0fc|" #~ msgstr "" +#~ msgid ":py:obj:`client `\\" +#~ msgstr "" + +#~ msgid ":py:obj:`common `\\" +#~ msgstr "" + +#~ msgid ":py:obj:`server `\\" +#~ msgstr "" + +#~ msgid ":py:obj:`simulation `\\" +#~ msgstr "" + +#~ msgid ":py:obj:`mod `\\" +#~ msgstr "" + +#~ msgid "run\\_client\\_app" +#~ msgstr "" + +#~ msgid "run\\_supernode" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`get `\\ " +#~ "\\(key\\[\\, default\\]\\)" +#~ msgstr "" + +#~ msgid "Retrieve the corresponding layout by the string key." +#~ msgstr "" + +#~ msgid "" +#~ "When there isn't an exact match, " +#~ "all the existing keys in the " +#~ "layout map will be treated as a" +#~ " regex and map against the input " +#~ "key again. The first match will be" +#~ " returned, based on the key insertion" +#~ " order. Return None if there isn't" +#~ " any match found." +#~ msgstr "" + +#~ msgid "the string key as the query for the layout." +#~ msgstr "" + +#~ msgid "Corresponding layout based on the query." +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`get `\\ " +#~ "\\(key\\[\\, default\\]\\)" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`get `\\ " +#~ "\\(key\\[\\, default\\]\\)" +#~ msgstr "" + +#~ msgid ":py:obj:`strategy `\\" +#~ msgstr "" + +#~ msgid ":py:obj:`workflow `\\" +#~ msgstr "" + +#~ msgid "run\\_server\\_app" +#~ msgstr "" + +#~ msgid "run\\_superlink" +#~ msgstr "" + +#~ msgid "" +#~ ":py:obj:`start_simulation `\\" +#~ " \\(\\*\\, client\\_fn\\, num\\_clients\\)" +#~ msgstr "" + +#~ msgid "Start a Ray-based Flower simulation server." +#~ msgstr "" + +#~ msgid "" +#~ "A function creating `Client` instances. " +#~ "The function must have the signature " +#~ "`client_fn(context: Context). It should return" +#~ " a single client instance of type " +#~ "`Client`. Note that the created client" +#~ " instances are ephemeral and will " +#~ "often be destroyed after a single " +#~ "method invocation. Since client instances " +#~ "are not long-lived, they should " +#~ "not attempt to carry state over " +#~ "method invocations. Any state required " +#~ "by the instance (model, dataset, " +#~ "hyperparameters, ...) should be (re-)created" +#~ " in either the call to `client_fn`" +#~ " or the call to any of the " +#~ "client methods (e.g., load evaluation " +#~ "data in the `evaluate` method itself)." +#~ msgstr "" + +#~ msgid "The total number of clients in this simulation." +#~ msgstr "" + +#~ msgid "" +#~ "UNSUPPORTED, WILL BE REMOVED. USE " +#~ "`num_clients` INSTEAD. List `client_id`s for" +#~ " each client. This is only required" +#~ " if `num_clients` is not set. Setting" +#~ " both `num_clients` and `clients_ids` with" +#~ " `len(clients_ids)` not equal to " +#~ "`num_clients` generates an error. Using " +#~ "this argument will raise an error." +#~ msgstr "" + +#~ msgid "" +#~ "CPU and GPU resources for a single" +#~ " client. Supported keys are `num_cpus` " +#~ "and `num_gpus`. To understand the GPU" +#~ " utilization caused by `num_gpus`, as " +#~ "well as using custom resources, please" +#~ " consult the Ray documentation." 
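The obsolete (``#~``) strings above come from the docstring of the legacy ``start_simulation`` entry point (``client_fn``, ``num_clients``, ``client_resources``, ``ray_init_args``). For reference only, a minimal sketch of how that now-deprecated API was typically invoked; the dummy client and all numeric values are placeholders, and current Flower releases recommend ``flwr run`` instead:

    import numpy as np

    import flwr as fl
    from flwr.client import Client, NumPyClient
    from flwr.common import Context


    class DummyClient(NumPyClient):
        # Placeholder client; a real one trains and evaluates a local model.
        def get_parameters(self, config):
            return [np.zeros(3)]

        def fit(self, parameters, config):
            return parameters, 1, {}

        def evaluate(self, parameters, config):
            return 0.0, 1, {}


    def client_fn(context: Context) -> Client:
        # Instances are ephemeral and re-created per invocation, so no state
        # should be carried across calls (as the docstring above notes).
        return DummyClient().to_client()


    hist = fl.simulation.start_simulation(
        client_fn=client_fn,
        num_clients=10,                                     # placeholder
        client_resources={"num_cpus": 1, "num_gpus": 0.0},  # placeholder
    )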
+#~ msgstr "" + +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Server`. If no instance" +#~ " is provided, then `start_server` will " +#~ "create one." +#~ msgstr "" + +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Strategy`. If no " +#~ "strategy is provided, then `start_server` " +#~ "will use `flwr.server.strategy.FedAvg`." +#~ msgstr "" + +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.ClientManager`. If no " +#~ "implementation is provided, then " +#~ "`start_simulation` will use " +#~ "`flwr.server.client_manager.SimpleClientManager`." +#~ msgstr "" + +#~ msgid "" +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args: { " +#~ "\"ignore_reinit_error\": True, \"include_dashboard\": " +#~ "False } An empty dictionary can " +#~ "be used (ray_init_args={}) to prevent " +#~ "any arguments from being passed to " +#~ "ray.init." +#~ msgstr "" + +#~ msgid "" +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args:" +#~ msgstr "" + +#~ msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#~ msgstr "" + +#~ msgid "" +#~ "An empty dictionary can be used " +#~ "(ray_init_args={}) to prevent any arguments" +#~ " from being passed to ray.init." +#~ msgstr "" + +#~ msgid "" +#~ "Set to True to prevent `ray.shutdown()`" +#~ " in case `ray.is_initialized()=True`." +#~ msgstr "" + +#~ msgid "" +#~ "Optionally specify the type of actor " +#~ "to use. The actor object, which " +#~ "persists throughout the simulation, will " +#~ "be the process in charge of " +#~ "executing a ClientApp wrapping input " +#~ "argument `client_fn`." +#~ msgstr "" + +#~ msgid "" +#~ "If you want to create your own " +#~ "Actor classes, you might need to " +#~ "pass some input argument. You can " +#~ "use this dictionary for such purpose." +#~ msgstr "" + +#~ msgid "" +#~ "(default: \"DEFAULT\") Optional string " +#~ "(\"DEFAULT\" or \"SPREAD\") for the VCE" +#~ " to choose in which node the " +#~ "actor is placed. If you are an " +#~ "advanced user needed more control you" +#~ " can use lower-level scheduling " +#~ "strategies to pin actors to specific " +#~ "compute nodes (e.g. via " +#~ "NodeAffinitySchedulingStrategy). Please note this" +#~ " is an advanced feature. For all " +#~ "details, please refer to the Ray " +#~ "documentation: https://docs.ray.io/en/latest/ray-" +#~ "core/scheduling/index.html" +#~ msgstr "" + +#~ msgid "**hist** -- Object containing metrics from training." +#~ msgstr "" + +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with FastAI to train a vision " +#~ "model on CIFAR-10." +#~ msgstr "" + +#~ msgid "Let's build a federated learning system using fastai and Flower!" +#~ msgstr "" + +#~ msgid "" +#~ "Please refer to the `full code " +#~ "example `_ to learn more." +#~ msgstr "" + +#~ msgid "" +#~ "Check out this Federating Learning " +#~ "quickstart tutorial for using Flower " +#~ "with HuggingFace Transformers in order " +#~ "to fine-tune an LLM." +#~ msgstr "" + +#~ msgid "" +#~ "Let's build a federated learning system" +#~ " using Hugging Face Transformers and " +#~ "Flower!" 
+#~ msgstr "" + +#~ msgid "" +#~ "We will leverage Hugging Face to " +#~ "federate the training of language models" +#~ " over multiple clients using Flower. " +#~ "More specifically, we will fine-tune " +#~ "a pre-trained Transformer model " +#~ "(distilBERT) for sequence classification over" +#~ " a dataset of IMDB ratings. The " +#~ "end goal is to detect if a " +#~ "movie rating is positive or negative." +#~ msgstr "" + +#~ msgid "Dependencies" +#~ msgstr "" + +#~ msgid "" +#~ "To follow along this tutorial you " +#~ "will need to install the following " +#~ "packages: :code:`datasets`, :code:`evaluate`, " +#~ ":code:`flwr`, :code:`torch`, and " +#~ ":code:`transformers`. This can be done " +#~ "using :code:`pip`:" +#~ msgstr "" + +#~ msgid "Standard Hugging Face workflow" +#~ msgstr "" + +#~ msgid "Handling the data" +#~ msgstr "" + +#~ msgid "" +#~ "To fetch the IMDB dataset, we will" +#~ " use Hugging Face's :code:`datasets` " +#~ "library. We then need to tokenize " +#~ "the data and create :code:`PyTorch` " +#~ "dataloaders, this is all done in " +#~ "the :code:`load_data` function:" +#~ msgstr "" + +#~ msgid "Training and testing the model" +#~ msgstr "" + +#~ msgid "" +#~ "Once we have a way of creating " +#~ "our trainloader and testloader, we can" +#~ " take care of the training and " +#~ "testing. This is very similar to " +#~ "any :code:`PyTorch` training or testing " +#~ "loop:" +#~ msgstr "" + +#~ msgid "Creating the model itself" +#~ msgstr "" + +#~ msgid "" +#~ "To create the model itself, we " +#~ "will just load the pre-trained " +#~ "distillBERT model using Hugging Face’s " +#~ ":code:`AutoModelForSequenceClassification` :" +#~ msgstr "" + +#~ msgid "Federating the example" +#~ msgstr "" + +#~ msgid "Creating the IMDBClient" +#~ msgstr "" + +#~ msgid "" +#~ "To federate our example to multiple " +#~ "clients, we first need to write " +#~ "our Flower client class (inheriting from" +#~ " :code:`flwr.client.NumPyClient`). This is very" +#~ " easy, as our model is a " +#~ "standard :code:`PyTorch` model:" +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`get_parameters` function lets the" +#~ " server get the client's parameters. " +#~ "Inversely, the :code:`set_parameters` function " +#~ "allows the server to send its " +#~ "parameters to the client. Finally, the" +#~ " :code:`fit` function trains the model " +#~ "locally for the client, and the " +#~ ":code:`evaluate` function tests the model " +#~ "locally and returns the relevant " +#~ "metrics." +#~ msgstr "" + +#~ msgid "Starting the server" +#~ msgstr "" + +#~ msgid "" +#~ "Now that we have a way to " +#~ "instantiate clients, we need to create" +#~ " our server in order to aggregate " +#~ "the results. Using Flower, this can " +#~ "be done very easily by first " +#~ "choosing a strategy (here, we are " +#~ "using :code:`FedAvg`, which will define " +#~ "the global weights as the average " +#~ "of all the clients' weights at " +#~ "each round) and then using the " +#~ ":code:`flwr.server.start_server` function:" +#~ msgstr "" + +#~ msgid "" +#~ "The :code:`weighted_average` function is there" +#~ " to provide a way to aggregate " +#~ "the metrics distributed amongst the " +#~ "clients (basically this allows us to " +#~ "display a nice average accuracy and " +#~ "loss for every round)." 
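The obsolete Hugging Face quickstart strings above mention a ``weighted_average`` helper used to aggregate metrics reported by the clients. A small sketch of that common pattern, assuming each client reports ``accuracy`` and ``loss`` in its metrics dictionary:

    from typing import List, Tuple

    from flwr.common import Metrics


    def weighted_average(metrics: List[Tuple[int, Metrics]]) -> Metrics:
        # Each entry is (num_examples, metrics_dict) reported by one client.
        examples = [num_examples for num_examples, _ in metrics]
        accuracy = sum(n * m["accuracy"] for n, m in metrics) / sum(examples)
        loss = sum(n * m["loss"] for n, m in metrics) / sum(examples)
        # Weighting by dataset size keeps small clients from skewing the average.
        return {"accuracy": accuracy, "loss": loss}

Such a function is typically handed to the strategy, for example ``FedAvg(evaluate_metrics_aggregation_fn=weighted_average)``.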
+#~ msgstr "" + +#~ msgid "Putting everything together" +#~ msgstr "" + +#~ msgid "We can now start client instances using:" +#~ msgstr "" + +#~ msgid "" +#~ "And they will be able to connect" +#~ " to the server and start the " +#~ "federated training." +#~ msgstr "" + +#~ msgid "" +#~ "If you want to check out " +#~ "everything put together, you should " +#~ "check out the `full code example " +#~ "`_ ." +#~ msgstr "" + +#~ msgid "" +#~ "Of course, this is a very basic" +#~ " example, and a lot can be " +#~ "added or modified, it was just to" +#~ " showcase how simply we could " +#~ "federate a Hugging Face workflow using" +#~ " Flower." +#~ msgstr "" + +#~ msgid "" +#~ "Note that in this example we used" +#~ " :code:`PyTorch`, but we could have " +#~ "very well used :code:`TensorFlow`." +#~ msgstr "" + +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with PyTorch Lightning to train an " +#~ "Auto Encoder model on MNIST." +#~ msgstr "" + +#~ msgid "" +#~ "Let's build a horizontal federated " +#~ "learning system using PyTorch Lightning " +#~ "and Flower!" +#~ msgstr "" + +#~ msgid "" +#~ "Please refer to the `full code " +#~ "example `_ to learn " +#~ "more." +#~ msgstr "" + +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with TensorFlow to train a MobilNetV2" +#~ " model on CIFAR-10." +#~ msgstr "" + +#~ msgid "Let's build a federated learning system in less than 20 lines of code!" +#~ msgstr "" + +#~ msgid "Before Flower can be imported we have to install it:" +#~ msgstr "" + +#~ msgid "" +#~ "Since we want to use the Keras " +#~ "API of TensorFlow (TF), we have to" +#~ " install TF as well:" +#~ msgstr "" + +#~ msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" +#~ msgstr "" + +#~ msgid "" +#~ "We use the Keras utilities of TF" +#~ " to load CIFAR10, a popular colored" +#~ " image classification dataset for machine" +#~ " learning. The call to " +#~ ":code:`tf.keras.datasets.cifar10.load_data()` downloads " +#~ "CIFAR10, caches it locally, and then " +#~ "returns the entire training and test " +#~ "set as NumPy ndarrays." +#~ msgstr "" + +#~ msgid "" +#~ "Next, we need a model. For the " +#~ "purpose of this tutorial, we use " +#~ "MobilNetV2 with 10 output classes:" +#~ msgstr "" + +#~ msgid "" +#~ "The Flower server interacts with clients" +#~ " through an interface called " +#~ ":code:`Client`. When the server selects " +#~ "a particular client for training, it " +#~ "sends training instructions over the " +#~ "network. The client receives those " +#~ "instructions and calls one of the " +#~ ":code:`Client` methods to run your code" +#~ " (i.e., to train the neural network" +#~ " we defined earlier)." +#~ msgstr "" + +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses Keras." +#~ " The :code:`NumPyClient` interface defines " +#~ "three methods which can be implemented" +#~ " in the following way:" +#~ msgstr "" + +#~ msgid "" +#~ "We can now create an instance of" +#~ " our class :code:`CifarClient` and add " +#~ "one line to actually run this " +#~ "client:" +#~ msgstr "" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. 
If you implement" +#~ " a client of type :code:`NumPyClient` " +#~ "you'll need to first call its " +#~ ":code:`to_client()` method. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." +#~ msgstr "" + +#~ msgid "Each client will have its own dataset." +#~ msgstr "" + +#~ msgid "" +#~ "You should now see how the " +#~ "training does in the very first " +#~ "terminal (the one that started the " +#~ "server):" +#~ msgstr "" + +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this can be " +#~ "found in :code:`examples/quickstart-" +#~ "tensorflow/client.py`." +#~ msgstr "" + +#~ msgid "|e5918c1c06a4434bbe4bf49235e40059|" +#~ msgstr "" + +#~ msgid "|c0165741bd1944f09ec55ce49032377d|" +#~ msgstr "" + +#~ msgid "|0a0ac9427ac7487b8e52d75ed514f04e|" +#~ msgstr "" + +#~ msgid "|5defee3ea4ca40d99fcd3e4ea045be25|" +#~ msgstr "" + +#~ msgid "|74f26ca701254d3db57d7899bd91eb55|" +#~ msgstr "" + +#~ msgid "|bda79f21f8154258a40e5766b2634ad7|" +#~ msgstr "" + +#~ msgid "|89d30862e62e4f9989e193483a08680a|" +#~ msgstr "" + +#~ msgid "|77e9918671c54b4f86e01369c0785ce8|" +#~ msgstr "" + +#~ msgid "|7e4ccef37cc94148a067107b34eb7447|" +#~ msgstr "" + +#~ msgid "|28e47e4cded14479a0846c8e5f22c872|" +#~ msgstr "" + +#~ msgid "|4b8c5d1afa144294b76ffc76e4658a38|" +#~ msgstr "" + +#~ msgid "|9dbdb3a0f6cb4a129fac863eaa414c17|" +#~ msgstr "" + +#~ msgid "|81749d0ac0834c36a83bd38f433fea31|" +#~ msgstr "" + +#~ msgid "|ed9aae51da70428eab7eef32f21e819e|" +#~ msgstr "" + diff --git a/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po b/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po index 674417e69791..9af452fb0be2 100644 --- a/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po +++ b/doc/locales/zh_Hans/LC_MESSAGES/framework-docs.po @@ -7,7 +7,7 @@ msgid "" msgstr "" "Project-Id-Version: Flower main\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2024-09-15 09:09+0200\n" +"POT-Creation-Date: 2024-09-24 00:29+0000\n" "PO-Revision-Date: 2024-06-12 10:09+0000\n" "Last-Translator: Yan Gao \n" "Language: zh_Hans\n" @@ -17,7 +17,7 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" -"Generated-By: Babel 2.15.0\n" +"Generated-By: Babel 2.16.0\n" #: ../../source/contributor-explanation-public-and-private-apis.rst:2 msgid "Public and private APIs" @@ -1476,7 +1476,7 @@ msgstr "" msgid "Setting up the repository" msgstr "建立资源库" -#: ../../source/contributor-tutorial-contribute-on-github.rst:12 +#: ../../source/contributor-tutorial-contribute-on-github.rst:21 msgid "**Create a GitHub account and setup Git**" msgstr "**创建 GitHub 账户并设置 Git**" @@ -1515,7 +1515,7 @@ msgstr "" "通用的 Git 和 GitHub 工作流程背后的理念可以归结为:从 GitHub 上的远程仓库下载代码,在本地进行修改并使用 Git " "进行跟踪,然后将新的历史记录上传回 GitHub。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:23 +#: ../../source/contributor-tutorial-contribute-on-github.rst:32 msgid "**Forking the Flower repository**" msgstr "**叉花仓库**" @@ -1540,7 +1540,7 @@ msgstr "" "您可以更改名称,但没有必要,因为这个版本的 Flower " "将是您自己的,并位于您自己的账户中(即,在您自己的版本库列表中)。创建完成后,您会在左上角看到自己的 Flower 版本。" -#: 
../../source/contributor-tutorial-contribute-on-github.rst:34 +#: ../../source/contributor-tutorial-contribute-on-github.rst:47 msgid "**Cloning your forked repository**" msgstr "**克隆你的分叉仓库**" @@ -1567,7 +1567,7 @@ msgid "" "it) folder in the current working directory." msgstr "这将在当前工作目录下创建一个 `flower/`(如果重命名了,则使用 fork 的名称)文件夹。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:49 +#: ../../source/contributor-tutorial-contribute-on-github.rst:66 msgid "**Add origin**" msgstr "**添加原产地**" @@ -1591,7 +1591,7 @@ msgid "" "terminal:" msgstr "一旦复制了 \\ ,我们就可以在终端中键入以下命令:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:68 +#: ../../source/contributor-tutorial-contribute-on-github.rst:90 msgid "**Add upstream**" msgstr "**增加上游**" @@ -1650,7 +1650,7 @@ msgstr "在进行任何更改之前,请确保您的版本库是最新的:" msgid "And with Flower's repository:" msgstr "还有Flower的存储库:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:114 +#: ../../source/contributor-tutorial-contribute-on-github.rst:122 msgid "**Create a new branch**" msgstr "**创建一个新分支**" @@ -1667,7 +1667,7 @@ msgid "" "directory:" msgstr "为此,只需在版本库目录下运行以下命令即可:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:124 +#: ../../source/contributor-tutorial-contribute-on-github.rst:125 msgid "**Make changes**" msgstr "**进行修改**" @@ -1675,7 +1675,7 @@ msgstr "**进行修改**" msgid "Write great code and create wonderful changes using your favorite editor!" msgstr "使用您最喜欢的编辑器编写优秀的代码并创建精彩的更改!" -#: ../../source/contributor-tutorial-contribute-on-github.rst:127 +#: ../../source/contributor-tutorial-contribute-on-github.rst:138 msgid "**Test and format your code**" msgstr "**测试并格式化您的代码**" @@ -1690,7 +1690,7 @@ msgstr "不要忘记测试和格式化您的代码!否则您的代码将无法 msgid "To do so, we have written a few scripts that you can execute:" msgstr "为此,我们编写了一些脚本供您执行:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:140 +#: ../../source/contributor-tutorial-contribute-on-github.rst:150 msgid "**Stage changes**" msgstr "**舞台变化**" @@ -1711,7 +1711,7 @@ msgid "" "the :code:`git status` command." msgstr "要查看与上一版本(上次提交)相比哪些文件已被修改,以及哪些文件处于提交阶段,可以使用 :code:`git status` 命令。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:152 +#: ../../source/contributor-tutorial-contribute-on-github.rst:160 msgid "**Commit changes**" msgstr "**提交更改**" @@ -1730,7 +1730,7 @@ msgstr "" " 用于向他人解释提交的作用。它应该以命令式风格书写,并且简明扼要。例如 :code:`git commit " "-m \"Add images to README\"`。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:162 +#: ../../source/contributor-tutorial-contribute-on-github.rst:171 msgid "**Push the changes to the fork**" msgstr "**将更改推送到分叉**" @@ -1751,7 +1751,7 @@ msgstr "完成此操作后,您将在 GitHub 上看到您的分叉仓库已根 msgid "Creating and merging a pull request (PR)" msgstr "创建和合并拉取请求 (PR)" -#: ../../source/contributor-tutorial-contribute-on-github.rst:176 +#: ../../source/contributor-tutorial-contribute-on-github.rst:206 msgid "**Create the PR**" msgstr "**创建 PR**" @@ -1820,7 +1820,7 @@ msgid "" "anyone, you have the option to create a draft pull request:" msgstr "如果您的 PR 尚未准备好接受审核,而且您不想通知任何人,您可以选择创建一个草案拉取请求:" -#: ../../source/contributor-tutorial-contribute-on-github.rst:208 +#: ../../source/contributor-tutorial-contribute-on-github.rst:209 msgid "**Making new changes**" msgstr "**作出新的改变**" @@ -1831,7 +1831,7 @@ msgid "" " associated with the PR." 
msgstr "一旦 PR 被打开(无论是否作为草案),你仍然可以像以前一样,通过修改与 PR 关联的分支来推送新的提交。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:211 +#: ../../source/contributor-tutorial-contribute-on-github.rst:231 msgid "**Review the PR**" msgstr "**审查 PR**" @@ -1867,7 +1867,7 @@ msgid "" "review." msgstr "一旦所有对话都得到解决,您就可以重新申请审核。" -#: ../../source/contributor-tutorial-contribute-on-github.rst:233 +#: ../../source/contributor-tutorial-contribute-on-github.rst:251 msgid "**Once the PR is merged**" msgstr "**一旦 PR 被合并**" @@ -2179,6 +2179,7 @@ msgstr "成为贡献者" #: ../../source/contributor-tutorial-get-started-as-a-contributor.rst:5 #: ../../source/docker/run-as-subprocess.rst:11 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:15 #: ../../source/docker/tutorial-quickstart-docker-compose.rst:12 #: ../../source/docker/tutorial-quickstart-docker.rst:11 msgid "Prerequisites" @@ -2960,6 +2961,242 @@ msgid "" " the SuperNode to execute the ClientApp as a subprocess:" msgstr "" +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:2 +#, fuzzy +msgid "Run Flower Quickstart Examples with Docker Compose" +msgstr "快速入门 iOS" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:4 +msgid "" +"Flower provides a set of `quickstart examples " +"`_ to help you get " +"started with the framework. These examples are designed to demonstrate " +"the capabilities of Flower and by default run using the Simulation " +"Engine. This guide demonstrates how to run them using Flower's Deployment" +" Engine via Docker Compose." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:11 +msgid "" +"Some quickstart examples may have limitations or requirements that " +"prevent them from running on every environment. For more information, " +"please see `Limitations`_." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:17 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:14 +#: ../../source/docker/tutorial-quickstart-docker.rst:13 +#, fuzzy +msgid "Before you start, make sure that:" +msgstr "开始之前,请确保 Docker 守护进程正在运行:" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:19 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:16 +#: ../../source/docker/tutorial-quickstart-docker.rst:15 +msgid "The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:20 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:17 +#: ../../source/docker/tutorial-quickstart-docker.rst:16 +#, fuzzy +msgid "The Docker daemon is running." +msgstr "验证 Docker 守护进程是否正在运行。" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:21 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:18 +msgid "Docker Compose is `installed `_." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:24 +#, fuzzy +msgid "Run the Quickstart Example" +msgstr "示例请求" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:26 +msgid "" +"Clone the quickstart example you like to run. 
For example, ``quickstart-" +"pytorch``:" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:34 +msgid "" +"Download the `compose.yml " +"`_" +" file into the example directory:" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:41 +#, fuzzy +msgid "Build and start the services using the following command:" +msgstr "运行以下命令激活 virtualenv:" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:47 +#, fuzzy +msgid "" +"Append the following lines to the end of the ``pyproject.toml`` file and " +"save it:" +msgstr "将 ``pyproject.toml`` 中的次要版本增加一个。" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:49 +#: ../../source/docker/tutorial-quickstart-docker.rst:319 +#, fuzzy +msgid "pyproject.toml" +msgstr "或 ``pyproject.toml```:" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:58 +msgid "" +"You can customize the string that follows ``tool.flwr.federations.`` to " +"fit your needs. However, please note that the string cannot contain a dot" +" (``.``)." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:61 +msgid "" +"In this example, ``local-deployment`` has been used. Just remember to " +"replace ``local-deployment`` with your chosen name in both the " +"``tool.flwr.federations.`` string and the corresponding ``flwr run .`` " +"command." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:65 +#, fuzzy +msgid "Run the example:" +msgstr "将示例联邦化" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:71 +msgid "Follow the logs of the SuperExec service:" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:77 +msgid "" +"That is all it takes! You can monitor the progress of the run through the" +" logs of the SuperExec." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:80 +msgid "Run a Different Quickstart Example" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:82 +msgid "" +"To run a different quickstart example, such as ``quickstart-tensorflow``," +" first, shut down the Docker Compose services of the current example:" +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:89 +msgid "After that, you can repeat the steps above." 
+msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:92 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:98 +#, fuzzy +msgid "Limitations" +msgstr "运行模拟" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:97 +#, fuzzy +msgid "Quickstart Example" +msgstr "快速入门 JAX" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:99 +#, fuzzy +msgid "quickstart-fastai" +msgstr "快速入门 fastai" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:100 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:102 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:110 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:112 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:116 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:118 +#: ../../source/ref-changelog.md:33 ../../source/ref-changelog.md:399 +#: ../../source/ref-changelog.md:676 ../../source/ref-changelog.md:740 +#: ../../source/ref-changelog.md:798 ../../source/ref-changelog.md:867 +#: ../../source/ref-changelog.md:929 +msgid "None" +msgstr "无" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:101 +#, fuzzy +msgid "quickstart-huggingface" +msgstr "快速入门教程" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:103 +#, fuzzy +msgid "quickstart-jax" +msgstr "快速入门 JAX" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:104 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:106 +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:120 +#, fuzzy +msgid "" +"The example has not yet been updated to work with the latest ``flwr`` " +"version." +msgstr "涵盖 scikit-learn 和 PyTorch Lightning 的代码示例已更新,以便与最新版本的 Flower 配合使用。" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:105 +#, fuzzy +msgid "quickstart-mlcube" +msgstr "快速入门 JAX" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:107 +#, fuzzy +msgid "quickstart-mlx" +msgstr "快速入门 JAX" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:108 +msgid "" +"`Requires to run on macOS with Apple Silicon `_." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:109 +#, fuzzy +msgid "quickstart-monai" +msgstr "快速入门 JAX" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:111 +#, fuzzy +msgid "quickstart-pandas" +msgstr "快速入门Pandas" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:113 +#, fuzzy +msgid "quickstart-pytorch-lightning" +msgstr "快速入门 PyTorch Lightning" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:114 +msgid "" +"Requires an older pip version that is not supported by the Flower Docker " +"images." +msgstr "" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:115 +#, fuzzy +msgid "quickstart-pytorch" +msgstr "PyTorch快速入门" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:117 +#, fuzzy +msgid "quickstart-sklearn-tabular" +msgstr "scikit-learn快速入门" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:119 +#, fuzzy +msgid "quickstart-tabnet" +msgstr "快速入门 JAX" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:121 +#, fuzzy +msgid "quickstart-tensorflow" +msgstr "快速入门 TensorFlow" + +#: ../../source/docker/run-quickstart-examples-docker-compose.rst:122 +msgid "Only runs on AMD64." 
+msgstr "" + #: ../../source/docker/set-environment-variables.rst:2 #, fuzzy msgid "Set Environment Variables" @@ -2991,23 +3228,6 @@ msgid "" " understanding the basic workflow that uses the minimum configurations." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:14 -#: ../../source/docker/tutorial-quickstart-docker.rst:13 -#, fuzzy -msgid "Before you start, make sure that:" -msgstr "开始之前,请确保 Docker 守护进程正在运行:" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:16 -#: ../../source/docker/tutorial-quickstart-docker.rst:15 -msgid "The ``flwr`` CLI is :doc:`installed <../how-to-install-flower>` locally." -msgstr "" - -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:17 -#: ../../source/docker/tutorial-quickstart-docker.rst:16 -#, fuzzy -msgid "The Docker daemon is running." -msgstr "验证 Docker 守护进程是否正在运行。" - #: ../../source/docker/tutorial-quickstart-docker-compose.rst:21 #: ../../source/docker/tutorial-quickstart-docker.rst:19 msgid "Step 1: Set Up" @@ -3440,11 +3660,6 @@ msgstr "" msgid "Add the following lines to the ``pyproject.toml``:" msgstr "将 ``pyproject.toml`` 中的次要版本增加一个。" -#: ../../source/docker/tutorial-quickstart-docker.rst:319 -#, fuzzy -msgid "pyproject.toml" -msgstr "或 ``pyproject.toml```:" - #: ../../source/docker/tutorial-quickstart-docker.rst:326 msgid "Run the ``quickstart-docker`` project by executing the command:" msgstr "" @@ -3495,6 +3710,7 @@ msgstr "" msgid "Remove the containers and the bridge network:" msgstr "" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:401 #: ../../source/docker/tutorial-quickstart-docker.rst:399 #, fuzzy msgid "Where to Go Next" @@ -3531,10 +3747,6 @@ msgid "" "configuration that best suits your project's needs." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:18 -msgid "Docker Compose is `installed `_." -msgstr "" - #: ../../source/docker/tutorial-quickstart-docker-compose.rst:23 msgid "Clone the Docker Compose ``complete`` directory:" msgstr "" @@ -3731,7 +3943,7 @@ msgstr "" #: ../../source/docker/tutorial-quickstart-docker-compose.rst:188 #: ../../source/docker/tutorial-quickstart-docker-compose.rst:241 -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:362 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:369 msgid "Rerun the ``quickstart-compose`` project:" msgstr "" @@ -3795,76 +4007,81 @@ msgstr "" msgid "compose.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:303 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:310 msgid "" "If you also want to enable TLS for the new SuperNodes, duplicate the " "SuperNode definition for each new SuperNode service in the ``with-" "tls.yml`` file." msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:306 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:313 msgid "" "Make sure that the names of the services match with the one in the " "``compose.yml`` file." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:308 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:315 msgid "In ``with-tls.yml``, add the following:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:310 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:317 msgid "with-tls.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:332 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:339 msgid "Step 8: Persisting the SuperLink State and Enabling TLS" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:334 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:341 msgid "" "To run Flower with persisted SuperLink state and enabled TLS, a slight " "change in the ``with-state.yml`` file is required:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:337 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:344 msgid "Comment out the lines 2-4 and uncomment the lines 5-9:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:339 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:346 msgid "with-state.yml" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:356 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:363 #, fuzzy msgid "Restart the services:" msgstr "启动服务器" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:370 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:377 msgid "Step 9: Merge Multiple Compose Files" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:372 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:379 msgid "" "You can merge multiple Compose files into a single file. For instance, if" " you wish to combine the basic configuration with the TLS configuration, " "execute the following command:" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:380 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:387 msgid "" "This will merge the contents of ``compose.yml`` and ``with-tls.yml`` into" " a new file called ``my_compose.yml``." 
msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:384 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:391 msgid "Step 10: Clean Up" msgstr "" -#: ../../source/docker/tutorial-quickstart-docker-compose.rst:386 +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:393 #, fuzzy msgid "Remove all services and volumes:" msgstr "从 R 中删除所有项目。" +#: ../../source/docker/tutorial-quickstart-docker-compose.rst:403 +#, fuzzy +msgid ":doc:`run-quickstart-examples-docker-compose`" +msgstr "快速入门 iOS" + #: ../../source/docker/use-a-different-version.rst:2 #, fuzzy msgid "Use a Different Flower Version" @@ -4189,7 +4406,7 @@ msgstr "" ":code:`Client`略微容易一些,因为它避免了一些不必要的操作。:code:`CifarClient` " "需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:218 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:219 msgid ":code:`set_parameters`" msgstr ":code:`set_parameters`" @@ -4222,9 +4439,9 @@ msgstr "" "获取模型参数,并以 NumPy :code:`ndarray`的列表形式返回(这正是 " ":code:`flwr.client.NumPyClient`所匹配的格式)" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:223 -#: ../../source/tutorial-quickstart-jax.rst:171 -#: ../../source/tutorial-quickstart-scikitlearn.rst:123 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:225 +#: ../../source/tutorial-quickstart-jax.rst:173 +#: ../../source/tutorial-quickstart-scikitlearn.rst:125 msgid ":code:`fit`" msgstr ":code:`fit`" @@ -4246,9 +4463,9 @@ msgstr "在本地训练集上训练模型" msgid "get the updated local model weights and return them to the server" msgstr "获取更新后的本地模型参数并发送回服务器" -#: ../../source/example-pytorch-from-centralized-to-federated.rst:227 -#: ../../source/tutorial-quickstart-jax.rst:175 -#: ../../source/tutorial-quickstart-scikitlearn.rst:127 +#: ../../source/example-pytorch-from-centralized-to-federated.rst:230 +#: ../../source/tutorial-quickstart-jax.rst:178 +#: ../../source/tutorial-quickstart-scikitlearn.rst:128 msgid ":code:`evaluate`" msgstr ":code:`evaluate`" @@ -4352,7 +4569,7 @@ msgstr "" "的数据)之外完全相同。差分隐私(DP)可以保证任何分析(M),比如计算平均收入,对两个数据集都会产生几乎相同的结果(O 和 O' " "将是相似的)。这既保留了群体模式,又掩盖了个人细节,确保个人的信息隐藏在人群中。" -#: ../../source/explanation-differential-privacy.rst:16 +#: ../../source/explanation-differential-privacy.rst:-1 #, fuzzy msgid "DP Intro" msgstr "DP 介绍" @@ -4490,8 +4707,8 @@ msgid "" "the client's data." msgstr "**本地差分隐私**: 在向服务器发送任何信息之前,在客户端应用 DP,目的是防止向服务器发送的更新泄露任何有关客户端数据的信息。" +#: ../../source/explanation-differential-privacy.rst:-1 #: ../../source/explanation-differential-privacy.rst:68 -#: ../../source/explanation-differential-privacy.rst:71 #: ../../source/how-to-use-differential-privacy.rst:11 #, fuzzy msgid "Central Differential Privacy" @@ -4523,7 +4740,7 @@ msgstr "" "虽然在联合学习中实现中央数据处理的方法有很多种,但我们将重点放在[2]和[3]提出的算法上。总体方法是剪辑客户端发送的模型更新,并在聚合模型中添加一定量的噪声。在每次迭代中,以特定概率随机选择一组客户端进行训练。每个客户端对自己的数据进行局部训练。然后,每个客户端的更新会被某个值`S`(灵敏度`S`)剪切。这将限制任何单个客户端的影响,这对隐私至关重要,通常也有利于稳健性。实现这一点的常用方法是限制客户机模型更新的" " `L2` 准则,确保较大的更新被缩减以适应 `S` 准则。" -#: ../../source/explanation-differential-privacy.rst:84 +#: ../../source/explanation-differential-privacy.rst:-1 #, fuzzy msgid "clipping" msgstr "剪贴" @@ -4577,8 +4794,8 @@ msgid "" "others." 
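The differential-privacy strings above describe clipping each client update to a norm ``S`` so that no single client dominates the aggregate. A small illustration of fixed clipping on a list of NumPy arrays (the clipping performed inside Flower's DP wrappers may differ in detail):

    import numpy as np


    def clip_update(update, clipping_norm: float):
        # Scale the whole update so its global L2 norm is at most `clipping_norm`.
        flat = np.concatenate([np.asarray(layer).ravel() for layer in update])
        norm = float(np.linalg.norm(flat))
        scale = min(1.0, clipping_norm / (norm + 1e-12))
        return [np.asarray(layer) * scale for layer in update]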
msgstr "在固定剪切和自适应剪切之间做出选择取决于各种因素,如隐私要求、数据分布、模型复杂性等。" +#: ../../source/explanation-differential-privacy.rst:-1 #: ../../source/explanation-differential-privacy.rst:105 -#: ../../source/explanation-differential-privacy.rst:110 #: ../../source/how-to-use-differential-privacy.rst:96 #, fuzzy msgid "Local Differential Privacy" @@ -4849,7 +5066,7 @@ msgstr "" msgid "This is sometimes called a hub-and-spoke topology:" msgstr "" -#: ../../source/explanation-flower-architecture.rst:18 +#: ../../source/explanation-flower-architecture.rst:24 #, fuzzy msgid "Hub-and-spoke topology in federated learning" msgstr "什么是联邦学习?" @@ -4922,7 +5139,7 @@ msgid "" "`missing link` between all those SuperNodes." msgstr "" -#: ../../source/explanation-flower-architecture.rst:65 +#: ../../source/explanation-flower-architecture.rst:71 #, fuzzy msgid "Basic Flower architecture" msgstr "Flower的架构" @@ -4960,7 +5177,7 @@ msgid "" "SuperNodes." msgstr "" -#: ../../source/explanation-flower-architecture.rst:91 +#: ../../source/explanation-flower-architecture.rst:97 #, fuzzy msgid "Multi-tenancy federated learning architecture" msgstr "使用联邦学习策略" @@ -4984,7 +5201,7 @@ msgid "" "their corresponding ``ClientApp``\\s:" msgstr "" -#: ../../source/explanation-flower-architecture.rst:107 +#: ../../source/explanation-flower-architecture.rst:113 #, fuzzy msgid "Multi-tenancy federated learning architecture - Run 1" msgstr "使用联邦学习策略" @@ -5001,7 +5218,7 @@ msgid "" " to participate in the training:" msgstr "" -#: ../../source/explanation-flower-architecture.rst:119 +#: ../../source/explanation-flower-architecture.rst:125 #, fuzzy msgid "Multi-tenancy federated learning architecture - Run 2" msgstr "使用联邦学习策略" @@ -5038,7 +5255,7 @@ msgid "" "developer machine." msgstr "" -#: ../../source/explanation-flower-architecture.rst:145 +#: ../../source/explanation-flower-architecture.rst:151 msgid "Flower Deployment Engine with SuperExec" msgstr "" @@ -8153,7 +8370,7 @@ msgstr "" ":code:`DifferentialPrivacyServerSideFixedClipping` 和 " ":code:`DifferentialPrivacyServerSideAdaptiveClipping` ,用于固定剪辑和自适应剪辑。" -#: ../../source/how-to-use-differential-privacy.rst:25 +#: ../../source/how-to-use-differential-privacy.rst:-1 #, fuzzy msgid "server side clipping" msgstr "服务器端逻辑" @@ -8194,7 +8411,7 @@ msgstr "" ":code:`DifferentialPrivacyClientSideFixedClipping` 和 " ":code:`DifferentialPrivacyClientSideAdaptiveClipping`。" -#: ../../source/how-to-use-differential-privacy.rst:57 +#: ../../source/how-to-use-differential-privacy.rst:-1 #, fuzzy msgid "client side clipping" msgstr "客户端逻辑" @@ -8231,7 +8448,7 @@ msgstr "" "要利用本地差分隐私(DP)并在将客户端模型参数传输到 Flower 服务器之前为其添加噪声,可以使用 " "`LocalDpMod`。需要设置以下超参数:剪切规范值、灵敏度、ε 和 delta。" -#: ../../source/how-to-use-differential-privacy.rst:99 +#: ../../source/how-to-use-differential-privacy.rst:-1 #, fuzzy msgid "local DP mod" msgstr "本地 DP 模式" @@ -8653,11 +8870,33 @@ msgstr "" msgid "Arguments" msgstr "参数解析器" -#: ../../flwr install:1 new:1 run:1 +#: ../../flwr install:1 log:1 new:1 run:1 #, fuzzy msgid "Optional argument" msgstr "可选的改进措施" +#: ../../flwr log:1 +msgid "Get logs from a Flower project run." +msgstr "" + +#: ../../flwr log:1 +msgid "Flag to stream or print logs from the Flower run" +msgstr "" + +#: ../../flwr log +#, fuzzy +msgid "default" +msgstr "工作流程" + +#: ../../flwr log:1 +msgid "``True``" +msgstr "" + +#: ../../flwr log:1 +#, fuzzy +msgid "Required argument" +msgstr "构建文档" + #: ../../flwr new:1 #, fuzzy msgid "Create new Flower App." 
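The how-to strings above reference the server-side and client-side DP strategy wrappers as well as the ``LocalDpMod`` client mod. A hedged configuration sketch; argument names follow the referenced API docs, and all numeric values are placeholders rather than recommendations:

    from flwr.client.mod import LocalDpMod
    from flwr.server.strategy import DifferentialPrivacyServerSideFixedClipping, FedAvg

    # Central DP, server-side fixed clipping: wrap a base strategy so updates are
    # clipped on the server and Gaussian noise is added to the aggregate.
    dp_strategy = DifferentialPrivacyServerSideFixedClipping(
        FedAvg(),
        noise_multiplier=1.0,
        clipping_norm=10.0,
        num_sampled_clients=10,
    )

    # Local DP: clip and noise the update on the client before it is sent.
    # Hyperparameters, in the order listed in the guide: clipping norm,
    # sensitivity, epsilon, delta. The mod is attached to the ClientApp,
    # e.g. ClientApp(client_fn=..., mods=[local_dp_mod]).
    local_dp_mod = LocalDpMod(10.0, 1.0, 1.0, 1e-5)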
@@ -8750,7 +8989,7 @@ msgstr "模块" #: ../../source/ref-api/flwr.rst:35::1 #, fuzzy -msgid ":py:obj:`client `\\" +msgid ":py:obj:`flwr.client `\\" msgstr ":py:obj:`flwr.client `\\" #: ../../source/ref-api/flwr.rst:35::1 flwr.client:1 of @@ -8759,7 +8998,7 @@ msgstr "Flower 客户端。" #: ../../source/ref-api/flwr.rst:35::1 #, fuzzy -msgid ":py:obj:`common `\\" +msgid ":py:obj:`flwr.common `\\" msgstr ":py:obj:`flwr.common `\\" #: ../../source/ref-api/flwr.rst:35::1 flwr.common:1 of @@ -8768,7 +9007,7 @@ msgstr "服务器和客户端共享的通用组件。" #: ../../source/ref-api/flwr.rst:35::1 #, fuzzy -msgid ":py:obj:`server `\\" +msgid ":py:obj:`flwr.server `\\" msgstr ":py:obj:`flwr.server `\\" #: ../../source/ref-api/flwr.rst:35::1 @@ -8779,7 +9018,7 @@ msgstr "Flower 服务器。" #: ../../source/ref-api/flwr.rst:35::1 #, fuzzy -msgid ":py:obj:`simulation `\\" +msgid ":py:obj:`flwr.simulation `\\" msgstr ":py:obj:`flwr.simulation `\\" #: ../../source/ref-api/flwr.rst:35::1 flwr.simulation:1 of @@ -8875,7 +9114,7 @@ msgstr "使用 NumPy 的 Flower 客户端的抽象基类。" #: ../../source/ref-api/flwr.client.rst:50::1 #, fuzzy -msgid ":py:obj:`mod `\\" +msgid ":py:obj:`flwr.client.mod `\\" msgstr ":py:obj:`flwr.client `\\" #: ../../source/ref-api/flwr.client.rst:50::1 flwr.client.mod:1 of @@ -9086,48 +9325,57 @@ msgstr ":py:obj:`context `\\" msgid "Getter for `Context` client attribute." msgstr "" -#: ../../source/ref-api/flwr.client.Client.rst -#: ../../source/ref-api/flwr.client.NumPyClient.rst -#: ../../source/ref-api/flwr.client.mod.LocalDpMod.rst -#: ../../source/ref-api/flwr.common.Array.rst -#: ../../source/ref-api/flwr.common.ConfigsRecord.rst -#: ../../source/ref-api/flwr.common.Context.rst -#: ../../source/ref-api/flwr.common.Error.rst -#: ../../source/ref-api/flwr.common.Message.rst -#: ../../source/ref-api/flwr.common.Metadata.rst -#: ../../source/ref-api/flwr.common.MetricsRecord.rst #: ../../source/ref-api/flwr.common.Parameters.rst:2 -#: ../../source/ref-api/flwr.common.ParametersRecord.rst -#: ../../source/ref-api/flwr.common.RecordSet.rst -#: ../../source/ref-api/flwr.server.ClientManager.rst -#: ../../source/ref-api/flwr.server.Driver.rst -#: ../../source/ref-api/flwr.server.ServerAppComponents.rst -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst -#: ../../source/ref-api/flwr.server.strategy.Bulyan.rst -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideAdaptiveClipping.rst -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyClientSideFixedClipping.rst -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideAdaptiveClipping.rst -#: ../../source/ref-api/flwr.server.strategy.DifferentialPrivacyServerSideFixedClipping.rst -#: ../../source/ref-api/flwr.server.strategy.FedAdagrad.rst -#: ../../source/ref-api/flwr.server.strategy.FedAdam.rst -#: ../../source/ref-api/flwr.server.strategy.FedAvg.rst -#: ../../source/ref-api/flwr.server.strategy.FedAvgAndroid.rst -#: ../../source/ref-api/flwr.server.strategy.FedAvgM.rst -#: ../../source/ref-api/flwr.server.strategy.FedOpt.rst -#: ../../source/ref-api/flwr.server.strategy.FedProx.rst -#: ../../source/ref-api/flwr.server.strategy.FedTrimmedAvg.rst -#: ../../source/ref-api/flwr.server.strategy.FedYogi.rst -#: ../../source/ref-api/flwr.server.strategy.Krum.rst -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst -#: ../../source/ref-api/flwr.server.workflow.SecAggPlusWorkflow.rst -#: 
../../source/ref-api/flwr.server.workflow.SecAggWorkflow.rst -#: ../../source/ref-api/flwr.simulation.run_simulation.rst -#: ../../source/ref-api/flwr.simulation.start_simulation.rst #: flwr.client.app.start_client flwr.client.app.start_numpy_client -#: flwr.server.app.start_server -#: flwr.server.driver.driver.Driver.send_and_receive of +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.mod.localdp_mod.LocalDpMod +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.context.Context flwr.common.message.Error +#: flwr.common.message.Message flwr.common.message.Message.create_error_reply +#: flwr.common.message.Message.create_reply flwr.common.message.Metadata +#: flwr.common.record.configsrecord.ConfigsRecord +#: flwr.common.record.metricsrecord.MetricsRecord +#: flwr.common.record.parametersrecord.Array +#: flwr.common.record.parametersrecord.ParametersRecord +#: flwr.common.record.recordset.RecordSet flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.ClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.unregister +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.serverapp_components.ServerAppComponents +#: flwr.server.strategy.bulyan.Bulyan +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyClientSideAdaptiveClipping +#: flwr.server.strategy.dp_adaptive_clipping.DifferentialPrivacyServerSideAdaptiveClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyClientSideFixedClipping +#: flwr.server.strategy.dp_fixed_clipping.DifferentialPrivacyServerSideFixedClipping +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.fedadagrad.FedAdagrad +#: flwr.server.strategy.fedadam.FedAdam flwr.server.strategy.fedavg.FedAvg +#: flwr.server.strategy.fedavg_android.FedAvgAndroid +#: flwr.server.strategy.fedavgm.FedAvgM flwr.server.strategy.fedopt.FedOpt +#: flwr.server.strategy.fedprox.FedProx +#: flwr.server.strategy.fedtrimmedavg.FedTrimmedAvg +#: flwr.server.strategy.fedyogi.FedYogi flwr.server.strategy.krum.Krum +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters +#: flwr.server.workflow.secure_aggregation.secagg_workflow.SecAggWorkflow +#: flwr.server.workflow.secure_aggregation.secaggplus_workflow.SecAggPlusWorkflow +#: flwr.simulation.run_simulation.run_simulation of msgid "Parameters" msgstr "参数" @@ -9138,21 +9386,31 @@ msgid "" "customize the local evaluation process." 
msgstr "评估指令包含从服务器接收的(全局)模型参数,以及用于定制本地评估流程的配置值字典。" -#: ../../source/ref-api/flwr.client.Client.rst -#: ../../source/ref-api/flwr.client.NumPyClient.rst -#: ../../source/ref-api/flwr.common.ConfigsRecord.rst -#: ../../source/ref-api/flwr.common.Message.rst -#: ../../source/ref-api/flwr.common.MetricsRecord.rst -#: ../../source/ref-api/flwr.common.ParametersRecord.rst -#: ../../source/ref-api/flwr.server.ClientManager.rst -#: ../../source/ref-api/flwr.server.Driver.rst -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst -#: ../../source/ref-api/flwr.simulation.start_simulation.rst -#: flwr.server.app.start_server -#: flwr.server.driver.driver.Driver.send_and_receive of +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.evaluate +#: flwr.client.numpy_client.NumPyClient.fit +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters of msgid "Returns" msgstr "返回" @@ -9162,18 +9420,29 @@ msgid "" "details such as the number of local data examples used for evaluation." 
msgstr "评估结果包含本地数据集上的损失值和其他详细信息,如用于评估的本地数据的数量。" -#: ../../source/ref-api/flwr.client.Client.rst -#: ../../source/ref-api/flwr.client.NumPyClient.rst -#: ../../source/ref-api/flwr.common.Message.rst -#: ../../source/ref-api/flwr.server.ClientManager.rst -#: ../../source/ref-api/flwr.server.Driver.rst -#: ../../source/ref-api/flwr.server.SimpleClientManager.rst -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgAdaptive.rst -#: ../../source/ref-api/flwr.server.strategy.DPFedAvgFixed.rst -#: ../../source/ref-api/flwr.server.strategy.Strategy.rst -#: ../../source/ref-api/flwr.simulation.start_simulation.rst -#: flwr.server.app.start_server -#: flwr.server.driver.driver.Driver.send_and_receive of +#: flwr.client.client.Client.evaluate flwr.client.client.Client.fit +#: flwr.client.client.Client.get_parameters +#: flwr.client.client.Client.get_properties +#: flwr.client.numpy_client.NumPyClient.get_parameters +#: flwr.client.numpy_client.NumPyClient.get_properties +#: flwr.common.message.Message.create_reply flwr.server.app.start_server +#: flwr.server.client_manager.ClientManager.num_available +#: flwr.server.client_manager.ClientManager.register +#: flwr.server.client_manager.SimpleClientManager.num_available +#: flwr.server.client_manager.SimpleClientManager.register +#: flwr.server.client_manager.SimpleClientManager.wait_for +#: flwr.server.driver.driver.Driver.create_message +#: flwr.server.driver.driver.Driver.pull_messages +#: flwr.server.driver.driver.Driver.push_messages +#: flwr.server.driver.driver.Driver.send_and_receive +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_evaluate +#: flwr.server.strategy.dpfedavg_fixed.DPFedAvgFixed.configure_fit +#: flwr.server.strategy.strategy.Strategy.aggregate_evaluate +#: flwr.server.strategy.strategy.Strategy.aggregate_fit +#: flwr.server.strategy.strategy.Strategy.configure_evaluate +#: flwr.server.strategy.strategy.Strategy.configure_fit +#: flwr.server.strategy.strategy.Strategy.evaluate +#: flwr.server.strategy.strategy.Strategy.initialize_parameters of msgid "Return type" msgstr "返回类型" @@ -9537,6 +9806,11 @@ msgstr "客户端逻辑" msgid ":py:obj:`make_ffn `\\ \\(ffn\\, mods\\)" msgstr ":py:obj:`Client `\\ \\(\\)" +#: ../../source/ref-api/flwr.client.mod.rst:28::1 +#: flwr.client.mod.utils.make_ffn:1 of +msgid "." +msgstr "" + #: ../../source/ref-api/flwr.client.mod.rst:28::1 #, fuzzy msgid "" @@ -9724,10 +9998,6 @@ msgstr "" msgid "make\\_ffn" msgstr "" -#: flwr.client.mod.utils.make_ffn:1 of -msgid "." -msgstr "" - #: ../../source/ref-api/flwr.client.mod.message_size_mod.rst:2 msgid "message\\_size\\_mod" msgstr "" @@ -9756,16 +10026,6 @@ msgstr "" msgid "secaggplus\\_mod" msgstr "工作流程" -#: ../../source/ref-api/flwr.client.run_client_app.rst:2 -#, fuzzy -msgid "run\\_client\\_app" -msgstr "run\\_client\\_app" - -#: ../../source/ref-api/flwr.client.run_supernode.rst:2 -#, fuzzy -msgid "run\\_supernode" -msgstr "flower-superlink" - #: ../../source/ref-api/flwr.client.start_client.rst:2 #, fuzzy msgid "start\\_client" @@ -10643,14 +10903,9 @@ msgstr "返回存储在此对象中的字节数。" #: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid ":py:obj:`get `\\ \\(key\\[\\, default\\]\\)" +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" -#: collections.abc.Mapping.get:1 -#: collections.abc.MutableMapping.clear:1::1 of -msgid "Retrieve the corresponding layout by the string key." 
-msgstr "" - #: collections.abc.MutableMapping.clear:1::1 of #, fuzzy msgid ":py:obj:`items `\\ \\(\\)" @@ -10716,22 +10971,6 @@ msgstr ":py:obj:`values `\\ \\(\\)" msgid "This function counts booleans as occupying 1 Byte." msgstr "该函数将布尔值计算为占用 1 个字节。" -#: collections.abc.Mapping.get:3 of -msgid "" -"When there isn't an exact match, all the existing keys in the layout map " -"will be treated as a regex and map against the input key again. The first" -" match will be returned, based on the key insertion order. Return None if" -" there isn't any match found." -msgstr "" - -#: collections.abc.Mapping.get:8 of -msgid "the string key as the query for the layout." -msgstr "" - -#: collections.abc.Mapping.get:10 of -msgid "Corresponding layout based on the query." -msgstr "" - #: ../../source/ref-api/flwr.common.Context.rst:2 #, fuzzy msgid "Context" @@ -11620,7 +11859,7 @@ msgstr "编码" msgid "The encoding in which to encode the string." msgstr "字符串的编码。" -#: flwr.common.EventType.encode:5 of +#: flwr.common.EventType.encode:9 of #, fuzzy msgid "errors" msgstr "错误" @@ -11832,7 +12071,7 @@ msgid "" "string." msgstr "如果字符串以后缀字符串结尾,且后缀不为空,则返回 string[:-len(suffix)]。否则,返回原始字符串的副本。" -#: flwr.common.EventType.replace:3 of +#: flwr.common.EventType.replace:5 of #, fuzzy msgid "count" msgstr "背景" @@ -11874,7 +12113,7 @@ msgid "" "strings and the original string." msgstr "如果找不到分隔符,则返回一个包含两个空字符串和原始字符串的 3 元组。" -#: flwr.common.EventType.rsplit:3 flwr.common.EventType.split:3 of +#: flwr.common.EventType.rsplit:7 flwr.common.EventType.split:7 of #, fuzzy msgid "sep" msgstr "sep" @@ -11892,7 +12131,7 @@ msgid "" " empty strings from the result." msgstr "当设置为 \"无\"(默认值)时,将对任何空白字符(包括 \\n \\r \\t \\f 和空格)进行分割,并从结果中剔除空字符串。" -#: flwr.common.EventType.rsplit:9 flwr.common.EventType.split:9 of +#: flwr.common.EventType.rsplit:11 flwr.common.EventType.split:11 of #, fuzzy msgid "maxsplit" msgstr "最大分割" @@ -11942,7 +12181,7 @@ msgid "" "remaining cased characters have lower case." 
msgstr "更具体地说,单词以大写字母开头,其余所有大小写字符均为小写。" -#: flwr.common.EventType.translate:3 of +#: flwr.common.EventType.translate:5 of #, fuzzy msgid "table" msgstr "数据库" @@ -12451,7 +12690,7 @@ msgstr ":py:obj:`count_bytes `\\ \\(\\)" #: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid ":py:obj:`get `\\ \\(key\\[\\, default\\]\\)" +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" #: collections.abc.MutableMapping.clear:1::1 of @@ -12607,9 +12846,7 @@ msgstr ":py:obj:`count_bytes `\\ \\(\\ #: collections.abc.MutableMapping.clear:1::1 of #, fuzzy -msgid "" -":py:obj:`get `\\ \\(key\\[\\, " -"default\\]\\)" +msgid ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" msgstr ":py:obj:`get `\\ \\(k\\[\\,d\\]\\)" #: collections.abc.MutableMapping.clear:1::1 of @@ -13005,7 +13242,7 @@ msgstr "使用部分可用客户进行评估。" #: ../../source/ref-api/flwr.server.rst:56::1 #, fuzzy -msgid ":py:obj:`strategy `\\" +msgid ":py:obj:`flwr.server.strategy `\\" msgstr "server.strategy.Strategy" #: ../../source/ref-api/flwr.server.rst:56::1 @@ -13015,7 +13252,7 @@ msgstr "包含策略抽象和不同的实现方法。" #: ../../source/ref-api/flwr.server.rst:56::1 #, fuzzy -msgid ":py:obj:`workflow `\\" +msgid ":py:obj:`flwr.server.workflow `\\" msgstr "server.strategy.Strategy" #: ../../source/ref-api/flwr.server.rst:56::1 @@ -13619,8 +13856,7 @@ msgid "" msgstr "服务器实现,可以是 `flwr.server.Server` 或其子类。如果没有提供实例,`start_server` 将创建一个。" #: flwr.server.app.start_server:9 -#: flwr.server.serverapp_components.ServerAppComponents:6 -#: flwr.simulation.app.start_simulation:29 of +#: flwr.server.serverapp_components.ServerAppComponents:6 of msgid "" "Currently supported values are `num_rounds` (int, default: 1) and " "`round_timeout` in seconds (float, default: None)." @@ -13772,16 +14008,6 @@ msgstr "以秒为单位的等待时间,默认为 86400(24 小时)。" msgid "**success**" msgstr "**success**" -#: ../../source/ref-api/flwr.server.run_server_app.rst:2 -#, fuzzy -msgid "run\\_server\\_app" -msgstr "run\\_server\\_app" - -#: ../../source/ref-api/flwr.server.run_superlink.rst:2 -#, fuzzy -msgid "run\\_superlink" -msgstr "flower-superlink" - #: ../../source/ref-api/flwr.server.start_server.rst:2 #, fuzzy msgid "start\\_server" @@ -17774,16 +18000,16 @@ msgstr "使用模拟引擎运行花朵应用程序。" #: ../../source/ref-api/flwr.simulation.rst:18::1 #, fuzzy msgid "" -":py:obj:`start_simulation `\\ \\(\\*\\," -" client\\_fn\\, num\\_clients\\)" +":py:obj:`start_simulation `\\ " +"\\(\\*args\\, \\*\\*kwargs\\)" msgstr "" ":py:obj:`start_simulation `\\ \\(\\*\\," " client\\_fn\\[\\, ...\\]\\)" #: ../../source/ref-api/flwr.simulation.rst:18::1 -#: flwr.simulation.app.start_simulation:1 of -msgid "Start a Ray-based Flower simulation server." -msgstr "启动基于 Ray 的Flower模拟服务器。" +#: flwr.simulation.start_simulation:1 of +msgid "Log error stating that module `ray` could not be imported." +msgstr "" #: ../../source/ref-api/flwr.simulation.run_simulation.rst:2 #, fuzzy @@ -17855,168 +18081,31 @@ msgstr "启用后,将只显示 INFO、WARNING 和 ERROR 日志信息。启用 msgid "start\\_simulation" msgstr "start_simulation" -#: flwr.simulation.app.start_simulation:3 of -#, fuzzy -msgid "" -"A function creating `Client` instances. The function must have the " -"signature `client_fn(context: Context). It should return a single client " -"instance of type `Client`. Note that the created client instances are " -"ephemeral and will often be destroyed after a single method invocation. " -"Since client instances are not long-lived, they should not attempt to " -"carry state over method invocations. 
Any state required by the instance " -"(model, dataset, hyperparameters, ...) should be (re-)created in either " -"the call to `client_fn` or the call to any of the client methods (e.g., " -"load evaluation data in the `evaluate` method itself)." -msgstr "" -"创建客户端实例的函数。该函数必须接受一个名为 `cid` 的 `str` 参数。它应返回一个 Client " -"类型的客户端实例。请注意,创建的客户端实例是短暂的,通常在调用一个方法后就会被销毁。由于客户机实例不是长期存在的,它们不应试图在方法调用时携带状态数据。实例所需的任何状态数据(模型、数据集、超参数......)都应在调用" -" `client_fn` 或任何客户端方法(例如,在 `evaluate` 方法中加载评估数据)时(重新)创建。" +#: ../../source/ref-changelog.md:1 +msgid "Changelog" +msgstr "更新日志" -#: flwr.simulation.app.start_simulation:13 of +#: ../../source/ref-changelog.md:3 #, fuzzy -msgid "The total number of clients in this simulation." -msgstr "需要等待的客户数量。" +msgid "v1.11.1 (2024-09-11)" +msgstr "v1.3.0 (2023-02-06)" -#: flwr.simulation.app.start_simulation:15 of -#, fuzzy -msgid "" -"UNSUPPORTED, WILL BE REMOVED. USE `num_clients` INSTEAD. List " -"`client_id`s for each client. This is only required if `num_clients` is " -"not set. Setting both `num_clients` and `clients_ids` with " -"`len(clients_ids)` not equal to `num_clients` generates an error. Using " -"this argument will raise an error." -msgstr "" -"列出每个客户的 `client_id`。只有在未设置 `num_clients` " -"时才需要这样做。同时设置`num_clients`和`clients_ids`,且`len(clients_ids)`不等于`num_clients`,会产生错误。" +#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:37 +#: ../../source/ref-changelog.md:141 ../../source/ref-changelog.md:239 +#: ../../source/ref-changelog.md:339 ../../source/ref-changelog.md:403 +#: ../../source/ref-changelog.md:496 ../../source/ref-changelog.md:596 +#: ../../source/ref-changelog.md:680 ../../source/ref-changelog.md:744 +#: ../../source/ref-changelog.md:802 ../../source/ref-changelog.md:871 +#: ../../source/ref-changelog.md:940 +msgid "Thanks to our contributors" +msgstr "感谢我们的贡献者" -#: flwr.simulation.app.start_simulation:21 of -#, fuzzy -msgid "" -"CPU and GPU resources for a single client. Supported keys are `num_cpus` " -"and `num_gpus`. To understand the GPU utilization caused by `num_gpus`, " -"as well as using custom resources, please consult the Ray documentation." -msgstr "" -"\"num_gpus\": 0.0` 单个客户端的 CPU 和 GPU 资源。支持的键值为 `num_cpus` 和 `num_gpus`。要了解" -" `num_gpus` 所导致的 GPU 利用率,以及使用自定义资源的情况,请查阅 Ray 文档。" - -#: flwr.simulation.app.start_simulation:26 of -msgid "" -"An implementation of the abstract base class `flwr.server.Server`. If no " -"instance is provided, then `start_server` will create one." -msgstr "抽象基类 `flwr.server.Server`的实现。如果没有提供实例,`start_server` 将创建一个。" - -#: flwr.simulation.app.start_simulation:32 of -msgid "" -"An implementation of the abstract base class `flwr.server.Strategy`. If " -"no strategy is provided, then `start_server` will use " -"`flwr.server.strategy.FedAvg`." -msgstr "" -"抽象基类 `flwr.server.strategy` 的实现。如果没有提供策略,`start_server` 将使用 " -"`flwr.server.strategy.FedAvg`。" - -#: flwr.simulation.app.start_simulation:36 of -msgid "" -"An implementation of the abstract base class `flwr.server.ClientManager`." -" If no implementation is provided, then `start_simulation` will use " -"`flwr.server.client_manager.SimpleClientManager`." -msgstr "" -"抽象基类 `flwr.server.ClientManager` 的实现。如果没有提供实现,`start_simulation` 将使用 " -"`flwr.server.client_manager.SimpleClientManager`。" - -#: flwr.simulation.app.start_simulation:40 of -msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. 
If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args: { \"ignore_reinit_error\": True, " -"\"include_dashboard\": False } An empty dictionary can be used " -"(ray_init_args={}) to prevent any arguments from being passed to " -"ray.init." -msgstr "" -"可选字典,包含调用 `ray.init` 时的参数。如果 ray_init_args 为 None(默认值),则将使用以下默认参数初始化 Ray:" -" { \"ignore_reinit_error\": True, \"include_dashboard\": False } " -"可以使用空字典(ray_init_args={})来防止向 ray.init 传递任何参数。" - -#: flwr.simulation.app.start_simulation:40 of -msgid "" -"Optional dictionary containing arguments for the call to `ray.init`. If " -"ray_init_args is None (the default), Ray will be initialized with the " -"following default args:" -msgstr "可选字典,包含调用 `ray.init` 时的参数。如果 ray_init_args 为 None(默认值),则将使用以下默认参数初始化 Ray:" - -#: flwr.simulation.app.start_simulation:44 of -msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" -msgstr "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" - -#: flwr.simulation.app.start_simulation:46 of -msgid "" -"An empty dictionary can be used (ray_init_args={}) to prevent any " -"arguments from being passed to ray.init." -msgstr "可以使用空字典 (ray_init_args={}) 来防止向 ray.init 传递任何参数。" - -#: flwr.simulation.app.start_simulation:49 of -msgid "" -"Set to True to prevent `ray.shutdown()` in case " -"`ray.is_initialized()=True`." -msgstr "设为 True 可在 `ray.is_initialized()=True` 情况下阻止 `ray.shutdown()` 。" - -#: flwr.simulation.app.start_simulation:51 of -#, fuzzy -msgid "" -"Optionally specify the type of actor to use. The actor object, which " -"persists throughout the simulation, will be the process in charge of " -"executing a ClientApp wrapping input argument `client_fn`." -msgstr "可选择指定要使用的actor类型。actor对象将在整个模拟过程中持续存在,它将是负责运行客户端作业(即其 `fit()`方法)的进程。" - -#: flwr.simulation.app.start_simulation:55 of -msgid "" -"If you want to create your own Actor classes, you might need to pass some" -" input argument. You can use this dictionary for such purpose." -msgstr "如果您想创建自己的 Actor 类,可能需要传递一些输入参数。为此,您可以使用本字典。" - -#: flwr.simulation.app.start_simulation:58 of -msgid "" -"(default: \"DEFAULT\") Optional string (\"DEFAULT\" or \"SPREAD\") for " -"the VCE to choose in which node the actor is placed. If you are an " -"advanced user needed more control you can use lower-level scheduling " -"strategies to pin actors to specific compute nodes (e.g. via " -"NodeAffinitySchedulingStrategy). Please note this is an advanced feature." -" For all details, please refer to the Ray documentation: " -"https://docs.ray.io/en/latest/ray-core/scheduling/index.html" -msgstr "" -"(默认:\"DEFAULT\")可选字符串(\"DEFAULT \"或 \"SPREAD\"),供 VCE " -"选择将行为体放置在哪个节点上。如果你是需要更多控制权的高级用户,可以使用低级调度策略将actor固定到特定计算节点(例如,通过 " -"NodeAffinitySchedulingStrategy)。请注意,这是一项高级功能。有关详细信息,请参阅 Ray " -"文档:https://docs.ray.io/en/latest/ray-core/scheduling/index.html" - -#: flwr.simulation.app.start_simulation:67 of -msgid "**hist** -- Object containing metrics from training." 
-msgstr "**hist** -- 包含训练指标的对象。" - -#: ../../source/ref-changelog.md:1 -msgid "Changelog" -msgstr "更新日志" - -#: ../../source/ref-changelog.md:3 -#, fuzzy -msgid "v1.11.1 (2024-09-11)" -msgstr "v1.3.0 (2023-02-06)" - -#: ../../source/ref-changelog.md:5 ../../source/ref-changelog.md:37 -#: ../../source/ref-changelog.md:141 ../../source/ref-changelog.md:239 -#: ../../source/ref-changelog.md:339 ../../source/ref-changelog.md:403 -#: ../../source/ref-changelog.md:496 ../../source/ref-changelog.md:596 -#: ../../source/ref-changelog.md:680 ../../source/ref-changelog.md:744 -#: ../../source/ref-changelog.md:802 ../../source/ref-changelog.md:871 -#: ../../source/ref-changelog.md:940 -msgid "Thanks to our contributors" -msgstr "感谢我们的贡献者" - -#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:39 -#: ../../source/ref-changelog.md:143 ../../source/ref-changelog.md:241 -#: ../../source/ref-changelog.md:341 ../../source/ref-changelog.md:405 -#: ../../source/ref-changelog.md:498 ../../source/ref-changelog.md:598 -#: ../../source/ref-changelog.md:682 ../../source/ref-changelog.md:746 -#: ../../source/ref-changelog.md:804 +#: ../../source/ref-changelog.md:7 ../../source/ref-changelog.md:39 +#: ../../source/ref-changelog.md:143 ../../source/ref-changelog.md:241 +#: ../../source/ref-changelog.md:341 ../../source/ref-changelog.md:405 +#: ../../source/ref-changelog.md:498 ../../source/ref-changelog.md:598 +#: ../../source/ref-changelog.md:682 ../../source/ref-changelog.md:746 +#: ../../source/ref-changelog.md:804 msgid "" "We would like to give our special thanks to all the contributors who made" " the new version of Flower possible (in `git shortlog` order):" @@ -18122,13 +18211,6 @@ msgstr "** 添加一个新的 gRPC 选项**([#2197](https://github.com/adap/fl msgid "Incompatible changes" msgstr "不兼容的更改" -#: ../../source/ref-changelog.md:33 ../../source/ref-changelog.md:399 -#: ../../source/ref-changelog.md:676 ../../source/ref-changelog.md:740 -#: ../../source/ref-changelog.md:798 ../../source/ref-changelog.md:867 -#: ../../source/ref-changelog.md:929 -msgid "None" -msgstr "无" - #: ../../source/ref-changelog.md:35 #, fuzzy msgid "v1.11.0 (2024-08-30)" @@ -24877,7 +24959,20 @@ msgid "" "blockchain environment is available here:" msgstr "当然可以。有关在区块链环境中使用 Flower 的可用示例列表,请点击此处:" -#: ../../source/ref-faq.rst:28 +#: ../../source/ref-faq.rst:29 +msgid "`FLock: A Decentralised AI Training Platform `_." +msgstr "" + +#: ../../source/ref-faq.rst:29 +msgid "Contribute to on-chain training the model and earn rewards." +msgstr "" + +#: ../../source/ref-faq.rst:30 +#, fuzzy +msgid "Local blockchain with federated learning simulation." +msgstr "扩大联邦学习的规模" + +#: ../../source/ref-faq.rst:31 msgid "" "`Flower meets Nevermined GitHub Repository `_." @@ -24885,7 +24980,7 @@ msgstr "" "`Flower meets Nevermined GitHub Repository `_." -#: ../../source/ref-faq.rst:29 +#: ../../source/ref-faq.rst:32 msgid "" "`Flower meets Nevermined YouTube video " "`_." @@ -24893,7 +24988,7 @@ msgstr "" "`Flower meets Nevermined YouTube 视频 " "`_." -#: ../../source/ref-faq.rst:30 +#: ../../source/ref-faq.rst:33 #, fuzzy msgid "" "`Flower meets KOSMoS `_." -#: ../../source/ref-faq.rst:31 +#: ../../source/ref-faq.rst:34 msgid "" "`Flower meets Talan blog post `_ 。" -#: ../../source/ref-faq.rst:32 +#: ../../source/ref-faq.rst:35 msgid "" "`Flower meets Talan GitHub Repository " "`_ ." 
@@ -25163,205 +25258,314 @@ msgstr "" "请参阅`完整代码示例 " "`_了解更多信息。" -#: ../../source/tutorial-quickstart-fastai.rst:-1 -msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with FastAI to train a vision model on CIFAR-10." -msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 FastAI 在 CIFAR-10 上训练视觉模型。" - #: ../../source/tutorial-quickstart-fastai.rst:5 msgid "Quickstart fastai" msgstr "快速入门 fastai" -#: ../../source/tutorial-quickstart-fastai.rst:10 -msgid "Let's build a federated learning system using fastai and Flower!" -msgstr "让我们用 fastai 和 Flower 建立一个联邦学习系统!" +#: ../../source/tutorial-quickstart-fastai.rst:7 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train a " +"SqueezeNet model on MNIST using Flower and fastai. It is recommended to " +"create a virtual environment and run everything within a :doc:`virtualenv" +" `." +msgstr "" +"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" #: ../../source/tutorial-quickstart-fastai.rst:12 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 +msgid "Then, clone the code example directly from GitHub:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:20 msgid "" -"Please refer to the `full code example " -"`_ " -"to learn more." +"This will create a new directory called `quickstart-fastai` containing " +"the following files:" msgstr "" -"请参阅 `完整代码示例 `_了解更多信息。" + +#: ../../source/tutorial-quickstart-fastai.rst:33 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:33 +#, fuzzy +msgid "Next, activate your environment, then run:" +msgstr "并激活虚拟环境:" + +#: ../../source/tutorial-quickstart-fastai.rst:43 +msgid "" +"This example by default runs the Flower Simulation Engine, creating a " +"federation of 10 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." +" Let's run the project:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:56 +#: ../../source/tutorial-quickstart-huggingface.rst:65 +#: ../../source/tutorial-quickstart-mlx.rst:64 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:56 +#: ../../source/tutorial-quickstart-pytorch.rst:64 +#: ../../source/tutorial-quickstart-tensorflow.rst:65 +msgid "With default arguments you will see an output like this one:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:100 +#: ../../source/tutorial-quickstart-huggingface.rst:116 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:106 +#: ../../source/tutorial-quickstart-pytorch.rst:105 +#: ../../source/tutorial-quickstart-tensorflow.rst:106 +msgid "" +"You can also override the parameters defined in the " +"``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this:" +msgstr "" + +#: ../../source/tutorial-quickstart-fastai.rst:110 +#, fuzzy +msgid "" +"Check the `source code `_ of this tutorial in ``examples/quickstart-fasai`` " +"in the Flower GitHub repository." +msgstr "" +"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" #: ../../source/tutorial-quickstart-huggingface.rst:-1 +#, fuzzy msgid "" "Check out this Federating Learning quickstart tutorial for using Flower " -"with HuggingFace Transformers in order to fine-tune an LLM." +"with 🤗 HuggingFace Transformers in order to fine-tune an LLM." 
msgstr "查看此联邦学习 快速入门教程,了解如何使用 Flower 和 HuggingFace Transformers 来微调 LLM。" #: ../../source/tutorial-quickstart-huggingface.rst:5 msgid "Quickstart 🤗 Transformers" msgstr "🤗 Transformers快速入门" -#: ../../source/tutorial-quickstart-huggingface.rst:10 +#: ../../source/tutorial-quickstart-huggingface.rst:7 +#, fuzzy msgid "" -"Let's build a federated learning system using Hugging Face Transformers " -"and Flower!" -msgstr "让我们用Hugging Face Transformers和Flower来构建一个联邦学习系统!" +"In this federated learning tutorial we will learn how to train a large " +"language model (LLM) on the `IMDB " +"`_ dataset using Flower" +" and the 🤗 Hugging Face Transformers library. It is recommended to create" +" a virtual environment and run everything within a :doc:`virtualenv " +"`." +msgstr "" +"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" -#: ../../source/tutorial-quickstart-huggingface.rst:12 +#: ../../source/tutorial-quickstart-huggingface.rst:14 msgid "" -"We will leverage Hugging Face to federate the training of language models" -" over multiple clients using Flower. More specifically, we will fine-tune" -" a pre-trained Transformer model (distilBERT) for sequence classification" -" over a dataset of IMDB ratings. The end goal is to detect if a movie " -"rating is positive or negative." +"Let's use ``flwr new`` to create a complete Flower+🤗 Hugging Face " +"project. It will generate all the files needed to run, by default with " +"the Flower Simulation Engine, a federation of 10 nodes using |fedavg|_ " +"The dataset will be partitioned using |flowerdatasets|_'s " +"|iidpartitioner|_." msgstr "" -"我们将利用Hugging Face技术,使用 Flower 在多个客户端上联邦训练语言模型。更具体地说,我们将对预先训练好的 " -"Transformer 模型(distilBERT)进行微调,以便在 IMDB 评分数据集上进行序列分类。最终目标是检测电影评分是正面还是负面。" - -#: ../../source/tutorial-quickstart-huggingface.rst:18 -msgid "Dependencies" -msgstr "依赖关系" #: ../../source/tutorial-quickstart-huggingface.rst:20 +#: ../../source/tutorial-quickstart-mlx.rst:19 +#: ../../source/tutorial-quickstart-pytorch.rst:19 +#: ../../source/tutorial-quickstart-tensorflow.rst:20 +#, fuzzy msgid "" -"To follow along this tutorial you will need to install the following " -"packages: :code:`datasets`, :code:`evaluate`, :code:`flwr`, " -":code:`torch`, and :code:`transformers`. This can be done using " -":code:`pip`:" -msgstr "" -"要学习本教程,您需要安装以下软件包: :code:`datasets`、 :code:`evaluate`、 :code:`flwr`、 " -":code:`torch`和 :code:`transformers`。这可以通过 :code:`pip` 来完成:" +"Now that we have a rough idea of what this example is about, let's get " +"started. First, install Flower in your new environment:" +msgstr "现在,我们已经有了一个大致的概念,让我们开始吧。首先,我们需要安装 Flower。运行:" -#: ../../source/tutorial-quickstart-huggingface.rst:30 -msgid "Standard Hugging Face workflow" -msgstr "标准Hugging Face工作流程" +#: ../../source/tutorial-quickstart-huggingface.rst:28 +msgid "" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``HuggingFace``), give a name to your " +"project, and type in your developer name:" +msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:33 -msgid "Handling the data" -msgstr "处理数据" +#: ../../source/tutorial-quickstart-huggingface.rst:36 +#: ../../source/tutorial-quickstart-mlx.rst:35 +#: ../../source/tutorial-quickstart-pytorch.rst:35 +#: ../../source/tutorial-quickstart-tensorflow.rst:36 +msgid "" +"After running it you'll notice a new directory with your project name has" +" been created. 
It should have the following structure:" +msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:35 +#: ../../source/tutorial-quickstart-huggingface.rst:50 +#: ../../source/tutorial-quickstart-mlx.rst:49 +#: ../../source/tutorial-quickstart-pytorch.rst:49 +#: ../../source/tutorial-quickstart-tensorflow.rst:50 msgid "" -"To fetch the IMDB dataset, we will use Hugging Face's :code:`datasets` " -"library. We then need to tokenize the data and create :code:`PyTorch` " -"dataloaders, this is all done in the :code:`load_data` function:" +"If you haven't yet installed the project and its dependencies, you can do" +" so by:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:58 +#: ../../source/tutorial-quickstart-pytorch.rst:57 +#: ../../source/tutorial-quickstart-tensorflow.rst:58 +msgid "To run the project, do:" +msgstr "" + +#: ../../source/tutorial-quickstart-huggingface.rst:106 +msgid "You can also run the project with GPU as follows:" msgstr "" -"为了获取 IMDB 数据集,我们将使用 Hugging Face 的 :code:`datasets` 库。然后,我们需要对数据进行标记化,并创建" -" :code:`PyTorch` 数据加载器,这些都将在 :code:`load_data` 函数中完成:" -#: ../../source/tutorial-quickstart-huggingface.rst:81 -msgid "Training and testing the model" -msgstr "训练和测试模型" +#: ../../source/tutorial-quickstart-huggingface.rst:113 +msgid "" +"This will use the default arguments where each ``ClientApp`` will use 2 " +"CPUs and at most 4 ``ClientApp``\\s will run in a given GPU." +msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:83 +#: ../../source/tutorial-quickstart-huggingface.rst:124 +#: ../../source/tutorial-quickstart-mlx.rst:114 +#: ../../source/tutorial-quickstart-pytorch.rst:113 msgid "" -"Once we have a way of creating our trainloader and testloader, we can " -"take care of the training and testing. This is very similar to any " -":code:`PyTorch` training or testing loop:" +"What follows is an explanation of each component in the project you just " +"created: dataset partition, the model, defining the ``ClientApp`` and " +"defining the ``ServerApp``." msgstr "" -"有了创建 trainloader 和 testloader 的方法后,我们就可以进行训练和测试了。这与任何 :code:`PyTorch` " -"训练或测试循环都非常相似:" -#: ../../source/tutorial-quickstart-huggingface.rst:121 -msgid "Creating the model itself" -msgstr "创建模型本身" +#: ../../source/tutorial-quickstart-huggingface.rst:130 +#: ../../source/tutorial-quickstart-mlx.rst:120 +#: ../../source/tutorial-quickstart-pytorch.rst:119 +#: ../../source/tutorial-quickstart-tensorflow.rst:116 +#, fuzzy +msgid "The Data" +msgstr "加载数据" -#: ../../source/tutorial-quickstart-huggingface.rst:123 +#: ../../source/tutorial-quickstart-huggingface.rst:132 msgid "" -"To create the model itself, we will just load the pre-trained distillBERT" -" model using Hugging Face’s :code:`AutoModelForSequenceClassification` :" +"This tutorial uses |flowerdatasets|_ to easily download and partition the" +" `IMDB `_ dataset. In " +"this example you'll make use of the |iidpartitioner|_ to generate " +"``num_partitions`` partitions. You can choose |otherpartitioners|_ " +"available in Flower Datasets. To tokenize the text, we will also load the" +" tokenizer from the pre-trained Transformer model that we'll use during " +"training - more on that in the next section. Each ``ClientApp`` will call" +" this function to create dataloaders with the data that correspond to " +"their data partition." 
msgstr "" -"要创建模型本身,我们只需使用 Hugging Face 的 :code:`AutoModelForSequenceClassification` " -"加载预训练的 distillBERT 模型:" -#: ../../source/tutorial-quickstart-huggingface.rst:136 -msgid "Federating the example" -msgstr "将示例联邦化" +#: ../../source/tutorial-quickstart-huggingface.rst:178 +#: ../../source/tutorial-quickstart-mlx.rst:164 +#: ../../source/tutorial-quickstart-pytorch.rst:157 +#: ../../source/tutorial-quickstart-tensorflow.rst:145 +#, fuzzy +msgid "The Model" +msgstr "训练模型" -#: ../../source/tutorial-quickstart-huggingface.rst:139 -msgid "Creating the IMDBClient" -msgstr "创建 IMDBClient" +#: ../../source/tutorial-quickstart-huggingface.rst:180 +#, fuzzy +msgid "" +"We will leverage 🤗 Hugging Face to federate the training of language " +"models over multiple clients using Flower. More specifically, we will " +"fine-tune a pre-trained Transformer model (|berttiny|_) for sequence " +"classification over the dataset of IMDB ratings. The end goal is to " +"detect if a movie rating is positive or negative. If you have access to " +"larger GPUs, feel free to use larger models!" +msgstr "" +"我们将利用Hugging Face技术,使用 Flower 在多个客户端上联邦训练语言模型。更具体地说,我们将对预先训练好的 " +"Transformer 模型(distilBERT)进行微调,以便在 IMDB 评分数据集上进行序列分类。最终目标是检测电影评分是正面还是负面。" -#: ../../source/tutorial-quickstart-huggingface.rst:141 +#: ../../source/tutorial-quickstart-huggingface.rst:193 msgid "" -"To federate our example to multiple clients, we first need to write our " -"Flower client class (inheriting from :code:`flwr.client.NumPyClient`). " -"This is very easy, as our model is a standard :code:`PyTorch` model:" +"Note that here, ``model_name`` is a string that will be loaded from the " +"``Context`` in the ClientApp and ServerApp." msgstr "" -"要将我们的示例联邦到多个客户端,我们首先需要编写 Flower 客户端类(继承自 " -":code:`flwr.client.NumPyClient`)。这很容易,因为我们的模型是一个标准的 :code:`PyTorch` 模型:" -#: ../../source/tutorial-quickstart-huggingface.rst:169 +#: ../../source/tutorial-quickstart-huggingface.rst:196 msgid "" -"The :code:`get_parameters` function lets the server get the client's " -"parameters. Inversely, the :code:`set_parameters` function allows the " -"server to send its parameters to the client. Finally, the :code:`fit` " -"function trains the model locally for the client, and the " -":code:`evaluate` function tests the model locally and returns the " -"relevant metrics." +"In addition to loading the pretrained model weights and architecture, we " +"also include two utility functions to perform both training (i.e. " +"``train()``) and evaluation (i.e. ``test()``) using the above model. " +"These functions should look fairly familiar if you have some prior " +"experience with PyTorch. Note these functions do not have anything " +"specific to Flower. That being said, the training function will normally " +"be called, as we'll see later, from a Flower client passing its own data." 
+" In summary, your clients can use standard training/testing functions to " +"perform local training or evaluation:" msgstr "" -":code:`get_parameters` " -"函数允许服务器获取客户端的参数。相反,:code:`set_parameters`函数允许服务器将其参数发送给客户端。最后,:code:`fit`函数在本地为客户端训练模型,:code:`evaluate`函数在本地测试模型并返回相关指标。" -#: ../../source/tutorial-quickstart-huggingface.rst:175 -msgid "Starting the server" -msgstr "启动服务器" +#: ../../source/tutorial-quickstart-huggingface.rst:239 +#: ../../source/tutorial-quickstart-mlx.rst:210 +#: ../../source/tutorial-quickstart-pytorch.rst:234 +#: ../../source/tutorial-quickstart-tensorflow.rst:176 +#, fuzzy +msgid "The ClientApp" +msgstr "客户端" -#: ../../source/tutorial-quickstart-huggingface.rst:177 +#: ../../source/tutorial-quickstart-huggingface.rst:241 msgid "" -"Now that we have a way to instantiate clients, we need to create our " -"server in order to aggregate the results. Using Flower, this can be done " -"very easily by first choosing a strategy (here, we are using " -":code:`FedAvg`, which will define the global weights as the average of " -"all the clients' weights at each round) and then using the " -":code:`flwr.server.start_server` function:" +"The main changes we have to make to use 🤗 Hugging Face with Flower will " +"be found in the ``get_weights()`` and ``set_weights()`` functions. Under " +"the hood, the ``transformers`` library uses PyTorch, which means we can " +"reuse the ``get_weights()`` and ``set_weights()`` code that we defined in" +" the :doc:`Quickstart PyTorch ` tutorial. As" +" a reminder, in ``get_weights()``, PyTorch model parameters are extracted" +" and represented as a list of NumPy arrays. The ``set_weights()`` " +"function that's the opposite: given a list of NumPy arrays it applies " +"them to an existing PyTorch model. Doing this in fairly easy in PyTorch." msgstr "" -"现在我们有了实例化客户端的方法,我们需要创建服务器,以便汇总结果。使用 Flower,首先选择一个策略(这里我们使用 " -":code:`FedAvg`,它将把全局模型参数定义为每轮所有客户端模型参数的平均值),然后使用 " -":code:`flwr.server.start_server`函数,就可以非常轻松地完成这项工作:" -#: ../../source/tutorial-quickstart-huggingface.rst:205 +#: ../../source/tutorial-quickstart-huggingface.rst:254 +#: ../../source/tutorial-quickstart-pytorch.rst:245 msgid "" -"The :code:`weighted_average` function is there to provide a way to " -"aggregate the metrics distributed amongst the clients (basically this " -"allows us to display a nice average accuracy and loss for every round)." +"The specific implementation of ``get_weights()`` and ``set_weights()`` " +"depends on the type of models you use. The ones shown below work for a " +"wide range of PyTorch models but you might need to adjust them if you " +"have more exotic model architectures." msgstr "" -"使用 :code:`weighted_average` " -"函数是为了提供一种方法来汇总分布在客户端的指标(基本上,这可以让我们显示每一轮的平均精度和损失值)。" - -#: ../../source/tutorial-quickstart-huggingface.rst:209 -msgid "Putting everything together" -msgstr "把所有东西放在一起" -#: ../../source/tutorial-quickstart-huggingface.rst:211 -msgid "We can now start client instances using:" -msgstr "现在我们可以使用:" +#: ../../source/tutorial-quickstart-huggingface.rst:269 +#: ../../source/tutorial-quickstart-pytorch.rst:261 +msgid "" +"The rest of the functionality is directly inspired by the centralized " +"case. The ``fit()`` method in the client trains the model using the local" +" dataset. 
Similarly, the ``evaluate()`` method is used to evaluate the " +"model received on a held-out validation set that the client might have:" +msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:221 +#: ../../source/tutorial-quickstart-huggingface.rst:296 msgid "" -"And they will be able to connect to the server and start the federated " -"training." -msgstr "他们就能连接到服务器,开始联邦训练。" +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparemeters defined in your " +"``pyproject.toml`` to configure the run. In this tutorial we access the " +"``local-epochs`` setting to control the number of epochs a ``ClientApp`` " +"will perform when running the ``fit()`` method. You could define " +"additional hyperparameters in ``pyproject.toml`` and access them here." +msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:223 +#: ../../source/tutorial-quickstart-huggingface.rst:330 +#: ../../source/tutorial-quickstart-mlx.rst:376 +#: ../../source/tutorial-quickstart-pytorch.rst:321 +#: ../../source/tutorial-quickstart-tensorflow.rst:245 #, fuzzy +msgid "The ServerApp" +msgstr "服务器" + +#: ../../source/tutorial-quickstart-huggingface.rst:332 msgid "" -"If you want to check out everything put together, you should check out " -"the `full code example `_ ." +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"|serverappcomponents|_ as opposed to a |client|_ In this example we use " +"the `FedAvg` strategy. To it we pass a randomly initialized model that " +"will server as the global model to federated. Note that the value of " +"``fraction_fit`` is read from the run config. You can find the default " +"value defined in the ``pyproject.toml``." msgstr "" -"如果您想查看所有内容,请查看完整的代码示例: [https://github.com/adap/flower/tree/main/examples" -"/quickstart-" -"huggingface](https://github.com/adap/flower/tree/main/examples" -"/quickstart-huggingface)." -#: ../../source/tutorial-quickstart-huggingface.rst:226 +#: ../../source/tutorial-quickstart-huggingface.rst:371 msgid "" -"Of course, this is a very basic example, and a lot can be added or " -"modified, it was just to showcase how simply we could federate a Hugging " -"Face workflow using Flower." -msgstr "当然,这只是一个非常基本的示例,还可以添加或修改很多内容,只是为了展示我们可以如何简单地使用 Flower 联合Hugging Face的工作流程。" +"Congratulations! You've successfully built and run your first federated " +"learning system for an LLM." +msgstr "" -#: ../../source/tutorial-quickstart-huggingface.rst:229 +#: ../../source/tutorial-quickstart-huggingface.rst:376 msgid "" -"Note that in this example we used :code:`PyTorch`, but we could have very" -" well used :code:`TensorFlow`." -msgstr "请注意,在本例中我们使用了 :code:`PyTorch`,但也完全可以使用 :code:`TensorFlow`。" +"Check the source code of the extended version of this tutorial in " +"|quickstart_hf_link|_ in the Flower GitHub repository. For a " +"comprehensive example of a federated fine-tuning of an LLM with Flower, " +"refer to the |flowertune|_ example in the Flower GitHub repository." 
+msgstr "" #: ../../source/tutorial-quickstart-ios.rst:-1 msgid "" @@ -25419,7 +25623,6 @@ msgstr "或者Poetry:" #: ../../source/tutorial-quickstart-ios.rst:34 #: ../../source/tutorial-quickstart-scikitlearn.rst:40 -#: ../../source/tutorial-quickstart-tensorflow.rst:29 #: ../../source/tutorial-quickstart-xgboost.rst:55 msgid "Flower Client" msgstr "Flower 客户端" @@ -25511,13 +25714,11 @@ msgstr "" #: ../../source/tutorial-quickstart-ios.rst:129 #: ../../source/tutorial-quickstart-scikitlearn.rst:167 -#: ../../source/tutorial-quickstart-tensorflow.rst:98 #: ../../source/tutorial-quickstart-xgboost.rst:341 msgid "Flower Server" msgstr "Flower 服务器" #: ../../source/tutorial-quickstart-ios.rst:131 -#: ../../source/tutorial-quickstart-tensorflow.rst:100 msgid "" "For simple workloads we can start a Flower server and leave all the " "configuration possibilities at their default values. In a file named " @@ -25528,12 +25729,10 @@ msgstr "" #: ../../source/tutorial-quickstart-ios.rst:142 #: ../../source/tutorial-quickstart-scikitlearn.rst:230 -#: ../../source/tutorial-quickstart-tensorflow.rst:112 msgid "Train the model, federated!" msgstr "联邦训练模型!" #: ../../source/tutorial-quickstart-ios.rst:144 -#: ../../source/tutorial-quickstart-tensorflow.rst:114 #: ../../source/tutorial-quickstart-xgboost.rst:567 msgid "" "With both client and server ready, we can now run everything and see " @@ -25736,7 +25935,7 @@ msgstr "" ":code:`Client`更容易实现,因为它避免了一些不必要的操作。:code:`FlowerClient` " "需要实现四个方法,两个用于获取/设置模型参数,一个用于训练模型,一个用于测试模型:" -#: ../../source/tutorial-quickstart-jax.rst:165 +#: ../../source/tutorial-quickstart-jax.rst:167 msgid ":code:`set_parameters (optional)`" msgstr ":code:`set_parameters (可选)`" @@ -25833,14 +26032,6 @@ msgid "" "api/flwr_datasets.partitioner.IidPartitioner.html#flwr_datasets.partitioner.IidPartitioner>`_." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:19 -#: ../../source/tutorial-quickstart-pytorch.rst:19 -#, fuzzy -msgid "" -"Now that we have a rough idea of what this example is about, let's get " -"started. First, install Flower in your new environment:" -msgstr "现在,我们已经有了一个大致的概念,让我们开始吧。首先,我们需要安装 Flower。运行:" - #: ../../source/tutorial-quickstart-mlx.rst:27 msgid "" "Then, run the command below. You will be prompted to select of the " @@ -25848,66 +26039,27 @@ msgid "" "type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:35 -#: ../../source/tutorial-quickstart-pytorch.rst:35 -msgid "" -"After running it you'll notice a new directory with your project name has" -" been created. 
It should have the following structure:" -msgstr "" - -#: ../../source/tutorial-quickstart-mlx.rst:49 -#: ../../source/tutorial-quickstart-pytorch.rst:49 -msgid "" -"If you haven't yet installed the project and its dependencies, you can do" -" so by:" -msgstr "" - #: ../../source/tutorial-quickstart-mlx.rst:57 msgid "To run the project do:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:64 -#: ../../source/tutorial-quickstart-pytorch.rst:64 -msgid "With default arguments you will see an output like this one:" -msgstr "" - #: ../../source/tutorial-quickstart-mlx.rst:106 msgid "" "You can also override the parameters defined in " "``[tool.flwr.app.config]`` section in the ``pyproject.toml`` like this:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:114 -#: ../../source/tutorial-quickstart-pytorch.rst:113 +#: ../../source/tutorial-quickstart-mlx.rst:122 msgid "" -"What follows is an explanation of each component in the project you just " -"created: dataset partition, the model, defining the ``ClientApp`` and " -"defining the ``ServerApp``." +"We will use `Flower Datasets `_ to " +"easily download and partition the `MNIST` dataset. In this example you'll" +" make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets:" msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:120 -#: ../../source/tutorial-quickstart-pytorch.rst:119 -#, fuzzy -msgid "The Data" -msgstr "加载数据" - -#: ../../source/tutorial-quickstart-mlx.rst:122 -msgid "" -"We will use `Flower Datasets `_ to " -"easily download and partition the `MNIST` dataset. In this example you'll" -" make use of the `IidPartitioner `_" -" to generate `num_partitions` partitions. You can choose `other " -"partitioners `_ available in Flower Datasets:" -msgstr "" - -#: ../../source/tutorial-quickstart-mlx.rst:164 -#: ../../source/tutorial-quickstart-pytorch.rst:157 -#, fuzzy -msgid "The Model" -msgstr "训练模型" - #: ../../source/tutorial-quickstart-mlx.rst:166 msgid "" "We define the model as in the `centralized MLX example " @@ -25921,12 +26073,6 @@ msgid "" "over batches." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:210 -#: ../../source/tutorial-quickstart-pytorch.rst:234 -#, fuzzy -msgid "The ClientApp" -msgstr "客户端" - #: ../../source/tutorial-quickstart-mlx.rst:212 msgid "" "The main changes we have to make to use `MLX` with `Flower` will be found" @@ -25995,12 +26141,6 @@ msgid "" "method." msgstr "" -#: ../../source/tutorial-quickstart-mlx.rst:376 -#: ../../source/tutorial-quickstart-pytorch.rst:321 -#, fuzzy -msgid "The ServerApp" -msgstr "服务器" - #: ../../source/tutorial-quickstart-mlx.rst:378 msgid "" "To construct a ``ServerApp``, we define a ``server_fn()`` callback with " @@ -26014,6 +26154,7 @@ msgstr "" #: ../../source/tutorial-quickstart-mlx.rst:402 #: ../../source/tutorial-quickstart-pytorch.rst:360 +#: ../../source/tutorial-quickstart-tensorflow.rst:279 msgid "" "Congratulations! You've successfully built and run your first federated " "learning system." 
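The ``get_weights()``/``set_weights()`` helpers that the quickstarts above keep referring to boil down to converting between a PyTorch ``state_dict`` and a list of NumPy arrays. A generic sketch (the generated projects' own helpers may differ in detail) looks like this::

    from collections import OrderedDict

    import torch
    import torch.nn as nn


    def get_weights(net: nn.Module):
        # Extract model parameters as a list of NumPy ndarrays, which is
        # what a Flower NumPyClient exchanges with the server.
        return [val.cpu().numpy() for _, val in net.state_dict().items()]


    def set_weights(net: nn.Module, parameters):
        # Apply a list of NumPy ndarrays back onto an existing model.
        params_dict = zip(net.state_dict().keys(), parameters)
        state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})
        net.load_state_dict(state_dict, strict=True)


    if __name__ == "__main__":
        # Round-trip the weights of a small placeholder model.
        net = nn.Linear(4, 2)
        ndarrays = get_weights(net)
        set_weights(net, ndarrays)

In the generated projects these helpers typically sit next to the model definition and are called from the client's ``fit()`` and ``evaluate()`` methods.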
@@ -26088,16 +26229,6 @@ msgid "" "and type in your developer name:" msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:57 -msgid "To run the project, do:" -msgstr "" - -#: ../../source/tutorial-quickstart-pytorch.rst:105 -msgid "" -"You can also override the parameters defined in the " -"``[tool.flwr.app.config]`` section in ``pyproject.toml`` like this:" -msgstr "" - #: ../../source/tutorial-quickstart-pytorch.rst:121 msgid "" "This tutorial uses `Flower Datasets `_ " @@ -26141,22 +26272,6 @@ msgid "" "PyTorch model. Doing this in fairly easy in PyTorch." msgstr "" -#: ../../source/tutorial-quickstart-pytorch.rst:245 -msgid "" -"The specific implementation of ``get_weights()`` and ``set_weights()`` " -"depends on the type of models you use. The ones shown below work for a " -"wide range of PyTorch models but you might need to adjust them if you " -"have more exotic model architectures." -msgstr "" - -#: ../../source/tutorial-quickstart-pytorch.rst:261 -msgid "" -"The rest of the functionality is directly inspired by the centralized " -"case. The ``fit()`` method in the client trains the model using the local" -" dataset. Similarly, the ``evaluate()`` method is used to evaluate the " -"model received on a held-out validation set that the client might have:" -msgstr "" - #: ../../source/tutorial-quickstart-pytorch.rst:294 msgid "" "Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " @@ -26193,6 +26308,7 @@ msgstr "" "quickstart/>`_ 可在 :code:`examples/xgboost-quickstart` 中找到。" #: ../../source/tutorial-quickstart-pytorch.rst:372 +#: ../../source/tutorial-quickstart-tensorflow.rst:295 #, fuzzy msgid "Video tutorial" msgstr "教程" @@ -26204,30 +26320,53 @@ msgid "" "that shows the new APIs (as the content above does)" msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:-1 -msgid "" -"Check out this Federated Learning quickstart tutorial for using Flower " -"with PyTorch Lightning to train an Auto Encoder model on MNIST." -msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 PyTorch Lightning 在 MNIST 上训练自动编码器模型。" - #: ../../source/tutorial-quickstart-pytorch-lightning.rst:5 msgid "Quickstart PyTorch Lightning" msgstr "快速入门 PyTorch Lightning" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:10 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:7 +#, fuzzy +msgid "" +"In this federated learning tutorial we will learn how to train an " +"AutoEncoder model on MNIST using Flower and PyTorch Lightning. It is " +"recommended to create a virtual environment and run everything within a " +":doc:`virtualenv `." +msgstr "" +"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:20 msgid "" -"Let's build a horizontal federated learning system using PyTorch " -"Lightning and Flower!" -msgstr "让我们使用 PyTorch Lightning 和 Flower 构建一个水平联邦学习系统!" +"This will create a new directory called `quickstart-pytorch-lightning` " +"containing the following files:" +msgstr "" -#: ../../source/tutorial-quickstart-pytorch-lightning.rst:12 +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:43 msgid "" -"Please refer to the `full code example " -"`_ to learn more." +"By default, Flower Simulation Engine will be started and it will create a" +" federation of 4 nodes using `FedAvg `_ " +"as the aggregation strategy. The dataset will be partitioned using Flower" +" Dataset's `IidPartitioner `_." 
+" To run the project, do:" +msgstr "" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:94 +msgid "" +"Each simulated `ClientApp` (two per round) will also log a summary of " +"their local training process. Expect this output to be similar to:" msgstr "" -"请参阅 `完整代码示例 `_ 了解更多信息。" + +#: ../../source/tutorial-quickstart-pytorch-lightning.rst:116 +#, fuzzy +msgid "" +"Check the `source code `_ of this tutorial in ``examples" +"/quickstart-pytorch-lightning`` in the Flower GitHub repository." +msgstr "" +"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" #: ../../source/tutorial-quickstart-scikitlearn.rst:-1 msgid "" @@ -26316,7 +26455,7 @@ msgstr ":code:`set_model_params()`" msgid "Sets the parameters of a :code:`sklearn` LogisticRegression model" msgstr "设置:code:`sklean`的LogisticRegression模型的参数" -#: ../../source/tutorial-quickstart-scikitlearn.rst:49 +#: ../../source/tutorial-quickstart-scikitlearn.rst:50 msgid ":code:`set_initial_params()`" msgstr ":code:`set_initial_params()`" @@ -26387,7 +26526,7 @@ msgstr "" msgid "return the model weight as a list of NumPy ndarrays" msgstr "以 NumPy ndarrays 列表形式返回模型参数" -#: ../../source/tutorial-quickstart-scikitlearn.rst:120 +#: ../../source/tutorial-quickstart-scikitlearn.rst:121 msgid ":code:`set_parameters` (optional)" msgstr ":code:`set_parameters` (可选)" @@ -26499,7 +26638,6 @@ msgid "" msgstr "客户端和服务器都准备就绪后,我们现在就可以运行一切,看看联邦学习的运行情况。联邦学习系统通常有一个服务器和多个客户端。因此,我们必须先启动服务器:" #: ../../source/tutorial-quickstart-scikitlearn.rst:239 -#: ../../source/tutorial-quickstart-tensorflow.rst:122 #: ../../source/tutorial-quickstart-xgboost.rst:575 msgid "" "Once the server is running we can start the clients in different " @@ -26507,7 +26645,6 @@ msgid "" msgstr "服务器运行后,我们就可以在不同终端启动客户端了。打开一个新终端,启动第一个客户端:" #: ../../source/tutorial-quickstart-scikitlearn.rst:246 -#: ../../source/tutorial-quickstart-tensorflow.rst:129 #: ../../source/tutorial-quickstart-xgboost.rst:582 msgid "Open another terminal and start the second client:" msgstr "打开另一台终端,启动第二个客户端:" @@ -26533,121 +26670,117 @@ msgstr "" "mnist>`_ 可以在 :code:`examples/sklearn-logreg-mnist` 中找到。" #: ../../source/tutorial-quickstart-tensorflow.rst:-1 +#, fuzzy msgid "" "Check out this Federated Learning quickstart tutorial for using Flower " -"with TensorFlow to train a MobilNetV2 model on CIFAR-10." +"with TensorFlow to train a CNN model on CIFAR-10." msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 TensorFlow 在 CIFAR-10 上训练 MobilNetV2 模型。" #: ../../source/tutorial-quickstart-tensorflow.rst:5 msgid "Quickstart TensorFlow" msgstr "快速入门 TensorFlow" -#: ../../source/tutorial-quickstart-tensorflow.rst:13 -msgid "Let's build a federated learning system in less than 20 lines of code!" -msgstr "让我们用不到 20 行代码构建一个联邦学习系统!" - -#: ../../source/tutorial-quickstart-tensorflow.rst:15 -msgid "Before Flower can be imported we have to install it:" -msgstr "在导入 Flower 之前,我们必须先安装它:" - -#: ../../source/tutorial-quickstart-tensorflow.rst:21 +#: ../../source/tutorial-quickstart-tensorflow.rst:7 +#, fuzzy msgid "" -"Since we want to use the Keras API of TensorFlow (TF), we have to install" -" TF as well:" -msgstr "由于我们要使用 TensorFlow (TF) 的 Keras API,因此还必须安装 TF:" - -#: ../../source/tutorial-quickstart-tensorflow.rst:31 -msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" -msgstr "接下来,在名为 :code:`client.py` 的文件中导入 Flower 和 TensorFlow:" +"In this tutorial we will learn how to train a Convolutional Neural " +"Network on CIFAR-10 using the Flower framework and TensorFlow. 
First of " +"all, it is recommended to create a virtual environment and run everything" +" within a :doc:`virtualenv `." +msgstr "" +"首先,建议创建一个虚拟环境,并在 `virtualenv `_ 中运行一切。" -#: ../../source/tutorial-quickstart-tensorflow.rst:38 +#: ../../source/tutorial-quickstart-tensorflow.rst:13 msgid "" -"We use the Keras utilities of TF to load CIFAR10, a popular colored image" -" classification dataset for machine learning. The call to " -":code:`tf.keras.datasets.cifar10.load_data()` downloads CIFAR10, caches " -"it locally, and then returns the entire training and test set as NumPy " -"ndarrays." +"Let's use `flwr new` to create a complete Flower+TensorFlow project. It " +"will generate all the files needed to run, by default with the Flower " +"Simulation Engine, a federation of 10 nodes using `FedAvg " +"`_. The " +"dataset will be partitioned using Flower Dataset's `IidPartitioner " +"`_." msgstr "" -"我们使用 TF 的 Keras 实用程序加载 CIFAR10,这是一个用于机器学习的流行彩色图像分类数据集。调用 " -":code:`tf.keras.datasets.cifar10.load_data()` 会下载 CIFAR10,将其缓存到本地,然后以 " -"NumPy ndarrays 的形式返回整个训练集和测试集。" -#: ../../source/tutorial-quickstart-tensorflow.rst:47 +#: ../../source/tutorial-quickstart-tensorflow.rst:28 msgid "" -"Next, we need a model. For the purpose of this tutorial, we use " -"MobilNetV2 with 10 output classes:" -msgstr "接下来,我们需要一个模型。在本教程中,我们使用带有 10 个输出类的 MobilNetV2:" +"Then, run the command below. You will be prompted to select one of the " +"available templates (choose ``TensorFlow``), give a name to your project," +" and type in your developer name:" +msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:54 +#: ../../source/tutorial-quickstart-tensorflow.rst:118 msgid "" -"The Flower server interacts with clients through an interface called " -":code:`Client`. When the server selects a particular client for training," -" it sends training instructions over the network. The client receives " -"those instructions and calls one of the :code:`Client` methods to run " -"your code (i.e., to train the neural network we defined earlier)." +"This tutorial uses `Flower Datasets `_ " +"to easily download and partition the `CIFAR-10` dataset. In this example " +"you'll make use of the `IidPartitioner `_" +" to generate `num_partitions` partitions. You can choose `other " +"partitioners `_ available in Flower Datasets. Each " +"``ClientApp`` will call this function to create the ``NumPy`` arrays that" +" correspond to their data partition." msgstr "" -"Flower 服务器通过一个名为 :code:`Client` " -"的接口与客户端交互。当服务器选择一个特定的客户端进行训练时,它会通过网络发送训练指令。客户端接收到这些指令后,会调用 :code:`Client`" -" 方法之一来运行您的代码(即训练我们之前定义的神经网络)。" -#: ../../source/tutorial-quickstart-tensorflow.rst:60 +#: ../../source/tutorial-quickstart-tensorflow.rst:147 msgid "" -"Flower provides a convenience class called :code:`NumPyClient` which " -"makes it easier to implement the :code:`Client` interface when your " -"workload uses Keras. The :code:`NumPyClient` interface defines three " -"methods which can be implemented in the following way:" +"Next, we need a model. 
We defined a simple Convolutional Neural Network " +"(CNN), but feel free to replace it with a more sophisticated model if " +"you'd like:" msgstr "" -"Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当您的工作负载使用 Keras 时,该类可以更轻松地实现 " -":code:`Client` 接口。:code:`NumPyClient` 接口定义了三个方法,可以通过以下方式实现:" -#: ../../source/tutorial-quickstart-tensorflow.rst:82 +#: ../../source/tutorial-quickstart-tensorflow.rst:178 msgid "" -"We can now create an instance of our class :code:`CifarClient` and add " -"one line to actually run this client:" -msgstr "现在我们可以创建一个 :code:`CifarClient` 类的实例,并添加一行来实际运行该客户端:" +"With `TensorFlow`, we can use the built-in ``get_weights()`` and " +"``set_weights()`` functions, which simplifies the implementation with " +"`Flower`. The rest of the functionality in the ClientApp is directly " +"inspired by the centralized case. The ``fit()`` method in the client " +"trains the model using the local dataset. Similarly, the ``evaluate()`` " +"method is used to evaluate the model received on a held-out validation " +"set that the client might have:" +msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:90 -#, fuzzy +#: ../../source/tutorial-quickstart-tensorflow.rst:212 msgid "" -"That's it for the client. We only have to implement :code:`Client` or " -":code:`NumPyClient` and call :code:`fl.client.start_client()`. If you " -"implement a client of type :code:`NumPyClient` you'll need to first call " -"its :code:`to_client()` method. The string :code:`\"[::]:8080\"` tells " -"the client which server to connect to. In our case we can run the server " -"and the client on the same machine, therefore we use " -":code:`\"[::]:8080\"`. If we run a truly federated workload with the " -"server and clients running on different machines, all that needs to " -"change is the :code:`server_address` we point the client at." +"Finally, we can construct a ``ClientApp`` using the ``FlowerClient`` " +"defined above by means of a ``client_fn()`` callback. Note that the " +"`context` enables you to get access to hyperparameters defined in your " +"``pyproject.toml`` to configure the run. For example, in this tutorial we" +" access the `local-epochs` setting to control the number of epochs a " +"``ClientApp`` will perform when running the ``fit()`` method, in addition" +" to `batch-size`. You could define additional hyperparameters in " +"``pyproject.toml`` and access them here." msgstr "" -"这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient` 并调用 " -":code:`fl.client.start_client()` 或 " -":code:`fl.client.start_numpy_client()`。字符串 " -":code:`\"[::]:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此使用 " -":code:`\"[::]:8080\"。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是客户端指向的 " -":code:`server_address`。" -#: ../../source/tutorial-quickstart-tensorflow.rst:135 -msgid "Each client will have its own dataset." -msgstr "每个客户都有自己的数据集。" +#: ../../source/tutorial-quickstart-tensorflow.rst:247 +msgid "" +"To construct a ``ServerApp`` we define a ``server_fn()`` callback with an" +" identical signature to that of ``client_fn()`` but the return type is " +"`ServerAppComponents `_ as " +"opposed to a `Client `_. In this example we use the " +"`FedAvg`. To it we pass a randomly initialized model that will serve as " +"the global model to federate." 
+msgstr "" -#: ../../source/tutorial-quickstart-tensorflow.rst:137 +#: ../../source/tutorial-quickstart-tensorflow.rst:284 +#, fuzzy msgid "" -"You should now see how the training does in the very first terminal (the " -"one that started the server):" -msgstr "现在你应该能在第一个终端(启动服务器的终端)看到训练的效果了:" +"Check the source code of the extended version of this tutorial in " +"|quickstart_tf_link|_ in the Flower GitHub repository." +msgstr "" +"此示例的`完整源代码 `_ 可在 :code:`examples/xgboost-quickstart` 中找到。" -#: ../../source/tutorial-quickstart-tensorflow.rst:169 +#: ../../source/tutorial-quickstart-tensorflow.rst:299 msgid "" -"Congratulations! You've successfully built and run your first federated " -"learning system. The full `source code " -"`_ for this can be found in :code:`examples" -"/quickstart-tensorflow/client.py`." +"The video shown below shows how to setup a TensorFlow + Flower project " +"using our previously recommended APIs. A new video tutorial will be " +"released that shows the new APIs (as the content above does)" msgstr "" -"恭喜您!您已经成功构建并运行了第一个联邦学习系统。`完整的源代码 " -"`_ 可以在 :code:`examples/quickstart-" -"tensorflow/client.py` 中找到。" #: ../../source/tutorial-quickstart-xgboost.rst:-1 msgid "" @@ -28861,7 +28994,7 @@ msgid "" msgstr "在机器学习中,我们有一个模型和数据。模型可以是一个神经网络(如图所示),也可以是其他东西,比如经典的线性回归。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:41 -msgid "|e5918c1c06a4434bbe4bf49235e40059|" +msgid "|e87b69b2ada74ea49412df16f4a0b9cc|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:109 @@ -28876,7 +29009,7 @@ msgid "" msgstr "我们使用数据来训练模型,以完成一项有用的任务。任务可以是检测图像中的物体、转录音频或玩围棋等游戏。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:53 -msgid "|c0165741bd1944f09ec55ce49032377d|" +msgid "|33cacb7d985c4906b348515c1a5cd993|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:111 @@ -28897,7 +29030,7 @@ msgid "" msgstr "它源于智能手机上用户与应用程序的交互、汽车上传感器数据的收集、笔记本电脑上键盘输入的接收,或者智能扬声器上某人试着唱的歌。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:67 -msgid "|0a0ac9427ac7487b8e52d75ed514f04e|" +msgid "|cc080a555947492fa66131dc3a967603|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:113 @@ -28915,7 +29048,7 @@ msgstr "" "\"通常不只是一个地方,而是很多地方。它可能是多个运行同一应用程序的设备。但也可能是多个组织,都在为同一任务生成数据。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:79 -msgid "|5defee3ea4ca40d99fcd3e4ea045be25|" +msgid "|085c3e0fb8664c6aa06246636524b20b|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:115 @@ -28931,7 +29064,7 @@ msgid "" msgstr "因此,要使用机器学习或任何类型的数据分析,过去使用的方法是在中央服务器上收集所有数据。这个服务器可以在数据中心的某个地方,也可以在云端的某个地方。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:91 -msgid "|74f26ca701254d3db57d7899bd91eb55|" +msgid "|bfe69c74e48c45d49b50251c38c2a019|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:117 @@ -28946,7 +29079,7 @@ msgid "" msgstr "一旦所有数据都收集到一处,我们最终就可以使用机器学习算法在数据上训练我们的模型。这就是我们基本上一直依赖的机器学习方法。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:103 -msgid "|bda79f21f8154258a40e5766b2634ad7|" +msgid "|ebbecd651f0348d99c6511ea859bf4ca|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:119 @@ -28966,7 +29099,7 @@ msgid "" msgstr "我们刚刚看到的经典机器学习方法可以在某些情况下使用。很好的例子包括对假日照片进行分类或分析网络流量。在这些案例中,所有数据自然都可以在中央服务器上获得。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:138 -msgid "|89d30862e62e4f9989e193483a08680a|" +msgid "|163117eb654a4273babba413cf8065f5|" msgstr "" #: 
../../source/tutorial-series-what-is-federated-learning.ipynb:173 @@ -28981,7 +29114,7 @@ msgid "" msgstr "但这种方法并不适用于许多其他情况。例如,集中服务器上没有数据,或者一台服务器上的数据不足以训练出一个好的模型。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:150 -msgid "|77e9918671c54b4f86e01369c0785ce8|" +msgid "|452ac3ba453b4cd1be27be1ba7560d64|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:175 @@ -29129,7 +29262,7 @@ msgid "" msgstr "我们首先在服务器上初始化模型。这与经典的集中式学习完全相同:我们随机或从先前保存的检查点初始化模型参数。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:210 -msgid "|7e4ccef37cc94148a067107b34eb7447|" +msgid "|f403fcd69e4e44409627e748b404c086|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:307 @@ -29153,7 +29286,7 @@ msgid "" msgstr "接下来,我们会将全局模型的参数发送到连接的客户端节点(如智能手机等边缘设备或企业的服务器)。这是为了确保每个参与节点都使用相同的模型参数开始本地训练。我们通常只使用几个连接节点,而不是所有节点。这样做的原因是,选择越来越多的客户端节点会导致收益递减。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:225 -msgid "|28e47e4cded14479a0846c8e5f22c872|" +msgid "|4b00fe63870145968f8443619a792a42|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:309 @@ -29179,7 +29312,7 @@ msgstr "" "(mini-batches)。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:240 -msgid "|4b8c5d1afa144294b76ffc76e4658a38|" +msgid "|368378731066486fa4397e89bc6b870c|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:311 @@ -29202,7 +29335,7 @@ msgid "" msgstr "经过本地训练后,每个客户节点最初收到的模型参数都会略有不同。参数之所以不同,是因为每个客户端节点的本地数据集中都有不同的数据。然后,客户端节点将这些模型更新发回服务器。它们发送的模型更新既可以是完整的模型参数,也可以只是本地训练过程中积累的梯度。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:255 -msgid "|9dbdb3a0f6cb4a129fac863eaa414c17|" +msgid "|a66aa83d85bf4ffba7ed660b718066da|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:313 @@ -29248,7 +29381,7 @@ msgstr "" " 100 个示例的 10 倍。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:273 -msgid "|81749d0ac0834c36a83bd38f433fea31|" +msgid "|82324b9af72a4582a81839d55caab767|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:315 @@ -29355,7 +29488,7 @@ msgstr "" "为联邦学习、分析和评估提供了一种统一的方法。它允许用户联邦化任何工作负载、任何 ML 框架和任何编程语言。" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:334 -msgid "|ed9aae51da70428eab7eef32f21e819e|" +msgid "|fbf2da0da3cc4f8ab3b3eff852d80c41|" msgstr "" #: ../../source/tutorial-series-what-is-federated-learning.ipynb:340 @@ -32665,3 +32798,575 @@ msgstr "" #~ msgid "|c00bf2750bc24d229737a0fe1395f0fc|" #~ msgstr "" +#~ msgid "run\\_client\\_app" +#~ msgstr "run\\_client\\_app" + +#~ msgid "run\\_supernode" +#~ msgstr "flower-superlink" + +#~ msgid "Retrieve the corresponding layout by the string key." +#~ msgstr "" + +#~ msgid "" +#~ "When there isn't an exact match, " +#~ "all the existing keys in the " +#~ "layout map will be treated as a" +#~ " regex and map against the input " +#~ "key again. The first match will be" +#~ " returned, based on the key insertion" +#~ " order. Return None if there isn't" +#~ " any match found." +#~ msgstr "" + +#~ msgid "the string key as the query for the layout." +#~ msgstr "" + +#~ msgid "Corresponding layout based on the query." +#~ msgstr "" + +#~ msgid "run\\_server\\_app" +#~ msgstr "run\\_server\\_app" + +#~ msgid "run\\_superlink" +#~ msgstr "flower-superlink" + +#~ msgid "Start a Ray-based Flower simulation server." +#~ msgstr "启动基于 Ray 的Flower模拟服务器。" + +#~ msgid "" +#~ "A function creating `Client` instances. 
" +#~ "The function must have the signature " +#~ "`client_fn(context: Context). It should return" +#~ " a single client instance of type " +#~ "`Client`. Note that the created client" +#~ " instances are ephemeral and will " +#~ "often be destroyed after a single " +#~ "method invocation. Since client instances " +#~ "are not long-lived, they should " +#~ "not attempt to carry state over " +#~ "method invocations. Any state required " +#~ "by the instance (model, dataset, " +#~ "hyperparameters, ...) should be (re-)created" +#~ " in either the call to `client_fn`" +#~ " or the call to any of the " +#~ "client methods (e.g., load evaluation " +#~ "data in the `evaluate` method itself)." +#~ msgstr "" +#~ "创建客户端实例的函数。该函数必须接受一个名为 `cid` 的 `str` 参数。它应返回一个" +#~ " Client " +#~ "类型的客户端实例。请注意,创建的客户端实例是短暂的,通常在调用一个方法后就会被销毁。由于客户机实例不是长期存在的,它们不应试图在方法调用时携带状态数据。实例所需的任何状态数据(模型、数据集、超参数......)都应在调用" +#~ " `client_fn` 或任何客户端方法(例如,在 `evaluate` " +#~ "方法中加载评估数据)时(重新)创建。" + +#~ msgid "The total number of clients in this simulation." +#~ msgstr "需要等待的客户数量。" + +#~ msgid "" +#~ "UNSUPPORTED, WILL BE REMOVED. USE " +#~ "`num_clients` INSTEAD. List `client_id`s for" +#~ " each client. This is only required" +#~ " if `num_clients` is not set. Setting" +#~ " both `num_clients` and `clients_ids` with" +#~ " `len(clients_ids)` not equal to " +#~ "`num_clients` generates an error. Using " +#~ "this argument will raise an error." +#~ msgstr "" +#~ "列出每个客户的 `client_id`。只有在未设置 `num_clients` " +#~ "时才需要这样做。同时设置`num_clients`和`clients_ids`,且`len(clients_ids)`不等于`num_clients`,会产生错误。" + +#~ msgid "" +#~ "CPU and GPU resources for a single" +#~ " client. Supported keys are `num_cpus` " +#~ "and `num_gpus`. To understand the GPU" +#~ " utilization caused by `num_gpus`, as " +#~ "well as using custom resources, please" +#~ " consult the Ray documentation." +#~ msgstr "" +#~ "\"num_gpus\": 0.0` 单个客户端的 CPU 和 GPU " +#~ "资源。支持的键值为 `num_cpus` 和 `num_gpus`。要了解 " +#~ "`num_gpus` 所导致的 GPU 利用率,以及使用自定义资源的情况,请查阅 Ray" +#~ " 文档。" + +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Server`. If no instance" +#~ " is provided, then `start_server` will " +#~ "create one." +#~ msgstr "抽象基类 `flwr.server.Server`的实现。如果没有提供实例,`start_server` 将创建一个。" + +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.Strategy`. If no " +#~ "strategy is provided, then `start_server` " +#~ "will use `flwr.server.strategy.FedAvg`." +#~ msgstr "" +#~ "抽象基类 `flwr.server.strategy` 的实现。如果没有提供策略,`start_server`" +#~ " 将使用 `flwr.server.strategy.FedAvg`。" + +#~ msgid "" +#~ "An implementation of the abstract base" +#~ " class `flwr.server.ClientManager`. If no " +#~ "implementation is provided, then " +#~ "`start_simulation` will use " +#~ "`flwr.server.client_manager.SimpleClientManager`." +#~ msgstr "" +#~ "抽象基类 `flwr.server.ClientManager` " +#~ "的实现。如果没有提供实现,`start_simulation` 将使用 " +#~ "`flwr.server.client_manager.SimpleClientManager`。" + +#~ msgid "" +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args: { " +#~ "\"ignore_reinit_error\": True, \"include_dashboard\": " +#~ "False } An empty dictionary can " +#~ "be used (ray_init_args={}) to prevent " +#~ "any arguments from being passed to " +#~ "ray.init." 
+#~ msgstr "" +#~ "可选字典,包含调用 `ray.init` 时的参数。如果 ray_init_args 为" +#~ " None(默认值),则将使用以下默认参数初始化 Ray: { " +#~ "\"ignore_reinit_error\": True, \"include_dashboard\": " +#~ "False } 可以使用空字典(ray_init_args={})来防止向 ray.init " +#~ "传递任何参数。" + +#~ msgid "" +#~ "Optional dictionary containing arguments for" +#~ " the call to `ray.init`. If " +#~ "ray_init_args is None (the default), Ray" +#~ " will be initialized with the " +#~ "following default args:" +#~ msgstr "" +#~ "可选字典,包含调用 `ray.init` 时的参数。如果 ray_init_args 为" +#~ " None(默认值),则将使用以下默认参数初始化 Ray:" + +#~ msgid "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" +#~ msgstr "{ \"ignore_reinit_error\": True, \"include_dashboard\": False }" + +#~ msgid "" +#~ "An empty dictionary can be used " +#~ "(ray_init_args={}) to prevent any arguments" +#~ " from being passed to ray.init." +#~ msgstr "可以使用空字典 (ray_init_args={}) 来防止向 ray.init 传递任何参数。" + +#~ msgid "" +#~ "Set to True to prevent `ray.shutdown()`" +#~ " in case `ray.is_initialized()=True`." +#~ msgstr "设为 True 可在 `ray.is_initialized()=True` 情况下阻止 `ray.shutdown()` 。" + +#~ msgid "" +#~ "Optionally specify the type of actor " +#~ "to use. The actor object, which " +#~ "persists throughout the simulation, will " +#~ "be the process in charge of " +#~ "executing a ClientApp wrapping input " +#~ "argument `client_fn`." +#~ msgstr "可选择指定要使用的actor类型。actor对象将在整个模拟过程中持续存在,它将是负责运行客户端作业(即其 `fit()`方法)的进程。" + +#~ msgid "" +#~ "If you want to create your own " +#~ "Actor classes, you might need to " +#~ "pass some input argument. You can " +#~ "use this dictionary for such purpose." +#~ msgstr "如果您想创建自己的 Actor 类,可能需要传递一些输入参数。为此,您可以使用本字典。" + +#~ msgid "" +#~ "(default: \"DEFAULT\") Optional string " +#~ "(\"DEFAULT\" or \"SPREAD\") for the VCE" +#~ " to choose in which node the " +#~ "actor is placed. If you are an " +#~ "advanced user needed more control you" +#~ " can use lower-level scheduling " +#~ "strategies to pin actors to specific " +#~ "compute nodes (e.g. via " +#~ "NodeAffinitySchedulingStrategy). Please note this" +#~ " is an advanced feature. For all " +#~ "details, please refer to the Ray " +#~ "documentation: https://docs.ray.io/en/latest/ray-" +#~ "core/scheduling/index.html" +#~ msgstr "" +#~ "(默认:\"DEFAULT\")可选字符串(\"DEFAULT \"或 \"SPREAD\"),供 " +#~ "VCE " +#~ "选择将行为体放置在哪个节点上。如果你是需要更多控制权的高级用户,可以使用低级调度策略将actor固定到特定计算节点(例如,通过 " +#~ "NodeAffinitySchedulingStrategy)。请注意,这是一项高级功能。有关详细信息,请参阅 Ray " +#~ "文档:https://docs.ray.io/en/latest/ray-core/scheduling/index.html" + +#~ msgid "**hist** -- Object containing metrics from training." +#~ msgstr "**hist** -- 包含训练指标的对象。" + +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with FastAI to train a vision " +#~ "model on CIFAR-10." +#~ msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 FastAI 在 CIFAR-10 上训练视觉模型。" + +#~ msgid "Let's build a federated learning system using fastai and Flower!" +#~ msgstr "让我们用 fastai 和 Flower 建立一个联邦学习系统!" + +#~ msgid "" +#~ "Please refer to the `full code " +#~ "example `_ to learn more." +#~ msgstr "" +#~ "请参阅 `完整代码示例 " +#~ "`_了解更多信息。" + +#~ msgid "" +#~ "Let's build a federated learning system" +#~ " using Hugging Face Transformers and " +#~ "Flower!" +#~ msgstr "让我们用Hugging Face Transformers和Flower来构建一个联邦学习系统!" + +#~ msgid "Dependencies" +#~ msgstr "依赖关系" + +#~ msgid "" +#~ "To follow along this tutorial you " +#~ "will need to install the following " +#~ "packages: :code:`datasets`, :code:`evaluate`, " +#~ ":code:`flwr`, :code:`torch`, and " +#~ ":code:`transformers`. 
This can be done " +#~ "using :code:`pip`:" +#~ msgstr "" +#~ "要学习本教程,您需要安装以下软件包: :code:`datasets`、 :code:`evaluate`、 " +#~ ":code:`flwr`、 :code:`torch`和 :code:`transformers`。这可以通过" +#~ " :code:`pip` 来完成:" + +#~ msgid "Standard Hugging Face workflow" +#~ msgstr "标准Hugging Face工作流程" + +#~ msgid "Handling the data" +#~ msgstr "处理数据" + +#~ msgid "" +#~ "To fetch the IMDB dataset, we will" +#~ " use Hugging Face's :code:`datasets` " +#~ "library. We then need to tokenize " +#~ "the data and create :code:`PyTorch` " +#~ "dataloaders, this is all done in " +#~ "the :code:`load_data` function:" +#~ msgstr "" +#~ "为了获取 IMDB 数据集,我们将使用 Hugging Face 的 " +#~ ":code:`datasets` 库。然后,我们需要对数据进行标记化,并创建 :code:`PyTorch` " +#~ "数据加载器,这些都将在 :code:`load_data` 函数中完成:" + +#~ msgid "Training and testing the model" +#~ msgstr "训练和测试模型" + +#~ msgid "" +#~ "Once we have a way of creating " +#~ "our trainloader and testloader, we can" +#~ " take care of the training and " +#~ "testing. This is very similar to " +#~ "any :code:`PyTorch` training or testing " +#~ "loop:" +#~ msgstr "" +#~ "有了创建 trainloader 和 testloader " +#~ "的方法后,我们就可以进行训练和测试了。这与任何 :code:`PyTorch` 训练或测试循环都非常相似:" + +#~ msgid "Creating the model itself" +#~ msgstr "创建模型本身" + +#~ msgid "" +#~ "To create the model itself, we " +#~ "will just load the pre-trained " +#~ "distillBERT model using Hugging Face’s " +#~ ":code:`AutoModelForSequenceClassification` :" +#~ msgstr "" +#~ "要创建模型本身,我们只需使用 Hugging Face 的 " +#~ ":code:`AutoModelForSequenceClassification` 加载预训练的 " +#~ "distillBERT 模型:" + +#~ msgid "Creating the IMDBClient" +#~ msgstr "创建 IMDBClient" + +#~ msgid "" +#~ "To federate our example to multiple " +#~ "clients, we first need to write " +#~ "our Flower client class (inheriting from" +#~ " :code:`flwr.client.NumPyClient`). This is very" +#~ " easy, as our model is a " +#~ "standard :code:`PyTorch` model:" +#~ msgstr "" +#~ "要将我们的示例联邦到多个客户端,我们首先需要编写 Flower 客户端类(继承自 " +#~ ":code:`flwr.client.NumPyClient`)。这很容易,因为我们的模型是一个标准的 " +#~ ":code:`PyTorch` 模型:" + +#~ msgid "" +#~ "The :code:`get_parameters` function lets the" +#~ " server get the client's parameters. " +#~ "Inversely, the :code:`set_parameters` function " +#~ "allows the server to send its " +#~ "parameters to the client. Finally, the" +#~ " :code:`fit` function trains the model " +#~ "locally for the client, and the " +#~ ":code:`evaluate` function tests the model " +#~ "locally and returns the relevant " +#~ "metrics." +#~ msgstr "" +#~ ":code:`get_parameters` " +#~ "函数允许服务器获取客户端的参数。相反,:code:`set_parameters`函数允许服务器将其参数发送给客户端。最后,:code:`fit`函数在本地为客户端训练模型,:code:`evaluate`函数在本地测试模型并返回相关指标。" + +#~ msgid "Starting the server" +#~ msgstr "启动服务器" + +#~ msgid "" +#~ "Now that we have a way to " +#~ "instantiate clients, we need to create" +#~ " our server in order to aggregate " +#~ "the results. 
Using Flower, this can " +#~ "be done very easily by first " +#~ "choosing a strategy (here, we are " +#~ "using :code:`FedAvg`, which will define " +#~ "the global weights as the average " +#~ "of all the clients' weights at " +#~ "each round) and then using the " +#~ ":code:`flwr.server.start_server` function:" +#~ msgstr "" +#~ "现在我们有了实例化客户端的方法,我们需要创建服务器,以便汇总结果。使用 Flower,首先选择一个策略(这里我们使用 " +#~ ":code:`FedAvg`,它将把全局模型参数定义为每轮所有客户端模型参数的平均值),然后使用 " +#~ ":code:`flwr.server.start_server`函数,就可以非常轻松地完成这项工作:" + +#~ msgid "" +#~ "The :code:`weighted_average` function is there" +#~ " to provide a way to aggregate " +#~ "the metrics distributed amongst the " +#~ "clients (basically this allows us to " +#~ "display a nice average accuracy and " +#~ "loss for every round)." +#~ msgstr "" +#~ "使用 :code:`weighted_average` " +#~ "函数是为了提供一种方法来汇总分布在客户端的指标(基本上,这可以让我们显示每一轮的平均精度和损失值)。" + +#~ msgid "Putting everything together" +#~ msgstr "把所有东西放在一起" + +#~ msgid "We can now start client instances using:" +#~ msgstr "现在我们可以使用:" + +#~ msgid "" +#~ "And they will be able to connect" +#~ " to the server and start the " +#~ "federated training." +#~ msgstr "他们就能连接到服务器,开始联邦训练。" + +#~ msgid "" +#~ "If you want to check out " +#~ "everything put together, you should " +#~ "check out the `full code example " +#~ "`_ ." +#~ msgstr "" +#~ "如果您想查看所有内容,请查看完整的代码示例: " +#~ "[https://github.com/adap/flower/tree/main/examples/quickstart-" +#~ "huggingface](https://github.com/adap/flower/tree/main/examples" +#~ "/quickstart-huggingface)." + +#~ msgid "" +#~ "Of course, this is a very basic" +#~ " example, and a lot can be " +#~ "added or modified, it was just to" +#~ " showcase how simply we could " +#~ "federate a Hugging Face workflow using" +#~ " Flower." +#~ msgstr "" +#~ "当然,这只是一个非常基本的示例,还可以添加或修改很多内容,只是为了展示我们可以如何简单地使用 Flower " +#~ "联合Hugging Face的工作流程。" + +#~ msgid "" +#~ "Note that in this example we used" +#~ " :code:`PyTorch`, but we could have " +#~ "very well used :code:`TensorFlow`." +#~ msgstr "请注意,在本例中我们使用了 :code:`PyTorch`,但也完全可以使用 :code:`TensorFlow`。" + +#~ msgid "" +#~ "Check out this Federated Learning " +#~ "quickstart tutorial for using Flower " +#~ "with PyTorch Lightning to train an " +#~ "Auto Encoder model on MNIST." +#~ msgstr "查看此联邦学习快速入门教程,了解如何使用 Flower 和 PyTorch Lightning 在 MNIST 上训练自动编码器模型。" + +#~ msgid "" +#~ "Let's build a horizontal federated " +#~ "learning system using PyTorch Lightning " +#~ "and Flower!" +#~ msgstr "让我们使用 PyTorch Lightning 和 Flower 构建一个水平联邦学习系统!" + +#~ msgid "" +#~ "Please refer to the `full code " +#~ "example `_ to learn " +#~ "more." +#~ msgstr "" +#~ "请参阅 `完整代码示例 " +#~ "`_ 了解更多信息。" + +#~ msgid "Let's build a federated learning system in less than 20 lines of code!" +#~ msgstr "让我们用不到 20 行代码构建一个联邦学习系统!" + +#~ msgid "Before Flower can be imported we have to install it:" +#~ msgstr "在导入 Flower 之前,我们必须先安装它:" + +#~ msgid "" +#~ "Since we want to use the Keras " +#~ "API of TensorFlow (TF), we have to" +#~ " install TF as well:" +#~ msgstr "由于我们要使用 TensorFlow (TF) 的 Keras API,因此还必须安装 TF:" + +#~ msgid "Next, in a file called :code:`client.py`, import Flower and TensorFlow:" +#~ msgstr "接下来,在名为 :code:`client.py` 的文件中导入 Flower 和 TensorFlow:" + +#~ msgid "" +#~ "We use the Keras utilities of TF" +#~ " to load CIFAR10, a popular colored" +#~ " image classification dataset for machine" +#~ " learning. 
The call to " +#~ ":code:`tf.keras.datasets.cifar10.load_data()` downloads " +#~ "CIFAR10, caches it locally, and then " +#~ "returns the entire training and test " +#~ "set as NumPy ndarrays." +#~ msgstr "" +#~ "我们使用 TF 的 Keras 实用程序加载 " +#~ "CIFAR10,这是一个用于机器学习的流行彩色图像分类数据集。调用 " +#~ ":code:`tf.keras.datasets.cifar10.load_data()` 会下载 " +#~ "CIFAR10,将其缓存到本地,然后以 NumPy ndarrays 的形式返回整个训练集和测试集。" + +#~ msgid "" +#~ "Next, we need a model. For the " +#~ "purpose of this tutorial, we use " +#~ "MobilNetV2 with 10 output classes:" +#~ msgstr "接下来,我们需要一个模型。在本教程中,我们使用带有 10 个输出类的 MobilNetV2:" + +#~ msgid "" +#~ "The Flower server interacts with clients" +#~ " through an interface called " +#~ ":code:`Client`. When the server selects " +#~ "a particular client for training, it " +#~ "sends training instructions over the " +#~ "network. The client receives those " +#~ "instructions and calls one of the " +#~ ":code:`Client` methods to run your code" +#~ " (i.e., to train the neural network" +#~ " we defined earlier)." +#~ msgstr "" +#~ "Flower 服务器通过一个名为 :code:`Client` " +#~ "的接口与客户端交互。当服务器选择一个特定的客户端进行训练时,它会通过网络发送训练指令。客户端接收到这些指令后,会调用 " +#~ ":code:`Client` 方法之一来运行您的代码(即训练我们之前定义的神经网络)。" + +#~ msgid "" +#~ "Flower provides a convenience class " +#~ "called :code:`NumPyClient` which makes it " +#~ "easier to implement the :code:`Client` " +#~ "interface when your workload uses Keras." +#~ " The :code:`NumPyClient` interface defines " +#~ "three methods which can be implemented" +#~ " in the following way:" +#~ msgstr "" +#~ "Flower 提供了一个名为 :code:`NumPyClient` 的便捷类,当您的工作负载使用" +#~ " Keras 时,该类可以更轻松地实现 :code:`Client` " +#~ "接口。:code:`NumPyClient` 接口定义了三个方法,可以通过以下方式实现:" + +#~ msgid "" +#~ "We can now create an instance of" +#~ " our class :code:`CifarClient` and add " +#~ "one line to actually run this " +#~ "client:" +#~ msgstr "现在我们可以创建一个 :code:`CifarClient` 类的实例,并添加一行来实际运行该客户端:" + +#~ msgid "" +#~ "That's it for the client. We only" +#~ " have to implement :code:`Client` or " +#~ ":code:`NumPyClient` and call " +#~ ":code:`fl.client.start_client()`. If you implement" +#~ " a client of type :code:`NumPyClient` " +#~ "you'll need to first call its " +#~ ":code:`to_client()` method. The string " +#~ ":code:`\"[::]:8080\"` tells the client which" +#~ " server to connect to. In our " +#~ "case we can run the server and " +#~ "the client on the same machine, " +#~ "therefore we use :code:`\"[::]:8080\"`. If " +#~ "we run a truly federated workload " +#~ "with the server and clients running " +#~ "on different machines, all that needs" +#~ " to change is the :code:`server_address`" +#~ " we point the client at." +#~ msgstr "" +#~ "这就是客户端。我们只需实现 :code:`Client` 或 :code:`NumPyClient`" +#~ " 并调用 :code:`fl.client.start_client()` 或 " +#~ ":code:`fl.client.start_numpy_client()`。字符串 " +#~ ":code:`\"[::]:8080\"`会告诉客户端要连接的服务器。在本例中,我们可以在同一台机器上运行服务器和客户端,因此使用 " +#~ ":code:`\"[::]:8080\"。如果我们运行的是真正的联邦工作负载,服务器和客户端运行在不同的机器上,那么需要改变的只是客户端指向的" +#~ " :code:`server_address`。" + +#~ msgid "Each client will have its own dataset." +#~ msgstr "每个客户都有自己的数据集。" + +#~ msgid "" +#~ "You should now see how the " +#~ "training does in the very first " +#~ "terminal (the one that started the " +#~ "server):" +#~ msgstr "现在你应该能在第一个终端(启动服务器的终端)看到训练的效果了:" + +#~ msgid "" +#~ "Congratulations! You've successfully built and" +#~ " run your first federated learning " +#~ "system. The full `source code " +#~ "`_ for this can be " +#~ "found in :code:`examples/quickstart-" +#~ "tensorflow/client.py`." 
+#~ msgstr "" +#~ "恭喜您!您已经成功构建并运行了第一个联邦学习系统。`完整的源代码 " +#~ "`_ 可以在 :code:`examples/quickstart-" +#~ "tensorflow/client.py` 中找到。" + +#~ msgid "|e5918c1c06a4434bbe4bf49235e40059|" +#~ msgstr "" + +#~ msgid "|c0165741bd1944f09ec55ce49032377d|" +#~ msgstr "" + +#~ msgid "|0a0ac9427ac7487b8e52d75ed514f04e|" +#~ msgstr "" + +#~ msgid "|5defee3ea4ca40d99fcd3e4ea045be25|" +#~ msgstr "" + +#~ msgid "|74f26ca701254d3db57d7899bd91eb55|" +#~ msgstr "" + +#~ msgid "|bda79f21f8154258a40e5766b2634ad7|" +#~ msgstr "" + +#~ msgid "|89d30862e62e4f9989e193483a08680a|" +#~ msgstr "" + +#~ msgid "|77e9918671c54b4f86e01369c0785ce8|" +#~ msgstr "" + +#~ msgid "|7e4ccef37cc94148a067107b34eb7447|" +#~ msgstr "" + +#~ msgid "|28e47e4cded14479a0846c8e5f22c872|" +#~ msgstr "" + +#~ msgid "|4b8c5d1afa144294b76ffc76e4658a38|" +#~ msgstr "" + +#~ msgid "|9dbdb3a0f6cb4a129fac863eaa414c17|" +#~ msgstr "" + +#~ msgid "|81749d0ac0834c36a83bd38f433fea31|" +#~ msgstr "" + +#~ msgid "|ed9aae51da70428eab7eef32f21e819e|" +#~ msgstr "" +