From 26f2dab9e7b5762d8e713a21a193f29df5bb3cc5 Mon Sep 17 00:00:00 2001
From: Heng Pan
Date: Fri, 9 Aug 2024 13:30:21 +0100
Subject: [PATCH 1/2] update logging

---
 src/py/flwr/server/server.py                     | 7 ++++++-
 src/py/flwr/server/workflow/default_workflows.py | 7 ++++++-
 2 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/src/py/flwr/server/server.py b/src/py/flwr/server/server.py
index f1bfb6f0533b..94e8a94b3051 100644
--- a/src/py/flwr/server/server.py
+++ b/src/py/flwr/server/server.py
@@ -91,7 +91,7 @@ def fit(self, num_rounds: int, timeout: Optional[float]) -> Tuple[History, float
         # Initialize parameters
         log(INFO, "[INIT]")
         self.parameters = self._get_initial_parameters(server_round=0, timeout=timeout)
-        log(INFO, "Evaluating initial global parameters")
+        log(INFO, "Starting evaluation of initial global parameters")
         res = self.strategy.evaluate(0, parameters=self.parameters)
         if res is not None:
             log(
@@ -102,6 +102,8 @@ def fit(self, num_rounds: int, timeout: Optional[float]) -> Tuple[History, float
             )
             history.add_loss_centralized(server_round=0, loss=res[0])
             history.add_metrics_centralized(server_round=0, metrics=res[1])
+        else:
+            log(INFO, "Evaluation returned no results (`None`)")
 
         # Run federated learning for num_rounds
         start_time = timeit.default_timer()
@@ -123,6 +125,7 @@ def fit(self, num_rounds: int, timeout: Optional[float]) -> Tuple[History, float
             )
 
             # Evaluate model using strategy implementation
+            log(INFO, "Starting evaluation of global parameters")
             res_cen = self.strategy.evaluate(current_round, parameters=self.parameters)
             if res_cen is not None:
                 loss_cen, metrics_cen = res_cen
@@ -138,6 +141,8 @@ def fit(self, num_rounds: int, timeout: Optional[float]) -> Tuple[History, float
                 history.add_metrics_centralized(
                     server_round=current_round, metrics=metrics_cen
                 )
+            else:
+                log(INFO, "Evaluation returned no results (`None`)")
 
             # Evaluate model on a sample of available clients
             res_fed = self.evaluate_round(server_round=current_round, timeout=timeout)
diff --git a/src/py/flwr/server/workflow/default_workflows.py b/src/py/flwr/server/workflow/default_workflows.py
index 80759316da84..dd7dd36aa826 100644
--- a/src/py/flwr/server/workflow/default_workflows.py
+++ b/src/py/flwr/server/workflow/default_workflows.py
@@ -167,7 +167,7 @@ def default_init_params_workflow(driver: Driver, context: Context) -> None:
     context.state.parameters_records[MAIN_PARAMS_RECORD] = paramsrecord
 
     # Evaluate initial parameters
-    log(INFO, "Evaluating initial global parameters")
+    log(INFO, "Starting evaluation of initial global parameters")
     parameters = compat.parametersrecord_to_parameters(paramsrecord, keep_input=True)
     res = context.strategy.evaluate(0, parameters=parameters)
     if res is not None:
@@ -179,6 +179,8 @@ def default_init_params_workflow(driver: Driver, context: Context) -> None:
         )
         context.history.add_loss_centralized(server_round=0, loss=res[0])
         context.history.add_metrics_centralized(server_round=0, metrics=res[1])
+    else:
+        log(INFO, "Evaluation returned no results (`None`)")
 
 
 def default_centralized_evaluation_workflow(_: Driver, context: Context) -> None:
@@ -192,6 +194,7 @@ def default_centralized_evaluation_workflow(_: Driver, context: Context) -> None
     start_time = cast(float, cfg[Key.START_TIME])
 
     # Centralized evaluation
+    log(INFO, "Starting evaluation of global parameters")
     parameters = compat.parametersrecord_to_parameters(
         record=context.state.parameters_records[MAIN_PARAMS_RECORD],
         keep_input=True,
@@ -211,6 +214,8 @@ def default_centralized_evaluation_workflow(_: Driver, context: Context) -> None
         context.history.add_metrics_centralized(
             server_round=current_round, metrics=metrics_cen
         )
+    else:
+        log(INFO, "Evaluation returned no results (`None`)")
 
 
 def default_fit_workflow(  # pylint: disable=R0914

From e0c5b6c54ab4116bf19ec5add4c593b711ce2e1e Mon Sep 17 00:00:00 2001
From: Heng Pan
Date: Wed, 14 Aug 2024 18:00:40 +0100
Subject: [PATCH 2/2] update logging

---
 src/py/flwr/server/server.py                     | 3 ---
 src/py/flwr/server/workflow/default_workflows.py | 3 ---
 2 files changed, 6 deletions(-)

diff --git a/src/py/flwr/server/server.py b/src/py/flwr/server/server.py
index 94e8a94b3051..5e2a0c6b2719 100644
--- a/src/py/flwr/server/server.py
+++ b/src/py/flwr/server/server.py
@@ -125,7 +125,6 @@ def fit(self, num_rounds: int, timeout: Optional[float]) -> Tuple[History, float
             )
 
             # Evaluate model using strategy implementation
-            log(INFO, "Starting evaluation of global parameters")
             res_cen = self.strategy.evaluate(current_round, parameters=self.parameters)
             if res_cen is not None:
                 loss_cen, metrics_cen = res_cen
@@ -141,8 +140,6 @@ def fit(self, num_rounds: int, timeout: Optional[float]) -> Tuple[History, float
                 history.add_metrics_centralized(
                     server_round=current_round, metrics=metrics_cen
                 )
-            else:
-                log(INFO, "Evaluation returned no results (`None`)")
 
             # Evaluate model on a sample of available clients
             res_fed = self.evaluate_round(server_round=current_round, timeout=timeout)
diff --git a/src/py/flwr/server/workflow/default_workflows.py b/src/py/flwr/server/workflow/default_workflows.py
index dd7dd36aa826..82d8d5d4ccb6 100644
--- a/src/py/flwr/server/workflow/default_workflows.py
+++ b/src/py/flwr/server/workflow/default_workflows.py
@@ -194,7 +194,6 @@ def default_centralized_evaluation_workflow(_: Driver, context: Context) -> None
     start_time = cast(float, cfg[Key.START_TIME])
 
     # Centralized evaluation
-    log(INFO, "Starting evaluation of global parameters")
     parameters = compat.parametersrecord_to_parameters(
         record=context.state.parameters_records[MAIN_PARAMS_RECORD],
         keep_input=True,
@@ -214,8 +213,6 @@ def default_centralized_evaluation_workflow(_: Driver, context: Context) -> None
         context.history.add_metrics_centralized(
             server_round=current_round, metrics=metrics_cen
         )
-    else:
-        log(INFO, "Evaluation returned no results (`None`)")
 
 
 def default_fit_workflow(  # pylint: disable=R0914