From a655d9de6e10df44072694446052d6723b1c9b12 Mon Sep 17 00:00:00 2001 From: Zhihong Zhang Date: Mon, 18 Mar 2024 15:46:21 -0400 Subject: [PATCH 01/21] Updated FOBS readme to add DatumManager, added agrpcs as secure scheme --- nvflare/fuel/f3/drivers/net_utils.py | 2 +- nvflare/fuel/utils/fobs/README.rst | 9 +++++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/nvflare/fuel/f3/drivers/net_utils.py b/nvflare/fuel/f3/drivers/net_utils.py index 91a4ef5a7a..25f931d880 100644 --- a/nvflare/fuel/f3/drivers/net_utils.py +++ b/nvflare/fuel/f3/drivers/net_utils.py @@ -32,7 +32,7 @@ MAX_ITER_SIZE = 10 RANDOM_TRIES = 20 BIND_TIME_OUT = 5 -SECURE_SCHEMES = {"https", "wss", "grpcs", "stcp", "satcp"} +SECURE_SCHEMES = {"https", "wss", "grpcs", "agrpcs", "ngrpcs", "stcp", "satcp"} # GRPC can't handle frame size over 2G. So the limit is set to (2G-2M) MAX_FRAME_SIZE = 2 * 1024 * 1024 * 1024 - (2 * 1024 * 1024) diff --git a/nvflare/fuel/utils/fobs/README.rst b/nvflare/fuel/utils/fobs/README.rst index 9c21e16cdc..9adaadfdc8 100644 --- a/nvflare/fuel/utils/fobs/README.rst +++ b/nvflare/fuel/utils/fobs/README.rst @@ -117,6 +117,11 @@ A decomposer can either serialize the class into bytes or decompose it into obje serializable types. In most cases, it only involves saving members as a list and reconstructing the object from the list. +MessagePack can't handle items larger than 4GB in dict. To work around this issue, FOBS can externalize +the large item and just stores a reference in the buffer. :code:`DatumManager` is used to handle the +externalized data. For most objects which don't deal with dict items larger than 4GB, the DatumManager +is not needed. + Here is an example of a simple decomposer. Even though :code:`datetime` is not supported by MessagePack, a decomposer is included in `fobs` module so no need to further decompose it. 
@@ -138,10 +143,10 @@ by MessagePack, a decomposer is included in `fobs` module so no need to further def supported_type(self) -> Type[Any]: return Simple - def decompose(self, obj) -> Any: + def decompose(self, obj, manager) -> Any: return [obj.num, obj.name, obj.timestamp] - def recompose(self, data: Any) -> Simple: + def recompose(self, data: Any, manager) -> Simple: return Simple(data[0], data[1], data[2]) From 2646f603bb9947179963f46aa9b80d643a9b822a Mon Sep 17 00:00:00 2001 From: Zhihong Zhang Date: Fri, 3 May 2024 16:18:13 -0400 Subject: [PATCH 02/21] Implemented horizontal calls in nvflare plugin --- integration/xgboost/processor/src/dam/dam.cc | 4 +- .../xgboost/processor/src/include/dam.h | 4 +- .../processor/src/include/nvflare_processor.h | 13 ++++-- .../src/nvflare-plugin/nvflare_processor.cc | 43 +++++++++++++++---- .../runners/xgb_client_runner.py | 4 ++ 5 files changed, 52 insertions(+), 16 deletions(-) diff --git a/integration/xgboost/processor/src/dam/dam.cc b/integration/xgboost/processor/src/dam/dam.cc index d768d497dd..27c3512946 100644 --- a/integration/xgboost/processor/src/dam/dam.cc +++ b/integration/xgboost/processor/src/dam/dam.cc @@ -26,7 +26,7 @@ void print_buffer(uint8_t *buffer, int size) { } // DamEncoder ====== -void DamEncoder::AddFloatArray(std::vector &value) { +void DamEncoder::AddFloatArray(const std::vector &value) { if (encoded) { std::cout << "Buffer is already encoded" << std::endl; return; @@ -38,7 +38,7 @@ void DamEncoder::AddFloatArray(std::vector &value) { entries->push_back(new Entry(kDataTypeFloatArray, buffer, value.size())); } -void DamEncoder::AddIntArray(std::vector &value) { +void DamEncoder::AddIntArray(const std::vector &value) { std::cout << "AddIntArray called, size: " << value.size() << std::endl; if (encoded) { std::cout << "Buffer is already encoded" << std::endl; diff --git a/integration/xgboost/processor/src/include/dam.h b/integration/xgboost/processor/src/include/dam.h index e6afd44299..1f113d92fe 
100644 --- a/integration/xgboost/processor/src/include/dam.h +++ b/integration/xgboost/processor/src/include/dam.h @@ -53,9 +53,9 @@ class DamEncoder { this->data_set_id = data_set_id; } - void AddIntArray(std::vector &value); + void AddIntArray(const std::vector &value); - void AddFloatArray(std::vector &value); + void AddFloatArray(const std::vector &value); std::uint8_t * Finish(size_t &size); diff --git a/integration/xgboost/processor/src/include/nvflare_processor.h b/integration/xgboost/processor/src/include/nvflare_processor.h index 52cf42920f..cc6fb6b1a4 100644 --- a/integration/xgboost/processor/src/include/nvflare_processor.h +++ b/integration/xgboost/processor/src/include/nvflare_processor.h @@ -24,6 +24,8 @@ const int kDataSetHGPairs = 1; const int kDataSetAggregation = 2; const int kDataSetAggregationWithFeatures = 3; const int kDataSetAggregationResult = 4; +const int kDataSetHistograms = 5; +const int kDataSetHistogramResult = 6; class NVFlareProcessor: public processing::Processor { private: @@ -51,11 +53,11 @@ class NVFlareProcessor: public processing::Processor { free(buffer); } - void* ProcessGHPairs(size_t &size, std::vector& pairs) override; + void* ProcessGHPairs(size_t *size, const std::vector& pairs) override; - void* HandleGHPairs(size_t &size, void *buffer, size_t buf_size) override; + void* HandleGHPairs(size_t *size, void *buffer, size_t buf_size) override; - void InitAggregationContext(const std::vector &cuts, std::vector &slots) override { + void InitAggregationContext(const std::vector &cuts, const std::vector &slots) override { if (this->slots_.empty()) { this->cuts_ = std::vector(cuts); this->slots_ = std::vector(slots); @@ -64,8 +66,11 @@ class NVFlareProcessor: public processing::Processor { } } - void *ProcessAggregation(size_t &size, std::map> nodes) override; + void *ProcessAggregation(size_t *size, std::map> nodes) override; std::vector HandleAggregation(void *buffer, size_t buf_size) override; + void *ProcessHistograms(size_t 
*size, const std::vector& histograms) override; + + std::vector HandleHistograms(void *buffer, size_t buf_size) override; }; \ No newline at end of file diff --git a/integration/xgboost/processor/src/nvflare-plugin/nvflare_processor.cc b/integration/xgboost/processor/src/nvflare-plugin/nvflare_processor.cc index dce1701f7e..aaf9335ec2 100644 --- a/integration/xgboost/processor/src/nvflare-plugin/nvflare_processor.cc +++ b/integration/xgboost/processor/src/nvflare-plugin/nvflare_processor.cc @@ -23,24 +23,24 @@ using std::vector; using std::cout; using std::endl; -void* NVFlareProcessor::ProcessGHPairs(size_t &size, std::vector& pairs) { +void* NVFlareProcessor::ProcessGHPairs(size_t *size, const std::vector& pairs) { cout << "ProcessGHPairs called with pairs size: " << pairs.size() << endl; gh_pairs_ = new std::vector(pairs); DamEncoder encoder(kDataSetHGPairs); encoder.AddFloatArray(pairs); - auto buffer = encoder.Finish(size); + auto buffer = encoder.Finish(*size); return buffer; } -void* NVFlareProcessor::HandleGHPairs(size_t &size, void *buffer, size_t buf_size) { +void* NVFlareProcessor::HandleGHPairs(size_t *size, void *buffer, size_t buf_size) { cout << "HandleGHPairs called with buffer size: " << buf_size << " Active: " << active_ << endl; - size = buf_size; + *size = buf_size; return buffer; } -void *NVFlareProcessor::ProcessAggregation(size_t &size, std::map> nodes) { +void *NVFlareProcessor::ProcessAggregation(size_t *size, std::map> nodes) { cout << "ProcessAggregation called with " << nodes.size() << " nodes" << endl; int64_t data_set_id; @@ -107,7 +107,7 @@ void *NVFlareProcessor::ProcessAggregation(size_t &size, std::map NVFlareProcessor::HandleAggregation(void *buffer, size_t buf while (remaining > kPrefixLen) { DamDecoder decoder(reinterpret_cast(pointer), remaining); if (!decoder.IsValid()) { - cout << "Not DAM encoded buffer ignored at offset: " << (int)(pointer - (char *)buffer) << endl; + cout << "Not DAM encoded buffer ignored at offset: " + 
<< static_cast((pointer - reinterpret_cast(buffer))) << endl; break; } auto size = decoder.Size(); @@ -153,6 +154,31 @@ std::vector NVFlareProcessor::HandleAggregation(void *buffer, size_t buf return result; } +void *NVFlareProcessor::ProcessHistograms(size_t *size, const std::vector& histograms) { + cout << "HandleHistograms called with " << histograms.size() << " entries" << endl; + + DamEncoder encoder(kDataSetHistograms); + encoder.AddFloatArray(histograms); + return encoder.Finish(*size); +} + +std::vector NVFlareProcessor::HandleHistograms(void *buffer, size_t buf_size) { + cout << "HandleHistograms called with buffer size: " << buf_size << endl; + + DamDecoder decoder(reinterpret_cast(buffer), buf_size); + if (!decoder.IsValid()) { + cout << "Not DAM encoded buffer, ignored" << endl; + return std::vector(); + } + + if (decoder.GetDataSetId() != kDataSetHistogramResult) { + cout << "Invalid dataset: " << decoder.GetDataSetId() << endl; + return std::vector(); + } + + return decoder.DecodeFloatArray(); +} + extern "C" { processing::Processor *LoadProcessor(char *plugin_name) { @@ -163,4 +189,5 @@ processing::Processor *LoadProcessor(char *plugin_name) { return new NVFlareProcessor(); } -} + +} // extern "C" diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_client_runner.py b/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_client_runner.py index fc836728e9..f3a9dbc905 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_client_runner.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/runners/xgb_client_runner.py @@ -133,6 +133,10 @@ def run(self, ctx: dict): "federated_server_address": f"{self._server_addr}", "federated_world_size": self._world_size, "federated_rank": self._rank, + "plugin_name": "nvflare", + "loader_params": { + "LIBRARY_PATH": "/tmp", + }, } with xgb.collective.CommunicatorContext(**communicator_env): # Load the data. 
Dmatrix must be created with column split mode in CommunicatorContext for vertical FL From 306b8e16b287ba4d27579eb6696d39307bd0b997 Mon Sep 17 00:00:00 2001 From: Zhihong Zhang Date: Sun, 5 May 2024 17:23:03 -0400 Subject: [PATCH 03/21] Added support for horizontal secure XGBoost --- .../adaptors/grpc_client_adaptor.py | 53 +++++++++++++ .../xgboost/histogram_based_v2/defs.py | 2 + .../histogram_based_v2/sec/client_handler.py | 79 +++++++++++++++++-- .../histogram_based_v2/sec/data_converter.py | 25 ++++++ .../sec/processor_data_converter.py | 18 +++++ .../histogram_based_v2/sec/server_handler.py | 43 ++++++++-- 6 files changed, 207 insertions(+), 13 deletions(-) diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py index ae5e291ce1..1fe65e7b45 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py @@ -11,6 +11,9 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+import threading +import time + import grpc import nvflare.app_opt.xgboost.histogram_based_v2.proto.federated_pb2 as pb2 @@ -23,6 +26,8 @@ from nvflare.fuel.f3.drivers.net_utils import get_open_tcp_port from nvflare.security.logging import secure_format_exception +DUPLICATE_REQ_MAX_HOLD_TIME = 3600.0 + class GrpcClientAdaptor(XGBClientAdaptor, FederatedServicer): def __init__( @@ -41,6 +46,8 @@ def __init__( self._app_dir = None self._workspace = None self._run_dir = None + self._lock = threading.Lock() + self._pending_req = {} def initialize(self, fl_ctx: FLContext): self._client_name = fl_ctx.get_identity_name() @@ -129,11 +136,17 @@ def _abort(self, reason: str): def Allgather(self, request: pb2.AllgatherRequest, context): try: + if self._check_duplicate_seq("allgather", request.rank, request.sequence_number): + return pb2.AllgatherReply(receive_buffer=bytes()) + rcv_buf, _ = self._send_all_gather( rank=request.rank, seq=request.sequence_number, send_buf=request.send_buffer, ) + + self._finish_pending_req("allgather", request.rank, request.sequence_number) + return pb2.AllgatherReply(receive_buffer=rcv_buf) except Exception as ex: self._abort(reason=f"send_all_gather exception: {secure_format_exception(ex)}") @@ -143,11 +156,16 @@ def Allgather(self, request: pb2.AllgatherRequest, context): def AllgatherV(self, request: pb2.AllgatherVRequest, context): try: + if self._check_duplicate_seq("allgatherv", request.rank, request.sequence_number): + return pb2.AllgatherVReply(receive_buffer=bytes()) + rcv_buf = self._do_all_gather_v( rank=request.rank, seq=request.sequence_number, send_buf=request.send_buffer, ) + + self._finish_pending_req("allgatherv", request.rank, request.sequence_number) return pb2.AllgatherVReply(receive_buffer=rcv_buf) except Exception as ex: self._abort(reason=f"send_all_gather_v exception: {secure_format_exception(ex)}") @@ -157,6 +175,9 @@ def AllgatherV(self, request: pb2.AllgatherVRequest, context): def Allreduce(self, request: 
pb2.AllreduceRequest, context): try: + if self._check_duplicate_seq("allreduce", request.rank, request.sequence_number): + return pb2.AllreduceReply(receive_buffer=bytes()) + rcv_buf, _ = self._send_all_reduce( rank=request.rank, seq=request.sequence_number, @@ -164,6 +185,8 @@ def Allreduce(self, request: pb2.AllreduceRequest, context): reduce_op=request.reduce_operation, send_buf=request.send_buffer, ) + + self._finish_pending_req("allreduce", request.rank, request.sequence_number) return pb2.AllreduceReply(receive_buffer=rcv_buf) except Exception as ex: self._abort(reason=f"send_all_reduce exception: {secure_format_exception(ex)}") @@ -173,15 +196,45 @@ def Allreduce(self, request: pb2.AllreduceRequest, context): def Broadcast(self, request: pb2.BroadcastRequest, context): try: + if self._check_duplicate_seq("broadcast", request.rank, request.sequence_number): + return pb2.BroadcastReply(receive_buffer=bytes()) + rcv_buf = self._do_broadcast( rank=request.rank, send_buf=request.send_buffer, seq=request.sequence_number, root=request.root, ) + + self._finish_pending_req("broadcast", request.rank, request.sequence_number) return pb2.BroadcastReply(receive_buffer=rcv_buf) except Exception as ex: self._abort(reason=f"send_broadcast exception: {secure_format_exception(ex)}") context.set_code(grpc.StatusCode.INTERNAL) context.set_details(str(ex)) return pb2.BroadcastReply(receive_buffer=None) + + def _check_duplicate_seq(self, op: str, rank: int, seq: int): + with self._lock: + event = self._pending_req.get((rank, seq), None) + if event: + self.logger.info(f"Duplicate seq {op=} {rank=} {seq=}, wait till original req is done") + event.wait(DUPLICATE_REQ_MAX_HOLD_TIME) + time.sleep(1) # To ensure the first request is returned first + self.logger.info(f"Duplicate seq {op=} {rank=} {seq=} returned with empty buffer") + return True + + with self._lock: + self._pending_req[(rank, seq)] = threading.Event() + return False + + def _finish_pending_req(self, op: str, rank: int, 
seq: int): + with self._lock: + event = self._pending_req.get((rank, seq), None) + if not event: + self.logger.error(f"No pending req {op=} {rank=} {seq=}") + return + + event.set() + del self._pending_req[(rank, seq)] + self.logger.info(f"Request seq {op=} {rank=} {seq=} finished processing") diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/defs.py b/nvflare/app_opt/xgboost/histogram_based_v2/defs.py index 1ee77acd6f..39f7e32bd4 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/defs.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/defs.py @@ -100,7 +100,9 @@ class Constant: EVENT_AFTER_ALL_GATHER_V = "xgb.after_all_gather_v" HEADER_KEY_ENCRYPTED_DATA = "xgb.encrypted_data" + HEADER_KEY_ENCRYPTED_HISTOGRAMS = "xgb.encrypted_histograms" HEADER_KEY_ORIGINAL_BUF_SIZE = "xgb.original_buf_size" + HEADER_KEY_IN_AGGR = "xgb.in_aggr" DUMMY_BUFFER_SIZE = 4 diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/sec/client_handler.py b/nvflare/app_opt/xgboost/histogram_based_v2/sec/client_handler.py index 570db379c1..cd9333ab00 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/sec/client_handler.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/sec/client_handler.py @@ -14,6 +14,11 @@ import os import time +from nvflare.app_opt.he import decomposers + +from nvflare.app_opt.he.homomorphic_encrypt import load_tenseal_context_from_workspace +from nvflare.app_opt.xgboost.histogram_based_v2.sec.dam import DamDecoder + from nvflare.apis.event_type import EventType from nvflare.apis.fl_component import FLComponent from nvflare.apis.fl_context import FLContext @@ -33,12 +38,15 @@ split, ) from nvflare.app_opt.xgboost.histogram_based_v2.sec.data_converter import FeatureAggregationResult -from nvflare.app_opt.xgboost.histogram_based_v2.sec.processor_data_converter import ProcessorDataConverter +from nvflare.app_opt.xgboost.histogram_based_v2.sec.processor_data_converter import ProcessorDataConverter, \ + DATA_SET_HISTOGRAMS from 
nvflare.app_opt.xgboost.histogram_based_v2.sec.sec_handler import SecurityHandler +import tenseal as ts +from tenseal.tensors.ckksvector import CKKSVector class ClientSecurityHandler(SecurityHandler): - def __init__(self, key_length=1024, num_workers=10): + def __init__(self, key_length=1024, num_workers=10, tenseal_context_file="client_context.tenseal"): FLComponent.__init__(self) self.num_workers = num_workers self.key_length = key_length @@ -54,6 +62,10 @@ def __init__(self, key_length=1024, num_workers=10): self.feature_masks = None self.aggregator = Aggregator() self.aggr_result = None # for label client: computed aggr result based on clear-text clear_ghs + self.tenseal_context_file = tenseal_context_file + self.tenseal_context = None + + decomposers.register() def _process_before_broadcast(self, fl_ctx: FLContext): root = fl_ctx.get_prop(Constant.PARAM_KEY_ROOT) @@ -123,9 +135,22 @@ def _process_after_broadcast(self, fl_ctx: FLContext): fl_ctx.set_prop(key=Constant.PARAM_KEY_RCV_BUF, value=dummy_buf, private=True, sticky=False) def _process_before_all_gather_v(self, fl_ctx: FLContext): - rank = fl_ctx.get_prop(Constant.PARAM_KEY_RANK) self.info(fl_ctx, "start") buffer = fl_ctx.get_prop(Constant.PARAM_KEY_SEND_BUF) + + decoder = DamDecoder(buffer) + if not decoder.isValid(): + self.info(fl_ctx, "Not secure content - ignore") + return + + if decoder.get_data_set_id() == DATA_SET_HISTOGRAMS: + self._process_before_all_gather_v_horizontal(fl_ctx, decoder) + else: + self._process_before_all_gather_v_vertical(fl_ctx, decoder) + + def _process_before_all_gather_v_vertical(self, fl_ctx: FLContext): + rank = fl_ctx.get_prop(Constant.PARAM_KEY_RANK) + buffer = fl_ctx.get_prop(Constant.PARAM_KEY_SEND_BUF) aggr_ctx = self.data_converter.decode_aggregation_context(buffer, fl_ctx) if not aggr_ctx: @@ -181,6 +206,25 @@ def _process_before_all_gather_v(self, fl_ctx: FLContext): fl_ctx.set_prop(key=Constant.PARAM_KEY_SEND_BUF, value=encoded_str, private=True, sticky=False) 
fl_ctx.set_prop(key=Constant.PARAM_KEY_HEADERS, value=headers, private=True, sticky=False) + def _process_before_all_gather_v_horizontal(self, fl_ctx: FLContext): + if not self.tenseal_context: + return self._abort("Horizontal secure XGBoost not supported due to missing context", fl_ctx) + + buffer = fl_ctx.get_prop(Constant.PARAM_KEY_SEND_BUF) + histograms = self.data_converter.decode_histograms(buffer, fl_ctx) + + start = time.time() + vector = ts.ckks_vector(self.tenseal_context, histograms) + self.info( + fl_ctx, f"_process_before_all_gather_v: Histograms with {len(histograms)} entries " + f"encrypted in {time.time()-start} secs" + ) + headers = {Constant.HEADER_KEY_ENCRYPTED_DATA: True, + Constant.HEADER_KEY_ENCRYPTED_HISTOGRAMS: True, + Constant.HEADER_KEY_ORIGINAL_BUF_SIZE: len(buffer)} + fl_ctx.set_prop(key=Constant.PARAM_KEY_SEND_BUF, value=vector, private=True, sticky=False) + fl_ctx.set_prop(key=Constant.PARAM_KEY_HEADERS, value=headers, private=True, sticky=False) + def _do_aggregation(self, groups, fl_ctx: FLContext): # this is only for the label-client to compute aggregation in clear-text! 
if not self.feature_masks: @@ -228,7 +272,6 @@ def _decrypt_aggr_result(self, encoded, fl_ctx: FLContext): def _process_after_all_gather_v(self, fl_ctx: FLContext): # called after AllGatherV result is received from the server self.info(fl_ctx, "start") - rank = fl_ctx.get_prop(Constant.PARAM_KEY_RANK) reply = fl_ctx.get_prop(Constant.PARAM_KEY_REPLY) assert isinstance(reply, Shareable) encrypted_data = reply.get_header(Constant.HEADER_KEY_ENCRYPTED_DATA) @@ -236,9 +279,16 @@ def _process_after_all_gather_v(self, fl_ctx: FLContext): self.info(fl_ctx, "no encrypted result - ignore") return - rcv_buf = fl_ctx.get_prop(Constant.PARAM_KEY_RCV_BUF) + has_histograms = reply.get_header(Constant.HEADER_KEY_ENCRYPTED_HISTOGRAMS) + if has_histograms: + self._process_after_all_gather_v_horizontal(fl_ctx) + else: + self._process_after_all_gather_v_vertical(fl_ctx) + def _process_after_all_gather_v_vertical(self, fl_ctx: FLContext): + rcv_buf = fl_ctx.get_prop(Constant.PARAM_KEY_RCV_BUF) # this rcv_buf is a list of replies from ALL clients! 
+ rank = fl_ctx.get_prop(Constant.PARAM_KEY_RANK) if not isinstance(rcv_buf, dict): return self._abort(f"rank {rank}: expect a dict of aggr result but got {type(rcv_buf)}", fl_ctx) rank_replies = rcv_buf @@ -291,11 +341,30 @@ def _process_after_all_gather_v(self, fl_ctx: FLContext): result = self.data_converter.encode_aggregation_result(final_result, fl_ctx) fl_ctx.set_prop(key=Constant.PARAM_KEY_RCV_BUF, value=result, private=True, sticky=False) + def _process_after_all_gather_v_horizontal(self, fl_ctx: FLContext): + encrypted_histograms = fl_ctx.get_prop(Constant.PARAM_KEY_RCV_BUF) + rank = fl_ctx.get_prop(Constant.PARAM_KEY_RANK) + if not isinstance(encrypted_histograms, CKKSVector): + return self._abort(f"rank {rank}: expect a CKKSVector but got {type(encrypted_histograms)}", fl_ctx) + + histograms = encrypted_histograms.decrypt(secret_key=self.tenseal_context.secret_key()) + result = self.data_converter.encode_histograms_result(histograms, fl_ctx) + fl_ctx.set_prop(key=Constant.PARAM_KEY_RCV_BUF, value=result, private=True, sticky=False) + def handle_event(self, event_type: str, fl_ctx: FLContext): if event_type == EventType.START_RUN: self.public_key, self.private_key = generate_keys(self.key_length) self.encryptor = Encryptor(self.public_key, self.num_workers) self.decrypter = Decrypter(self.private_key, self.num_workers) self.adder = Adder(self.num_workers) + try: + self.tenseal_context = load_tenseal_context_from_workspace(self.tenseal_context_file, fl_ctx) + except Exception as ex: + self.info(fl_ctx, + f"Can't load tenseal context, horizontal secure XGBoost is not supported: {ex}") + self.tenseal_context = None + elif event_type == EventType.END_RUN: + self.tenseal_context = None else: super().handle_event(event_type, fl_ctx) + diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/sec/data_converter.py b/nvflare/app_opt/xgboost/histogram_based_v2/sec/data_converter.py index 9a8416ce2b..a9b0a92657 100644 --- 
a/nvflare/app_opt/xgboost/histogram_based_v2/sec/data_converter.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/sec/data_converter.py @@ -76,3 +76,28 @@ def encode_aggregation_result( """ pass + + def decode_histograms(self, buffer: bytes, fl_ctx: FLContext) -> List[float]: + """Decode the buffer to extract flattened histograms + + Args: + buffer: buffer to be decoded + fl_ctx: FLContext info + + Returns: if the buffer contains histograms, return the flattened histograms + otherwise, return None + + """ + pass + + def encode_histograms_result(self, histograms: List[float], fl_ctx: FLContext) -> bytes: + """Encode flattened histograms to be sent back to XGBoost + + Args: + histograms: The flattened histograms for all features + fl_ctx: FLContext info + + Returns: a buffer of bytes + + """ + pass diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/sec/processor_data_converter.py b/nvflare/app_opt/xgboost/histogram_based_v2/sec/processor_data_converter.py index c6acb7293d..224b60325b 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/sec/processor_data_converter.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/sec/processor_data_converter.py @@ -26,6 +26,8 @@ DATA_SET_AGGREGATION = 2 DATA_SET_AGGREGATION_WITH_FEATURES = 3 DATA_SET_AGGREGATION_RESULT = 4 +DATA_SET_HISTOGRAMS = 5 +DATA_SET_HISTOGRAMS_RESULT = 6 SCALE_FACTOR = 1000000.0 # Preserve 6 decimal places @@ -101,6 +103,21 @@ def encode_aggregation_result( return encoder.finish() + def decode_histograms(self, buffer: bytes, fl_ctx: FLContext) -> List[float]: + decoder = DamDecoder(buffer) + if not decoder.is_valid(): + return None + data_set_id = decoder.get_data_set_id() + if data_set_id != DATA_SET_HISTOGRAMS: + raise RuntimeError(f"Invalid DataSet: {data_set_id}") + + return decoder.decode_float_array() + + def encode_histograms_result(self, histograms: List[float], fl_ctx: FLContext) -> bytes: + encoder = DamEncoder(DATA_SET_HISTOGRAMS_RESULT) + encoder.add_float_array(histograms) + return 
encoder.finish() + @staticmethod def get_bin_size(cuts: [int], feature_id: int) -> int: return cuts[feature_id + 1] - cuts[feature_id] @@ -133,3 +150,4 @@ def to_float_array(result: FeatureAggregationResult) -> List[float]: float_array.append(ProcessorDataConverter.int_to_float(h)) return float_array + diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/sec/server_handler.py b/nvflare/app_opt/xgboost/histogram_based_v2/sec/server_handler.py index d24b73dc76..4aad567fe1 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/sec/server_handler.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/sec/server_handler.py @@ -14,6 +14,8 @@ import os import threading +from nvflare.app_opt.he import decomposers + from nvflare.apis.fl_component import FLComponent from nvflare.apis.fl_context import FLContext from nvflare.apis.shareable import Shareable @@ -33,6 +35,8 @@ def __init__(self): self.aggr_result_to_send = None self.aggr_result_lock = threading.Lock() + decomposers.register() + def _process_before_broadcast(self, fl_ctx: FLContext): self.info(fl_ctx, "start") rank = fl_ctx.get_prop(Constant.PARAM_KEY_RANK) @@ -82,20 +86,25 @@ def _process_after_broadcast(self, fl_ctx: FLContext): fl_ctx.set_prop(key=Constant.PARAM_KEY_RCV_BUF, value=self.encrypted_gh, private=True, sticky=False) def _process_before_all_gather_v(self, fl_ctx: FLContext): - self.info(fl_ctx, "start") - rank = fl_ctx.get_prop(Constant.PARAM_KEY_RANK) request = fl_ctx.get_prop(Constant.PARAM_KEY_REQUEST) assert isinstance(request, Shareable) has_encrypted_data = request.get_header(Constant.HEADER_KEY_ENCRYPTED_DATA) self.info(fl_ctx, f"{has_encrypted_data=}") if not has_encrypted_data: - self.info(fl_ctx, "no encrypted data - ignore") + self.info(fl_ctx, "start - non-secure data") return - fl_ctx.set_prop(key="in_aggr", value=True, private=True, sticky=False) + has_histograms = request.get_header(Constant.HEADER_KEY_ENCRYPTED_HISTOGRAMS) + split_mode = "horizontal" if has_histograms else "vertical" 
+ self.info(fl_ctx, f"start - {split_mode}") + + fl_ctx.set_prop(key=Constant.HEADER_KEY_IN_AGGR, value=True, private=True, sticky=False) + fl_ctx.set_prop(key=Constant.HEADER_KEY_ENCRYPTED_HISTOGRAMS, value=has_histograms, private=True, sticky=False) + + rank = fl_ctx.get_prop(Constant.PARAM_KEY_RANK) send_buf = fl_ctx.get_prop(Constant.PARAM_KEY_SEND_BUF) if send_buf: - # the send_buf contains encoded aggr result (str) from this rank + # the send_buf contains encoded aggr result (str) or CKKS vector from this rank self.info(fl_ctx, f"got encrypted aggr data: {len(send_buf)} bytes") with self.aggr_result_lock: self.aggr_result_to_send = None @@ -113,9 +122,9 @@ def _process_before_all_gather_v(self, fl_ctx: FLContext): def _process_after_all_gather_v(self, fl_ctx: FLContext): # this is called after the Server has finished gathering - # Note: this fl_ctx is the same as the one in _handle_before_all_gather_v! + # Note: this fl_ctx is the same as the one in _process_before_all_gather_v! rank = fl_ctx.get_prop(Constant.PARAM_KEY_RANK) - in_aggr = fl_ctx.get_prop("in_aggr") + in_aggr = fl_ctx.get_prop(Constant.HEADER_KEY_IN_AGGR) self.info(fl_ctx, f"start {in_aggr=}") if not in_aggr: @@ -124,14 +133,32 @@ def _process_after_all_gather_v(self, fl_ctx: FLContext): reply = fl_ctx.get_prop(Constant.PARAM_KEY_REPLY) assert isinstance(reply, Shareable) + has_histograms = fl_ctx.get_prop(Constant.HEADER_KEY_ENCRYPTED_HISTOGRAMS) reply.set_header(Constant.HEADER_KEY_ENCRYPTED_DATA, True) + reply.set_header(Constant.HEADER_KEY_ENCRYPTED_HISTOGRAMS, has_histograms) with self.aggr_result_lock: if not self.aggr_result_to_send: if not self.aggr_result_dict: return self._abort(f"Rank {rank}: no aggr result after AllGatherV!", fl_ctx) - self.aggr_result_to_send = self.aggr_result_dict + + if has_histograms: + self.aggr_result_to_send = self._histogram_sum(fl_ctx) + else: + self.aggr_result_to_send = self.aggr_result_dict # reset aggr_result_dict for next gather self.aggr_result_dict 
= None self.info(fl_ctx, f"aggr_result_to_send {len(self.aggr_result_to_send)}") fl_ctx.set_prop(key=Constant.PARAM_KEY_RCV_BUF, value=self.aggr_result_to_send, private=True, sticky=False) + + def _histogram_sum(self, fl_ctx: FLContext): + + result = None + + for rank, vector in self.aggr_result_dict.items(): + if not result: + result = vector + else: + result = result + vector + + return result From 6d7508293b0a02284288bee2433181175949135f Mon Sep 17 00:00:00 2001 From: Zhihong Zhang Date: Mon, 6 May 2024 18:07:07 -0400 Subject: [PATCH 04/21] Fixed a few horizontal issues --- .../src/nvflare-plugin/nvflare_processor.cc | 2 +- .../adaptors/grpc_client_adaptor.py | 2 +- .../xgboost/histogram_based_v2/defs.py | 2 +- .../histogram_based_v2/sec/client_handler.py | 44 ++++++++++--------- .../sec/processor_data_converter.py | 1 - .../histogram_based_v2/sec/server_handler.py | 29 +++++++----- .../histogram_based_v2/secure_data_loader.py | 12 +++-- 7 files changed, 53 insertions(+), 39 deletions(-) diff --git a/integration/xgboost/processor/src/nvflare-plugin/nvflare_processor.cc b/integration/xgboost/processor/src/nvflare-plugin/nvflare_processor.cc index aaf9335ec2..749d8e98b5 100644 --- a/integration/xgboost/processor/src/nvflare-plugin/nvflare_processor.cc +++ b/integration/xgboost/processor/src/nvflare-plugin/nvflare_processor.cc @@ -155,7 +155,7 @@ std::vector NVFlareProcessor::HandleAggregation(void *buffer, size_t buf } void *NVFlareProcessor::ProcessHistograms(size_t *size, const std::vector& histograms) { - cout << "HandleHistograms called with " << histograms.size() << " entries" << endl; + cout << "ProcessHistograms called with " << histograms.size() << " entries" << endl; DamEncoder encoder(kDataSetHistograms); encoder.AddFloatArray(histograms); diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py index 1fe65e7b45..20e0ad6d41 100644 --- 
a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py @@ -220,7 +220,7 @@ def _check_duplicate_seq(self, op: str, rank: int, seq: int): if event: self.logger.info(f"Duplicate seq {op=} {rank=} {seq=}, wait till original req is done") event.wait(DUPLICATE_REQ_MAX_HOLD_TIME) - time.sleep(1) # To ensure the first request is returned first + time.sleep(1) # To ensure the first request is returned first self.logger.info(f"Duplicate seq {op=} {rank=} {seq=} returned with empty buffer") return True diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/defs.py b/nvflare/app_opt/xgboost/histogram_based_v2/defs.py index 39f7e32bd4..f1ca935ea6 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/defs.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/defs.py @@ -100,7 +100,7 @@ class Constant: EVENT_AFTER_ALL_GATHER_V = "xgb.after_all_gather_v" HEADER_KEY_ENCRYPTED_DATA = "xgb.encrypted_data" - HEADER_KEY_ENCRYPTED_HISTOGRAMS = "xgb.encrypted_histograms" + HEADER_KEY_HORIZONTAL = "xgb.horizontal" HEADER_KEY_ORIGINAL_BUF_SIZE = "xgb.original_buf_size" HEADER_KEY_IN_AGGR = "xgb.in_aggr" diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/sec/client_handler.py b/nvflare/app_opt/xgboost/histogram_based_v2/sec/client_handler.py index cd9333ab00..889f88627d 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/sec/client_handler.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/sec/client_handler.py @@ -14,15 +14,15 @@ import os import time -from nvflare.app_opt.he import decomposers - -from nvflare.app_opt.he.homomorphic_encrypt import load_tenseal_context_from_workspace -from nvflare.app_opt.xgboost.histogram_based_v2.sec.dam import DamDecoder +import tenseal as ts +from tenseal.tensors.ckksvector import CKKSVector from nvflare.apis.event_type import EventType from nvflare.apis.fl_component import FLComponent from nvflare.apis.fl_context import FLContext from 
nvflare.apis.shareable import Shareable +from nvflare.app_opt.he import decomposers +from nvflare.app_opt.he.homomorphic_encrypt import load_tenseal_context_from_workspace from nvflare.app_opt.xgboost.histogram_based_v2.aggr import Aggregator from nvflare.app_opt.xgboost.histogram_based_v2.defs import Constant from nvflare.app_opt.xgboost.histogram_based_v2.mock_he.adder import Adder @@ -37,12 +37,13 @@ generate_keys, split, ) +from nvflare.app_opt.xgboost.histogram_based_v2.sec.dam import DamDecoder from nvflare.app_opt.xgboost.histogram_based_v2.sec.data_converter import FeatureAggregationResult -from nvflare.app_opt.xgboost.histogram_based_v2.sec.processor_data_converter import ProcessorDataConverter, \ - DATA_SET_HISTOGRAMS +from nvflare.app_opt.xgboost.histogram_based_v2.sec.processor_data_converter import ( + DATA_SET_HISTOGRAMS, + ProcessorDataConverter, +) from nvflare.app_opt.xgboost.histogram_based_v2.sec.sec_handler import SecurityHandler -import tenseal as ts -from tenseal.tensors.ckksvector import CKKSVector class ClientSecurityHandler(SecurityHandler): @@ -139,14 +140,14 @@ def _process_before_all_gather_v(self, fl_ctx: FLContext): buffer = fl_ctx.get_prop(Constant.PARAM_KEY_SEND_BUF) decoder = DamDecoder(buffer) - if not decoder.isValid(): + if not decoder.is_valid(): self.info(fl_ctx, "Not secure content - ignore") return if decoder.get_data_set_id() == DATA_SET_HISTOGRAMS: - self._process_before_all_gather_v_horizontal(fl_ctx, decoder) + self._process_before_all_gather_v_horizontal(fl_ctx) else: - self._process_before_all_gather_v_vertical(fl_ctx, decoder) + self._process_before_all_gather_v_vertical(fl_ctx) def _process_before_all_gather_v_vertical(self, fl_ctx: FLContext): rank = fl_ctx.get_prop(Constant.PARAM_KEY_RANK) @@ -216,12 +217,15 @@ def _process_before_all_gather_v_horizontal(self, fl_ctx: FLContext): start = time.time() vector = ts.ckks_vector(self.tenseal_context, histograms) self.info( - fl_ctx, f"_process_before_all_gather_v: 
Histograms with {len(histograms)} entries " - f"encrypted in {time.time()-start} secs" + fl_ctx, + f"_process_before_all_gather_v: Histograms with {len(histograms)} entries " + f"encrypted in {time.time()-start} secs", ) - headers = {Constant.HEADER_KEY_ENCRYPTED_DATA: True, - Constant.HEADER_KEY_ENCRYPTED_HISTOGRAMS: True, - Constant.HEADER_KEY_ORIGINAL_BUF_SIZE: len(buffer)} + headers = { + Constant.HEADER_KEY_ENCRYPTED_DATA: True, + Constant.HEADER_KEY_HORIZONTAL: True, + Constant.HEADER_KEY_ORIGINAL_BUF_SIZE: len(buffer), + } fl_ctx.set_prop(key=Constant.PARAM_KEY_SEND_BUF, value=vector, private=True, sticky=False) fl_ctx.set_prop(key=Constant.PARAM_KEY_HEADERS, value=headers, private=True, sticky=False) @@ -279,8 +283,8 @@ def _process_after_all_gather_v(self, fl_ctx: FLContext): self.info(fl_ctx, "no encrypted result - ignore") return - has_histograms = reply.get_header(Constant.HEADER_KEY_ENCRYPTED_HISTOGRAMS) - if has_histograms: + horizontal = reply.get_header(Constant.HEADER_KEY_HORIZONTAL) + if horizontal: self._process_after_all_gather_v_horizontal(fl_ctx) else: self._process_after_all_gather_v_vertical(fl_ctx) @@ -360,11 +364,9 @@ def handle_event(self, event_type: str, fl_ctx: FLContext): try: self.tenseal_context = load_tenseal_context_from_workspace(self.tenseal_context_file, fl_ctx) except Exception as ex: - self.info(fl_ctx, - f"Can't load tenseal context, horizontal secure XGBoost is not supported: {ex}") + self.info(fl_ctx, f"Can't load tenseal context, horizontal secure XGBoost is not supported: {ex}") self.tenseal_context = None elif event_type == EventType.END_RUN: self.tenseal_context = None else: super().handle_event(event_type, fl_ctx) - diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/sec/processor_data_converter.py b/nvflare/app_opt/xgboost/histogram_based_v2/sec/processor_data_converter.py index 224b60325b..63298c5fb2 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/sec/processor_data_converter.py +++ 
b/nvflare/app_opt/xgboost/histogram_based_v2/sec/processor_data_converter.py @@ -150,4 +150,3 @@ def to_float_array(result: FeatureAggregationResult) -> List[float]: float_array.append(ProcessorDataConverter.int_to_float(h)) return float_array - diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/sec/server_handler.py b/nvflare/app_opt/xgboost/histogram_based_v2/sec/server_handler.py index 4aad567fe1..119d0f5570 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/sec/server_handler.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/sec/server_handler.py @@ -14,11 +14,10 @@ import os import threading -from nvflare.app_opt.he import decomposers - from nvflare.apis.fl_component import FLComponent from nvflare.apis.fl_context import FLContext from nvflare.apis.shareable import Shareable +from nvflare.app_opt.he import decomposers from nvflare.app_opt.xgboost.histogram_based_v2.defs import Constant from nvflare.app_opt.xgboost.histogram_based_v2.sec.sec_handler import SecurityHandler @@ -94,18 +93,22 @@ def _process_before_all_gather_v(self, fl_ctx: FLContext): self.info(fl_ctx, "start - non-secure data") return - has_histograms = request.get_header(Constant.HEADER_KEY_ENCRYPTED_HISTOGRAMS) - split_mode = "horizontal" if has_histograms else "vertical" + horizontal = request.get_header(Constant.HEADER_KEY_HORIZONTAL) + split_mode = "horizontal" if horizontal else "vertical" self.info(fl_ctx, f"start - {split_mode}") fl_ctx.set_prop(key=Constant.HEADER_KEY_IN_AGGR, value=True, private=True, sticky=False) - fl_ctx.set_prop(key=Constant.HEADER_KEY_ENCRYPTED_HISTOGRAMS, value=has_histograms, private=True, sticky=False) + fl_ctx.set_prop(key=Constant.HEADER_KEY_HORIZONTAL, value=horizontal, private=True, sticky=False) rank = fl_ctx.get_prop(Constant.PARAM_KEY_RANK) send_buf = fl_ctx.get_prop(Constant.PARAM_KEY_SEND_BUF) if send_buf: + if horizontal: + length = send_buf.size() + else: + length = len(send_buf) # the send_buf contains encoded aggr result (str) or CKKS 
vector from this rank - self.info(fl_ctx, f"got encrypted aggr data: {len(send_buf)} bytes") + self.info(fl_ctx, f"got encrypted aggr data: {length} bytes") with self.aggr_result_lock: self.aggr_result_to_send = None if not self.aggr_result_dict: @@ -133,22 +136,28 @@ def _process_after_all_gather_v(self, fl_ctx: FLContext): reply = fl_ctx.get_prop(Constant.PARAM_KEY_REPLY) assert isinstance(reply, Shareable) - has_histograms = fl_ctx.get_prop(Constant.HEADER_KEY_ENCRYPTED_HISTOGRAMS) + horizontal = fl_ctx.get_prop(Constant.HEADER_KEY_HORIZONTAL) reply.set_header(Constant.HEADER_KEY_ENCRYPTED_DATA, True) - reply.set_header(Constant.HEADER_KEY_ENCRYPTED_HISTOGRAMS, has_histograms) + reply.set_header(Constant.HEADER_KEY_HORIZONTAL, horizontal) with self.aggr_result_lock: if not self.aggr_result_to_send: if not self.aggr_result_dict: return self._abort(f"Rank {rank}: no aggr result after AllGatherV!", fl_ctx) - if has_histograms: + if horizontal: self.aggr_result_to_send = self._histogram_sum(fl_ctx) else: self.aggr_result_to_send = self.aggr_result_dict # reset aggr_result_dict for next gather self.aggr_result_dict = None - self.info(fl_ctx, f"aggr_result_to_send {len(self.aggr_result_to_send)}") + + if horizontal: + length = self.aggr_result_to_send.size() + else: + length = len(self.aggr_result_to_send) + + self.info(fl_ctx, f"aggr_result_to_send {length}") fl_ctx.set_prop(key=Constant.PARAM_KEY_RCV_BUF, value=self.aggr_result_to_send, private=True, sticky=False) def _histogram_sum(self, fl_ctx: FLContext): diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/secure_data_loader.py b/nvflare/app_opt/xgboost/histogram_based_v2/secure_data_loader.py index 69a418235d..f27e9a4c0d 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/secure_data_loader.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/secure_data_loader.py @@ -16,9 +16,12 @@ from nvflare.app_opt.xgboost.data_loader import XGBDataLoader +COL_SECURE = 2 +ROW_SECURE = 3 + class 
SecureDataLoader(XGBDataLoader): - def __init__(self, rank: int, folder: str): + def __init__(self, rank: int, folder: str, data_split_mode=COL_SECURE): """Reads CSV dataset and return XGB data matrix in vertical secure mode. Args: @@ -27,18 +30,19 @@ def __init__(self, rank: int, folder: str): """ self.rank = rank self.folder = folder + self.data_split_mode = data_split_mode def load_data(self, client_id: str): train_path = f"{self.folder}/site-{self.rank + 1}/train.csv" valid_path = f"{self.folder}/site-{self.rank + 1}/valid.csv" - if self.rank == 0: + if self.rank == 0 or self.data_split_mode == ROW_SECURE: label = "&label_column=0" else: label = "" - train_data = xgb.DMatrix(train_path + f"?format=csv{label}", data_split_mode=2) - valid_data = xgb.DMatrix(valid_path + f"?format=csv{label}", data_split_mode=2) + train_data = xgb.DMatrix(train_path + f"?format=csv{label}", data_split_mode=self.data_split_mode) + valid_data = xgb.DMatrix(valid_path + f"?format=csv{label}", data_split_mode=self.data_split_mode) return train_data, valid_data From 213ca1d692b834bacc648475c676f5e2f2587c34 Mon Sep 17 00:00:00 2001 From: Zhihong Zhang Date: Wed, 8 May 2024 11:31:39 -0400 Subject: [PATCH 05/21] Added reliable message --- .../adaptors/xgb_adaptor.py | 51 ++++++++--- .../xgboost/histogram_based_v2/executor.py | 11 +-- .../histogram_based_v2/fed_executor.py | 6 +- .../xgboost/histogram_based_v2/sender.py | 89 ------------------- 4 files changed, 48 insertions(+), 109 deletions(-) delete mode 100644 nvflare/app_opt/xgboost/histogram_based_v2/sender.py diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/xgb_adaptor.py b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/xgb_adaptor.py index 073a47bdd9..db9a2b6aaa 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/xgb_adaptor.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/xgb_adaptor.py @@ -16,7 +16,6 @@ from nvflare.apis.fl_context import FLContext from nvflare.apis.shareable import 
Shareable from nvflare.app_opt.xgboost.histogram_based_v2.defs import Constant -from nvflare.app_opt.xgboost.histogram_based_v2.sender import Sender from nvflare.fuel.utils.validation_utils import check_non_negative_int, check_positive_int from .adaptor import AppAdaptor @@ -129,28 +128,25 @@ class XGBClientAdaptor(AppAdaptor): XGBClientAdaptor specifies commonly required methods for client adaptor implementations. """ - def __init__(self, in_process): + def __init__(self, in_process, per_msg_timeout: float, tx_timeout: float): """Constructor of XGBClientAdaptor""" AppAdaptor.__init__(self, XGB_APP_NAME, in_process) self.engine = None - self.sender = None self.stopped = False self.rank = None self.num_rounds = None self.world_size = None + self.per_msg_timeout = per_msg_timeout + self.tx_timeout = tx_timeout - def set_sender(self, sender: Sender): - """Set the sender to be used to send XGB operation requests to the server. - - Args: - sender: the sender to be set + def start(self, fl_ctx: FLContext): + pass - Returns: None + def stop(self, fl_ctx: FLContext): + pass - """ - if not isinstance(sender, Sender): - raise TypeError(f"sender must be Sender but got {type(sender)}") - self.sender = sender + def _is_stopped(self) -> (bool, int): + pass def configure(self, config: dict, fl_ctx: FLContext): """Called by XGB Executor to configure the target. 
@@ -202,6 +198,35 @@ def _send_request(self, op: str, req: Shareable) -> (bytes, Shareable): else: raise RuntimeError(f"invalid reply for op {op}: expect Shareable but got {type(reply)}") + req.set_header(Constant.MSG_KEY_XGB_OP, op) + + with self.engine.new_context() as fl_ctx: + reply = ReliableMessage.send_request( + target=FQCN.ROOT_SERVER, + topic=Constant.TOPIC_XGB_REQUEST, + request=req, + per_msg_timeout=self.per_msg_timeout, + tx_timeout=self.tx_timeout, + abort_signal=self.abort_signal, + fl_ctx=fl_ctx, + ) + + if isinstance(reply, Shareable): + rc = reply.get_return_code() + if rc != ReturnCode.OK: + raise RuntimeError(f"received error return code: {rc}") + + reply_op = reply.get_header(Constant.MSG_KEY_XGB_OP) + if reply_op != op: + raise RuntimeError(f"received op {reply_op} != expected op {op}") + + rcv_buf = reply.get(Constant.PARAM_KEY_RCV_BUF) + if not isinstance(rcv_buf, bytes): + raise RuntimeError(f"invalid rcv_buf for {op=}: expect bytes but got {type(rcv_buf)}") + return rcv_buf + else: + raise RuntimeError(f"invalid reply for op {op}: expect Shareable but got {type(reply)}") + def _send_all_gather(self, rank: int, seq: int, send_buf: bytes) -> (bytes, Shareable): """This method is called by a concrete client adaptor to send Allgather operation to the server. 
diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/executor.py b/nvflare/app_opt/xgboost/histogram_based_v2/executor.py index 8c0e727dc5..dc870ff094 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/executor.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/executor.py @@ -23,7 +23,6 @@ from nvflare.security.logging import secure_format_exception from .defs import Constant -from .sender import Sender class XGBExecutor(Executor): @@ -32,7 +31,8 @@ def __init__( adaptor_component_id: str, configure_task_name=Constant.CONFIG_TASK_NAME, start_task_name=Constant.START_TASK_NAME, - req_timeout=60.0, + per_msg_timeout=10.0, + tx_timeout=100.0, ): """Constructor @@ -40,10 +40,13 @@ def __init__( adaptor_component_id: the component ID of client target adaptor configure_task_name: name of the config task start_task_name: name of the start task + per_msg_timeout: timeout for sending one message + tx_timeout: transaction timeout """ Executor.__init__(self) self.adaptor_component_id = adaptor_component_id - self.req_timeout = req_timeout + self.per_msg_timeout = per_msg_timeout + self.tx_timeout = tx_timeout self.configure_task_name = configure_task_name self.start_task_name = start_task_name self.adaptor = None @@ -80,8 +83,6 @@ def handle_event(self, event_type: str, fl_ctx: FLContext): return adaptor.set_abort_signal(self.abort_signal) - engine = fl_ctx.get_engine() - adaptor.set_sender(Sender(engine, self.req_timeout)) adaptor.initialize(fl_ctx) self.adaptor = adaptor elif event_type == EventType.END_RUN: diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/fed_executor.py b/nvflare/app_opt/xgboost/histogram_based_v2/fed_executor.py index 107d5550aa..ad94cbc534 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/fed_executor.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/fed_executor.py @@ -27,7 +27,8 @@ def __init__( verbose_eval=False, use_gpus=False, int_server_grpc_options=None, - req_timeout=60.0, + per_msg_timeout=10.0, + tx_timeout=100.0, 
model_file_name="model.json", metrics_writer_id: str = None, in_process=True, @@ -35,7 +36,8 @@ def __init__( XGBExecutor.__init__( self, adaptor_component_id="", - req_timeout=req_timeout, + per_msg_timeout=per_msg_timeout, + tx_timeout=tx_timeout, ) self.early_stopping_rounds = early_stopping_rounds self.xgb_params = xgb_params diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/sender.py b/nvflare/app_opt/xgboost/histogram_based_v2/sender.py deleted file mode 100644 index 7177fbb214..0000000000 --- a/nvflare/app_opt/xgboost/histogram_based_v2/sender.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from nvflare.apis.shareable import ReturnCode, Shareable -from nvflare.apis.signal import Signal -from nvflare.fuel.f3.cellnet.fqcn import FQCN -from nvflare.fuel.utils.obj_utils import get_logger - -from .defs import Constant - - -class Sender: - """ - A Sender is used to send XGB requests from the client to the server and wait for reply. - TBD: currently the sender simply sends the request with an aux message. It will be enhanced to be more - reliable in dealing with unstable network. 
- """ - - def __init__(self, engine, timeout): - """Constructor - - Args: - engine: the client engine that can send aux messages - timeout: the timeout for XGB requests - """ - self.engine = engine - self.timeout = timeout - self.logger = get_logger(self) - - def _extract_result(self, reply, expected_op): - if not reply: - return None - if not isinstance(reply, dict): - self.logger.error(f"expect reply to be a dict but got {type(reply)}") - return None - result = reply.get(FQCN.ROOT_SERVER) - if not result: - self.logger.error(f"no reply from {FQCN.ROOT_SERVER} for request {expected_op}") - return None - if not isinstance(result, Shareable): - self.logger.error(f"expect result to be a Shareable but got {type(result)}") - return None - rc = result.get_return_code() - if rc != ReturnCode.OK: - self.logger.error(f"server failed to process request: {rc=}") - return None - reply_op = result.get_header(Constant.MSG_KEY_XGB_OP) - if reply_op != expected_op: - self.logger.error(f"received op {reply_op} != expected op {expected_op}") - return None - return result - - def send_to_server(self, op: str, req: Shareable, abort_signal: Signal): - """Send an XGB request to the server. - - Args: - op: the XGB operation code - req: the XGB request - abort_signal: used for checking whether the job is aborted. - - Returns: reply from the server - - Note: when this method is enhanced to be more reliable, we'll keep resending until either the request is - sent successfully or the job is aborted. 
- - """ - req.set_header(Constant.MSG_KEY_XGB_OP, op) - - server_name = FQCN.ROOT_SERVER - with self.engine.new_context() as fl_ctx: - reply = self.engine.send_aux_request( - targets=[server_name], - topic=Constant.TOPIC_XGB_REQUEST, - request=req, - timeout=self.timeout, - fl_ctx=fl_ctx, - ) - return self._extract_result(reply, op) From d946eaaaaddc892ca7c36c9be7ae7f0b326dfeae Mon Sep 17 00:00:00 2001 From: Zhihong Zhang Date: Wed, 8 May 2024 16:07:33 -0400 Subject: [PATCH 06/21] Added ReliableMessage parameters --- .../xgboost/histogram_based_v2/adaptors/xgb_adaptor.py | 10 +++------- .../app_opt/xgboost/histogram_based_v2/fed_executor.py | 2 ++ 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/xgb_adaptor.py b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/xgb_adaptor.py index db9a2b6aaa..f917a3af74 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/xgb_adaptor.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/xgb_adaptor.py @@ -13,9 +13,12 @@ # limitations under the License. 
from abc import abstractmethod +from nvflare.apis.fl_constant import ReturnCode from nvflare.apis.fl_context import FLContext from nvflare.apis.shareable import Shareable +from nvflare.apis.utils.reliable_message import ReliableMessage from nvflare.app_opt.xgboost.histogram_based_v2.defs import Constant +from nvflare.fuel.f3.cellnet.fqcn import FQCN from nvflare.fuel.utils.validation_utils import check_non_negative_int, check_positive_int from .adaptor import AppAdaptor @@ -191,13 +194,6 @@ def _send_request(self, op: str, req: Shareable) -> (bytes, Shareable): Returns: operation result """ - reply = self.sender.send_to_server(op, req, self.abort_signal) - if isinstance(reply, Shareable): - rcv_buf = reply.get(Constant.PARAM_KEY_RCV_BUF) - return rcv_buf, reply - else: - raise RuntimeError(f"invalid reply for op {op}: expect Shareable but got {type(reply)}") - req.set_header(Constant.MSG_KEY_XGB_OP, op) with self.engine.new_context() as fl_ctx: diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/fed_executor.py b/nvflare/app_opt/xgboost/histogram_based_v2/fed_executor.py index ad94cbc534..90989d4c4b 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/fed_executor.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/fed_executor.py @@ -63,6 +63,8 @@ def get_adaptor(self, fl_ctx: FLContext): adaptor = GrpcClientAdaptor( int_server_grpc_options=self.int_server_grpc_options, in_process=self.in_process, + per_msg_timeout=self.per_msg_timeout, + tx_timeout=self.tx_timeout, ) adaptor.set_runner(runner) return adaptor From bb4a934f5dcff9d553ba2086d2e25311e115a1a8 Mon Sep 17 00:00:00 2001 From: Zhihong Zhang Date: Thu, 9 May 2024 20:40:28 -0400 Subject: [PATCH 07/21] Added log for debugging empty rcv_buf --- .../adaptors/grpc_client_adaptor.py | 4 +++- .../histogram_based_v2/adaptors/xgb_adaptor.py | 2 +- .../xgboost/histogram_based_v2/controller.py | 16 ++++++++++++---- .../xgboost/histogram_based_v2/fed_executor.py | 4 ++-- 4 files changed, 18 insertions(+), 8 
deletions(-) diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py index 20e0ad6d41..737ed035aa 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py @@ -34,8 +34,10 @@ def __init__( self, int_server_grpc_options=None, in_process=True, + per_msg_timeout=10.0, + tx_timeout=100.0 ): - XGBClientAdaptor.__init__(self, in_process) + XGBClientAdaptor.__init__(self, in_process, per_msg_timeout, tx_timeout) self.int_server_grpc_options = int_server_grpc_options self.in_process = in_process self.internal_xgb_server = None diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/xgb_adaptor.py b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/xgb_adaptor.py index f917a3af74..eb78c45461 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/xgb_adaptor.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/xgb_adaptor.py @@ -219,7 +219,7 @@ def _send_request(self, op: str, req: Shareable) -> (bytes, Shareable): rcv_buf = reply.get(Constant.PARAM_KEY_RCV_BUF) if not isinstance(rcv_buf, bytes): raise RuntimeError(f"invalid rcv_buf for {op=}: expect bytes but got {type(rcv_buf)}") - return rcv_buf + return rcv_buf, reply else: raise RuntimeError(f"invalid reply for op {op}: expect Shareable but got {type(reply)}") diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/controller.py b/nvflare/app_opt/xgboost/histogram_based_v2/controller.py index 8040cf2922..647e7204ca 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/controller.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/controller.py @@ -20,6 +20,7 @@ from nvflare.apis.impl.controller import Controller from nvflare.apis.shareable import ReturnCode, Shareable, make_reply from nvflare.apis.signal import Signal +from nvflare.apis.utils.reliable_message import 
ReliableMessage from nvflare.app_opt.xgboost.histogram_based_v2.adaptors.xgb_adaptor import XGBServerAdaptor from nvflare.fuel.utils.validation_utils import check_number_range, check_object_type, check_positive_number, check_str from nvflare.security.logging import secure_format_exception @@ -149,14 +150,17 @@ def start_controller(self, fl_ctx: FLContext): engine = fl_ctx.get_engine() engine.register_aux_message_handler( + topic=Constant.TOPIC_CLIENT_DONE, + message_handle_func=self._process_client_done, + ) + ReliableMessage.register_request_handler( topic=Constant.TOPIC_XGB_REQUEST, - message_handle_func=self._process_xgb_request, + handler_f=self._process_xgb_request, ) - engine.register_aux_message_handler( + ReliableMessage.register_request_handler( topic=Constant.TOPIC_CLIENT_DONE, - message_handle_func=self._process_client_done, + handler_f=self._process_client_done, ) - def _trigger_stop(self, fl_ctx: FLContext, error=None): # first trigger the abort_signal to tell all components (mainly the controller's control_flow and adaptor) # that check this signal to abort. 
@@ -328,6 +332,10 @@ def _process_broadcast(self, request: Shareable, fl_ctx: FLContext) -> Shareable send_buf = fl_ctx.get_prop(Constant.PARAM_KEY_SEND_BUF) assert isinstance(self.adaptor, XGBServerAdaptor) rcv_buf = self.adaptor.broadcast(rank, seq, root, send_buf, fl_ctx) + if not rcv_buf: + self.logger.info("======== rcv_buf is null") + else: + self.logger.info(f"========= rcb_buf len: {len(rcv_buf)}") reply = Shareable() fl_ctx.set_prop(key=Constant.PARAM_KEY_REPLY, value=reply, private=True, sticky=False) diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/fed_executor.py b/nvflare/app_opt/xgboost/histogram_based_v2/fed_executor.py index 90989d4c4b..168d8d328a 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/fed_executor.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/fed_executor.py @@ -27,8 +27,8 @@ def __init__( verbose_eval=False, use_gpus=False, int_server_grpc_options=None, - per_msg_timeout=10.0, - tx_timeout=100.0, + per_msg_timeout=60.0, + tx_timeout=600.0, model_file_name="model.json", metrics_writer_id: str = None, in_process=True, From 2037db4b434a7549c14845dc34fa1557157fa68f Mon Sep 17 00:00:00 2001 From: Zhihong Zhang Date: Fri, 10 May 2024 15:09:31 -0400 Subject: [PATCH 08/21] Added finally block to finish duplicate seq --- .../adaptors/grpc_client_adaptor.py | 13 ++++++++----- .../histogram_based_v2/adaptors/xgb_adaptor.py | 2 -- 2 files changed, 8 insertions(+), 7 deletions(-) diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py index 737ed035aa..a2ef77bca0 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py @@ -147,14 +147,14 @@ def Allgather(self, request: pb2.AllgatherRequest, context): send_buf=request.send_buffer, ) - self._finish_pending_req("allgather", request.rank, request.sequence_number) - 
return pb2.AllgatherReply(receive_buffer=rcv_buf) except Exception as ex: self._abort(reason=f"send_all_gather exception: {secure_format_exception(ex)}") context.set_code(grpc.StatusCode.INTERNAL) context.set_details(str(ex)) return pb2.AllgatherReply(receive_buffer=None) + finally: + self._finish_pending_req("allgather", request.rank, request.sequence_number) def AllgatherV(self, request: pb2.AllgatherVRequest, context): try: @@ -167,13 +167,14 @@ def AllgatherV(self, request: pb2.AllgatherVRequest, context): send_buf=request.send_buffer, ) - self._finish_pending_req("allgatherv", request.rank, request.sequence_number) return pb2.AllgatherVReply(receive_buffer=rcv_buf) except Exception as ex: self._abort(reason=f"send_all_gather_v exception: {secure_format_exception(ex)}") context.set_code(grpc.StatusCode.INTERNAL) context.set_details(str(ex)) return pb2.AllgatherVReply(receive_buffer=None) + finally: + self._finish_pending_req("allgatherv", request.rank, request.sequence_number) def Allreduce(self, request: pb2.AllreduceRequest, context): try: @@ -188,13 +189,14 @@ def Allreduce(self, request: pb2.AllreduceRequest, context): send_buf=request.send_buffer, ) - self._finish_pending_req("allreduce", request.rank, request.sequence_number) return pb2.AllreduceReply(receive_buffer=rcv_buf) except Exception as ex: self._abort(reason=f"send_all_reduce exception: {secure_format_exception(ex)}") context.set_code(grpc.StatusCode.INTERNAL) context.set_details(str(ex)) return pb2.AllreduceReply(receive_buffer=None) + finally: + self._finish_pending_req("allreduce", request.rank, request.sequence_number) def Broadcast(self, request: pb2.BroadcastRequest, context): try: @@ -208,13 +210,14 @@ def Broadcast(self, request: pb2.BroadcastRequest, context): root=request.root, ) - self._finish_pending_req("broadcast", request.rank, request.sequence_number) return pb2.BroadcastReply(receive_buffer=rcv_buf) except Exception as ex: self._abort(reason=f"send_broadcast exception: 
{secure_format_exception(ex)}") context.set_code(grpc.StatusCode.INTERNAL) context.set_details(str(ex)) return pb2.BroadcastReply(receive_buffer=None) + finally: + self._finish_pending_req("broadcast", request.rank, request.sequence_number) def _check_duplicate_seq(self, op: str, rank: int, seq: int): with self._lock: diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/xgb_adaptor.py b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/xgb_adaptor.py index eb78c45461..d0ec51de7c 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/xgb_adaptor.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/xgb_adaptor.py @@ -217,8 +217,6 @@ def _send_request(self, op: str, req: Shareable) -> (bytes, Shareable): raise RuntimeError(f"received op {reply_op} != expected op {op}") rcv_buf = reply.get(Constant.PARAM_KEY_RCV_BUF) - if not isinstance(rcv_buf, bytes): - raise RuntimeError(f"invalid rcv_buf for {op=}: expect bytes but got {type(rcv_buf)}") return rcv_buf, reply else: raise RuntimeError(f"invalid reply for op {op}: expect Shareable but got {type(reply)}") From 54af72ac752ec4d0f7e055ee07fce6b1eaa363d4 Mon Sep 17 00:00:00 2001 From: Zhihong Zhang Date: Fri, 10 May 2024 15:57:52 -0400 Subject: [PATCH 09/21] Removed debug statements --- nvflare/app_opt/xgboost/histogram_based_v2/controller.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/controller.py b/nvflare/app_opt/xgboost/histogram_based_v2/controller.py index 647e7204ca..2d76b1e697 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/controller.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/controller.py @@ -332,12 +332,8 @@ def _process_broadcast(self, request: Shareable, fl_ctx: FLContext) -> Shareable send_buf = fl_ctx.get_prop(Constant.PARAM_KEY_SEND_BUF) assert isinstance(self.adaptor, XGBServerAdaptor) rcv_buf = self.adaptor.broadcast(rank, seq, root, send_buf, fl_ctx) - if not rcv_buf: - 
self.logger.info("======== rcv_buf is null") - else: - self.logger.info(f"========= rcb_buf len: {len(rcv_buf)}") - reply = Shareable() + reply = Shareable() fl_ctx.set_prop(key=Constant.PARAM_KEY_REPLY, value=reply, private=True, sticky=False) fl_ctx.set_prop(key=Constant.PARAM_KEY_RCV_BUF, value=rcv_buf, private=True, sticky=False) self.fire_event(Constant.EVENT_AFTER_BROADCAST, fl_ctx) From c2ffb3b8f4f5ad1b38f7b8c6e4da12af318ade3f Mon Sep 17 00:00:00 2001 From: Zhihong Zhang Date: Fri, 10 May 2024 15:59:12 -0400 Subject: [PATCH 10/21] format change --- .../histogram_based_v2/adaptors/grpc_client_adaptor.py | 8 +------- nvflare/app_opt/xgboost/histogram_based_v2/controller.py | 1 + 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py index a2ef77bca0..e5fb71d0f5 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/adaptors/grpc_client_adaptor.py @@ -30,13 +30,7 @@ class GrpcClientAdaptor(XGBClientAdaptor, FederatedServicer): - def __init__( - self, - int_server_grpc_options=None, - in_process=True, - per_msg_timeout=10.0, - tx_timeout=100.0 - ): + def __init__(self, int_server_grpc_options=None, in_process=True, per_msg_timeout=10.0, tx_timeout=100.0): XGBClientAdaptor.__init__(self, in_process, per_msg_timeout, tx_timeout) self.int_server_grpc_options = int_server_grpc_options self.in_process = in_process diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/controller.py b/nvflare/app_opt/xgboost/histogram_based_v2/controller.py index 2d76b1e697..f5f4fb4e7b 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/controller.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/controller.py @@ -161,6 +161,7 @@ def start_controller(self, fl_ctx: FLContext): topic=Constant.TOPIC_CLIENT_DONE, 
handler_f=self._process_client_done, ) + def _trigger_stop(self, fl_ctx: FLContext, error=None): # first trigger the abort_signal to tell all components (mainly the controller's control_flow and adaptor) # that check this signal to abort. From b43241b9d526f6aedff45c8e2cd1286f05fb1c7c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yuan-Ting=20Hsieh=20=28=E8=AC=9D=E6=B2=85=E5=BB=B7=29?= Date: Tue, 7 May 2024 15:43:06 -0700 Subject: [PATCH 11/21] Add in process client api tests (#2549) * Add in process client api tests * Fix headers * Fix comments --- .../data/apps/cyclic/app/custom/__init__.py | 2 +- .../cyclic/app/custom/tf2_model_persistor.py | 2 +- .../data/apps/cyclic/app/custom/tf2_net.py | 2 +- .../data/apps/cyclic/app/custom/trainer.py | 2 +- .../app/custom/__init__.py | 2 +- .../app/custom/np_trainer.py | 2 +- .../apps/sag_exception/app/custom/__init__.py | 2 +- .../app/custom/exception_trainer.py | 2 +- .../apps/tb_streaming/app/custom/__init__.py | 2 +- .../app/custom/custom_controller.py | 2 +- .../app/custom/custom_executor.py | 2 +- .../app/custom/cifar10_structured_fl.py | 2 +- .../data/jobs/decorator/app/custom/net.py | 2 +- .../app/config/config_fed_client.conf | 37 +++++ .../app/config/config_fed_server.conf | 62 ++++++++ .../app/custom/cifar10_structured_fl.py | 139 ++++++++++++++++++ .../decorator_in_process/app/custom/net.py | 37 +++++ .../data/jobs/decorator_in_process/meta.conf | 11 ++ .../app/custom/cifar10_lightning_fl.py | 2 +- .../data/jobs/lightning/app/custom/lit_net.py | 2 +- .../data/jobs/lightning/app/custom/net.py | 2 +- .../app/config/config_fed_client.conf | 37 +++++ .../app/config/config_fed_server.conf | 62 ++++++++ .../app/custom/cifar10_lightning_fl.py | 105 +++++++++++++ .../app/custom/lit_net.py | 72 +++++++++ .../lightning_in_process/app/custom/net.py | 37 +++++ .../data/jobs/lightning_in_process/meta.conf | 11 ++ .../app/config/config_fed_client.conf | 37 +++++ .../app/config/config_fed_server.conf | 88 +++++++++++ 
.../app/custom/cifar10_fl.py | 137 +++++++++++++++++ .../app/custom/net.py | 37 +++++ .../jobs/pt_client_api_in_process/meta.conf | 11 ++ .../standalone_job/client_api.yml | 62 +++++++- tests/unit_test/app_opt/__init__.py | 2 +- tests/unit_test/fuel/utils/fobs/datum_test.py | 2 +- 35 files changed, 998 insertions(+), 20 deletions(-) create mode 100644 tests/integration_test/data/jobs/decorator_in_process/app/config/config_fed_client.conf create mode 100644 tests/integration_test/data/jobs/decorator_in_process/app/config/config_fed_server.conf create mode 100644 tests/integration_test/data/jobs/decorator_in_process/app/custom/cifar10_structured_fl.py create mode 100644 tests/integration_test/data/jobs/decorator_in_process/app/custom/net.py create mode 100644 tests/integration_test/data/jobs/decorator_in_process/meta.conf create mode 100644 tests/integration_test/data/jobs/lightning_in_process/app/config/config_fed_client.conf create mode 100644 tests/integration_test/data/jobs/lightning_in_process/app/config/config_fed_server.conf create mode 100644 tests/integration_test/data/jobs/lightning_in_process/app/custom/cifar10_lightning_fl.py create mode 100644 tests/integration_test/data/jobs/lightning_in_process/app/custom/lit_net.py create mode 100644 tests/integration_test/data/jobs/lightning_in_process/app/custom/net.py create mode 100644 tests/integration_test/data/jobs/lightning_in_process/meta.conf create mode 100644 tests/integration_test/data/jobs/pt_client_api_in_process/app/config/config_fed_client.conf create mode 100644 tests/integration_test/data/jobs/pt_client_api_in_process/app/config/config_fed_server.conf create mode 100644 tests/integration_test/data/jobs/pt_client_api_in_process/app/custom/cifar10_fl.py create mode 100644 tests/integration_test/data/jobs/pt_client_api_in_process/app/custom/net.py create mode 100644 tests/integration_test/data/jobs/pt_client_api_in_process/meta.conf diff --git 
a/tests/integration_test/data/apps/cyclic/app/custom/__init__.py b/tests/integration_test/data/apps/cyclic/app/custom/__init__.py index 2db92b2574..4fc50543f1 100644 --- a/tests/integration_test/data/apps/cyclic/app/custom/__init__.py +++ b/tests/integration_test/data/apps/cyclic/app/custom/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration_test/data/apps/cyclic/app/custom/tf2_model_persistor.py b/tests/integration_test/data/apps/cyclic/app/custom/tf2_model_persistor.py index f2bcbf1d1a..33c5a7c119 100644 --- a/tests/integration_test/data/apps/cyclic/app/custom/tf2_model_persistor.py +++ b/tests/integration_test/data/apps/cyclic/app/custom/tf2_model_persistor.py @@ -1,4 +1,4 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration_test/data/apps/cyclic/app/custom/tf2_net.py b/tests/integration_test/data/apps/cyclic/app/custom/tf2_net.py index 2ccb46862f..46766284c2 100644 --- a/tests/integration_test/data/apps/cyclic/app/custom/tf2_net.py +++ b/tests/integration_test/data/apps/cyclic/app/custom/tf2_net.py @@ -1,4 +1,4 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/tests/integration_test/data/apps/cyclic/app/custom/trainer.py b/tests/integration_test/data/apps/cyclic/app/custom/trainer.py index 46013fa46e..720f795198 100644 --- a/tests/integration_test/data/apps/cyclic/app/custom/trainer.py +++ b/tests/integration_test/data/apps/cyclic/app/custom/trainer.py @@ -1,4 +1,4 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration_test/data/apps/np_sag_weights_diff/app/custom/__init__.py b/tests/integration_test/data/apps/np_sag_weights_diff/app/custom/__init__.py index 2db92b2574..4fc50543f1 100644 --- a/tests/integration_test/data/apps/np_sag_weights_diff/app/custom/__init__.py +++ b/tests/integration_test/data/apps/np_sag_weights_diff/app/custom/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration_test/data/apps/np_sag_weights_diff/app/custom/np_trainer.py b/tests/integration_test/data/apps/np_sag_weights_diff/app/custom/np_trainer.py index 0b7e860213..8b754271aa 100755 --- a/tests/integration_test/data/apps/np_sag_weights_diff/app/custom/np_trainer.py +++ b/tests/integration_test/data/apps/np_sag_weights_diff/app/custom/np_trainer.py @@ -1,4 +1,4 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/tests/integration_test/data/apps/sag_exception/app/custom/__init__.py b/tests/integration_test/data/apps/sag_exception/app/custom/__init__.py index 2db92b2574..4fc50543f1 100644 --- a/tests/integration_test/data/apps/sag_exception/app/custom/__init__.py +++ b/tests/integration_test/data/apps/sag_exception/app/custom/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration_test/data/apps/sag_exception/app/custom/exception_trainer.py b/tests/integration_test/data/apps/sag_exception/app/custom/exception_trainer.py index d8273ac7f7..ca471871be 100755 --- a/tests/integration_test/data/apps/sag_exception/app/custom/exception_trainer.py +++ b/tests/integration_test/data/apps/sag_exception/app/custom/exception_trainer.py @@ -1,4 +1,4 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration_test/data/apps/tb_streaming/app/custom/__init__.py b/tests/integration_test/data/apps/tb_streaming/app/custom/__init__.py index 2db92b2574..4fc50543f1 100755 --- a/tests/integration_test/data/apps/tb_streaming/app/custom/__init__.py +++ b/tests/integration_test/data/apps/tb_streaming/app/custom/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/tests/integration_test/data/apps/tb_streaming/app/custom/custom_controller.py b/tests/integration_test/data/apps/tb_streaming/app/custom/custom_controller.py index 9a585eb81c..c89750518b 100755 --- a/tests/integration_test/data/apps/tb_streaming/app/custom/custom_controller.py +++ b/tests/integration_test/data/apps/tb_streaming/app/custom/custom_controller.py @@ -1,4 +1,4 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration_test/data/apps/tb_streaming/app/custom/custom_executor.py b/tests/integration_test/data/apps/tb_streaming/app/custom/custom_executor.py index d3e0d8ce54..8680a9a280 100755 --- a/tests/integration_test/data/apps/tb_streaming/app/custom/custom_executor.py +++ b/tests/integration_test/data/apps/tb_streaming/app/custom/custom_executor.py @@ -1,4 +1,4 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration_test/data/jobs/decorator/app/custom/cifar10_structured_fl.py b/tests/integration_test/data/jobs/decorator/app/custom/cifar10_structured_fl.py index b92b37cb54..74aaf898f7 100644 --- a/tests/integration_test/data/jobs/decorator/app/custom/cifar10_structured_fl.py +++ b/tests/integration_test/data/jobs/decorator/app/custom/cifar10_structured_fl.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/tests/integration_test/data/jobs/decorator/app/custom/net.py b/tests/integration_test/data/jobs/decorator/app/custom/net.py index 031f84f432..47ac7e9589 100644 --- a/tests/integration_test/data/jobs/decorator/app/custom/net.py +++ b/tests/integration_test/data/jobs/decorator/app/custom/net.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration_test/data/jobs/decorator_in_process/app/config/config_fed_client.conf b/tests/integration_test/data/jobs/decorator_in_process/app/config/config_fed_client.conf new file mode 100644 index 0000000000..0f810bf50a --- /dev/null +++ b/tests/integration_test/data/jobs/decorator_in_process/app/config/config_fed_client.conf @@ -0,0 +1,37 @@ +{ + format_version = 2 + app_script = "cifar10_structured_fl.py" + app_config = "" + executors = [ + { + tasks = [ + "train" + ] + executor { + path = "nvflare.app_opt.pt.in_process_client_api_executor.PTInProcessClientAPIExecutor" + args { + task_script_path = "{app_script}" + task_script_args = "{app_config}" + params_transfer_type = "DIFF" + train_with_evaluation = true + result_pull_interval = 0.5 + log_pull_interval = 0.1 + } + } + } + ] + task_data_filters = [] + task_result_filters = [] + components = [ + { + id = "event_to_fed" + name = "ConvertToFedEvent" + args { + events_to_convert = [ + "analytix_log_stats" + ] + fed_event_prefix = "fed." 
+ } + } + ] +} diff --git a/tests/integration_test/data/jobs/decorator_in_process/app/config/config_fed_server.conf b/tests/integration_test/data/jobs/decorator_in_process/app/config/config_fed_server.conf new file mode 100644 index 0000000000..8245a2d527 --- /dev/null +++ b/tests/integration_test/data/jobs/decorator_in_process/app/config/config_fed_server.conf @@ -0,0 +1,62 @@ +{ + format_version = 2 + task_data_filters = [] + task_result_filters = [] + model_class_path = "net.Net" + workflows = [ + { + id = "scatter_and_gather" + path = "nvflare.app_common.workflows.scatter_and_gather.ScatterAndGather" + args { + min_clients = 2 + num_rounds = 2 + start_round = 0 + wait_time_after_min_received = 0 + aggregator_id = "aggregator" + persistor_id = "persistor" + shareable_generator_id = "shareable_generator" + train_task_name = "train" + train_timeout = 0 + } + } + ] + components = [ + { + id = "persistor" + path = "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor" + args { + model { + path = "{model_class_path}" + } + } + } + { + id = "shareable_generator" + path = "nvflare.app_common.shareablegenerators.full_model_shareable_generator.FullModelShareableGenerator" + args {} + } + { + id = "aggregator" + path = "nvflare.app_common.aggregators.intime_accumulate_model_aggregator.InTimeAccumulateWeightedAggregator" + args { + expected_data_kind = "WEIGHT_DIFF" + } + } + { + id = "model_selector" + path = "nvflare.app_common.widgets.intime_model_selector.IntimeModelSelector" + args { + key_metric = "accuracy" + } + } + { + id = "receiver" + path = "nvflare.app_opt.tracking.tb.tb_receiver.TBAnalyticsReceiver" + args { + events = [ + "fed.analytix_log_stats" + ] + } + } + ] +} diff --git a/tests/integration_test/data/jobs/decorator_in_process/app/custom/cifar10_structured_fl.py b/tests/integration_test/data/jobs/decorator_in_process/app/custom/cifar10_structured_fl.py new file mode 100644 index 0000000000..74aaf898f7 --- /dev/null +++ 
b/tests/integration_test/data/jobs/decorator_in_process/app/custom/cifar10_structured_fl.py @@ -0,0 +1,139 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +import torch.nn as nn +import torch.optim as optim +import torchvision +import torchvision.transforms as transforms +from net import Net + +# (1) import nvflare client API +import nvflare.client as flare + +# (optional) set a fix place so we don't need to download everytime +DATASET_PATH = "/tmp/nvflare/data" +# (optional) We change to use GPU to speed things up. 
+# if you want to use CPU, change DEVICE="cpu" +DEVICE = "cuda:0" +PATH = "./cifar_net.pth" + + +def main(): + transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) + + batch_size = 4 + + trainset = torchvision.datasets.CIFAR10(root=DATASET_PATH, train=True, download=True, transform=transform) + trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2) + + testset = torchvision.datasets.CIFAR10(root=DATASET_PATH, train=False, download=True, transform=transform) + testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=2) + + net = Net() + + # (2) initializes NVFlare client API + flare.init() + + # (3) decorates with flare.train and load model from the first argument + # wraps training logic into a method + @flare.train + def train(input_model=None, total_epochs=2, lr=0.001): + net.load_state_dict(input_model.params) + + criterion = nn.CrossEntropyLoss() + optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9) + + # (optional) use GPU to speed things up + net.to(DEVICE) + # (optional) calculate total steps + steps = total_epochs * len(trainloader) + + for epoch in range(total_epochs): # loop over the dataset multiple times + + running_loss = 0.0 + for i, data in enumerate(trainloader, 0): + # get the inputs; data is a list of [inputs, labels] + # (optional) use GPU to speed things up + inputs, labels = data[0].to(DEVICE), data[1].to(DEVICE) + + # zero the parameter gradients + optimizer.zero_grad() + + # forward + backward + optimize + outputs = net(inputs) + loss = criterion(outputs, labels) + loss.backward() + optimizer.step() + + # print statistics + running_loss += loss.item() + if i % 2000 == 1999: # print every 2000 mini-batches + print(f"[{epoch + 1}, {i + 1:5d}] loss: {running_loss / 2000:.3f}") + running_loss = 0.0 + + print("Finished Training") + + torch.save(net.state_dict(), PATH) + + # (4) construct 
trained FL model + output_model = flare.FLModel(params=net.cpu().state_dict(), meta={"NUM_STEPS_CURRENT_ROUND": steps}) + return output_model + + # (5) decorates with flare.evaluate and load model from the first argument + @flare.evaluate + def fl_evaluate(input_model=None): + return evaluate(input_weights=input_model.params) + + # wraps evaluate logic into a method + def evaluate(input_weights): + net.load_state_dict(input_weights) + # (optional) use GPU to speed things up + net.to(DEVICE) + + correct = 0 + total = 0 + # since we're not training, we don't need to calculate the gradients for our outputs + with torch.no_grad(): + for data in testloader: + # (optional) use GPU to speed things up + images, labels = data[0].to(DEVICE), data[1].to(DEVICE) + # calculate outputs by running images through the network + outputs = net(images) + # the class with the highest energy is what we choose as prediction + _, predicted = torch.max(outputs.data, 1) + total += labels.size(0) + correct += (predicted == labels).sum().item() + + # return evaluation metrics + return 100 * correct // total + + while flare.is_running(): + # (6) receives FLModel from NVFlare + input_model = flare.receive() + print(f"current_round={input_model.current_round}") + + # (7) call fl_evaluate method before training + # to evaluate on the received/aggregated model + global_metric = fl_evaluate(input_model) + print(f"Accuracy of the global model on the 10000 test images: {global_metric} %") + # call train method + train(input_model, total_epochs=2, lr=0.001) + # call evaluate method + metric = evaluate(input_weights=torch.load(PATH)) + print(f"Accuracy of the trained model on the 10000 test images: {metric} %") + + +if __name__ == "__main__": + main() diff --git a/tests/integration_test/data/jobs/decorator_in_process/app/custom/net.py b/tests/integration_test/data/jobs/decorator_in_process/app/custom/net.py new file mode 100644 index 0000000000..47ac7e9589 --- /dev/null +++ 
b/tests/integration_test/data/jobs/decorator_in_process/app/custom/net.py @@ -0,0 +1,37 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class Net(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(3, 6, 5) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, x): + x = self.pool(F.relu(self.conv1(x))) + x = self.pool(F.relu(self.conv2(x))) + x = torch.flatten(x, 1) # flatten all dimensions except batch + x = F.relu(self.fc1(x)) + x = F.relu(self.fc2(x)) + x = self.fc3(x) + return x diff --git a/tests/integration_test/data/jobs/decorator_in_process/meta.conf b/tests/integration_test/data/jobs/decorator_in_process/meta.conf new file mode 100644 index 0000000000..113a25d240 --- /dev/null +++ b/tests/integration_test/data/jobs/decorator_in_process/meta.conf @@ -0,0 +1,11 @@ +{ + name = "decorator" + resource_spec {} + deploy_map { + app = [ + "@ALL" + ] + } + min_clients = 2 + mandatory_clients = [] +} diff --git a/tests/integration_test/data/jobs/lightning/app/custom/cifar10_lightning_fl.py b/tests/integration_test/data/jobs/lightning/app/custom/cifar10_lightning_fl.py index 2a42f0e0a8..7bf2b1e2e9 100644 --- 
a/tests/integration_test/data/jobs/lightning/app/custom/cifar10_lightning_fl.py +++ b/tests/integration_test/data/jobs/lightning/app/custom/cifar10_lightning_fl.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration_test/data/jobs/lightning/app/custom/lit_net.py b/tests/integration_test/data/jobs/lightning/app/custom/lit_net.py index aaf150a4c5..e98275e554 100644 --- a/tests/integration_test/data/jobs/lightning/app/custom/lit_net.py +++ b/tests/integration_test/data/jobs/lightning/app/custom/lit_net.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/integration_test/data/jobs/lightning/app/custom/net.py b/tests/integration_test/data/jobs/lightning/app/custom/net.py index 031f84f432..47ac7e9589 100644 --- a/tests/integration_test/data/jobs/lightning/app/custom/net.py +++ b/tests/integration_test/data/jobs/lightning/app/custom/net.py @@ -1,4 +1,4 @@ -# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/tests/integration_test/data/jobs/lightning_in_process/app/config/config_fed_client.conf b/tests/integration_test/data/jobs/lightning_in_process/app/config/config_fed_client.conf new file mode 100644 index 0000000000..23c3f2fa11 --- /dev/null +++ b/tests/integration_test/data/jobs/lightning_in_process/app/config/config_fed_client.conf @@ -0,0 +1,37 @@ +{ + format_version = 2 + app_script = "cifar10_lightning_fl.py" + app_config = "" + executors = [ + { + tasks = [ + "train" + ] + executor { + path = "nvflare.app_opt.pt.in_process_client_api_executor.PTInProcessClientAPIExecutor" + args { + task_script_path = "{app_script}" + task_script_args = "{app_config}" + params_transfer_type = "DIFF" + train_with_evaluation = true + result_pull_interval = 0.5 + log_pull_interval = 0.1 + } + } + } + ] + task_data_filters = [] + task_result_filters = [] + components = [ + { + id = "event_to_fed" + name = "ConvertToFedEvent" + args { + events_to_convert = [ + "analytix_log_stats" + ] + fed_event_prefix = "fed." 
+ } + } + ] +} diff --git a/tests/integration_test/data/jobs/lightning_in_process/app/config/config_fed_server.conf b/tests/integration_test/data/jobs/lightning_in_process/app/config/config_fed_server.conf new file mode 100644 index 0000000000..45811d3380 --- /dev/null +++ b/tests/integration_test/data/jobs/lightning_in_process/app/config/config_fed_server.conf @@ -0,0 +1,62 @@ +{ + format_version = 2 + task_data_filters = [] + task_result_filters = [] + model_class_path = "lit_net.LitNet" + workflows = [ + { + id = "scatter_and_gather" + path = "nvflare.app_common.workflows.scatter_and_gather.ScatterAndGather" + args { + min_clients = 2 + num_rounds = 2 + start_round = 0 + wait_time_after_min_received = 0 + aggregator_id = "aggregator" + persistor_id = "persistor" + shareable_generator_id = "shareable_generator" + train_task_name = "train" + train_timeout = 0 + } + } + ] + components = [ + { + id = "persistor" + path = "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor" + args { + model { + path = "{model_class_path}" + } + } + } + { + id = "shareable_generator" + path = "nvflare.app_common.shareablegenerators.full_model_shareable_generator.FullModelShareableGenerator" + args {} + } + { + id = "aggregator" + path = "nvflare.app_common.aggregators.intime_accumulate_model_aggregator.InTimeAccumulateWeightedAggregator" + args { + expected_data_kind = "WEIGHT_DIFF" + } + } + { + id = "model_selector" + path = "nvflare.app_common.widgets.intime_model_selector.IntimeModelSelector" + args { + key_metric = "val_acc_epoch" + } + } + { + id = "receiver" + path = "nvflare.app_opt.tracking.tb.tb_receiver.TBAnalyticsReceiver" + args { + events = [ + "fed.analytix_log_stats" + ] + } + } + ] +} diff --git a/tests/integration_test/data/jobs/lightning_in_process/app/custom/cifar10_lightning_fl.py b/tests/integration_test/data/jobs/lightning_in_process/app/custom/cifar10_lightning_fl.py new file mode 100644 index 0000000000..7bf2b1e2e9 --- /dev/null +++ 
b/tests/integration_test/data/jobs/lightning_in_process/app/custom/cifar10_lightning_fl.py @@ -0,0 +1,105 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +import torchvision +import torchvision.transforms as transforms +from lit_net import LitNet +from pytorch_lightning import LightningDataModule, Trainer, seed_everything +from torch.utils.data import DataLoader, random_split + +# (1) import nvflare lightning client API +import nvflare.client.lightning as flare + +seed_everything(7) + + +DATASET_PATH = "/tmp/nvflare/data" +BATCH_SIZE = 4 + +transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) + + +class CIFAR10DataModule(LightningDataModule): + def __init__(self, data_dir: str = DATASET_PATH, batch_size: int = BATCH_SIZE): + super().__init__() + self.data_dir = data_dir + self.batch_size = batch_size + + def prepare_data(self): + torchvision.datasets.CIFAR10(root=self.data_dir, train=True, download=True, transform=transform) + torchvision.datasets.CIFAR10(root=self.data_dir, train=False, download=True, transform=transform) + + def setup(self, stage: str): + # Assign train/val datasets for use in dataloaders + if stage == "fit" or stage == "validate": + cifar_full = torchvision.datasets.CIFAR10( + root=self.data_dir, train=True, download=False, transform=transform + ) + self.cifar_train, self.cifar_val = random_split(cifar_full, 
[0.8, 0.2]) + + # Assign test dataset for use in dataloader(s) + if stage == "test" or stage == "predict": + self.cifar_test = torchvision.datasets.CIFAR10( + root=self.data_dir, train=False, download=False, transform=transform + ) + + def train_dataloader(self): + return DataLoader(self.cifar_train, batch_size=self.batch_size) + + def val_dataloader(self): + return DataLoader(self.cifar_val, batch_size=self.batch_size) + + def test_dataloader(self): + return DataLoader(self.cifar_test, batch_size=self.batch_size) + + def predict_dataloader(self): + return DataLoader(self.cifar_test, batch_size=self.batch_size) + + +def main(): + model = LitNet() + cifar10_dm = CIFAR10DataModule() + + trainer = Trainer(max_epochs=1, devices=1 if torch.cuda.is_available() else None) + # (2) patch the lightning trainer + flare.patch(trainer) + + while flare.is_running(): + # (3) receives FLModel from NVFlare + # Note that we don't need to pass this input_model to trainer + # because after flare.patch the trainer.fit/validate will get the + # global model internally + input_model = flare.receive() + print(f"current_round={input_model.current_round}") + + # (4) evaluate the current global model to allow server-side model selection + print("--- validate global model ---") + trainer.validate(model, datamodule=cifar10_dm) + + # perform local training starting with the received global model + print("--- train new model ---") + trainer.fit(model, datamodule=cifar10_dm) + + # test local model + print("--- test new model ---") + trainer.test(ckpt_path="best", datamodule=cifar10_dm) + + # get predictions + print("--- prediction with new best model ---") + trainer.predict(ckpt_path="best", datamodule=cifar10_dm) + + +if __name__ == "__main__": + main() diff --git a/tests/integration_test/data/jobs/lightning_in_process/app/custom/lit_net.py b/tests/integration_test/data/jobs/lightning_in_process/app/custom/lit_net.py new file mode 100644 index 0000000000..e98275e554 --- /dev/null +++ 
b/tests/integration_test/data/jobs/lightning_in_process/app/custom/lit_net.py @@ -0,0 +1,72 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any + +import torch.nn as nn +import torch.optim as optim +from net import Net +from pytorch_lightning import LightningModule +from torchmetrics import Accuracy + +NUM_CLASSES = 10 +criterion = nn.CrossEntropyLoss() + + +class LitNet(LightningModule): + def __init__(self): + super().__init__() + self.save_hyperparameters() + self.model = Net() + self.train_acc = Accuracy(task="multiclass", num_classes=NUM_CLASSES) + self.valid_acc = Accuracy(task="multiclass", num_classes=NUM_CLASSES) + # (optional) pass additional information via self.__fl_meta__ + self.__fl_meta__ = {} + + def forward(self, x): + out = self.model(x) + return out + + def training_step(self, batch, batch_idx): + x, labels = batch + outputs = self(x) + loss = criterion(outputs, labels) + self.train_acc(outputs, labels) + self.log("train_loss", loss) + self.log("train_acc", self.train_acc, on_step=True, on_epoch=False) + return loss + + def evaluate(self, batch, stage=None): + x, labels = batch + outputs = self(x) + loss = criterion(outputs, labels) + self.valid_acc(outputs, labels) + + if stage: + self.log(f"{stage}_loss", loss) + self.log(f"{stage}_acc", self.valid_acc, on_step=True, on_epoch=True) + return outputs + + def validation_step(self, batch, batch_idx): + 
self.evaluate(batch, "val") + + def test_step(self, batch, batch_idx): + self.evaluate(batch, "test") + + def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> Any: + return self.evaluate(batch) + + def configure_optimizers(self): + optimizer = optim.SGD(self.parameters(), lr=0.001, momentum=0.9) + return {"optimizer": optimizer} diff --git a/tests/integration_test/data/jobs/lightning_in_process/app/custom/net.py b/tests/integration_test/data/jobs/lightning_in_process/app/custom/net.py new file mode 100644 index 0000000000..47ac7e9589 --- /dev/null +++ b/tests/integration_test/data/jobs/lightning_in_process/app/custom/net.py @@ -0,0 +1,37 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class Net(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(3, 6, 5) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, x): + x = self.pool(F.relu(self.conv1(x))) + x = self.pool(F.relu(self.conv2(x))) + x = torch.flatten(x, 1) # flatten all dimensions except batch + x = F.relu(self.fc1(x)) + x = F.relu(self.fc2(x)) + x = self.fc3(x) + return x diff --git a/tests/integration_test/data/jobs/lightning_in_process/meta.conf b/tests/integration_test/data/jobs/lightning_in_process/meta.conf new file mode 100644 index 0000000000..f50197ccb0 --- /dev/null +++ b/tests/integration_test/data/jobs/lightning_in_process/meta.conf @@ -0,0 +1,11 @@ +{ + name = "lightning" + resource_spec {} + deploy_map { + app = [ + "@ALL" + ] + } + min_clients = 2 + mandatory_clients = [] +} diff --git a/tests/integration_test/data/jobs/pt_client_api_in_process/app/config/config_fed_client.conf b/tests/integration_test/data/jobs/pt_client_api_in_process/app/config/config_fed_client.conf new file mode 100644 index 0000000000..6a1b31708f --- /dev/null +++ b/tests/integration_test/data/jobs/pt_client_api_in_process/app/config/config_fed_client.conf @@ -0,0 +1,37 @@ +{ + format_version = 2 + app_script = "cifar10_fl.py" + app_config = "" + executors = [ + { + tasks = [ + "train" + ] + executor { + path = "nvflare.app_opt.pt.in_process_client_api_executor.PTInProcessClientAPIExecutor" + args { + task_script_path = "{app_script}" + task_script_args = "{app_config}" + params_transfer_type = "DIFF" + train_with_evaluation = true + result_pull_interval = 0.5 + log_pull_interval = 0.1 + } + } + } + ] + task_data_filters = [] + task_result_filters = [] + components = [ + { + id = "event_to_fed" + name = "ConvertToFedEvent" + args { + events_to_convert = [ + 
"analytix_log_stats" + ] + fed_event_prefix = "fed." + } + } + ] +} diff --git a/tests/integration_test/data/jobs/pt_client_api_in_process/app/config/config_fed_server.conf b/tests/integration_test/data/jobs/pt_client_api_in_process/app/config/config_fed_server.conf new file mode 100644 index 0000000000..267a815c72 --- /dev/null +++ b/tests/integration_test/data/jobs/pt_client_api_in_process/app/config/config_fed_server.conf @@ -0,0 +1,88 @@ +{ + format_version = 2 + task_data_filters = [] + task_result_filters = [] + model_class_path = "net.Net" + workflows = [ + { + id = "scatter_and_gather" + path = "nvflare.app_common.workflows.scatter_and_gather.ScatterAndGather" + args { + min_clients = 2 + num_rounds = 3 + start_round = 0 + wait_time_after_min_received = 0 + aggregator_id = "aggregator" + persistor_id = "persistor" + shareable_generator_id = "shareable_generator" + train_task_name = "train" + train_timeout = 0 + } + } + ] + components = [ + { + id = "persistor" + path = "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor" + args { + model { + path = "{model_class_path}" + } + } + } + { + id = "shareable_generator" + path = "nvflare.app_common.shareablegenerators.full_model_shareable_generator.FullModelShareableGenerator" + args {} + } + { + id = "aggregator" + path = "nvflare.app_common.aggregators.intime_accumulate_model_aggregator.InTimeAccumulateWeightedAggregator" + args { + expected_data_kind = "WEIGHT_DIFF" + } + } + { + id = "model_selector" + path = "nvflare.app_common.widgets.intime_model_selector.IntimeModelSelector" + args { + key_metric = "accuracy" + } + } + { + id = "receiver" + path = "nvflare.app_opt.tracking.tb.tb_receiver.TBAnalyticsReceiver" + args { + events = [ + "fed.analytix_log_stats" + ] + } + } + { + id = "mlflow_receiver" + path = "nvflare.app_opt.tracking.mlflow.mlflow_receiver.MLflowReceiver" + args { + tracking_uri = "" + kwargs { + experiment_name = "nvflare-sag-pt-experiment" + run_name = "nvflare-sag-pt-with-mlflow" 
+                experiment_tags {
+                  "mlflow.note.content" = "## **NVFlare SAG PyTorch experiment with MLflow**"
+                }
+                run_tags {
+                  "mlflow.note.content" = """## Federated Experiment tracking with MLflow
+### Example of using **[NVIDIA FLARE](https://nvflare.readthedocs.io/en/main/index.html)** to train an image classifier using federated averaging ([FedAvg](https://arxiv.org/abs/1602.05629)) and [PyTorch](https://pytorch.org/) as the deep learning training framework. This example also highlights the NVFlare streaming capability from the clients to the server.
+
+> **_NOTE:_** 
+ This example uses the *[CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html)* dataset and will load its data within the trainer code.
+"""
+                }
+              }
+              artifact_location = "artifacts"
+              events = [
+                "fed.analytix_log_stats"
+              ]
+            }
+          }
+        ]
+}
diff --git a/tests/integration_test/data/jobs/pt_client_api_in_process/app/custom/cifar10_fl.py b/tests/integration_test/data/jobs/pt_client_api_in_process/app/custom/cifar10_fl.py
new file mode 100644
index 0000000000..eaaabff176
--- /dev/null
+++ b/tests/integration_test/data/jobs/pt_client_api_in_process/app/custom/cifar10_fl.py
@@ -0,0 +1,137 @@
+# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +import torch +import torch.nn as nn +import torch.optim as optim +import torchvision +import torchvision.transforms as transforms +from net import Net + +# (1) import nvflare client API +import nvflare.client as flare + +# (optional) metrics +from nvflare.client.tracking import SummaryWriter + +# (optional) set a fix place so we don't need to download everytime +DATASET_PATH = "/tmp/nvflare/data" +# (optional) We change to use GPU to speed things up. +# if you want to use CPU, change DEVICE="cpu" +DEVICE = "cuda:0" + + +def main(): + transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) + + batch_size = 4 + epochs = 2 + + trainset = torchvision.datasets.CIFAR10(root=DATASET_PATH, train=True, download=True, transform=transform) + trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=2) + + testset = torchvision.datasets.CIFAR10(root=DATASET_PATH, train=False, download=True, transform=transform) + testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=2) + + net = Net() + + # (2) initializes NVFlare client API + flare.init() + + summary_writer = SummaryWriter() + while flare.is_running(): + # (3) receives FLModel from NVFlare + input_model = flare.receive() + print(f"\n[Current Round={input_model.current_round}, Site = {flare.get_site_name()}]\n") + + # (4) loads model from NVFlare + net.load_state_dict(input_model.params) + + criterion = nn.CrossEntropyLoss() + optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9) + + # (optional) use GPU to speed things up + net.to(DEVICE) + # (optional) calculate total steps + steps = epochs * len(trainloader) + for epoch in range(epochs): # loop over the dataset multiple times + + running_loss = 0.0 + for i, data in enumerate(trainloader, 0): + # get the inputs; data is a list of [inputs, labels] + # (optional) use GPU to speed things up + inputs, labels = 
data[0].to(DEVICE), data[1].to(DEVICE) + + # zero the parameter gradients + optimizer.zero_grad() + + # forward + backward + optimize + outputs = net(inputs) + loss = criterion(outputs, labels) + loss.backward() + optimizer.step() + + # print statistics + running_loss += loss.item() + if i % 2000 == 1999: # print every 2000 mini-batches + print(f"[{epoch + 1}, {i + 1:5d}] loss: {running_loss / 2000:.3f}") + global_step = input_model.current_round * steps + epoch * len(trainloader) + i + + summary_writer.add_scalar(tag="loss_for_each_batch", scalar=running_loss, global_step=global_step) + running_loss = 0.0 + + print("Finished Training") + + PATH = "./cifar_net.pth" + torch.save(net.state_dict(), PATH) + + # (5) wraps evaluation logic into a method to re-use for + # evaluation on both trained and received model + def evaluate(input_weights): + net = Net() + net.load_state_dict(input_weights) + # (optional) use GPU to speed things up + net.to(DEVICE) + + correct = 0 + total = 0 + # since we're not training, we don't need to calculate the gradients for our outputs + with torch.no_grad(): + for data in testloader: + # (optional) use GPU to speed things up + images, labels = data[0].to(DEVICE), data[1].to(DEVICE) + # calculate outputs by running images through the network + outputs = net(images) + # the class with the highest energy is what we choose as prediction + _, predicted = torch.max(outputs.data, 1) + total += labels.size(0) + correct += (predicted == labels).sum().item() + + print(f"Accuracy of the network on the 10000 test images: {100 * correct // total} %") + return 100 * correct // total + + # (6) evaluate on received model for model selection + accuracy = evaluate(input_model.params) + # (7) construct trained FL model + output_model = flare.FLModel( + params=net.cpu().state_dict(), + metrics={"accuracy": accuracy}, + meta={"NUM_STEPS_CURRENT_ROUND": steps}, + ) + # (8) send model back to NVFlare + flare.send(output_model) + + +if __name__ == "__main__": + 
main() diff --git a/tests/integration_test/data/jobs/pt_client_api_in_process/app/custom/net.py b/tests/integration_test/data/jobs/pt_client_api_in_process/app/custom/net.py new file mode 100644 index 0000000000..47ac7e9589 --- /dev/null +++ b/tests/integration_test/data/jobs/pt_client_api_in_process/app/custom/net.py @@ -0,0 +1,37 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class Net(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(3, 6, 5) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = nn.Conv2d(6, 16, 5) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.fc2 = nn.Linear(120, 84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, x): + x = self.pool(F.relu(self.conv1(x))) + x = self.pool(F.relu(self.conv2(x))) + x = torch.flatten(x, 1) # flatten all dimensions except batch + x = F.relu(self.fc1(x)) + x = F.relu(self.fc2(x)) + x = self.fc3(x) + return x diff --git a/tests/integration_test/data/jobs/pt_client_api_in_process/meta.conf b/tests/integration_test/data/jobs/pt_client_api_in_process/meta.conf new file mode 100644 index 0000000000..d274eff79a --- /dev/null +++ b/tests/integration_test/data/jobs/pt_client_api_in_process/meta.conf @@ -0,0 +1,11 @@ +{ + name = "pt_client_api_in_process" + resource_spec {} + deploy_map { + app = [ + "@ALL" + ] + } + min_clients = 2 + mandatory_clients = [] 
+} diff --git a/tests/integration_test/data/test_configs/standalone_job/client_api.yml b/tests/integration_test/data/test_configs/standalone_job/client_api.yml index c8c030a79b..611135a07c 100644 --- a/tests/integration_test/data/test_configs/standalone_job/client_api.yml +++ b/tests/integration_test/data/test_configs/standalone_job/client_api.yml @@ -107,7 +107,7 @@ tests: - python -c "from torchvision.datasets import CIFAR10; CIFAR10(root='/tmp/nvflare/data', train=True, download=True)" teardown: - rm -rf /tmp/nvflare/data - - test_name: "run pt-decorator" + - test_name: "run decorator" event_sequence: - "trigger": "type": "server_log" @@ -126,7 +126,7 @@ tests: - python -c "from torchvision.datasets import CIFAR10; CIFAR10(root='/tmp/nvflare/data', train=True, download=True)" teardown: - rm -rf /tmp/nvflare/data - - test_name: "run lightning" + - test_name: "run lightning-client-api" event_sequence: - "trigger": "type": "server_log" @@ -146,3 +146,61 @@ tests: - python -c "from torchvision.datasets import CIFAR10; CIFAR10(root='/tmp/nvflare/data', train=True, download=True)" teardown: - rm -rf /tmp/nvflare/data + - test_name: "run pt-client-api-in-process" + event_sequence: + - "trigger": + "type": "server_log" + "data": "Server started" + "actions": [ "submit_job pt_client_api_in_process" ] + "result": + "type": "job_submit_success" + - "trigger": + "type": "run_state" + "data": { "run_finished": True } + "actions": [ "ensure_current_job_done" ] + "result": + "type": "run_state" + "data": { "run_finished": True } + setup: + - python -c "from torchvision.datasets import CIFAR10; CIFAR10(root='/tmp/nvflare/data', train=True, download=True)" + teardown: + - rm -rf /tmp/nvflare/data + - test_name: "run decorator-in-process" + event_sequence: + - "trigger": + "type": "server_log" + "data": "Server started" + "actions": [ "submit_job decorator_in_process" ] + "result": + "type": "job_submit_success" + - "trigger": + "type": "run_state" + "data": { "run_finished": 
True } + "actions": [ "ensure_current_job_done" ] + "result": + "type": "run_state" + "data": { "run_finished": True } + setup: + - python -c "from torchvision.datasets import CIFAR10; CIFAR10(root='/tmp/nvflare/data', train=True, download=True)" + teardown: + - rm -rf /tmp/nvflare/data + - test_name: "run lightning-client-api-in-process" + event_sequence: + - "trigger": + "type": "server_log" + "data": "Server started" + "actions": [ "submit_job lightning_in_process" ] + "result": + "type": "job_submit_success" + - "trigger": + "type": "run_state" + "data": { "run_finished": True } + "actions": [ "ensure_current_job_done" ] + "result": + "type": "run_state" + "data": { "run_finished": True } + setup: + - python -m pip install pytorch_lightning + - python -c "from torchvision.datasets import CIFAR10; CIFAR10(root='/tmp/nvflare/data', train=True, download=True)" + teardown: + - rm -rf /tmp/nvflare/data diff --git a/tests/unit_test/app_opt/__init__.py b/tests/unit_test/app_opt/__init__.py index d9155f923f..4fc50543f1 100644 --- a/tests/unit_test/app_opt/__init__.py +++ b/tests/unit_test/app_opt/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit_test/fuel/utils/fobs/datum_test.py b/tests/unit_test/fuel/utils/fobs/datum_test.py index 5255a3e3dd..b0d5f2b68d 100644 --- a/tests/unit_test/fuel/utils/fobs/datum_test.py +++ b/tests/unit_test/fuel/utils/fobs/datum_test.py @@ -1,4 +1,4 @@ -# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
From 918f248be010e32277d68736d71d3160865aba14 Mon Sep 17 00:00:00 2001 From: Sean Yang Date: Tue, 7 May 2024 16:58:06 -0700 Subject: [PATCH 12/21] Add client controller executor (#2530) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * add client controller executor * address comments * enhance abort, set peer props * remove asserts --------- Co-authored-by: Yuan-Ting Hsieh (謝沅廷) --- .../sag_cse_ccwf_pt/config_fed_client.conf | 183 ++++++++ .../sag_cse_ccwf_pt/config_fed_server.conf | 39 ++ job_templates/sag_cse_ccwf_pt/info.conf | 5 + job_templates/sag_cse_ccwf_pt/info.md | 11 + job_templates/sag_cse_ccwf_pt/meta.conf | 8 + nvflare/apis/controller_spec.py | 2 +- nvflare/apis/impl/controller.py | 32 +- nvflare/apis/impl/wf_comm_client.py | 83 +++- .../ccwf/client_controller_executor.py | 391 ++++++++++++++++++ nvflare/app_common/ccwf/common.py | 1 + nvflare/app_common/ccwf/server_ctl.py | 14 +- .../app_common/workflows/model_controller.py | 18 +- nvflare/fuel/utils/validation_utils.py | 11 +- .../private/fed/client/client_run_manager.py | 6 + nvflare/private/fed/utils/fed_utils.py | 2 +- 15 files changed, 765 insertions(+), 41 deletions(-) create mode 100644 job_templates/sag_cse_ccwf_pt/config_fed_client.conf create mode 100644 job_templates/sag_cse_ccwf_pt/config_fed_server.conf create mode 100644 job_templates/sag_cse_ccwf_pt/info.conf create mode 100644 job_templates/sag_cse_ccwf_pt/info.md create mode 100644 job_templates/sag_cse_ccwf_pt/meta.conf create mode 100644 nvflare/app_common/ccwf/client_controller_executor.py diff --git a/job_templates/sag_cse_ccwf_pt/config_fed_client.conf b/job_templates/sag_cse_ccwf_pt/config_fed_client.conf new file mode 100644 index 0000000000..0ee5e16152 --- /dev/null +++ b/job_templates/sag_cse_ccwf_pt/config_fed_client.conf @@ -0,0 +1,183 @@ +{ + # version of the configuration + format_version = 2 + + # This is the application script which will be invoked. 
Client can replace this script with user's own training script.
+  app_script = "train.py"
+
+  # Additional arguments needed by the training code. For example, in lightning, these can be --trainer.batch_size=xxx.
+  app_config = ""
+
+  # Path to defined PyTorch network
+  # This assumes that there will be a "net.py" file with class name "Net", please modify accordingly
+  model_class_path = "net.Net"
+
+  # Client Computing Executors.
+  executors = [
+    {
+      # tasks the executors are defined to handle
+      tasks = [
+        "train",
+        "validate",
+        "submit_model"
+      ]
+
+      # This particular executor
+      executor {
+
+        # This is an executor for pytorch + Client API. The underlying data exchange is using Pipe.
+        path = "nvflare.app_opt.pt.client_api_launcher_executor.PTClientAPILauncherExecutor"
+
+        args {
+
+          # launcher_id is used to locate the Launcher object in "components"
+          launcher_id = "launcher"
+
+          # pipe_id is used to locate the Pipe object in "components"
+          pipe_id = "pipe"
+
+          # Timeout in seconds for waiting for a heartbeat from the training script. Defaults to 30 seconds.
+ # Please refer to the class docstring for all available arguments + heartbeat_timeout = 60 + + # format of the exchange parameters + params_exchange_format = "pytorch" + + # if the transfer_type is FULL, then it will be sent directly + # if the transfer_type is DIFF, then we will calculate the + # difference VS received parameters and send the difference + params_transfer_type = "FULL" + # if train_with_evaluation is true, the executor will expect + # the custom code need to send back both the trained parameters and the evaluation metric + # otherwise only trained parameters are expected + train_with_evaluation = true + + train_task_name = "train" + evaluate_task_name = "validate" + submit_model_task_name = "submit_model" + } + } + } + { + # All tasks prefixed with wf_ are routed to this ClientControllerExecutor + tasks = ["wf_*"] + executor { + id = "client_controller_executor" + path = "nvflare.app_common.ccwf.client_controller_executor.ClientControllerExecutor" + # ClientControllerExecutor for running controllers on client-side. + args { + # list of controller ids from components to be run in order + controller_id_list = ["sag_ctl", "cse_ctl"] + task_name_prefix = "wf" + # persistor used to distribute and save final results for clients + persistor_id = "persistor" + } + } + } + ] + + # Array of task data filters. If provided, it will control the data from client controller to client executor + # Filter direction (in, out, inout) can be set as since clients send tasks to each other, a task has both a sending (out) and a receiving (in) direction + task_data_filters = [] + + # Array of task result filters. 
If provided, it will control the data from client executor to client controller + # Filter direction (in, out, inout) can be set as since clients send tasks to each other, a task has both a sending (out) and a receiving (in) direction + task_result_filters = [] + + components = [ + { + id = "sag_ctl" + path = "nvflare.app_common.workflows.scatter_and_gather.ScatterAndGather" + args { + min_clients = 2 + num_rounds = 3 + start_round = 0 + wait_time_after_min_received = 0 + aggregator_id = "aggregator" + persistor_id = "persistor" + shareable_generator_id = "shareable_generator" + train_task_name = "train" + train_timeout = 0 + } + } + { + id = "cse_ctl", + path = "nvflare.app_common.workflows.cross_site_model_eval.CrossSiteModelEval", + args { + model_locator_id = "model_locator", + submit_model_timeout = 600, + validation_timeout = 6000, + cleanup_models = false + } + } + { + # component id is "launcher" + id = "launcher" + + # the class path of this component + path = "nvflare.app_common.launchers.subprocess_launcher.SubprocessLauncher" + + args { + # the launcher will invoke the script + script = "python3 custom/{app_script} {app_config} " + # if launch_once is true, the SubprocessLauncher will launch once for the whole job + # if launch_once is false, the SubprocessLauncher will launch a process for each task it receives from server + launch_once = true + } + } + { + id = "pipe" + + path = "nvflare.fuel.utils.pipe.file_pipe.FilePipe" + + args { + # Mode of the endpoint. A pipe has two endpoints. + # An endpoint can be either the one that initiates communication or the one listening. + # PASSIVE is the one listening. + mode = "PASSIVE" + + # root_path: is the directory location of the parameters exchange. + # You can also set it to an absolute path in your system. 
+ root_path = "{WORKSPACE}/{JOB_ID}/{SITE_NAME}" + } + } + # required components for the client-controlled workflow defined on client-side + { + id = "persistor" + path = "nvflare.app_opt.pt.file_model_persistor.PTFileModelPersistor" + args.model.path = "{model_class_path}" + } + { + id = "shareable_generator" + path = "nvflare.app_common.shareablegenerators.full_model_shareable_generator.FullModelShareableGenerator" + args = {} + } + { + # This is the aggregator that perform the weighted average aggregation. + # the aggregation is "in-time", so it doesn't wait for client results, but aggregates as soon as it received the data. + id = "aggregator" + path = "nvflare.app_common.aggregators.intime_accumulate_model_aggregator.InTimeAccumulateWeightedAggregator" + args.expected_data_kind = "WEIGHTS" + }, + { + id = "model_locator" + name = "PTFileModelLocator" + args { + pt_persistor_id = "persistor" + } + }, + { + # This component is not directly used in Workflow. + # it select the best model based on the incoming global validation metrics. + id = "model_selector" + path = "nvflare.app_common.widgets.intime_model_selector.IntimeModelSelector" + # need to make sure this "key_metric" match what server side received + args.key_metric = "accuracy" + }, + { + id = "json_generator" + name = "ValidationJsonGenerator" + args {} + } + ] +} diff --git a/job_templates/sag_cse_ccwf_pt/config_fed_server.conf b/job_templates/sag_cse_ccwf_pt/config_fed_server.conf new file mode 100644 index 0000000000..514c8cc8fe --- /dev/null +++ b/job_templates/sag_cse_ccwf_pt/config_fed_server.conf @@ -0,0 +1,39 @@ +{ + # version of the configuration + format_version = 2 + + # task data filter: if filters are provided, the filter will filter the data flow out of server to client. + task_data_filters =[] + + # task result filter: if filters are provided, the filter will filter the result flow out of client to server. 
+ task_result_filters = [] + + # This assumes that there will be a "net.py" file with class name "Net". + # If your model code is not in "net.py" and class name is not "Net", please modify here + model_class_path = "net.Net" + + # workflows: Array of workflows the control the Federated Learning workflow lifecycle. + # One can specify multiple workflows. The NVFLARE will run them in the order specified. + workflows = [ + { + # server-side controller to manage job life cycle and configuration + id = "svr_ctl" + path = "nvflare.app_common.ccwf.server_ctl.ServerSideController" + args { + # the prefix for task names of this workflow + task_name_prefix = "wf" + # the maximum amount of time allowed for a client to miss a status report + max_status_report_interval = 300 + # policy to choose which client to run the controller logic from + starting_client_policy = "random" + # timeout for the ClientControllerExecutor start task, which runs all of the controllers + start_task_timeout = 600 + } + } + ] + + # List of components used in the server side workflow. 
+ components = [ + ] + +} diff --git a/job_templates/sag_cse_ccwf_pt/info.conf b/job_templates/sag_cse_ccwf_pt/info.conf new file mode 100644 index 0000000000..850794a095 --- /dev/null +++ b/job_templates/sag_cse_ccwf_pt/info.conf @@ -0,0 +1,5 @@ +{ + description = "Client Controller FedAvg and cross-site evaluation with PyTorch" + execution_api_type = "client_api" + controller_type = "client" +} \ No newline at end of file diff --git a/job_templates/sag_cse_ccwf_pt/info.md b/job_templates/sag_cse_ccwf_pt/info.md new file mode 100644 index 0000000000..e2a768a04a --- /dev/null +++ b/job_templates/sag_cse_ccwf_pt/info.md @@ -0,0 +1,11 @@ +# Job Template Information Card + +## sag_cse_ccwf_pt + name = "sag_cse_ccwf_pt" + description = "Client Controller FedAvg with scatter & gather workflow and cross-site evaluation with PyTorch" + class_name = "ClientControllerExecutor" + controller_type = "client" + executor_type = "launcher_executor" + contributor = "NVIDIA" + init_publish_date = "2024-04-25" + last_updated_date = "2024-04-25" diff --git a/job_templates/sag_cse_ccwf_pt/meta.conf b/job_templates/sag_cse_ccwf_pt/meta.conf new file mode 100644 index 0000000000..155d780e05 --- /dev/null +++ b/job_templates/sag_cse_ccwf_pt/meta.conf @@ -0,0 +1,8 @@ +name = "sag_cse_ccwf_pt" +resource_spec {} +min_clients = 2 +deploy_map { + app = [ + "@ALL" + ] +} diff --git a/nvflare/apis/controller_spec.py b/nvflare/apis/controller_spec.py index 2f18f95623..772fe54d9b 100644 --- a/nvflare/apis/controller_spec.py +++ b/nvflare/apis/controller_spec.py @@ -88,7 +88,7 @@ def __init__( name (str): name of the task data (Shareable): data of the task props: Any additional properties of the task - timeout: How long this task will last. If == 0, the task never time out. + timeout: How long this task will last. If == 0, the task never time out (WFCommServer-> never time out, WFCommClient-> time out after `max_task_timeout`). 
before_task_sent_cb: If provided, this callback would be called before controller sends the tasks to clients. It needs to follow the before_task_sent_cb_signature. after_task_sent_cb: If provided, this callback would be called after controller sends the tasks to clients. diff --git a/nvflare/apis/impl/controller.py b/nvflare/apis/impl/controller.py index 924512f77b..d8646145c2 100644 --- a/nvflare/apis/impl/controller.py +++ b/nvflare/apis/impl/controller.py @@ -60,7 +60,7 @@ def broadcast( min_responses: int = 1, wait_time_after_min_received: int = 0, ): - self.communicator.broadcast(task, fl_ctx, targets, min_responses, wait_time_after_min_received) + return self.communicator.broadcast(task, fl_ctx, targets, min_responses, wait_time_after_min_received) def broadcast_and_wait( self, @@ -71,12 +71,12 @@ def broadcast_and_wait( wait_time_after_min_received: int = 0, abort_signal: Optional[Signal] = None, ): - self.communicator.broadcast_and_wait( + return self.communicator.broadcast_and_wait( task, fl_ctx, targets, min_responses, wait_time_after_min_received, abort_signal ) def broadcast_forever(self, task: Task, fl_ctx: FLContext, targets: Union[List[Client], List[str], None] = None): - self.communicator.broadcast_forever(task, fl_ctx, targets) + return self.communicator.broadcast_forever(task, fl_ctx, targets) def send( self, @@ -86,7 +86,7 @@ def send( send_order: SendOrder = SendOrder.SEQUENTIAL, task_assignment_timeout: int = 0, ): - self.communicator.send(task, fl_ctx, targets, send_order, task_assignment_timeout) + return self.communicator.send(task, fl_ctx, targets, send_order, task_assignment_timeout) def send_and_wait( self, @@ -97,7 +97,7 @@ def send_and_wait( task_assignment_timeout: int = 0, abort_signal: Signal = None, ): - self.communicator.send_and_wait(task, fl_ctx, targets, send_order, task_assignment_timeout, abort_signal) + return self.communicator.send_and_wait(task, fl_ctx, targets, send_order, task_assignment_timeout, abort_signal) def relay( 
self, @@ -109,7 +109,7 @@ def relay( task_result_timeout: int = 0, dynamic_targets: bool = True, ): - self.communicator.relay( + return self.communicator.relay( task, fl_ctx, targets, send_order, task_assignment_timeout, task_result_timeout, dynamic_targets ) @@ -124,7 +124,7 @@ def relay_and_wait( dynamic_targets: bool = True, abort_signal: Optional[Signal] = None, ): - self.communicator.relay_and_wait( + return self.communicator.relay_and_wait( task, fl_ctx, targets, @@ -136,7 +136,11 @@ def relay_and_wait( ) def get_num_standing_tasks(self) -> int: - return self.communicator.get_num_standing_tasks() + try: + return self.communicator.get_num_standing_tasks() + except Exception as e: + self.logger.warning(f"get_num_standing_tasks() is not supported by {self.communicator}: {e}") + return None def cancel_task( self, task: Task, completion_status=TaskCompletionStatus.CANCELLED, fl_ctx: Optional[FLContext] = None @@ -144,7 +148,10 @@ def cancel_task( self.communicator.cancel_task(task, completion_status, fl_ctx) def cancel_all_tasks(self, completion_status=TaskCompletionStatus.CANCELLED, fl_ctx: Optional[FLContext] = None): - self.communicator.cancel_all_tasks(completion_status, fl_ctx) + try: + self.communicator.cancel_all_tasks(completion_status, fl_ctx) + except Exception as e: + self.log_warning(fl_ctx, f"cancel_all_tasks() is not supported by {self.communicator}: {e}") def get_client_disconnect_time(self, client_name): """Get the time when the client is deemed disconnected. 
@@ -157,4 +164,9 @@ def get_client_disconnect_time(self, client_name): """ if not self.communicator: return None - return self.communicator.get_client_disconnect_time(client_name) + + try: + return self.communicator.get_client_disconnect_time(client_name) + except Exception as e: + self.logger.warning(f"get_client_disconnect_time() is not supported by {self.communicator}: {e}") + return None diff --git a/nvflare/apis/impl/wf_comm_client.py b/nvflare/apis/impl/wf_comm_client.py index a8753f5b64..8589cbeec9 100644 --- a/nvflare/apis/impl/wf_comm_client.py +++ b/nvflare/apis/impl/wf_comm_client.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import List, Union +from typing import List, Optional, Union from nvflare.apis.client import Client from nvflare.apis.controller_spec import ClientTask, SendOrder, Task, TaskCompletionStatus @@ -24,6 +24,7 @@ from nvflare.apis.signal import Signal from nvflare.apis.utils.task_utils import apply_filters from nvflare.apis.wf_comm_spec import WFCommSpec +from nvflare.app_common.ccwf.common import Constant from nvflare.private.fed.utils.fed_utils import get_target_names from nvflare.private.privacy_manager import Scope from nvflare.security.logging import secure_format_exception @@ -32,10 +33,17 @@ class WFCommClient(FLComponent, WFCommSpec): def __init__( self, + max_task_timeout: int = Constant.MAX_TASK_TIMEOUT, ) -> None: + """Communicator using aux channel communication. + + Args: + max_task_timeout (int, optional): Maximum task timeout when `task.timeout` is set to 0. Defaults to 3600. 
+ """ super().__init__() self.task_data_filters = {} self.task_result_filters = {} + self.max_task_timeout = max_task_timeout def broadcast( self, @@ -57,7 +65,6 @@ def broadcast_and_wait( abort_signal: Signal = None, ): engine = fl_ctx.get_engine() - request = task.data # apply task filters self.log_debug(fl_ctx, "firing event EventType.BEFORE_TASK_DATA_FILTER") fl_ctx.set_prop(FLContextKey.TASK_DATA, task.data, sticky=False, private=True) @@ -66,7 +73,7 @@ def broadcast_and_wait( # # first apply privacy-defined filters try: filter_name = Scope.TASK_DATA_FILTERS_NAME - task.data = apply_filters(filter_name, request, fl_ctx, self.task_data_filters, task.name, FilterKey.OUT) + task.data = apply_filters(filter_name, task.data, fl_ctx, self.task_data_filters, task.name, FilterKey.OUT) except Exception as e: self.log_exception( fl_ctx, @@ -80,6 +87,9 @@ def broadcast_and_wait( fl_ctx.set_prop(FLContextKey.TASK_DATA, task.data, sticky=False, private=True) self.fire_event(EventType.AFTER_TASK_DATA_FILTER, fl_ctx) + if targets is None: + targets = engine.all_clients.values() + target_names = get_target_names(targets) _, invalid_names = engine.validate_targets(target_names) if invalid_names: @@ -92,16 +102,22 @@ def broadcast_and_wait( task.client_tasks.append(client_task) task.last_client_task_map[client_task.id] = client_task - # task_cb_error = self._call_task_cb(task.before_task_sent_cb, client, task, fl_ctx) - # if task_cb_error: - # return self._make_error_reply(ReturnCode.ERROR, targets) + task_cb_error = self._call_task_cb(task.before_task_sent_cb, client, task, fl_ctx) + if task_cb_error: + return self._make_error_reply(ReturnCode.ERROR, targets) + + if task.timeout < 0: + raise ValueError(f"The task timeout must >= 0. But got {task.timeout}") - if task.timeout <= 0: - raise ValueError(f"The task timeout must > 0. 
But got {task.timeout}") + if task.timeout == 0: + task.timeout = self.max_task_timeout + + # Note: set request here since task.data can be modified by user callback before_task_sent_cb + request = task.data request.set_header(ReservedKey.TASK_NAME, task.name) replies = engine.send_aux_request( - targets=targets, + targets=target_names, topic=ReservedTopic.DO_TASK, request=request, timeout=task.timeout, @@ -109,10 +125,20 @@ def broadcast_and_wait( secure=task.secure, ) + for client_task in task.client_tasks: + task_cb_error = self._call_task_cb(task.after_task_sent_cb, client_task.client, client_task.task, fl_ctx) + if task_cb_error: + return self._make_error_reply(ReturnCode.ERROR, targets) + self.log_debug(fl_ctx, "firing event EventType.BEFORE_TASK_RESULT_FILTER") self.fire_event(EventType.BEFORE_TASK_RESULT_FILTER, fl_ctx) for target, reply in replies.items(): + + peer_ctx = reply.get_header(FLContextKey.PEER_CONTEXT) + peer_ctx.set_prop(FLContextKey.SHAREABLE, reply, private=True) + fl_ctx.set_peer_context(peer_ctx) + # get the client task for the target for client_task in task.client_tasks: if client_task.client.name == target: @@ -133,6 +159,9 @@ def broadcast_and_wait( client_task.result = error_reply break + if not reply.get_peer_props() and fl_ctx.get_peer_context(): + reply.set_peer_props(fl_ctx.get_peer_context().get_all_public_props()) + # assign replies to client task, prepare for the result_received_cb client_task.result = reply @@ -220,21 +249,49 @@ def send( targets: Union[List[Client], List[str], None] = None, send_order: SendOrder = SendOrder.SEQUENTIAL, task_assignment_timeout: int = 0, + ): + return self.send_and_wait(task, fl_ctx, targets, send_order, task_assignment_timeout) + + def send_and_wait( + self, + task: Task, + fl_ctx: FLContext, + targets: Union[List[Client], List[str], None] = None, + send_order: SendOrder = SendOrder.SEQUENTIAL, + task_assignment_timeout: int = 0, + abort_signal: Signal = None, ): engine = fl_ctx.get_engine() 
self._validate_target(engine, targets) - return self.send_and_wait(task, fl_ctx, targets, send_order, task_assignment_timeout) + for target in targets: + reply = self.broadcast_and_wait(task, fl_ctx, [target], abort_signal=abort_signal) + if reply.get_return_code() == ReturnCode.OK: + return reply - def send_and_wait( + def relay( self, task: Task, fl_ctx: FLContext, targets: Union[List[Client], List[str], None] = None, send_order: SendOrder = SendOrder.SEQUENTIAL, task_assignment_timeout: int = 0, - abort_signal: Signal = None, + task_result_timeout: int = 0, + dynamic_targets: bool = True, + ): + return self.relay_and_wait(task, fl_ctx, targets, send_order, task_assignment_timeout) + + def relay_and_wait( + self, + task: Task, + fl_ctx: FLContext, + targets: Union[List[Client], List[str], None] = None, + send_order=SendOrder.SEQUENTIAL, + task_assignment_timeout: int = 0, + task_result_timeout: int = 0, + dynamic_targets: bool = True, + abort_signal: Optional[Signal] = None, ): engine = fl_ctx.get_engine() @@ -249,8 +306,6 @@ def send_and_wait( def _validate_target(self, engine, targets): if len(targets) == 0: raise ValueError("Must provide a target to send.") - if len(targets) != 1: - raise ValueError("send_and_wait can only send to a single target.") target_names = get_target_names(targets) _, invalid_names = engine.validate_targets(target_names) if invalid_names: diff --git a/nvflare/app_common/ccwf/client_controller_executor.py b/nvflare/app_common/ccwf/client_controller_executor.py new file mode 100644 index 0000000000..f2b278c858 --- /dev/null +++ b/nvflare/app_common/ccwf/client_controller_executor.py @@ -0,0 +1,391 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import copy +import threading +import time +from typing import List + +from nvflare.apis.event_type import EventType +from nvflare.apis.executor import Executor +from nvflare.apis.fl_constant import FLContextKey, ReturnCode +from nvflare.apis.fl_context import FLContext +from nvflare.apis.impl.task_controller import Task +from nvflare.apis.impl.wf_comm_client import WFCommClient +from nvflare.apis.shareable import Shareable, make_reply +from nvflare.apis.signal import Signal +from nvflare.app_common.abstract.learnable import Learnable +from nvflare.app_common.abstract.learnable_persistor import LearnablePersistor +from nvflare.app_common.app_constant import AppConstants +from nvflare.app_common.ccwf.common import Constant, StatusReport, make_task_name, topic_for_end_workflow +from nvflare.fuel.utils.validation_utils import check_number_range +from nvflare.security.logging import secure_format_exception + + +class ClientControllerExecutor(Executor): + def __init__( + self, + controller_id_list: List, + task_name_prefix: str = "", + persistor_id=AppConstants.DEFAULT_PERSISTOR_ID, + final_result_ack_timeout=Constant.FINAL_RESULT_ACK_TIMEOUT, + max_task_timeout: int = Constant.MAX_TASK_TIMEOUT, + ): + """ + ClientControllerExecutor for running controllers on client-side using WFCommClient. + + Args: + controller_id_list: List of controller ids, used in order. + task_name_prefix: prefix of task names. All CCWF task names are prefixed with this. 
+ persistor_id: ID of the persistor component + final_result_ack_timeout: timeout for sending final result to participating clients + max_task_timeout: Maximum task timeout for Controllers using WFCommClient when `task.timeout` is set to 0. Defaults to 3600. + """ + check_number_range("final_result_ack_timeout", final_result_ack_timeout, min_value=1.0) + + Executor.__init__(self) + self.controller_id_list = controller_id_list + self.task_name_prefix = task_name_prefix + self.persistor_id = persistor_id + self.final_result_ack_timeout = final_result_ack_timeout + self.max_task_timeout = max_task_timeout + + self.start_task_name = make_task_name(task_name_prefix, Constant.BASENAME_START) + self.configure_task_name = make_task_name(task_name_prefix, Constant.BASENAME_CONFIG) + self.report_final_result_task_name = make_task_name(task_name_prefix, Constant.BASENAME_REPORT_FINAL_RESULT) + + self.persistor = None + + self.current_status = StatusReport() + self.last_status_report_time = time.time() # time of last status report to server + self.config = None + self.workflow_id = None + self.finalize_lock = threading.Lock() + + self.asked_to_stop = False + self.status_lock = threading.Lock() + self.engine = None + self.me = None + self.is_starting_client = False + self.workflow_done = False + self.fatal_system_error = False + + def get_config_prop(self, name: str, default=None): + """ + Get a specified config property. + Args: + name: name of the property + default: default value to return if the property is not defined. 
+ Returns: + """ + if not self.config: + return default + return self.config.get(name, default) + + def start_run(self, fl_ctx: FLContext): + self.engine = fl_ctx.get_engine() + if not self.engine: + self.system_panic("no engine", fl_ctx) + return + + runner = fl_ctx.get_prop(FLContextKey.RUNNER) + if not runner: + self.system_panic("no client runner", fl_ctx) + return + + self.me = fl_ctx.get_identity_name() + + self.persistor = self.engine.get_component(self.persistor_id) + if not isinstance(self.persistor, LearnablePersistor): + self.log_warning( + fl_ctx, f"Persistor {self.persistor_id} must be a Persistor instance but got {type(self.persistor)}" + ) + self.persistor = None + + self.initialize(fl_ctx) + + def initialize_controller(self, controller_id, fl_ctx): + controller = self.engine.get_component(controller_id) + + comm = WFCommClient(max_task_timeout=self.max_task_timeout) + controller.set_communicator(comm) + controller.config = self.config + controller.initialize(fl_ctx) + + return controller + + def handle_event(self, event_type: str, fl_ctx: FLContext): + if event_type == EventType.START_RUN: + self.start_run(fl_ctx) + + elif event_type == EventType.BEFORE_PULL_TASK: + # add my status to fl_ctx + if not self.workflow_id: + return + + reports = fl_ctx.get_prop(Constant.STATUS_REPORTS) + if reports: + reports.pop(self.workflow_id, None) + + if self.workflow_done: + return + report = self._get_status_report() + if not report: + self.log_debug(fl_ctx, "nothing to report this time") + return + self._add_status_report(report, fl_ctx) + self.last_status_report_time = report.timestamp + + elif event_type in [EventType.ABORT_TASK, EventType.END_RUN]: + if not self.asked_to_stop and not self.workflow_done: + self.asked_to_stop = True + self.finalize(fl_ctx) + + elif event_type == EventType.FATAL_SYSTEM_ERROR: + if self.is_starting_client and not self.fatal_system_error: + self.fatal_system_error = True + self.fire_fed_event(EventType.FATAL_SYSTEM_ERROR, 
Shareable(), fl_ctx) + + def _add_status_report(self, report: StatusReport, fl_ctx: FLContext): + reports = fl_ctx.get_prop(Constant.STATUS_REPORTS) + if not reports: + reports = {} + # set the prop as public, so it will be sent to the peer in peer_context + fl_ctx.set_prop(Constant.STATUS_REPORTS, reports, sticky=False, private=False) + reports[self.workflow_id] = report.to_dict() + + def initialize(self, fl_ctx: FLContext): + """Called to initialize the executor. + Args: + fl_ctx: The FL Context + Returns: None + """ + fl_ctx.set_prop(Constant.EXECUTOR, self, private=True, sticky=False) + self.fire_event(Constant.EXECUTOR_INITIALIZED, fl_ctx) + + def finalize(self, fl_ctx: FLContext): + """Called to finalize the executor. + Args: + fl_ctx: the FL Context + Returns: None + """ + with self.finalize_lock: + if self.workflow_done: + return + + fl_ctx.set_prop(Constant.EXECUTOR, self, private=True, sticky=False) + fl_ctx.set_prop(FLContextKey.WORKFLOW, self.workflow_id, private=True, sticky=False) + self.fire_event(Constant.EXECUTOR_FINALIZED, fl_ctx) + self.workflow_done = True + + def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort_signal: Signal) -> Shareable: + if self.workflow_done: + self.log_error(fl_ctx, f"ClientControllerExecutor is finalized, not executing task {task_name}.") + return make_reply(ReturnCode.ERROR) + + if task_name == self.configure_task_name: + self.config = shareable[Constant.CONFIG] + my_wf_id = self.get_config_prop(FLContextKey.WORKFLOW) + if not my_wf_id: + self.log_error(fl_ctx, "missing workflow id in configuration!") + return make_reply(ReturnCode.BAD_REQUEST_DATA) + self.log_info(fl_ctx, f"got my workflow id {my_wf_id}") + self.workflow_id = my_wf_id + + self.engine.register_aux_message_handler( + topic=topic_for_end_workflow(my_wf_id), + message_handle_func=self._process_end_workflow, + ) + + return make_reply(ReturnCode.OK) + + elif task_name == self.start_task_name: + self.is_starting_client = True + + 
for controller_id in self.controller_id_list: + + if self.asked_to_stop: + self.log_info(fl_ctx, "Asked to stop, exiting") + return make_reply(ReturnCode.OK) + + self.controller = self.initialize_controller(controller_id, fl_ctx) + self.log_info(fl_ctx, f"Starting control flow {self.controller.name}") + + try: + res = self.controller.control_flow(abort_signal, fl_ctx) + except Exception as e: + error_msg = f"{controller_id} control_flow exception: {secure_format_exception(e)}" + self.log_error(fl_ctx, error_msg) + self.system_panic(error_msg, fl_ctx) + + if abort_signal.triggered: + return make_reply(ReturnCode.TASK_ABORTED) + + if hasattr(self.controller, "persistor"): + self.broadcast_final_result(self.controller.persistor.load(fl_ctx), fl_ctx) + + self.controller.stop_controller(fl_ctx) + + self.log_info(fl_ctx, f"Finished control flow {self.controller.name}") + + self.update_status(action=f"finished_{controller_id}", error=None, all_done=True) + + self.update_status(action="finished_start_task", error=None, all_done=True) + + return make_reply(ReturnCode.OK) + + elif task_name == self.report_final_result_task_name: + return self._process_final_result(shareable, fl_ctx) + + else: + self.log_error(fl_ctx, f"Could not handle task: {task_name}") + return make_reply(ReturnCode.TASK_UNKNOWN) + + def _get_status_report(self): + with self.status_lock: + status = self.current_status + must_report = False + if status.error: + must_report = True + elif status.timestamp: + must_report = True + + if not must_report: + return None + + # do status report + report = copy.copy(status) + return report + + def update_status(self, last_round=None, action=None, error=None, all_done=False): + with self.status_lock: + status = self.current_status + status.timestamp = time.time() + if all_done: + # once marked all_done, always all_done! 
+ status.all_done = True + if error: + status.error = error + if action: + status.action = action + if status.last_round is None: + status.last_round = last_round + elif last_round is not None and last_round > status.last_round: + status.last_round = last_round + + status_dict = status.to_dict() + self.logger.info(f"updated my last status: {status_dict}") + + def _process_final_result(self, request: Shareable, fl_ctx: FLContext) -> Shareable: + peer_ctx = fl_ctx.get_peer_context() + if peer_ctx: + client_name = peer_ctx.get_identity_name() + else: + self.log_error(fl_ctx, "Request from unknown client") + return make_reply(ReturnCode.BAD_REQUEST_DATA) + result = request.get(Constant.RESULT) + + if not result: + self.log_error(fl_ctx, f"Bad request from client {client_name}: no result") + return make_reply(ReturnCode.BAD_REQUEST_DATA) + + if not isinstance(result, Learnable): + self.log_error(fl_ctx, f"Bad result from client {client_name}: expect Learnable but got {type(result)}") + return make_reply(ReturnCode.BAD_REQUEST_DATA) + + self.log_info(fl_ctx, f"Got final result from client {client_name}") + + fl_ctx.set_prop(AppConstants.GLOBAL_MODEL, result, private=True, sticky=True) + + if self.persistor: + self.persistor.save(result, fl_ctx) + else: + self.log_error(fl_ctx, "persistor not configured, model will not be saved") + + return make_reply(ReturnCode.OK) + + def _process_end_workflow(self, topic: str, request: Shareable, fl_ctx: FLContext) -> Shareable: + self.log_info(fl_ctx, f"ending workflow {self.get_config_prop(FLContextKey.WORKFLOW)}") + self.asked_to_stop = True + # self._abort_current_task(fl_ctx) + self.finalize(fl_ctx) + return make_reply(ReturnCode.OK) + + def is_task_secure(self, fl_ctx: FLContext) -> bool: + """ + Determine whether the task should be secure. A secure task requires encrypted communication between the peers. + The task is secure only when the training is in secure mode AND private_p2p is set to True. 
+ """ + private_p2p = self.get_config_prop(Constant.PRIVATE_P2P) + secure_train = fl_ctx.get_prop(FLContextKey.SECURE_MODE, False) + return private_p2p and secure_train + + def broadcast_final_result(self, result: Learnable, fl_ctx: FLContext): + targets = self.get_config_prop(Constant.RESULT_CLIENTS) + + if not isinstance(targets, list): + self.log_warning(fl_ctx, f"expected targets of result clients to be type list, but got {type(targets)}") + return None + + if self.me in targets: + targets.remove(self.me) + + if len(targets) == 0: + # no targets to receive the result! + self.log_info(fl_ctx, "no targets to receive final result") + return None + + shareable = Shareable() + shareable[Constant.RESULT] = result + + self.log_info(fl_ctx, f"broadcasting final result to clients {targets}") + + self.update_status(action="broadcast_final_result") + + task = Task( + name=self.report_final_result_task_name, + data=shareable, + timeout=int(self.final_result_ack_timeout), + secure=self.is_task_secure(fl_ctx), + ) + + resp = self.controller.broadcast_and_wait( + task=task, + targets=targets, + min_responses=len(targets), + fl_ctx=fl_ctx, + ) + + if not isinstance(resp, dict): + self.log_error(fl_ctx, f"bad response for final result from clients, expected dict but got {type(resp)}") + return + + num_errors = 0 + for t in targets: + reply = resp.get(t) + if not isinstance(reply, Shareable): + self.log_error( + fl_ctx, + f"bad response for final result from client {t}: " f"reply must be Shareable but got {type(reply)}", + ) + num_errors += 1 + continue + + rc = reply.get_return_code(ReturnCode.OK) + if rc != ReturnCode.OK: + self.log_error(fl_ctx, f"bad response for final result from client {t}: {rc}") + num_errors += 1 + + if num_errors == 0: + self.log_info(fl_ctx, f"successfully broadcast final result to {targets}") + return num_errors diff --git a/nvflare/app_common/ccwf/common.py b/nvflare/app_common/ccwf/common.py index e1883fbe24..27ac91b381 100644 --- 
a/nvflare/app_common/ccwf/common.py +++ b/nvflare/app_common/ccwf/common.py @@ -87,6 +87,7 @@ class Constant: LEARN_TASK_ABORT_TIMEOUT = 5.0 FINAL_RESULT_ACK_TIMEOUT = 10 GET_MODEL_TIMEOUT = 10 + MAX_TASK_TIMEOUT = 3600 PROP_KEY_TRAIN_CLIENTS = "cwf.train_clients" diff --git a/nvflare/app_common/ccwf/server_ctl.py b/nvflare/app_common/ccwf/server_ctl.py index 82f6499183..01365113c6 100644 --- a/nvflare/app_common/ccwf/server_ctl.py +++ b/nvflare/app_common/ccwf/server_ctl.py @@ -14,6 +14,7 @@ import time from datetime import datetime +from typing import List from nvflare.apis.client import Client from nvflare.apis.controller_spec import ClientTask, Task @@ -57,7 +58,7 @@ def __init__(self): class ServerSideController(Controller): def __init__( self, - num_rounds: int, + num_rounds: int = 1, start_round: int = 0, task_name_prefix: str = "wf", configure_task_timeout=Constant.CONFIG_TASK_TIMEOUT, @@ -65,10 +66,10 @@ def __init__( start_task_timeout=Constant.START_TASK_TIMEOUT, task_check_period: float = Constant.TASK_CHECK_INTERVAL, job_status_check_interval: float = Constant.JOB_STATUS_CHECK_INTERVAL, - starting_client=None, + starting_client: str = "", starting_client_policy: str = DefaultValuePolicy.ANY, participating_clients=None, - result_clients=None, + result_clients: List[str] = [], result_clients_policy: str = DefaultValuePolicy.ALL, max_status_report_interval: float = Constant.PER_CLIENT_STATUS_REPORT_TIMEOUT, progress_timeout: float = Constant.WORKFLOW_PROGRESS_TIMEOUT, @@ -78,7 +79,7 @@ def __init__( Constructor Args: - num_rounds - the number of rounds to be performed. This is a workflow config parameter. + num_rounds - the number of rounds to be performed. This is a workflow config parameter. Defaults to 1. start_round - the starting round number. This is a workflow config parameter. task_name_prefix - the prefix for task names of this workflow. The workflow requires multiple tasks (e.g. config and start) between the server controller and the client. 
@@ -97,7 +98,8 @@ def __init__( starting_client - name of the starting client. starting_client_policy - how to determine the starting client if the name is not explicitly specified. Possible values are: - ANY - any one of the participating clients (randomly chosen) + ANY - any one of the participating clients (the first client) + RANDOM - a random client EMPTY - no starting client DISALLOW - does not allow implicit - starting_client must be explicitly specified start_task_timeout - how long to wait for the starting client to finish the “start” task. @@ -182,6 +184,7 @@ def start_controller(self, fl_ctx: FLContext): allow_none=False, ) + self.log_info(fl_ctx, f"Using participating clients: {self.participating_clients}") self.starting_client = validate_candidate( var_name="starting_client", candidate=self.starting_client, @@ -189,6 +192,7 @@ def start_controller(self, fl_ctx: FLContext): default_policy=self.starting_client_policy, allow_none=True, ) + self.log_info(fl_ctx, f"Starting client: {self.starting_client}") self.result_clients = validate_candidates( var_name="result_clients", diff --git a/nvflare/app_common/workflows/model_controller.py b/nvflare/app_common/workflows/model_controller.py index d5db8215e3..8d511b9805 100644 --- a/nvflare/app_common/workflows/model_controller.py +++ b/nvflare/app_common/workflows/model_controller.py @@ -63,7 +63,7 @@ def __init__( raise ValueError("task_check_period must be greater than 0.") self._task_check_period = task_check_period self._persistor_id = persistor_id - self._persistor = None + self.persistor = None # config data self._ignore_result_error = ignore_result_error @@ -77,13 +77,13 @@ def start_controller(self, fl_ctx: FLContext) -> None: self.info("Initializing ModelController workflow.") if self._persistor_id: - self._persistor = self._engine.get_component(self._persistor_id) - if not isinstance(self._persistor, LearnablePersistor): + self.persistor = self._engine.get_component(self._persistor_id) + if not 
isinstance(self.persistor, LearnablePersistor): self.warning( f"Model Persistor {self._persistor_id} must be a LearnablePersistor type object, " - f"but got {type(self._persistor)}" + f"but got {type(self.persistor)}" ) - self._persistor = None + self.persistor = None self.engine = self.fl_ctx.get_engine() FLComponentWrapper.initialize(self) @@ -292,9 +292,9 @@ def control_flow(self, abort_signal: Signal, fl_ctx: FLContext) -> None: def load_model(self): # initialize global model model = None - if self._persistor: + if self.persistor: self.info("loading initial model from persistor") - global_weights = self._persistor.load(self.fl_ctx) + global_weights = self.persistor.load(self.fl_ctx) if not isinstance(global_weights, ModelLearnable): self.panic( @@ -332,12 +332,12 @@ def load_model(self): return model def save_model(self, model): - if self._persistor: + if self.persistor: self.info("Start persist model on server.") self.event(AppEventType.BEFORE_LEARNABLE_PERSIST) # persistor uses Learnable format to save model ml = make_model_learnable(weights=model.params, meta_props=model.meta) - self._persistor.save(ml, self.fl_ctx) + self.persistor.save(ml, self.fl_ctx) self.event(AppEventType.AFTER_LEARNABLE_PERSIST) self.info("End persist model on server.") else: diff --git a/nvflare/fuel/utils/validation_utils.py b/nvflare/fuel/utils/validation_utils.py index 774638050e..45d2abaa8c 100644 --- a/nvflare/fuel/utils/validation_utils.py +++ b/nvflare/fuel/utils/validation_utils.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+import random + SYMBOL_ALL = "@all" SYMBOL_NONE = "@none" @@ -24,12 +26,13 @@ class DefaultValuePolicy: DISALLOW = "disallow" ANY = "any" + RANDOM = "random" EMPTY = "empty" ALL = "all" @classmethod def valid_policy(cls, p: str): - return p in [cls.DISALLOW, cls.ANY, cls.EMPTY, cls.ALL] + return p in [cls.DISALLOW, cls.ANY, cls.RANDOM, cls.EMPTY, cls.ALL] def check_positive_int(name, value): @@ -144,6 +147,7 @@ def validate_candidates(var_name: str, candidates, base: list, default_policy: s 1. Not explicitly specified (Python object None or empty list []) In this case, the default_policy decides the final result: - ANY: returns a list that contains a single item from the base + - RANDOM: returns a list that contains a random item from the base - EMPTY: returns an empty list - ALL: returns the base list - DISALLOW: raise exception - candidates must be explicitly specified @@ -189,6 +193,8 @@ def validate_candidates(var_name: str, candidates, base: list, default_policy: s return base elif default_policy == DefaultValuePolicy.DISALLOW: raise ValueError(f"invalid value '{candidates}' in '{var_name}': it must be subset of {base}") + elif default_policy == DefaultValuePolicy.RANDOM: + return [random.choice(base)] else: # any return [base[0]] @@ -222,6 +228,7 @@ def validate_candidate(var_name: str, candidate, base: list, default_policy: str 1. 
Not explicitly specified (Python object None or empty string) In this case, the default_policy decides the final result: - ANY: returns the first item from the base + - RANDOM: returns a random item from the base - EMPTY: returns an empty str - ALL or DISALLOW: raise exception - candidate must be explicitly specified @@ -263,6 +270,8 @@ def validate_candidate(var_name: str, candidate, base: list, default_policy: str return "" elif default_policy == DefaultValuePolicy.ANY: return base[0] + elif default_policy == DefaultValuePolicy.RANDOM: + return random.choice(base) else: raise ValueError(f"invalid value '{candidate}' in '{var_name}': it must be one of {base}") else: diff --git a/nvflare/private/fed/client/client_run_manager.py b/nvflare/private/fed/client/client_run_manager.py index b92766047a..7c15dd57ca 100644 --- a/nvflare/private/fed/client/client_run_manager.py +++ b/nvflare/private/fed/client/client_run_manager.py @@ -175,6 +175,12 @@ def get_client_from_name(self, client_name): return c return None + def get_clients(self): + return list(self.all_clients.values()) + + def persist_components(self, fl_ctx: FLContext, completed: bool): + self.logger.warning(f"will not persist components, not supported by {self.__class__.__name__}") + def get_widget(self, widget_id: str) -> Widget: return self.widgets.get(widget_id) diff --git a/nvflare/private/fed/utils/fed_utils.py b/nvflare/private/fed/utils/fed_utils.py index 2eddb292b5..c069d91324 100644 --- a/nvflare/private/fed/utils/fed_utils.py +++ b/nvflare/private/fed/utils/fed_utils.py @@ -314,7 +314,7 @@ def get_target_names(targets): continue if name not in target_names: - target_names.append(t) + target_names.append(name) return target_names From c7d1bee76102fea0abc2298cacad038e7e5efb08 Mon Sep 17 00:00:00 2001 From: Isaac Yang Date: Wed, 8 May 2024 09:01:53 -0700 Subject: [PATCH 13/21] Add option in dashboard cli for AWS vpc and subnet --- nvflare/dashboard/cli.py | 18 +++++++++++++++++- 1 file changed, 17 
insertions(+), 1 deletion(-) diff --git a/nvflare/dashboard/cli.py b/nvflare/dashboard/cli.py index a3dbea95d8..2c18cdae0c 100644 --- a/nvflare/dashboard/cli.py +++ b/nvflare/dashboard/cli.py @@ -153,7 +153,11 @@ def cloud(args): "t", exe=True, ) - print(f"Dashboard launch script for cloud is written at {dest}. Now running the script.") + print(f"Dashboard launch script for cloud is written at {dest}. Now running it.") + if args.vpc_id and args.subnet_id: + option = [f"--vpc-id={args.vpc_id}", f"--subnet-id={args.subnet_id}"] + print(f"Option of the script: {option}") + dest = [dest] + option _ = subprocess.run(dest) os.remove(dest) @@ -192,6 +196,18 @@ def define_dashboard_parser(parser): parser.add_argument("--cred", help="set credential directly in the form of USER_EMAIL:PASSWORD") parser.add_argument("-i", "--image", help="set the container image name") parser.add_argument("--local", action="store_true", help="start dashboard locally without docker image") + parser.add_argument( + "--vpc-id", + type=str, + default="", + help="VPC id for AWS EC2 instance. Applicable to AWS only. Ignored if subnet-id is not specified.", + ) + parser.add_argument( + "--subnet-id", + type=str, + default="", + help="Subnet id for AWS EC2 instance. Applicable to AWS only. 
Ignored if vpc-id is not specified.", + ) def handle_dashboard(args): From 5c6923fa65c6b102100c91a6f2b19bac4b4fd2fd Mon Sep 17 00:00:00 2001 From: Yan Cheng <58191769+yanchengnv@users.noreply.github.com> Date: Thu, 9 May 2024 15:44:12 -0400 Subject: [PATCH 14/21] [2.5] Clean up to allow creation of nvflare light (#2573) * clean up to allow creation of nvflare light * move defs to cellnet --- nvflare/client/ipc/ipc_agent.py | 9 ++++++-- nvflare/fuel/f3/cellnet/cell.py | 3 +-- nvflare/fuel/f3/cellnet/defs.py | 36 +++++++++++++++++++++++++++++++ nvflare/private/defs.py | 38 ++------------------------------- 4 files changed, 46 insertions(+), 40 deletions(-) diff --git a/nvflare/client/ipc/ipc_agent.py b/nvflare/client/ipc/ipc_agent.py index e7b104cc96..6f527c50c1 100644 --- a/nvflare/client/ipc/ipc_agent.py +++ b/nvflare/client/ipc/ipc_agent.py @@ -104,8 +104,13 @@ def __init__( self.cell.register_request_cb(channel=defs.CHANNEL, topic=defs.TOPIC_HEARTBEAT, cb=self._handle_heartbeat) self.cell.register_request_cb(channel=defs.CHANNEL, topic=defs.TOPIC_BYE, cb=self._handle_bye) self.cell.register_request_cb(channel=defs.CHANNEL, topic=defs.TOPIC_ABORT, cb=self._handle_abort_task) - self.cell.add_incoming_request_filter( - channel=defs.CHANNEL, + self.cell.core_cell.add_incoming_request_filter( + channel="*", + topic="*", + cb=self._msg_received, + ) + self.cell.core_cell.add_incoming_reply_filter( + channel="*", topic="*", cb=self._msg_received, ) diff --git a/nvflare/fuel/f3/cellnet/cell.py b/nvflare/fuel/f3/cellnet/cell.py index 723b894f75..8544e733cb 100644 --- a/nvflare/fuel/f3/cellnet/cell.py +++ b/nvflare/fuel/f3/cellnet/cell.py @@ -20,13 +20,12 @@ from typing import Dict, List, Union from nvflare.fuel.f3.cellnet.core_cell import CoreCell, TargetMessage -from nvflare.fuel.f3.cellnet.defs import MessageHeaderKey, MessageType, ReturnCode +from nvflare.fuel.f3.cellnet.defs import CellChannel, MessageHeaderKey, MessageType, ReturnCode from 
nvflare.fuel.f3.cellnet.utils import decode_payload, encode_payload, make_reply from nvflare.fuel.f3.message import Message from nvflare.fuel.f3.stream_cell import StreamCell from nvflare.fuel.f3.streaming.stream_const import StreamHeaderKey from nvflare.fuel.f3.streaming.stream_types import StreamFuture -from nvflare.private.defs import CellChannel from nvflare.security.logging import secure_format_exception CHANNELS_TO_EXCLUDE = ( diff --git a/nvflare/fuel/f3/cellnet/defs.py b/nvflare/fuel/f3/cellnet/defs.py index de86aeed3e..bb4c4d0971 100644 --- a/nvflare/fuel/f3/cellnet/defs.py +++ b/nvflare/fuel/f3/cellnet/defs.py @@ -135,3 +135,39 @@ class AbortRun(Exception): class InvalidRequest(Exception): pass + + +class SSLConstants: + """hard coded names related to SSL.""" + + CERT = "ssl_cert" + PRIVATE_KEY = "ssl_private_key" + ROOT_CERT = "ssl_root_cert" + + +class CellChannel: + + CLIENT_MAIN = "admin" + AUX_COMMUNICATION = "aux_communication" + SERVER_MAIN = "task" + SERVER_COMMAND = "server_command" + SERVER_PARENT_LISTENER = "server_parent_listener" + CLIENT_COMMAND = "client_command" + CLIENT_SUB_WORKER_COMMAND = "client_sub_worker_command" + MULTI_PROCESS_EXECUTOR = "multi_process_executor" + SIMULATOR_RUNNER = "simulator_runner" + RETURN_ONLY = "return_only" + + +class CellChannelTopic: + + Register = "register" + Quit = "quit" + GET_TASK = "get_task" + SUBMIT_RESULT = "submit_result" + HEART_BEAT = "heart_beat" + EXECUTE_RESULT = "execute_result" + FIRE_EVENT = "fire_event" + REPORT_JOB_FAILURE = "report_job_failure" + + SIMULATOR_WORKER_INIT = "simulator_worker_init" diff --git a/nvflare/private/defs.py b/nvflare/private/defs.py index 6ae5c7fd8c..6ccc6895ab 100644 --- a/nvflare/private/defs.py +++ b/nvflare/private/defs.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
+# this import is to let existing scripts import from nvflare.private.defs +from nvflare.fuel.f3.cellnet.defs import CellChannel, CellChannelTopic, SSLConstants # noqa: F401 from nvflare.fuel.f3.message import Message from nvflare.fuel.hci.server.constants import ConnProps @@ -128,42 +130,6 @@ class AppFolderConstants: CONFIG_ENV = "environment.json" -class SSLConstants: - """hard coded names related to SSL.""" - - CERT = "ssl_cert" - PRIVATE_KEY = "ssl_private_key" - ROOT_CERT = "ssl_root_cert" - - -class CellChannel: - - CLIENT_MAIN = "admin" - AUX_COMMUNICATION = "aux_communication" - SERVER_MAIN = "task" - SERVER_COMMAND = "server_command" - SERVER_PARENT_LISTENER = "server_parent_listener" - CLIENT_COMMAND = "client_command" - CLIENT_SUB_WORKER_COMMAND = "client_sub_worker_command" - MULTI_PROCESS_EXECUTOR = "multi_process_executor" - SIMULATOR_RUNNER = "simulator_runner" - RETURN_ONLY = "return_only" - - -class CellChannelTopic: - - Register = "register" - Quit = "quit" - GET_TASK = "get_task" - SUBMIT_RESULT = "submit_result" - HEART_BEAT = "heart_beat" - EXECUTE_RESULT = "execute_result" - FIRE_EVENT = "fire_event" - REPORT_JOB_FAILURE = "report_job_failure" - - SIMULATOR_WORKER_INIT = "simulator_worker_init" - - ERROR_MSG_PREFIX = "NVFLARE_ERROR" From 23e4da2781586e6b0683d499a45d814f2794f9a1 Mon Sep 17 00:00:00 2001 From: Isaac Yang Date: Thu, 9 May 2024 15:24:46 -0700 Subject: [PATCH 15/21] Enable patch and build for nvflight (#2574) --- nvflight/__init__.py | 13 +++ nvflight/build_wheel.py | 79 ++++++++++++++++ nvflight/patch.diff | 27 ++++++ nvflight/prepare_setup.py | 186 ++++++++++++++++++++++++++++++++++++++ nvflight/setup.py | 54 +++++++++++ 5 files changed, 359 insertions(+) create mode 100644 nvflight/__init__.py create mode 100644 nvflight/build_wheel.py create mode 100644 nvflight/patch.diff create mode 100644 nvflight/prepare_setup.py create mode 100644 nvflight/setup.py diff --git a/nvflight/__init__.py b/nvflight/__init__.py new file mode 
100644 index 0000000000..4fc50543f1 --- /dev/null +++ b/nvflight/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/nvflight/build_wheel.py b/nvflight/build_wheel.py new file mode 100644 index 0000000000..312bcfbe45 --- /dev/null +++ b/nvflight/build_wheel.py @@ -0,0 +1,79 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import datetime +import os +import shutil +import subprocess + +from prepare_setup import prepare_setup + +import versioneer + +versions = versioneer.get_versions() +if versions["error"]: + today = datetime.date.today().timetuple() + year = today[0] % 1000 + month = today[1] + day = today[2] + version = f"2.3.9.dev{year:02d}{month:02d}{day:02d}" +else: + version = versions["version"] + + +def patch(setup_dir, patch_file): + file_dir_path = os.path.abspath(os.path.dirname(__file__)) + cmd = ['git', 'apply', os.path.join(file_dir_path, patch_file)] + try: + subprocess.run(cmd, check=True, cwd=setup_dir) + except subprocess.CalledProcessError as e: + print(f"Error to patch prepared files {e}") + exit(1) + +nvflight_setup_dir = "/tmp/nvflight_setup" +patch_file = "patch.diff" +# prepare +prepare_setup(nvflight_setup_dir) + +patch(nvflight_setup_dir, patch_file) +# build wheel +dist_dir = os.path.join(nvflight_setup_dir, "dist") +if os.path.isdir(dist_dir): + shutil.rmtree(dist_dir) + +env = os.environ.copy() +env['NVFL_VERSION'] = version + +cmd_str = "python setup.py -v sdist bdist_wheel" +cmd = cmd_str.split(" ") +try: + subprocess.run(cmd, check=True, cwd=nvflight_setup_dir, env=env) +except subprocess.CalledProcessError as e: + print(f"Error: {e}") + +results = [] +for root, dirs, files in os.walk(dist_dir): + result = [os.path.join(root, f) for f in files if f.endswith(".whl")] + results.extend(result) + +if not os.path.isdir("dist"): + os.makedirs("dist", exist_ok=True) + +if len(results) == 1: + shutil.copy(results[0], os.path.join("dist", os.path.basename(results[0]))) +else: + print(f"something is not right, wheel files = {results}") + +print(f"Setup dir {nvflight_setup_dir}") +shutil.rmtree(nvflight_setup_dir) diff --git a/nvflight/patch.diff b/nvflight/patch.diff new file mode 100644 index 0000000000..b65b48fbcc --- /dev/null +++ b/nvflight/patch.diff @@ -0,0 +1,27 @@ +diff --git a/nvflare/client/__init__.py b/nvflare/client/__init__.py +index 
8d668962..7bcb2978 100644 +--- a/nvflare/client/__init__.py ++++ b/nvflare/client/__init__.py +@@ -15,22 +15,4 @@ + + # https://github.com/microsoft/pylance-release/issues/856 + +-from nvflare.apis.analytix import AnalyticsDataType as AnalyticsDataType +-from nvflare.app_common.abstract.fl_model import FLModel as FLModel +-from nvflare.app_common.abstract.fl_model import ParamsType as ParamsType +- +-from .api import get_config as get_config +-from .api import get_job_id as get_job_id +-from .api import get_site_name as get_site_name +-from .api import init as init +-from .api import is_evaluate as is_evaluate +-from .api import is_running as is_running +-from .api import is_submit_model as is_submit_model +-from .api import is_train as is_train +-from .api import log as log +-from .api import receive as receive +-from .api import send as send +-from .api import system_info as system_info +-from .decorator import evaluate as evaluate +-from .decorator import train as train + from .ipc.ipc_agent import IPCAgent diff --git a/nvflight/prepare_setup.py b/nvflight/prepare_setup.py new file mode 100644 index 0000000000..94514789fe --- /dev/null +++ b/nvflight/prepare_setup.py @@ -0,0 +1,186 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import os +import shutil + +exclude_extensions = [".md", ".rst", ".pyc", "__pycache__"] + +nvflight_packages = { + "nvflare": { + "include": ["_version.py"], + "exclude": ["*"] + }, + "nvflare/apis": { + "include": ["__init__.py", "fl_constant.py"], + "exclude": ["*"] + }, + "nvflare/app_common": { + "include": ["__init__.py"], + "exclude": ["*"] + }, + "nvflare/app_common/decomposers": { + "include": ["__init__.py", "numpy_decomposers.py"], + "exclude": ["*"] + }, + "nvflare/client": { + "include": ["__init__.py"], + "exclude": ["*"] + }, + "nvflare/client/ipc": { + "include": ["__init__.py", "defs.py", "ipc_agent.py"], + "exclude": ["*"] + }, + "nvflare/fuel": { + "include": ["__init__.py"], + "exclude": ["*"] + }, + "nvflare/fuel/common": { + "include": ["*"], + "exclude": [] + }, + "nvflare/fuel/f3": { + "include": ["__init__.py", + "comm_error.py", + "connection.py", + "endpoint.py", + "mpm.py", + "stats_pool.py", + "comm_config.py", + "communicator.py", + "message.py", + "stream_cell.py" + ], + "exclude": ["*"] + }, + "nvflare/fuel/f3/cellnet": { + "include": ["*"], + "exclude": [] + }, + "nvflare/fuel/f3/drivers": { + "include": ["*"], + "exclude": ["grpc", "aio_grpc_driver.py", "aio_http_driver.py", "grpc_driver.py"] + }, + "nvflare/fuel/f3/sfm": { + "include": ["*"], + "exclude": [] + }, + "nvflare/fuel/f3/streaming": { + "include": ["*"], + "exclude": [] + }, + "nvflare/fuel/hci": { + "include": ["__init__.py", "security.py"], + "exclude": ["*"] + }, + "nvflare/fuel/utils": { + "include": ["*"], + "exclude": ["fobs"] + }, + "nvflare/fuel/utils/fobs": { + "include": ["*"], + "exclude": [] + }, + "nvflare/fuel/utils/fobs/decomposers": { + "include": ["*"], + "exclude": [] + }, + "nvflare/security": { + "include": ["__init__.py", "logging.py"], + "exclude": ["*"] + } +} + + +def should_exclude(str_value): + return any(str_value.endswith(ext) for ext in exclude_extensions) + + +def package_selected_files(package_info: dict): + if not package_info: + 
return + all_items = "*" + results = {} + + for p, package_rule in package_info.items(): + include = package_rule["include"] + exclude = package_rule["exclude"] + paths = [] + for include_item in include: + item_path = os.path.join(p, include_item) + if all_items != include_item: + if all_items in exclude: + # excluded everything except for included items + if os.path.isfile(item_path) and not should_exclude(item_path): + paths.append(item_path) + elif include_item not in exclude: + paths.append(item_path) + else: + if all_items in exclude: + # excluded everything except for included items + if os.path.isfile(item_path): + paths.append(item_path) + else: + # include everything in the package except excluded items + for root, dirs, files in os.walk(p): + if should_exclude(root) or os.path.basename(root) in exclude: + continue + + for f in files: + if not should_exclude(f) and f not in exclude: + paths.append(os.path.join(root, f)) + results[p] = paths + return results + + +def create_empty_file(file_path): + try: + with open(file_path, 'w'): + pass # This block is intentionally left empty + except Exception as e: + print(f"Error creating empty file: {e}") + + +def copy_files(package_paths: dict, target_dir: str): + for p, paths in package_paths.items(): + for src_path in paths: + dst_path = os.path.join(target_dir, src_path) + os.makedirs(os.path.dirname(dst_path), exist_ok=True) + shutil.copy(src_path, dst_path) + + for p in package_paths: + init_file_path = os.path.join(target_dir, p, "__init__.py") + if not os.path.isfile(init_file_path): + create_empty_file(init_file_path) + + +def prepare_setup(setup_dir: str): + if os.path.isdir(setup_dir): + shutil.rmtree(setup_dir) + + os.makedirs(setup_dir, exist_ok=True) + nvflight_paths = package_selected_files(nvflight_packages) + copy_files(nvflight_paths, setup_dir) + + src_files = [ + "setup.cfg", + "README.md", + "LICENSE", + os.path.join("nvflight", "setup.py") + ] + + for src in src_files: + shutil.copy(src, 
os.path.join(setup_dir, os.path.basename(src))) + diff --git a/nvflight/setup.py b/nvflight/setup.py new file mode 100644 index 0000000000..1aea531751 --- /dev/null +++ b/nvflight/setup.py @@ -0,0 +1,54 @@ +# Copyright (c) 2023, NVIDIA CORPORATION. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +import os + +from setuptools import find_packages, setup + +this_directory = os.path.abspath(os.path.dirname(__file__)) + +today = datetime.date.today().timetuple() +year = today[0] % 1000 +month = today[1] +day = today[2] + +release_package = find_packages( + where=".", + include=[ + "*", + ], + exclude=["tests", "tests.*"], +) + +package_data = {"": ["*.yml", "*.config"], } + +release = os.environ.get("NVFL_RELEASE") +version = os.environ.get("NVFL_VERSION") + +if release == "1": + package_dir = {"nvflare": "nvflare"} + package_name = "nvflare-light" +else: + package_dir = {"nvflare": "nvflare"} + package_name = "nvflare-light-nightly" + +setup( + name=package_name, + version=version, + package_dir=package_dir, + packages=release_package, + package_data=package_data, + include_package_data=True, +) From 1ba16d81554dfd136c5087ca0c41208c925c2cbc Mon Sep 17 00:00:00 2001 From: Minghui Chen <50226876+MinghuiChen43@users.noreply.github.com> Date: Sat, 11 May 2024 04:05:23 +0800 Subject: [PATCH 16/21] add FedBN Implementation on NVFlare research folder - a local batch normalization federated learning method (#2524) MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit * add research/fedbn * delete redudant controller and correct figs requirements * update plot_requirements * rewrite fedbn * update jobs * remove workspace * update README * simplify job simulator_run to take only one workspace parameter. (#2528) * Add missing client api test jobs (#2535) * Fixed the simulator server workspace root dir (#2533) * Fixed the simulator server root dir error. * Added unit test for SimulatorRunner start_server_app. --------- Co-authored-by: Chester Chen <512707+chesterxgchen@users.noreply.github.com> * Improve InProcessClientAPIExecutor (#2536) * 1. rename ExeTaskFnWrapper class to TaskScriptRunner 2. Replace implementation of the inprocess function exection from calling a main() function to user runpy.run_path() which reduce the user requirements to have main() function 3. redirect print() to logger.info() * 1. rename ExeTaskFnWrapper class to TaskScriptRunner 2. Replace implementation of the inprocess function exection from calling a main() function to user runpy.run_path() which reduce the user requirements to have main() function 3. redirect print() to logger.info() * make result check and result pull use the same configurable variable * rename exec_task_fn_wrapper to task_script_runner.py * fix typo * FIX MLFLow and Tensorboard Output to be consistent with new Workspace root changes (#2537) * 1) fix mlruns and tb_events dirs due to workspace directory changes 2) for MLFLow, add tracking_rui default to workspace_dir / /mlruns instead current default /mlruns. This is a) consistent with Tensorboard 2) avoid job output oeverwrite the 1st job * 1) fix mlruns and tb_events dirs due to workspace directory changes 2) for MLFLow, add tracking_rui default to workspace_dir / /mlruns instead current default /mlruns. 
This is a) consistent with Tensorboard 2) avoid job output oeverwrite the 1st job * 1) fix mlruns and tb_events dirs due to workspace directory changes 2) for MLFLow, add tracking_rui default to workspace_dir / /mlruns instead current default /mlruns. This is a) consistent with Tensorboard 2) avoid job output oeverwrite the 1st job * 1. Remove the default code to use configuration 2. fix some broken notebook * rollback changes * Fix decorator issue (#2542) * update create and run job script * FLModel summary (#2544) * add FLModel Summary * format * remove jobs folder * expose aggregate_fn to users for overwriting (#2539) * handle cases where the script with relative path in Script Runner (#2543) * handle cases where the script with relative path * handle cases where the script with relative path * add more unit test cases and change the file search logics * code format * add more unit test cases and change the file search logics * Lr newton raphson (#2529) * Implement federated logistic regression with second-order newton raphson. Update file headers. Update README. Update README. Fix README. Refine README. Update README. Added more logging for the job status changing. (#2480) * Added more logging for the job status changing. * Fixed a logging call error. 
--------- Co-authored-by: Chester Chen <512707+chesterxgchen@users.noreply.github.com> Co-authored-by: Yuan-Ting Hsieh (謝沅廷) Fix update client status (#2508) * check workflow id before updating client status * change order of checks Add user guide on how to deploy to EKS (#2510) * Add user guide on how to deploy to EKS * Address comments Improve dead client handling (#2506) * dev * test dead client cmd * added more info for dead client tracing * remove unused imports * fix unit test * fix test case * address PR comments --------- Co-authored-by: Sean Yang Enhance WFController (#2505) * set flmodel variables in basefedavg * make round info optional, fix inproc api bug temporarily disable preflight tests (#2521) Upgrade dependencies (#2516) Use full path for PSI components (#2437) (#2517) Multiple bug fixes from 2.4 (#2518) * [2.4] Support client custom code in simulator (#2447) * Support client custom code in simulator * Fix client custom code * Remove cancel_futures args (#2457) * Fix sub_worker_process shutdown (#2458) * Set GRPC_ENABLE_FORK_SUPPORT to False (#2474) Pythonic job creation (#2483) * WIP: constructed the FedJob. * WIP: server_app josn export. * generate the job app config. * fully functional pythonic job creation. * Added simulator_run for pythonic API. * reformat. * Added filters support for pythonic job creation. * handled the direct import case in fed_job. * refactor. * Added the resource_spec set function for FedJob. * refactored. * Moved the ClientApp and ServerApp into fed_app.py. * Refactored: removed the _FilterDef class. * refactored. * Rename job config classes (#3) * rename config related classes * add client api example * fix metric streaming * add to() routine * Enable obj in the constructor as paramenter. * Added support for the launcher script. * refactored. * reformat. * Update the comment. * re-arrange the package location. * Added add_ext_script() for BaseAppConfig. * codestyle fix. * Removed the client-api-pt example. 
* removed no used import. * fixed the in_time_accumulate_weighted_aggregator_test.py * Added Enum parameter support. * Added docstring. * Added ability to handle parameters from base class. * Move the parameter data format conversion to the START_RUN event for InProcessClientAPIExecutor. * Added params_exchange_format for PTInProcessClientAPIExecutor. * codestyle fix. * Fixed a custom code folder structure issue. * work for sub-folder custom files. * backed to handle parameters from base classes. * Support folder structure job config. * Added support for flat folder from '.XXX' import. * codestyle fix. * refactored and add docstring. * Address some of the PR reviews. --------- Co-authored-by: Holger Roth <6304754+holgerroth@users.noreply.github.com> Co-authored-by: Yuan-Ting Hsieh (謝沅廷) Co-authored-by: Chester Chen <512707+chesterxgchen@users.noreply.github.com> Enhancements from 2.4 (#2519) * Starts heartbeat after task is pull and before task execution (#2415) * Starts pipe handler heartbeat send/check after task is pull before task execution (#2442) * [2.4] Improve cell pipe timeout handling (#2441) * improve cell pipe timeout handling * improved end and abort handling * improve timeout handling --------- Co-authored-by: Yuan-Ting Hsieh (謝沅廷) * [2.4] Enhance launcher executor (#2433) * Update LauncherExecutor logs and execution setup timeout * Change name * [2.4] Fire and forget for pipe handler control messages (#2413) * Fire and forget for pipe handler control messages * Add default timeout value * fix wait-for-reply (#2478) * Fix pipe handler timeout in task exchanger and launcher executor (#2495) * Fix metric relay pipe handler timeout (#2496) * Rely on launcher check_run_status to pause/resume hb (#2502) Co-authored-by: Chester Chen <512707+chesterxgchen@users.noreply.github.com> --------- Co-authored-by: Yan Cheng <58191769+yanchengnv@users.noreply.github.com> Co-authored-by: Chester Chen <512707+chesterxgchen@users.noreply.github.com> Update ci cd from 
2.4 (#2520) * Update github actions (#2450) * Fix premerge (#2467) * Fix issues on hello-world TF2 notebook * Fix tf integration test (#2504) * Add client api integration tests --------- Co-authored-by: Isaac Yang Co-authored-by: Sean Yang use controller name for stats (#2522) Simulator workspace re-design (#2492) * Redesign simulator workspace structure. * working, needs clean. * Changed the simulator workspacce structure to be consistent with POC. * Moved the logfile init to start_server_app(). * optimzed. * adjust the stats pool location. * Addressed the PR views. --------- Co-authored-by: Chester Chen <512707+chesterxgchen@users.noreply.github.com> Co-authored-by: Yuan-Ting Hsieh (謝沅廷) Simulator end run for all clients (#2514) * Provide an option to run END_RUN for all clients. * Added end_run_all option for simulator to run END_RUN event for all clients. * Fixed a add_argument type, added help message. * Changed to use add_argument(() compatible with python 3.8. * reformat. * rewrite the _end_run_clients() and add docstring for easier understanding. * reformat. * adjusting the locking in the _end_run_clients. * Fixed a potential None pointer error. * renamed the clients_finished_end_run variable. --------- Co-authored-by: Chester Chen <512707+chesterxgchen@users.noreply.github.com> Co-authored-by: Sean Yang Co-authored-by: Yuan-Ting Hsieh (謝沅廷) Secure XGBoost Integration (#2512) * Updated FOBS readme to add DatumManager, added agrpcs as secure scheme * Refactoring * Refactored the secure version to histogram_based_v2 * Replaced Paillier with a mock encryptor * Added license header * Put mock back * Added metrics_writer back and fixed GRPC error reply simplify job simulator_run to take only one workspace parameter. (#2528) Fix README. Fix file links in README. Fix file links in README. Add comparison between centralized and federated training code. 
Add missing client api test jobs (#2535) Fixed the simulator server workspace root dir (#2533) * Fixed the simulator server root dir error. * Added unit test for SimulatorRunner start_server_app. --------- Co-authored-by: Chester Chen <512707+chesterxgchen@users.noreply.github.com> Improve InProcessClientAPIExecutor (#2536) * 1. rename ExeTaskFnWrapper class to TaskScriptRunner 2. Replace implementation of the inprocess function exection from calling a main() function to user runpy.run_path() which reduce the user requirements to have main() function 3. redirect print() to logger.info() * 1. rename ExeTaskFnWrapper class to TaskScriptRunner 2. Replace implementation of the inprocess function exection from calling a main() function to user runpy.run_path() which reduce the user requirements to have main() function 3. redirect print() to logger.info() * make result check and result pull use the same configurable variable * rename exec_task_fn_wrapper to task_script_runner.py * fix typo Update README for launching python script. Modify tensorboard logdir. Link to environment setup instructions. expose aggregate_fn to users for overwriting (#2539) FIX MLFLow and Tensorboard Output to be consistent with new Workspace root changes (#2537) * 1) fix mlruns and tb_events dirs due to workspace directory changes 2) for MLFLow, add tracking_rui default to workspace_dir / /mlruns instead current default /mlruns. This is a) consistent with Tensorboard 2) avoid job output oeverwrite the 1st job * 1) fix mlruns and tb_events dirs due to workspace directory changes 2) for MLFLow, add tracking_rui default to workspace_dir / /mlruns instead current default /mlruns. This is a) consistent with Tensorboard 2) avoid job output oeverwrite the 1st job * 1) fix mlruns and tb_events dirs due to workspace directory changes 2) for MLFLow, add tracking_rui default to workspace_dir / /mlruns instead current default /mlruns. 
This is a) consistent with Tensorboard 2) avoid job output oeverwrite the 1st job * 1. Remove the default code to use configuration 2. fix some broken notebook * rollback changes Fix decorator issue (#2542) Remove line number in code link. FLModel summary (#2544) * add FLModel Summary * format formatting Update KM example, add 2-stage solution without HE (#2541) * add KM without HE, update everything * fix license header * fix license header - update year to 2024 * fix format --------- Co-authored-by: Chester Chen <512707+chesterxgchen@users.noreply.github.com> * update license --------- Co-authored-by: Chester Chen <512707+chesterxgchen@users.noreply.github.com> Co-authored-by: Holger Roth * Add information about dig (bind9-dnsutils) in the document * format update * Update KM example, add 2-stage solution without HE (#2541) * add KM without HE, update everything * fix license header * fix license header - update year to 2024 * fix format --------- Co-authored-by: Chester Chen <512707+chesterxgchen@users.noreply.github.com> * Update monai readme to remove logging.conf (#2552) * MONAI mednist example (#2532) * add monai notebook * add training script * update example * update notebook * use job template * call init later * swith back * add gitignore * update notebooks * add readmes * send received model to GPU * use monai tb stats handler * formatting * Improve AWS cloud launch script * Add in process client api tests (#2549) * Add in process client api tests * Fix headers * Fix comments * Add client controller executor (#2530) * add client controller executor * address comments * enhance abort, set peer props * remove asserts --------- Co-authored-by: Yuan-Ting Hsieh (謝沅廷) * Add option in dashboard cli for AWS vpc and subnet * add note on README visualization * update README * update readme * update readme * update readme * [2.5] Clean up to allow creation of nvflare light (#2573) * clean up to allow creation of nvflare light * move defs to cellnet * Enable patch 
and build for nvflight (#2574) * verified commit --------- Co-authored-by: Yuhong Wen Co-authored-by: Yuan-Ting Hsieh (謝沅廷) Co-authored-by: Chester Chen <512707+chesterxgchen@users.noreply.github.com> Co-authored-by: Sean Yang Co-authored-by: Zhijin Co-authored-by: Holger Roth Co-authored-by: Isaac Yang Co-authored-by: Ziyue Xu Co-authored-by: Ziyue Xu <71786575+ZiyueXu77@users.noreply.github.com> Co-authored-by: Holger Roth <6304754+holgerroth@users.noreply.github.com> Co-authored-by: Yan Cheng <58191769+yanchengnv@users.noreply.github.com> --- research/fed-bn/LICENSE | 201 +++++++++++++++++++++++++++ research/fed-bn/README.md | 75 ++++++++++ research/fed-bn/create_job.sh | 1 + research/fed-bn/figs/loss.jpeg | Bin 0 -> 29518 bytes research/fed-bn/prepare_data.sh | 3 + research/fed-bn/requirements.txt | 4 + research/fed-bn/run_job.sh | 1 + research/fed-bn/src/fedbn_cifar10.py | 145 +++++++++++++++++++ research/fed-bn/src/net.py | 41 ++++++ 9 files changed, 471 insertions(+) create mode 100644 research/fed-bn/LICENSE create mode 100644 research/fed-bn/README.md create mode 100755 research/fed-bn/create_job.sh create mode 100644 research/fed-bn/figs/loss.jpeg create mode 100755 research/fed-bn/prepare_data.sh create mode 100644 research/fed-bn/requirements.txt create mode 100755 research/fed-bn/run_job.sh create mode 100644 research/fed-bn/src/fedbn_cifar10.py create mode 100644 research/fed-bn/src/net.py diff --git a/research/fed-bn/LICENSE b/research/fed-bn/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/research/fed-bn/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/research/fed-bn/README.md b/research/fed-bn/README.md new file mode 100644 index 0000000000..5e541d2235 --- /dev/null +++ b/research/fed-bn/README.md @@ -0,0 +1,75 @@ +# FedBN: Federated Learning on Non-IID Features via Local Batch Normalization + +Welcome to the repository for FedBN, a federated learning algorithm designed to address the feature shift problem when aggregating models across different data distributions. + +###### Abstract: + +> In this work, we propose an effective method that uses local batch normalization to alleviate the feature shift before averaging models. The resulting scheme, called FedBN, outperforms both classical FedAvg and FedProx on our extensive experiments. 
These empirical results are supported by a convergence analysis that shows in a simplified setting that FedBN has a faster convergence rate than FedAvg. + +## License +This project is open-sourced under the Apache v2 License. The codebase builds upon the initial work shared at [FedBN](https://github.com/med-air/FedBN). + +## Setup Instructions + +To set up the environment for training, execute the following commands: +``` +pip install --upgrade pip +pip install -r ./requirements.txt +``` + +## Running the code + +### Initial Configuration + +Ensure all shell scripts are executable: +``` +find . -name "*.sh" -exec chmod +x {} \; +``` + +Set the Python path to recognize the FedBN modules: +``` +export PYTHONPATH=${PYTHONPATH}:${PWD}/.. +``` + +Data Preparation + +Download the necessary datasets by running: +``` +./prepare_data.sh +``` + +# Run FedBN on different data splits + +We will use the in-process client API. We choose the sag_pt job template and run the following command to create the job: +``` +./create_job.sh +``` + +Execution + +Run the FedBN simulation with the following command: +``` +./run_job.sh +``` + +## Visualizing Results +To visualize training losses, we use the [Comet ML](https://www.comet.com/site/). +Below is an example of the loss visualization output: +![FedBN Loss Results](./figs/loss.jpeg) + +> **Note**: To use Comet ML experiment tracking system, you need to get Comet API key to get access. +> Alternatively, you can use Tensorboard or MLflow. + +## Citation +If you find the code and dataset useful, please cite our paper. 
+```latex +@inproceedings{ +li2021fedbn, +title={Fed{\{}BN{\}}: Federated Learning on Non-{\{}IID{\}} Features via Local Batch Normalization}, +author={Xiaoxiao Li and Meirui Jiang and Xiaofei Zhang and Michael Kamp and Qi Dou}, +booktitle={International Conference on Learning Representations}, +year={2021}, +url={https://openreview.net/pdf?id=6YEQUn0QICG} +} +``` + diff --git a/research/fed-bn/create_job.sh b/research/fed-bn/create_job.sh new file mode 100755 index 0000000000..c793ca3444 --- /dev/null +++ b/research/fed-bn/create_job.sh @@ -0,0 +1 @@ +nvflare job create -force -j ./jobs/fed_bn -w sag_pt -sd ./src/ -f config_fed_client.conf app_script=fedbn_cifar10.py diff --git a/research/fed-bn/figs/loss.jpeg b/research/fed-bn/figs/loss.jpeg new file mode 100644 index 0000000000000000000000000000000000000000..3fb646bcbed483ff78b7467bf3d57cf9ac148adc GIT binary patch literal 29518 zcmeFa2UwHI);JzTTou$snp718qzf3Nv#W$&gb)IuDV>BGdQlV=sbT3Iq(i6yLhpp$ zq=k<34pIa`6#hZnyYAlgyZ8Hl$5DuBipP{=?+S7sqjr z4DD==pWnF%>F(l>QwIS0xW9w*9|fN`GPW}$AZ!r+ZXyYp6ADW~fGJE*;9JMA!3ivO z3_ID|*b{Ib9K%R;wfh9vhydR*{RTGp1~#-o9^?BHaKtPv9FFrkzK%;wYHS4o6Q1V? 
zfAj!5fEqvsaPPSOgm=Paodf^~?EnBozx)Qxcr*&Q4!A32-lgjXO6FPXU`I!J%8@Ph4bgmpT9tQ zne+n5C6e>!FOprnbos|0$$q>*dWHPTkK_dS$Kym!9e+u5<{aU}A4$%W5JLZx*HJBi z?A&S1GxG@Mj(P1g(Wzrr{CW`)pFMr%hg0Xy6QbpQ1e_u|MRfKY z=^5fP$7qD+IZbqi_$=8Ka*7+^5I%vQRn>!ndwyWze}uGqmWZkAow_6SynvGVo`EgZ z&HHNSs9As;p7n&JXpZwB`YjtmZf6K-5fh@s$OvTs5D}j_bLlh@fatg!$GMPQxxx3d zD)}SZXT%izL6l6)0{7Gmkim%sbv+xNnEKwSo8Y9Q5dbM6(bHr^WB^IPVdm50kN=G> zvQKZ9KDB?j*XDnx+^47yIe0-w04ALNUPcx5r>EaVq}dzs(5%5UdUrSVeQf-UI$isn zB+tOz!(E%)A7E$`SmH4W6#oHY=_@x07ViPuOS|qr{e8x#0xF@N?d8P`##6;;fjjnm zT*1P$xyUVMSU-ND3_^=f!y=PdvHa3hyE+F|Pq@;CN}_$`(b6gmU~LA)eBpwo)im*I z;hy$0#@&A|u*r zbu~G3UF+@M-e`6UH(i)3gN)^y(J*`!i;_DSUmv7iEG-f*`&I&+Z`emewq_TLLr#|_ zZn_hV78`S2E}!^O&9@(2fL@J(m!sR|-s3VegVW?@3}kU$QsL$UrCB3BtQY)(eN}U* zdB?kFL_*^ka}G?p9v@;>Iq!a^0cjgQfTZ-vrB@APr`n~MoR5)iOq22*Xw(hrCNePGAFm_V8%)*>JnbPM zIPS5_KO9-CxSA+#i*OqWY7*M7(7w>0YAwd5E9U=RTf^}e)8ka03sAMjh0=V%?CIzR zVNK1?x^icU$_nsVL=FEOY4{jX+Jj=eY0b3(g$Q6@OFNJ-{TFQTqrvDeP?0F?Fg_~Dp z$JKawcZG6IxI;v5$(tp~U9T5h338Fd)I!Cn2I5j^7K^jju76?2vtMwC{x6sX%q5S1 z#O&OJ+;5vV7#1jafj01=?O?XP+NC0L>i@dx-wn)P$Eh%N4A&*8pvu^46$Vif< z&!lq^7MuM4!)+4SRgd6V9pTXZFQfc5Sd&02oK>vlhwFbQPh92PH0-t-SCH;bhO+1( z#N~4u*x3S9Hz%`d%ao*=yBycYu@`6Zxekz|p_Z1StQuHbX<=}am4tDlxR-2yE@zCX z`e=y?`|ScmxGb2#ocNxpW+ZEh6vPdDNqXbPS+I8yj?(ozJK}lC^XF-B9S{nf>Y6qEMvo}qH1H+e5ea{$< z0A}=o>XUPDVW7>-EB4DNh$BFAtn7>u_nuRSOA}_kq6Z(S=`TF>7$G@V&emq1z}j)C zir5mEeqY6`r-tW3MwFOxLz`g`C3WV5RVsXsnIH5;64s;;SaKC`R`YKJ`~sXiw0?hR z?0tNH_=l+95dDq-l+;N_fFJ|jPwIMphd+H6)BlsDWVZg$G|qI!{IkJf^QniwrSd!I zG*Ci8+)(0{W`Ig?w`RBRe}H&Q<10A$u^fB-yBY7zW7jmqGv>=~tE)FsUVeR1rLOvp zBMWFOs7s5Bb}2*5lq1)Ynun#)87#Pq7^w524an~oLy*^71A%jepO(uZ{5wD@c5!rr z_4W7mtCPFr&6iB7RaXyIfC<3E;DBta2_XY1?XJgp4oN+;0FpPqs}11)6^;cEfj0sI zC`062A_s->#P*Ob=_GWv(5#|&e9rn{s~0PN5V$6TX_+tM1A!VT&p~^@Fnge&gCYMU z+Gj(4%sow*l2Rm0@$&QN@~HJxAcNh=as~I6R^jAW0xdNsnaylt#=zjKOG1e2fk9mz z$SQ~J+@%k=<|%zi?qHt<1@k$}&GpCx-0)N4#otr-v}DX@$tZ*4dnDEGqkem1Is!C; z7HeiTf92R;zKhB%NvE1k!wh%8c{q*~Y&>!EK;xJ#v2Ji1? 
z3;+P>XsB>CyPc>2X|VQf83Z_q^~I^65o9sA;!Wj>{@MSR@kTWG32kdle9 z1W;FQ{6?8$C~*D=pfay{@*sZt`^dk&ovYbYb>-cs`jwX7NBpX!a(x|NZN6LOuatY~ z-M`PZ4thq4;8jkF82%`|u z10Z-(Qet#hNG5L=`xCJn++0p%xs|=z=iSqjwTUfdqXkHcKC64UQc~7Hn#u z1-PNFN*t`kcWT0o#lsM9D-7DR@^izCvjmomEd)f~7+Wfd2&N88c66|a09!qQ>kwp8 zQW9hByVDbwbxlAfgd}qjQ#lSPZM!2_%RKoK>7a5;?wj45Vcr4y*NCO08qP=wjpz}5 zqJf25CK#zW&H09Rrbh>}_Cfn=m)PfAB{WmrZptV&SMkRVQ?}$bLSeag9E%ng3QdDF zOQsL?6jC8m21#2Zp~X277UKtkoK$SsP&FA8kd_ev}TE7n91iRx#Vh2hAn#cl71em zic;Uy^%C>*JRq&TgKNm;X1lbRt!OTB=O;ldTP`8+JP z7o_GF;;YLoTDiDA}f|g3QZg%@0zGsFSp4KGDStwJE`Vof)gkvQ?wnxd;&C`B*O*x=RV)k`d}`!*t_XYTXqC!9cA9H z>DzzWU6It;A2%bh?sr$sDI6ek0{EX0Q}fhUstT+x7CNxGJUnCf1#mw3YVIl7U*YrF z1kobz2$=}5ZWc7A%dzET7yug$F`%RMMwn*8^kxx*UuHj#&jFa~GYc<1v zvaT*1K4Uqi2QJ|KfT@OrPjLlb!-Rxv7LEXH76D)8=d-`> z_222_+p$Q_%_g?O%LkA0GL;|xZFc+}a8Y@?su;9VQT)*ei{-Cw%7Ezb$M7424sC+G zy(1sZ(|{cBEVt7&h6wo&GY3`&!z>8mHuJ4Xvn78Y8br2i^nn_~VFV<1YZ} z*WVWC{}o~WxK906q&`23SJ}Vy6lpHf+k?fPS5o~nR5`}uW}_q*Y}u4J>CrWhQE&GCBopCcIvAX?EKjikHYCAKeC#9!? 
zLMDlW0}H0giyE3NeP~kGPIorSU%0;J{MgF#0Qc&f27Uh4h(W8}21F-IfH`hdVmw9V z!yR5ntWt`?XWP6B95x2trR@2Uh*k%S(RNa+3>omCTCOgGy+~S`hp296*Fb+|nn+{i zZVrPx)aqj|NI{~433eDy{+}EX-?<0O`p>XCevg~&=K1uwCd!rbx z_C`|$W7zwg_;?*k7&;>dX(Nn|9?5LUaH`8$(t&mI(&9FKEEPsC^tdXA(6iENTl&dQ zwiHD~yH?z5fWX19E-R$Gg@cj$RNf_Xn?cm2!R)c*U|~ZGjCh`55B(s}76h79*HgV| z+fJ7h7U0I{I|yv`<>0-*fs9r!qUwIWkYkKm;wC8?fU20x&MDm+(1C35k_<&oWAaY3 z`E|JT3B?D_rWu|o84{R8{GbUqd|W9An9$9_E!ATOvUp|V;J7D(tJ>g_?c$!Hmv9Ht z*?nQeugB4xot<+zk22~a@O6&7TuN2|rk`WurWK@aZ|jjy+)bmpp6D@*`I@YS+?wyL zHQI2lUwWz&1LCFANunEdQk4{P$bWfv^*3gveVOTKNq4qfyVaKcp}bE7M|FqY_5-Mg z9n@3Uqk@$B8eMF+?g27*Q_B_>YUxI{JTFHP$vhR5^?b&ajVMS}RnaGH^wJSPYIMaV z(l;EKy$gb4X`}A+!7UubB7qLTmBqXp^9E|-3|1pC`3oZ`)x2I*%;$`$_ocX%rKyR> z%z8}+o6N)Uj(v;Wx)U9n)R)V{5KzHiIg`m5W9?ZyJg-T610V&Mh~|# zH}pP9a1zkhd<12s&J}W_8`Q|JY70_S!tG_CKnQ{&t`ca~*OR3{!*tl9Q$oZki!c!^E(WdJjRfavsFk*86*u;zrC26T(fT%fHf5EC z=de8IS53KtE%YGElru_wIW#M$N}ib_eHWHosopyI*(@K2F`;7RayFVZN>ipEvb&~B z#}JG%gtFLLvDa2EI<4)@F64g-G6Me$dN`o7g8!wY{v~9xysHpzW}D~E*I;@C5V^}? 
zY?3US#Xsiv9Kr%phe+zgLj0jAzPL6ib(u#wWX|BRSIqIOtfTNj8Ja5~l{xVo9cP`f zCuSg3(o#qNyLfl=LPTdOw4nlC@@yHk9jU7!9mVjq~;sl$w7M!5jaE zJ6R4$j0AV8JZMB|zP*4JMn`h+`a7NS@h&nKqsd7t%(gYZd#|_;!_6+nI5MDUANnAr z*jQmk2cF4SN8YBW&zP1MMds2!#t+}!SdUn~J=nC~X*ImruN5pqj4^3V)f%{@q!>ot z;c!=?dnklyjxN<$< zeLQw)Mvq}Lxm2frv$o5AWixa~Q(Led|KTQ3vfRLnOsS!ja075C` zAHw!)O4ou;=Ic`@q~or=W}#9KZja@6>!Liz@Nw??qoCa>YDDb}Qi z!`j;1jJ^GyQ+tYJlOL8xzgtt+U%GqR;m7-sGoe_rV%aoCZ@GgKwxoI0C+wAEQ;nRmz3nt4jo5-Nn=aBqcH<-8#< zBDR>ds~q>vXNA1pBR(YC_9@^&ECBFtKF|N5F}}QBC0THM>%5wlO7W4f+{Oga+Bx4q zzcvXv9CYD`ZPQrN6P!?B=P-H6o|PHD4F9NO3v=~ac9U%8;N{?;Nmj1RZ+EaPs(?ye;i5EpV>8zFpuU4XKW>}H$+Nnb`pQy%LCYG_$c~0mOT@WEuUVP$Pe!%yO5pY3 z{e@OLmLj)U8@PC#k!E55?<~c zV>vn1hcKQ3wxqNSW4#dV5ZVG-b2?A?Jo{_|I6OH_wIz~0!0DApbk-GIQ8rxw#C!mD z*D?ZK9u|ga3caIjb<6t$lc|wi!3aKyRrmrr=e||fIPKc~foF4E8~dgME#Zw}j+`9p zv4o{xO5L&)X`B}v|I=d7b$yyOIOON>RWNCl>?*~MAyxLEA0M9>;8fQ2KQscVL(QWe zJyF)AmLl5K{>4aPbjIR@pP$}fc zkw*Z9zE@F0>4Ks+&~G^}RrU2w98@quFh)s9pjIu#EdO74!Go*zFu-=ugB{F&ahw7cY{*^iG z)U)dTh=SbOpa>4M7pI1BVpz~1+ktCj46}yjO?5s`4i+QDEr|qP$(P*R zGG~e|@5SCMS1TS2#}`_}4C2v*~0} zwW3zFsX&?LB8&VhFito`wk0#RnR6zb7)5V{0tTBTGvdp%gZwbK53lXlayoNyV*Iaa zAFt+{-`sFB>>uhKD?dK$R}?gXIa66>B2t;|0yasrrr`D&~7+<))qceIvbYSTlhbNd5z zpGZYAapa~r(qCN}eQ#zT8+*;m`WpAx5ny)CT&v%)DnZo*_G2*|o-t05jkLjnrxDcB zgINUEo-(MXu|wj|)CY2JJpJ|3XtLVA!qErL^*#Y z>r=85UY({}+42MkIQdHR)W78O;Kz3e77?2#7Y04FD<|xVf1PE|{;xLi{|tk% z0Po1%Jm>ux$-#?y_pySo%KFs@^6-g=(8YhMb1eu>8)?oOBNmW(xcNVwWd1(zD&ony7Boyt$sEKa zLebUNP3lMAzh#)YSyetVySxArxOwbaDWIsxVW?*ALyOxA-*Yaht2B7j6+%Ci37+7D z&9`5SdU6w`Exz8VHX7h~5z>6NAeCUf*z?d6=A742Y7%A+7i=dV*%c@2w1X1labnP% zgc2!)Oyc~sQIPC_;m~dpwo|=MRn=MbL0_Dab_A7FMg?F0NHHUiJ^o%<7B>eE)z2_ISYPNR$(JmYGES(`$Hgi*9mPfbIqFs{lNG1eaq9*th;Q)mjIH~8sP z#ZP}vrC)u-zVFr8pjXjjI!?m2SCvfP%V&Z*3nD_M1if>-3KWKtLND`00ZH4f#Xt8_ z+J!$JG`VjprBPljJzdH}yTxQ@J2)pU43mj##5FYdK0I)YW3I^Fmr`8nRuax`ee`JB zKxwnex2f=Luh_f+a4xqpMm)hx6;^Z`rr~g(EiKbPjv`>v4*Kj#^TS?CA=V8yE$*p8 zCcE7C@3gq(1z9=e=RS3C&o{TjJwlbqk?HZY8O-SPpaPJ<71@b_M*0CoRq&80cT}ug 
zESF~Al}f|V_mS%fZ4N$@O{UyeZLgMsa~VoigjUGkUW+y7Yy9aHj6k4&ne*n~x6R)> zt0AZcy6kd@E1lHR=r~3i_VY9gUT0fhl3E2R+Ul>U&{HUH!dTm z>{XK=kZ$YmFSvJj{%RJg2p_#ECCE%;HQvs;+1E^*PBidFQdH%RnuzejAR)$PtSYSL zP)C+GtomX)HG7KUX7+)=+g;OYW)GowzC|!l5#B^)t>ryswwqkBCS%{V4yI>gy~sn? zh!`tldR+r<1jgiQ}9hkqo^KV;gojJw*K<1uH^cnv|hNFRnf@};Jx zuCqvN5$HT;R(B7zU7y7)QE_`V&I3glMBb!tykuHz> zV_G3{D|@B1NuwXjC-vBHvjgKWS;nona5lANL2$dfc91WsTI>M0hyC!$`~wbJVd7dv z`j~$o{!j*M9boO$7--*Fv74Mc6FrF4+)a+2ty2Y4sNZqFxnZilPuU&3G@#&H@@h{V z<-}w`pH=snWFB%iGQuUy~%BTxL%HTF-fC3l9oNL=SymfK^(^o=KT zqnk70+Av83|3>&+Gg|~spVH;_-Jg^q+xqxYOJPDVq2@yeA3R$ud;5G|1il=hHKR2p zdC+Sphf-_QUFkH`^Mwv*?kKPtuTX4?qfnYg%2eVgDGjtcmIka>lIt%RroUOiidNts z4e)MRF@V&qftv4flBpK(xkSK_96w_69HbnAbXTSn7-B4B(Ji{P;^qh&DTqD4@a?%R z`ZBqNDIqY>okW_-S~iN%b^4O!s8Qz7ZiJMZhXX9+M4^-b)jG*?s`(kBi zMgTv&-mX0+4(Y`TcZ&+D)M9n+@Hf~rcsZR&5dXYmE;Ukm{0TUW{L>_|skxzj(3;9V zaD8z_ppDtRd~(vjgd6jcw)=zodsJaP&7@hOHsg=kAmh1`rCV=}Ina>dB1A*;|Zd+d9RMvuCT7&+!9N_ISS5bI1yF%!`R<`d9mEk=Q9l|$<7 zmn8P$`p87%3TZ^=rg^hrg%z4{hMF_9JAw91dr>c+5}O~F{6F?DA-d+wjYd=%LPmi~ zcEbv^Wz5!L%!A;oZ$)D>`nSwW({B8>oFR`B^QXBjh<<&K>v& zWJ{vFYAG;4%+GgO9i&Lkk`W1)PD0XbTpGFxY@dQ&sARfx_+iJkxvAKYYN&WdAn!xP zYk)}E4c+@P<~^OJBNRRTV0At%E)_-Cd>&JDSfCkXY{ejcK|Ab<`U8BKT2o#KWy6_f zP+0qCGoR^-YcxQc<-7M2Er-G#rVH5$xHScew$m<2>Cg{=2=m&kx(+3EhfH+)$oADC zIt*t+O0!6}{mz`|OsrCrPNO;b^T3)*W3ITZBBy3$+j190r@Bl{!STm_=8edAitFKt ztB^v=ADCtbrTg)Nyf-^l*oB>4l!}%jI-mkd8RLBA2F|$m3F{%WidGR~O?tzK)vmp` zO}=`=Y!3&YanZ?N-9&zM8*e<1KW?`_yZ+m}p8qWIldwN=_*xtlI)!{Xf6JIE{jP-0HK~6;r&(lYt$)AgzEX<$>G=>yDKO%->>cD zLJ4b~4%ix0O}}8ON@<5lg11SvDeEYedzGX5lD*y(E*oZ~Wf(}XYnEa=_0)Obz!Hhv z#+>N!QY5%XT^{uqlE6$2%5h>^R_~NN7+nL2efGBq?38+YIlVV;e9+%Xd2ddjJnz2s z58~V9VDh}4l8L?-ADWgH$f`R_uZotqvGbCor#xnW#F_P=S(GknlNA(3wR)n(eM={2 z`_nAD5VU!2K{;>(be1O*)#S{hnO?rz3z~;V025)RraD}0<pKsr9@a04F!G~dO2nl8K)jh|SV%ry>fwOae7yr|9FW1=f^qmdjdJ)pAe z5s-5mjx@F4#2SpZ=rTk|<0X{rly};rO+AL+>C?0ys<#p4UQ#Y9LHLM5SHvorU&wgv zFYqq}#9~*5A2oG~$Vk%62+WiVOPzgbE|^!BgCw$!}T8l 
ze4KS~lR7%F8P@5|z&to+O@`o}o|JYg!xR|4#WW?$Fm&aBVp$Spns+*?sn zcvf-$4up)~nT4Po^t&-OGZD0dMoLN{)dx-50;7mmKD2t3glQZjmfmI`8R=!pJ+$QW zN~Cv;Fx`ivY!_&8xbE9ugw%f^0R5nb$KG1i=0^_}-R$gB#Vw^e@hE`l$>X5U$iozY z4T)$}^J6R=6lAWA%z3P4%v^xP%FuEcP8ZxU1nV>9UC8XMS_|IPBjK2nM)ho3W|@^y zU!IjZ0?ZXsAYIge*A%MvMA`8I+g&+E}!=99CT%d)hililqSLMgBW_RQI@NCtXP3V1Fv!3 zt+9M&8M86yuV`On7kJ(W1#V*PAnjL+Ly0?PHY<~p;|pu#Hhp{+=vm4BL^cvWIdPyV zi@OhN)gA|(JTxR&1cnjU z&=$dxg(R7bl_@E@J9;49Y=8}2OU`v2(v1tR+R#$?iS-(5pqrKUE|Dwng>1Y zO+eIVC&34&kU#$Uu=d}#+ooRsYM$bUC(b;U4h)p(%+m-sI?=wSFD{crq(yTI(y2w)6>+VUL7a`l5$snKe$Il3shjN?>CXZ; zYlFqfkTsFszJ2M2;;^-bw?FT%w{J9h`k3Sg6Sd4qH z69whYgX1T0a3&9SJ{e>9=W_^GC80v{j7z*hL22N@GtH-6)#+4_(fs+$sXrHexYhf5 z#4VKMw8(K_0LR7jvY-B^BEHzlo5IfajitPgypxUGG1-rZz4Ye4R18wx*>XqSS(Gpv zyl=3NN0-H&Jw#lf?-^`ez4>KUrptr+X5>4{qJfkeXd*Z5xt^;duv_*T15A%*x^i?N zP(3bqvIBC1b^JF{_)+)lyCb?-R+{}0d{hTLEdShmU9H7C+nk=PqcdG=`dhlT z|Bm7*LCsuNz8b()S5BQl)>|2sw=&zzJ-RonB|nAe$|Wq=Wv6rOWM8NX+_0iOZl{Es z@xg>%UB+wJ1Dh{9)>o3!*?CO4N6lT8J6GyG%vZ>}#Af8YBeS@pRvl7j;NdgZD(xfZ zB*58 z)SAUk=B}H86C${+1omD71DefIYoLg@K!I?8T$;u__;6TjvM>-?b$<_WA;-0!nePR zFeG!fAQ60P%tiGA$($6WPx_{9G4194rOn7)v~bqiV7pJy7g~o7+|K@mL(&rj>95(b zO=D%?L)^gy7`Q07iz!UOcwdPq+Ii(sCcE(2!$`@56gR`lb*NSVcf}HTbT9uWw{JZtl2b?3p9f(jp=Y+9j;LmVr9uv#Bk* zXER;qd`g|)P7Dy(;U*o8f2ezSY3P!`W^%ph$VSo1Zn{11X#0ZcoP9%$W}u%0A7gib zp-Lg$Tp5w_t9oWLSPL&?0-HTN;yrBQ8?QsDHbDsExI8fGo{nDY41Z5 zX1{Rqo4(tx@V_xEew)Xy2lISs@!*WiXRwZIM*wE(XD+pC-}I&bu_5}~vV1#S{cg1S zUBt=8f-e(?Tz`LUzA)LRF~6=K%>F^YU7*LbqwZrGUOk|%{boidrQ9yR9{v%sW5_sndjAL^=7FW-d;S7L{mJ+}=*#yIgUxcvv5V(5BGPhydw} zzdX|+f58_m-8AB&=i3<4DEp`}#}(L4PT7L?3ru>75R9=kVY&{uYV=L*zJmYA!HJ&L zi^DtN%qG8V3LLZQrV5)!Kg=M^2^r=^n=rl0EHDvmiQ@hWBu9C?h$bE$BfeZZzXfgz zW;qN~gjAAb5#N2F6xl9)r-{YXWV7C5Ohq%EAPOr=HEOwcOyYWbuyQlkmJL&y*|*{h zOlargrmoD5sIiD?d=?`|Go?^E_sQi2(J=v`Ll%60BKi&H6=b(-h;?R~s^O)*GlNt~ zj1;L~frDH^G6o0h;s!jW;+uU7nxYuOB1|Ezrwtbjc7ECu1F1k+LBfgX)V0JV{?n`3C(El56?lx3OdJ7*;!u=n8)ux&Z>T1*-|>|L1+ 
z^ZlR?z6Zm7(#4%kz0o)%SFsX;rA-kGlVRc;%WSJemtNK%+Fuogwjiqvt$rGyn-W~Y&H4^zqA~N32B5i`oN`q2k$>s%^MFN}h#V ztZ=%rH3mMjDzFi+5D)aA@ZfT%jynP@l}sf}VTr&_FXYY1X-U#uIcW)|P*l4c+2aCE za6*R2jy_Fv*8Ry5tc^-1jAA1xPcU8B1v}McI3-qeARu^v#nvb)NS-w@9P!SZz#=}T z(LDqLvo?~|GV(P`kS|N8%RVlp=?Lc=St^3VY%dj5vJ9k=PpmM$q6Fz)X|o%+Jqrqk zOesTtUg@)rsju9ZdXkyB%%>mQnBvlL7+Gj>Hhu-ZtE)9iIh&$w(B43aH8T$@__+b) z7>AxySJD$8Clyk?nB@5;5o^wrSBOr^OpnUgm7~LeK#JgL$J)+5Mw6Hof`NHl64&gm z*zAUqP}?J;9*1|6NbemQxTP_<#XUW?F#kYv6Lo8xD-Us%;8@b7Lq;Y9f&Ma(D|rOC zvpl*jSfYJdb}`7=AkhcHWF4i0-lr6v@M%$~XOP1CQ#dho$7w;V+oPjknf~^cgQ5{I z4kip`W)?B@Q6_|9^tva9CUe@eGCmy6kj^*vqEsEZ*22n)aFqn=+A_hDBfvx5-j&w1 z7_6V_^U=U#&bm)r9l-d!``5m9?oalZ^XB#PN-fvn%*kvaPoyT76Dfyg#V81l_@o;o=c_G=mmkHJj&Ls;&&Z^j+kzsTY-|AROKi+by*_yiL*3XH`+AsB$&lR($gafGOiK ze`f+vlbs3kqt8Z+RB|H?d>zfXG~-qIQXT2inX(KESBkP!t?tGyUe8VoY4bH9i#k1&HZv&B#LC30%>v9 zo7sq6m#o#T{o#FXc8@H9P1E{5(MoQS$GJ-VSKF^_4&U`;tL}>1Z_y1B7wai!PjqE6 z7;KuA0Xn3!(ho%jo^u%RF=}wKh@uP+sNsKbZx+-fG^o>S;FHBFmO8tjXj7X^E&S?L zL~Nre{LXl~@EqA&fpXElDWgq30kVini>`{DIr@3Gq`TdAbG=7^g*EMapX73B{mr;) zfH$plGRg6>zVASo7%hTR&_@N7ir<%*T_-j){pOL|Bogw%+&c<8=3K zt+?&Eyh}kTd@)V_?poNp8h54ay~i0{ZC6BE0PKptHuk?68ZZYS}=YfEl$ zil;MUh#I86jK5Dl_OihiZ%pXYczd(v$7bD_PI~*@0a5;uc~&wFT*5X%O#Nt00`0IM zoUb?DCfY1L#1<^?8tt@T&M~dSk`^Y!IIx#~IxY1^bNfWej(<%_moQ-^$Q_x$K2|}* zrvM(WBj{vqTo(;i^x(_BWmZ)S&qDEQbo}E%jT-k+=fxB{Dl_W``YjgR zs|})}9Yc=*L-DqJZI2rGBC27Z2~*FOVJL3f2k?W_uN5Uc|NHD7-z55b@E?gQ$JpOr zfVfIhXUA3Hs9wbEUhI%X`-n1GdUb4VAd`M}YxHw&nW%!`~YIio{|77V zQt@_H(3pqEjE$aRv&m$1;NCwR^6LJ zkFs8)-&pz*YuzFYBH7Awh>(wwv;FhAsVX8j&KjQHbB-^J*sTtGQsglrtRg{PAs2X` zH&9Vg64K!&Dg9Q%V?}Hx;_BOMW&5inIW8&1TEG|tSkP(v zscU9x6f6{3Ais;rtY6?c_fk{neRBihB#foQ4i!0+tr!cBAn^tt+l9@VgOp~?crd-r zv!{7AgB+bY(G@)=*aJazL79yyF9lm2+ieEk&w)n$2+S(t$$+vw zUi4}$UiEW~;^ace#}`m4A`^ocys%2{BR$S8p)1c~>zm`e$E~wO)5p6+XEfg-n_8zQ zrQgziHhar>2p$(vv8x1d$P>KY|0j0m{>FC4(EsKcPo%&q2d;_+AW&B^%zi-Kx2!x` z798roambw$Yp|+I;bu7oX@IYt&Gp&r!?RK zIr*hR0X&n(NQb1X$%gq=XXkKlR0!W;90v(iQ$ZgIe``{*ve|Vb_ec+Uwj1LBbZ9QU 
zMem&#B~=(U_%^|MZIYOy4Lx0tMKBL-zaEH%(r;sIKbM+}72(YUmSn8$a-- zV)s)vIxn#`%a77G_lI(tX=IXfXOJ%nv+CXU|AZ!6M$Y8t45h=Wii=GNcU`&mHgzIq8}#s#rL62PkXwkl+^POK zehtd){G6rnpU-TmjquIHzRog%Cq{B4?Pbj|J**ahbZ*B?E#$8`*W-Lr)5A&-Q(|mw zCay)emT8R;zgK*&%_8smnuMBTrR+^*|FFLw=JW9PZ1uOd(_c=L`-dl;{SE&c;9qpI z6W{K@TTI?X0dvb35soxIWHOZXa=_bN!@eK*+}G5>*DvzjUh>mQyD5d+irXNZ^0TMm zH`Re_78>Uk#sl{%g1I7|j98Y*pg8Zt%hc?*z-QIj_?hwu+b){fp%kkPPb{Jqd^?Jn z<8eh{Yr0peGCEQ~_{CX}=B!mJVevNZ#}U%UWyZtBotHNbG`wibU2dAF+wsv)FfzE4 zEKD)&DC$&y%K7kE8^Jdx(eF0TyqvRB$ zWlK>oB+5K%nSBs89S}l43k$p5Kr!H(H2v801x!=~Eh_O*t8je8M?Z)nj3hQS$Og1& z3OBOb*W_SAG|>N0Pg&P1U_0Ec^;VzLW@BVHYEdP5%`bb7tq2uVKH)GkQ-~!SD0%LW zl;N@rXTz?#GDpwn5=_Jw{MwM>mv|+ge$$|T15b{^`VaPNz@L!6t_X`z#apfv!!1Sy z%L{^|<*r90uCUzFlvDdfEgan#)^L7U{Sa$+ns z#+Q8%vv6i+EPT>fUXq>i9qQ^*5DKCbUWWGb!|08B#g16kXcBEb_R}&O8Pr1`OmHeE zR+CvA)H!mEPzGbxg8ZZDgmuXc+N4@y#MKwlR090MY+DCD*7okXk>W?5&M>w3_*=;E zh+SAcKR00SXf4q!*nV2rL6;$>JHH*hxH|FpX+CoVq-VJDfV4ap)wwW4^1m5_;dFeuBOtx2vpV6Nb?*zA2^Di9zt7`SjNez6&cW9;L z^9AT59!7O<&Jz`%J86jqo~-0Tm)+B~sFN!4FPDqnO?b%l7moh*kh-rGQ~c;Zd$ksK zXfXAu>l)?6L(zx-K2`Yxm47{ugT`F3CI2rzkRSa{MC&Gs{x6xp=O zA4zb;E9DXCQT|;&@>i5<3hq$Etz0xv@L>s}ODPS<71M3#@8&9grW=eiK<@d~&G3P{ zP(q-`+UN02S>2xQf%T8y8f7Y32)bQK)CuT>1xX~p zw#Bu7$y|`(fHyza43slosi1OXTay~xj_aG_cc+UiLcFB2-8JEU=lzz^_F^pq%yl3u zP+TFolTpZ6MnstJ2yp8aZ96F3odm4+5pD*{@Vhy1t|=++ELM@kafNbZ5XtSQRBS)7 zPNWi3|9C>{nu^iQ-GTe`0h-yRMw#=3y*INDBRA&gdnam&V?6d^!Y;-SA}sJ&J5`$< zRpyAC2YyprMO~4OqX~uN*6vLhMpy2=y8>jnERM`;iqLWLlY{Mr`Gc`ULzzU5^;6EDB~d$*0L`CY2i4F|`>a1rz< z-CmBmWvjfOa?Xymt}|g-PJk+DCQt49jz6BQbISZf4%t2wvHfPpJ@;Cd!O91O!2YP| z)<9$m!M(C-nBs15Tc)GXBT6G{oyUF)%7wRncdjTYgHmrrSWpTCdao%gBkERQdH~_{ zhw0_1S-xOr{<3v=3bmC82m)yeF3_Z=d{4 zXkN^x%UT`Kk=m$(Laif!9e4nV-?g2Vcn_28*D51YHFW0$mk>v<;2)M3B*zo0Z=*T; z##+v*OyAC4x(=;#N?yq*eUGz$*yFcY>~oui z>9jkaW17c)_;#;4Q943%SEutkyPp`_s{1y^fQ$gy!AgVU0qiq%3Rk-FC^$6cPe9p(T zPSv#WroFefw+}g_l}%TPlv+8*BkJv|CN$^{)4u=CxreDxel z5Nh;ZYI|rQ>`Haca^+rI?!59PLz6QeXJ>bdD>zn~U)Zk9OcYLW&3APpEQM_28N@_m 
znY?Z>TiljvnK5{X=gaW+nU@lNs!f)*2olKM|CM6!u!+lDO@k66Vo)`KrxL^w{NFSp{FW=>|MC2@N1I>dS0OGwtuL>zaBty3tGw@A zYFa?HuN*Zy<#MDAnuBS|ONePB#_|gdPDU9YL4EdTp(CrDlp}Gb%60?FhIa#FhS$~v zhXa`PRC^x&$u5R(OZI#CU!=+YuPko-zNUYbHy2Ycgh(Rq<>#X&oL0Rrv#adYbCXa; zmUlAd+t44dy_dR-WWrwaxE{k%8xT(<{kz~ z#LJ~1ncXdP=9*b3CGTr!6+0vqt(ZLkL%45tW~O&b4bG=o3G)k3Z0bbjUQ#mmjXTd{ z*%Q6l?9Ng_1zsD9ib3@{&?D6GQz4!TUTScNG9$72Bp7SU#4LvZ4ouhV4Z6L%n590N z4sR(|A`UZEP&IjzZDE%~IORmZD7y=xQG6H6EZaxC8Po-l0g}Hb^G~n1YlVWbFY$bi zIa7=*|$k%BVYG+%U%5>QkCt!4kL*?R}@{}p%kUr8Ns7`Jt@ z7j-+m;C3?WglPfEhJ_2ut=xoC)9JiiATfnRMJy9RSGiW5Ohv3&NML$SfDEpJ5?&*l zIcIv(g-X#KYC27PCDY#2u$T1*xS#jy^W*3H{PdjXd>>cwFN<7eDfQO@gHdvpyoR4 z^m-59bBERKP;#@^uQO_lv|QWSrq9Gxh?{MW?_O07nQr>cg}cw)7^4ZTSIpFq1#(wj zr3D62Ws)wFoTX}C(t7?n`W1Cw^O$Wc{<~DN;Hg$vlc|(LHkI6)usdpx^RNk z%G#_#IPv=i<#T<{W|V1<K(3{Gz^W7tdl8jqV*+h%Vu@-JrUcPSaB z_h$%Nq5jDM_x9|k){+W+*gR7~DG~%ECHrxQAWG8i&(&~89LSLQluvnL%XTKyN-j#-+s(=mf!>&E`%*!C?OZ52dhkFvf;4^VCkQNP$4**AZ?e>UYmW{=H ziL%rwo2VnKD1qMpARxs#29SM>j>&nB_RRPPm(e3Z>pA4tAU#{eO5yfEKRQ7G`Mv=;+vNYELH)i!^eH1QFIefstN(yjut za`4d_Tn3AKRMm2ZbZI(*rprntB7&Uca|@1B6K}V*3;locJ`!_?s`Cxag~nxQYOjCo z_=b9ZCLE$C-w8dT#*^*FO3yIb;~Ge;{yjeahScU+$=G7;mV532dWAmaMm<- zQmgfaD=SQT623EoB)Tv;yo4L)CQ9$QWUFsA8*n^lYCqtyefCsT9_iW$e*nQ{P)MDb zkb(t`v5T=n^%iOaQSJz8+A;ralXNt1hB=;wZw07|Ct@J( zs{pl0D(W<>M-^yXNcD?gaY-5QO3+lzB&K)A^v()ozU+j`uqtwPOT^uh9Ghm&fHG5j zBpPjp9l3@X_RNaS{6~aqUN&6IYrvsPW+MBj;b?Wv(}hUoJhv-qtz{a759vN8iP1pO zQ0O1;2q|W?_8{;kDwYfd+h9oKeBoDxW;1cBST4HJ#+nWq%$L8tm(Qk7^e3siOQ-0J zpzaSYZswHZ_LSUBTitgY?yDcZm`n?w--&wr1C`Sc4ceSAC+_#-Klrt`G`x YnpSXN%ibmJ|Mm1_G0Oj!w)8{Kzh?g3DF6Tf literal 0 HcmV?d00001 diff --git a/research/fed-bn/prepare_data.sh b/research/fed-bn/prepare_data.sh new file mode 100755 index 0000000000..d01ee2a3d7 --- /dev/null +++ b/research/fed-bn/prepare_data.sh @@ -0,0 +1,3 @@ +DATASET_ROOT="/tmp/nvflare/data" + +python3 -c "import torchvision.datasets as datasets; datasets.CIFAR10(root='${DATASET_ROOT}', train=True, download=True)" \ No newline at end of file diff 
--git a/research/fed-bn/requirements.txt b/research/fed-bn/requirements.txt new file mode 100644 index 0000000000..eb6efcb233 --- /dev/null +++ b/research/fed-bn/requirements.txt @@ -0,0 +1,4 @@ +nvflare~=2.4.0rc +torch +torchvision +comet_ml diff --git a/research/fed-bn/run_job.sh b/research/fed-bn/run_job.sh new file mode 100755 index 0000000000..e7cd2231e7 --- /dev/null +++ b/research/fed-bn/run_job.sh @@ -0,0 +1 @@ +nvflare simulator -n 2 -t 2 ./jobs/fed_bn -w fedbn_workspace diff --git a/research/fed-bn/src/fedbn_cifar10.py b/research/fed-bn/src/fedbn_cifar10.py new file mode 100644 index 0000000000..e0747941f1 --- /dev/null +++ b/research/fed-bn/src/fedbn_cifar10.py @@ -0,0 +1,145 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# (optional) metrics +import comet_ml +import torch +import torch.nn as nn +import torch.optim as optim +import torchvision +import torchvision.transforms as transforms +from net import Net + +# (1) import nvflare client API +import nvflare.client as flare + +# (optional) set a fix place so we don't need to download everytime +DATASET_PATH = "/tmp/nvflare/data" +# (optional) We change to use GPU to speed things up. 
+# if you want to use CPU, change DEVICE="cpu" +DEVICE = "cuda:0" +# input your own comet ml account API key +COMET_API_KEY = "" + + +# key function for FedBN +def load_state_dict_skip_bn(model, state_dict): + new_state_dict = {k: v for k, v in state_dict.items() if "bn" not in k} + model.load_state_dict(new_state_dict, strict=False) + + +def main(): + transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) + + batch_size = 4 + epochs = 2 + + trainset = torchvision.datasets.CIFAR10(root=DATASET_PATH, train=True, download=True, transform=transform) + trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=8) + + testset = torchvision.datasets.CIFAR10(root=DATASET_PATH, train=False, download=True, transform=transform) + testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=8) + + net = Net() + + # (2) initializes NVFlare client API + flare.init() + + comet_ml.init() + exp = comet_ml.Experiment(project_name="fedbn_cifar10", api_key=COMET_API_KEY) + + while flare.is_running(): + # (3) receives FLModel from NVFlare + input_model = flare.receive() + print(f"\n[Current Round={input_model.current_round}, Site = {flare.get_site_name()}]\n") + + # (4) loads model from NVFlare, but skip bn layers + load_state_dict_skip_bn(net, input_model.params) + + criterion = nn.CrossEntropyLoss() + optimizer = optim.Adam(net.parameters(), lr=0.001) + + # (optional) use GPU to speed things up + net.to(DEVICE) + # (optional) calculate total steps + steps = epochs * len(trainloader) + for epoch in range(epochs): # loop over the dataset multiple times + running_loss = 0.0 + for i, data in enumerate(trainloader, 0): + # get the inputs; data is a list of [inputs, labels] + # (optional) use GPU to speed things up + inputs, labels = data[0].to(DEVICE), data[1].to(DEVICE) + + # zero the parameter gradients + optimizer.zero_grad() + + # forward + 
backward + optimize + outputs = net(inputs) + loss = criterion(outputs, labels) + loss.backward() + optimizer.step() + + # print statistics + running_loss += loss.item() + if i % 2000 == 1999: # print every 2000 mini-batches + print(f"[{epoch + 1}, {i + 1:5d}] loss: {running_loss / 2000:.3f}") + global_step = input_model.current_round * steps + epoch * len(trainloader) + i + + exp.log_metrics({"loss": running_loss}, step=global_step) + running_loss = 0.0 + + print("Finished Training") + + PATH = "./cifar_net.pth" + torch.save(net.state_dict(), PATH) + + # (5) wraps evaluation logic into a method to re-use for + # evaluation on both trained and received model + def evaluate(input_weights): + net = Net() + net.load_state_dict(input_weights) + # (optional) use GPU to speed things up + net.to(DEVICE) + + correct = 0 + total = 0 + # since we're not training, we don't need to calculate the gradients for our outputs + with torch.no_grad(): + for data in testloader: + # (optional) use GPU to speed things up + images, labels = data[0].to(DEVICE), data[1].to(DEVICE) + # calculate outputs by running images through the network + outputs = net(images) + # the class with the highest energy is what we choose as prediction + _, predicted = torch.max(outputs.data, 1) + total += labels.size(0) + correct += (predicted == labels).sum().item() + + print(f"Accuracy of the network on the 10000 test images: {100 * correct // total} %") + return 100 * correct // total + + # (6) evaluate on received model for model selection + accuracy = evaluate(input_model.params) + # (7) construct trained FL model + output_model = flare.FLModel( + params=net.cpu().state_dict(), + metrics={"accuracy": accuracy}, + meta={"NUM_STEPS_CURRENT_ROUND": steps}, + ) + # (8) send model back to NVFlare + flare.send(output_model) + + +if __name__ == "__main__": + main() diff --git a/research/fed-bn/src/net.py b/research/fed-bn/src/net.py new file mode 100644 index 0000000000..12d6929561 --- /dev/null +++ 
b/research/fed-bn/src/net.py @@ -0,0 +1,41 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class Net(nn.Module): + def __init__(self): + super().__init__() + self.conv1 = nn.Conv2d(3, 6, 5) + self.bn1 = nn.BatchNorm2d(6) + self.pool = nn.MaxPool2d(2, 2) + self.conv2 = nn.Conv2d(6, 16, 5) + self.bn2 = nn.BatchNorm2d(16) + self.fc1 = nn.Linear(16 * 5 * 5, 120) + self.bn3 = nn.BatchNorm1d(120) + self.fc2 = nn.Linear(120, 84) + self.bn4 = nn.BatchNorm1d(84) + self.fc3 = nn.Linear(84, 10) + + def forward(self, x): + x = self.pool(F.relu(self.bn1(self.conv1(x)))) + x = self.pool(F.relu(self.bn2(self.conv2(x)))) + x = torch.flatten(x, 1) + x = F.relu(self.bn3(self.fc1(x))) + x = F.relu(self.bn4(self.fc2(x))) + x = self.fc3(x) + return x From 8fd2291aee6ab2f69d2f0e7707c902b877f9626b Mon Sep 17 00:00:00 2001 From: Chester Chen <512707+chesterxgchen@users.noreply.github.com> Date: Fri, 10 May 2024 19:13:15 -0700 Subject: [PATCH 17/21] fix MLFLOW example (#2575) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Yuan-Ting Hsieh (謝沅廷) --- .../app/config/config_fed_server.conf | 1 + .../cifar10/code/fl/train_with_mlflow.py | 2 +- .../step-by-step/cifar10/sag/sag.ipynb | 46 ++++++++++++++++++- .../sag_pt_in_proc/config_fed_server.conf | 2 +- 4 files changed, 47 insertions(+), 4 
deletions(-) diff --git a/examples/advanced/experiment-tracking/mlflow/jobs/hello-pt-mlflow/app/config/config_fed_server.conf b/examples/advanced/experiment-tracking/mlflow/jobs/hello-pt-mlflow/app/config/config_fed_server.conf index 9bc187c8ab..5b0f694a6a 100644 --- a/examples/advanced/experiment-tracking/mlflow/jobs/hello-pt-mlflow/app/config/config_fed_server.conf +++ b/examples/advanced/experiment-tracking/mlflow/jobs/hello-pt-mlflow/app/config/config_fed_server.conf @@ -48,6 +48,7 @@ "id": "mlflow_receiver_with_tracking_uri", "path": "nvflare.app_opt.tracking.mlflow.mlflow_receiver.MLflowReceiver", "args": { + tracking_uri = "file:///{WORKSPACE}/{JOB_ID}/mlruns" "kwargs": { "experiment_name": "hello-pt-experiment", "run_name": "hello-pt-with-mlflow", diff --git a/examples/hello-world/step-by-step/cifar10/code/fl/train_with_mlflow.py b/examples/hello-world/step-by-step/cifar10/code/fl/train_with_mlflow.py index 1d43b88d4d..3e05f715b2 100644 --- a/examples/hello-world/step-by-step/cifar10/code/fl/train_with_mlflow.py +++ b/examples/hello-world/step-by-step/cifar10/code/fl/train_with_mlflow.py @@ -139,7 +139,7 @@ def evaluate(input_weights): running_loss += loss.item() if i % 2000 == 1999: # print every 2000 mini-batches print(f"({client_id}) [{epoch + 1}, {i + 1:5d}] loss: {running_loss / 2000:.3f}") - global_step = input_model.current_round * local_epochs * batch_size + epoch * batch_size + i + global_step = input_model.current_round * steps + epoch * len(trainloader) + i mlflow.log_metric("loss", running_loss / 2000, global_step) running_loss = 0.0 diff --git a/examples/hello-world/step-by-step/cifar10/sag/sag.ipynb b/examples/hello-world/step-by-step/cifar10/sag/sag.ipynb index dbd515b0f0..6d8b8ca76e 100644 --- a/examples/hello-world/step-by-step/cifar10/sag/sag.ipynb +++ b/examples/hello-world/step-by-step/cifar10/sag/sag.ipynb @@ -232,8 +232,8 @@ "source": [ "! 
nvflare job create -j /tmp/nvflare/jobs/cifar10_sag_pt -w sag_pt_in_proc \\\n", "-f meta.conf min_clients=2 \\\n", - "-f config_fed_client.conf app_script=train.py app_config=\"--batch_size 4 --dataset_path {CIFAR10_ROOT} --num_workers 2\" \\\n", - "-f config_fed_server.conf num_rounds=5 \\\n", + "-f config_fed_client.conf app_script=train_with_mlflow.py app_config=\"--batch_size 4 --dataset_path {CIFAR10_ROOT} --num_workers 2\" \\\n", + "-f config_fed_server.conf num_rounds=2 \\\n", "-sd ../code/fl \\\n", "-force" ] @@ -289,6 +289,48 @@ "The next 5 examples will use the same ScatterAndGather workflow, but will demonstrate different execution APIs and feature.\n", "In the next example [sag_deploy_map](../sag_deploy_map/sag_deploy_map.ipynb), we will learn about the deploy_map configuration for deployment of apps to different sites." ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a49b430b-a65b-4b1e-8793-9b3befcfcfd9", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "!tree /tmp/nvflare/jobs/cifar10_sag_pt_workspace/" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "50594df7-b4c9-4e5e-944a-403b5a105c27", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "!mlflow ui --port 5000 --backend-store-uri /tmp/nvflare/jobs/cifar10_sag_pt_workspace/server/simulate_job/mlruns" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "af2b6628-61af-4bc8-84d4-a9876a27c7c2", + "metadata": {}, + "outputs": [], + "source": [ + "!tensorboard --logdir=/tmp/nvflare/jobs/cifar10_sag_pt_workspace/server/simulate_job/tb_events" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d3ad11c3-6ef7-46cd-8778-0090505b14e1", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { diff --git a/job_templates/sag_pt_in_proc/config_fed_server.conf b/job_templates/sag_pt_in_proc/config_fed_server.conf index ab5691c4b7..deb678189f 100644 --- 
a/job_templates/sag_pt_in_proc/config_fed_server.conf +++ b/job_templates/sag_pt_in_proc/config_fed_server.conf @@ -107,7 +107,7 @@ path = "nvflare.app_opt.tracking.mlflow.mlflow_receiver.MLflowReceiver" args { # tracking_uri = "http://0.0.0.0:5000" - tracking_uri = "" + tracking_uri = "file:///{WORKSPACE}/{JOB_ID}/mlruns" kwargs { experiment_name = "nvflare-sag-pt-experiment" run_name = "nvflare-sag-pt-with-mlflow" From fce5ebbddbe6d21c293329399984247dbdeea30f Mon Sep 17 00:00:00 2001 From: Chester Chen <512707+chesterxgchen@users.noreply.github.com> Date: Fri, 10 May 2024 21:42:02 -0700 Subject: [PATCH 18/21] BugFix: InProcessClientAPIExecutor's TaskScriptRunner (#2558) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * 1) find script full path to indicate which site script to avoid loading run script 2) make sure the task script failed will cause the client to return failure status which will trigger job stop rather wait forever 3) add different unit tests * sort key in unit test * add logic to improve error message * style format * add more tests and logics * code format * code format * fix steps error * fix global steps * rollback some changes and split it into another PR * rollback some changes and split it into another PR --------- Co-authored-by: Yuan-Ting Hsieh (謝沅廷) --- .../in_process_client_api_executor.py | 15 +- .../executors/task_script_runner.py | 60 ++++-- .../executors/task_script_runner_test.py | 174 ++++++++++++++++-- tests/unit_test/client/in_process/api_test.py | 4 +- .../data/jobs/in_proc_job/custom/src/model.py | 18 ++ .../data/jobs/in_proc_job/custom/src/train.py | 43 +++++ .../in_proc_job/server/custom/failed_train.py | 44 +++++ .../jobs/in_proc_job/server/custom/model.py | 18 ++ .../jobs/in_proc_job/server/custom/train.py | 47 +++++ .../in_proc_job/site-1/custom/failed_train.py | 44 +++++ .../jobs/in_proc_job/site-1/custom/model.py | 18 ++ .../site-1/custom/relative_import_train.py | 43 +++++ 
.../jobs/in_proc_job/site-1/custom/train.py | 43 +++++ 13 files changed, 537 insertions(+), 34 deletions(-) create mode 100644 tests/unit_test/data/jobs/in_proc_job/custom/src/model.py create mode 100644 tests/unit_test/data/jobs/in_proc_job/custom/src/train.py create mode 100644 tests/unit_test/data/jobs/in_proc_job/server/custom/failed_train.py create mode 100644 tests/unit_test/data/jobs/in_proc_job/server/custom/model.py create mode 100644 tests/unit_test/data/jobs/in_proc_job/server/custom/train.py create mode 100644 tests/unit_test/data/jobs/in_proc_job/site-1/custom/failed_train.py create mode 100644 tests/unit_test/data/jobs/in_proc_job/site-1/custom/model.py create mode 100644 tests/unit_test/data/jobs/in_proc_job/site-1/custom/relative_import_train.py create mode 100644 tests/unit_test/data/jobs/in_proc_job/site-1/custom/train.py diff --git a/nvflare/app_common/executors/in_process_client_api_executor.py b/nvflare/app_common/executors/in_process_client_api_executor.py index 2277f4718f..15049e4177 100644 --- a/nvflare/app_common/executors/in_process_client_api_executor.py +++ b/nvflare/app_common/executors/in_process_client_api_executor.py @@ -61,6 +61,7 @@ def __init__( submit_model_task_name: str = "submit_model", ): super(InProcessClientAPIExecutor, self).__init__() + self._abort = False self._client_api = None self._result_pull_interval = result_pull_interval self._log_pull_interval = log_pull_interval @@ -93,6 +94,7 @@ def __init__( self._event_manager = EventManager(self._data_bus) self._data_bus.subscribe([TOPIC_LOCAL_RESULT], self.local_result_callback) self._data_bus.subscribe([TOPIC_LOG_DATA], self.log_result_callback) + self._data_bus.subscribe([TOPIC_ABORT, TOPIC_STOP], self.to_abort_callback) self.local_result = None self._fl_ctx = None self._task_fn_path = None @@ -106,17 +108,19 @@ def handle_event(self, event_type: str, fl_ctx: FLContext): self._init_converter(fl_ctx) self._task_fn_wrapper = TaskScriptRunner( - 
script_path=self._task_script_path, script_args=self._task_script_args + site_name=fl_ctx.get_identity_name(), + script_path=self._task_script_path, + script_args=self._task_script_args, ) self._task_fn_thread = threading.Thread(target=self._task_fn_wrapper.run) - self._task_fn_thread.start() - meta = self._prepare_task_meta(fl_ctx, None) self._client_api = InProcessClientAPI(task_metadata=meta, result_check_interval=self._result_pull_interval) self._client_api.init() self._data_bus.put_data(CLIENT_API_KEY, self._client_api) + self._task_fn_thread.start() + elif event_type == EventType.END_RUN: self._event_manager.fire_event(TOPIC_STOP, "END_RUN received") if self._task_fn_thread: @@ -142,7 +146,7 @@ def execute(self, task_name: str, shareable: Shareable, fl_ctx: FLContext, abort # wait for result self.log_info(fl_ctx, "Waiting for result from peer") while True: - if abort_signal.triggered: + if abort_signal.triggered or self._abort is True: # notify peer that the task is aborted self._event_manager.fire_event(TOPIC_ABORT, f"{task_name}' is aborted, abort_signal_triggered") return make_reply(ReturnCode.TASK_ABORTED) @@ -231,3 +235,6 @@ def log_result_callback(self, topic, data, databus): # fire_fed_event = True w/o fed_event_converter somehow did not work with self._engine.new_context() as fl_ctx: send_analytic_dxo(self, dxo=dxo, fl_ctx=fl_ctx, event_type=ANALYTIC_EVENT_TYPE, fire_fed_event=False) + + def to_abort_callback(self, topic, data, databus): + self._abort = True diff --git a/nvflare/app_common/executors/task_script_runner.py b/nvflare/app_common/executors/task_script_runner.py index fba46f6b05..bd563e2b43 100644 --- a/nvflare/app_common/executors/task_script_runner.py +++ b/nvflare/app_common/executors/task_script_runner.py @@ -14,67 +14,91 @@ import builtins import logging import os +import runpy import sys import traceback +from nvflare.client.in_process.api import TOPIC_ABORT +from nvflare.fuel.data_event.data_bus import DataBus +from 
nvflare.fuel.data_event.event_manager import EventManager + print_fn = builtins.print class TaskScriptRunner: logger = logging.getLogger(__name__) - def __init__(self, script_path: str, script_args: str = None): + def __init__(self, site_name: str, script_path: str, script_args: str = None, redirect_print_to_log=True): """Wrapper for function given function path and args Args: + site_name (str): site name script_path (str): script file name, such as train.py script_args (str, Optional): script arguments to pass in. """ + + self.redirect_print_to_log = redirect_print_to_log + self.event_manager = EventManager(DataBus()) self.script_args = script_args - self.client_api = None + self.site_name = site_name self.logger = logging.getLogger(self.__class__.__name__) - self.script_path = self.get_script_full_path(script_path) + self.script_path = script_path + self.script_full_path = self.get_script_full_path(self.site_name, self.script_path) def run(self): """Call the task_fn with any required arguments.""" - self.logger.info(f"\n start task run() with {self.script_path}") + self.logger.info(f"\n start task run() with full path: {self.script_full_path}") try: - import runpy - curr_argv = sys.argv - builtins.print = log_print + builtins.print = log_print if self.redirect_print_to_log else print_fn sys.argv = self.get_sys_argv() - runpy.run_path(self.script_path, run_name="__main__") + runpy.run_path(self.script_full_path, run_name="__main__") sys.argv = curr_argv - + except ImportError as ie: + msg = "attempted relative import with no known parent package" + if ie.msg == msg: + xs = [p for p in sys.path if self.script_full_path.startswith(p)] + import_base_path = max(xs, key=len) + raise ImportError( + f"{ie.msg}, the relative import is not support. 
python import is based off the sys.path: {import_base_path}" + ) + else: + raise ie except Exception as e: msg = traceback.format_exc() self.logger.error(msg) - if self.client_api: - self.client_api.exec_queue.ask_abort(msg) + self.logger.error("fire abort event") + self.event_manager.fire_event(TOPIC_ABORT, f"'{self.script_full_path}' is aborted, {msg}") raise e finally: builtins.print = print_fn def get_sys_argv(self): args_list = [] if not self.script_args else self.script_args.split() - return [self.script_path] + args_list + return [self.script_full_path] + args_list - def get_script_full_path(self, script_path) -> str: + def get_script_full_path(self, site_name, script_path) -> str: target_file = None script_filename = os.path.basename(script_path) script_dirs = os.path.dirname(script_path) + if os.path.isabs(script_path): + if not os.path.isfile(script_path): + raise ValueError(f"script_path='{script_path}' not found") + return script_path + for r, dirs, files in os.walk(os.getcwd()): for f in files: absolute_path = os.path.join(r, f) if absolute_path.endswith(script_path): parent_dir = absolute_path[: absolute_path.find(script_path)].rstrip(os.sep) if os.path.isdir(parent_dir): - target_file = absolute_path - break + path_components = parent_dir.split(os.path.sep) + if site_name in path_components: + target_file = absolute_path + break - if not script_dirs and f == script_filename: + if not site_name and not script_dirs and f == script_filename: target_file = absolute_path break @@ -82,7 +106,9 @@ def get_script_full_path(self, script_path) -> str: break if not target_file: - raise ValueError(f"{script_path} is not found") + msg = f"Can not find {script_path}" + self.event_manager.fire_event(TOPIC_ABORT, f"'{self.script_path}' is aborted, {msg}") + raise ValueError(msg) return target_file diff --git a/tests/unit_test/app_common/executors/task_script_runner_test.py b/tests/unit_test/app_common/executors/task_script_runner_test.py index 65057e5f01..f065f78651 
100644 --- a/tests/unit_test/app_common/executors/task_script_runner_test.py +++ b/tests/unit_test/app_common/executors/task_script_runner_test.py @@ -12,9 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. import os +import sys import unittest +import pytest + from nvflare.app_common.executors.task_script_runner import TaskScriptRunner +from nvflare.client.in_process.api import TOPIC_ABORT, TOPIC_STOP class TestTaskScriptRunner(unittest.TestCase): @@ -22,34 +26,34 @@ def test_app_scripts_and_args(self): curr_dir = os.getcwd() script_path = "nvflare/cli.py" script_args = "--batch_size 4" - wrapper = TaskScriptRunner(script_path=script_path, script_args=script_args) + wrapper = TaskScriptRunner(site_name="", script_path=script_path, script_args=script_args) - self.assertTrue(wrapper.script_path.endswith(script_path)) + self.assertTrue(wrapper.script_full_path.endswith(script_path)) self.assertEqual(wrapper.get_sys_argv(), [os.path.join(curr_dir, "nvflare", "cli.py"), "--batch_size", "4"]) def test_app_scripts_and_args2(self): curr_dir = os.getcwd() script_path = "cli.py" script_args = "--batch_size 4" - wrapper = TaskScriptRunner(script_path=script_path, script_args=script_args) + wrapper = TaskScriptRunner(site_name="", script_path=script_path, script_args=script_args) - self.assertTrue(wrapper.script_path.endswith(script_path)) + self.assertTrue(wrapper.script_full_path.endswith(script_path)) self.assertEqual(wrapper.get_sys_argv(), [os.path.join(curr_dir, "nvflare", "cli.py"), "--batch_size", "4"]) def test_app_scripts_with_sub_dirs1(self): curr_dir = os.getcwd() script_path = "nvflare/__init__.py" - wrapper = TaskScriptRunner(script_path=script_path) + wrapper = TaskScriptRunner(site_name="", script_path=script_path) - self.assertTrue(wrapper.script_path.endswith(script_path)) + self.assertTrue(wrapper.script_full_path.endswith(script_path)) self.assertEqual(wrapper.get_sys_argv(), 
[os.path.join(curr_dir, "nvflare", "__init__.py")]) def test_app_scripts_with_sub_dirs2(self): curr_dir = os.getcwd() script_path = "nvflare/app_common/executors/__init__.py" - wrapper = TaskScriptRunner(script_path=script_path) + wrapper = TaskScriptRunner(site_name="", script_path=script_path) - self.assertTrue(wrapper.script_path.endswith(script_path)) + self.assertTrue(wrapper.script_full_path.endswith(script_path)) self.assertEqual( wrapper.get_sys_argv(), [os.path.join(curr_dir, "nvflare", "app_common", "executors", "__init__.py")] ) @@ -57,9 +61,9 @@ def test_app_scripts_with_sub_dirs2(self): def test_app_scripts_with_sub_dirs3(self): curr_dir = os.getcwd() script_path = "executors/task_script_runner.py" - wrapper = TaskScriptRunner(script_path=script_path) + wrapper = TaskScriptRunner(site_name="app_common", script_path=script_path) - self.assertTrue(wrapper.script_path.endswith(script_path)) + self.assertTrue(wrapper.script_full_path.endswith(script_path)) self.assertEqual( wrapper.get_sys_argv(), [os.path.join(curr_dir, "nvflare", "app_common", "executors", "task_script_runner.py")], @@ -68,7 +72,153 @@ def test_app_scripts_with_sub_dirs3(self): def test_app_scripts_with_sub_dirs4(self): curr_dir = os.getcwd() script_path = "in_process/api.py" - wrapper = TaskScriptRunner(script_path=script_path) + wrapper = TaskScriptRunner(site_name="client", script_path=script_path) - self.assertTrue(wrapper.script_path.endswith(script_path)) + self.assertTrue(wrapper.script_full_path.endswith(script_path)) self.assertEqual(wrapper.get_sys_argv(), [os.path.join(curr_dir, "nvflare", "client", "in_process", "api.py")]) + + def test_file_not_found_with_exception(self): + curr_dir = os.getcwd() + script_path = "in_process/api.py" + with pytest.raises(ValueError, match="Can not find in_process/api.py"): + wrapper = TaskScriptRunner(site_name="site-1", script_path=script_path) + self.assertTrue(wrapper.script_full_path.endswith(script_path)) + self.assertEqual( + 
wrapper.get_sys_argv(), [os.path.join(curr_dir, "nvflare", "client", "in_process", "api.py")] + ) + + def test_run_scripts_with_sub_dirs(self): + old_sys_path = sys.path + script_args = "--batch_size 4" + sys.path.append(os.path.join(os.getcwd(), "tests/unit_test/data/jobs/in_proc_job/custom")) + sys.path.append(os.path.join(os.getcwd(), "tests/unit_test/data/jobs/in_proc_job/server/custom")) + sys.path.append(os.path.join(os.getcwd(), "tests/unit_test/data/jobs/in_proc_job/site-1/custom")) + + try: + script_path = "train.py" + wrapper = TaskScriptRunner( + site_name="site-1", script_path=script_path, script_args=script_args, redirect_print_to_log=False + ) + self.assertTrue(wrapper.script_full_path.endswith(script_path)) + expected_path = os.path.join(os.getcwd(), "tests/unit_test/data/jobs/in_proc_job/site-1/custom/train.py") + self.assertEqual(wrapper.get_sys_argv(), [expected_path, "--batch_size", "4"]) + wrapper.run() + finally: + sys.path = old_sys_path + + def test_run_scripts_with_sub_dirs2(self): + old_sys_path = sys.path + script_args = "--batch_size 4" + sys.path.append(os.path.join(os.getcwd(), "tests/unit_test/data/jobs/in_proc_job/custom")) + sys.path.append(os.path.join(os.getcwd(), "tests/unit_test/data/jobs/in_proc_job/server/custom")) + sys.path.append(os.path.join(os.getcwd(), "tests/unit_test/data/jobs/in_proc_job/site-1/custom")) + + try: + script_path = "train.py" + wrapper = TaskScriptRunner( + site_name="server", script_path=script_path, script_args=script_args, redirect_print_to_log=False + ) + self.assertTrue(wrapper.script_full_path.endswith(script_path)) + expected_path = os.path.join(os.getcwd(), "tests/unit_test/data/jobs/in_proc_job/server/custom/train.py") + self.assertEqual(wrapper.get_sys_argv(), [expected_path, "--batch_size", "4"]) + wrapper.run() + finally: + sys.path = old_sys_path + + def test_run_scripts_with_sub_dirs3(self): + old_sys_path = sys.path + script_args = "--batch_size 4" + 
sys.path.append(os.path.join(os.getcwd(), "tests/unit_test/data/jobs/in_proc_job/custom")) + sys.path.append(os.path.join(os.getcwd(), "tests/unit_test/data/jobs/in_proc_job/server/custom")) + sys.path.append(os.path.join(os.getcwd(), "tests/unit_test/data/jobs/in_proc_job/site-1/custom")) + + try: + script_path = "src/train.py" + wrapper = TaskScriptRunner( + site_name="", script_path=script_path, script_args=script_args, redirect_print_to_log=False + ) + self.assertTrue(wrapper.script_full_path.endswith(script_path)) + expected_path = os.path.join(os.getcwd(), "tests/unit_test/data/jobs/in_proc_job/custom/src/train.py") + self.assertEqual(wrapper.get_sys_argv(), [expected_path, "--batch_size", "4"]) + wrapper.run() + finally: + sys.path = old_sys_path + + def test_run_failed_scripts(self): + old_sys_path = sys.path + script_args = "--batch_size 4" + sys.path.append(os.path.join(os.getcwd(), "tests/unit_test/data/jobs/in_proc_job/custom")) + sys.path.append(os.path.join(os.getcwd(), "tests/unit_test/data/jobs/in_proc_job/server/custom")) + sys.path.append(os.path.join(os.getcwd(), "tests/unit_test/data/jobs/in_proc_job/site-1/custom")) + + try: + script_path = "failed_train.py" + wrapper = TaskScriptRunner( + site_name="site-1", script_path=script_path, script_args=script_args, redirect_print_to_log=False + ) + wrapper.event_manager.data_bus.subscribe([TOPIC_ABORT, TOPIC_STOP], self.abort_callback) + + self.assertTrue(wrapper.script_full_path.endswith(script_path)) + with pytest.raises(ValueError, match="failed to train model"): + # 1 ) check if the exception is through, + # 2 ) more important to see if the callback is trigger. 
+ wrapper.run() + finally: + sys.path = old_sys_path + + def abort_callback(self, topic, data, databus): + print("\n ===== calling abort_callback begin") + # assert failure here will not cause test to fail + self.assertEqual(topic, TOPIC_ABORT) + print("\n ===== calling abort_callback end") + + def test_run_relative_import_scripts(self): + old_sys_path = sys.path + script_args = "--batch_size 4" + sys.path.append(os.path.join(os.getcwd(), "tests/unit_test/data/jobs/in_proc_job/custom")) + sys.path.append(os.path.join(os.getcwd(), "tests/unit_test/data/jobs/in_proc_job/server/custom")) + sys.path.append(os.path.join(os.getcwd(), "tests/unit_test/data/jobs/in_proc_job/site-1/custom")) + + try: + script_path = "relative_import_train.py" + wrapper = TaskScriptRunner( + site_name="site-1", script_path=script_path, script_args=script_args, redirect_print_to_log=False + ) + self.assertTrue(wrapper.script_full_path.endswith(script_path)) + path = os.path.join(os.getcwd(), "tests/unit_test/data/jobs/in_proc_job/site-1/custom") + msg = f"attempted relative import with no known parent package, the relative import is not support. 
python import is based off the sys.path: {path}" + with pytest.raises(ImportError, match=msg): + # check the ImportError + wrapper.run() + finally: + sys.path = old_sys_path + + def test_run_abs_path_scripts(self): + old_sys_path = sys.path + script_args = "--batch_size 4" + + sys.path.append(os.path.join(os.getcwd(), "tests/unit_test/data/jobs/in_proc_job/site-1/custom")) + + try: + # path doesn't exist + script_path = "/foo/dummy/train.py" + with pytest.raises(ValueError, match="script_path='/foo/dummy/train.py' not found"): + wrapper = TaskScriptRunner( + site_name="site-1", script_path=script_path, script_args=script_args, redirect_print_to_log=False + ) + finally: + sys.path = old_sys_path + + def test_run_abs_path_scripts2(self): + old_sys_path = sys.path + script_args = "--batch_size 4" + sys.path.append(os.path.join(os.getcwd(), "tests/unit_test/data/jobs/in_proc_job/site-1/custom")) + + try: + script_path = os.path.join(os.getcwd(), "tests/unit_test/data/jobs/in_proc_job/site-1/custom/train.py") + wrapper = TaskScriptRunner( + site_name="site-1", script_path=script_path, script_args=script_args, redirect_print_to_log=False + ) + wrapper.run() + finally: + sys.path = old_sys_path diff --git a/tests/unit_test/client/in_process/api_test.py b/tests/unit_test/client/in_process/api_test.py index af2a62bc44..835bd80b5d 100644 --- a/tests/unit_test/client/in_process/api_test.py +++ b/tests/unit_test/client/in_process/api_test.py @@ -64,7 +64,9 @@ def test_init_with_custom_interval(self): def test_init_subscriptions(self): client_api = InProcessClientAPI(self.task_metadata) - assert list(client_api.data_bus.subscribers.keys()) == [TOPIC_GLOBAL_RESULT, TOPIC_ABORT, TOPIC_STOP] + xs = list(client_api.data_bus.subscribers.keys()) + xs.sort() + assert xs == [TOPIC_ABORT, TOPIC_GLOBAL_RESULT, TOPIC_STOP] def local_result_callback(self, data, topic): pass diff --git a/tests/unit_test/data/jobs/in_proc_job/custom/src/model.py 
b/tests/unit_test/data/jobs/in_proc_job/custom/src/model.py new file mode 100644 index 0000000000..936da53207 --- /dev/null +++ b/tests/unit_test/data/jobs/in_proc_job/custom/src/model.py @@ -0,0 +1,18 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +class Model: + def train(self, dataset_path, batch_size): + pass diff --git a/tests/unit_test/data/jobs/in_proc_job/custom/src/train.py b/tests/unit_test/data/jobs/in_proc_job/custom/src/train.py new file mode 100644 index 0000000000..b57342173c --- /dev/null +++ b/tests/unit_test/data/jobs/in_proc_job/custom/src/train.py @@ -0,0 +1,43 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import argparse + +from model import Model + + +class Code: + # code to test relative import + def run(self, dataset_path, batch_size): + model = Model() + model.train(dataset_path, batch_size) + + def define_parser(self): + parser = argparse.ArgumentParser() + parser.add_argument("--dataset_path", type=str, default="/data", nargs="?") + parser.add_argument("--batch_size", type=int, default=4, nargs="?") + return parser.parse_args() + + def main(self): + args = self.define_parser() + dataset_path = args.dataset_path + batch_size = args.batch_size + self.run(dataset_path, batch_size) + + +print("__name__ src is ", __name__) + +if __name__ == "__main__": + code = Code() + code.main() diff --git a/tests/unit_test/data/jobs/in_proc_job/server/custom/failed_train.py b/tests/unit_test/data/jobs/in_proc_job/server/custom/failed_train.py new file mode 100644 index 0000000000..26c5d36853 --- /dev/null +++ b/tests/unit_test/data/jobs/in_proc_job/server/custom/failed_train.py @@ -0,0 +1,44 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import argparse + +from model import Model + + +class Code: + # code to test relative import + def run(self, dataset_path, batch_size): + model = Model() + model.train(dataset_path, batch_size) + raise ValueError("failed to train model") + + def define_parser(self): + parser = argparse.ArgumentParser() + parser.add_argument("--dataset_path", type=str, default="/data", nargs="?") + parser.add_argument("--batch_size", type=int, default=4, nargs="?") + return parser.parse_args() + + def main(self): + args = self.define_parser() + dataset_path = args.dataset_path + batch_size = args.batch_size + self.run(dataset_path, batch_size) + + +print("__name__ in server is ", __name__) + +if __name__ == "__main__": + code = Code() + code.main() diff --git a/tests/unit_test/data/jobs/in_proc_job/server/custom/model.py b/tests/unit_test/data/jobs/in_proc_job/server/custom/model.py new file mode 100644 index 0000000000..936da53207 --- /dev/null +++ b/tests/unit_test/data/jobs/in_proc_job/server/custom/model.py @@ -0,0 +1,18 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +class Model: + def train(self, dataset_path, batch_size): + pass diff --git a/tests/unit_test/data/jobs/in_proc_job/server/custom/train.py b/tests/unit_test/data/jobs/in_proc_job/server/custom/train.py new file mode 100644 index 0000000000..ce3759cf7e --- /dev/null +++ b/tests/unit_test/data/jobs/in_proc_job/server/custom/train.py @@ -0,0 +1,47 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# print("\n in server train.py __package__ = ", __package__) +# __package__ = "nvflare.app_common.executors.server" +# print("\n in server train.py __package__ = ", __package__) + +import argparse + +from model import Model + + +class Code: + # code to test relative import + def run(self, dataset_path, batch_size): + model = Model() + model.train(dataset_path, batch_size) + + def define_parser(self): + parser = argparse.ArgumentParser() + parser.add_argument("--dataset_path", type=str, default="/data", nargs="?") + parser.add_argument("--batch_size", type=int, default=4, nargs="?") + return parser.parse_args() + + def main(self): + args = self.define_parser() + dataset_path = args.dataset_path + batch_size = args.batch_size + self.run(dataset_path, batch_size) + + +print("__name__ server is ", __name__) + +if __name__ == "__main__": + code = Code() + code.main() diff --git a/tests/unit_test/data/jobs/in_proc_job/site-1/custom/failed_train.py 
b/tests/unit_test/data/jobs/in_proc_job/site-1/custom/failed_train.py new file mode 100644 index 0000000000..8e0e5b0331 --- /dev/null +++ b/tests/unit_test/data/jobs/in_proc_job/site-1/custom/failed_train.py @@ -0,0 +1,44 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse + +from model import Model + + +class Code: + # code to test relative import + def run(self, dataset_path, batch_size): + model = Model() + model.train(dataset_path, batch_size) + raise ValueError("failed to train model") + + def define_parser(self): + parser = argparse.ArgumentParser() + parser.add_argument("--dataset_path", type=str, default="/data", nargs="?") + parser.add_argument("--batch_size", type=int, default=4, nargs="?") + return parser.parse_args() + + def main(self): + args = self.define_parser() + dataset_path = args.dataset_path + batch_size = args.batch_size + self.run(dataset_path, batch_size) + + +print("__name__ site-1 is ", __name__) + +if __name__ == "__main__": + code = Code() + code.main() diff --git a/tests/unit_test/data/jobs/in_proc_job/site-1/custom/model.py b/tests/unit_test/data/jobs/in_proc_job/site-1/custom/model.py new file mode 100644 index 0000000000..936da53207 --- /dev/null +++ b/tests/unit_test/data/jobs/in_proc_job/site-1/custom/model.py @@ -0,0 +1,18 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +class Model: + def train(self, dataset_path, batch_size): + pass diff --git a/tests/unit_test/data/jobs/in_proc_job/site-1/custom/relative_import_train.py b/tests/unit_test/data/jobs/in_proc_job/site-1/custom/relative_import_train.py new file mode 100644 index 0000000000..3a6801ecd9 --- /dev/null +++ b/tests/unit_test/data/jobs/in_proc_job/site-1/custom/relative_import_train.py @@ -0,0 +1,43 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import argparse + +from .model import Model + + +class Code: + # code to test relative import + def run(self, dataset_path, batch_size): + model = Model() + model.train(dataset_path, batch_size) + + def define_parser(self): + parser = argparse.ArgumentParser() + parser.add_argument("--dataset_path", type=str, default="/data", nargs="?") + parser.add_argument("--batch_size", type=int, default=4, nargs="?") + return parser.parse_args() + + def main(self): + args = self.define_parser() + dataset_path = args.dataset_path + batch_size = args.batch_size + self.run(dataset_path, batch_size) + + +print("__name__ site-1 is ", __name__) + +if __name__ == "__main__": + code = Code() + code.main() diff --git a/tests/unit_test/data/jobs/in_proc_job/site-1/custom/train.py b/tests/unit_test/data/jobs/in_proc_job/site-1/custom/train.py new file mode 100644 index 0000000000..943e40e567 --- /dev/null +++ b/tests/unit_test/data/jobs/in_proc_job/site-1/custom/train.py @@ -0,0 +1,43 @@ +# Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import argparse + +from model import Model + + +class Code: + # code to test relative import + def run(self, dataset_path, batch_size): + model = Model() + model.train(dataset_path, batch_size) + + def define_parser(self): + parser = argparse.ArgumentParser() + parser.add_argument("--dataset_path", type=str, default="/data", nargs="?") + parser.add_argument("--batch_size", type=int, default=4, nargs="?") + return parser.parse_args() + + def main(self): + args = self.define_parser() + dataset_path = args.dataset_path + batch_size = args.batch_size + self.run(dataset_path, batch_size) + + +print("__name__ site-1 is ", __name__) + +if __name__ == "__main__": + code = Code() + code.main() From b11d6e3c517f8fb30c2466ede72fcdb56acb894f Mon Sep 17 00:00:00 2001 From: Chester Chen <512707+chesterxgchen@users.noreply.github.com> Date: Mon, 13 May 2024 21:14:59 -0700 Subject: [PATCH 19/21] update client_api.png (#2577) --- docs/resources/client_api.png | Bin 160494 -> 160306 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/docs/resources/client_api.png b/docs/resources/client_api.png index ef82720a922d94354b11ccb3493ace8b922558b9..edc340b010eaf25f26eb55993cb42e549ef45877 100644 GIT binary patch delta 46820 zcmd?Rbx>9P_dn{PyGt4oK|)$O1QBVZ5d>*z5F{ivD526ELJ$N*8mS|tgo1QRNSAbX z9P;-*_&m?&`F!Vl?>~3$%$>P24s-T7`@Q$t>%HQ&UTbYP=5fpBaHILqq}F2uR49dc z_=R-%gxCb{@bjVm2;CA95{`+Dg$cpgU>p<<^!5=Wtz-34A;gBr_Oc3sR z-67U45k4(ViMTGljSZjdWrj;CQNSO{@xZC+3gOCFr0^3pT)2rA4*Y%e9e4>HIsDr{ z-+hO>-A#Zq7ZJc^6=>nsc;s-05*m01i~v5J%8#hWVSvGV94O&!_blP#gpXrgND=Q9 z9btdnH zkb%2Uz*jXW;Tw2fH2d%u87gcY%tVwB23K{|0CX-B@SF)i3JYnV}Z*$#zvZ< z?ub~kT!X?WMVoER?Shx)#hOa|fUISB)&UWaR962R9#P{N}_dEvqO z$q0+*J^!_U7G6#!j;Qk!L;GJ(#|)fD|NHtxaIZI1z<5i^q!Av$3;+A|?Oy#yAk+f7 z%j+M#d4LAb*EU7uMtH#BCy!8%KzAt*dNdja9AAm=Qb@jeEYz29sTfjt+6!E`+B+P0 z#H){pl6dg&sO6f8g)ne7Km5Fx0?wC&1KxBsOE3V2!u7W|%xAmZBvI}E{`+W~_YIGQ4q^I!kDvq0en>~BS=BNB^# z{OhQ%WZ=(9n27CveeGDDjrNbQ(0ATTVOgs6{}C3YA4)WS(aRmftX2FYZ$W+ff8;&u 
zrG)>z)UZMRUrTeFE>#P6T(OUrh2_9qUQ!`6TPQCDfzx)8C8kTks1R$dU;YxUqXGs` zcQ!@PcE$hsK-AOww=U&@PK;3oT{1ugXX~>EQeT7jI0`17a>F9wZ+?*>g8MHO|3ALH z@$32@ne&F}|J>&KsPTV&t&E%5i1;e{m4gjSrU&!o2y%x)wHJ|7e5T*p5T*5&R?n$EmvDCxUKjNVu|I2=uPVd64(Lvy)dx0Q2 zu- z=g<)wwsUz{307@fZdD~d0qo&_)XH>9nV3WfL;16Dq9!KHGqI2ewuZVw;3ZTp{b zD3L@GMp-$*2)p`6?$hfqvPv6f7zl)@#{h@ql}*=R!+#_JhdC%i<4deygHa_8-+e{y8nFAm-t)GNGTW$hLO4px2}Xs!`^s6 zXb+24HcpPRtEdR|&?tCl$KPRaY}{&MtFq4!ZF-@CyFv9?6jQGE_xCIVvhnrrEStw0 zizA0pVoB9*i`|_JCmZEu4>vmLktu?Q8^|tbbAn?t^J&yeq>JHcj98T;9!Gir%!+ao zc0r;Sl9%R>6&F8&zVG#{dUNb)+Eo7{G_ce%)X%t4e1P_e@y*@Kw5l*JBBPJ5DpjmfQAfud;3P<^O`T? zhmO@fo6Bx?d$X&y#E1GmFz`!+(L~8pmP$?KbcT%#kCT&MPK!n>jLx51OrI}9S+e_c z9lZ7Q3vVqV?@35TDZ(&mWxoZKHH1wwHMC#Z@9zmHEAvTbEWUNzhVjQ=JjDFZGRGn@SEw?L#L#r)o!tCT4YI z0~yKq(hEsOWrE^iD>-Q}E2~3UNXq2V#!1Ea;^Cf{@bUKk=g^aaT3HDU)^f&$^^=N& zL)(JQ43yZSEo*+GPK#rzn?GL?ON-W=9gNK++ZWkpHy#*JYNm%`5^!)~91@#Z1spHi z$Qq7FX-4TMp49OiBBlJ)Bm8}()@RP`czcOpR>u?4maWmf#isT7A1a_QQ{A8A!%xuG zpAwgkO;w_l?QH}UlNDIa&g8RJ$~R&O7w$0gNx|dL#0J{L`*|jBa5dfk_^PI!2l2q{ z;C!8Pho8Sjnsz3WsfJHg{V4ut710K7EhVMa3%u^WyA~PF1s&CU)GWvfcA_6rJ<>RYv_Mf&{vGO*$Wz1OwE&wa`>I~}~l_C?j!nM_43IRR*d%#URaXB4*nqbBIT^i*!r znVHkmawc3SD}i}^FeQwvk~LEFS~#t6{x)x!;&3$Dx(BiF@zj*v)b$kJjA8RI9=&z7 zueAc0?|paLXq4YSZNF)T=(^3dJ1G4KhB>N>G9<1kC+LW4j&^qbSog*#r~mwW#p<47 z)e-$g%=UeV7i=f^X9{TjsYoa?-)P3QY5G-U%x zfLYYwkFFC#!Nyo|x>yeJC&lBt9EV6-`Ym87$)tR)B}9o z?&{c#$w53ZdsinXgc!!?HltnTlyC~~%-oH44rE1NR`N@}W6=qw@o2zKD?rE@vCOPbpE%#rZ?%#&wECu3ebFuz^)$`Y>Y!rExcYovx}Nb{`9nvj zigEF`G@U+ku=~1rsCn{5af;b!Q1WNHVvNZu&)152j)5L^p~*9w&cel>2fm8oG^7{KEQJc7&n4)BgWn z|Erkek&PECRuhpYpFX}QiLdF4!^>LusA@7Fl=NwCu3G2Tv=oIL;`Hp(soljU6<+G5 z^vN3eM)l%*mtTf9P1P=rgOt;L6=P0cn+CralC)$8pyBCU1D5{UBt|BP(y8o|$)poQ zj-Bba-Zs~vWV@e#`NWu?&-`)X$tm>wm{#kTcKx$MM^Ukba<#!Y46K_%j~|2yQBVsu zC{cfTGI6NzshfL3-XRS$epD>ZF1x;du!`wT>gM$KmyL`*Yow?fyL9rm=ve4Gu?D#; zWrT*{^VaaCxHexkYa|pIxC_Ii$Ta|-AlKk=(rC&uM8i8&Eb#M>ERdhSRePN{uVxVh zoTAdpgT@X|aTJN}R9mcw*hmYANJu^DK6%nv@S2>ft}oUQVd56`l^>&zbF*%$Fuij! 
zCe8oYcG$W8tf9oLv7xkukxtLlZ@;<9?A;cJQ~lY2ODjugkhI@%P#`*XNM6qSFStDo z@*sWOLj{_xd?9EIdlOHias?$)pWgPYq{Ank-f*{#Ebp6y>JLvQ3kOHCTs}^4RoTt2 zdVo0lSytkpSb1N>sSdedfUKN;fgjRHB*EP&JY<~3;&+e2YrTA^;ejFp<$IrvYJ=Bb zy2V#HGEw`13I%fAdbqDREtd{D_E&E|^3Cv%M?y$DOyH+LMZj4P$VF#@$jw_Tz$Jgg z*X4j`JCI-ceHGgJjvYw(rAOxClxBm`YdY|eZN{Nr(t z{&P9-GxgCLqNm|C`+FK{PqRqn^DfEV&Uxot1yg?CqblX%)!%_e`5stQ+ux{A< zPdQluXc3n&QkU;>0fDB>87KMHAS#Cgw?h4KHx0nMBUdHZBPrANH?6oab2D9*3ztSy&r6tam=nN(`gRQ8^1mcTi9Ja9IBNwYdC772 zel;NTOTybY_Qkax&pi8}D@I@E6O0=6hfJ9Y*oGc?PLwoJ8C5lSqZ?Bb-wE`fM43rD zUCIBZUOU!rlJPIGiHSrey)3*=w_6%%?F&iu9z7c@0pTdrHajW%w#26yJsIR2+~sh> zGgM-;MsVC?%+c&MTLRANr%IDy7pRdi)wC>R!|5Ri7dJq-5YG9~Bk;fKe!yfpGfEmy zM~`enlyErr$`$4-N_X$*Ot|$(nSo$H$-;$&BNroAr;DdfrK(XE*lSJ8N z`XVNy|IrN1@pe5occYvDDqS!2=V-iz&XJA4pvbM0ZL=9AN@7?QLlRB>#cH-1@Xdxx z%x0Ez(ulaP{CFYLaqFuCfi}|g9qMMeXjpNSCRfobP~fT<5`dY0`rEoV(q+#uO?s5=`j7_yxG?$+MQAeWx8x-@dmtWSwO0)IPP z=KRPEL>=7a-JiVmr)2(MN4}xmc31>EPoQ%bbvV;VEJLtaduT$#Z+_S7WWBNxqI0ex zt(50b7X1Qzxw*t0_;rCyxZS9&|BDi9> z61Y+}GD%hmd}9uXrWO=bmKDSg7jAdYOe#)mDf*9AP@j6Qq4`QyzRFa%m7S9jfMx~k zJ)vnHp7?VJMunT643s)J;BJ)oU=kc^CIF8!G?q3@{5}q&#KX+L36g=DT4h#Z#u$tW z=l>IHp?_os30r5nl{JcfwiIlSG5=*Kydd8?{8;${4R1jC+Z1nktmEeTbJOy}{lhiN zTvU25SD1K@hDSj`V_#U>e1yz>kU5}*GT9>;7^Lx>3#jrF#_5?B*T}(S(V?!7Db!;U zXonACgweqIO;D0#L5G)GC4|W=JwxwG1Pch zoDLWBRB9xMx=UU!T?r-Fb?W>!qW+Wowp(vQ3uIq{N4b53xwA#_gfj5_>?9PM(d+T& zezD)LSYL2?SGH3gNP=sS!Sv+jJ@50I(ia)*}q1G~kWr94PBa(OVz0zHo}f;l~_6+?*_#2;$v za0uC{+|_`-+(OH|7m?rLKaq!q_xU}z9Di}%*~^uw#q){$x;!JD^i&K2jWQ}?H zb^Z*MYEZZ$J9|t^Hn0p6kB4MNbFA#DUmzNuP>if}fKZ&nhp@#A&R%mg=%Df8+KXRH z?x$aMSq((M=41kwelCBRWSRqtm`uO`>C~e?nY%X~ z?SPneuffXzD02sd9vDJEU-d83Ogap6jY())VLKX}$%ESU-vU0dm#2DPTKDG{>`|p(4rfA{Zp8s{{-+uS zP~fWQbi?>k3npkma9^AZV8OiSz+8t0UF!cV5efmCYpx<<3klQ1FNX8^4g($z}nB?f{poG3d2qs7X7 z2gI02d^`e9od8e1n#el=V}@34Y>heB2Q&E}v4jJ3fl6z6W3E*efA$IGmCfWw^NFSn z8Zu`T%}mfK-@&kj=ktIm=)&)4V$+{G%^rmhDV|6J2^YRvGnS&M@whbLvIo5+66ZKQ znYxH%XqCA@_H5TjNo-U(r40c9OD=u3*IxqA{4>ahuH1hHz!h>7&MjFh>j+Vxe)Rd* 
zSGSYD6qGJ2kE7G4ApvgNm=VfIyHPb8S+bH}KqbZWqSLj7L4>c&nK^T5121) z@ef$-u;EIV4MuBWf_z1fre8LKf{x+*s6{3O(IYLfw>c$);sv0I$(`R}yo{)-n$K<^ z*U35a7jMntd!L4Ou3~yDB6^wwu&6?u=Jm=c=}sWMLQm)e zvAyE+tx%ca9=lfs8+TAUIuGJhg7X3wur+)%P;0v!jZK|XnfR=iA4MF3Vur4O4MM=? zKliKCc}zST+8U}q7hVHLGoawYKiRB@j(VVTawrQyFs0#07}%j?6k3mg8kresGd(Sd*@hz3(#g{j}JH#RqRrCi_V7g zH4+H^aNP2Jp&OD@Tz9-u`qV8yFZ<0!n)8O(4e|Dr&2hILhI-`Glgxp?ut?_0~3Ym#Na{fp>Uj^457}EA|^@135IO*eT@U)H1BHq?+cxNi;i@{9u z!N@I246aX~wZ^*v%)5FolKv`}i1#D_jPJV8FJwIMWZ>(RAS}SQJiaBhia}GbdaiA!(jv|Ftfu+#Q+-O08 z;m|ITDaiwGP_~D;Ob22&pPNIa=MPKI8BTb9$=v#7YDN38`&d^e=V{Si{Bqb&nH0b{ zJY1&QF$NYCfr?g3=05p&dvR9+^*1MLXE$($mJEJMpC8TAv}fV9kJ~DsJX1>pwG52if5%q~Bli%KhR-*0f zDT6|p`}H>^e!Sx?MKNVnOlT6vVnW#%ABdY1=a4NPph%>Nv!*khe(L+#fhjIM=3Jw$ z{$9LI>G=c(xvFLvl(Jd;-ec5O;Y9bS6EG>I#a;f(=;Y@D^phy&L?|-=23ZN32uB$P zVo@{aicbfii-!oNc`qdKe-j5ZvAM$IjDV*gE{4U&VHo?K`219$4z9mA+blhfiKJ%G zdeLFzTq}&_2e$HIp|RyyDj~zQyVs|E=i~Jo?vkrf3=^9zpeovz_t9DhaL#iZ#zDNC ztvmvZvBEfS;~Si{B`Cb_!NSGvC$ZxBQs9cb9(c4g>BUn@1rv7cFnz zbTeBLTRSo>-2>F_ZwAva>U18M*KIe`g|;yI6y~B!>)e1`2#2vVI8*&1lU6X#6CcMD zv*g}T^sZ*s|1!LAYQLGoJt~v^9H9w410`jKMw&F{iLP%_PcD<>0f=nFm1o;6OtU!j z)M05tJ~`QnGRu}QuYhDS=Wf} zHJOt|C+{qQkjU zKeC&#+rE{?A|Ddue|FpyDVFL*tBYsr*wi z&)%uosNK zUx4G26v7elG2=qftDSlhLV0HTRnSv-x1I0Zi6>NbrO^VXJ9mTCg*7E}w-PX*O=*<* z=_${C8KRh&->XZ^0@TC|#h~PuCb>bTnT_|Y2rG_{Up0v&f`|=v^&%~jx$!HCJzY#! zjZnTT{@k~qyVsHvV>w9RZW43M%s@~m!S4C=OX3DV(NIGuIw+L+=kol!?a$9GX%|em z(^XTSaZ2Qger!~YZb6ct{+!8r2=|uU<-v^e5?~^uOwnKjz(uBpw&UaY2zt|Qf# zlsy_F8?#8Rxzi=tq9M%Y7J^Fhu1`eVL_tP61c?^;Kb6LmM(9OMz3Gl^rLrsI}9YW#hDq(G{_PKH0_45z6_Kf4tM zf$2R2;jtN_Q$WRM!t(tm08E1bAcRzm?`g;dHj_P`yx+hZLf=1<$*P3?MmCJd672Mr z-8phG%8jVhfMD}tr5 zUe_$d{g(KE^(Jc5@G%zdtkLM+==A7(CGl55pSX`2HR;7e2->n))vA)}AYmJy&WlES zE!j4aqqV557?vS!(2FyuJhb0R>PvH$9pX4 zr5id?_fRk#Kvp}R==B_kKlHXKqYb+9G4I};T)R1Zvtv%71+VK>oy6NkLg|((^vAw? 
ziN8RcjSQxcz~}ImX4Rspl}mfgJMQgQSD$bXk8)*d+se1}od1lcZmsK3A9*cl`8D2t zLCH0tEK_9#_QsLrDOcZqRYA2+yai*ZG9>>ND0rS}iGVH?4)n3xx{vwjH=Rv`qMXzV zQBGRi!z1SfsgJbLTam%`PcZyciMq$5f`Wrw-L-J3^Nur0^5LKMn5ov)4nLxIJ<1iy-5Vuex zpS5^`ka(djrTQwWgT;rD(k;5!?5)Won-d$&TP~!bMN6bUeE(IFbKZ+s*=QC;A@PRJ z9x;F!TZjbBIL4HC?0@v&7$CD#iE;Cu3rZytV@alE%r+&i55TTLpVMS3h~;GL-rUTl z`IOesO0(ehu}6VKbWlgTiPt0v{~fPt651fT*6cgA7|obtcA%Z%8Q_v5_&= zlsL2}f{aaA4p|D@3HG8@i>;NjjY(O%_HvJDnYE1L+F7jw6p`O!e)hHdxTOgMmd!W& z1#g{WdHa^@`;MS%$%{`r|7T!%eI->>+!(aX#-8j7Cy4uU5-+EmpC8uq?hiz^AQR32 z2mU5?QZAz9>Km!uPQj>(c266L#~(Z*gJ*FkMy!H*O_||BQ%LX(`7_v0(r$kk|0)qlUx@jDSuz zr4q%Z*2Jli1T^J3dYc)Ab52Fds!%mqJXY^uVeU+f1v7e z38P9>T|RI(AlQNC%@00MrBz;%1D9#p{ZRm^0KtVnfVQ(Q1PY4lp40QgooPB*x^<+& z4a|yMFUwrnb&x3SZB`R5DMaja5O=aFUnSldf>C1Q{XeZ>g|JotnQbVN_j8sxA)Nwb;py{Gmbr zF>@3(0aEK1z;^#Km~nybM5Jp)nAfkw#r|`^TT69u)g#ZET4I&UHxZBT%Fd>nskYzGM(}d29fxh*>d_ zhl1%2pSgZ-U%J)5|H$^^L+--q*L_n!A<+mRi#|MHC&jHBm(Hnb=E>N?aBhl2XuwV= zlRpk`?Vc9F%{;kOFo)2vQc%pOE;b*xC3CSW6L`f9R1x^IQ8NDhQ*K%60eq*qnUQz^ zqv+@5930JJ#Yw4e5K;a`rv0P5^g+xow3<#(&33a{X2fkgP|*1wou6!449+unj+1h+ z4?VR1R00ZI4;@WVoLA07%M;;xPCV0p`}hU!@6}u!dK*9|O{-GIp(>7H(6_*Ux`^J6 z;$OzWuFU{^K)_X0Wj+iR%U_(I&DxI)*%f@Edhhl7wW#GW&~R9M=vo@I@&m95f@?Lq z-DaSn#_`0VUN4E=7H-cCQccIHxQQAX${g z#+N`FfFX*M-wtzpX`kerDs>1-2YfJC;#s4PD#jYAj2uW&nIsex;C$oHS3g}2gBTx@ ztBbPuf`;n$xQ5jJfaqLW^D{uB2sWTNjZOUUg;dYUqhYDTaq=GY4$v9A<>dJtlbYMM z3UqRVjGU_3-utlXbw!eDLOa&ecW>@zJ^0)(MI%%*LMi=$2SI;l7 zNBV?&+@+B)(3|hQ-@(WhAR2-|dy)IOHfP;{mP*97O2o*5_8IBI^gv(ZgECBzq{n9&UJa#wu!3-71UR^=b< z-}(aR6$$?eP#RuV7#9nPNXNk_F$BTB-^Yf=!p*F(-a-*9pi>NCH7RIkk}Mz`GF(}H_#w{6*%I( zm`Yt#y^u^(H&2Ib_z-rBmP@2^)UIF)VE^Vta(A-NSuT;9F>mfQD4~6+oP5s3Xm7Kq zo}eJxzDvWY@#^_ZFg2F+A-P^Q#4?GIkB&v~Dy_-&9hke32=TJea-c&7R3gquIlB2< z=v|kj&31tc(trxJz65}%eH7X#_0q z6GQ?_Kzcd57H=I7nL<*XfM7|bC6jap9321F_4FTzm{A(od=8Z6FFyLC&{*gSr{neS zpeEEvyo~^x5S0+?xvU_aryU5Lt(tBVZh-=o+HcbId?)cJEJ);u9R~bVOEu!%TZ*)1 zzwhs^2`~JdPBfIFy~8?%@v#E%WuD?~y!EO$OVqd5wy}L5zUKy4Yhzd;12P^h!c{A$T0+*Z>0Ii 
z0EdX|nW7&8)Ey(Lf6V_4M`YyQ0fO7@PMId5ZnE#B5eO6(nC$Dw#k8pSK#VLMBM*-a zkqLKg?xK$F!M5Am*TdSo?);DErvCgo#pV-PHiW+ zn;^*zkr(16A}5P0z?O4;e|m(Id^!<7&rhFrS+dQKGs7t%AXdc5Lp=guP;qY1)mG3* zGBp8h`$?K8@;Wl8fyzGX{M($VCu#^s%OM$NrN8lwKdo{98-KaZ#6;~UB<0wJ3twmq$L>R)?C1QBpL-TgSlK z2#*7|hy$Yoyb*ElY(CVByzfDbB9z5)!b+ibhqfmxHu_3@vy1bK`<#s0bmHz$I-k}7 z?$dz3Rnw6c#*&Gp9n1k62eshUmwF39mLIRpJUkE-DrP1Bz!xg}8R5qMLlSh`8$q_D z+({eT4Vwd0h__#<igs8<4xJ01F(-C$&-l;Umk$B#9zod^-v zFWWu~LyD{G!Opj}hTnEau1JU2?sja(>ziEtv=hBZqP64ZwAPKL?7vJ8xMhYXo%@=_ zEG_d`ai@JYRX%m3c4SUe_DEC6{Zvkkkoa75&gl4dUP?f$Js*hW9?#`>6w^&83#5?h z{OM~-j)!2bS)q`zVa{QzN~`d&geJ17IA0QqY$Aa?Lqm?LX_7eyV_C{xtg zqRhqXJC1h5QhS<5yJ;8n1rL&@-~j?gzVa~28P=;bhK3oW`NJ`bE;4iaGdC4pA#S{Y_8Z*`3V~<7x94adDruMtvo%33})jT&$Ort$3SQ%A@*@?VC9_qkZM8pbAuwu`T2mXwXH(jQ2w36uzT zeIfwUf5;bnp|MQ}vpR62SOHS%u78S7 z-Rxjx*E`8}p5|fKqF+Au)etpTqQ!4L94|ifMI|&CE*Q>LL#e3}^10p&lxdx>l`==+ z`5GY#o>s3!C|@;I#PGhiXstnJ2zVOWn!z~D><#FQN1IEy$}|%g7NCj2T=JX*aYvIvB1S zhb6VYQV+|p#ZI3?6MYnEKL6o20Kp}i4{&}PFmR?hkPmD*qK$R*1h(Guf~SGrUuG9( zdr$S%JS}dWtQO;p&e?B4h&=wA4*oxr&E2@o+p3|QW@Wt^F+c$!tyw-TG_CI7%IvCa z;m@H)&>BOWeUChDV0K9c?rb}Zh-J-p)_>zIamYM|i!htfEH?5c%SDdM)NCVzlO2PK zeKTWdPdXVb4yet~zo}rxNv7GyEiBK~i|OSv*i&j|lOf#;N4zbnhu#{Co|v>IyzyK- zhpc3Rx_l+zb=&RYq=n{h8BD(2tvUUm7V%BMXT6{Ia8DGvUk%~5?G7cZD$f!V*H_u9 zs(tq<>D}J)N_G(SNkMATu|~2ou#Y_iU9jdV&oeM8@UKFnmoo4vy0WwgnGnD8HHAl= zVy%m}1g;1Wze&Q`ZDq?nfGj>U?$T;W>OHysSPX=)gsXb${KO~OG@(7ao$`iH>Xf_F zdd7UM#h21w{{DT-H^k&?xFX4JNSxk?e zC3$M953N)?8lOX&W|YMj0LjfsALyegOUq~%>O7&F6B_w7a^4zs1krbw&lupaT_w`U z?L378I!KN(E~wON98?wj`pkUIZyF@eNjC(X*{iYaUqp+Q67PJV(4-QyWYowvueNr` zL^OOV8lM{US2qls(t`mF}1ZR~_@R;q=$Qur`XOMNo$a@+Z?k}>3pZv9ftsV^>wdW_ha8SR^nZNlVH z>co}tAm7Xvlo9E{vnmnJgm2%aYi8#2eIja}#b_d6+Ss6v6D5nI#3SlI*Ir3O%khj|r6Wb&UOc|n!WsCJV6JkB}2EJ9Pb>*e8aZc(EEVsnE}hoJFb>E-3rq^%~3lZYCb+=MxkRR|3~{)NuI*z3AE7NvP}rR-R`2=)#q;; zfk0mSoOmTo>g`30+$)DnoKVuN@P@fvb~E}kR>RI?uO8eedfm_;&OOw=&qmEXwH~u-|)JANF zEdOL@Fl8C(ASl7kjwv7Hs3Qe9es5ZDNIH}1=8+OUTD-Vt!+olpj2Qg~TXG=^-(C|d 
z@LA%9cYeGU^H})neWIZc!?*Dweyp!m+p=xAPp&2QET?Ga0i&xRc(;g7D4=0kJ~w^S`XQx@_moU7P6EXC!;EWu4{9-~XoF_M|4^3ilX}3C=E#*9It7-@dS+ ztEcC1?ha*h2>mQ0!+yPoJ$3ON@nqb{z2SC?k1N+FY~RtNJBv45^*6LUjDKYHF822t zZ$r2LP0Ft~zk+T8?IekBe~e=d3$7-Fh`@b`v{C`Pxa>_(ZpGnre#~#kSI>-WYT!dU zJn1?|db~DG7-!0J?LwGcuRlV1gzO;KIH&1->W|HsmP)_{FEV zS995&(YNC+rk&l0Y4~aQ&~DgmCgUAxHJAs;VHI62`@Vzj8N6HufsrzC(B}<|IAd*p zXTBO4pD4s5+9~dB>~QDTW&Jc;=ghC>{odA3;js0eav%c)E*QbE@PX0yw0~wMfECH| zU>=>SuwZ#BWN?aX z&B+|%A#L{iY^YgfhBj>%N9^YHYdW3OM8FL0HIsGjRM+$lJbhHkVMCWHC8D+(il6qg zPS~`zp?%fLq)&XX(3`#r>qz4qZeTbp11F9G?Eh{bi=_Kb! zXAIHE0U9z?q1gP5|00{6V&=#TzFiY%y!-G4(~XyG^eW_aJ8o!#v5ro73w@nLNsNH- z;oh`@;*@flMc4^`W@YWH?$OLo_!@nq6EtGGKhGZTD%2ge8Xd9~dz7Q}Fpr2eNGT%! zj-Cf6l=F-|y|XyF%f+V%eb<|e#5|GE2x-;gekB;7m0?^OT-$1*5l)Q?B~hhS06LAa zH(n0pa?DG&h0cy38OuyYOgS*jFvjv+dHJ}dnFey4o7!%pBOE z%#qC1C+qL3a{7J+i9*k|1^gJ4}?2*w`_Fzv}g@(o7y^KXlCCLa2*9|bc zED+0PoV=KwS3#?L3>sKuA9mGlOkEQYmK{~kTb-HxJAnL!izldCL+mYaDt0iFCrUm9 z2*`|!Nhq?#jmUB{X?g@F2kYWYJwXPP^sox+LOb**fm5^BL zml9~)?UCA<55xPW268`J7DBYiojtgrH0`Oh^F0_(^IREIG^Eol=`buEcrhz%zms!b zAGwziUv<@A?Y;tsYE?4T3A2|{yr<5HmHd=hV@Q*1Wy-HqAPVI-Fug#zSav^RrYEjA zC*j7GZV)qd$9JQ&8K5>P#Z^1mB;xr#j4z#LZ*TNj`HU!!6zW$`d65inOJ@uUq+6b( zcC{!C-wSwOcSpGZ%5qy>IqQ(qYfBkgC?H6Z5SB3H9STk_)q}Ae-an{S4ruqeIkq!c zYD@ajS2ih4FC*pfb&u{J#`16aWU1~-e6o(Mz$qkFf1r3+Icieu;L-R7O*v%Tn(L%b z^w=HWT_pWF{Rg>dv(h{O$J?LubM{EGGkql0BGH>Os>_eojmM!%ddFNYbiEQ`lr-}L zS;9+aurun*#-QueIhDxpG-V3=&W#p<$KL`Xuf$QLJe@XoTO++s_EgI-iJ>kf%``gr zF%1!9q`s;ynHO=U#nXg%qst0rosXT>19HqG`M*w~_Ex|zoIh#oK%7|R zikfNLsXpo0Uj`~Ew&aJ(&-mJjeh!-Z(Lj z{{nmJh-6fdK)n9BmlzmXz?;#El0a z-4RkB?9r|1j#m2+D-NTiP}PKUx{JwFI*7OYzPin!ss=_JHj}T5@dVe@u;)W$c*_@` zi?S!$9{emf=B;2kT0Bge*3`Yf%XKbLZ?rJHYrFM$aD+Z?urPhCothlV>h5p(!%#ob zht=Usp;%ub>5)1nXg8@McbNKh#B_Wb)oVFeU9R;T_G5ov z*L?cmiv%{C*~8m4X78mMehlPVv&t$(;BljsJH)epC+Qfnp`(P`N-F^edoPg2#%26- zDVKjP*=zyA2n`Af${K0D#@d;rn8P8gM`Tka%0+=-#*sHbG`1yq#+C@xn+q4Slc*Ii z=d6=X@+Vq(G|lR$fzT4$Jr20xUe}t!-X=Nayizp>Eeldsaj2YtV|O>)V_z-h(SFj9 
z&{NsA_F3Z=xd_`U20z#dMS6N-Mh8zk0cDF%0JUil4mo5k!LaE`Pz-fsOA-`^Y4f%$ zKp{e_!*fS$V=aI45g|u`7%6xR;)V)_zJdsa4?RTt2Ah3Q0@p8FqK~ zD7xH#+gO>S(@SbX#+{v@mtg%p)j`3;<3XBlFf9v_SvO}vA=z7e0pWxY8v_?RWOV!) zYM?2Tr_T+HI5y2p=FQ6|e?}9FT5Oc__W)Bj7*r!>4`}pm+MKsIDBM`ou%e%$@?=l9 zoPXw^Jv-)}KZ|pSbHh6v!W$jvTUi_g8Vk#s(YlX@yH0iL`{+M_jnwh zV~&;TTu;Rq#oKN=SU)+G!{-?&O_qof9J;AZ^RYRlXyi(b#G$i)xm*5I+yk7cJUr`G zUk_5w2HOVYsgIzWrlib|v4FDMkmYzj z%KKz}a7Z-YgLqR)oO;}zA_imXU)oZz1Xy9L2Q8U?(c+nklTWN%~2uhra11-MwKQTB|v7FF+9rW--&MXp9T7Bv0WxG_4?g%s+HFw<{q( z;+Bm_Twvk0)LOZ%6!vPX1|UU(`Q`qco+oBk7hf~nd2)VC5-R{%u)(bA@Dhkv3qFwd zYJG1x2a%LUQoe3CBASPeW>=KUH>bT3Y@^B}R>s8?)S%4K03ftPuzK=JH6haPx4-)w z)F3E7PgFzIm5bmlYT1(av`oxZlSm(F2}O<*@JiJ)MF}2G?katJ!mmA8amakNNsG>| z=gn7bex&o)SCO_fV$)Z=`-Eq`SknTAooBu1!D~0w9e-0RkI0$RKmLx^j6TPnxdvuJ zx%T{!J|My`d|%jvM8lZ+WG2l*8Gg_1nJzc-tEP~u>;-y#%En0hfI%@Di!35UFWU&do@#aDRFKV-_^!W9pia7zNe(yMyeR_i$9j zick#ztPj4{({ByJcYcs-gq!~8>$&M+BKlX}gHUe-0uCiR)#Fo@I=jMl9; z?^yfKudl6wmwdQ!pOoDpxua+kK>{}`4l;5`W4Y%3RJq(iLEoRK-n@98Zf?ytSu)D3 zB1ixkP|GW z<*iKoN)tt#_RMP^X+CZbCNOQIw`^V1=BpAPr*mu9N-htnAf}(sc*Oi*NwP4Vd!MfD zuSm~vOIB74y!>iF*aj^zHpt0I)afiPVgz8sg{yv$k-K=)oOq;4cz2%_Y*t9nE8Br) zphkv*o8An5LdQSD-`|(9q2sP-lU^$5XIIv4D^a_r?|U>8T1woq6yxyBM*Q6=ecWlv zPZ1r99k%Dv>DRsH7g-e}2~Y&3pV~ckN^xF#hrbdnr1|i1IG_afw!WujkOEVORi7tb0jH$S_PfSicl}fp#A4S#Cw$*2buvF{J&vm1Iedg_(i+Kuz)bR4$&y zgCt)znjThTvnRrhA3)#5Fz@ko3{DCs^9GmS4s_scvD1VqgOY0S#&=V&x@gY;ju#Ho z@;6Fol$haK5#iL*f_}0!olgGtCl?Qv*foBB`nT2GyZC2w=*mSbEq73)yt@*QQEGD! 
zjkt!DsmFu(J!3R(@sA^%_ryQCn`o;r4D3z0-$ve(EI&2Q&><BqTs&}F_b=m35LX``ju`Z zU5_;R9j0~OESSc+TS*Q^=Jwjcuf&q@f2AQjvpg2^B~{8K=HIN@ZEiy5)~U)Y@*nSm zd9&dFJC%lx0h(ubp0rcP?TO)GhSa<_o5|z7#}sDq;U_i-7%dE*#sp%vd1;+{%Bp5@ z7i<@zKjq&Q_3I5Nb+C=>0bunAI$U`C-(ntq?Ki+#42edG8QrC}v#Y{hGKZ9-`3$ zdU&o!D{RGJ_#Lt9ZXIyz!VKuG!%pNSnEG%wxl?=k}>3bNyDs!#6s zu)A0c3o>X+W`kK>zV8_1?w)RryI(fO>&I1U6ss!6JB){r3-5~OFm_a7-_B6cuIcc8 zei?VLT5EUh58;JmtHjc<{yrBNnZZK?gFWWfJ&ZU{96pp5@9AohV!_;`klN+6ni zA@T%DK~tt`W-U>bPfLn?Rlm8{zf8WB#o12-Mii3{Ud2$>MAmU-umdobm@F*^$}e}4 z__)3DylhF(Gi(t}l5^9{W(W02-H&W0_00P3uDe;1^55}OsWbBa9!gTi|LiVnc)aHq z@^5sj!x>G;(Ju-0P+;*ref5(Z%vdGIJWvW(&uEB_Vc_<41Le2J&2-C2r3EY*)aW;b zWOxD#$$Xd(=T$H@8=n$R@u4(;IMBQX}_&A)JDhs-SMXcvM141qzrj<5VD z>doHl_s>^iVQj3zxiylYQ#(7JfRPe@>)NxZC{QcDJ3aLw@6CY z5d@_V(g-LZNT+mcK)O2w2?LQ132EsLB^BvVIz{?jo0+-C_r2fe`7$47m~+lvd&NJl z>$h}mDn{g}=KARQ^+M)n$8jn^aVg@lvEczpE??hOG)^EpyQN;bYupO*vZ~>mrS+ES+OAPOV}HlPIKVGdXiqAE`DgEtLU{)6Ka@5C5qKfN-9MurnwQq+o7zKguq8?87QR&mf7#-^(LY4cyAjqN@e!wKw($lhQ=8 z#6dt}g4E|6T#ewNLOn?R4O_h?o@Uk?L(X}Y(V{E4C14>W))xXR1XBdH2p`;hzLH?N zdWiUQQsjmsaaarM@^PVW>(79d&$8C(R{)ia@3ceY&SDQ}=TpNjq^&_2f;Pg??s}nV zFMc2$Coq}|WyJ@*H4itiM4N#lf)HO~Z_pla3xa^j=7^oM@npxZyN2>(nxxA@*uaba5%t<~3%I~=Id>{19eXYu z5k^2d6QOEgRanss>^IaKUkZR%6}4r(W*;8~#gp*{Om)ffmml8pEkONUVHMvWDl z)*bh8qu@59HlQl7o|KenKHo`hY_&N%9Dwlg6eU*+%A4UnF|e~Fmw;Co3^z3TIg-gAsU(k(xmoVlY6%&z|UV z9?rWo+nOQ6^Y5usbNAQCth;A<+7P%Wd{b;X`f>oVXzCC zG*tl|TzgjRc>}O+%~M_o<#ecd9wXt*#CZHR5bOm4M1W`nFlzADF`}XybQ$#C|_>3|5G7 zOb2R@gp)5I^?-Mlxp;OQypX?CLEb7to+l&F^CEV3{1+{VD}Nw2dz56#kbN z3NJy8{bW%sX_((2q$K+D&1}L*@DZTnB9j?V7Mx%b2UaOwJm%p=eJEFh z$?Hk(X-=B$^{RNAlZXzm1QvvEzi51k{t53bmtbq{Ft6)Uj{y7zOb8W_6IFK8yhYPs zsZl^aa6;iUa|U#P7R|s(P91&o>h_Yy-p?|gqVDpnqH|9iL8J4(%y|clO)W7t2%z&GS^AHHGUDH z@90NG;@m^rxjvGtA>#sygvbRi+xq=cifV@_YO!U*6&-ABi*%towhgePtaK+qK|K$8 z@P05INSsWZIh^jf2)2qDU?P+`?pmO~NH+AaDDb#NhDs7$>i2pdb4#GrGt>K5=pCj5 zB0r7|jm$G3YYZ`Rz+rpMDczc}(=<${0(c80vOWtG=Y8f_7D#Hn;j0_9L0B|oia4q$yKc;Vbvp2J8D 
zyge*h$`?wEoUgw|55`r33DZj7#~z@>+JUm-;MMIl8`0EW7hVQEe_^dfHQjqixxnQ+ zm^Vmhm&Ylh`YX?XGqs(51(w86b1BbnVvnSVs{}#}9Vyob7 zGju@D)EL=qrEufsavwrq!h4CEsiW5`QzL|UPZDghH_)t}f~IH_$i@3!eX;`937)Ep z`QzMmC4u_U6o27oW#KM?ccKaOy8$s1INd;(c5`d<(fN@!CbrB9WqbeD)`N}-6UZ)v zYpv6_4EMg|pYz3xxWuC@wMfhAMn0OUm4WI&&M^X%{qwG=w=u{#P%ia&CE|U5%WKC- z>f6hD-%b9)iPJhedApg<9~tEGB@EO?d0EF9?0bueB?iQITNUmg`0};^Af&3g&>AT} z(`j$i?-%H^14XMJ-oKxGy#uyWMw6lV2pAImEkcB{*^TJv8Khp<>;es8NK2jK4J$z= z&S$c@S#J~)x116+wL*8T`SxCB&uI9%kfCx`d5l{_ex%uf9~tyK8J;m96mz<}B*B{c zB#z<(G2aeFOQlH1387t{F(rpjzsT}4YSF|Jbm)6;m+p#iEflPMo(Mn_LeAlT6gq+X zIwrU1#HQY9v%a$7B*RNC3lujN8N-iI^6~=okt^N6JA4}th?#o98T1{WZGsGqawNX@ zIOTdv*HfTO)QKF^^=;=E>V1@I;Fmr|Y(@s6vGl}8UfZ(z!a?m`$sP7U?)LLAm@S;#bu+fnjKj-m|Fppi>~r zqhg$JQvp1zX|@nP={vmeWzj(_+4pal#95S7Wa=oJBgJ-YK4baoP6lTQYIeJreYj~)!Y^Bn&d9Z&kkOd-rThME;0zYRje^=NToUH6V+i{I? zr%ZhwcRnx~sgeI6_Pzi|_}QEGm7nN-y}Df-69Mn=?mUg9T*CWndw4*aKtZVF6p2&V z9K}4l1M4t>M7Fg02f;DZAhVJ(aP;&sC}{W-WAT$O16%NCUQE-AoiZKRA^+d45(fh> zgadtVkrdnQT(fmK{Na7Ns}4R3vG?Zm!vmsE)}q8Veh4cDv6MFpwA5MPb--G|`Sf^T zP`3Q!8VPy^?v!}3!u|4H^T!#*4pwU=>?t9+lCwhfqv{`RQvam_9DMV~JHYviC zA(ILV4%{#Ehl&D#TSO5+Z2UPl=85yRM(TZ2=9~WSoZg1TAY(W_Kj=`n-)ZkchRZRm z^CM_5@sQPGBC%uJEJ{wRW=S7*DpNF=Q6=WX&diR|(%*!BbTXUe$2kw;HT$vM6>m7P zKMKmcHkvciyt^zbz$Q~gIj6chlCKTn`BG+NzI-0%u84gTJIjKzx3w-CeM$rDy8Ut< zJ(MWH&m&tpf21e5$flJpoGmX4P2jP}Z~h_o^}a8Tq}Dqc*GY%@t*2xH@6BE-Y=cwl zM=mkv({CE?4L~@W?YcRIk${4~%Wg;T z9|{3eVJ-G@&mW^@&ICgqI@+%2ydB9w)`KBiW#04w35eek$o#A~R-xsgKYjoq23}Ad z(e*n`$0bBX{v8(;02t=5Yrr3U0doL99DKUp5IXhL`qd#e@XNr@6~_nvGV3Gw@U&GA z_YW8A`LBOO&_720RQ})o4*KU0n@-|;CAz2+1Tj`%q>ifh0n9GCT?~J_p;+_jzno}q6`&|R2mBKVbb+YNppON8KUMy)QW{q3!ScU7JsU)%*Z_1iDN5WY4bg7KRq=J@b8 zSF6zcT`k9f;7!@KE&J;h2AjJCPP>T3crOwp!bj?1NbF zo3|$wzk;#CxCOS?VAx9C3(CRF1X=0JNs~HPGq=Dl8#LPz94t$w`zFZK;R_f@4_f%^ z{(h9fH;AA+kg~_D-tUrn)`!36JS=Z)grcT%)1 z2vUWE@@Bv(Ack|t>lFo`IPu^?x(6gbp2Z?epaB-9>qM6Xg z{FDDda019&-|cSO6*;nneI-Glwa?}&PIpGRi|W5aj-#R6L|T_G_cJJu4a_20C84)S 
z0-Oup7;LbBIRiSj9~YZpJ3VmQM3(^7eIV>JtOLo?EUXO8ckqBBRy!&R0?Cve1u)PUryK&zND$l!Ne6OG2Xj}KahId>x?ixJ12(f%gCnm6 zMy!+o)>`7eex(#>=Kl?1dtp}REvM|(xa=yl4ExiN$2kPgtXNI;_-(4Is z!O1L}#Sus6{3?_M2ZB}p;+cTiKz{-ZHAcWc5w@HzeZK*3fqEQYGZQ#s9;Se-$Jp%{ z{O1m*r;pG}E4#r4adUU~PkP_5ELbsIYa6h~7b*Ku{?Bxf2z@eqyK05~+IObg%PN1D0Vf|VZhA9qrCRp}SgaS9j zTl%p{%A-#KpJO{k_?{LWD>}3No4hzVx-LR{5MI|_yPIPl-vN+UUx^-TorirGBfHA> zZPmlG^4|q}ivdY^`)?F4nFyomYf|va9?o)j;IDH4uRlCi^XU<{rA48omv3yUXM>Dj z=)_0_h#YN)eEDi$2|39TpTK5xiDY|t72w0>2|Qs7*iD2dfeWY)slc&h3d8R1lV<(& z$mxKoB*IE*zreZyJMKE~@qw`K45)$U{~Fk%bty4GlefT1l@65!MT%i93f4 zlnAzQt)=DsKaQ8&aNj#9gYoYc*l3%Ic0?F2v_aILO7pY<7}Y_wHuI8{j(um~S83edX)_QUWD=(qce78oi|fdJqYpBj<^4eD-yitZMCUF=nVzS5>iHsR(Dh8{x`m0-m8flkTtcLVw!LR18tIhFS zY5-Sds02_uUBu;n-{0!|_*tik_2Jbn9~dU2b4T7pMS%?Jk+*@Nk#KoF1-JmIw!#F* z=t0;!w!dTk$D9a}88n|DkS}BV{9C3zaeZ%B}V)&DpG-KeBB=wGRAQSH7xSkByCmVgK~(| z~hQ?~&KAd0IU|i8K^})zxNSzDuB;gRgnSWpj89)f&2dn&9mAt zAUr?46NJ$APj@@P?Y7-PI5ZXokh;=ZoN$zu0b=eo3x|UXO59(85QM}) zaK)&jWpEdM1i!B@^gF!b2IIiT-*VTLVE@na`Du~zT(TUw`R)4h;_1Zov7vIf9pRw| zM)O_U_ssvCy}pR=B#$@bgWSZ-K@QYNfJ<4}X6W$T_ULdRFBB^{PQ1Uit2- zR?cbK6gu%`Sr@MUoPa~L|Lpbu87rh8NK63-eQ!|!G(!qRO2y#s*MkE^X6}5}e*?wA zFhr14p_}Hz)%3c14_7@r1mQ7K1>4-Ogb&ARf5q_d$7V^RwPZp#Xx{F97Osk!}YMrr9&{#OV0ZR@Wbc<*4U`t_`jGk(7(Vy^QYC?+!!lw#T`q?GLiQ z=7sq)v|v}EBhsDUalUAa=Z)kIAmV_wKruK7L^-Sx6ub;|U{k&TsRCM<0}SQ591ZRE z3C)7u3NCD;fw^>|hT;HD767b|Qc9pK4HH_gf_9C23aDbr6dTrGP+rV{4fz7;3bx9w+Xk|VDW_4>!Oo=_{TCG5(Uv7vvKEad4z6UqlT0X;3 zHzx>|i~#HED(uZYzrg)n|BE+o(xe3g`ZZZ*{ywnHZTv{igy-H@6g!n?8u)x=kroXS zP~RWpIM@J@CCLRR8wA!ta7xn)I%vFmAZ%E?2={I>P!*~{4u?NpM+XJdc(4V%hif=G z7H<=q0pjaA4BO@brEbBm^q%54Jc8PS|7Zl*2HP*j#p}Pu#R^wPV&dyqm1aK|#?QlD zdCkN=j|_CL{{ktu)PpXMh!TI_ZAjTw57>d^t3gA@q|*b`M+2FYbQlVNJs2j8TX*_a z{PdN04B#4n73P*v1u+96U>~?WQ%E@YckyDd2@xQP7k>j-d@s|!4bEvg$05->Dbvko zeVH1GCW(t0p|c~n_ZJ8uO#sBq8H0l67quz@R)0Nn6!PybTq#pA%+ zw6xKGy{}blh(e`uO28(Wzq|Oy_s-?MlQ@tz8ViFU*%+&%5psVAj|P~yK78a5ND0bevar^DZlSIh1iNXt{+Z*pirsN{#^eh<9dd3 
z1^U-gNHed|0c>I&F*cxOi5GmsMJEMhsCjMQcfPr=Lw_*cDK?70R&opK&&UE$*#fY= z22K?c6KZx2t|zaK+0^`Q{s*%Ee{t=vXCxIq=Htvg(6necy=F-Onvq=g&5Z!I>w=!t z=PFnsGDmVUzmR;Xg4edH@aS#R5AC{%vokG^Mi-X`i1VZj^wt7_Dn1Zsct#WRZZ??( z?#kecFu~m|JZfPcF18C`RUW8i>1G?(p3WA>SVbhSYNwj#|4#Iz0oNVynLhfi0!)St zL%GI$342q;Nl2XfU_8n>?|mqDf*avhOf*BR@fquxItGVS75kv3--*QVMq|&n!bV7A z%?p!&hI%=9(zbTpcrmk36ru47-e4V~y1_*hC8AW$H^ELgda>fqE-3%WXr_s14;Rn= zx^N(I;+v1t#f1YNXtivz^$Xuj8a1#Sqboe`r@j=;F5k*cCHra~EmnQl|JgIwS?=qz z{f(Q$(~ljJ;XMh0Ib%L|hBZ6+GCDT*fRq0ge7lE3DLpI^LBV`^W{+F9TPvl5Y#Zxz z{ljfDA4fhK|L3ZWZBz5wzU0-5D<@u%<=7WBdHkpvkW{~~YZ>)Hi}a_;{j?=Brtu2) z!5jZv_TXA%Q9AZyz!fHpq}f1~*g$-S|NP2T9Js_G;JR4JhmbnmQ>OsE*QOJgL$P!L zHWC8^@LjE^F6ZLn34!*W{d}blnkRqVJdwI{ZnY`iw9nRkCP8BHndyVC@BP!?NYsl2 zLev`>x&Xt3BI%O^jt8+0{u{h{=lenGhxLbGDnMlM}x@6;D{ zCSITys`*kz^WnD#5R5Y@0lo6TQj?DMYc$W|ZD7(F>E$guBc6=SiX zgvhA-^KPABIIyFV@Jb#nJ(}`a=zGNXtD8JI8Jap-zoI`9X;mxoa2mqW7zvA8MiL$T z%EJL;euWu91EsMS*%iH?qh9&7U$xdH3&Aen5>TuOh8yP*l>9ks9GrGoi206a0ytY0 z293LxA49JfnbENC(~*N)4D_p6`t&OT8|k<(y5rD(^_=v4i>^CVY&Sfd+u0dNYy9pz z@j>g9F3^R)*uz^h&B@doJ@%G^PjJyk3Q90&gN26r<*|}D(HtU+tzVz`UKr&@n4a5Q$cRvIHqM-_yt`MB& z1!0iu^UxTUc5~z}uorRfYFXfG+IZ*dLA9q>WI30fZNeOwu*8dpmLWoAQHKo@HoDSt zUP1ZnJV#Es#|S~Z-G&B=bQe#oT=(tUm@Uodmb$Q5bx$joh zs(znpaM>+s7`>M3kDTb1)apVM?U$Fn5YRCFdq_QhZh&H^cMB|3Qauoa;J2H550JrU zChh+kQS~fNK}=Bx=c!{syEB)A_x~BqLXu6r>7#Z_b7$K8XM-q{*7NvP5OCqZNxpCL z$+{i%#e-cY2gfPba`oZJQQGpv;8k}(aM*`&$>u{galYtGJcMBJ`0q9cmQWxMyHx$4 zvT^Dktt0N6-@sh`{?69!QLVeqqgCt=dUaOb1VKgFBL|uzDkd~9enXBIHDN&WaXJxV zb-VW5akAQBHd1H`tMRbUu6ZF|b;?}LJF^&k{m6k)m2zC(e$}_%1S1(h0woLwrn4}M z0~A~aQ4^>2=uD_P2%H2A2=n_tKjnY;C405ViJ~+2O+i6zDNrQgffECmzTvf^o@u%E zDr{lpKxBCHH2-Ce=Bgd{R8H%wIuH`J1dH6{+%;lnP(~* zSTBzaa*a*6jC^5_G9B>*Me+dCWCKAxEiNDvG9~>+zPNY%&m3J*4k2BHLDz60+i=}~ zJ;@hc?=EqNF3V&fxHLg{$IRQP* zyb9w40GR7N)yiiWl_j|ys(L$NOevD+$EfG~X^d{I@9Gt=4Kc{Sx0@Y7P6qCaiSXW) zxlqRK{Znxkh;uf-Iy2iz^>j~fI^`N180*ffeMkqR&sMBr0D;gY095uQf&A*$OH2QK z1th{i0iJG6)r4f1*&aw8I;SBIVV*T7xM~Hab7KMZ22_e>xw8Uu!C?#tEZngw3m7#_ 
zI%w!iYpf{D{IFN{?LznxuOiCcuERh?c-A3x=AZ-0*Q?2dHxWG0x5(nzBUr>z0_wiO zK$G%*)+)?w4LngMMz;%&{PI_=)um z&c`S3!7k)2e#L4U)DH<84(d0r{=V}z`PlRuGo7C?Sc&1=;oP3+^g;snkUo75irV|Q z7dtQ{MX<<(+BtYCbvz-SoRn1cP&UzQ+2CeJKHd-0N&+HVaEfp9glTH&GQ*`ecn`t? z7lb%g14fp@H8%ER!2fp9hhUvFkK;looB1Z_<(Sf0$W!Vl6aYoqZ07mNQ^juD+(Fy= z2vgRY)D)q1zPiOp_u=j^jjR5V+U8ylDp?-OH9r*r`3qQnwI=klyC< zMEe*{Lj5!D8;+gyrH%HSj1>=CZ@%t}Od8r~*!RTyB=_;17u@dv!R02(oR?#9@1T#7w{~oLEsMc&!cb{q1d}t2 z`yEms@C&iBpwe;%;BFYZ|K94D9>_<)L*{IQ;#WmSl_TpHD_ z*S*K@V?~d3^)26=SGZ$we+rI~I@^xE*4jxTJqNatFxnoaYuA1(iwtiZmws`|w>(|H zz4>Z?*QcjrU~j0>+})5H4Z_}&gg0=LAAjv;V~U8y%(K0BY*!eF%VuS=TH~&2f6GQ! zxSI=0MZ!dZh|z+?3*4Uc(aoP;%ZaJAA4ZMgM4x;zNC^mO^ScR)UvsQJ;INeehxZYhF%ed1~RSR4_z&d zfbVg9T@tx(B4{KMnx&Uk;&J!(cL2|L%_AS7RX~fS=u0RzI8bi-1fx4|TJe+vn1~WpecJVe+A@WJU&l;FLFbaw{oygKkatiM)CUjWXPBn+12yd zfk?-YB0l@;{of=R${_MSK#VC~(q=t?S_Yooy<#oh;F9thioyp zx$ViUj5(}9!KLrVFs126jxS9QoL1Tq$22vezaV3q2F>Xh=kMKQ=b)$A>ZVm4vQ6q* zayOlN^4i2qj9lb(Qj@&W(v`sB&_Gya8*TeC^LTQZMOtHXS;pZmmeI9|{&-FGwC)+9 zm|Dg~xKYW=Bkugh&2C?(#QomMr*3xU0J_QBBG4YXU&vLdB&XqfUQ&~+Y4TY5)=OXL zN&6DiSQ;7jlbvlYqVx zSIBDhPQGlz1+p?XR5u*9W6=2s#vtX_6kDAp4+w+oG3C6Z#f@KQBU7KgIZvyCdJkKx zcfFcqGY|c$JMPHowCzMJQZ|1L5$<#f{vD$u&EiLFCC|wP=GbHjm`9kSdiaH1IpPTB z{cF>qA}uGT{q_|2yNf^*9lfny3^bS3OU9*LCyS{ay98r5@5&dM7MiITyLWw+DNtx8 zLkS)r4w{fstU;Cvx_)u)-yp?<&&oV?vU+u6*8RFIq>;v7)*&r6W6;F} zQq(L9KRyW*(X)`yBxUrydq|UP6SRWlkYYigwXBY9iNiYJ6_lvXw)Z#Zext$}fMt?? 
zDIzOLdEZBpH8{sYRB@q!i;;Prl)dmWwGr`?Z9*DB|0l)L(n zmq2d-idPH+GJ?+H zyKen&QHbh*vK+<`;+jjY&xoh40`PH4C3|*}Wk#5Wi!-VpwXop3Z1KR$zS9)-ewxsSaQs$?Kfz|9(zm|}rDtLCn z+jYx38O_I+=O{djz}+%8ee9W81QE=%i|)}&-YKqYX2i73zeC!N@n{8QHW|A9vc1i$Yjw2)4m&RSyLS)BY@J*C8DgZ;JnypHW#uQ6 zEJzIT9#Y~kp?@;ewiA|&E|~ElUJ%}YwoU!iy@_aMeD`s~w?QON)mHYT@6LHyvaK+J zgUal8$zgkaY$?V4T+ZjykJFr&#!%pZ;^0o1NG=yfh!OJZuJa?5$ZZ)RgTkEM^Kpy6Z}d2@>bmEoet$#g9{1D@-UCaFY*Q3#w+I>3jk%kH-G1T9XFb^r43Ym$`5EcS_e-B573Rur zzb4~F9nJ)?-tjPetQj(PiPoh&v;;jslcA1$6^*v}zI;~i2Je?o4t4RnG*dPR=416= zW-4B83lp89{hrr7(SGGvNPWttB%F3s>j?H%X_|z{ zWG_*K5@hz^HTUiPZXa+-p>cuCUEQ;w%cFvv;?`$63qPrB0@#C%wnpN=Ktyz3dN3BB zKvAfmo4mNxN$;twOYTHaw#yfDu3%+WfBxb#iA%{p7ySuctFMN8>=*y>&h0^54K<&H zP-K@}CIt(-(f5!aaNztZ%IyU{FQs`w8Ck+`FhtQi*%}wuMyr?wUwmvk7snBYKq9YLzC&wECDVDZcA=cQ`f zYizt4j~_Kj#(R@8(2pkU(VC$Da-&Fu=3Js%A3b}q? z4`Le*L1Oz942SG?__s@UJ}0RXq^S!Hs*_Sbr#mR#s@rOYMo#r|ziM|BsqjLt-iI^{fUvsCe?kHCqQ>} z@kJm7ZxxXr@sby*nc;}yW$Xl~?*8`MYstz-pZ0b3o8xPUn!hMwkd@WBJ>J(7Am42L z(CdzBuVXi{)vk!BJK0^b!x-aResuHe)6@}$m;mun{ZaM@#4M#XrY~tZ#T8gBnDcY3 zsp3!~37uc^TwL>}wD#Ybf7F@S_35gm!!D)Dm-0!HmLe&9JzZ2R6kM%M1o_1Aq#k`& zrX{>W{BpJaY5Ak(AM>CLd&3@U6+2=7*Aurl^d&~O5i-B7heT8xmlx+Q&-Lk3OL#n+ zEH=6kY98iH?{7}bR(|yj=MvAc(t|4Ogb(bibA1hHm#KcChXijml_Lm*e$}qY5ZUk| z-;=T#gj%vK6%wdDra(|a0kc>d~MyEFcX-)`by?tKX9(<+uF>I}c? 
zoirvoG;9(ww(x4VUDIGpGsheAMqwkA2+C3G)^`ix4+QBS?_|kkwNi^?D9Sx!= z&y1{#Kb&Z&JNX9fbFzP47T+ztFJX8S4U5jBWb~b|%c*oAL6eSX#kV*$znw<@A0X;a zCmf3w?$D{+=IRT6ICc8l-tI*La-BM_f~=^XVSsudUT7qKc>-xDY5gXey27Tf$jnZ@?)U3SZ`D^BL!Dg*zk7EP1+2-Rq`;Y@o4jX82=3VN z`^CsW{54ea9YueuM-eGoc`z}rirdggLuQHHr)q{kR+*W_1{O0(uB8$qsfhbwgQ2EY zRqyIH6TeTvCTu6ux`5U(cFLz^lrVLsH^U`{k zK-x4tml_$@hm_D=<{(Ud=#y(#CK(pZoYxSCVispvsWK+UkX?aFspS)+SzLp;ThAlv?eU{y^8pQRGd;pqcBg}a7i zlC4z>meVJDM+1rO4Qqnx+G>7r7&IC7-8G?3kMYy)lertBTVf~}LOI}mvKnkEr6sCu zb+^>Hw{)JJ@;5QM8Fq^)r}_-lm&CVZb0*Z-mTTZu$o($yTJnRX6D!$c&T>8UI1J`` zvr|<}Qf3^T86if?56>AkuUD+UE!6S)T&z;9cN2ByxfqiIG41JLbswfi z!AGTdVv1gWVyjkbXzpb!N+FU({KXE-FR92Jl3>2H#taNo(c6)yB*zg6)x{I>+sA(B z3i`pAU*nA;!Kos~dt9|QC6$6qE#M^TABSSxI#`|%uS++*?)8x4S=bctBVJXNwkz$J zv%VBz8X};36UB!glKSQoxMo;r^f^yyNN#3LcrTWq!N&#cqX zzp4W84KrdM2w+(D!rFqW!`QbGflVQ@wA@u#;#eYelg9NytNf!0Udkq=bNMu6z9G*l z5X`1?98KYrxYTC*Ok^4jHAboTw~Xc--&Ci5r$c>~<0QA>c3?W(Y4Z;h4!Vzx>!Z?q z!~ct6=uK=~sCuh{_wCvM@3QJP%i&6>GY9L3xzCZZGXP#gV)A3lKX9g}`HIsdMv3@h z56h#YFc!g^)I2fvv(s46KIy{MlB;qvqvHx3ZY*rh)_!8b}d^n0xM_@|$K zU1`6ms7($rZiu_6iK>TNmP_xKZLY$Td#*XzoJstT2pO)LaZ*v zOyY7x`9*oYwN_xG(rsuasi&51Gpz36-1k`YY+Fkwp%13HA_>M4pGQ@6aR^%9k6y3+ zl~J$`k=r%}FwPm!`Vo#<+~xmeQir*VFCK&JnzG1}pxiCYK!y_DtCN+S;y-`ToS24E zugiRD^7GoeSZ_K9YEQMSCsJ)^5tpBr9bY+8j!;I|PccDn7B)h)WYDe78K`D4DUo>U zR~w=HV)2iWfdBVcH5Py2(`vX^#4XQhaVsBIXhYTn<>FuCtdQl2eft)Q_$0-R{`p6@ zdA&Gg^fTpBvO|$HW_%A3Aj)QW!nnZ@ev82I?k&!9w;=|4L?o@0Bu*4oLN(G!M_!@M zjsCWSw7{K2`Bs*@=FQ4}0fE#64h2NmX8KfPjGL&d`_lXx?$#&v@0-eE1CF^L-Bynv zh3-CVP+(Xyn-3;DkLfdBe-(c!@`z0&A`m0fk%E7&dGM**mjCA!?w=NnUw-`bm)XAl z<3I*Gp(2UKV#D;fU-+?u2e=GUDcApFXt=~H;iR0_=uZOAkKAHguA?2`soY}PX7Fta zr6O&1EJTL>=sKwfRQ0(NxhC!v7qw?+0xeK&s#SLe=K2qz`QGw3f{DDgk$ek*2{E{> z94k2q7m?OM#oD2^+_8?B?(`M0^!x~qGSgwnQqq-I zKDy(PwEHd`;W+nR-W-WJ}A8(f}>m?{XBfySP7f&emrB=gbqYxxx4pE~G zm&@NVW{3sP<5hM9xC`l_=CXr2AW~_rDQ$~sr02wWPCd=L)N)K_B@(Sfis+b3=OQrs zcnJ($A}(W+F~7b6?j5MI-;fr?!c-$>#Q7EX;oZ0RcW$o7SQC=iACi$@5T>bptrBU$ zd}NosEP7ZbwI_z@KFsEQR^sF*duTXLxaSs< 
zDH+}S_#?$yk;<%@yoD?3v5tqr1IJ9Z83jfmjIj6p#N>Jka!O?Q>VbJ#1~q4abiGe$ zqx9`oH$T@YIeaw>7uELNk1o(yo_mTbb&oBkAtn%8eeu-_FMv!!g4bLvSdTHA;}cASr`t&^mt1SqbAwc}|m*6)^PzZ5GuaVBOHaO6G*quYPvPdBr&eswA3gOPocMYic%dJd?q#D#5epgv6;kGcqaps`jxx3O zhIRw$njP4G^(1c-l3nUC*Uz=rJ~~;JqU~w&Pa4GbLYoG@abpTx&f${bMU2RR5teLn zEXt-aN6y^R5A%9EC7)XJ{J0lC_(gM3w+Dp8PkLfm40T_um_qAgq^avGZ*VR+VTH3w z>KW}1{4Qsq!*U>I)lw}WjS$lsj}Rh@@S2%x6QRMil~$q~RJ&qy%YN55-ani*snOb) z=oTS$YG(CoBT~bg)z@D6Uv5tEoRGl|u?RM0cH${U%6pb4gKHg?k>659nHMT*qF=6P zvu@04m0MSON&3#+iI-p^Ho`7_HA?kvLiA|6#%zgjV95431SKZex0j#XQH67=3@Q`& zR>_3&2r3A--;aYc0E6hN$H$Yfp>=W}_v2V0h(Cddjp$a$Y@#w2E0%ajv=<85v@lJb za+d-ZoqSFm?y2xLa53PHUJlTt3*y{1JA4%xBCo{SO0Vzypv0+_6`cg1f5x8ExO%St zxignB37K`8U4wgs69}FL+j_o(x*V;^tBvvjd`Y!oOVf*at5sP~m0T^4kcR~6GfeJ8 zZ5ePclCp^;3W#NfJ`WlXd5Uhsyuo`w6+GuaFeJAmW-QUnVa%vheNcU63_1COkh}EK z9OENLSthB|yPS+uQy{vzehw;o9NAB*iw}{$o;PgWp6foXG?sKx=(Li3RA$OBJ}5N# z7R(uOBVA{xMH~e%P{YByf+W&tb6!4E!o>uS0e10Dp(A0#3Kf4RM(&p~^x-0$g!Z&Nr?-Y1x{%F*n z*bk4M$OxaF$`9;GDy{HDOB*Wk?Jkd~SDd-TIqN#l|Aanu#21XX>Y6@`0`dsYi{@eK|J6K9 z%N9=wcjX|U4+aYF7$r>Skj5ciis;pm=N!chp^)rZG#J64E=&e9N%+1$rySx1PJlT4| z{WfG;dKPcl!mhEBeyaOr&m15bQ1i^5BB`pER=>FnJK4V5q$NhVm$#LUQ?Q4SjmYTB z9p~1i=36(^NM?yWRmn?$?P;7KGX*2e90|5kPiVVNN+G1*soBLQW-Dq8bb>k%TO)IXas8$sA;3QP*&08wzvoGN!#dN)-#qDP5G*S!kbR-5BjMpn8 z$RQ07ge5Wa-c0S7Na-97UZm$*FvSHarlb6ajJrIGgTgDqKn#FM!WQ4->EVLM1aMxd zpw;MukM~J0Q3kt2n5FU^!gTkbAF1sqRk4e{<3pDG;s&KO+vB&S18EhTWed69M{H6 z)$AF#XdcBnNG&cH-o1yeFsBA_!Rz*Cx z3>*jn)%GD@LdGNbN8_zuh+HKhH(kQt4mL0gK#o?7>5iy*txov+lN13s?@LZ?y(55Jq{tb9=hXpN#{mYC2cG2o(rA9Ke^4 zzNS!U&?Db8gD@0~Rfx|?x387Pd4>>R!tYM(vVq93{Pf}|*{j;~$L}bT4IWh`cLZZ> z7!U_n5|t+&XYL0_o4c#xZTvb*8fpNYy_Y)E-4yX=J-K>N+T!r7F`^}`0X(VnXFVa~ z_}I@f;EpHn-$J8RGqA(OR$%wsR>4@hgn;yG4|l>tLSy_j8IvAU8^;rCSz^({ZCya# zxxrFB2J^GFW1;~EfDhcxE2`Gpx)P_%v`B}7xf*I%ZDCO<@fTH#uudVM|CS3EzN@i< zeDX=Q`Ud-j>P74*rttN(b(ul4Ne2hH+Z6m}O4h^(A*tm67tYHL(BVBVdWZ3=CgKUm zP&U7&;PEc~JL3x|=5A1v>54CKELSRd(rS9$h>=+Z_ud;Odej3|LHgk@$sPz(7fD9B 
zoHJ0SW|X5T`pSe~G3)#O8G}&K&sIlS^g>cLA^{7<(Y9K!(}tTKb)B} zee*zEle}p<^;9SR(9HstRmA{BBw0Ex=#*aSR`nt56eHMK;+7LRAJNI8HzekdQ%^4c zIime2l8iGXY9e3+XqbA53){=E$t4Q7<9mdq@p+F3Aq_$yvDm-Vi{2|izlfg4mM+>e ze|?VX@OoSX19XV6M-$wM>CsDk3SO#5Ciy*NlcwDzz#ZUQdbsY@Um18rxMYMh*h{Nq zY)_80UuO}VhPh0z^Nj85%+VP*#FIZ)iQ8C%D;?k2$nI9|4qjhrKDwnE67%YG^%t~C z#G1O?%Cz*s!?=6rlYa?Ot9<)|>AgpYQ_0{OBX6r^FCYp0)>XMyqordM_=CO~)AOQa z4NIP8^#7hz|Dzu8RF8I!jp3?R2Q}L>;M)y=)%tpA6=M59-^Ddlk|_cSim{eru_U0; z-}pUv2k$FrA&cle-u>i9$eeRd{Nq4Gqhh}hh$2QjDK{~sI|(=1;a&=zBK+-M|C@I1 zRDT+#h08F~%@Il(0qvn}?LL#&u>pyNwo;?d^7vZ)eXPrKZKlSzdOx884C#ikAXtd; z?-w4YwrmNX#8OONlyPwqnETOyz>DWX;AQNTbxnJ@-VW3~T{CO`(O(X3zPevKSy%5$ zm6l)K)Y5&M-Onde=5(s@YcjVw;s&HlY)@}ZJbB)ziuvM+9(gi{!e*V;tplo z_L;_>Wd=!!jBScSJxNl=5=lg*M%jkP(nGSPBJQRzmQ*Aor6#4PNcN=##gMHmy`*H{ zQe-YPe2I0Wu`eW$~Md=CBZ zx^ax4^OG|p`8V6_A>B8}pVqe#%lp5uk=RLe>qDb$^uZy7ok7%dRO~GrF`-=pA@3>6 zqLr5+9qb)<*UvC0k^`2KUlWt-5yY>(mi-6`gqH(-ztik#K?XqlE^#1!@4tg$>XaWJ z+vV9IQLKXisnG@Mqn`km`EW)Pn?$(Gi&w-(;6&ehCjJoWz+7oG)yLKGu6{_^v17NK2X%N7dM4c;73m{lv z?v~HZhpm$Up4|yx4X;woQGh39RW9)eMUdP|_BxcoE{-~Z>$eOIpDLceEB2qD)Qrt_ z*V|{ZOWvzkD$#G$QRBTg0_Lq8kln9mPI(GFh@&(QSQciMy6hl+jV2esp;&@lUVp!$ zH>|@5OS#6_{ zdn{Ayx7Zff5;yE`TwgmZjI+LMXdTqj;J`L5TXGqg1e$sD+r{|GvvP8)ls9AqeSFh6 zlJWW#-0O-8i(=!=pI$H~#ZINzta%i25#ubjs#{eZY&n>EoVV5Z4e^5o&pH$>l_uX0 z^N-7O(QjeWu{mjVfqvmf+|tu^7=g5zReqH>W__0nB?Q4-5k|#4FIrPumgCRpPE;W( zI>V>8GK`>LT(u1c#_R=o&%IdlFPi`EVnTwcuw z%9Gzj9}ZBbD>hlL3wMf|V?HroYIT0M`e)Ow_KjwqHZSpMy7RYxm^%{;WbiWBZI?pW z9B+{G4h8T{T3wo&Dsu-Fw+Q>g5FDxWUl%>n{!51iy(Qb{bSmygeOi>gd|1VP#f>sc z%@yLAx=yZoJ~`?Y0@@6NHy1t?PPxzh9ZLWD0CpT0P>Xc)WuWkAF1mZ$7Wdi>^o;iF zW?Wj}!%2dp#9#qtkogQVpk`HVv$M~a`a6fO&|#{7!~`83W(FQR_GR#Cr)f!4eDaOX z(!;727CHshNyNv?BY9>1inaBjg=-J#OU(p1X_7pY*~(G?J=8<-!0*V?J|Ir)QoQ)8&ON=Y0s*M6B_ zTl+G6(~8lKL;EaU-TG8B9}+)<0ERapkr?G78`Thkv|W=HlqHE26k89K&Xi$0%~naC z0i8!sG|T<$sP35q8E_0|EwwH4;6tH+!jHB6dlw!k9lI`fb5^+_T61!z?Y6M5|F zw-re(r6|3HB!MiAAx1+gS`A0!I^a`IE>0@34KcqSEmf1bST7Th&FG8^zCyeuONW4? 
z8|AriByX4QP!JHjnol)2o^=4~&m)0$#4?aLG6|$f-poHZbM`oN;=_27Vj<27A?6)4 zarUd`n}RhE%(H2{J!IjSH=AI>F1L%Ys=f+D5OqvHIIsc*fL^l+MtS&WAV_L>wq|V} z$!Ld!>pmShAuCHqRm4G1HGY(?^!@Qrn+>|$YIs8T8DO4f0qE5@y+(2m_1YI)&~y#R zYHr$RX7Bx&hlbV$Ma^Y-PxzV#%vcl?l)}l7c?uA6yVME)a39RYpWUtyK#;m_Pzx47 z>e;$eE&zWN-hVLPI??O?P?S_hKlAE-2<)R132nL%2(NNlGyjLy`iWgP-XaeU%y6Z~ zG~c>JO{5dkzo&^O6*wR+wCSnw#(lo1k}p7IV^l65d0fq;Mmx0P=ygH zM-V(DIGNQE_$~|r?8ZBQsG>m)We@YzRcfC=>msNKcpqi!Uf7D?1<{Kv;5srIubVJH zdXYU9x2n&p5&V!dytt*kyVo;u`~TM$*CLLzKXC~cB7%Tb6O6#$Sb0^fLU{y1p)5-E zZtDe{9PS-jk)y3m@8{<0!w_(o7PGD;g7gjo+0p@E^|hSJ?lVYK(3%$;O?^lfUFE@%r#M7mc15nIngrd#!Mp=(h%za)s9Ep zTJawM;>V{(f>N^mV*siPAG{K@8lHM>H>5oa5oBA9&q~6_A13T+ry=-4$1#lpm~rPQ zr|>D1myY(i&GJw=mgE`Wo5gi0ir_sf**n<$n~@UTF&=KC3Iu2F2DQ>CCsGQjheYZH z5ynMZOxPwyTiIKUs^i5}39AU|1UbT5q_ju&BKeX6NP!^(Yp)6*T-$UpXMEJf6NjCf zW9()1+92wqng44x1RI*aLFu>8`baFwMIq)zCFWySoTL!AlQobECYV0YVO~QQTDszRFk3KWzwR}; z;NX?%VEo_COB`v?=euAN=7RF4U;R2r>iQOZiLyTi|MeU=7&(zNyEsawSR#rL2i~H5 zN%d)A@^2Twy9qxrI%&ZNQ{D!;_cZQ<4*aOW?cgY=kjFLOQ*6F_{=(dyqYqrko)P%K zGthdy@I~PS%AD9350uPmy7K!7Kbj00M1D*SGEvv+W67Mzq^=Q0*!OQEbsyc^Zmoot z5pI(SjLl$p9j_S+xqS~Ua76NUM<^E}ghWe~ zR}Mnn4;rcKAS>~Y2BTt38DUsk-m zm+=`HX$+lLeGYIt2;81uzW8e>r3I_vqQ&j@IQpnwg8c2zMS+9aV}v^^8cx)oQjG5DOvwv4g_D5+}Cbws4SS|Z*}4fWfgG>&YgHjV2><$co1RQ z8njWg61U*q$-K1HwHMIv(4UD_-Jg~>T4@~MDs#sgN5sxiHVU|JdTH^hMTy=4S>4NT z&hDa$4Il6lD}qA!NB(V0mBlPC`>%m9I0kk!pyV0?w^G9I98P-m=+&y*bq2r3ndS5* z0kMu#4YHMI)t6=PY-5eWHJ@k1+VllJ%F^k$gYQIIgL>Vu=-)2-dY+QOb({S1YOiOe zVuJ;AV&uj_yPqb-IxBGFtf|Zi!zFbqH`}~al-bwzah#py@$(Cg{BVf{;n#$uClGE) zxwW3R+aLb=Vc;>d`~|2G4b4Y``gMY3^x9H{r4|{xnW1np3OM_ zbCi>n;SCHCvS4JtKS0AEeh-Awa~4xK(=WddhN=1KSmwx;Uv@;fjzW==3!EtZeCC_{ z%PmMdUG+;8OjVH28TTwEJc;bnncyLMP@mH=Gnq6MEAI zj@LoniHH9*-114o{%lTbe9#(;G9<&)M;F8V(=mq8a(4b@J;&yI{?iy&DU3lj z9EP-yX=%4_3huJl;8OcG+wq4b&HJSzXkNc>ed#;@F=a1N8#dL-kAUwSL+lrhXdRN) zottYs8}cvzpn6ag90RGVP{22346WH0+>Lgipc)j`lm!5M2GCS)HaFXp{4((q_^;t$ zL7fXzm!XviOd)?hI+C)Zbp}rg1N_@=YS>M0sRfjk;E41AGnO^PyBajGx}mPE2-017 
z?vnSl46U|Mw%2{@`WZo_lz>3`dW*D&^ zGuGCF9{yCW)pg?vt?7aFr2-~0K1NiKCNBfZ>4z~m8d0=G)JK$e!(Y21NOYY`N@qq6 zamorHm3h#7+N*YInm@hh?@U->HTwDigL}La6f`_N#oxdCcmxtuDPg2NhIIU#+mIg2 z*vu%Vhcq|3fypBZ(kK2$k!0;*DwEXG0qAKfwqiZdXv9jUD%6ctpT|2wN!G(Op7k|< zzvk#1W>+rBzKlrasE9197P?JAqjGJCDpnN&sYCR!TcZ%oqDD@PU@uBO=Tq%A ziF{n9+*=tFEhFY2%|KkOmcyWjvxSf}vA7LJIfZhKc5msD+IqowQ?Y&>b}pi$S!o#BmNzHO&4INEbxu{TFN0!Nva_JJABu>)NMnH( zF6|nDYdeH}Ya_pvS0tVAfpJOdsHd4P0!7G|FyT0?73R4F*m92h20eM&JT2ni%sxi1`G z=8uqZjtTS2-f4jOeey3Cv<8jIN&Jxk#E%uE!r75~9={9|?}_ACq<9xZN7`^M{dsrNtlVag=+A<~rW{gaPN~8xpcnQJ?@| zl+lO;3A09Eyr}QHTo$jZkvhYlkCdzPW{t)wqKE^i2nSMyeCK;YbkFV6+=rK^IT6e* zF{eFo%X#jh6oe^%5O!yBIM7I=bw+uwLh}`zr2A$38}z#&X*;vJldAk4VEv8l_W=X* z;DZYAhq?Lsz^2Y5giyJ9Dz_iwkG!EM+hQNly!`zus&`LIoxI+PVyKcJZew}#{;qp) z?CK=DDD}3?o$Ulc!ZN~E0y%qS*xrNNa7sODIQi`LDp#@N6Xfkqx2rl|&UWR@MmtQC zUc>er4TRfcyt*Q5k$%AAO}ZYb8zlH%w-fvbL8kn1Dwn+t{0B{RjoIb`s!xIMzCn=9 z*+Bo1%k(Kh?#@j7@*`=97?&nI%X)9usxd+T{~SV)+r97CF^ncu)INJlLD z0_(sqV65`7jqo*pa<|^1R9kl4JRK1L>>?xjX-uJ^yJH57k-a#61ub&x!)nCIyRYml zA8|o`bq#-H(Jlqqo`IP%A@xvH^$V%!a}*{MuIARQF{{JC*;W*t;b(oq*C?hry^O4o zZ^4rIkA0Nu76lfQdC$=nadNlm!mVoB*&R26Bku6WS2thw=#FUsPUxbm)Qu!^oE+U3 z)}~P-PmMV@X-2`T zng_6+`n4Y67=ZBr52Ud3Mk}_SI4;C=W*diYxx+)q%eo`8m1Xo0RX%Z&*(wtKcwek` zyp)~4mBcKsI5}I_h^t~*lCSoZQmUdB(b+e4%OZhJD6ZlBK%2_T^s&7|;K`mfbPR%R zF{cd?snn&UB_s)_b&(C>tu=JLr3dq@s4x|;n`xzB$~~?*Q8ylfYuB+q4-n6`+oIzH zriaK~`ht_nq}88xZRV>z2QcO8C;A4|-NcQ=Sa=dJtQ7f^3ZA7Ol1JHziDL%S>=D^( z7sjs%7lnz7ioe@inJvp_J0-1P`@B1{SuuD^MBm)M7(8UJUh2Iv38@gySq3LYq5e-m z(o|EP z%VO12{TEyK@|k!VvG15yS1i>Z138cAQsXy@n+H|&zEqPfN#Q#!w%Hg@FQiKr9#Y$W z{*!A>bcAV$?UQ#tEV+pK$OS2ie>43~YWFDlY~AJbnt+jWdwULk57-=Apc{j8(ArjW z{Ks%?H{j9~rF%o2n?$mGujQ>QT^TJ;YT|*L7sG3Gx+gCy&cGI0*pA@0yB64&kNQ+x zIaKmRS*JfJOHlj3rGl=s)G6Bz z`8<8{SH8VWLpa}y(?6aTobm-&2iOdC7k>EhnZE-44yBrI^lwh$<66j8LeypLzA1l? 
zhLjz|uH{Re{CX;5JUCl9Ka;tc?T}<&HF;5DijsCIfm#@%fG=Di0B{X{6`XGhwZzGc u7s3KyHChe!FZlBVC()LHmKy5uvvSnP;HMXHgKRGNW4gt{=&pg|$^QXc%*T!Z delta 47037 zcmeGEcRbbq|38j*WF31GBH1H*?W^K75LPg% z%YwXu2p?DvmAJ5kFsq;-t02FPH>=Ps@KI2JUl?hQ;Vudji&MQ0V?_kolf#4&jU|rJ z4snP{SxO{}X~GfadbkKjY}0za}Vv!7mZl63vl|n*OkhbvMY!5H9yA zkjvVtu(+#)F#dDm8TD}x&bsX92XLY!M-z=S*VDL|gM=G!{WH6k2E7jjVp*FKv4-b= zPK1Tw==q*&tVD=4YdYi$BPY!By^y+Q1sC*R{6vM|H^(~P0=hv8h=3!H%pd==upB=b z@|Pv+KO36gSNSjER%~z)I6nvx>(;bj_J2-~T(Mb!#nsTlED^<`6o|)@l!y`0Xh0Gb z0{(FGoB|9Hp}18^7z+Zjr@#=GK)`e!l{o~QZ=!-or(r3=BkpJ|W`NgBBrD z!W~T{h2-91x)>bhl?#3&N9bS?Ab!24M94*ABNzfX&nL?H^!}Yl2@^vIl<-9pNg@;d zCD7mgpQlU(&RzyT5+I;3D#Z5y9E4a2Cwh|Ji#gc;&q+!z&k00N0u-YOltd1M>7sYB zKn90uf6#I+<+#6*GwB0V(_nGwqa z17pY$^2!=Wp12|yLI#Tw!T6aH5v5y-2uLJF_9R51iA6Jp0&yINdqJ!eayiBNV($;> zC;v_Lt#ht?&W!)d6$%Uy!M|*Q&aT0b$GKfFM4^)flC9v?znc>ktz9ew)C^%BMvlG! zw~t@{$9Eq~2L7EiRkry*2B?0@!T9GCw-E1i3y~+EjsCKm5|Lhm{r7sOYLqWd5L%xO zLzvTCTo&W!i&-K~iWjqrTmJGJp}O=4>D=0f9&CY3Z~ued+X6Y&Sph?&J6j;q+sTl) z-=3d8AeVZ+|I5s??|gt1r*r)ete}kUa|9HUBjN}Y;-Yw95r~aJGNe`iIrsnl7P&Df zih;18xVW4b!}PGf4`sj1E{+{Is*Q*x=s^&^qC{ql+5Be!(q=s2f;h9u`HK(l{}5iV zWn$+1iZW|wF#-7mh*@7MU~m7CTh%4NPHL-}zmvj%MfpdQ(Nk^#N}VbwU`FQ$s;r=d z*NK*`OhPHrSWyx@PE!x z2}b0eOH>>!4#!|wCZyB8-X6ND{Yu*_5#w(u_7fs1b(pd>;oa_jt^LP*o{~3V>>mC>S`&p#vBN;6$ zd_UJ8;+Ok!^cgkR_^pP#=YmADw?;wD0I@{m;@a7=9vGtrPIlu zz4am9Uc+oYoOqQ|8>;m=RC4v?Lw`@@pm}5A$1e_doojcS7<^ZLIE1cka;3{N!fAA< zg7?BpXwn0q!z(&e)i|`9D-ITwgR*tSz4;@N#loX{LU7YyYXmh+~&So(N++M;m>0!Z%i^K)^tDcaM4cQc!lBN zS7f38Z^NU5I@#O@a&>ikF3>LZLU5qq$zw(l2ZOuNlNQQHVZS&7L~grTPs80iWd`im zol`vOK5UiM%|xqXxM`1jPWVsZ$Q|4)q_1ep55yoO;ZNszxd!f0{@;82pV#QFCf?+& zr>d%A+%@;8;$31&)d#JrP{~>8{(EEg^P_mhC4*k zzz#rEcwVmm-*=Et2&f+_3oApcEJ5rZa}1J;e=qg{geAApaM|k9pdo&oxsUx_yOgJi zCoVFwSQHO2V3#|eQ8%7*3HqOG^5L8NF*HAYnrM-Zz=$^Kx&3PNtun)vx}&w~x%zen zD}6loUorzL>-C@NRK+b6N?GH~%6=w!_};;jKA3zi5F@=>-rw}j^s(_Q(*H9Co_j$c z%(jtx!ru89tBTF*OF+x5(U#(0+j)DNEiwL(hk3fmnFHg*&>!p={~K$klB=oHehHyl 
zFPS4a+&5A86y%NFrCty%cB$SEh8+q&_#E&s<^*Ek1p}ej;9V#eJAGz`9V3G67<~0i;?0*{Pfp4ZBLww>-IM> z;y8-{?_sa$aMS5I$IQ%{!=*y&7M8-N&Y!13oZKIzhGF3CyZ{HkIjiM6&DU-#Z*1M* zQFT@z6Y0QcVZ9eIVfN`1h&w6NaJw0yM2|dj)C|(19Z#uM@FSI%A4rNS)Bk3&P2}8@ zxi;XNM>!5X@@~u8w!KNkwZAuP9qG~l!x?a;eq2ZwgwU3iAh;?iOu#5BT~yM9XTq5t zh;av>M#oT7^psq3HN5^>9LzQxHadG+xF=oU?-xEI72q~_XmoU-bZH2B^l3^d`Sp>b zc@Rb4(c zR>LJ$R%IP*g|i>W43#Fk`05E1LkQsU7Ji#8KgQ6k-%$dFN6aB%-u)LHbd&dTC++2m zt`nnGZ-b))TBPUk98|Ddytf&SvG#Qe3NKsxdP0?d&5^xWI9nlGf)imRV)Z~u!kf1R zt0q+8VeJ0IAETqQ3Y{gt{okGw{H`#V7a?b=IfrZPCi00-_YmmL(7TInxFC;8q&Qnx89QJ z2&oY3NSO+%YF9d@;ASEz9y7107gI&Z9=s&SvR7<)9O?2_Xq^jA*=x0kdh{%J*+`@O z-Z5GjAVv#~5xaS;3pP;o5-DmDG^$?BF0}b)D&(lqljn9*&L!25jKQHDaHD!b{&|(wO z(`ZS|#_%^22$fG7d$r?ne1C(19fMV@RtWiT@Opg_fq+`7jJ8B>mwc*}=kBSBo68m! zw_o=~n%l)Sz11q7EQl)}y(1sgiIFl~UobH7dF{*c{n?54uX$;SG^&yKf^LUu@X zC#&XD@s%0<-Mv;y+wQY29WECazrn!c(9uVqKY5jmgwkeVw|{q8>7 z1QyMsd3dtdpH~7lqu%o=(`pmQ9?3`ZOgkHICj7R)HQ@c8PlEyoJc%V2(ivp?L(x(` z2poywyhvjv;ma`K{VW7vX&=s)x>#=)W4-e*-mgL}tHWqW!LadrYUf}b7zQ%i8_Dys&_(R1nE)9`EgBG#z8PkIm zWN*{PRC9CM+3|eCGX<&Da(x;>8`5HR=!i06gj;CHcn4U!DR+ z3>XH+7Vy>X^tr5l8UiGTi;asHRgU|7`mIXtxC((AA~v${e_Hfz%u@=nHzS#o3Nr3JKL`u%%Xff&1_mZ_bj%u2zJK31`fL1!mcV0j|&@Zc_B zZu1+Vp1Wy4=?d5DjvjxWeP`PAO>Bbxam{9H$xfeoD52OGzay|5JrJLb>$oRN>0V~lA90HZ%&&KCCp$T@WRi`d%H^BugKpDVD6K8bF zbpIcjw^Q2yEX`F#xznyDK^_+5!JpTmL6>ntV3)Cjv8c`Xy9L_l{XHzFTf_0S$V6&zWQTVV4|r1 zctE(f;mXwa+<(x#NGhltmU z_P|Ws0bIQE1Z5d_a|}j_N0FBeZt#Z4?{{%iZ7?ca16H8FlySsyx5Pe#D6|F4(}{fA~lrl8CSNrz|ij8DFjzNlD4T-b((l&}dL(^)m|WW4g!zUE)qt z`GJ+U#is+Toej!|f?zwg>T4-};&qKrw+8pe&WZG6++hHM;sZZ8$~G@cslB&e*i(0! 
zT8r_C3gkJT@(9q81Lwg9gX#y=E0C4T{(~b!6G)@W_k zjW?8d3>Y6pp2vyu5;-N0Akll2r$$J=2xHRx& zCG{Ttk)n)Sr(Ea$hw&;D4;ID285RLYEnT8kLyvy{Ju+fQ1zfsLIx*I9)KKW~npzMM zc}d!DE9#m^t)cx7vd=uk@XD(ISCT9y4P|PD*^L4k0}W*uBQHE*kQxSon9UcgdE<{P zv%Qo$=`X>i*L`HgxF?U^?LP;{&;xGcUE?#yKs#STbT z-jf2U}Rv z1J4H#*a8~IJC{ja%lEI?66Ko$`;rv@87ryUaH_+8&#Ctg3@cF;oFn05NZWg0h=g?- zh`cXv{{D0>m*DwD;7LZ#X99*`kl~~QX5pB-YlC#0f`ipEbnHms?|S#Zdbty@)Os*Q z4XmfTwPpl?AAwd$&1ibul%~HRl48LPcBJU9wMGO^7{p;IzZiaQ1WB+AP3*Iy49jQM zi`M+Vk}BtA{xys4ZvY7qRRgJjxxxv61jIcOVrBH&F_)%TQe1z9aet&aqtOvva`F^tP{0?w zfC-7H$HF{c0-pW^3=T(n_Qo~-1asYM0PSrP7#MM|VHi0+*isw}Zyl+&JGby~Nprxr zX=fNLZt5qD0*~ThG8jL-@*9aZ_>}Oge86;URt8==Ukz7S3yglUw-!g*2)sj31T_dN z4ou}QU93gR4hD<+?ia-gG{Pt#`8JRzc^u!k7Kjl}ey}&{+zf(D1~w42^LMZygxBQ9 zfLi_yPy6A1C%Z#a=qv{~YVUa9?5kz!&Q8{$Dc^}BbH5_A8`DYO1!p&1PxStZS6<%m zianKvNcLcIqZc|A^0)?VzP9*U&j=lF*2)%he}q0?uh}2>wE`w!eOGkp(+l zTC7mBam4C8rMGH5=lA{*dHJ9m5{^D?DRhi)6?&EG^mM;IRAk5uxq>3G(UM%uvg%-s zBOsREkk?PH#K}NQiB1aO)Kha=6BrgAUb0IQd@4$j2wpkns(#R+9YEFtfq=NAPmTM< z)1iBnL#E~q`Pog+iDhZfslK^$=D0S!gsaVYILDcrOa|Cf@3Y>s`XBGC$)uB&wigK< zGw)sT{AHh8@?8NxR)$e&_Iv%|W+db&SIGJC%Q+N{M38yKd|lp#(j0^$mh!uTVDiC<-hmN~i>73~!WDgfy4i|GnC(cgyr@%GAmy3{EpZ9(} zYNNy088otJS?X5jtzu!=^;!Nv4Mk2>j6TL^PjT0a2QgAJIeThf5BnRKQp_FI_Y2qZ7m5t2)JC8)WiP<*}`{$!6|_ERA0kLZOmzr>X6ogS@6 zraWSBTmDcPf{yV^JB39YQBoPahM!CQE7Y+X(UEKA_77#o7f+^JTRJrvDgp8s3Zg=G zhGqb(k*=~QK74;bvnS@Wv*1NP{J8SB#}m(4dT~#U5MoA#tGE_bMEcq?PsfHpaz-bo zQzG+m_1B}~-_NU|*3iGgEd~D`*SXifAoiWFdryC%`bV)O$myDK82w0&w0Csh-SZgP zn3p?~*sDHx7WY#Xp-TP}`Wo+zDgJ(+er zxs!8q>7&K%r(E-D772AUC(R(*E=hUJ)O#C-C8+{b{5fw$Zh^c7oWyH)d5CW^gLtculC4hewvFv1=ek$h z4yP-2a`;0QkiWEO1+xA6Yv?PHfE+t+Rw>^zMcN|>D7`hikO%{GFcs=~a^egfnXb6> zi}R)`PG7#%$lUghb~ML2<)gBP1&HsT3tuugb*X%IKZnGob~@nW_w7XYm;4j{XNp#-TVNvix|?L>OxnPA1%U*0Dd7x5YD5pL9QrlWQ+>`(#k4M?BN99xv_mL@jB0q!hlcs;58`y04vcfFntMReA~a2J%%KP55nUY8bqA zC$$UjUpK7>>{s7CQE7Wk#Mw9tQc9BNb)SBka2xWLt*(U-kzOZrk=^bVvMR{W!CMLC z-$wSzzP`x)FH_r2W>inU>W30tOF^L2 
zdX3QI=S1Rdu^4L4R3$qM!WeuOGD`nY=rcU$Y}3}M?iTF&x}=R^QNSSpO#ma)1k9Mx zPZ%Q?`Dg5v=Q^=HvTYi>7-y_^XBBZ{lau>F);B)m`7t#2AHV;Js(r5?4b{UBIf-R< zbXFMq_L-z@SX7n@Nr`eKIP~R>3*_1tV)bCQhtY9$>=q_Mg26x7OdU&a({GjmcjFnp zw#b_W_jJXjo~t%d!OKXtVXNTczA$F9@yV8nBezjReR`mF8yToy1srnl&vb zz6%GJHi#v~v#yF=n(eD*IM=uB9w|QTucCjWGM*s7$BMKqfQz)`+s4zfjpB}}VZ~$v zZIA-Oftj|mypXxUDsCYwah$~p*yE0t*gs5{M?^af3TjdggW{yIX7KdUCoa_purRmkqzb-c6Cj_LD2My;w z!6i#Eue+jMsGMW91}76Xs$avMfKqR3}XhKi^75g@mZlmfhTbpvb z8ji>v*R3%F5&1#p6#Ig!a0+i}80$p0+p&SZpxr{Ok@=NS51~+f37npFxiggZaYfqF zU4}MMXdR%JpZz0V;oaHmuojn_b1p-}dgwsj4jzNYfjps@i_J=WJNZ`D67P5CIor_! zqsL086L7rQ_C)TDp4ARfrsV?TQ)->97vdIg`jy`+l$t>ws!y+;)ViKnAHL2$Ec8pz zk}EK%4|9p-t6oG1VubKzj$ogZR6%ozg~-Q(y@oKXUkZTjH+VDIGvwXi{LU;>)QS;{ zD?i?5UA~=u8>h@bUe!y)vLR4>p5%3l&WHBzvGu3B#R3P*yy;n6QI}#O*ha0U;s*4F z+vyS8y>bwJXY-TN5(35h3!nRpv!>M=_GS>;t*a#O^qf>4s_3W;vRu8@WC&3mTX7Iw zRSdD|f6F$eF_DOrnLC@3;7p&P^V?HCXpeeC^|3KX9zha!f;=&h`Hl#TcC%q(!cd=! zV6m}b==76UeW)_|@`hq`JUKaElXeIfX}PPvICjVjePj7u?xE7;%Zdk8N(7p1hyvA~ z*lW@CNI5R~E23Jtvi`UaKT1J^nY#Zt10Q^wBm3@*Z*grSfzhY<{v-voD24bsyl@Ey zIPbP%&1>u!jTFVS$kVh0Yh>Ql+kJCUk;!#5S?Y54pXz@j+ zmijO=S(^-mtlrbT*{&c&V4Y*Fn%SIaSQ;=HgoB~ju=vL9aWc*zhC3G=rKi5ll07vU z93C8xddI~9$GqZtE0ekVI2j#aT8Ad^$E;?(6ZtcNdl!|HLJZIM5t4aFj4hp-F+CgS zGyuB}xu?ok5G%+nDa#%H@H(MTYUNL3_UC=p00KbSfXz;`VLz)*3VFXx^?D$-Q9qyAz-A*45Nf*~` zj~*HLzgwPoIwZ$CWRVRSeY9xtL-@VbI*Vjkyrq zHoGo*`XdM$<8D7$6H;)i#G-jBos^MlihkY4N+^4aWn>c zO8INpq$XGzue!+y?j%JBZ_DK<*}ctBoUN8bYo9AW-5LQTX1AwH}7G(DmQAY)#+&cl2ma!-$Zl!-rm9eL;1gXDtD0D ziEz$@6$|{Nd<9_L*EU$WGu02uk}gGg^TPLVucEf3fYa@Wo93xeVT^h!6AO%M_1a|x>W-NL-AuMwvnwX0lEN~N3;knu2FP;>sVU4^zSmodv=toURj=jg!sA0M^`>Yv>= zwHA~InPQXP$XXewot>U^VaL3AS9)%d2DE{B2&j0B&hYbIU&0^Zr+8&G=C}Pfm;zrf zR9#}Wp*cMP{<;;*7MqxXUb%aQj>he!&>doC!}v$%^)L<{Ip8S|N50se=cBk3d7#{| zc;h`f9i_r11Q@s@OaAI zS|M@ut>F6*fD?(niR=9h6M_qC0Aj6n*6zkN0z@>fVHie-Wd*`3!nm8`Fp9XqVHj3i z$!h+cb1S z@ih%pA$HKI3RK`Q`eDbCAPuC6-|L|>-exqm0Lm|PR+9}u0Qxsy_(Ju;-F(%f2s>ug zK!t4sn0GM}OLR%ZYO90Y8Rh%24^9%o=)d#B 
z3Ir`5cfQM>Rl#i25Y>wR`_2>(J%duQp_nA63gcn-e$8ejIwuN%)tt2ybfHtwQdigC zPlpJrS1U}7ux4}90%(b3cD`?*VH6F47K%-8157PY${}~j!RtgPCytHV3z~Acu*<4f%G9Oni|bD%Q&gH>_u9$w`{fbU&U*`I#0@@4ts)TbeHe30z8i390t-fUxJC-k4@Q z6GyiPK&8>KC2Ih%iBR0^a(ezK!TGn$9=iO5&NXVZ)qF52rx&D{_AP%^e6yju}%!Bd)9FZy)p7^+#BR?|Q8mu-M;h#hUS7`#+g} zLjKEvZv#kPC_0o2^5$m%m%9NfX~CeOMbJl|s}97(8XM~2N0aT$kpvR~M^4h$Is;KT zL9LYpHSu~!yT#Decji8ezZktzD%$z%1M+iP#SCLSJ%ZM2m8snYT}RUsDBFPPa52NDIgg4;mF_;Snl12&AU3=R$o>lS0v$}2 znQV!)$#xV(&b4z#*iin9pJ{U{+=-VxfSt z)C574-ce%Sl<&y=CNq(rPRHDPHdf%8Msq6sN1pVIcs}n;X|O-2C2A6|O7Ml`33<+? z`mSBMjFdawjGO}C@yjXGy0?5hrC-TPUEM~VDhUBbX6Gk;ll`VC-aIa4{zH~zmUQ36 z?6^Or9-eD9#+X^s5gA*-dDP}!R?wuIoRux@AI)@0oA#&!WBN)^F#q`Ir5M|6#am*p z>QTm^KQ-GubfIhHqV=FOX?1q8PmB)PJ_9)E4C}N*#9}nQFsQG3yw{L?Vm>Rc5M;*y8Yh$QMCqem|Bsz5<+nv_z z?!0|D{UI|PU846bPXHjGe@#q{vi8=^-+0HX&d_VVKya|!mFWOr-&!qLYUK5q<7eXT zJB)qJIuKsNH>b!Kh{Ue%ru~6Si(X|-bXMJ36}uS)>Hfw3|#^d=Hx zeE91h;tHWyPoUa$e?HmZ>8?6Xdbe(Ysgh=**K*UNZ`GzA2FS6e`MDmm4y~;M4Il(n zD-~(Y@sh++irU}BLGyp}{BzuS5Z~bNIG&he2=t24IHmvySKv~X?p06FUC|mC-Mj7g zcpk}0=~*80iDX7MKhEj*x0|Xj<`N%$ge1)6Rr&XTnQVJ6LYvl%yT4YwUPtqXRHyIY zSd*l^Cc$sbAE2AnvXGk@qu<8O&%i{pUt%7!p&RKf>&EI(5HYad@^*esDEg8zxf~6N z8Df*g=sAu2uCf4NXz6EODI@mLuLvG4DIC@euGoxFdRG-5Dm6D9viNtS*pTRml8f0| zY4@her6d_D&|8yyKXxkt4O;R5!}A1bpiD+XCXBd4GK_A<*tGLwn;V03bV#-f42#~d zr89UG&F5~{o}6?;sF!6maM*<$g>+dZ;JD|f>*h0<3#u8L1te`XNRc~?;F zTPczTT0Z_F{NU40_@y*P&ReQh*1x#ZlsO-C;KUd-2=Jgl8%4_tDPLHQC|zsKt3M`zoE0Rww(^8+py2nMD$}I~WMt`m zG;5k9sx8(InaYt7#yxpW7WccRYnFtH$}408_~#(w0cL~LL* z(=ghHPJf>{w$oP-m@De8aF|cGiBM81B4yYbNmY~O+Af3HdaOYThF>2rv>qoTf?X9u ziNcw&;L{J^7}$tQZmXCFpk@V%YqO%Mk zhp|)bnCRB1#?+(ylT9RavTPxcr@4x5)vP~g;kz%wwe3r$^We2JNBVU?mdniC+#(8; zQd{(Ep~qZfHjpDh6?+?}-AicHKqva=R%k0>9Gev@C3b@A{b=Vo^7cwU`3}M=;A-7v z-Xw+7amA8tUp6Pn=ZP;H6ehktiO`a?_v4)q-*{#zTkklnu>F3nMgEm5Xal)_zZcSf zc5f|?h!3NpD-GQRV(3OT_jJgMNQ|wKfhdM!e*RiFB$rlZhH)z*vegeK_?1vMzv9Tf z%?}DbL=yxX^i4&_eAh(-VH45(^|u|y+K;bKdgtr*X;is*eBQoIeUG9mwi=%Ub!CWE z1ay27;b@(Kcih8*Loc=)YN`)I<2ctkE<6GGDJH4 
ztnBOc)^c96NNroD;xm6Vrf-%6@OArV&2zLlk6trP1lZCYoq>4b%jx3L#96@5nGv~; zYw;85<0)#seN&LFRnJYeuOW_J4UXxlAI(6ibA<608@~zn*E>FK3jCLfb9}GD7{rd5 z!R_W#d10H#z@v$yB#)YWPTZQ&foW#nNO79>hYq82Wu9Z85YCaWHG?F7WRR87G9wTi zJBy(<*D>SvxI!A-tqRmxHxPZ@F+L%ajJU^ACNGseoxZ44Qh08yqbaS1^b#khR<*$9p zc8{aJS$FvMYl?wfEJVM3pavistJ(LC*BWF$!|gHCG!9j#6-dnmhhVndF_k%+`^U-9 zWmy^7qi5Hor&{f_0{xh;SrNThY2sTH!1UEHPy7%+Z5Qm&Ed!jFltzinGjYSvPWDku z5x=B{y)CzEWu)daxv4%&UT#6a5WF$qd{@wC{ZVJ`iDlCWG!RcATK`b9N`k6%W=!9j zmD@ZSSPDO6l;QRtZ&Yv^iuC{sR8cB5EStq-{iG1j9x%wGyed|b`&{QNCaLdM=$>@o zNAbVeo}$*|R9<5=KR8zSCfa0F_d_Mz|3Rd{5xxL42{3ksUJZr=H|I}7NBg9rh6FphZHDy|ug)|RBsrgCy>L2PemvoyS?R3eU zB#Y^-Stx8Bf13T$=QTY=Ef${7=|%WXk|7|Kjjo5mPIjR6<+Ms$N5xNb-&Oe9y)$(0 z*uM$K!YlbtFxE6Wcw)TR+xNV4^fPV{^_5ErI4v5%-ZNwdM*=31-h`YC+#+Np!2B_F z5pd_mhx+TkX`c6<%is~QV4d%scajyl>bKGOy z5nN&Jt8DvgJ$d7vC~xWfu-U}Y@HMVgQ*%AzQD8qf z^ETp(EBV&@9_$#*=-{Y)+sja=-+~U&o5|PInhpd)xnrU?T6NdF&34MeL?SzI__9;m z2;&Jwt5o&L%pc3u1ma%O&o=g%^7OpyLcSG_XDIxPDuPd>PQ>!@GrjPFrgBLdId+Z* z2QJR@>0jTb7a6$3@1^OTQHal8Ry(a7+uod_CSrP+zx1P=xIh_&%{7V(-RYO6IhLAO zv*EVMSIPFN81O@&In$DO^)Qu9{G*Y42cJ~Hs|O$Su-Nv!6E7p&Sb1{GrSHh?&z;&* zdevQ9So`I=>J`pr6p(=QF>ycz3Nl7*zm5~DJWi6|liW(2-M#sVW#7aTUw%_s!83yC zp5mOn)gW@fi@c-ml#l~@bmB3+_un$Y1C#aTKVyDDGNMFE#H@DZMaR5$lenyMSv>w= zn4h6mS|yFHQjmV4b#yPM0d`KBKF#g;+n3^;gWtTl{w>n-n`{K0;#_&wv*_L;KWz0; zwy)ROEp51%#o@z!*8QSeX|wI>d23xpZaHnpZ!5`AG6ex?h3>A3e8Qal%;p=DtV#A8 zxCV=3aAxD=5GC{5jrdonVC=FKjZv)pf{3tN&jwenOPIZO9pw-qtE_IziT&K~jkJ)> zb<+Y$COazVnA*E0EV~}ftrMv9SgArW*2$qrLcua+ZgZuFf7ZwT_T~eh+f)Fv|NdsN z7l#TWh}EFf0vz8CGVzdYaFkEL(SWhqOW%1qGClc6zvK3olwxTX%&8W&>H9g9vHg4A zys*0!q%|aSq#NigO#rJ`G(bXUJegR1n8+Rq&{AR0mh2T{tl>b0m&B&MEkL;EI=K?m zEa2K5*uMR3&Sj$<_gycKiyl!r2DpzbiW0C(R%0@63cjE@5PEMP^ zz`KmksO2`B#^Yx*)cDOa$PAIA`BByYV^=7vRiroM?eU-a(JP(v$ihIk71WY`g)V2h zO`c$o{a)Qv-Gfg>z;~_H>M3UTdvDwzr_9T@g#@;orPD0RQIpX__IM?}Zf9jqP)Ol? 
zVUD`LaU-64jc%V0a8{bA+QyC36`39ndq3n?I+_VG+gyRk-J2v`%luG1sag(7cME2F zDgs(1)+(Q!ef;mhbeCdso9_k93}tq~Si8xAqZn!i(bnr1n^RgicNbpOH?Jh?ym=+A z=6$sYS^IsCtqe$#&z9%+Dx%lHKHevhmhZ(QzaHk9tf-ed0v6&x46a;J=kJ=|($nwS z{rW!9$$fuV_yZ%apHi{Mk9%QnKVxU-z*yWYDFm$^(JGDWD@ME~4d*Ogex+(5ouebK z>T-*c?oMRZ=Pn+k#t>#6y)>t<+U7Qlbx^~8L5SR6kEtCJ!x~Y_ySbsjO{ufBiZQ(o zPtFVI#&ON+%=+0NG(kUs>q4I|z_QbX&-W(C`t-IoN7Lr!_30;^RdZFCavIp`;_uk} zW+&j?Diu|n@#;cSHit3#TCZknS0V)$2M*?fnZA@81f&Egd5cX@Z8UWo!u3$9w)ElK zeILSw1R4SuPZ!Na!Ah5;i2SLO2qqXj)M zloU)pDpqtNoXE8Ld?ayLhk}l-iEyb^X>Uz=dDMj#8SPQy-K1&sa2UfUc-@P$yJ%YK zfYGTo#DP_~_~bw*yK9-_lOGnBn>qin^r!K$dJS_Z`Gd-A!1O0zu^$DCMX#W8)jGy8 zV-~|dR7-FqOdwGD`qS!!7U?~DxNK%{yQB-g9VSOH4_lTkYMFv1) zly8vZXQdF!Li6-NLVTi=2yO~4v@a!o7H8nC9D7f62VET|a4$S(u0Bk9 z=eyz)-q!;(ry3|9$iW_C(oa6YqDAJ+t$?{|{vn8{w=>KV<>Mp>`fq)4AFwaaejyS5$(=sK z&{1ay*YUn#Yg#8MdgECPF~@d4m%+B=X6e6S9(4Y(mO5Pz!KE1@H$QZ`aAGL6obciC zU5<2{cBhImzVJ-#N_l5cOlIQ3yh4>^BAcG|M)bp0xAoRdS|%*3+jX;rc*b{LS-!1; z>!~ZcR#(Sf(Ml#hDhp!m{btmsJKK8fKf=_8@+Yxzc6I$zVCSeD7&B-e41h=DEx5$4}F{rKO6MJ^_VK)b(zY)$by{$14Ka zg7|sVZYtd%#Z*(iW0!@Wdnms1niREz4?Shhh_A$n6oMOh-PEDB78>!W2#d8Ejy3#j zm$r+?kuWA^D$@Cg^;>a5Pb`)zl&7S1;~PAQm?J;Lc8%yit$i2eRdlBNK|Ui;@P^)7 zA_O|MdG0Wob-YbO;+X^p{_!K}n)dNMD{Ij4`0`vY$TEK# zDl(E!`0!Gb@(1cGj>Z0j3smdXR4@}?U zsd4S@fp-W#e#>sL|6&c`h0ta)b)Fx)Bns)aarY`#^l`1O943bh3yyFG@xFc;V=9q6 zSOkDG$lVEHH4UND#c7uZ7&&_3Q6U+1$8k$C&wdTy_OAgbJi6Dq+J2LSA!_by1>7>8 zDZz>64Z72Jza$iYz;~9e=#Vv(33_J-ia~xTlDfX@62E^7`IG9PUwC}-wAjyj!hTjL z;OFR3-0h^Y*Kh$&K78>z=m4cUkg7NPa38>G%fBEE4_M*aTzZi$maTSOe4cyiso8nV z8l*)z`Fi7?#|W#|G-CB=Gqtz;_H3PsBX;p|xe5i|#%&RO=^y*kT@%L-V;+bn?i-chfcKY85pJ)LvgIg=(81Ik3@l0WYzw8~A$RL!NAuz}wpGb=1fiW6 z6p!&#n38fh(-46WyEqFA3(q%Esw-AK(vr9HOI^jATZ}W}v2M~dC0^C)sX+v1qbLi# zWrH4?U`aSdZbVe^Ah{WG9m;^-xX)=xZ+`taZTo_kN>44^={|z?HO&uHp`Cm}nq7G= z-=CF&Ug_L~%W8%U^OElzG%hz>mxb&k64Uyia=32^xQfcrM+j%;uNZIbRM z$tY!Gmk>_;y92;-v!_k>d^{ZQ2W`u4IMAsg}f>m>4Hwd6rRnkKFd(qI3ur50*e9!Ir%319xdtts3)Z 
z6%F^~Knv6+)ne6BLE#^l^?@xWO_QKx&^D)V3R=u2t53iQ8=s(@!I_nffj0e5g&s5;iW6JcZ+L{7`0L83S#C_buL> zE&u?6fw%79&oHrFfcL3?Cui;(i*ccP;S1;dQ#Uj0U0k)Qqcc5^5(@tc_U>uN3?xU7fm=$D7hx)HLx%(V}*d${Gq zJnY5@Q?oBH!9o1;JMyZw*I$R)6vE*wxF>g$LoBb4mf`z3mbOrpX1;ZD4=!tDk?SJj z%bjliKIoWvxjF3Di1*=v1ay^4Qn@=R{_38BEgg)3G9*}agh-z@TGORz3>r}iY%vQ( z;|D7Qw!H~G=82&tF{;_4EH|z84-UCbN5BW}uc8RXUHuZ)0@&QSw|> zGQ7>6p8YKH#BRTf-z&s#o{q^>lz(o%Xd{J-_=qdL$UXc(Vtxk#jeBjOYoKN$;H4G* zA`G_zK8~DE`H;T)?WC(#X1!(```PZm!KKFCo{8-d35Ah$Y&u8&aeG~>##$4HVSmx| zZX>5HTIY>Wgf25?4*d{lxnaDlD=Xxp!K38er^?>K2i;$skCEb|cl@i8#_1n0I`uLXC7yvc zK&chb_j^z7K|ANo2LZFIE8B1>I1l_`*-et0%AVmQ2&>{?Q^z!>>+Y^xpB!%+1rXJ{ z_RiAHtoS8LM_5$^3!(xm21NUwi7eUyALjkI?pgUj9d7jF+TEWe1A6K$jMQ+51N|1% zM-qW*KB`|Ce6zsZJ9cVL^kS9pR(!h@&>1RQYeMHf3$yop}%={F_4vPbSG z9C8Cv^?!Wwmt$%n-$PpKQ-cC(eI2bA>}#lok}>~e08C~lRk3Z~xTB|6)g1?qoui&L z%lDalek*^wX;gNo8V2^HC|&1}gttxGEM9qlZK%sa&o z{a*DB$Q#J@Ij1|Tyo^_J@&i3prHRq=Ka?{Wzb(AlS$BNiB07TO|Mc*dJ=c|rj)gGr z-XQnh6pc*uhcyj5MB4Lc$Zm&lFy8M~DlrG#9JLP`lwSm#9JbU!Y?%nP6l$WsjdXcD z{5Rzjh!b3N9=Pm}1WY!3ml4O>H`H)#V}_7*p`O|mzc0z0oo=KSp?sJfAl`>kSSow2|S-Gd*!Put7+^xHPnT)(|- zZ|HWzujs;-rSL{!sEuo*@j7vzSA1&Tk#@4mbI$DP-Fu6OD=-$O`yK+chaEfi2iK#O z81HAipbC4vt?Qp~nv(XT*iJ>X1>3-s%si(x4_`sQEE|M)=7M!Tft)8-ijhu zn*HDoxH4*3UL&~FTAVo*XB}~46cBb zgPvDC_Z1O+o9d*&8V#I%gcwF-C3ux1Rnace*_B&lZr=)*07{XRd=~5|9hb3b|6)^Z zG;eQy1&6waT-%Y$+ps!v#|uL&M5bS$#rmhQ9ihI4z-?!-A%Wgyv~m3UDELuVZTHiA z;0;M`t!up$m&PE3rl_%BcotWe+Hmm7n7WzuM@~w7)DYaw+*SJ6>Q``T4H5V1P})DQ zM#$R+K@>WIVIgm&cI-HHEMJPO_W}&TN*bd}cqd`;r5BW&rj-`RcPF7W&rH6fT=`AW zr1#D}xQutNow>syE-Enk$4vvhzM-7|hq1Q|sAB8Gz7HEvN;;)GrIeD|bP8J zM5Gx&Iu%JlLPZ)$0ZHiwMY=%&L0Y=rHJp6<{c?XeH_o0}v*I7u^_y2ap!S+NXQkRF zlR^gq;UnTyKhfy(D;<4FJ9wCUZYDwR|6O5&BWTt?h7O3}Ef4y;tA)~Gphms-Oy|Ti z`%d_lT??rq#QZjn^0jO*BeS@{;Sxx|f89lP)xGS!<_DAx9h@GNH{=+)DGqc5fzXyq zjEYm<5{(|w=2O6yX@##E^O`@drXL(#-Az+V7+Klg1Da`rhgZBn_wnddA*B^z6g!bAo#)MjO4|{<)Gceq9T067*mIy`(Pow zgc@HxX>9rMh8TLc;*9Rbe_ilW#85R51TCb)wwCATjj>PHogT83EJSdJfad(LoxPJH 
z;L>n5H5-D9Jn`82e6e`yU&5Bp9&a4CU3&OBwEOm4HLH{6lR5-`f4lQC4M zejM0)m_c`&ZXWjK8#z|$UiLg;<9L46OQ;z(A|;waen(ro85eb{jslH4{o*M zz*dv91%xP}K;DwS>L{{<4P0j5+-LtD8aU|I(2(AHHF`Pc?K0quV|U%zjGp!5y6>Ou~W==N?SDq{D!%U{o^D~_-K9|LOn@* zAQ01lF(NJZ-KWXqShx;MfWl@A{Br0fFmcR-IgO_&u`QnrJQSC%uVS$BwthfKrXfI8 zErv1_0iD!PSUm3lf)Df2`|j6ZFv+Dy1=!e#pMp384l7!_dw~a^m#{$qqekcL!>z-` zSOT7%O5kV;nmyZaH~*qQ5lsMY!L7w6)sY+yWDafn#g_5FdvZvc2U6F@t;yMQ;Zq>M zh?RTg37tCCu9J7`qTs%}i@`QRjnw#Yh&0Ad)nAVBKz~b=M&i?i``h)@Vp8OoZNL+)P z&1gU@v`+C%@!Ssx?b>gSfkkauA_{ac&Hzu)R`LKgqyROXda_lViiL+t=KfH|`7Nn) zk;bDt9zX|Ei(qfAf<;(_I9dVjz|E$~AZJ$z62Nw#0FNzM-JgUW5x`cYwAAM~4VqF% zELS}76f{3G$t8Qzl9=q!#P|X~96oeBl(8TMRPz{RIus_ZeL~wV7@k3;r>}i~HC~(x zgjq}T)osawxbxY?HRxq1ZHg(cyhk zJeu{*``2QD@9Xb(5OAFs`yCE})af|s3O8&#iF+#Dmt=YsK@R~3qeX6q<)JG`0)o-; zMH#MK%%02~G{Yu5?o~UCSb(C^W9aP`SrTFKXK8wKmq-lA*E+h9GxS)4u*nVigE2Zn zit7Xm;mOmX7|a~N5WT9F&*)8!B4&6jB{JnKhc_jv6!Y5k!;`|Gpj-W^3?j1|z{OzT zshZ8H3+3t9X^2ceDZQOg!(2D0TvP!G@; z?F?}zVMsFfd`;N0)lnjf84p~RzNg{=TcF%cg+4-mz7={O%^Dtw(HH-lk-*+UD@h5F z7+I;9Qh$jH0TJ*K=FZl!dZrHY2zVr~kUL}%eG_oFO??PdHu^BEC-UyM|Th@9o%MKH<|CqP7k*&s1! 
zoCA4J3e!YzzgZomG(M-fxeFQvs)HwBFLK8$1kGVQG*cje>AvzI8#sBUR6C|#^Nj+h z$73JQfeKZTd6v<%Ra8hXiL4Db2M8(7n=%^D)*szx!OOFy6mMzarnSw0>KVLBn9;lG z7>)@4nJ&Onqtdk6rT6LHx*IHZb(54#oxuh6k~|G{vPir(>tw!NUBM?w?RC*Qe+XtU z%(J5XV2+g_4s`{igN2})xWi3>5^PN~i)<2TRQ(-@%S*|PiQSpoJ_#x&iywiOJb<|+ zSnMkKaTj!v)~zJ_>6_>bDEc_n`Nw$@=8>w))nVVkM#m;P63G{b83m%fwO>nZSZ~Z`Kv(S8Nn$|wsKW@EjkbpZ{+q{#|EhI39aTUDypSP_B5{~Fobg#;NL?vux1}$!)$^54E!$+&}jHej}9W z@V;Qa6plKn|2peYDDc7q{iW)k0}(x=RK}DdA|dGoE`kE8B8%tO66-`c=dVz{xoKXQx}7mF$1c*7G$P1{&ARoxxcI^kNnZloAJee5r!J(;Hc_lrMrz7 zS`M@6LOOO@ce}f%5@ITW%u?cIp@*bRZ?dQ+`{uLWo=%$hQ)yQbts+$grr}(k%7?3>5avq@Fk0;sDC#~*>-!yd^WW$G_p$wR88S8bSmw_3!v@%^ULdb8 z_<-5bT0@Rae^k|Y0iA8$x9u9V(yO*f7Kxs(278run(NNEZN?2yb|x(|yXFW>ec|#1 zhAV34Cb^pVn2@>N!(Oozy|whl2A2F z<*jVoaGSO@Q$j)^R(gK|mn=diUzwYhn~||)+>j?%2eQnfmepU%cxSCo*O4lA-SFot zS5MgYdicswr??oz1ou1I7vwMk@#+UQQdRHhiLrk#deWQVL*=JokgA6}kU=q=i8pN% zChZ7B?h$DGExYBeD8WaZ=B#M9qJvYCMdF)Mak^DI#?|>ixA)$?)VLlhU7o9QgIYHNjtKP=%!DT2ckcqN8}`}p9=G9- zq*-$AXc8SQ+D_D(53@Qm-uGplm?fhZmexiz+^#ld5kk?(WHSQo5Z~QU)h?~*HZLqp zL*w_~ROf$8MbO*~;2eGJsRZ>1A(i{M-6PlEB7}FKsJ>asrvZ25~nZz#%^-Fiqhij-UKzk9(!iKni~^HwohOEDk>a3X|Hi9P;+$3+eN$j9&2yP1EJ8NlTarO}o%erwpVfd~0} z)(zfq2CN*1-(=1g@MZ1c%PzU?=KklhVTdcT zGgy6KJ&&pDw^+h%y?aW)-Q!lApagiAe-D2X2K;xV|L?zpJP^Fk7q5MB0C(2+R6&9c zew#utv10MICpZ%So+mi!408xEa7EZ||Oo=K=2lywah&XF=vKD<}N7C_{g;I>Y)@U8v3x=hidu=5HEf#>xb=V*$m})k`cD z`W3ufk3WVbJ}$Jc9{K~}HXqDXGzW004~r&Kx4`|*T>a-qLB^cxZ;pyIsTP)U*z6K+ zs7}8(d71c{fA4$&Q!aiE*O)ke#xw`2S^QbwgZpjE`nzKh)KI}9I>NLt>@R zt7E)fhQOr(W$2$ruP6bR#x<=ImV5#(U0;%}r-YRTWaj2c_n-c}+hlU=LH=bS$xf;PQl!cZnzxs%I^>#VJ3_M zG>7Hg!bHEO&RtM*#4(>Ge`)wjgXSsAsl#~GvQPeBmKe-fN_9G zGh|hzxehQ0_^$+Fq(t(RDW8r5j|+VP@dbFgcs$xoFk?!7mG!iD5If-m3SJtJ*y~C@ z1+BG97~dgCElI27f|mfcL|ZTu@&OrXGit47vG*#^d1+3r_*<<*lf3tq!I& z4j(|uvPL8080R32cszIZF!gyhX?g$# z)b}?$;r?o9bSv{$;oAE6KgT2vK+eew=H7kV3~VrCY_{JYRKS~-xUs@OmNq8v|HF*O zw3jawl{@NQR@@ODXW;=(vC>yAAnm+Cjk3}IZPTwD*!UO?+%hpfTw$08#Fc0?c4!@> zchOn4K%kwTd 
z0?AwH;t7es@wbZMs^PckJsY{JGZ6ve1M>e7%6Ch?i6NKq*G5I4|3P+D7&r(~oSEv< zwhfBSkjeyCL7~>D|Kr20`O@ZX7_9^2Z>HGgipL>vR}qR0)jd~U1%<-Yo83F0bIdsk zmRa=;@)kpjaMpzZY>pHuR%$ci{27o%g4uB14LW#Idd|eEUz7D;2D^QA-WjPsj+TB5 z5KgA(vHu>u$bftdW9V}&7cc!(D46EHVe7@DxuwW_ek+r9xZ2SRXfcdNGASXNBwGR^ z%(1p^zu=nHRzvHS9eG(6v7mU6IIYbiE{?p9R%qAtux$67_4^n*19Zz>(4Y*sgQce-SjC1rSMpWFoMrhXc^6|8 z%NWcqUX+!FElzlBhFz&R>P7-!QCj%@4**ap3g>_{DR%czNpQ;jVvHxy&cw!qcW{2^ zxu#3=7%0Dq!6Zt7!na;86vvd19Tqn$U`H8#tqs;KD2w*9nIuSWTVMr-p43+?48v={ zY|R&25WP&T`=XaZg_Xq-kXxc7m-q`$V2CBPLy64j02VzqotXZ0+(WAZ#_>(K&SoH^ zVyU4@-lO8UHTQYGoBIPEY)MbuiugFfNQL@1vbSUR-#|ic&~BVS1uEkh#DmX%XEKQ^ zm|lEaz)iuo6^hgPIQob-MeXs&ahpg2Pf&wu0VYGb_n=ucvss;LD+Noth8^^FZ8lU8 zIx(Ao2OXY}o_&2+NAR~-qM=PV_j6ln-NZum{;8C+lY`BBgzSdziQ&ya71~$Q@Qt<8ufAS$KH2RyRbjVqVX7Q zc~$ZmVu5c|^m{6f3VC&DWhhvQ-o{D;e>2uV1oI37rlc82{=}J~ee%WVyUsNZqwlhw z$v73y|B$N#WzEs*t!wLGFM~YIKBrfpG;v=?gib_*I(*fC>+m<&%&@*$P!vAHAN4N( z6EH-N%YStV05V8IUrtWICh)W3Jll8w#6N>eh!CWsUx3l!?>Z5q!v)V35J&(nAn>_-$obih_Q?sBJjQ4c-G$2ZH3?_oPrtxQSg&Mx84~0SNx45%2GY|H|IaJ(pFZd)m@rxX zE;(T`I#dn^pU0};i9WoWj{PT2@Q2eN2nTWe`xCB5?8xgeTKLBt;Y218a1sA?6WNJ? 
zjH7Cj%yD2B63BEISX(uOW79ntR9RIr`JeP4^9lk(x^Si*v@%A_?44u-jzJDG(lJ~F zpA74~;L02NW4r$jzMW?P@GLUJY0^CC0@xq9ZevUsfg7#=@WB=~@&N{?f0;>{lm1^b z9ppdAfa}pBr{vv|< z6Xu`8{px{*6Q+zg1aq?eFcxUm3Q z`N6<(%njNQ65GpgpD!~3(*pUR>C1`Q3bpg*y2tzf@`fuK{ab1$FRoYZP-#tj16bjc z*Rl=Jz}Y1B?tAzwo1ce5>G8p0Hn;@kg4mR{r31jALO~289ugiD5fm8|1^%lq9;hy3 zGh#Div%aJ^XU$@rXPsp1^#R+GWwlDD%9zT^5!fDYw>}=A=ypU= zk+gh;VLCkJy@Hnrzh_sjMO;+EldX>nzLRd~f96UD?2{|c$1Vh@y)2mBJfWf1z<_A{oJMC+pJ>Wf?pl5CgS~>s#a)zkmL@-Q6nv zBa5KLN-6sKaI4bKEY2+mfI}M{hF4X%SpNoBhrlpB*u`!RdQogR)KzI<>v5q?+)UKt6Q(u9dts6_#MVd zK(&J>mWc8^>bt$GhppJ!;^ZMyM zy!peM0Nf(H%Xsj!v>qhV0@XhTfb1_rU++W|piiEK$~Y68v>oGA60e#RP|22i8|D7E0T&Vz}xNqWZi-|pM8%MLSU$$W{JsH zU9ai)akz)aKF$4e=BN6#flqq>WzBbf1i-ftP-vFumA*KwTyQ#BmpGVmod-RR%MAt3 z9VB39AEj+IQQ)l5g9psL7DFDB%IoaOnb#ePj4@un;dCWH{O5cBt9%#n=l2gHbLJn6 z(jiO)Fn9MiDiGtdU*3YU| z&L*ah%C>p{K7hU*tb33eit6X{`uE{1gO-RLj7$>+n2Lo!*~?9n9BlNMWa8`5P$R=A zhUVypWV_l!U2i(@1&HLbfsJ>dQ7~(#BS59z|_xI1G@3;-O@&m7`gKGfRVRIlK`(iN8wdH zuw%ak!3D0#`Q>*gp#Rf{DDF1#s(#@(>L24H3!JiiO{Zm(qp`v(@i04B`1T^cQ+@ew zx`!DrgeB`wRQE&-rmW#I3#bn4J%K^LN2K=>=$jECjSSJ9+-Yfi`unTjP z2{Dnb*A0v`$uN5j{q-IyV?XR#uT=Nx!{6SZMtep6-nqC*amBq~N?Y0RK}^L{vY&U9 zQ5}P)pH-hJ z|9&?`QUWw``ud9TaI}4nn9(eRr8686w~VAa*w4qQElTocLC`@)oW+ld^A>2hPL7R! 
ze}B^maPN1=K!K9_7IVt+}4f%aFZ=?`9(MH?{ zT`)#55i<2VDmg!1yM+GV6X*kl2Xi2wjYPN%YY#7>0MPr*GV>WNDD04ON}3heeF6|N zb6s$jxQGJ!>s!_W6$J5fWH|;YM=Gljvf=+e4wcvJbB^xKzQf4BwH!gro|TM0hH=-9 z`Ys?WaX z{a>Hi`jKglr-A7SHYw*sWn<;h!_Kvd`=;Jx(@)Byn!p#8uEWFyXHXjoflm;)5O?{p zA9HZdM1|@XmUVL(My&PJ>AsTFHJZ&InJt^m3~Ni<00+0NQt4L~G;C!v4g^lT9MScK z1KyycKe{#j2s>{Rlr0S?jTfw8Tm8rof^x!>>V01?nftD@qsRivXwM2C;tkl?vgWM7 zT4{4omBR-Kue}Eh$q?v(Xh`ry=lm&?h}}2ybGZ7l@{1nnu07(L>VX^EfX;diah2i* zIKb-%ttL6pu%EmDFJI&EuOG*WKUzYH6!&}0+Vsu-luG#fhX?WJi_ePsP2_4fint7$ ztaIpK9y<{X6S!QZpqD5Lt5ddbL6(}sXMBD6y;)DEpB44?Xc%i(o<9wIK;)JNx@z-) zD~bg}W|Dy}Ad=&&APYl)6l*3j#ntRS)-GCn#wjVsZeM_t@$0>YzE_PyzFgiXxBFU- zCp0s05Z2-+2CTT%`vdI{M^Fi3I+c4x56{-KYr(rcxCSH=Rua>bED$`V3}8&fmP2sO zrTlkZW+1E^#0H}a%4ez!cilf}{_{>NwCn7lwbX*FY2U)HGID)H7n;Rzffg2iU*kIh zVwm@PTGvvzdjA^3L|ZwkonkmD0tw!iH>`vQl4(6vv9(8F32tuYe#~c*2MxO- z1)ecY|A!d^{WE1a(LS6>J}{QcF}4-g)vOXF*WCm?Y!X<3VP{waNLHk}4yN%#o36PJ z*NldbCksRv_2S`^752|T?2&JFh0=rBC%be>dO{l%leil3{62aY4B zjt}?!+ddZf0yz@okPKE=%QtP!X`Q4Q=;CZET+`=GUY*Uu7^LB5wjTw@q?_h&tQVQ_ zlho9o7kzGE{e%GZ5){|d@H?9?7;I+^6>F*^e&h*_v&*QQjkY)K%#Xyt5;8HF{&WsX z(pd2R4VO|s4P&e&_+zN5!UNw_{pYx&67FQ40<86Qa%1PuTdgFTK|tswJ^M$c$DYhp zT2dhUt#Jk-wn_t8Z@!+(Wi~USdC%+1y7+c0Nej%yvgCPzg>~5k;Q6%j20ba13&9VG zM}PQvd?`K$J%v!v2gv(rzhlF*R{w1PVa<-2p)r!+&ii!L%K~-&gAYo{{RA7mcmSh1 z!1nb63&fdJXoSgZw-f~LL9rcX>qh3mWX%T+2lXNJI$kxVAMYcGJC(q5&^I2IDG@%_ zw#WEBdcEq2ZsS(ZL~P*^RF2X>utE?}zx6$SVD8z&m0xjtb3)8S0m)b98|DmG5<~C8 z``!HKtMquVqTd6wd|}0#ZZcA3eD-|5YVmj%CVB&?lcJ1wDD6`Ygo;sjla>Wc$^uAi z=}9fWx3WE6e0$}{@t+N3o)=FYEI!sSrDKC~8-pikS#ZDMuK{T8AXrt_3;Gw1#Wf<4 zo6BO~27K+4Mz|65WM=TX5BT)wn~-5xljwXUjhlqSgGeVZ{Pddhk>0uCuI)p0w3NuC zDjq};bbD~Xp4yZTwg0vL36LhXo;R0@B&FJWxfwg>XEUn5I+26@y##%OA zhT8iMxJ{2#$L!ulzo`zWz6sLnP(b?IA5rzD=M*5RHBnE?gxy@t8MQ0)cFD_GHSA4Fn%A@7qbA*L3cK>YJqt z-r=*A&SVD4_oDID=FF5V2fpVQ02`)QN)!L%^aK6U$!oJDd(fhjCC0Fvgd~#(i()!+F0SRWXX@`<{kCS46uYZ zJDdiJ^7pDH>vl&5PHxak4=GL7{qU^EfLI1kK*-@5llA9GVakUc4=d{4P=+aYh7bf0 zs|g!Bh7xlSWxdbvJLXt*0HVgbTZVw*wE&!OeJ=>bDFAYrLt^KuyE!ov`PJfIaDWU1 
zj42HF=4T?Gw}y~zV+h~{L{Yw4SHC-!?%=W&3r&h$ik#m& zBQ|-w!Po3(GC*AV@!E<>WOW$o2{?X~vdWWI`4QL2_OK~jTHk229b9gX8JoqOer_+4 zs00vv&@dZ@s{GD(rzp|aOU$wu2v*tFi_?J%c}8Mfm5CR`qn1>jfOe&1PQBJK5%hy4 z@S$(>D$$oGpdA_8%p!2F)Q^D&P1fOU#%SW*%V%EI4=AIS)f8FnJ&%8ILFU8I^pHMs zsimnBIlKaU zdT96RCM!b#4k1T0vdbnoTB}IhfN#s4u~$UZdhr0X(v-}xT*5KSf8{xx-usbINe}jb zBnm;Wrw-%98%R9P7bvcL2h0$ny`WUEZT4(JyY3}2yq0(Sk8rsZ49*~q3E4C`%x)g$ zv)MX@e)g%~aBGr7BIh?a=-No;iBf>hvzbH3VqRGmIhQR_<8f0nE_ev;E?TMT>yBr{jyK@2*b8%G(fed@t^rtt&|S)m2k%qfShJG9tPWk&&}a@s%X*P9kJJDD z^>2{+UNx3;kzRgccDw_&st_1f=(A=t8)3mwycF#hqx?#%2x++ItrAdH>Ft(T5K$Kz z>1epD+H(5@eEnfPlMEKyl1TYKDYmY^tKHb2_4!z#&}xLaIdX3P3qhV~M=3<#Zj&iR zhvackCDLj zAa|G{f=V8%Vl!fi+=1{VWf&}4Vt>%=5zP`Dbm2`7s$mM%6HLu!E+JvhoP$Ke$NX31qNEE42x*9NLx?$CW`7`4;r23J zd>LS8D>tPH4@5h$ z3JlUqCFBgKiLQEX5YiK?b+Vu=ZiKh}fD^foKy=)6uO~~Wk~0#ia@Oi@(a#U1+@uL< z@u>mubj#Cka6`Aa#=t=k@?6?JZP~-Q8)t7=kn+;Nq->wCAg%;6zOTMXa&Tq26?G?1 zFVkuUg3mr+mu-zj7*vqIIM&VlrPm0*MoQD36-mU|?a=Fck&+bjLmp3aTE#I1Lb_tf zr06A&%=FgK;@ot$@!?FiqL|)ppwk>`y>|}4O;Vqgv%k>2th57Fst8EAE@QjS{B_}- zX&R*w?2*slQ*N0LbSM?)guaAtN!)=%@>{6JEwgC1iml*e6ukNrx?HT=t?~tTFn%i` zEwe1qXgG*W9b3bQT$n>yi8d@9yqx9cF}|26Pa9wnwTq%r$`lD-`@NebY9SyuRr>~0 zweZo{T$unp2ifP?zS|N{L4XmSFN0dJrBT+h@6(wj=ZR2^p`o&Dn74>5HVTt_|kkk=BTgBwTQ){Xlth}MEVg|py`Y-o|~`ul8pGtYmDw=KbHl5y%#?# ztb&?BO2huCnd!ILoy4Wv1Cl|xSs1G6Zmn0XUiu#fl&C~Wcjr-XbUmAkQK02J;oPCtgjl=$SAV(Ii zvl9FKbd!umSc-w!wtLxJTSPJe68)?$QFoo@$FqBU(`r$)BYl;NuOMr>k!4!dPVToo z4{jcOMV)oGIF#7e9z{LS&)}&l$0+O1`khu><-BQCkXpmOcA|iFea6*ObN&p?B}#Ji zo_!ISbTu&vbwFI~PMy-ORIm6`?r;KgpVkH65CYS^a4@0cStOJ^`s2RR?ZQ3KdG*7= zc5(7G^BsI-=J&zR&@pA7VMl3#-?b;#eCJD&j!Pt;4S&AfJJD(4#XwZmDV$i!(|!bY zxoFoWIN@TbLUzejzF?({oIoL1Q<;qE4Vy%_6Iuh zo^5IfYRw6iRAAfaKP+p?YrrCKZaNNb^8jIcw1r{WQYKWH^j!qOMXh&Oa@1b`qSTH> zhy3%SlD^x!_8bAkLvr2IyF4a6nXHv_E>LBUrS4X#5Wz(yr+~Ptj~$~p zTV?6u`;xZB5bBVgRyp4-^n^3eR%tT`4J$jncD2yaz+>#Rv~x&g0!tgeWv+ld=Yk3O zJ_)xw?@UaVs<@TM+2FlX$>%EqHk9o3Wx32g3#ytYE__zKVbWJf3@TR}jAsWXl5!OJ 
zP@YG3*$}MMz81;vg4~$jSl&diM&4d{aJ%*07lYw@C@p%(g5Q-yO_dzt35Wq`tKVJV9YRw% zlBZak&kQ01uANYI zoAH>VgTReLrB`vE6t)XZLC*=^((ZqW@2bS=^$=&UTR1|Ew4Rg35)^vhA%5_OeMPAK;t7(3 zd@L5y>J7z3cgjFW}nm`rN(mr}j!GJmvQ=;+}WB>bb3;+RUO?Rk-^sSm`HQ zkrkQlWC)86KI=|J!Sr-~)%~+BBVN7zI@29qW!YcttFal`g9M&yD^+V(U&&1sKv>c$ z$?9jkLsx?o-|Qkc$A2a9AH#K6YW#U5;s?@OP~t1$82|MEM(=C#ipHhm`gq?L`iyCS zm}=^SIc>OIgd1a^9azNF%7(q>`zi0ru;VN8M%q$+(XR;q!X_k`4*)Vor(f>lp-dAN z{#tyfWV}Y-y%&5uiHvS zMIG7t`#7kLORp9yNd>7y+HOt%qQ)+u%&^~jAjz68US^9+eBkLvKJE(*NwuBdVm+rl z+Oeame;~r>3imi;?l+?5(fJcKUVpUG$voY}^8&pL_SQY-et|^CI}_%|FtTr-I(AB3NgvekmzqIal)jtY-jU+(BPg zl4q$P@zoGW3*R&5Lygd>wu;Ux=wJ9XH()M85v1vO0ZAvXl8>sBTSCtGnK)(H6N{`h z4QIJ~X6Pr`K~Yd6vjD-$+m(DWN@v!*5Rr{5z)D0^%B#H1akSGat1Kanq1rq7(q6bx z&iDErvUrn<@CuWA~W(l6-zb z@^coY?GqMx*@>@M(v{Z-T+lRgrWqp~L9h63W-A$$y;Px+m{{_dVC!oY0S+Qh!$ZQ^ zxB}m`p*3lJo1?Nx)%%{9s^lIX&3Pkt($HPUkNVtI*Euy?JqOetK0%p&#AU>K-i=Y*F0PEp2~paSya-6#5&VRtyLhC@>hJzS@sm zJVD)kK1f5LS>(%!k5)q6~g-KaOmA_PnL%#)y5jl;xeS(Ocx)4Xfs8Vaob^ zC@r>PL|t-elewOzh0Kov>cnsidNy9Ah*ZNX0dc|>HOk)M%w0)*xc9EHqAaLX=2j=Hp|Bb!0@o_d+QQI68ak7111;M0pp zk&&8Sb`uFARw^ZvUxaKmP7DFW2c!sF*Xyn`I#=d%Qlv;F3B0mr`$~ScNx+RR(_1g! 
zESr8=U(@kSDekOgouO4BWs1PN-C;07=c@E*lx0ETL_d}&88 z^%XM8pCttHx8CFJlm0}~)irc>jFlbql`Xk1n%uf#Es|@LgVC*)X&n$Z*U^eg)!Rlr z8aE+7H%n>{;9G_ZnNSjpDwY*q;4K^5VNm+kyc0`tgP3XXaC`CXTo^JO_f}}ayVghw zSrT>~5}g!z2iYLDDN<#F&TYbu>o3gO_go~)@P|=+x8xE!+6h2CmRhCyVn^X-n}JNl zj{=rvC&yF51^P0>LenP?p?(Tgd`t7sIW+B2)%?Gy=;|^npc6H(iOyTzpXN|?*U$Vo z0?KB-GWXR_G~C2DNHYPPeUf}Lrv-~xj+kuDZA<6AOevG^C5law&$Mw>>aZA`7Zig_ zz^2CJB?x^0wGh!pqOILe_rJe}ing47yaIta`*EM>E>E7OLX=TCo+l&gcSLDKcYN8- zz^lfU269R0BEpW|o_%f7EYy|kuBIc^-az)}?v7WOd{>3FIJecmJM#(07izc8-PwL^ z-y_>=MZ0Wi;P}u@s@(5ZnPB0KR05C6Vi$#S?oV|A_%HLwxO(gqQVQAE6rQ){7=?a+ zazds3`?OPg2Id1u%Q7QCAwmHCOVCNm0Wm(=tvjkv=D1jVkZK`Ec;nc5yhSrxLBChe zn&MfrTJ*AYQID`9e%<7I%6Nkx=2=PU1?cNtW0tpwIkq)qf2nV3Rk}U`W`hXVigVgz zN!slxF&f(vNw(e|JDKp%>vZ~9H6EtfVE?ziNZt2Jv+1Xc=f7*9Yat=)H&O#8P?YCZ znI%LcTEaoKUNYKV9qdZ+RYo){j`D6v(mP3O&Gd!)`$T2F#49`jiJ09?so~zj1lBxP z4!`Q9I-6Q+KN}1wP~|S|HE{4Wmm6GUG_iWPA8eC(`vJ8nnP;ili}^KHSv_eCfk!rP zPE#grw)k~`9{yY z_8V+%HFtdFzO%pAVkZt1GaYl8rrO3Wx+0$n(PePVXiQ0K{7D<26erOvRt2g%N>Px&7LTmuKknU1j$~ zEw&?EFa+4O?JDlQ#oZUg>cASr`fRNU1zE^3d=Mh$^iFlm03{MBL^dJE01OOLGM2|2K-OM%Kw^RXCTsLv5w z3^!;jj)m)QeT!G8n%fujA!4n~WYktL8vGxDu7W@mCiz2y)g35MvH9W4 zLEIp`IeRhkZw3=CH$2D8EQXts9#1qWQQ%w3wyJDM236SEUXK0KkM(!Qr|nGZkzF*+ z3*oVP24^?V3$d|e(^|uaN$fEfW5|RTqcNH3t366P0#gY|u%5)r7~oFTjlG*&2>-#C zY9Nn`p|r%lVP_CLin|e%OFHMzzhttWh0$8kQWhtcteg3+zA|ZF32_G!Px-|9y|OlT zdR1R!WBzHmL#2|i??tzwUdHsd)+b>oY2l*Z1xX!MIA?bd ztn~P6B=&QFu1>xfzOyli8(v9GfSgIX%q>7$pg(|%CrFIwnpe%~&X;uxKf{w3LJ)U6 zsAS&=o7YVx)FAYDKuCWC#JOFKV*KI!pO(1uPi8rdW2(w`;6meI{L|OLAAXPSHuoO%WCwoSzCPjt|F$^oLi`S zZLh#zYiu?1f9ZIP^hnfGfTS)*jG1!F;K9^b$ldU#*FM#&?6i;;oW>T9_0 zo)#@A5FS^WqW2YX{Z87WcZox3?}ZciddLv&@Vq8B**|%HKxy&ttIbOc5YdBHu4^U^ zuDOOhPP`+=7~k$+^An4ZBOF_Yo;(n}kWi!K2@64^QiVLn=i82;l@e}&!z*D>)C>F- z0{sv#xry;Je|Za%BOq4|0-6gEQVy53Vz+RG67!*0H3H7p6Utb4g>!^brl4W{u_GaS z11_^YvaZ`P9*W)v={M7JMi!=CMk{8Ng+e%`WIQrFn{-~S7<}{c@Nw+kH-K@VSe#<& zd_lNRxJr2A@L94FlN4FAXy}7)Y)dzBujVjv;K!HY#K&tVov0$Hpn9Z=@k9B;C&WCb 
zb9&r>R{zdB+B@_FV+~KV#Ixm7&^5YVGLGFTNfOh^YoOcUMy^y?9asIR?M>%oA8?KOV_z5X#R1ng+JDH>>SK&tlmt^=1_k9K}ExW0MG#*(+xAKZryg z1w%V#yfb+cK{SCi>@tG1-*ipsi$7wqInb>=YtAuS?h1_iFl%59B0z)6?VUt?Yt^d> zj6O^n+Y0q2b`L?+WBd}HUwsu>of2Td79qRF7I&fs(|G8dZHp;5h(?^XHOS*DLz}0b zPC8zGhUkr`ixyGu8iBs-id?SRRVWnnf=@rQ77EW3Yj`?PJ|uC)6Tph>V*;I;u_q|n z(i&$qB~k?s`NL(5F-3$mvCvTYvn`yko0SOG1*q%~h2-yM5{0))l$YcXoU~W6kcQu< zgW6&-_dU9Y2MNKa{UZbyM-b?}C@0rBIV)rlv*o29S4^#-{cbO&^!NUNL@ zT9ioe!+xA%!t*TP(1{*#k`kN!g$a$Knf&wFMbCHkMixOU@b4^U@K{0EADn$Xkl0Ds zfS-61U&MZ4k~Ita?$s>J7jYghIoSwDE%(|&UaSp0=*LP947Zi(G`*ib-8D)jGfT$6{I&e=p%^l@!B{M^ECt1Jc)9 z&ygxTY8HBevG$fs+VqK`)z2wzpiCZjv3~I){v0p2n7arpERGx~wxomKzwB60Bl|b8 z)EzW2yGJ%(E9yU%cH6<&@gq;VM@uEkVU`_hrefjyL#$=sc%HCb zuLkeQed&QLr@}`Hv6_iHpxMvk(47n=EfZ(OtjhJpmlk~lLaP-#cqOL&CAjJ4K`|jm zIu1YIzIOK+rC(UGK^_qsfDKKyYMmz^-QkT#a>jgkf$@uv=? zXg;f0Da>cDuIZGSw!ktk%BQ19I( zR>$TYitYFoyAmm@ekK%?1)QTGJQn00V|5^NJVyI%Wy!wX1L=VJY{^9HH2f{^7z>W0 z)%$fu5E)0fn98Yst7fa}2BQ=W2kN8W=B-bUb6++@7Tu6{zxb>n!27A8EU6X^*5n-FRE8NP>WeyN_zX=@Wh!4|STYu;fsh4uJEJhCd3Ffl4*aceS|F+o^j?R} zegsF!z-ie6EPNzw>V>@H@>=+aA8@TW7*)6uCI-S7BgL#RA6Ua*5hf>}jW17b93=jt z>{8V9hM>~z$y;Bni70X2{;(^S?NQ-(WapNa(*|R#8F-Xz3v&pWSxJup;h7G=S+OU@fqJ#_;m zY`fc+@;HXeZI8nEPjMK-^UGdb?Bo4{QlwlRiw#Q-@929NjdZVs;r0kyp$TkjuoD8o z=a-ga!I~&$z(XT|WH4RIsX4hXE+tul6|t02Q8z-lzt9EUUP5>M5iK$krd;yAHx7%@ zf6wwP?cqnfDOsZ9WXQmSTwr!}+11e~m%yCv$~T3^2jUh!MA)NF7Q2q1KRgdJG2UD& z-rQ`w@MHAt`P!}#>0vmLDDsG%peT#CaJDQ6t822c3^I)s?UG<;6UZ4R(M}{F4~k~c z5KKINe?h!*0Aj|yqL;g{kd0;X&DuR6%Xt%uMgQwE1|hyYF+cyQEh9h}F3XIZ9&vcP zNd~}&+UBk8OL^IGrBv%(`Rj!-VyKmk7DVbC z=(4xBvYt^^z+&`@rln`-jH>Dvtb_jwunq)gaD$#RPDNj;F>k=XzWdj&17+L)Dr`Hh zX#lK2t`J3JE2B>{u%t}8tf?IC*ZzOiU3nnX+uLW5v4j~yA$uvitR+T9){3qzgdq~L zMH2a@$X3acA>2qSDqPBq(3m7kXwOoJQlt`D<9*Jc+x`9C`~Ked&-eb*HS=B0IrE(7 zJm)!|&*zbQd@UpL$D@*G;mLf<=!qWp9WsvR;qQ{J66P}2toikY8)AC{C#6*@7DFzs zY!;Y8u7HC4Mql`5KGJQOM(H zvEK;{c|N1*Gq-)>+VH0#b>1N#edyQh$(bCA?H2achg@}+hZYC7XuO~wzicrtX&JMG zD=ZJ60zFzD`yG0Kvr`p_L1O~Iw|fU_;rFm)VEz7mL@7|NLK>g}|IIBJ^tl@1f3{Li@ 
zM)!9!5>8CYmAf%J_djAJF`7wbchy~gk=7>&MZ*wTLx3#6@P{>z0Y5bcVX2`Y;l|&c z{HJ-N0Nc2JtmL2`AeZh2EAz8V0AYw5Y56H@blyVmT?5!m{)$C-jC>aB`)$=>sS_WO z{k3*h7rg*z>aH30xl#VB$4HsKf8y18eaPJ&=o(yNX$4<=*r;aKI7wz(s;@O}tvU`F zRNH^*!}>n_4|)nQxwN5w^ClJQ18bZda|87Y{DbMdkNppjPHUrFi`EIL>vDzk8_zN) z0e(m44?=YJ!FP9|+F&dytO-IWcb`eke7y4$9Q(?@xpL%=d{IgLDg<~xts2O%#}iVy zBprb~$;i)JGI(=m`>`veOn;u>w+ooVoRQxudA|(?!%sfTS|D>qU}YlFk6k<>U=qZR zn)DlP6{oQlXvQ{H&k1fe9aO&X5+sWqgWJ=o$+bui%Lu6%Z}bJfzVfDKC99034Z^M& zS9^0|+nrLKlxiZ#GcgPd`1zz#yBGWojrtf^F~42zuzKK7VMlg;4Lwg6YH0>7*J~xR zHdYPvdq)dRs7DpFOifKbb3vvHg}&<8*uVb_}GnxY>K=W1Ms&nhc1{O0ls6E4A<+i!PIA5JB``W%1I z@=|M<)7?a(PA9d<7PkufgS9?MTwHOD&h>D>mFHaV>VQD(_q7Pxi!Z$O_PIoo52qMtkF^gu?80_k=x5Lxg?)DN2VZ`*A4*reMS_F#xOgsN>GokFG4 zG8qRLWdMmPUE!_q!g_yiYrJ_>sq4`nYV)PvrG4g(mDp8RFub^6&-y#_A&DU?hFaVW zUhb&cX%ds?wsVd1E_!oh-yWAy{s5ksHu)j{x%Rmo9H*u$VH=MGmG!bwp+LbK*X(g! zdn-R-w;Xk^XR!oYpmVWNK8QvP{g1||>`gv}Ho1Go(o2nsu%z(ID`>E8QeKhXC$H$eXC#t>dS|5vEh~TjklWJPexsKrYzdSMGP!RP@^DvfG_++Y85~i<7T*5!o`sb=t$lSRD-tmO7s1=sbSC0%Y`1xmuxg2k&bE zPpyn&01R8s9!^OGgq9i+MQZEs-un9JRX0tU>Y{h?2Q4WwV>*kZT;F9LxNgPv>2SYW zg`v=#J8X89lMXjnWmLcps$G_kR|i5R%&ko|i0h3?+)|Q_1Xr%Qs3R4HfmWotD5&@M zfIlZ71l~}|t+E)PIID-*#y_k;A{8f(QWCO59ZOE%#YXf zsG2D+D0c2{5EFOA!pg>oi0b2eOwnU;2m9|G|m`2#$Kl z_PqwIy0{=z!&L!2RTVdS9MWV>---1Yx}qusG{n+Gex5-rKr5Qn9){*8Kmpq$1D-R? 
z%@n5OwqvX0vi@Pr9`RRz0!seigEyiIbp~ok;nf}~z92}iARt~Jn;#vOn(|AO=!SX5YetZ5riOtM~(WMlxsv8YO6?s6p7;MB8oBy$R-U)BuJe` zHesc(8yLlvy!C>}XuMKLz(H#Exy6i+m@UCO=7=iPUXEbR#-=92{x-up44ACGg1C$i z(qX-j;TLx`t|G2s5<)DacEVQzfv8HfQj?{dkTJSE40~1@BnD}3qk`1iUPro!R7XDW*1l^No*j4ui{tQU{1{o#0heea7Hi-6jCmGP;&#}dlD`aXlkA`4^OIh{DrFB&m}3t_Q<)spOj&BOCvF87LG#+$2|Ggpqi)a2{9*LnQ z|F_R90!P^!S~Js4_7DXTfP1M`2Pori|I)+VlC>PKhd+3yhF9I-jyY?VJBAvsPky#f zqEAN3BR@^B9M#x%L8oHRnB8~C{@q5>vN6*25)PWowbx8B=hs=LM@WFny#rQKYTk4G z;7;6ejx+{?5N;cjit6C?{_y6ME{i>jKRBRx&7f17A$J>UH5W(7n*-Gzz$L@nSK(l0 zGh1tP4X+-=EN8>A|K#F2$b>5&@xv!4B(ssq@u)8tQEoHy^|jDsO-tNv>0Ht)jWJtY zK4v4c_a}Z`@4$HGq;j=ixxC%yFJq|=Hgh4~!C8`ZXLHK*(Z(17Kgp$EbaC`$+6cI4 zZON+m_xNDe+h^Y!)>;u-8$M>WOqls0X2>npK8+dU`|73HQF zUa*gy{ayiMm0fs}nGp=IbsfcBMgpU5bfXb@WX3%v0%&0EZ@#wdLz@O2~FB>=yPxB z^(MKh%yd(-*<;WoTQx6dxzX{L%L^c0DlL%quVL7tB(!dDBXhJF-IQ^Hu=Lgf3%IH= z-~Y{$H)->Lm+W56R7j=svHiL7&$rRUcWp zi3&Fz3}Y&CW;jF+8#HhO?YBQKUO$MgSK>YTjNu@KI2vI#73xlZ-@S18Zn*r_0CVx- z1@~x;cyecr-&8ZM7RJNHmAXu}%E%oGaz?F(lwr~6U}xyU)xqd$Dtqn6r5;(L9@(ru zGNZJ3gA&{@Jj!H$_pcst{y!WCY-t=8Y;SNc4k?^WPSnhcT?S4ATFt*7u=|wt(+*LT z+$IGhFL*C3bLrDl9HJmg-dp+Rhu`G*O-u|00>zE%3+eOogU0&aD_;TYPTU#DT1fw_ z+-Ym2as;f?G|-*KR@uVm#m#C>WtAJ7fH?Wn_(*RcULDTLE0bQZ)Tki7Nf2NYMJK!g zaAX$K3hM8@kW=6W

8vrFHP@K?g>t;1^P*zm48g1vKb7sLs9;Dd!^*{C{~3cIMyS z^N5JztvW=<`+_KPh-iki>DnDiZF95TPwUWt%)IIrsBwLPzHWvBS1go830Gr#hzsiw z$HyyhGFE?9l0iX&*gt7YKZIq%NVtGUr~k|sBtJg;C03ssL(-@A5FXXRE{1p+ROdNz zj)3e+e#kr6Cau6UAv%6};?3+kEe2T^Y3@xz)!Egc_IExSG-%X(VGGRI2BQGDc>;JI ziYMVHOB4iUDX6xG#~E?Tiit5`erY-Q@rYgZ0 zI|Di{qSa9BI~G*hb+aRqUXhNVl@8xlh=sTr< zL8+)`>r~ndq_(0pp4&^Oe}fcyENqZ89sE-K(Y6p(5O)_rayhSdKoFqHI}Uq_Z6#vG zKd_D$)q;g0aV$q-A65jum13vc)ua7HQb%ewUkfyq2H%v#3CjV#(iwIob# zEyI1ri{_FeeY8gdAma6$gcp28CLl@>BruS|HUK-gP517HgEmY23kmhe0eP2rrJjqZ z7D7Ta-cP1T2|EwMIFtUYMow4OB@I=B#$ElW4_l*5T##7bEAjcs2XYybH+sEW<`LO4 z#y{e%2$Emq1tO9Q4HC|xNCgW;zvQS-F!cKc#`%Z@k`}}{la;90*0G91kj#um^v{S7 z<}L|+?}2M3{MI|Nm&*l@HawWG$Uwx%f%A!5q-u=uGFCz~$@poIntUiXd5Jv7!mArV zn}^9)ZYAaf@arT~jt$fN++Kd1eRJGwUEt zLaSyY;UBVpfz{Nya*ySsU>@6R;3%mocU_#8P=iFQE^oI_UA z+jH<~aRp!k2kik6wDz2hX>J#)xd0fYI#dFn$sTCkZk0ewQ;EkXTpC3A0mbbUv(*`o zBcnT3*xE}vU{aHAMjYRuZwg)THA~P@UfXLYnL{KB+z94bR0Cg}1eOUagy+_GY)fld z&&HF9$~dRe2n%cP-Bmt{a)f7m?H~D{pN{iLJJ4pp9s`q5`%Z*=W(C-VQRomrYPxdt zzsB!S7d?a0unyAzp%d9KASsiQa>yQ&v^`$o<9;vRaYO9v#W7%9ZQo;%`EmOIoZ!ZV z*{5B`f{N9QI$d!u}kFWQnU#epQW zb=wu6>9QCCU{S*73BBAydaPP>9o7mYB{5f&%r*cTaN-AHmLKYd0tYRm;RYnZ$1Sd> zE3rn(at&9lC!5&`oR$Z<46I}==bl-SD4vNCv>&U_wcs!(_igh7&V9SaN-l#U?m(-p zCiI+DU-`5?BlRbA^4|+W`5Yhgxrtq&nHiJ9bDTv(r;V=wAGvd@tZqDWJlopmS!KM*E27l?Ja<5`im1cH44_(Qgz2Le{lVyBTI=J zWXg-V#;~ZRQKO$C&(a2jO9&K#j{&Qqg1iX)Mt0~_<|NBQimJUlF?JNn@+HvTxZ-5f zWAV8ahQIN!*IZ1aZ*{X!f`Tc%pgeHUSBSl-N9}Z?#s!Jz7~Yq6^O&UqdkH4sV@NiP z-cq!N(81@Z^JQ=TDNRn;)yVtjE6yKIEo>nYs|9ru4@t%n6;^wl`Gn$GrI^hLngZg2nw0;)a@~u}Efe zTY1dR4DTX(?$~79)mXk*Bk8`?^J_;WpC6426?HP)GPvnohP&IaF?|bN+o^~kR5D$6 zj=)tfP?PLaT-6_NF0DAYf0dgeGA++&OCrvFqe4W^d z)UJC35rXQ_Z$V$T@>PMlP2Wwz#`sn^+lf@;OpD7)OWmm`9eJWQp;Jrn^25U$28@=` zW%O^nWuZC;;DjuE9^CIlDbAnsr3JRjzUgUGOVcMKn#pZ7t`~OtDppnXd6twgZ4jYS zf$AH}&xX)G>zGcXtnvBjr&m@{?*-siV7o>9iM^-olPV(JLriBMq`ijf0}Q!Tlu%lp zSf7wq3Xb)#*vN#nf`M$jy*xTA(2WKpg@ZpbL7AUoG;OPPJI74s! 
z0b**YX0KV2kDhHI%YEkkhXYzb^kXULx7-KQ`|Rq-O$!bflE Date: Tue, 14 May 2024 15:53:32 -0400 Subject: [PATCH 20/21] Fix the simulator worker sys path (#2561) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fixed the simulator worker sys path. * fixed the get_new_sys_path() logic, added in unit test. * fixed isort. * Changed the _get_new_sys_path() implementation. --------- Co-authored-by: Yuan-Ting Hsieh (謝沅廷) --- .../fed/app/simulator/simulator_runner.py | 13 ++++-- .../fed/app/simulator/simulator_worker.py | 7 ++-- .../app/simulator/simulator_runner_test.py | 40 ++++++++++++++++++- 3 files changed, 50 insertions(+), 10 deletions(-) diff --git a/nvflare/private/fed/app/simulator/simulator_runner.py b/nvflare/private/fed/app/simulator/simulator_runner.py index 1d7057c295..f6d138a2a3 100644 --- a/nvflare/private/fed/app/simulator/simulator_runner.py +++ b/nvflare/private/fed/app/simulator/simulator_runner.py @@ -658,10 +658,8 @@ def do_one_task(self, client, num_of_threads, gpu, lock, timeout=60.0, task_name if gpu: command += " --gpu " + str(gpu) new_env = os.environ.copy() - if not sys.path[0]: - new_env["PYTHONPATH"] = os.pathsep.join(sys.path[1:]) - else: - new_env["PYTHONPATH"] = os.pathsep.join(sys.path) + new_env["PYTHONPATH"] = os.pathsep.join(self._get_new_sys_path()) + _ = subprocess.Popen(shlex.split(command, True), preexec_fn=os.setsid, env=new_env) conn = self._create_connection(open_port, timeout=timeout) @@ -696,6 +694,13 @@ def do_one_task(self, client, num_of_threads, gpu, lock, timeout=60.0, task_name return stop_run, next_client, end_run_client + def _get_new_sys_path(self): + new_sys_path = [] + for i in range(0, len(sys.path) - 1): + if sys.path[i]: + new_sys_path.append(sys.path[i]) + return new_sys_path + def _create_connection(self, open_port, timeout=60.0): conn = None start = time.time() diff --git a/nvflare/private/fed/app/simulator/simulator_worker.py 
b/nvflare/private/fed/app/simulator/simulator_worker.py index 4fc8b2d0e9..d0e3b8b371 100644 --- a/nvflare/private/fed/app/simulator/simulator_worker.py +++ b/nvflare/private/fed/app/simulator/simulator_worker.py @@ -146,9 +146,10 @@ def run(self, args, conn): client = self._create_client(args, build_ctx, deploy_args) - app_root = get_simulator_app_root(args.workspace, client.client_name) + app_root = get_simulator_app_root(args.simulator_root, client.client_name) app_custom_folder = os.path.join(app_root, "custom") - sys.path.append(app_custom_folder) + if os.path.isdir(app_custom_folder) and app_custom_folder not in sys.path: + sys.path.append(app_custom_folder) self.create_client_engine(client, deploy_args) @@ -235,8 +236,6 @@ def main(args): log_file = os.path.join(args.workspace, WorkspaceConstants.LOG_FILE_NAME) add_logfile_handler(log_file) - app_custom_folder = os.path.join(args.workspace, "custom") - sys.path.append(app_custom_folder) os.chdir(args.workspace) startup = os.path.join(args.workspace, WorkspaceConstants.STARTUP_FOLDER_NAME) os.makedirs(startup, exist_ok=True) diff --git a/tests/unit_test/private/fed/app/simulator/simulator_runner_test.py b/tests/unit_test/private/fed/app/simulator/simulator_runner_test.py index d9f0e55c8b..5b27740e03 100644 --- a/tests/unit_test/private/fed/app/simulator/simulator_runner_test.py +++ b/tests/unit_test/private/fed/app/simulator/simulator_runner_test.py @@ -11,19 +11,21 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- +import copy import os import shutil +import sys import threading import time import uuid +from argparse import Namespace from tempfile import TemporaryDirectory from unittest.mock import Mock, patch import pytest from nvflare.apis.fl_constant import FLContextKey, MachineStatus, WorkspaceConstants -from nvflare.private.fed.app.simulator.simulator_runner import SimulatorRunner +from nvflare.private.fed.app.simulator.simulator_runner import SimulatorClientRunner, SimulatorRunner from nvflare.private.fed.utils.fed_utils import split_gpus @@ -155,3 +157,37 @@ def test_start_server_app(self, mock_deploy, mock_admin, mock_register, mock_cel runner.server.logger = Mock() runner.server.engine.asked_to_stop = True + + def test_get_new_sys_path_with_empty(self): + args = Namespace(workspace="/tmp") + args.set = [] + runner = SimulatorClientRunner(args, [], None, None, None) + old_sys_path = copy.deepcopy(sys.path) + sys.path.insert(0, "") + sys.path.append("/temp/test") + new_sys_path = runner._get_new_sys_path() + assert old_sys_path == new_sys_path + sys.path = old_sys_path + + def test_get_new_sys_path_with_multiple_empty(self): + args = Namespace(workspace="/tmp") + args.set = [] + runner = SimulatorClientRunner(args, [], None, None, None) + old_sys_path = copy.deepcopy(sys.path) + sys.path.insert(0, "") + if len(sys.path) > 2: + sys.path.insert(2, "") + sys.path.append("/temp/test") + new_sys_path = runner._get_new_sys_path() + assert old_sys_path == new_sys_path + sys.path = old_sys_path + + def test_get_new_sys_path(self): + args = Namespace(workspace="/tmp") + args.set = [] + runner = SimulatorClientRunner(args, [], None, None, None) + old_sys_path = copy.deepcopy(sys.path) + sys.path.append("/temp/test") + new_sys_path = runner._get_new_sys_path() + assert old_sys_path == new_sys_path + sys.path = old_sys_path From a85f0bdae67ca8ebfab67131c48fa58c0c462ae6 Mon Sep 17 00:00:00 2001 From: Zhihong Zhang Date: Wed, 15 May 2024 13:45:24 -0400 Subject: [PATCH 21/21] 
ReliableMessage register is changed to register aux message. Added support for Mac with vertical --- nvflare/apis/utils/reliable_message.py | 10 +++- .../xgboost/histogram_based_v2/controller.py | 7 +-- .../histogram_based_v2/sec/client_handler.py | 46 ++++++++++++------- .../histogram_based_v2/sec/server_handler.py | 11 ++++- 4 files changed, 49 insertions(+), 25 deletions(-) diff --git a/nvflare/apis/utils/reliable_message.py b/nvflare/apis/utils/reliable_message.py index 802d2aff2e..8f84613b61 100644 --- a/nvflare/apis/utils/reliable_message.py +++ b/nvflare/apis/utils/reliable_message.py @@ -216,13 +216,14 @@ class ReliableMessage: _logger = logging.getLogger("ReliableMessage") @classmethod - def register_request_handler(cls, topic: str, handler_f): + def register_request_handler(cls, topic: str, handler_f, fl_ctx: FLContext): """Register a handler for the reliable message with this topic Args: topic: The topic of the reliable message handler_f: The callback function to handle the request in the form of handler_f(topic, request, fl_ctx) + fl_ctx: FL Context """ if not cls._enabled: raise RuntimeError("ReliableMessage is not enabled. 
Please call ReliableMessage.enable() to enable it") @@ -230,6 +231,13 @@ def register_request_handler(cls, topic: str, handler_f): raise TypeError(f"handler_f must be callable but {type(handler_f)}") cls._topic_to_handle[topic] = handler_f + # ReliableMessage also sends aux message directly if tx_timeout is too small + engine = fl_ctx.get_engine() + engine.register_aux_message_handler( + topic=topic, + message_handle_func=handler_f, + ) + @classmethod def _get_or_create_receiver(cls, topic: str, request: Shareable, handler_f) -> _RequestReceiver: tx_id = request.get_header(HEADER_TX_ID) diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/controller.py b/nvflare/app_opt/xgboost/histogram_based_v2/controller.py index f5f4fb4e7b..39370b7033 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/controller.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/controller.py @@ -148,18 +148,15 @@ def start_controller(self, fl_ctx: FLContext): adaptor.initialize(fl_ctx) self.adaptor = adaptor - engine = fl_ctx.get_engine() - engine.register_aux_message_handler( - topic=Constant.TOPIC_CLIENT_DONE, - message_handle_func=self._process_client_done, - ) ReliableMessage.register_request_handler( topic=Constant.TOPIC_XGB_REQUEST, handler_f=self._process_xgb_request, + fl_ctx=fl_ctx, ) ReliableMessage.register_request_handler( topic=Constant.TOPIC_CLIENT_DONE, handler_f=self._process_client_done, + fl_ctx=fl_ctx, ) def _trigger_stop(self, fl_ctx: FLContext, error=None): diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/sec/client_handler.py b/nvflare/app_opt/xgboost/histogram_based_v2/sec/client_handler.py index 889f88627d..38cb7e8644 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/sec/client_handler.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/sec/client_handler.py @@ -14,15 +14,10 @@ import os import time -import tenseal as ts -from tenseal.tensors.ckksvector import CKKSVector - from nvflare.apis.event_type import EventType from nvflare.apis.fl_component 
import FLComponent from nvflare.apis.fl_context import FLContext from nvflare.apis.shareable import Shareable -from nvflare.app_opt.he import decomposers -from nvflare.app_opt.he.homomorphic_encrypt import load_tenseal_context_from_workspace from nvflare.app_opt.xgboost.histogram_based_v2.aggr import Aggregator from nvflare.app_opt.xgboost.histogram_based_v2.defs import Constant from nvflare.app_opt.xgboost.histogram_based_v2.mock_he.adder import Adder @@ -45,6 +40,17 @@ ) from nvflare.app_opt.xgboost.histogram_based_v2.sec.sec_handler import SecurityHandler +try: + import tenseal as ts + from tenseal.tensors.ckksvector import CKKSVector + + from nvflare.app_opt.he import decomposers + from nvflare.app_opt.he.homomorphic_encrypt import load_tenseal_context_from_workspace + + tenseal_imported = True +except Exception: + tenseal_imported = False + class ClientSecurityHandler(SecurityHandler): def __init__(self, key_length=1024, num_workers=10, tenseal_context_file="client_context.tenseal"): @@ -66,7 +72,8 @@ def __init__(self, key_length=1024, num_workers=10, tenseal_context_file="client self.tenseal_context_file = tenseal_context_file self.tenseal_context = None - decomposers.register() + if tenseal_imported: + decomposers.register() def _process_before_broadcast(self, fl_ctx: FLContext): root = fl_ctx.get_prop(Constant.PARAM_KEY_ROOT) @@ -209,7 +216,9 @@ def _process_before_all_gather_v_vertical(self, fl_ctx: FLContext): def _process_before_all_gather_v_horizontal(self, fl_ctx: FLContext): if not self.tenseal_context: - return self._abort("Horizontal secure XGBoost not supported due to missing context", fl_ctx) + return self._abort( + "Horizontal secure XGBoost not supported due to missing context or missing module", fl_ctx + ) buffer = fl_ctx.get_prop(Constant.PARAM_KEY_SEND_BUF) histograms = self.data_converter.decode_histograms(buffer, fl_ctx) @@ -240,13 +249,13 @@ def _do_aggregation(self, groups, fl_ctx: FLContext): fid, masks, num_bins = fm if not groups: gid 
= 0 - GH_list = self.aggregator.aggregate(self.clear_ghs, masks, num_bins, None) - aggr_result.append((fid, gid, GH_list)) + gh_list = self.aggregator.aggregate(self.clear_ghs, masks, num_bins, None) + aggr_result.append((fid, gid, gh_list)) else: for grp in groups: gid, sample_ids = grp - GH_list = self.aggregator.aggregate(self.clear_ghs, masks, num_bins, sample_ids) - aggr_result.append((fid, gid, GH_list)) + gh_list = self.aggregator.aggregate(self.clear_ghs, masks, num_bins, sample_ids) + aggr_result.append((fid, gid, gh_list)) self.info(fl_ctx, f"aggregated clear-text in {time.time()-t} secs") self.aggr_result = aggr_result @@ -323,15 +332,15 @@ def _process_after_all_gather_v_vertical(self, fl_ctx: FLContext): for a in rr: fid, gid, combined_numbers = a - GH_list = [] + gh_list = [] for n in combined_numbers: - GH_list.append(split(n)) + gh_list.append(split(n)) grp_result = combined_result.get(gid) if not grp_result: grp_result = {} combined_result[gid] = grp_result - grp_result[fid] = FeatureAggregationResult(fid, GH_list) - self.info(fl_ctx, f"aggr from rank {r}: {fid=} {gid=} bins={len(GH_list)}") + grp_result[fid] = FeatureAggregationResult(fid, gh_list) + self.info(fl_ctx, f"aggr from rank {r}: {fid=} {gid=} bins={len(gh_list)}") final_result = {} for gid, far in combined_result.items(): @@ -362,9 +371,12 @@ def handle_event(self, event_type: str, fl_ctx: FLContext): self.decrypter = Decrypter(self.private_key, self.num_workers) self.adder = Adder(self.num_workers) try: - self.tenseal_context = load_tenseal_context_from_workspace(self.tenseal_context_file, fl_ctx) + if tenseal_imported: + self.tenseal_context = load_tenseal_context_from_workspace(self.tenseal_context_file, fl_ctx) + else: + self.debug(fl_ctx, "Tenseal module not loaded, horizontal secure XGBoost is not supported") except Exception as ex: - self.info(fl_ctx, f"Can't load tenseal context, horizontal secure XGBoost is not supported: {ex}") + self.debug(fl_ctx, f"Can't load tenseal 
context, horizontal secure XGBoost is not supported: {ex}") self.tenseal_context = None elif event_type == EventType.END_RUN: self.tenseal_context = None diff --git a/nvflare/app_opt/xgboost/histogram_based_v2/sec/server_handler.py b/nvflare/app_opt/xgboost/histogram_based_v2/sec/server_handler.py index 119d0f5570..53e936c7d4 100644 --- a/nvflare/app_opt/xgboost/histogram_based_v2/sec/server_handler.py +++ b/nvflare/app_opt/xgboost/histogram_based_v2/sec/server_handler.py @@ -17,10 +17,16 @@ from nvflare.apis.fl_component import FLComponent from nvflare.apis.fl_context import FLContext from nvflare.apis.shareable import Shareable -from nvflare.app_opt.he import decomposers from nvflare.app_opt.xgboost.histogram_based_v2.defs import Constant from nvflare.app_opt.xgboost.histogram_based_v2.sec.sec_handler import SecurityHandler +try: + from nvflare.app_opt.he import decomposers + + tenseal_imported = True +except Exception: + tenseal_imported = False + class ServerSecurityHandler(SecurityHandler): def __init__(self): @@ -34,7 +40,8 @@ def __init__(self): self.aggr_result_to_send = None self.aggr_result_lock = threading.Lock() - decomposers.register() + if tenseal_imported: + decomposers.register() def _process_before_broadcast(self, fl_ctx: FLContext): self.info(fl_ctx, "start")