Skip to content

Commit

Permalink
merge master
Browse files Browse the repository at this point in the history
  • Loading branch information
Xiang Wang authored and Xiang Wang committed Jun 18, 2024
2 parents ea9320b + 4bbce76 commit 1275034
Show file tree
Hide file tree
Showing 13 changed files with 227 additions and 412 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/CI_federate.yml
Original file line number Diff line number Diff line change
Expand Up @@ -39,4 +39,4 @@ jobs:
run: |
cd python
bash tests/test_federate/test_federate.sh
echo "Federate example has been tested successfully!"
echo "Federate example has been tested successfully!"
42 changes: 42 additions & 0 deletions .github/workflows/CI_serving.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
# CI workflow for the model-serving examples.
# Runs on a schedule, on PRs to master / dev/v0.7.0, and on manual dispatch.

name: CI-serving

# Controls when the workflow will run
on:
  # Daily scheduled build at 10:00 UTC (cron fields: min hour day month weekday).
  schedule:
    - cron: "0 10 */1 * *"
  pull_request:
    branches: [ master, dev/v0.7.0 ]

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
  serving:
    # NOTE(review): runner selection keys on the *Python version* label, not the
    # OS — this only works with self-hosted runners labeled "python3.8" etc.
    # Confirm this is intended rather than ${{ matrix.os }}.
    runs-on: ${{ matrix.python-version }}
    strategy:
      # Run every matrix combination to completion even if one fails.
      fail-fast: false
      matrix:
        os: [ Linux ]
        arch: [ X64 ]
        python-version: [ 'python3.8', 'python3.9', 'python3.10', 'python3.11' ]

    steps:
      - name: Checkout fedml
        uses: actions/checkout@v3

      - name: pip_install
        run: |
          cd python
          pip install -e ./

      - name: serving_job_in_test_env
        run: |
          cd python
          echo "Serving example has been tested successfully!"
          # python tests/test_launch/test_launch.py
1 change: 0 additions & 1 deletion .github/workflows/CI_train.yml
Original file line number Diff line number Diff line change
Expand Up @@ -39,5 +39,4 @@ jobs:
cd python
python tests/test_train/test_train.py
echo "Train example has been tested successfully!"
# cd examples/federate/quick_start/beehive
18 changes: 18 additions & 0 deletions python/fedml/computing/scheduler/comm_utils/network_util.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
import os
from fedml.computing.scheduler.model_scheduler.device_client_constants import ClientConstants


def return_this_device_connectivity_type() -> str:
    """Report how this worker should be reached for inference traffic.

    Reads the connection-type environment variable (case-insensitively) and
    returns it when it names a supported mode; otherwise falls back to the
    project default.

    Returns:
        One of "http", "http_proxy", or "mqtt".
    """
    recognized_modes = (
        ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP,
        ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP_PROXY,
        ClientConstants.WORKER_CONNECTIVITY_TYPE_MQTT,
    )
    # Normalize the operator-supplied value so "HTTP" and "http" both match.
    configured = os.getenv(ClientConstants.ENV_CONNECTION_TYPE_KEY, "").lower()
    if configured not in recognized_modes:
        # Unset or unrecognized value: use the default (plain HTTP).
        return ClientConstants.WORKER_CONNECTIVITY_TYPE_DEFAULT
    return configured
Original file line number Diff line number Diff line change
Expand Up @@ -97,6 +97,12 @@ class ClientConstants(object):
INFERENCE_INFERENCE_SERVER_VERSION = "v2"
INFERENCE_REQUEST_TIMEOUT = 30

# Environment variable an operator sets to pin this worker's connectivity mode.
ENV_CONNECTION_TYPE_KEY = "FEDML_CONNECTION_TYPE"
# Supported connectivity modes for reaching a deployment worker.
WORKER_CONNECTIVITY_TYPE_HTTP = "http"
WORKER_CONNECTIVITY_TYPE_HTTP_PROXY = "http_proxy"
WORKER_CONNECTIVITY_TYPE_MQTT = "mqtt"
# Mode used when the env var is unset or unrecognized (see network_util).
WORKER_CONNECTIVITY_TYPE_DEFAULT = WORKER_CONNECTIVITY_TYPE_HTTP

MSG_MODELOPS_DEPLOYMENT_STATUS_INITIALIZING = "INITIALIZING"
MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYING = "DEPLOYING"
MSG_MODELOPS_DEPLOYMENT_STATUS_INFERRING = "INFERRING"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -344,9 +344,13 @@ def get_result_item_info(self, result_item):
result_payload = result_item_json["result"]
return device_id, replica_no, result_payload

def get_idle_device(self, end_point_id, end_point_name,
model_name, model_version,
check_end_point_status=True, limit_specific_model_version=False):
def get_idle_device(self,
end_point_id,
end_point_name,
model_name,
model_version,
check_end_point_status=True,
limit_specific_model_version=False):
# Deprecated the model status logic, query directly from the deployment result list
idle_device_list = list()

Expand All @@ -365,7 +369,7 @@ def get_idle_device(self, end_point_id, end_point_name,
if "model_status" in result_payload and result_payload["model_status"] == "DEPLOYED":
idle_device_list.append({"device_id": device_id, "end_point_id": end_point_id})

logging.info(f"{len(idle_device_list)} devices this model has on it: {idle_device_list}")
logging.debug(f"{len(idle_device_list)} devices this model has on it: {idle_device_list}")

if len(idle_device_list) <= 0:
return None, None
Expand Down Expand Up @@ -394,7 +398,7 @@ def get_idle_device(self, end_point_id, end_point_name,
logging.info("Inference Device selection Failed:")
logging.info(e)

logging.info(f"Using Round Robin, the device index is {selected_device_index}")
logging.debug(f"Using Round Robin, the device index is {selected_device_index}")
idle_device_dict = idle_device_list[selected_device_index]

# Note that within the same endpoint_id, there could be one device with multiple same models
Expand All @@ -407,7 +411,7 @@ def get_idle_device(self, end_point_id, end_point_name,
# Find deployment result from the target idle device.
try:
for result_item in result_list:
logging.info("enter the for loop")
logging.debug("enter the for loop")
device_id, _, result_payload = self.get_result_item_info(result_item)
found_end_point_id = result_payload["end_point_id"]
found_end_point_name = result_payload["end_point_name"]
Expand All @@ -421,7 +425,7 @@ def get_idle_device(self, end_point_id, end_point_name,
if same_model_device_rank > 0:
same_model_device_rank -= 1
continue
logging.info(f"The chosen device is {device_id}")
logging.debug(f"The chosen device is {device_id}")
return result_payload, device_id
except Exception as e:
logging.info(str(e))
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,6 @@

from fedml.core.common.singleton import Singleton
from fedml.computing.scheduler.model_scheduler.modelops_configs import ModelOpsConfigs
from fedml.computing.scheduler.model_scheduler.device_model_deployment import get_model_info
from fedml.computing.scheduler.model_scheduler.device_server_constants import ServerConstants
from fedml.computing.scheduler.model_scheduler.device_model_object import FedMLModelList, FedMLEndpointDetail
from fedml.computing.scheduler.model_scheduler.device_client_constants import ClientConstants
Expand Down
Loading

0 comments on commit 1275034

Please sign in to comment.