From 95a8f7de80877e3e0623a87acc40194016f68c3c Mon Sep 17 00:00:00 2001
From: Satish Pasumarthi
Date: Thu, 16 Dec 2021 19:36:17 -0800
Subject: [PATCH] Fix: Update universal scripts path for MME

---
 docker/build_artifacts/sagemaker/python_service.py | 5 ++++-
 docker/build_artifacts/sagemaker/serve.py          | 7 +++----
 2 files changed, 7 insertions(+), 5 deletions(-)

diff --git a/docker/build_artifacts/sagemaker/python_service.py b/docker/build_artifacts/sagemaker/python_service.py
index e294e5fc..f5b63c0a 100644
--- a/docker/build_artifacts/sagemaker/python_service.py
+++ b/docker/build_artifacts/sagemaker/python_service.py
@@ -26,7 +26,7 @@
 import tfs_utils
 
 SAGEMAKER_MULTI_MODEL_ENABLED = os.environ.get("SAGEMAKER_MULTI_MODEL", "false").lower() == "true"
-MODEL_DIR = "models" if SAGEMAKER_MULTI_MODEL_ENABLED else "model"
+MODEL_DIR = "" if SAGEMAKER_MULTI_MODEL_ENABLED else "model"
 INFERENCE_SCRIPT_PATH = f"/opt/ml/{MODEL_DIR}/code/inference.py"
 
 SAGEMAKER_BATCHING_ENABLED = os.environ.get("SAGEMAKER_TFS_ENABLE_BATCHING", "false").lower()
@@ -79,12 +79,15 @@ def __init__(self):
         self._setup_channel(grpc_port)
 
         if os.path.exists(INFERENCE_SCRIPT_PATH):
+            log.info("Inference script found at path {}".format(INFERENCE_SCRIPT_PATH))
+            log.info("Inference script exists, importing handlers.")
             # Single-Model Mode & Multi-Model Mode both use one inference.py
             self._handler, self._input_handler, self._output_handler = self._import_handlers()
             self._handlers = self._make_handler(self._handler,
                                                 self._input_handler,
                                                 self._output_handler)
         else:
+            log.info("Inference script does not exist, using default handlers.")
             self._handlers = default_handler
 
         self._tfs_enable_batching = SAGEMAKER_BATCHING_ENABLED == "true"
diff --git a/docker/build_artifacts/sagemaker/serve.py b/docker/build_artifacts/sagemaker/serve.py
index f8b87614..5f0d9c42 100644
--- a/docker/build_artifacts/sagemaker/serve.py
+++ b/docker/build_artifacts/sagemaker/serve.py
@@ -28,9 +28,8 @@
 JS_INVOCATIONS = "js_content invocations"
 GUNICORN_PING = "proxy_pass http://gunicorn_upstream/ping"
 GUNICORN_INVOCATIONS = "proxy_pass http://gunicorn_upstream/invocations"
-MULTI_MODEL = "s" if os.environ.get("SAGEMAKER_MULTI_MODEL", "False").lower() == "true" else ""
-MODEL_DIR = f"model{MULTI_MODEL}"
-CODE_DIR = "/opt/ml/{}/code".format(MODEL_DIR)
+MODEL_DIR = "" if os.environ.get("SAGEMAKER_MULTI_MODEL", "False").lower() == "true" else "model"
+CODE_DIR = f"/opt/ml/{MODEL_DIR}/code"
 PYTHON_LIB_PATH = os.path.join(CODE_DIR, "lib")
 REQUIREMENTS_PATH = os.path.join(CODE_DIR, "requirements.txt")
 INFERENCE_PATH = os.path.join(CODE_DIR, "inference.py")
@@ -226,7 +225,7 @@ def _download_scripts(self, bucket, prefix):
         paginator = client.get_paginator("list_objects")
         for result in paginator.paginate(Bucket=bucket, Delimiter="/", Prefix=prefix):
             for file in result.get("Contents", []):
-                destination = os.path.join(CODE_DIR, file.get("Key"))
+                destination = os.path.join(CODE_DIR, file.get("Key").split("/")[-1])
                 if not os.path.exists(os.path.dirname(destination)):
                     os.makedirs(os.path.dirname(destination))
                 resource.meta.client.download_file(bucket, file.get("Key"), destination)
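
Note (illustrative, not part of the patch): taken together, the two files now
agree that multi-model endpoints load universal scripts from /opt/ml/code
(MODEL_DIR is empty, so the f-string yields "/opt/ml//code", which POSIX path
resolution treats the same as "/opt/ml/code"), and _download_scripts flattens
each S3 key to its basename so the script lands directly in CODE_DIR. The
sketch below is a minimal rendering of that behavior; the helper names
resolve_code_dir and destination_for, and the example S3 key, are made up for
illustration and do not appear in the container code. Only the expressions
inside the helpers mirror the diff.

    import os

    def resolve_code_dir(multi_model: str) -> str:
        # After this patch: MODEL_DIR is "" under MME, "model" otherwise.
        # normpath only makes the collapsed double slash visible here; the
        # container code leaves "/opt/ml//code" as-is, which the kernel
        # resolves identically.
        model_dir = "" if multi_model.lower() == "true" else "model"
        return os.path.normpath(f"/opt/ml/{model_dir}/code")

    def destination_for(code_dir: str, s3_key: str) -> str:
        # _download_scripts after this patch: keep only the basename of
        # the key, so any S3 prefix is stripped instead of being
        # recreated as subdirectories under CODE_DIR.
        return os.path.join(code_dir, s3_key.split("/")[-1])

    assert resolve_code_dir("true") == "/opt/ml/code"
    assert resolve_code_dir("false") == "/opt/ml/model/code"
    assert destination_for("/opt/ml/code", "some-prefix/code/inference.py") \
        == "/opt/ml/code/inference.py"

The basename flattening presumably matters because INFERENCE_PATH expects
inference.py at the top of CODE_DIR: with the old join, a key such as
"some-prefix/inference.py" would have been written to
CODE_DIR/some-prefix/inference.py and never found.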