This repository has been archived by the owner on May 23, 2024. It is now read-only.

Fix: Update universal scripts path for MME #215

Status: Open · wants to merge 3 commits into base: master
5 changes: 4 additions & 1 deletion docker/build_artifacts/sagemaker/python_service.py
@@ -26,7 +26,7 @@
 import tfs_utils

 SAGEMAKER_MULTI_MODEL_ENABLED = os.environ.get("SAGEMAKER_MULTI_MODEL", "false").lower() == "true"
-MODEL_DIR = "models" if SAGEMAKER_MULTI_MODEL_ENABLED else "model"
+MODEL_DIR = "" if SAGEMAKER_MULTI_MODEL_ENABLED else "model"
 INFERENCE_SCRIPT_PATH = f"/opt/ml/{MODEL_DIR}/code/inference.py"

 SAGEMAKER_BATCHING_ENABLED = os.environ.get("SAGEMAKER_TFS_ENABLE_BATCHING", "false").lower()
@@ -78,12 +78,15 @@ def __init__(self):
         self._setup_channel(grpc_port)

         if os.path.exists(INFERENCE_SCRIPT_PATH):
             log.info("Inference script found at path {}".format(INFERENCE_SCRIPT_PATH))
+            log.info("Inference script exists, importing handlers.")
+            # Single-Model Mode & Multi-Model Mode both use one inference.py
             self._handler, self._input_handler, self._output_handler = self._import_handlers()
             self._handlers = self._make_handler(
                 self._handler, self._input_handler, self._output_handler
             )
         else:
+            log.info("Inference script does not exist, using default handlers.")
             self._handlers = default_handler

         self._tfs_enable_batching = SAGEMAKER_BATCHING_ENABLED == "true"
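Note on the MODEL_DIR change: with SAGEMAKER_MULTI_MODEL enabled, MODEL_DIR is now empty, so INFERENCE_SCRIPT_PATH contains a doubled slash ("/opt/ml//code/inference.py"). On POSIX filesystems an internal "//" resolves the same as "/", so under MME the script is effectively looked up at /opt/ml/code/inference.py instead of the old /opt/ml/models/code/inference.py. A minimal illustrative sketch, not code from the PR:

```python
import os

# Sketch: how INFERENCE_SCRIPT_PATH resolves in each mode after this change.
# Assumes POSIX semantics, where an internal "//" is equivalent to "/".
for multi_model_enabled in (True, False):
    model_dir = "" if multi_model_enabled else "model"
    path = f"/opt/ml/{model_dir}/code/inference.py"
    print(multi_model_enabled, path, "->", os.path.normpath(path))

# MME:    /opt/ml//code/inference.py     -> /opt/ml/code/inference.py
# single: /opt/ml/model/code/inference.py (unchanged)
```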
7 changes: 3 additions & 4 deletions docker/build_artifacts/sagemaker/serve.py
@@ -28,9 +28,8 @@
 JS_INVOCATIONS = "js_content tensorflowServing.invocations"
 GUNICORN_PING = "proxy_pass http://gunicorn_upstream/ping"
 GUNICORN_INVOCATIONS = "proxy_pass http://gunicorn_upstream/invocations"
-MULTI_MODEL = "s" if os.environ.get("SAGEMAKER_MULTI_MODEL", "False").lower() == "true" else ""
-MODEL_DIR = f"model{MULTI_MODEL}"
-CODE_DIR = "/opt/ml/{}/code".format(MODEL_DIR)
+MODEL_DIR = "" if os.environ.get("SAGEMAKER_MULTI_MODEL", "False").lower() == "true" else "model"
+CODE_DIR = f"/opt/ml/{MODEL_DIR}/code"
 PYTHON_LIB_PATH = os.path.join(CODE_DIR, "lib")
 REQUIREMENTS_PATH = os.path.join(CODE_DIR, "requirements.txt")
 INFERENCE_PATH = os.path.join(CODE_DIR, "inference.py")
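This hunk makes serve.py derive the code directory the same way python_service.py now does, so both files agree on where customer scripts live in each mode. A hedged sanity check; the helper function is local to this sketch, not part of the PR:

```python
import os

def code_dir(multi_model: bool) -> str:
    # Mirrors the constant logic now shared by serve.py and python_service.py.
    model_dir = "" if multi_model else "model"
    return os.path.normpath(f"/opt/ml/{model_dir}/code")

assert code_dir(multi_model=True) == "/opt/ml/code"         # MME: universal path
assert code_dir(multi_model=False) == "/opt/ml/model/code"  # single-model mode
```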
@@ -242,7 +241,7 @@ def _download_scripts(self, bucket, prefix):
         paginator = client.get_paginator("list_objects")
         for result in paginator.paginate(Bucket=bucket, Delimiter="/", Prefix=prefix):
             for file in result.get("Contents", []):
-                destination = os.path.join(CODE_DIR, file.get("Key"))
+                destination = os.path.join(CODE_DIR, file.get("Key").split("/")[-1])
                 if not os.path.exists(os.path.dirname(destination)):
                     os.makedirs(os.path.dirname(destination))
                 resource.meta.client.download_file(bucket, file.get("Key"), destination)
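Why the key split matters: joining CODE_DIR with the full S3 key re-creates the key's prefix directories under CODE_DIR, so an inference.py uploaded under a prefix would land at a nested path that INFERENCE_PATH never points to. Keeping only the last path segment flattens the download into CODE_DIR. A sketch with a hypothetical key, not taken from the PR:

```python
import os

CODE_DIR = "/opt/ml/code"
key = "customer-prefix/code/inference.py"  # hypothetical S3 object key

old_destination = os.path.join(CODE_DIR, key)
new_destination = os.path.join(CODE_DIR, key.split("/")[-1])

print(old_destination)  # /opt/ml/code/customer-prefix/code/inference.py
print(new_destination)  # /opt/ml/code/inference.py
```

os.path.basename(file.get("Key")) would be an equivalent and arguably clearer spelling. Note also that because paginate is called with Delimiter="/", objects under deeper sub-prefixes come back as CommonPrefixes rather than Contents, so the flattening mostly strips the caller's prefix rather than collapsing a real directory tree.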