
Commit

remove all ensemble_model references, replace with executor_model
jperez999 committed Dec 16, 2022
1 parent 0f713ba commit 86da5f9
Showing 9 changed files with 13 additions and 14 deletions.
@@ -412,7 +412,7 @@
"]\n",
"# send request to tritonserver\n",
"with grpcclient.InferenceServerClient(\"localhost:8001\") as client:\n",
" response = client.infer(\"ensemble_model\", inputs, outputs=outputs)"
" response = client.infer(\"executor_model\", inputs, outputs=outputs)"
]
},
{
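Only the client.infer call changes in these notebook cells. For context, a minimal sketch of the full gRPC request against the renamed top-level model — the column name, dtype, shape, and output name below are placeholders, not values taken from the notebooks:

import numpy as np
import tritonclient.grpc as grpcclient

# Hypothetical request batch; real column names and dtypes come from the ensemble's input schema.
batch = {"user_id": np.array([[1], [2]], dtype=np.int32)}

inputs = []
for name, array in batch.items():
    infer_input = grpcclient.InferInput(name, list(array.shape), "INT32")  # Triton dtype string must match the model config
    infer_input.set_data_from_numpy(array)
    inputs.append(infer_input)

outputs = [grpcclient.InferRequestedOutput("output_1")]  # request outputs by name

with grpcclient.InferenceServerClient("localhost:8001") as client:
    # The top-level Triton model is now "executor_model" instead of "ensemble_model".
    response = client.infer("executor_model", inputs, outputs=outputs)
    print(response.as_numpy("output_1"))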
@@ -455,7 +455,7 @@
"]\n",
"# send request to tritonserver\n",
"with grpcclient.InferenceServerClient(\"localhost:8001\") as client:\n",
" response = client.infer(\"ensemble_model\", inputs, outputs=outputs)"
" response = client.infer(\"executor_model\", inputs, outputs=outputs)"
]
},
{
2 changes: 1 addition & 1 deletion examples/Serving-Ranking-Models-With-Merlin-Systems.ipynb
@@ -1074,7 +1074,7 @@
"source": [
"# send request to tritonserver\n",
"with grpcclient.InferenceServerClient(\"localhost:8001\") as client:\n",
" response = client.infer(\"ensemble_model\", inputs, request_id=\"1\", outputs=outputs)"
" response = client.infer(\"executor_model\", inputs, request_id=\"1\", outputs=outputs)"
]
},
{
2 changes: 1 addition & 1 deletion merlin/systems/triton/utils.py
@@ -169,7 +169,7 @@ def send_triton_request(
request_id : str, optional
The id of the inference request, by default "1"
triton_model : str, optional
-Name of the model to run inputs through, by default "ensemble_model"
+Name of the model to run inputs through, by default "executor_model"
Returns
-------
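The docstring edit above reflects the new default for the triton_model argument of send_triton_request. A hedged usage sketch — the positional argument order (request schema, request DataFrame, requested output columns) is an assumption inferred from how the tests below call the Triton helpers, so verify it against the signature in merlin/systems/triton/utils.py:

import numpy as np
import pandas as pd
from merlin.schema import ColumnSchema, Schema
from merlin.systems.triton.utils import send_triton_request

# Hypothetical request data; column and output names are placeholders.
request_df = pd.DataFrame({"user_id": [1, 2, 3]})
request_schema = Schema([ColumnSchema("user_id", dtype=np.int64)])

response = send_triton_request(
    request_schema,
    request_df,
    ["ordered_ids"],                # requested output columns (placeholder name)
    request_id="1",                 # documented default
    triton_model="executor_model",  # now the default when omitted
)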
2 changes: 1 addition & 1 deletion tests/integration/t4r/test_pytorch_backend.py
@@ -117,7 +117,7 @@ def test_serve_t4r_with_torchscript(tmpdir):
# Send request to Triton and check response
# ===========================================
triton_response = run_ensemble_on_tritonserver(
-tmpdir, input_schema, df, output_schema.column_names, "ensemble_model"
+tmpdir, input_schema, df, output_schema.column_names, "executor_model"
)

assert triton_response
@@ -112,6 +112,6 @@ def test_example_04_exporting_ranking_models(tb):
)

response = run_ensemble_on_tritonserver(
"/tmp/data/ensemble/", schema.without(["click"]), batch, outputs, "ensemble_model"
"/tmp/data/ensemble/", schema.without(["click"]), batch, outputs, "executor_model"
)
assert len(response["click/binary_classification_task"]) == 3
4 changes: 2 additions & 2 deletions tests/unit/systems/dag/ops/test_ops.py
@@ -51,7 +51,7 @@ def test_softmax_sampling(tmpdir):
ens_config, node_configs = ensemble.export(tmpdir)

response = run_ensemble_on_tritonserver(
-tmpdir, request_schema, request_df, ensemble.output_schema.column_names, "ensemble_model"
+tmpdir, request_schema, request_df, ensemble.output_schema.column_names, "executor_model"
)
assert response is not None
assert len(response["ordered_ids"]) == 10
@@ -83,7 +83,7 @@ def test_filter_candidates_with_triton(tmpdir):
ens_config, node_configs = ensemble.export(tmpdir)

response = run_ensemble_on_tritonserver(
-tmpdir, request_schema, request_df, ensemble.output_schema.column_names, "ensemble_model"
+tmpdir, request_schema, request_df, ensemble.output_schema.column_names, "executor_model"
)

assert response is not None
1 change: 0 additions & 1 deletion tests/unit/systems/dag/runtimes/triton/test_triton.py
@@ -38,7 +38,6 @@
@pytest.mark.parametrize(
["runtime", "model_name", "expected_model_name"],
[
-(None, None, "ensemble_model"),
(TritonExecutorRuntime(), None, "executor_model"),
(TritonExecutorRuntime(), "triton_model", "triton_model"),
],
10 changes: 5 additions & 5 deletions tests/unit/systems/ops/tf/test_ensemble.py
@@ -96,7 +96,7 @@ def test_workflow_tf_e2e_config_verification(tmpdir, dataset, engine):
parsed = text_format.Parse(raw_config, config)

# The config file contents are correct
-assert parsed.name == "ensemble_model"
+assert parsed.name == "executor_model"
assert parsed.platform == "ensemble"
assert hasattr(parsed, "ensemble_scheduling")

@@ -145,7 +145,7 @@ def test_workflow_tf_e2e_multi_op_run(tmpdir, dataset, engine):

# Creating Triton Ensemble Config
ensemble_config, nodes_config = triton_ens.export(str(tmpdir))
-config_path = tmpdir / "ensemble_model" / "config.pbtxt"
+config_path = tmpdir / "executor_model" / "config.pbtxt"

# Checking Triton Ensemble Config
with open(config_path, "rb") as f:
@@ -154,7 +154,7 @@ def test_workflow_tf_e2e_multi_op_run(tmpdir, dataset, engine):
parsed = text_format.Parse(raw_config, config)

# The config file contents are correct
-assert parsed.name == "ensemble_model"
+assert parsed.name == "executor_model"
assert parsed.platform == "ensemble"
assert hasattr(parsed, "ensemble_scheduling")

@@ -202,7 +202,7 @@ def test_workflow_tf_python_wrapper(tmpdir, dataset, engine, python):

# Creating Triton Ensemble Config
ensemble_config, nodes_config = triton_ens.export(str(tmpdir))
-config_path = tmpdir / "ensemble_model" / "config.pbtxt"
+config_path = tmpdir / "executor_model" / "config.pbtxt"

# Checking Triton Ensemble Config
with open(config_path, "rb") as f:
@@ -211,7 +211,7 @@ def test_workflow_tf_python_wrapper(tmpdir, dataset, engine, python):
parsed = text_format.Parse(raw_config, config)

# The config file contents are correct
-assert parsed.name == "ensemble_model"
+assert parsed.name == "executor_model"
assert parsed.platform == "ensemble"
assert hasattr(parsed, "ensemble_scheduling")

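These test assertions read the exported config.pbtxt and check that the top-level config is now named executor_model. A minimal sketch of that check, assuming the ModelConfig message bundled with tritonclient (the imports sit outside the hunks shown here, and the export directory below is hypothetical):

import tritonclient.grpc.model_config_pb2 as model_config
from google.protobuf import text_format

config_path = "/tmp/ensemble_export/executor_model/config.pbtxt"  # hypothetical export directory

with open(config_path, "rb") as f:
    raw_config = f.read()

config = model_config.ModelConfig()
parsed = text_format.Parse(raw_config, config)

# The renamed top-level model still uses Triton's "ensemble" platform for scheduling.
assert parsed.name == "executor_model"
assert parsed.platform == "ensemble"
assert hasattr(parsed, "ensemble_scheduling")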
