diff --git a/examples/getting-started-movielens/04-Triton-Inference-with-TF.ipynb b/examples/getting-started-movielens/04-Triton-Inference-with-TF.ipynb
index d4a3d881c..ab2cc2960 100644
--- a/examples/getting-started-movielens/04-Triton-Inference-with-TF.ipynb
+++ b/examples/getting-started-movielens/04-Triton-Inference-with-TF.ipynb
@@ -327,9 +327,10 @@
     }
    ],
    "source": [
-    "batch = df_lib.read_parquet(\n",
-    "    os.path.join(INPUT_DATA_DIR, \"valid.parquet\"), num_rows=3, columns=[\"userId\", \"movieId\"]\n",
+    "valid = df_lib.read_parquet(\n",
+    "    os.path.join(INPUT_DATA_DIR, \"valid.parquet\"), columns=[\"userId\", \"movieId\"]\n",
     ")\n",
+    "batch = valid[:3]\n",
     "print(batch)"
    ]
   },
diff --git a/examples/scaling-criteo/04-Triton-Inference-with-HugeCTR.ipynb b/examples/scaling-criteo/04-Triton-Inference-with-HugeCTR.ipynb
index 07841ebd7..0d875274e 100644
--- a/examples/scaling-criteo/04-Triton-Inference-with-HugeCTR.ipynb
+++ b/examples/scaling-criteo/04-Triton-Inference-with-HugeCTR.ipynb
@@ -516,11 +516,11 @@
     "df_lib = get_lib()\n",
     "input_cols = workflow.input_schema.column_names\n",
     "# read in data for request\n",
-    "batch = df_lib.read_parquet(\n",
+    "data = df_lib.read_parquet(\n",
     "    os.path.join(sorted(glob.glob(original_data_path + \"/*.parquet\"))[-1]),\n",
-    "    num_rows=3,\n",
     "    columns=input_cols\n",
     ")\n",
+    "batch = data[:3]\n",
     "batch = batch[[x for x in batch.columns if x not in ['label']]]\n",
     "batch"
    ]
   }
diff --git a/examples/scaling-criteo/04-Triton-Inference-with-Merlin-Models-TensorFlow.ipynb b/examples/scaling-criteo/04-Triton-Inference-with-Merlin-Models-TensorFlow.ipynb
index a8a038829..ecc8ba1c2 100644
--- a/examples/scaling-criteo/04-Triton-Inference-with-Merlin-Models-TensorFlow.ipynb
+++ b/examples/scaling-criteo/04-Triton-Inference-with-Merlin-Models-TensorFlow.ipynb
@@ -270,11 +270,11 @@
     "df_lib = get_lib()\n",
     "input_cols = workflow.input_schema.column_names\n",
     "# read in data for request\n",
-    "batch = df_lib.read_parquet(\n",
+    "data = df_lib.read_parquet(\n",
     "    os.path.join(sorted(glob.glob(original_data_path + \"/*.parquet\"))[-1]),\n",
-    "    num_rows=3,\n",
     "    columns=input_cols\n",
     ")\n",
+    "batch = data[:3]\n",
     "batch"
    ]
   },
diff --git a/tests/unit/examples/test_building_deploying_multi_stage_RecSys.py b/tests/unit/examples/test_building_deploying_multi_stage_RecSys.py
index 225b8d756..ebb7a2b0b 100644
--- a/tests/unit/examples/test_building_deploying_multi_stage_RecSys.py
+++ b/tests/unit/examples/test_building_deploying_multi_stage_RecSys.py
@@ -65,11 +65,11 @@ def test_func():
     from merlin.dataloader.tf_utils import configure_tensorflow
     configure_tensorflow()
     df_lib = get_lib()
-    batch = df_lib.read_parquet(
+    train = df_lib.read_parquet(
         os.path.join("/tmp/data/processed_nvt/", "train", "part_0.parquet"),
-        num_rows=1,
         columns=["user_id_raw"],
     )
+    batch = train[:1]
     from merlin.systems.triton.utils import run_ensemble_on_tritonserver
     response = run_ensemble_on_tritonserver(
         "/tmp/examples/poc_ensemble", ensemble.graph.input_schema, batch, outputs, "executor_model"