diff --git a/redisai/client.py b/redisai/client.py
index 21b09fb..5458e4c 100644
--- a/redisai/client.py
+++ b/redisai/client.py
@@ -1,16 +1,16 @@
-from functools import wraps, partial
-from typing import Union, AnyStr, ByteString, List, Sequence, Any
 import warnings
+from functools import partial, wraps
+from typing import Any, AnyStr, ByteString, List, Sequence, Union
 
-from redis import StrictRedis
 import numpy as np
+from deprecated import deprecated
+from redis import StrictRedis
 
 from redisai import command_builder as builder
 from redisai.dag import Dag
 from redisai.pipeline import Pipeline
 from redisai.postprocessor import Processor
 
-
 processor = Processor()
@@ -96,7 +96,7 @@ def dag(
         -------
         >>> con.tensorset('tensor', ...)
         'OK'
-        >>> con.modelset('model', ...)
+        >>> con.modelstore('model', ...)
         'OK'
         >>> dag = con.dag(load=['tensor'], persist=['output'])
         >>> dag.tensorset('another', ...)
@@ -136,6 +136,83 @@ def loadbackend(self, identifier: AnyStr, path: AnyStr) -> str:
         res = self.execute_command(*args)
         return res if not self.enable_postprocess else processor.loadbackend(res)
 
+    def modelstore(
+        self,
+        key: AnyStr,
+        backend: str,
+        device: str,
+        data: ByteString,
+        batch: int = None,
+        minbatch: int = None,
+        minbatchtimeout: int = None,
+        tag: AnyStr = None,
+        inputs: Union[AnyStr, List[AnyStr]] = None,
+        outputs: Union[AnyStr, List[AnyStr]] = None,
+    ) -> str:
+        """
+        Set the model on the provided key.
+
+        Parameters
+        ----------
+        key : AnyStr
+            Key name
+        backend : str
+            Backend name. Allowed backends are TF, TORCH, TFLITE, ONNX
+        device : str
+            Device name. Allowed devices are CPU and GPU. If multiple GPUs are
+            available, one can be selected using the format GPU:<ordinal>.
+            For example: GPU:0
+        data : bytes
+            Model graph read as a byte string
+        batch : int
+            Batch size for auto-batching
+        minbatch : int
+            Minimum number of samples required in a batch for model execution
+        minbatchtimeout : int
+            The maximum number of milliseconds for which the engine will not
+            trigger an execution if the number of samples is lower than minbatch
+            (after minbatchtimeout has passed, the execution starts even if
+            minbatch has not been reached)
+        tag : AnyStr
+            Any string that will be saved in RedisAI as a tag for the model
+        inputs : Union[AnyStr, List[AnyStr]]
+            Input node(s) in the graph. Required only for TensorFlow graphs
+        outputs : Union[AnyStr, List[AnyStr]]
+            Output node(s) in the graph. Required only for TensorFlow graphs
+
+        Returns
+        -------
+        str
+            'OK' on success, raises an exception otherwise
+
+        Example
+        -------
+        >>> # Torch model
+        >>> model_path = os.path.join('path/to/TorchScriptModel.pt')
+        >>> model = open(model_path, 'rb').read()
+        >>> con.modelstore("model", 'torch', 'cpu', model, tag='v1.0')
+        'OK'
+        >>> # Tensorflow model
+        >>> model_path = os.path.join('/path/to/tf_frozen_graph.pb')
+        >>> model = open(model_path, 'rb').read()
+        >>> con.modelstore('m', 'tf', 'cpu', model,
+        ...                inputs=['a', 'b'], outputs=['mul'], tag='v1.0')
+        'OK'
+        """
+        args = builder.modelstore(
+            key,
+            backend,
+            device,
+            data,
+            batch,
+            minbatch,
+            minbatchtimeout,
+            tag,
+            inputs,
+            outputs,
+        )
+        res = self.execute_command(*args)
+        return res if not self.enable_postprocess else processor.modelstore(res)
+
+    @deprecated(version="1.2.0", reason="Use modelstore instead")
     def modelset(
         self,
         key: AnyStr,
@@ -247,6 +324,56 @@ def modeldel(self, key: AnyStr) -> str:
         res = self.execute_command(*args)
         return res if not self.enable_postprocess else processor.modeldel(res)
 
+    def modelexecute(
+        self,
+        key: AnyStr,
+        inputs: Union[AnyStr, List[AnyStr]],
+        outputs: Union[AnyStr, List[AnyStr]],
+        timeout: int = None,
+    ) -> str:
+        """
+        Run the model using input(s) that are already in the scope and associated
+        with keys. Modelexecute also needs the output key name(s) under which to
+        store the output from the model. The number of outputs from the model and
+        the number of keys provided here must match; otherwise, RedisAI raises an
+        error.
+
+        Parameters
+        ----------
+        key : str
+            Model key to run
+        inputs : Union[AnyStr, List[AnyStr]]
+            Tensor(s) already saved in RedisAI using a tensorset call. These
+            tensors will be used as the inputs for modelexecute
+        outputs : Union[AnyStr, List[AnyStr]]
+            Keys under which the outputs are saved. If those keys already exist,
+            modelexecute will overwrite them with new values
+        timeout : int
+            The maximum number of milliseconds that may pass before the request
+            is processed (the result will not be computed after that time, and
+            TIMEDOUT is returned in that case)
+
+        Returns
+        -------
+        str
+            'OK' on success, raises an exception otherwise
+
+        Example
+        -------
+        >>> con.modelstore('m', 'tf', 'cpu', model_pb,
+        ...                inputs=['a', 'b'], outputs=['mul'], tag='v1.0')
+        'OK'
+        >>> con.tensorset('a', (2, 3), dtype='float')
+        'OK'
+        >>> con.tensorset('b', (2, 3), dtype='float')
+        'OK'
+        >>> con.modelexecute('m', ['a', 'b'], ['c'])
+        'OK'
+        """
+        args = builder.modelexecute(key, inputs, outputs, timeout)
+        res = self.execute_command(*args)
+        return res if not self.enable_postprocess else processor.modelexecute(res)
+
+    @deprecated(version="1.2.0", reason="Use modelexecute instead")
     def modelrun(
         self,
         key: AnyStr,
@@ -277,7 +404,7 @@ def modelrun(
 
         Example
         -------
-        >>> con.modelset('m', 'tf', 'cpu', model_pb,
+        >>> con.modelstore('m', 'tf', 'cpu', model_pb,
         ...              inputs=['a', 'b'], outputs=['mul'], tag='v1.0')
         'OK'
         >>> con.tensorset('a', (2, 3), dtype='float')
diff --git a/redisai/command_builder.py b/redisai/command_builder.py
index 3087342..3f1c151 100644
--- a/redisai/command_builder.py
+++ b/redisai/command_builder.py
@@ -1,5 +1,7 @@
-from typing import Union, AnyStr, ByteString, List, Sequence
+from typing import AnyStr, ByteString, List, Sequence, Union
+
 import numpy as np
+
 from . import utils
 
 # TODO: mypy check
@@ -9,29 +11,90 @@ def loadbackend(identifier: AnyStr, path: AnyStr) -> Sequence:
     return "AI.CONFIG LOADBACKEND", identifier, path
 
 
-def modelset(
+def modelstore(
     name: AnyStr,
     backend: str,
     device: str,
     data: ByteString,
     batch: int,
     minbatch: int,
+    minbatchtimeout: int,
     tag: AnyStr,
     inputs: Union[AnyStr, List[AnyStr]],
     outputs: Union[AnyStr, List[AnyStr]],
 ) -> Sequence:
+    if name is None:
+        raise ValueError("Model name was not given")
     if device.upper() not in utils.allowed_devices:
         raise ValueError(f"Device not allowed. Use any from {utils.allowed_devices}")
Use any from {utils.allowed_devices}") if backend.upper() not in utils.allowed_backends: raise ValueError(f"Backend not allowed. Use any from {utils.allowed_backends}") - args = ["AI.MODELSET", name, backend, device] + args = ["AI.MODELSTORE", name, backend, device] + if tag is not None: + args += ["TAG", tag] if batch is not None: args += ["BATCHSIZE", batch] if minbatch is not None: + if batch is None: + raise ValueError("Minbatch is not allowed without batch") args += ["MINBATCHSIZE", minbatch] + if minbatchtimeout is not None: + if minbatch is None: + raise ValueError("Minbatchtimeout is not allowed without minbatch") + args += ["MINBATCHTIMEOUT", minbatchtimeout] + + if backend.upper() == "TF": + if not all((inputs, outputs)): + raise ValueError( + "Require keyword arguments inputs and outputs for TF models" + ) + args += [ + "INPUTS", + len(inputs) if isinstance(inputs, List) else 1, + *utils.listify(inputs), + ] + args += [ + "OUTPUTS", + len(outputs) if isinstance(outputs, List) else 1, + *utils.listify(outputs), + ] + elif inputs is not None or outputs is not None: + raise ValueError( + "Inputs and outputs keywords should not be specified for this backend" + ) + chunk_size = 500 * 1024 * 1024 # TODO: this should be configurable. + data_chunks = [data[i : i + chunk_size] for i in range(0, len(data), chunk_size)] + # TODO: need a test case for this + args += ["BLOB", *data_chunks] + return args + + +def modelset( + name: AnyStr, + backend: str, + device: str, + data: ByteString, + batch: int, + minbatch: int, + tag: AnyStr, + inputs: Union[AnyStr, List[AnyStr]], + outputs: Union[AnyStr, List[AnyStr]], +) -> Sequence: + if device.upper() not in utils.allowed_devices: + raise ValueError(f"Device not allowed. Use any from {utils.allowed_devices}") + if backend.upper() not in utils.allowed_backends: + raise ValueError(f"Backend not allowed. 
Use any from {utils.allowed_backends}") + args = ["AI.MODELSET", name, backend, device] + if tag is not None: args += ["TAG", tag] + if batch is not None: + args += ["BATCHSIZE", batch] + if minbatch is not None: + if batch is None: + raise ValueError("Minbatch is not allowed without batch") + args += ["MINBATCHSIZE", minbatch] if backend.upper() == "TF": if not (all((inputs, outputs))): @@ -56,7 +119,34 @@ def modeldel(name: AnyStr) -> Sequence: return "AI.MODELDEL", name -def modelrun(name: AnyStr, inputs: List[AnyStr], outputs: List[AnyStr]) -> Sequence: +def modelexecute( + name: AnyStr, + inputs: Union[AnyStr, List[AnyStr]], + outputs: Union[AnyStr, List[AnyStr]], + timeout: int, +) -> Sequence: + if name is None or inputs is None or outputs is None: + raise ValueError("Missing required arguments for model execute command") + args = [ + "AI.MODELEXECUTE", + name, + "INPUTS", + len(utils.listify(inputs)), + *utils.listify(inputs), + "OUTPUTS", + len(utils.listify(outputs)), + *utils.listify(outputs), + ] + if timeout is not None: + args += ["TIMEOUT", timeout] + return args + + +def modelrun( + name: AnyStr, + inputs: Union[AnyStr, List[AnyStr]], + outputs: Union[AnyStr, List[AnyStr]], +) -> Sequence: args = ( "AI.MODELRUN", name, diff --git a/redisai/dag.py b/redisai/dag.py index 1b60529..eb2e1ad 100644 --- a/redisai/dag.py +++ b/redisai/dag.py @@ -1,11 +1,10 @@ from functools import partial -from typing import AnyStr, Union, Sequence, Any, List +from typing import Any, AnyStr, List, Sequence, Union import numpy as np -from redisai.postprocessor import Processor from redisai import command_builder as builder - +from redisai.postprocessor import Processor processor = Processor() diff --git a/redisai/pipeline.py b/redisai/pipeline.py index 447f528..f6a8255 100644 --- a/redisai/pipeline.py +++ b/redisai/pipeline.py @@ -1,14 +1,13 @@ import warnings from functools import partial -from typing import AnyStr, Union, Sequence +from typing import AnyStr, Sequence, Union import numpy as np +import redis from redisai import command_builder as builder -import redis from redisai.postprocessor import Processor - processor = Processor() diff --git a/redisai/postprocessor.py b/redisai/postprocessor.py index c37c1ef..42bd141 100644 --- a/redisai/postprocessor.py +++ b/redisai/postprocessor.py @@ -63,8 +63,10 @@ def infoget(res): decoder = staticmethod(decoder) decoding_functions = ( "loadbackend", + "modelstore", "modelset", "modeldel", + "modelexecute", "modelrun", "tensorset", "scriptset", diff --git a/redisai/utils.py b/redisai/utils.py index 3723bc5..ca8007f 100644 --- a/redisai/utils.py +++ b/redisai/utils.py @@ -1,6 +1,6 @@ -from typing import Union, ByteString, Sequence, List, AnyStr, Callable -import numpy as np +from typing import AnyStr, ByteString, Callable, List, Sequence, Union +import numpy as np dtype_dict = { "float": "FLOAT", diff --git a/setup.py b/setup.py index 6b3dfd3..f5de6bb 100644 --- a/setup.py +++ b/setup.py @@ -15,7 +15,7 @@ author="RedisLabs", author_email="oss@redislabs.com", packages=find_packages(), - install_requires=["redis", "hiredis", "numpy"], + install_requires=["redis", "hiredis", "numpy", "deprecated"], python_requires=">=3.6", classifiers=[ "Development Status :: 4 - Beta", diff --git a/test-requirements.txt b/test-requirements.txt index 38dac3d..252e5cc 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -6,3 +6,4 @@ nose codecov numpy ml2rt +deprecated \ No newline at end of file diff --git a/test/test.py b/test/test.py index 1dcf8fa..506c1ce 
--- a/test/test.py
+++ b/test/test.py
@@ -1,12 +1,13 @@
-from io import StringIO
+import os.path
 import sys
+from io import StringIO
 from unittest import TestCase
+
 import numpy as np
-import os.path
-from redisai import Client
 from ml2rt import load_model
 from redis.exceptions import ResponseError
+from redisai import Client
 
 DEBUG = False
 tf_graph = "graph.pb"
@@ -115,8 +116,9 @@ def test_numpy_tensor(self):
         with self.assertRaises(TypeError):
             con.tensorset("trying", stringarr)
 
-    def test_modelset_errors(self):
-        model_path = os.path.join(MODEL_DIR, tf_graph)
+    # AI.MODELSET is deprecated by AI.MODELSTORE.
+    def test_deprecated_modelset(self):
+        model_path = os.path.join(MODEL_DIR, "graph.pb")
         model_pb = load_model(model_path)
         con = self.get_client()
         with self.assertRaises(ValueError):
@@ -139,12 +141,116 @@ def test_modelset_errors(self):
                 outputs=["mul"],
                 tag="v1.0",
             )
+        con.modelset(
+            "m", "tf", "cpu", model_pb, inputs=["a", "b"], outputs=["mul"], tag="v1.0"
+        )
+        model = con.modelget("m", meta_only=True)
+        self.assertEqual(
+            model,
+            {
+                "backend": "TF",
+                "batchsize": 0,
+                "device": "cpu",
+                "inputs": ["a", "b"],
+                "minbatchsize": 0,
+                "minbatchtimeout": 0,
+                "outputs": ["mul"],
+                "tag": "v1.0",
+            },
+        )
+
+    def test_modelstore_errors(self):
+        model_path = os.path.join(MODEL_DIR, "graph.pb")
+        model_pb = load_model(model_path)
+        con = self.get_client()
+
+        with self.assertRaises(ValueError) as e:
+            con.modelstore(
+                None,
+                "TF",
+                "CPU",
+                model_pb,
+                inputs=["a", "b"],
+                outputs=["mul"]
+            )
+        self.assertEqual(str(e.exception), "Model name was not given")
+
+        with self.assertRaises(ValueError) as e:
+            con.modelstore(
+                "m",
+                "tf",
+                "wrongdevice",
+                model_pb,
+                inputs=["a", "b"],
+                outputs=["mul"],
+                tag="v1.0",
+            )
+        self.assertTrue(str(e.exception).startswith("Device not allowed"))
+        with self.assertRaises(ValueError) as e:
+            con.modelstore(
+                "m",
+                "wrongbackend",
+                "cpu",
+                model_pb,
+                inputs=["a", "b"],
+                outputs=["mul"],
+                tag="v1.0",
+            )
+        self.assertTrue(str(e.exception).startswith("Backend not allowed"))
+        with self.assertRaises(ValueError) as e:
+            con.modelstore(
+                "m",
+                "tf",
+                "cpu",
+                model_pb,
+                inputs=["a", "b"],
+                outputs=["mul"],
+                tag="v1.0",
+                minbatch=2,
+            )
+        self.assertEqual(str(e.exception),
+                         "Minbatch is not allowed without batch")
+        with self.assertRaises(ValueError) as e:
+            con.modelstore(
+                "m",
+                "tf",
+                "cpu",
+                model_pb,
+                inputs=["a", "b"],
+                outputs=["mul"],
+                tag="v1.0",
+                batch=4,
+                minbatchtimeout=1000,
+            )
+        self.assertEqual(
+            str(e.exception), "Minbatchtimeout is not allowed without minbatch"
+        )
+        with self.assertRaises(ValueError) as e:
+            con.modelstore("m", "tf", "cpu", model_pb, tag="v1.0")
+        self.assertEqual(
+            str(e.exception),
+            "Require keyword arguments inputs and outputs for TF models",
+        )
+        with self.assertRaises(ValueError) as e:
+            con.modelstore(
+                "m",
+                "torch",
+                "cpu",
+                model_pb,
+                inputs=["a", "b"],
+                outputs=["mul"],
+                tag="v1.0",
+            )
+        self.assertEqual(
+            str(e.exception),
+            "Inputs and outputs keywords should not be specified for this backend",
+        )
 
     def test_modelget_meta(self):
         model_path = os.path.join(MODEL_DIR, tf_graph)
         model_pb = load_model(model_path)
         con = self.get_client()
-        con.modelset(
+        con.modelstore(
             "m", "tf", "cpu", model_pb, inputs=["a", "b"], outputs=["mul"], tag="v1.0"
         )
         model = con.modelget("m", meta_only=True)
@@ -156,21 +262,22 @@ def test_modelget_meta(self):
                 "device": "cpu",
                 "inputs": ["a", "b"],
                 "minbatchsize": 0,
+                "minbatchtimeout": 0,
                 "outputs": ["mul"],
                 "tag": "v1.0",
             },
         )
 
-    def test_modelrun_non_list_input_output(self):
-        model_path = os.path.join(MODEL_DIR, tf_graph)
+    def test_modelexecute_non_list_input_output(self):
+        model_path = os.path.join(MODEL_DIR, "graph.pb")
         model_pb = load_model(model_path)
         con = self.get_client()
-        con.modelset(
+        con.modelstore(
             "m", "tf", "cpu", model_pb, inputs=["a", "b"], outputs=["mul"], tag="v1.7"
         )
         con.tensorset("a", (2, 3), dtype="float")
         con.tensorset("b", (2, 3), dtype="float")
-        ret = con.modelrun("m", ["a", "b"], "out")
+        ret = con.modelexecute("m", ["a", "b"], "out")
         self.assertEqual(ret, "OK")
 
     def test_nonasciichar(self):
@@ -178,7 +285,7 @@ def test_nonasciichar(self):
         model_path = os.path.join(MODEL_DIR, tf_graph)
         model_pb = load_model(model_path)
         con = self.get_client()
-        con.modelset(
+        con.modelstore(
             "m" + nonascii,
             "tf",
             "cpu",
@@ -189,7 +296,8 @@ def test_nonasciichar(self):
         )
         con.tensorset("a" + nonascii, (2, 3), dtype="float")
         con.tensorset("b", (2, 3), dtype="float")
-        con.modelrun("m" + nonascii, ["a" + nonascii, "b"], ["c" + nonascii])
+        con.modelexecute(
+            "m" + nonascii, ["a" + nonascii, "b"], ["c" + nonascii])
         tensor = con.tensorget("c" + nonascii)
         self.assertTrue((np.allclose(tensor, [4.0, 9.0])))
 
@@ -201,44 +309,34 @@ def test_run_tf_model(self):
         wrong_model_pb = load_model(bad_model_path)
 
         con = self.get_client()
-        con.modelset(
+        con.modelstore(
             "m", "tf", "cpu", model_pb, inputs=["a", "b"], outputs=["mul"], tag="v1.0"
         )
         con.modeldel("m")
         self.assertRaises(ResponseError, con.modelget, "m")
-        con.modelset(
+        con.modelstore(
             "m", "tf", "cpu", model_pb, inputs=["a", "b"], outputs="mul", tag="v1.0"
         )
 
-        # wrong model
-        self.assertRaises(
-            ResponseError,
-            con.modelset,
-            "m",
-            "tf",
-            "cpu",
-            wrong_model_pb,
-            inputs=["a", "b"],
-            outputs=["mul"],
-        )
-        # missing inputs/outputs
-        self.assertRaises(ValueError, con.modelset, "m", "tf", "cpu", wrong_model_pb)
+        # Required arguments are None
+        with self.assertRaises(ValueError) as e:
+            con.modelexecute(
+                "m",
+                inputs=None,
+                outputs=None
+            )
+        self.assertEqual(
+            str(e.exception), "Missing required arguments for model execute command"
+        )
 
-        # wrong backend
-        self.assertRaises(
-            ResponseError,
-            con.modelset,
-            "m",
-            "torch",
-            "cpu",
-            model_pb,
-            inputs=["a", "b"],
-            outputs=["mul"],
-        )
+        # wrong model
+        with self.assertRaises(ResponseError) as e:
+            con.modelstore(
+                "m", "tf", "cpu", wrong_model_pb, inputs=["a", "b"], outputs=["mul"]
+            )
+        self.assertEqual(str(e.exception), "Invalid GraphDef")
 
         con.tensorset("a", (2, 3), dtype="float")
         con.tensorset("b", (2, 3), dtype="float")
-        con.modelrun("m", ["a", "b"], ["c"])
+        con.modelexecute("m", ["a", "b"], ["c"])
         tensor = con.tensorget("c")
         self.assertTrue(np.allclose([4, 9], tensor))
         model_det = con.modelget("m")
@@ -252,7 +350,8 @@ def test_run_tf_model(self):
 
     def test_scripts(self):
         con = self.get_client()
-        self.assertRaises(ResponseError, con.scriptset, "ket", "cpu", "return 1")
+        self.assertRaises(ResponseError, con.scriptset,
+                          "ket", "cpu", "return 1")
         con.scriptset("ket", "cpu", script)
         con.tensorset("a", (2, 3), dtype="float")
         con.tensorset("b", (2, 3), dtype="float")
@@ -276,10 +375,10 @@ def test_run_onnxml_model(self):
         mlmodel_path = os.path.join(MODEL_DIR, "boston.onnx")
         onnxml_model = load_model(mlmodel_path)
         con = self.get_client()
-        con.modelset("onnx_model", "onnx", "cpu", onnxml_model)
+        con.modelstore("onnx_model", "onnx", "cpu", onnxml_model)
         tensor = np.ones((1, 13)).astype(np.float32)
         con.tensorset("input", tensor)
-        con.modelrun("onnx_model", ["input"], ["output"])
+        con.modelexecute("onnx_model", ["input"], ["output"])
con.modelexecute("onnx_model", ["input"], ["output"]) # tests `convert_to_num` outtensor = con.tensorget("output", as_numpy=False) self.assertEqual(int(float(outtensor["values"][0])), 24) @@ -289,10 +388,10 @@ def test_run_onnxdl_model(self): dlmodel_path = os.path.join(MODEL_DIR, "findsquare.onnx") onnxdl_model = load_model(dlmodel_path) con = self.get_client() - con.modelset("onnx_model", "onnx", "cpu", onnxdl_model) + con.modelstore("onnx_model", "onnx", "cpu", onnxdl_model) tensor = np.array((2,)).astype(np.float32) con.tensorset("input", tensor) - con.modelrun("onnx_model", ["input"], ["output"]) + con.modelexecute("onnx_model", ["input"], ["output"]) outtensor = con.tensorget("output") self.assertTrue(np.allclose(outtensor, [4.0])) @@ -300,10 +399,10 @@ def test_run_pytorch_model(self): model_path = os.path.join(MODEL_DIR, torch_graph) ptmodel = load_model(model_path) con = self.get_client() - con.modelset("pt_model", "torch", "cpu", ptmodel, tag="v1.0") + con.modelstore("pt_model", "torch", "cpu", ptmodel, tag="v1.0") con.tensorset("a", [2, 3, 2, 3], shape=(2, 2), dtype="float") con.tensorset("b", [2, 3, 2, 3], shape=(2, 2), dtype="float") - con.modelrun("pt_model", ["a", "b"], ["output"]) + con.modelexecute("pt_model", ["a", "b"], ["output"]) output = con.tensorget("output", as_numpy=False) self.assertTrue(np.allclose(output["values"], [4, 6, 4, 6])) @@ -311,18 +410,35 @@ def test_run_tflite_model(self): model_path = os.path.join(MODEL_DIR, "mnist_model_quant.tflite") tflmodel = load_model(model_path) con = self.get_client() - con.modelset("tfl_model", "tflite", "cpu", tflmodel) + con.modelstore("tfl_model", "tflite", "cpu", tflmodel) img = np.random.random((1, 1, 28, 28)).astype(np.float) con.tensorset("img", img) - con.modelrun("tfl_model", ["img"], ["output1", "output2"]) + con.modelexecute("tfl_model", ["img"], ["output1", "output2"]) output = con.tensorget("output1") self.assertTrue(np.allclose(output, [8])) + # AI.MODELRUN is deprecated by AI.MODELEXECUTE + def test_deprecated_modelrun(self): + model_path = os.path.join(MODEL_DIR, "graph.pb") + model_pb = load_model(model_path) + + con = self.get_client() + con.modelstore( + "m", "tf", "cpu", model_pb, inputs=["a", "b"], outputs=["mul"], tag="v1.0" + ) + + con.tensorset("a", (2, 3), dtype="float") + con.tensorset("b", (2, 3), dtype="float") + con.modelrun("m", ["a", "b"], ["c"]) + tensor = con.tensorget("c") + self.assertTrue(np.allclose([4, 9], tensor)) + def test_info(self): model_path = os.path.join(MODEL_DIR, tf_graph) model_pb = load_model(model_path) con = self.get_client() - con.modelset("m", "tf", "cpu", model_pb, inputs=["a", "b"], outputs=["mul"]) + con.modelstore("m", "tf", "cpu", model_pb, + inputs=["a", "b"], outputs=["mul"]) first_info = con.infoget("m") expected = { "key": "m", @@ -338,26 +454,27 @@ def test_info(self): self.assertEqual(first_info, expected) con.tensorset("a", (2, 3), dtype="float") con.tensorset("b", (2, 3), dtype="float") - con.modelrun("m", ["a", "b"], ["c"]) - con.modelrun("m", ["a", "b"], ["c"]) + con.modelexecute("m", ["a", "b"], ["c"]) + con.modelexecute("m", ["a", "b"], ["c"]) second_info = con.infoget("m") self.assertEqual(second_info["calls"], 2) # 2 model runs con.inforeset("m") third_info = con.infoget("m") - self.assertEqual(first_info, third_info) # before modelrun and after reset + # before modelrun and after reset + self.assertEqual(first_info, third_info) def test_model_scan(self): model_path = os.path.join(MODEL_DIR, tf_graph) model_pb = load_model(model_path) con = 
-        con.modelset(
+        con.modelstore(
             "m", "tf", "cpu", model_pb, inputs=["a", "b"], outputs=["mul"], tag="v1.2"
         )
-        model_path = os.path.join(MODEL_DIR, torch_graph)
+        model_path = os.path.join(MODEL_DIR, "pt-minimal.pt")
         ptmodel = load_model(model_path)
         con = self.get_client()
         # TODO: RedisAI modelscan issue
-        con.modelset("pt_model", "torch", "cpu", ptmodel)
+        con.modelstore("pt_model", "torch", "cpu", ptmodel)
         mlist = con.modelscan()
         self.assertEqual(mlist, [["pt_model", ""], ["m", "v1.2"]])
 
@@ -381,7 +498,7 @@ def setUp(self):
         con = self.get_client()
         model_path = os.path.join(MODEL_DIR, torch_graph)
         ptmodel = load_model(model_path)
-        con.modelset("pt_model", "torch", "cpu", ptmodel, tag="v7.0")
+        con.modelstore("pt_model", "torch", "cpu", ptmodel, tag="v7.0")
 
     def test_dagrun_with_load(self):
         con = self.get_client()
@@ -392,7 +509,8 @@ def test_dagrun_with_load(self):
         dag.modelrun("pt_model", ["a", "b"], ["output"])
         dag.tensorget("output")
         result = dag.run()
-        expected = ["OK", "OK", np.array([[4.0, 6.0], [4.0, 6.0]], dtype=np.float32)]
+        expected = ["OK", "OK", np.array(
+            [[4.0, 6.0], [4.0, 6.0]], dtype=np.float32)]
         self.assertTrue(np.allclose(expected.pop(), result.pop()))
         self.assertEqual(expected, result)
         self.assertRaises(ResponseError, con.tensorget, "b")
@@ -424,7 +542,8 @@ def test_dagrun_calling_on_return(self):
             .tensorget("output")
             .run()
         )
-        expected = ["OK", "OK", np.array([[4.0, 6.0], [4.0, 6.0]], dtype=np.float32)]
+        expected = ["OK", "OK", np.array(
+            [[4.0, 6.0], [4.0, 6.0]], dtype=np.float32)]
        self.assertTrue(np.allclose(expected.pop(), result.pop()))
         self.assertEqual(expected, result)
 
@@ -486,12 +605,14 @@ def test_pipeline_non_transaction(self):
         pipe = con.pipeline(transaction=False)
         pipe = pipe.tensorset("a", arr).set("native", 1)
         pipe = pipe.tensorget("a", as_numpy=False)
-        pipe = pipe.tensorget("a", as_numpy=True).tensorget("a", meta_only=True)
+        pipe = pipe.tensorget("a", as_numpy=True).tensorget(
+            "a", meta_only=True)
         result = pipe.execute()
         expected = [
             b"OK",
             True,
-            {"dtype": "FLOAT", "shape": [2, 2], "values": [2.0, 3.0, 2.0, 3.0]},
+            {"dtype": "FLOAT", "shape": [2, 2],
+                "values": [2.0, 3.0, 2.0, 3.0]},
             arr,
             {"dtype": "FLOAT", "shape": [2, 2]},
         ]
@@ -507,12 +628,14 @@ def test_pipeline_transaction(self):
         pipe = con.pipeline(transaction=True)
         pipe = pipe.tensorset("a", arr).set("native", 1)
         pipe = pipe.tensorget("a", as_numpy=False)
-        pipe = pipe.tensorget("a", as_numpy=True).tensorget("a", meta_only=True)
+        pipe = pipe.tensorget("a", as_numpy=True).tensorget(
+            "a", meta_only=True)
         result = pipe.execute()
         expected = [
             b"OK",
             True,
-            {"dtype": "FLOAT", "shape": [2, 2], "values": [2.0, 3.0, 2.0, 3.0]},
+            {"dtype": "FLOAT", "shape": [2, 2],
+                "values": [2.0, 3.0, 2.0, 3.0]},
             arr,
             {"dtype": "FLOAT", "shape": [2, 2]},
         ]
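
Reviewer note: a minimal end-to-end sketch of the two commands this diff introduces. It assumes a RedisAI server on localhost:6379 and the TensorFlow test graph used throughout test/test.py (input nodes 'a' and 'b', output node 'mul' computing their elementwise product); the model path below is illustrative, not part of the change.

    import numpy as np
    from redisai import Client

    con = Client(host="localhost", port=6379)

    # Illustrative path; the test suite loads the same graph via ml2rt.load_model.
    with open("test/testdata/graph.pb", "rb") as f:
        model_pb = f.read()

    # AI.MODELSTORE replaces AI.MODELSET. TF models still require the graph's
    # input/output node names; TAG, BATCHSIZE, MINBATCHSIZE and the new
    # MINBATCHTIMEOUT are optional.
    con.modelstore("m", "tf", "cpu", model_pb,
                   inputs=["a", "b"], outputs=["mul"], tag="v1.0")

    con.tensorset("a", np.array([2.0, 3.0], dtype=np.float32))
    con.tensorset("b", np.array([2.0, 3.0], dtype=np.float32))

    # AI.MODELEXECUTE replaces AI.MODELRUN and adds an optional TIMEOUT (in
    # milliseconds), after which RedisAI replies with TIMEDOUT.
    con.modelexecute("m", inputs=["a", "b"], outputs=["c"], timeout=500)
    print(con.tensorget("c"))  # -> array([4., 9.], dtype=float32)

The old names remain callable but now emit DeprecationWarning via the `deprecated` package, which is why setup.py and test-requirements.txt gain the new dependency.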