[Refactor] Rename asnumpy -> numpy in NDArray (#8083)
icemelon authored May 21, 2021
1 parent 5a7c081 commit 720e7b1
Showing 345 changed files with 1,672 additions and 1,713 deletions.
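For orientation, a hedged before/after sketch of the API change this diff applies across the codebase (the array contents below are illustrative and not taken from any changed file); the old spelling is kept as a deprecated alias, at least for CSRNDArray, as the python/tvm/contrib/sparse.py hunk further down shows:

    import numpy as np
    import tvm

    # Build a small NDArray on the CPU.
    a = tvm.nd.array(np.arange(4, dtype="float32"), tvm.cpu(0))

    # Before this commit: copy back to NumPy with asnumpy().
    # a_np = a.asnumpy()

    # After this commit: the method is spelled numpy().
    a_np = a.numpy()
    assert isinstance(a_np, np.ndarray)
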
6 changes: 3 additions & 3 deletions apps/android_rpc/tests/android_rpc_test.py
@@ -76,7 +76,7 @@ def test_rpc_module():
time_f = f2.time_evaluator(f2.entry_name, dev, number=10)
cost = time_f(a, b).mean
print("%g secs/op\n" % cost)
np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1)
np.testing.assert_equal(b.numpy(), a.numpy() + 1)

# Compile the Graph for OpenCL target
if test_opencl:
@@ -99,7 +99,7 @@ def test_rpc_module():
time_f = f1.time_evaluator(f1.entry_name, dev, number=10)
cost = time_f(a, b).mean
print("%g secs/op\n" % cost)
np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1)
np.testing.assert_equal(b.numpy(), a.numpy() + 1)

# Compile the Graph for Vulkan target
if test_vulkan:
@@ -122,7 +122,7 @@ def test_rpc_module():
time_f = f1.time_evaluator(f1.entry_name, dev, number=10)
cost = time_f(a, b).mean
print("%g secs/op\n" % cost)
np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1)
np.testing.assert_equal(b.numpy(), a.numpy() + 1)


if __name__ == "__main__":
4 changes: 2 additions & 2 deletions apps/extension/tests/test_ext.py
@@ -45,7 +45,7 @@ def check_llvm():
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev)
f(a, b)
tvm.testing.assert_allclose(b.asnumpy(), a.asnumpy() + 1)
tvm.testing.assert_allclose(b.numpy(), a.numpy() + 1)

check_llvm()

@@ -92,7 +92,7 @@ def check_llvm():
a = tvm.nd.array(np.random.uniform(size=n).astype(A.dtype), dev)
b = tvm.nd.array(np.zeros(n, dtype=B.dtype), dev)
f(a, b)
tvm.testing.assert_allclose(b.asnumpy(), a.asnumpy() + 1)
tvm.testing.assert_allclose(b.numpy(), a.numpy() + 1)

check_llvm()

4 changes: 2 additions & 2 deletions apps/howto_deploy/python_deploy.py
@@ -33,8 +33,8 @@ def verify(mod, fname):
y = tvm.nd.array(np.zeros(N, dtype=np.float32))
# Invoke the function
f(x, y)
np_x = x.asnumpy()
np_y = y.asnumpy()
np_x = x.numpy()
np_y = y.numpy()
# Verify correctness of function
assert np.all([xi + 1 == yi for xi, yi in zip(np_x, np_y)])
print("Finish verification...")
2 changes: 1 addition & 1 deletion apps/ios_rpc/tests/ios_rpc_mobilenet.py
@@ -125,7 +125,7 @@ def run(mod, target):
m.set_input("data", tvm.nd.array(image, dev))
m.run()
tvm_output = m.get_output(0)
top1 = np.argmax(tvm_output.asnumpy()[0])
top1 = np.argmax(tvm_output.numpy()[0])
print("TVM prediction top-1:", top1, synset[top1])

# evaluate
6 changes: 3 additions & 3 deletions apps/ios_rpc/tests/ios_rpc_test.py
@@ -96,7 +96,7 @@ def test_rpc_module():
time_f = f1.time_evaluator(f1.entry_name, dev, number=10)
cost = time_f(a, b).mean
print("%g secs/op" % cost)
np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1)
np.testing.assert_equal(b.numpy(), a.numpy() + 1)
# CPU
dev = remote.cpu(0)
f2 = remote.load_module("cpu_lib.dylib")
@@ -106,7 +106,7 @@ def test_rpc_module():
time_f = f2.time_evaluator(f2.entry_name, dev, number=10)
cost = time_f(a, b).mean
print("%g secs/op" % cost)
np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1)
np.testing.assert_equal(b.numpy(), a.numpy() + 1)


def test_rpc_module_with_upload():
@@ -142,7 +142,7 @@ def test_rpc_module_with_upload():
time_f = f.time_evaluator(f.entry_name, dev, number=10)
cost = time_f(a, b).mean
print("%g secs/op" % cost)
np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1)
np.testing.assert_equal(b.numpy(), a.numpy() + 1)


if __name__ == "__main__":
4 changes: 2 additions & 2 deletions apps/topi_recipe/broadcast/test_broadcast_map.py
@@ -69,7 +69,7 @@ def test_broadcast_to(in_shape, out_shape):
out_nd = tvm.nd.array(np.empty(out_shape).astype(B.dtype), tvm.cuda())
for _ in range(2):
fcuda(data_nd, out_nd)
tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)


def test_broadcast_binary_op(lhs_shape, rhs_shape, typ="add"):
@@ -121,7 +121,7 @@ def test_broadcast_binary_op(lhs_shape, rhs_shape, typ="add"):
out_nd = tvm.nd.array(np.empty(out_npy.shape).astype(B.dtype), tvm.cuda())
for _ in range(2):
fcuda(lhs_nd, rhs_nd, out_nd)
tvm.testing.assert_allclose(out_nd.asnumpy(), out_npy)
tvm.testing.assert_allclose(out_nd.numpy(), out_npy)


if __name__ == "__main__":
16 changes: 6 additions & 10 deletions apps/topi_recipe/conv/depthwise_conv2d_test.py
@@ -146,11 +146,9 @@ def check_device(device):
depthwise_conv2d_scipy[:, c, :, :] * scale_np[c] + shift_np[c]
)
relu_scipy = np.maximum(scale_shift_scipy, 0)
tvm.testing.assert_allclose(
depthwise_conv2d_tvm.asnumpy(), depthwise_conv2d_scipy, rtol=1e-5
)
tvm.testing.assert_allclose(scale_shift_tvm.asnumpy(), scale_shift_scipy, rtol=1e-5)
tvm.testing.assert_allclose(relu_tvm.asnumpy(), relu_scipy, rtol=1e-5)
tvm.testing.assert_allclose(depthwise_conv2d_tvm.numpy(), depthwise_conv2d_scipy, rtol=1e-5)
tvm.testing.assert_allclose(scale_shift_tvm.numpy(), scale_shift_scipy, rtol=1e-5)
tvm.testing.assert_allclose(relu_tvm.numpy(), relu_scipy, rtol=1e-5)
print("success")

for device in ["cuda", "opencl", "rocm"]:
@@ -253,11 +251,9 @@ def check_device(device):
depthwise_conv2d_scipy[:, :, :, c] * scale_np[c] + shift_np[c]
)
relu_scipy = np.maximum(scale_shift_scipy, 0)
tvm.testing.assert_allclose(
depthwise_conv2d_tvm.asnumpy(), depthwise_conv2d_scipy, rtol=1e-5
)
tvm.testing.assert_allclose(scale_shift_tvm.asnumpy(), scale_shift_scipy, rtol=1e-5)
tvm.testing.assert_allclose(relu_tvm.asnumpy(), relu_scipy, rtol=1e-5)
tvm.testing.assert_allclose(depthwise_conv2d_tvm.numpy(), depthwise_conv2d_scipy, rtol=1e-5)
tvm.testing.assert_allclose(scale_shift_tvm.numpy(), scale_shift_scipy, rtol=1e-5)
tvm.testing.assert_allclose(relu_tvm.numpy(), relu_scipy, rtol=1e-5)
print("success")

for device in ["cuda", "opencl", "rocm"]:
4 changes: 2 additions & 2 deletions apps/topi_recipe/conv/test_conv2d_hwcn_map.py
@@ -88,10 +88,10 @@ def check_device(device):
):
func1 = tvm.build(s1, [A, W, B], device)
func1(a, w, b)
tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)
func2 = tvm.build(s2, [A, W, C], device)
func2(a, w, c)
tvm.testing.assert_allclose(c.asnumpy(), c_np, rtol=1e-5)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)

for device in ["cuda", "opencl", "rocm"]:
check_device(device)
4 changes: 2 additions & 2 deletions apps/topi_recipe/conv/test_conv_int8_arm.py
@@ -186,9 +186,9 @@ def run_inference(

# Functional check
if data_dtype == "uint8":
np.testing.assert_equal(c_orig.asnumpy(), c_sch.asnumpy())
np.testing.assert_equal(c_orig.numpy(), c_sch.numpy())
else:
assert np.allclose(c_orig.asnumpy(), c_sch.asnumpy())
assert np.allclose(c_orig.numpy(), c_sch.numpy())

evaluator = func.time_evaluator(func.entry_name, DEV, number=1000)
LOGGER.debug(tvm.lower(sconv, [data, kernel], simple_mode=True))
4 changes: 2 additions & 2 deletions apps/topi_recipe/conv/test_conv_int8_intel.py
@@ -172,9 +172,9 @@ def run_inference(

# Functional check
if data_dtype == "uint8":
np.testing.assert_equal(c_orig.asnumpy(), c_sch.asnumpy())
np.testing.assert_equal(c_orig.numpy(), c_sch.numpy())
else:
assert np.allclose(c_orig.asnumpy(), c_sch.asnumpy())
assert np.allclose(c_orig.numpy(), c_sch.numpy())

evaluator = func.time_evaluator(func.entry_name, DEV, number=1000)
LOGGER.debug(tvm.lower(sconv, [data, kernel], simple_mode=True))
2 changes: 1 addition & 1 deletion apps/topi_recipe/gemm/android_gemm_square.py
@@ -51,7 +51,7 @@ def evaluate(func, dev, N, times):
cost = time_f(a, b, c).mean
gf = ngflops(N) / cost
print("%g secs/op, %g GFLOPS" % (cost, gf))
np.testing.assert_almost_equal(c.asnumpy(), a_np.dot(b_np), decimal=2)
np.testing.assert_almost_equal(c.numpy(), a_np.dot(b_np), decimal=2)


def test_gemm_gpu(N, times, bn, num_block, num_thread):
2 changes: 1 addition & 1 deletion apps/topi_recipe/gemm/cuda_gemm_square.py
@@ -136,7 +136,7 @@ def check_device(device):
c = tvm.nd.array(np.zeros((n, m), dtype=C.dtype), dev)
for i in range(2):
f(a, b, c)
tvm.testing.assert_allclose(c.asnumpy(), np.dot(b_np.T, a_np), rtol=1e-5)
tvm.testing.assert_allclose(c.numpy(), np.dot(b_np.T, a_np), rtol=1e-5)

num_flops = 2 * nn * nn * nn
num_runs = 10
2 changes: 1 addition & 1 deletion apps/topi_recipe/gemm/gemm_int8.py
@@ -171,7 +171,7 @@ def block_size_filter(entity):
f(a, b, c)

tvm.testing.assert_allclose(
c.asnumpy(), np.dot(a_np.astype("int32"), b_np.T.astype("int32")), rtol=1e-5
c.numpy(), np.dot(a_np.astype("int32"), b_np.T.astype("int32")), rtol=1e-5
)

num_ops = 2 * l * m * n
2 changes: 1 addition & 1 deletion apps/topi_recipe/reduce/test_reduce_map.py
@@ -83,7 +83,7 @@ def test_reduce_map(in_shape, axis, keepdims, type="sum", test_id=0):

for _ in range(2):
fcuda(data_tvm, out_tvm)
tvm.testing.assert_allclose(out_tvm.asnumpy(), out_npy, rtol=4e-4, atol=4e-4)
tvm.testing.assert_allclose(out_tvm.numpy(), out_npy, rtol=4e-4, atol=4e-4)


if __name__ == "__main__":
2 changes: 1 addition & 1 deletion apps/topi_recipe/rnn/matexp.py
@@ -160,7 +160,7 @@ def check_device(target):
print("Time cost=%g" % tgap)
# correctness
if not SKIP_CHECK:
res_cuda = res_a.asnumpy()
res_cuda = res_a.numpy()
res_cmp = np.ones_like(res_np).astype("float64")
Whh_np = Whh_np.astype("float64")
for t in range(1, n_num_step):
2 changes: 1 addition & 1 deletion docs/deploy/hls.rst
@@ -79,7 +79,7 @@ We use two python scripts for this tutorial.
c = tvm.nd.array(np.zeros(n, dtype="float32"), dev)
fadd(a, b, c)
tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + b.asnumpy())
tvm.testing.assert_allclose(c.numpy(), a.numpy() + b.numpy())
Setup
2 changes: 1 addition & 1 deletion docs/dev/codebase_walkthrough.rst
@@ -169,7 +169,7 @@ The returned module, which can be thought of as a combination of a compiled func
b = tvm.nd.array(np.random.uniform(size=n).astype(B.dtype), dev)
c = tvm.nd.array(np.zeros(n, dtype=C.dtype), dev)
fadd(a, b, c)
output = c.asnumpy()
output = c.numpy()

Under the hood, TVM allocates device memory and manages memory transfers automatically. To do that, each backend needs to subclass ``DeviceAPI`` class, defined in ``include/tvm/runtime/device_api.h``, and override memory management methods to use device specific API. For example, the CUDA backend implements ``CUDADeviceAPI`` in ``src/runtime/cuda/cuda_device_api.cc`` to use ``cudaMalloc``, ``cudaMemcpy`` etc.

2 changes: 1 addition & 1 deletion docs/dev/debugger.rst
@@ -146,7 +146,7 @@ How to use Debugger?
m.set_input(**params)
# execute
m.run()
tvm_out = m.get_output(0, tvm.nd.empty(out_shape, dtype)).asnumpy()
tvm_out = m.get_output(0, tvm.nd.empty(out_shape, dtype)).numpy()

The outputs are dumped to a temporary folder in ``/tmp`` folder or the
folder specified while creating the runtime.
4 changes: 2 additions & 2 deletions docs/dev/index.rst
@@ -147,7 +147,7 @@ The main goal of TVM's runtime is to provide a minimal API for loading and execu
arr: tvm.runtime.NDArray = tvm.nd.array([1, 2, 3], device=tvm.cuda(0))
fun: tvm.runtime.PackedFunc = mod["addone"]
fun(a)
print(a.asnumpy())
print(a.numpy())
:py:class:`tvm.runtime.Module` encapsulates the result of compilation. A runtime.Module contains a GetFunction method to obtain PackedFuncs by name.
@@ -172,7 +172,7 @@ The above example only deals with a simple `addone` function. The code snippet b
# execute the model
gmod["run"]()
# get the output
result = gmod["get_output"](0).asnumpy()
result = gmod["get_output"](0).numpy()
The main take away is that runtime.Module and runtime.PackedFunc are sufficient to encapsulate both operator level programs (such as addone), as well as the end-to-end models.
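
A rough end-to-end sketch of the pattern described above, with outputs read back via the renamed method; here `lib` is assumed to be a factory module returned by tvm.relay.build(...), and the input name and shape are placeholders, none of which come from this diff:

    import numpy as np
    import tvm
    from tvm.contrib import graph_executor

    dev = tvm.cpu(0)
    # `lib` is an assumed compilation artifact from tvm.relay.build(...).
    gmod = graph_executor.GraphModule(lib["default"](dev))

    # Input name "data" and shape (1, 3, 224, 224) are placeholders.
    gmod.set_input("data", tvm.nd.array(np.zeros((1, 3, 224, 224), dtype="float32")))
    gmod.run()
    result = gmod.get_output(0).numpy()  # previously spelled .asnumpy()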

2 changes: 1 addition & 1 deletion docs/langref/relay_pattern.rst
@@ -446,7 +446,7 @@ with a single batch_norm op:
beta = node_map[self.beta][0]
gamma = node_map[self.gamma][0]
eps = node_map[self.eps][0]
return relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon = eps.data.asnumpy().item())[0]
return relay.op.nn.batch_norm(x, gamma, beta, mean, var, epsilon = eps.data.numpy().item())[0]
# A graph of arithmetic operators that are functional equivalent to batch_norm.
x = relay.var('x')
2 changes: 1 addition & 1 deletion python/tvm/auto_scheduler/search_task.py
@@ -234,7 +234,7 @@ def _save_buffer_to_file(buffer_name, buffer_data):
File name will be: {buffer_name}.{buffer_shape}_{buffer_data_type}.npy
"""
np_data = buffer_data.asnumpy()
np_data = buffer_data.numpy()

buffer_name += "."
for i in np_data.shape:
15 changes: 13 additions & 2 deletions python/tvm/contrib/sparse.py
@@ -16,6 +16,7 @@
# under the License.
"""Tensor and Operation class for computation declaration."""
# pylint: disable=invalid-name
import warnings
import numpy as _np
from tvm.runtime import ndarray as _nd
from tvm import te
@@ -81,11 +82,21 @@ def __init__(self, arg1, device=None, shape=None):
)

def asnumpy(self):
"""Construct a full matrix and convert it to numpy array. This API will be deprecated
in TVM v0.8 release. Please use `numpy` instead."""
warnings.warn(
"CSRNDArray.asnumpy() will be deprecated in TVM v0.8 release. "
"Please use CSRNDArray.numpy() instead.",
DeprecationWarning,
)
return self.numpy()

def numpy(self):
"""Construct a full matrix and convert it to numpy array."""
full = _np.zeros(self.shape, self.dtype)
ridx = _np.diff(self.indptr.asnumpy())
ridx = _np.diff(self.indptr.numpy())
ridx = _np.hstack((_np.ones((v,), itype) * i for i, v in enumerate(ridx)))
full[ridx, self.indices.asnumpy().astype(itype)] = self.data.asnumpy()
full[ridx, self.indices.numpy().astype(itype)] = self.data.numpy()
return full
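
A short usage sketch of the deprecation shim added above, assuming the module-level tvm.contrib.sparse.array helper and arbitrary matrix values: the old name still works but warns and forwards to the new one.

    import warnings
    import numpy as np
    import tvm
    from tvm.contrib import sparse

    dense = np.array([[1.0, 0.0], [0.0, 2.0]], dtype="float32")
    csr = sparse.array(dense, device=tvm.cpu(0))

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        old = csr.asnumpy()  # deprecated spelling: warns, then delegates to numpy()
    new = csr.numpy()        # preferred spelling after this commit
    assert np.array_equal(old, new)
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)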


4 changes: 2 additions & 2 deletions python/tvm/contrib/target/coreml.py
@@ -69,7 +69,7 @@ def _convert_softmax(builder, name, inputs, outputs, args, attrs):


def _convert_conv2d(builder, name, inputs, outputs, args, attrs):
weight = args[1].data.asnumpy()
weight = args[1].data.numpy()
if attrs["kernel_layout"] == "OIHW":
# convert to 'HWIO'
weight = weight.transpose([2, 3, 1, 0])
@@ -169,7 +169,7 @@ def visit_constant(self, const):
self.builder.add_load_constant_nd(
name=output,
output_name=output,
constant_value=const.data.asnumpy(),
constant_value=const.data.numpy(),
shape=const.data.shape,
)
self.buf_idx_ = self.buf_idx_ + 1
6 changes: 3 additions & 3 deletions python/tvm/contrib/target/onnx.py
@@ -684,7 +684,7 @@ def _get_opsets(self):
return opsets

def make_model(self):
""" Creates the onnx model from the graph """
"""Creates the onnx model from the graph"""
onnx_graph = onnx.helper.make_graph(
self._nodes, self._name, self._inputs, self._outputs, self._initializers
)
@@ -734,7 +734,7 @@ def _get_node_entry(cls, relay_node, name):
}

def convert_to_onnx(self, func):
""" Traverse Relay graph and generate a ONNX model"""
"""Traverse Relay graph and generate a ONNX model"""

self.visit(func)
self._add_output(self._node_dict[self.last_node])
@@ -826,7 +826,7 @@ def _add_params(self, node_entry, idx):
param_name in self._params
), "The parameter {0} is not present" "in params dict provided.".format(param_name)
value = self._params[param_name]
numpy_array = value.asnumpy()
numpy_array = value.numpy()
tensor = numpy_helper.from_array(numpy_array, param_name)
self._mc.add_initializers([tensor])
dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[numpy_array.dtype]
4 changes: 2 additions & 2 deletions python/tvm/driver/tvmc/runner.py
@@ -42,7 +42,7 @@

@register_parser
def add_run_parser(subparsers):
""" Include parser for 'run' subcommand """
"""Include parser for 'run' subcommand"""

parser = subparsers.add_parser("run", help="run a compiled module")
parser.set_defaults(func=drive_run)
@@ -420,6 +420,6 @@ def run_module(
outputs = {}
for i in range(num_outputs):
output_name = "output_{}".format(i)
outputs[output_name] = module.get_output(i).asnumpy()
outputs[output_name] = module.get_output(i).numpy()

return TVMCResult(outputs, times)
2 changes: 1 addition & 1 deletion python/tvm/relay/analysis/sparse_conv2d.py
@@ -85,7 +85,7 @@ def process_params(expr, params, block_size, sparsity_threshold, layout):
weight_names = _search_conv2d_op_weight(expr)
for name in weight_names:
name = str(name)
w_np = params[name].asnumpy()
w_np = params[name].numpy()
# currently only support conv2d_1*1
if not (
(w_np.shape[0] == 1 and w_np.shape[1] == 1)
2 changes: 1 addition & 1 deletion python/tvm/relay/analysis/sparse_dense.py
@@ -83,7 +83,7 @@ def process_params(expr, params, block_size, sparsity_threshold):
weight_names = _search_dense_op_weight(expr)
for name in weight_names:
name = str(name)
w_np = params[name].asnumpy()
w_np = params[name].numpy()
sparsity = 1.0 - (np.count_nonzero(w_np) / w_np.size)
if sparsity >= sparsity_threshold:
sparse_weight = sp.bsr_matrix(w_np, blocksize=block_size)