rename some python functions #114

Merged · 3 commits · Jun 26, 2017
6 changes: 3 additions & 3 deletions benchmark/python/sparse_op.py
@@ -92,7 +92,7 @@ def get_iter(path, data_shape, batch_size):
     for batch in train_iter:
         data = train_iter.getdata()
         csr_data.append(data)
-        dns_data.append(data.to_dense())
+        dns_data.append(data.todense())
         num_batch += 1
     bag_of_data = [csr_data, dns_data]
     num_repeat = 5
@@ -140,7 +140,7 @@ def bench_dot_forward(m, k, n, density, ctx, repeat):
     dns = mx.nd.random_uniform(shape=(k, n)).copyto(ctx)
     data_shape = (m, k)
     csr_data = rand_ndarray(data_shape, 'csr', density)
-    dns_data = csr_data.to_dense()
+    dns_data = csr_data.todense()
     rhs_dns_np = dns.asnumpy()
     lhs_csr_sp = sp.csr_matrix(dns_data.asnumpy())  # csr in scipy
     lhs_dns_np = lhs_csr_sp.todense()
@@ -169,7 +169,7 @@ def bench_dot_backward(m, k, n, density, ctx, repeat):
     dns = mx.nd.random_uniform(shape=(m, n)).copyto(ctx)
     data_shape = (m, k)
     csr_data = rand_ndarray(data_shape, 'csr', density)
-    dns_data = csr_data.to_dense()
+    dns_data = csr_data.todense()
     rhs_dns_np = dns.asnumpy()
     lhs_csr_sp = sp.csr_matrix(dns_data.asnumpy())
     lhs_dns_np = lhs_csr_sp.todense()
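
These benchmarks check MXNet's sparse dot against scipy as a reference. A minimal standalone sketch of that cross-check pattern, using only scipy/numpy (shapes here are illustrative, not taken from the benchmark):

import numpy as np
import scipy.sparse as sp

# Build a random CSR matrix and a dense right-hand side.
lhs_csr = sp.rand(100, 50, density=0.1, format='csr')
rhs_dns = np.random.uniform(size=(50, 20))

# scipy's dot produces the reference result that the MXNet
# benchmark compares its sparse kernels against.
reference = lhs_csr.dot(rhs_dns)
print(reference.shape)  # (100, 20)
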
4 changes: 2 additions & 2 deletions python/mxnet/ndarray.py
@@ -975,11 +975,11 @@ def backward(self, out_grad=None, retain_graph=False):
                 c_array(NDArrayHandle, ograd_handles),
                 ctypes.c_int(retain_graph)))

-    def to_csr(self):
+    def _to_csr(self):
         # pylint: disable=undefined-variable
         return cast_storage(self, storage_type='csr')

-    def to_rsp(self):
+    def _to_rsp(self):
         # pylint: disable=undefined-variable
         return cast_storage(self, storage_type='row_sparse')
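
With to_csr/to_rsp now underscore-private, external callers would convert storage through the public cast_storage operator that these helpers wrap (visible in the diff above). A minimal sketch, assuming an MXNet build from this sparse branch:

import mxnet as mx

dense = mx.nd.ones((4, 4))
# Public equivalents of the now-private _to_csr()/_to_rsp() helpers,
# both of which simply call cast_storage internally.
csr = mx.nd.cast_storage(dense, storage_type='csr')
rsp = mx.nd.cast_storage(dense, storage_type='row_sparse')
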
18 changes: 9 additions & 9 deletions python/mxnet/sparse_ndarray.py
@@ -86,19 +86,19 @@ class SparseNDArray(NDArray):
     for more details.
     """
     def __iadd__(self, other):
-        raise Exception('Not implemented for SparseND yet!')
+        raise NotImplementedError("SparseND doesn't support __iadd__")

     def __isub__(self, other):
-        raise Exception('Not implemented for SparseND yet!')
+        raise NotImplementedError("SparseND doesn't support __isub__")

     def __imul__(self, other):
-        raise Exception('Not implemented for SparseND yet!')
+        raise NotImplementedError("SparseND doesn't support __imul__")

     def __idiv__(self, other):
-        raise Exception('Not implemented for SparseND yet!')
+        raise NotImplementedError("SparseND doesn't support __idiv__")

     def __itruediv__(self, other):
-        raise Exception('Not implemented for SparseND yet!')
+        raise NotImplementedError("SparseND doesn't support __itruediv__")

     def __setitem__(self, key, value):
         """x.__setitem__(i, y) <=> x[i]=y
@@ -290,7 +290,7 @@ def asnumpy(self):
         """Return a dense ``numpy.ndarray`` object with value copied from this array

         """
-        return self.to_dense().asnumpy()
+        return self.todense().asnumpy()

     def astype(self, dtype):
         """Returns a copy of the array after casting to a specified type.
@@ -343,8 +343,8 @@ def copyto(self, other):
         else:
             raise TypeError('copyto does not support type ' + str(type(other)))

-    def to_dense(self):
-        return to_dense(self)
+    def todense(self):
+        return todense(self)

     def _aux_data(self, i, writable=False):
         """Get an NDArray referencing the ith aux data array associated with the SparseNDArray.
@@ -550,7 +550,7 @@ def row_sparse(values, indices, shape, ctx=None, dtype=None, indices_type=None):
     return result


-def to_dense(source):
+def todense(source):
     """Return a dense array representation of this SparseNDArray.

     Returns
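
The to_dense -> todense rename matches the scipy.sparse spelling already used elsewhere in this PR (e.g. lhs_csr_sp.todense() in the benchmark), so dense conversion reads the same across both libraries. A short scipy-only illustration (the MXNet line is hypothetical and commented out):

import numpy as np
import scipy.sparse as sp

sp_csr = sp.csr_matrix(np.eye(3))
sp_dense = sp_csr.todense()       # scipy spelling
# mx_dense = mx_csr.todense()     # same spelling in MXNet after this PR
print(sp_dense.shape)             # (3, 3)
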
6 changes: 3 additions & 3 deletions tests/nightly/dist_sync_kvstore.py
@@ -33,7 +33,7 @@ def init_kv():
 def init_kv_rsp():
     kv = mx.kv.create('dist_sync')
     # init kv
-    kv.init(rsp_keys, [mx.nd.ones(shape).to_rsp()] * len(rsp_keys))
+    kv.init(rsp_keys, [mx.nd.ones(shape)._to_rsp()] * len(rsp_keys))
     # kv.init(99, mx.nd.ones(big_shape))
     my_rank = kv.rank
     nworker = kv.num_workers
@@ -68,11 +68,11 @@ def test_sync_push_pull_row_sparse():
             v[my_row][col] = my_rank + 1

     for i in range(nrepeat):
-        kv.push('9', v.to_rsp())
+        kv.push('9', v._to_rsp())
         # kv.push(99, mx.nd.ones(big_shape)*(my_rank+1))

         # pull a subset of rows this worker is interested in
-        val = v.copyto(mx.cpu()).to_rsp()
+        val = v.copyto(mx.cpu())._to_rsp()
         kv.pull('9', out=val)

         expected = mx.nd.zeros(shape)
4 changes: 2 additions & 2 deletions tests/python/unittest/test_optimizer.py
@@ -36,15 +36,15 @@ def compare_optimizer(opt1, opt2, shape, w_stype='default', g_stype='default'):
         w1 = w2.copyto(default_context())
     elif w_stype == 'row_sparse':
         w2 = rand_ndarray(shape, w_stype, density=1)
-        w1 = w2.copyto(default_context()).to_dense()
+        w1 = w2.copyto(default_context()).todense()
     else:
         raise Exception("type not supported yet")
     if g_stype == 'default':
         g2 = mx.random.uniform(shape=shape, ctx=default_context())
         g1 = g2.copyto(default_context())
     elif g_stype == 'row_sparse':
         g2 = rand_ndarray(shape, g_stype)
-        g1 = g2.copyto(default_context()).to_dense()
+        g1 = g2.copyto(default_context()).todense()
     else:
         raise Exception("type not supported yet")
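
compare_optimizer keeps a dense mirror (w1/g1) of each sparse array (w2/g2) so the two optimizers' updates can be compared elementwise. A minimal sketch of that mirroring step, assuming the rand_ndarray and default_context helpers live in mxnet.test_utils on this branch:

from mxnet.test_utils import default_context, rand_ndarray

# Sparse weight plus a dense copy holding identical values; after each
# optimizer step the two are compared for numerical agreement.
w2 = rand_ndarray((4, 5), 'row_sparse', density=1)
w1 = w2.copyto(default_context()).todense()
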
16 changes: 8 additions & 8 deletions tests/python/unittest/test_sparse_ndarray.py
@@ -262,10 +262,10 @@ def check_binary(fn):
             rshape[bdim-i-1] = 1
         lhs = np.random.uniform(0, 1, size=lshape)
         rhs = np.random.uniform(0, 1, size=rshape)
-        lhs_nd_csr = mx.nd.array(lhs).to_csr()
-        rhs_nd_csr = mx.nd.array(rhs).to_csr()
-        lhs_nd_rsp = mx.nd.array(lhs).to_rsp()
-        rhs_nd_rsp = mx.nd.array(rhs).to_rsp()
+        lhs_nd_csr = mx.nd.array(lhs)._to_csr()
+        rhs_nd_csr = mx.nd.array(rhs)._to_csr()
+        lhs_nd_rsp = mx.nd.array(lhs)._to_rsp()
+        rhs_nd_rsp = mx.nd.array(rhs)._to_rsp()
         for lhs_nd, rhs_nd in [(lhs_nd_csr, rhs_nd_csr), (lhs_nd_rsp, rhs_nd_rsp)]:
             assert_allclose(fn(lhs, rhs),
                             fn(lhs_nd, rhs_nd).asnumpy(),
@@ -290,8 +290,8 @@ def check(fn):
     ndim = 2
     shape = np.random.randint(1, 6, size=(ndim,))
     npy_nd = np.random.normal(0, 1, size=shape)
-    csr_nd = mx.nd.array(npy_nd).to_csr()
-    rsp_nd = mx.nd.array(npy_nd).to_rsp()
+    csr_nd = mx.nd.array(npy_nd)._to_csr()
+    rsp_nd = mx.nd.array(npy_nd)._to_rsp()
     for sparse_nd in [csr_nd, rsp_nd]:
         assert_allclose(
             fn(npy_nd),
@@ -314,8 +314,8 @@ def check(fn):

 def test_sparse_nd_negate():
     npy = np.random.uniform(-10, 10, rand_shape_2d())
-    arr_csr = mx.nd.array(npy).to_csr()
-    arr_rsp = mx.nd.array(npy).to_rsp()
+    arr_csr = mx.nd.array(npy)._to_csr()
+    arr_rsp = mx.nd.array(npy)._to_rsp()
     for arr in [arr_csr, arr_rsp]:
         assert_almost_equal(npy, arr.asnumpy())
         assert_almost_equal(-npy, (-arr).asnumpy())
6 changes: 3 additions & 3 deletions tests/python/unittest/test_sparse_operator.py
@@ -84,7 +84,7 @@ def test_dns_to_rsp(shape):

 def test_csr_to_dns(shape):
     csr, (indptr, indices, values) = rand_sparse_ndarray(shape, 'csr')
-    mx_dns = csr.to_dense()
+    mx_dns = csr.todense()
     np_dns = sp.csr_matrix((values, indices, indptr), shape).todense()
     assert_almost_equal(mx_dns.asnumpy(), np_dns)
@@ -105,7 +105,7 @@ def test_dot_csr(lhs_shape, rhs_shape, rhs_stype, trans_lhs):
     lhs_dns = rand_ndarray(lhs_shape, 'default')
     lhs_nd = mx.nd.cast_storage(lhs_dns, storage_type='csr')
     rhs_nd = rand_ndarray(rhs_shape, rhs_stype, density=1)
-    rhs_dns = rhs_nd if rhs_stype == 'default' else rhs_nd.to_dense()
+    rhs_dns = rhs_nd if rhs_stype == 'default' else rhs_nd.todense()
     out = mx.nd.dot(lhs_nd, rhs_dns, transpose_a=trans_lhs)
     assert out.storage_type == 'default'
     out_expected = mx.nd.dot(lhs_dns, rhs_dns, transpose_a=trans_lhs)
Expand Down Expand Up @@ -148,7 +148,7 @@ def test_sparse_embedding():
np_weight = np.random.uniform(-0.01, 0.01, arg_map["embed_weight"].shape)
np_onehot = np.zeros((batch, in_dim))
np_onehot[np.arange(batch), np_data] = 1.0
nd_onehot = mx.nd.array(np_onehot).to_csr()
nd_onehot = mx.nd.array(np_onehot)._to_csr()
# forward
arg_map["data"][:] = nd_onehot
arg_map["embed_weight"][:] = np_weight
Expand Down