diff --git a/src/operator/tensor/indexing_op.h b/src/operator/tensor/indexing_op.h
index 7b6c16a0714f..5449fbe4afbd 100644
--- a/src/operator/tensor/indexing_op.h
+++ b/src/operator/tensor/indexing_op.h
@@ -66,8 +66,8 @@ enum QuantizedEmbeddingOpResource {kTempSpace};
 struct SparseEmbeddingParam: public dmlc::Parameter<SparseEmbeddingParam> {
-  int input_dim;
-  int output_dim;
+  index_t input_dim;
+  index_t output_dim;
   int dtype;
   bool deterministic;
   DMLC_DECLARE_PARAMETER(SparseEmbeddingParam) {
@@ -89,8 +89,8 @@ struct SparseEmbeddingParam: public dmlc::Parameter<SparseEmbeddingParam> {
 };
 
 struct EmbeddingParam: public dmlc::Parameter<EmbeddingParam> {
-  int input_dim;
-  int output_dim;
+  index_t input_dim;
+  index_t output_dim;
   int dtype;
   bool sparse_grad;
   DMLC_DECLARE_PARAMETER(EmbeddingParam) {
diff --git a/tests/nightly/test_large_array.py b/tests/nightly/test_large_array.py
index edf796c1a947..8b36d09cbaf8 100644
--- a/tests/nightly/test_large_array.py
+++ b/tests/nightly/test_large_array.py
@@ -38,6 +38,7 @@
 SMALL_X = 100
 SMALL_Y = 50
 LARGE_SIZE = LARGE_X * SMALL_Y
+LARGE_TENSOR_SHAPE = 2**32
 
 
 def test_nn():
@@ -467,6 +468,17 @@ def check_col2im():
         assert res.shape[2] == 2
         assert res.shape[3] == 2
         assert res.shape[4] == 1
 
+    def check_embedding():
+        data = nd.random_normal(shape=(LARGE_TENSOR_SHAPE, 1))
+        weight = nd.random_normal(shape=(LARGE_TENSOR_SHAPE, 1))
+        input_dim = LARGE_TENSOR_SHAPE
+        output_dim = 1
+
+        out = nd.Embedding(data=data, weight=weight, input_dim=input_dim, output_dim=output_dim)
+
+        assert out.shape[0] == LARGE_TENSOR_SHAPE
+        assert out.shape[1] == 1
+        assert out.shape[2] == 1
+
     check_gluon_embedding()
     check_fully_connected()
@@ -488,6 +500,7 @@ def check_col2im():
     check_l2_normalization()
     check_instance_norm()
     check_col2im()
+    check_embedding()
 
 
 def test_tensor():
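
Note (not part of the patch above): a minimal sketch of what the int -> index_t widening enables, assuming an MXNet build with int64 (large-tensor) support. It passes an input_dim larger than INT32_MAX to the stock nd.Embedding op, similar to the nightly check_embedding() added above but with only a handful of lookup indices; the names INPUT_DIM and OUTPUT_DIM are local to this sketch, and the full-size weight still needs roughly 16 GB of memory.

# Illustrative only: exercises nd.Embedding with an input_dim that no longer
# fits in a 32-bit int, which the widened index_t parameters are meant to accept.
from mxnet import nd

INPUT_DIM = 2**32       # exceeds the range of a 32-bit int
OUTPUT_DIM = 1

data = nd.array([0, 1, 2])                                 # a few row indices to look up
weight = nd.random_normal(shape=(INPUT_DIM, OUTPUT_DIM))   # ~16 GB of float32
out = nd.Embedding(data=data, weight=weight,
                   input_dim=INPUT_DIM, output_dim=OUTPUT_DIM)

# Output shape is data.shape + (output_dim,)
assert out.shape == (3, OUTPUT_DIM)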