Revert "Cherry pick getitem/setitem 0d (#53125)" (#53265)
This reverts commit a79c04f.
zoooo0820 authored Apr 24, 2023
1 parent bfd1dd7 commit 50f6121
Showing 26 changed files with 110 additions and 440 deletions.
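
For context: this revert restores the convention that fully-indexed slices produce tensors of shape [1] rather than 0-D scalars. A hedged dygraph sketch of the restored behavior (illustrative, not taken from the diff; assumes a Paddle build with this revert applied):

```python
import paddle

x = paddle.rand([2, 3, 4])
out = x[0, 1, 1]       # every axis decreased by an integer index
print(out.shape)       # [1] after this revert; #53125 had made it [] (0-D)
print(out.numpy()[0])  # element access therefore needs an explicit [0]
```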
23 changes: 11 additions & 12 deletions paddle/fluid/framework/attribute_checker.h
@@ -73,10 +73,10 @@ class TypedAttrVarInfoChecker {
         platform::errors::InvalidArgument(
             "Required Attribute with Variable type shall not be nullptr."));
     auto shape = var_desc->GetShape();
-    PADDLE_ENFORCE_LE(shape.size(),
+    PADDLE_ENFORCE_EQ(shape.size(),
                       1U,
                       platform::errors::InvalidArgument(
-                          "Required shape rank of Attribute(%s) <= 1, "
+                          "Required shape rank of Attribute(%s) == 1, "
                           "but received rank == %s",
                           var_desc->Name(),
                           shape.size()));
@@ -105,21 +105,20 @@ class TypedAttrVarInfoChecker {
         platform::errors::InvalidArgument(
             "Required Attribute with Variable type shall not be nullptr."));
     auto shape = var_desc->GetShape();
-    PADDLE_ENFORCE_LE(shape.size(),
+    PADDLE_ENFORCE_EQ(shape.size(),
                       1U,
                       platform::errors::InvalidArgument(
-                          "Required shape rank of Attribute(%s) <= 1, "
+                          "Required shape rank of Attribute(%s) == 1, "
                           "but received rank == %s",
                           var_desc->Name(),
                           shape.size()));
-    PADDLE_ENFORCE_EQ(
-        shape.size() == 0U || shape[0] == 1U || shape[0] == -1,
-        true,
-        platform::errors::InvalidArgument(
-            "Required shape is (), or shape[0] of Attribute(%s) == 1 or -1, "
-            "but received shape[0] == %s",
-            var_desc->Name(),
-            shape[0]));
+    PADDLE_ENFORCE_EQ(shape[0] == 1U || shape[0] == -1,
+                      true,
+                      platform::errors::InvalidArgument(
+                          "Required shape[0] of Attribute(%s) == 1 or -1, "
+                          "but received shape[0] == %s",
+                          var_desc->Name(),
+                          shape[0]));
   }
 }
};
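In effect, the reverted checker once again accepts only rank-1 attribute Variables whose single dimension is 1 or -1. A pure-Python mirror of the restored condition (an illustrative sketch, not a Paddle API):

```python
def check_attr_var_shape(shape):
    # shape: the list of ints returned by VarDesc::GetShape() in the C++ checker
    assert len(shape) == 1, f"Required shape rank == 1, but received rank == {len(shape)}"
    assert shape[0] in (1, -1), f"Required shape[0] == 1 or -1, but received {shape[0]}"
```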
4 changes: 2 additions & 2 deletions paddle/fluid/inference/tensorrt/engine.h
@@ -86,10 +86,10 @@ template <typename T>
 nvinfer1::Dims Vec2TRT_Dims(const std::vector<T>& shape,
                             std::string input,
                             bool with_dynamic_shape = false) {
-  PADDLE_ENFORCE_GE(shape.size(),
+  PADDLE_ENFORCE_GT(shape.size(),
                     0UL,
                     platform::errors::InvalidArgument(
-                        "TensorRT's tensor input requires at least 0 "
+                        "TensorRT's tensor input requires at least 1 "
                         "dimensions, but input %s has %d dims.",
                         input,
                         shape.size()));
11 changes: 11 additions & 0 deletions paddle/fluid/pybind/eager_method.cc
@@ -923,6 +923,17 @@ static PyObject* tensor__getitem_index_not_tensor(TensorObject* self,
   }
 
   if (!none_axes.empty()) {
+    // Deal with cases when all axes are decreased.
+    // After slice, the shape of out is [1], which should have been
+    // [], but Paddle doesn't support scalar.
+    // In order to ensure the correctness of the final shape of out,
+    // one dimension of out needs to be decreased.
+    // For example:
+    //    # x.shape: (2,3,4)
+    //    out = x[0, 1, 1, None] # out.shape : (1)
+    if (static_cast<int>(decrease_axis.size()) == tensor->dims().size()) {
+      none_axes.pop_back();
+    }
     if (!none_axes.empty()) {
       paddle::Tensor new_out;
       {
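The example from the restored comment, spelled out as a runnable sketch (assumes dygraph mode on this branch):

```python
import paddle

x = paddle.rand([2, 3, 4])
out = x[0, 1, 1, None]  # all three axes decreased, then one None axis requested
print(out.shape)        # [1]: popping a none_axis keeps the result rank-1
                        # instead of producing shape [1, 1]
```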
12 changes: 12 additions & 0 deletions paddle/fluid/pybind/imperative.cc
@@ -1068,6 +1068,18 @@ void BindImperative(py::module *m_ptr) {
           tracer->TraceOp(op_type, ins, outs, std::move(attrs));
         }
         if (!none_axes.empty()) {
+          // Deal with cases when all axes are decreased.
+          // After slice, the shape of out is [1], which should have been
+          // [], but Paddle doesn't support scalar.
+          // In order to ensure the correctness of the final shape of out,
+          // one dimension of out needs to be decreased.
+          // For example:
+          //    # x.shape: (2,3,4)
+          //    out = x[0, 1, 1, None] # out.shape : (1)
+          if (static_cast<int>(decrease_axis.size()) ==
+              tensor->dims().size()) {
+            none_axes.pop_back();
+          }
           if (!none_axes.empty()) {
             // Deal with cases that decrease_axes is not empty
             // For example:
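This is the same guard as in `eager_method.cc` above, mirrored into the legacy `imperative` dygraph path; the sketch shown after that hunk applies here unchanged.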
3 changes: 3 additions & 0 deletions paddle/phi/infermeta/unary.cc
@@ -3927,6 +3927,9 @@ void StridedSliceRawInferMeta(const MetaTensor& x,
         new_out_shape.push_back(out_dims[i]);
       }
     }
+    if (new_out_shape.size() == 0) {
+      new_out_shape.push_back(1);
+    }
     out_dims = phi::make_ddim(new_out_shape);
   }
   VLOG(4) << "out_dims: " << out_dims;
6 changes: 6 additions & 0 deletions paddle/phi/kernels/funcs/slice_utils.h
@@ -203,6 +203,12 @@ inline DDim GetDecreasedDims(const DDim slice_dims,
       }
     }
 
+    // NOTE(liym27): Paddle does not support that the rank of Tensor is 0, and
+    // uses [1] instead.
+    if (new_shape.size() == 0) {
+      new_shape.push_back(1);
+    }
+
     decreased_dims = phi::make_ddim(new_shape);
   }
   return decreased_dims;
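A pure-Python mirror of the restored fallback in `GetDecreasedDims` (illustrative sketch; parameter semantics assumed from the surrounding function):

```python
def get_decreased_dims(slice_dims, decrease_axes):
    # Drop each decreased (size-1) axis; if everything was dropped, pad the
    # shape back to [1], since this branch does not support 0-D tensors.
    keep = [d for i, d in enumerate(slice_dims) if i not in set(decrease_axes)]
    return keep or [1]
```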
5 changes: 0 additions & 5 deletions paddle/phi/kernels/xpu/set_value_grad_kernel.cc
@@ -266,11 +266,6 @@ void SetValueGradImpl(const Context& dev_ctx,
         {fake_value_grad_dims.Get(), fake_value_grad_dims.size()},
         static_cast<T>(0));
     auto value_grad_dims_vec = phi::vectorize<int64_t>(value_grad_dims);
-    // for value is a 0-D Tensor
-    if (value_grad_dims.size() == 0) {
-      value_grad_dims_vec =
-          phi::vectorize<int64_t>(phi::make_ddim(std::vector<int>({1})));
-    }
     for (auto offset : offsets) {
       for (int i = 0; i < out_dims_size; i++) {
         slice_end[i] = offset[i] + fake_value_grad_dims[i];
@@ -70,7 +70,9 @@ def is_output_compatible(self, dist_op):
             if i not in decrease_axis:
                 ref_indices.append(i)
         if ref_indices == []:
-            assert len(out_dims_mapping) == 0
+            assert len(out_dims_mapping) == 1
+            if is_dim_shard(out_dims_mapping[0]):
+                return False
         else:
             for i in range(len(out_dims_mapping)):
                 ref_index = ref_indices[i]
@@ -140,7 +142,9 @@ def update_dims_mapping(self, dist_op):
                 ref_indices.append(i)
 
         if ref_dims_mapping == []:
             ref_dims_mapping = [-1]
+            assert len(ref_dims_mapping) == len(out_dims_mapping)
+            assert ref_dims_mapping[0] == out_dims_mapping[0]
             changed = False
         else:
             assert len(ref_dims_mapping) == len(out_dims_mapping)
4 changes: 2 additions & 2 deletions python/paddle/fft.py
@@ -1371,7 +1371,7 @@ def fftshift(x, axes=None, name=None):
     elif isinstance(axes, int):
         shifts = shape[axes] // 2
     else:
-        shifts = paddle.stack([shape[ax] // 2 for ax in axes])
+        shifts = paddle.concat([shape[ax] // 2 for ax in axes])
     return paddle.roll(x, shifts, axes, name=name)


@@ -1416,7 +1416,7 @@ def ifftshift(x, axes=None, name=None):
     elif isinstance(axes, int):
         shifts = -shape[axes] // 2
     else:
-        shifts = paddle.stack([-shape[ax] // 2 for ax in axes])
+        shifts = paddle.concat([-shape[ax] // 2 for ax in axes])
     return paddle.roll(x, shifts, axes, name=name)


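Why `concat` rather than `stack`: on this branch `paddle.shape(x)[ax]` is a shape-[1] tensor, so concatenating the per-axis halves yields the flat 1-D `shifts` vector `paddle.roll` expects, whereas `stack` would build a [len(axes), 1] tensor. A hedged sketch:

```python
import paddle

x = paddle.rand([4, 6])
shape = paddle.shape(x)  # 1-D int32 tensor holding [4, 6]
axes = (0, 1)
shifts = paddle.concat([shape[ax] // 2 for ax in axes])  # shape [2]
y = paddle.roll(x, shifts, axes)  # the same roll fftshift performs
```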
@@ -43,7 +43,7 @@ def test_tensor_from_numpy(self):
         np.testing.assert_array_equal(var2.numpy(), data_np)
         data_np[0][0] = -1
         self.assertEqual(data_np[0][0], -1)
-        self.assertNotEqual(var2[0][0].numpy(), -1)
+        self.assertNotEqual(var2[0][0].numpy()[0], -1)
         self.assertFalse(np.array_equal(var2.numpy(), data_np))


8 changes: 4 additions & 4 deletions python/paddle/fluid/tests/unittests/test_kthvalue_op.py
@@ -140,16 +140,16 @@ def test_nan_in_cpu_kernel():
             nan_position = 100
             self.x[0, nan_position, 2] = float('nan')
             v, inds = self.x.kthvalue(k=200, axis=1)
-            self.assertTrue(np.isnan(v[0, 2].numpy()))
-            self.assertEqual(inds[0, 2].numpy(), nan_position)
+            self.assertTrue(np.isnan(v[0, 2].numpy()[0]))
+            self.assertEqual(inds[0, 2].numpy()[0], nan_position)
 
         def test_nan_in_gpu_kernel():
             paddle.set_device('gpu')
             nan_position = 100
             self.x[0, nan_position, 2] = float('nan')
             v, inds = self.x.kthvalue(k=200, axis=1)
-            self.assertTrue(np.isnan(v[0, 2].numpy()))
-            self.assertEqual(inds[0, 2].numpy(), nan_position)
+            self.assertTrue(np.isnan(v[0, 2].numpy()[0]))
+            self.assertEqual(inds[0, 2].numpy()[0], nan_position)
 
         test_nan_in_cpu_kernel()
         if fluid.core.is_compiled_with_cuda():
2 changes: 1 addition & 1 deletion python/paddle/fluid/tests/unittests/test_set_value_op.py
@@ -1590,7 +1590,7 @@ def test_inplace(self):
         a.stop_gradient = False
         b = a[:]
         c = b
-        b[paddle.zeros([], dtype='int32')] = 1.0
+        b[paddle.to_tensor(0)] = 1.0
 
         self.assertTrue(id(b) == id(c))
         np.testing.assert_array_equal(b.numpy(), c.numpy())
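On this branch `paddle.to_tensor(0)` yields a shape-[1] integer tensor, which is the Tensor-index form the in-place `set_value` path accepts. A brief hedged sketch:

```python
import paddle

idx = paddle.to_tensor(0)  # shape [1] here; paddle.zeros([]) was the reverted 0-D form
b = paddle.ones([2, 3])
b[idx] = 1.0               # __setitem__ with a Tensor index routes through set_value
```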
4 changes: 2 additions & 2 deletions python/paddle/fluid/tests/unittests/test_slice_op.py
@@ -541,8 +541,8 @@ class TestSliceAPI(unittest.TestCase):
     def test_1(self):
         with paddle_static_guard():
             input = np.random.random([3, 4, 5, 6]).astype("float64")
-            minus_1 = paddle.tensor.fill_constant([], "int32", -1)
-            minus_3 = paddle.tensor.fill_constant([], "int64", -3)
+            minus_1 = paddle.tensor.fill_constant([1], "int32", -1)
+            minus_3 = paddle.tensor.fill_constant([1], "int64", -3)
             starts = paddle.static.data(
                 name='starts', shape=[1, 3], dtype="float32"
             )
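In static graph mode these filled constants feed the slice op's Variable-typed attributes, which (per the `attribute_checker.h` hunk above) must again be rank-1. A hedged usage sketch (hypothetical names, not the test's full body):

```python
import paddle

paddle.enable_static()
x = paddle.static.data(name='x', shape=[3, 4, 5, 6], dtype='float64')
minus_1 = paddle.tensor.fill_constant([1], 'int32', -1)  # shape [1], not []
out = paddle.slice(x, axes=[3], starts=[0], ends=[minus_1])  # Tensor-valued bound
```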
11 changes: 6 additions & 5 deletions python/paddle/fluid/tests/unittests/test_var_base.py
@@ -604,7 +604,8 @@ def _test_slice(self):

         nw = w[1, 1, 1]
 
-        self.assertEqual(len(nw.shape), 0)
+        self.assertEqual(len(nw.shape), 1)
+        self.assertEqual(nw.shape[0], 1)
 
         nw = w[:, :, :-1]
         self.assertEqual((784, 100, 99), tuple(nw.shape))
@@ -704,10 +705,10 @@ def _test_slice_for_tensor_attr(self):

         var = paddle.to_tensor(tensor_array)
 
-        one = paddle.ones(shape=[], dtype="int32")
-        two = paddle.full(shape=[], fill_value=2, dtype="int32")
-        negative_one = paddle.full(shape=[], fill_value=-1, dtype="int32")
-        four = paddle.full(shape=[], fill_value=4, dtype="int32")
+        one = paddle.ones(shape=[1], dtype="int32")
+        two = paddle.full(shape=[1], fill_value=2, dtype="int32")
+        negative_one = paddle.full(shape=[1], fill_value=-1, dtype="int32")
+        four = paddle.full(shape=[1], fill_value=4, dtype="int32")
 
         var = fluid.dygraph.to_variable(tensor_array)
         var1 = var[0, one, one]
3 changes: 2 additions & 1 deletion python/paddle/fluid/tests/unittests/test_variable.py
@@ -132,7 +132,8 @@ def _test_slice(self, place):

         nw = w[1, 1, 1]
 
-        self.assertEqual(len(nw.shape), 0)
+        self.assertEqual(len(nw.shape), 1)
+        self.assertEqual(nw.shape[0], 1)
 
         nw = w[:, :, :-1]
         self.assertEqual((784, 100, 99), nw.shape)
6 changes: 3 additions & 3 deletions python/paddle/fluid/tests/unittests/test_while_op.py
@@ -192,9 +192,9 @@ def test_outputs_exists_inputs(self):
         with fluid.program_guard(main_program, startup_program):
 
             def func(x):
-                s = paddle.zeros([])
-                i = paddle.ones([])
-                max_len = paddle.shape(x)
+                s = paddle.zeros([1])
+                i = paddle.ones([1])
+                max_len = paddle.shape(x)[0]
 
                 def cond(i, s, x):
                     return i < max_len
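A self-contained version of the reverted helper (hedged sketch: the loop body is illustrative rather than the test's actual body, and `paddle.static.nn.while_loop` is assumed):

```python
import paddle

paddle.enable_static()

def func(x):
    s = paddle.zeros([1])         # 1-D accumulators; this branch has no 0-D tensors
    i = paddle.ones([1])
    max_len = paddle.shape(x)[0]  # the [0] keeps the bound a shape-[1] tensor

    def cond(i, s, x):
        return i < max_len        # elementwise compare of two shape-[1] tensors

    def body(i, s, x):            # illustrative body, not from the test
        return i + 1, s + x[0], x

    i, s, x = paddle.static.nn.while_loop(cond, body, [i, s, x])
    return s
```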
(Diffs for the remaining changed files were not loaded.)