[BugFix]Fix reduce_mean/min/sum/prod, cumsum grad_op infershape bug (#46408)

* [BugFix]Fix reduce_mean/min/sum/prod, cumsum grad_op infershape bug

* fix typo

* fix typo
Aurelius84 authored Sep 23, 2022
1 parent f778470 commit 812e4b4
Showing 4 changed files with 26 additions and 18 deletions.
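For context: the crash this commit fixes appears when the cumsum/reduce axis is supplied as a Tensor rather than a Python int, so the grad op's InferShape cannot find the corresponding attribute. A minimal static-graph repro sketch, modeled on the new unit tests below (assuming Paddle 2.x; program and variable names are illustrative):

import numpy as np
import paddle

paddle.enable_static()
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.static.data('x', [3, 4], dtype='float32')
    axis = paddle.full([1], 1, dtype='int64')  # axis as a Tensor, not an int
    out = paddle.cumsum(x, axis=axis)
    # minimize() builds the backward graph, which runs the grad op's
    # InferShape; before this fix that failed because a Tensor-valued
    # axis lives in the op's Inputs rather than its Attrs.
    sgd = paddle.optimizer.SGD(learning_rate=0.)
    sgd.minimize(paddle.mean(out))

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup_prog)
exe.run(main_prog, feed={'x': np.random.rand(3, 4).astype('float32')},
        fetch_list=[out])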
6 changes: 1 addition & 5 deletions paddle/fluid/operators/cum_op.cc
@@ -72,13 +72,9 @@ class CumsumGradMaker : public framework::SingleGradOpMaker<T> {
     grad_op->SetType("cumsum");
     grad_op->SetInput("X", this->OutputGrad("Out"));
     grad_op->SetOutput("Out", this->InputGrad("X"));
-    grad_op->SetAttr("axis", PADDLE_GET_CONST(int, this->GetAttr("axis")));
-    grad_op->SetAttr("flatten",
-                     PADDLE_GET_CONST(bool, this->GetAttr("flatten")));
+    grad_op->SetAttrMap(this->Attrs());
     grad_op->SetAttr("reverse",
                      !PADDLE_GET_CONST(bool, this->GetAttr("reverse")));
-    grad_op->SetAttr("exclusive",
-                     PADDLE_GET_CONST(bool, this->GetAttr("exclusive")));
   }
 };

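The takeaway from this hunk: instead of copying each forward attribute by hand, the grad maker now clones the whole attribute map and only flips 'reverse', since the adjoint of an inclusive cumulative sum is a cumulative sum run in the opposite direction. Besides being shorter, SetAttrMap keeps the grad op in sync if attributes are later added to the forward op. A quick NumPy sanity check of the gradient identity (illustrative only, not part of the commit):

import numpy as np

n = 5
g = np.random.rand(n)         # upstream gradient dL/d(cumsum(x))
# Jacobian of y = cumsum(x) is lower-triangular ones: y[i] = sum_{j<=i} x[j]
J = np.tril(np.ones((n, n)))
# dL/dx[i] = sum_{j>=i} g[j], i.e. a cumsum of g in reverse order
reversed_cumsum = np.cumsum(g[::-1])[::-1]
assert np.allclose(J.T @ g, reversed_cumsum)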
32 changes: 19 additions & 13 deletions paddle/fluid/operators/reduce_ops/reduce_op.h
@@ -634,20 +634,26 @@ class ReduceGradOp : public framework::OperatorWithKernel {
                    "ReduceOp");
     auto x_dims = ctx->GetInputDim("X");
     auto x_rank = x_dims.size();
-    auto dims = ctx->Attrs().Get<std::vector<int>>("dim");
-    for (size_t i = 0; i < dims.size(); ++i) {
-      PADDLE_ENFORCE_LT(dims[i],
-                        x_rank,
-                        platform::errors::InvalidArgument(
-                            "The reduce dim index %d should be in the "
-                            "range [-dimension(X), dimension(X)], "
-                            "where dimension = %d. But received dim index = %d.",
-                            i,
-                            x_rank,
-                            dims[i]));
-      if (dims[i] < 0) dims[i] = x_rank + dims[i];
+    // TODO(dev): We should delete this InferShape and migrate it into
+    // UnchangedInferMeta. When 'dim' is a Variable, it does not exist
+    // in Attrs but in Inputs.
+    if (ctx->HasAttr("dim")) {
+      auto dims = ctx->Attrs().Get<std::vector<int>>("dim");
+      for (size_t i = 0; i < dims.size(); ++i) {
+        PADDLE_ENFORCE_LT(
+            dims[i],
+            x_rank,
+            platform::errors::InvalidArgument(
+                "The reduce dim index %d should be in the "
+                "range [-dimension(X), dimension(X)], "
+                "where dimension = %d. But received dim index = %d.",
+                i,
+                x_rank,
+                dims[i]));
+        if (dims[i] < 0) dims[i] = x_rank + dims[i];
+      }
     }
-    sort(dims.begin(), dims.end());
+
     auto x_grad_name = framework::GradVarName("X");
     if (ctx->HasOutput(x_grad_name)) {
       ctx->SetOutputDim(x_grad_name, x_dims);
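Skipping the dim checks entirely when the attribute is absent is safe because the grad op's output shape never depends on 'dim': as the trailing context lines show, X@GRAD simply takes X's shape. A quick dynamic-mode illustration of that invariant (assuming Paddle 2.x):

import paddle

x = paddle.rand([3, 4, 5])
x.stop_gradient = False
for axis in (0, 1, [0, 2]):
    y = paddle.sum(x, axis=axis)
    # the gradient w.r.t. x has x's shape no matter which axes were reduced
    (g,) = paddle.grad(paddle.mean(y), [x])
    assert g.shape == x.shape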
3 changes: 3 additions & 0 deletions python/paddle/fluid/tests/unittests/test_cumsum_op.py
@@ -356,6 +356,9 @@ def test_static_and_infer(self):
             relu_out = paddle.nn.functional.relu(linear_out)
             axis = paddle.full([1], 2, dtype='int64')
             out = paddle.cumsum(relu_out, axis=axis)
+            loss = paddle.mean(out)
+            sgd = paddle.optimizer.SGD(learning_rate=0.)
+            sgd.minimize(loss)

             exe = paddle.static.Executor(self.place)
             exe.run(starup_prog)
3 changes: 3 additions & 0 deletions python/paddle/fluid/tests/unittests/test_sum_op.py
@@ -543,6 +543,9 @@ def test_static_and_infer(self):
             linear = paddle.nn.Linear(x.shape[-1], 5)
             linear_out = linear(x)
             out = self.pd_api(linear_out, axis, keepdim=self.keepdim)
+
+            sgd = paddle.optimizer.SGD(learning_rate=0.)
+            sgd.minimize(paddle.mean(out))
             exe = paddle.static.Executor(self.place)
             exe.run(starup_prog)
             static_out = exe.run(feed={'x': self.x.numpy().astype('float32')},
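Both tests drive minimize() with SGD(learning_rate=0.) so that the backward graph, and with it the grad op's InferShape, is built and executed without perturbing the Linear weights, keeping the fetched forward output comparable against the inference result.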
