[Zero-Dim] correct some code to adapt to 0D Tensor #51562

Merged
merged 1 commit on Mar 14, 2023
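
The pattern applied throughout this PR replaces indexing-based scalar extraction (value.numpy()[0]), which assumes a shape-[1] tensor, with float(value) or value.numpy().item(), both of which also work once reductions such as paddle.mean return 0-D tensors. A minimal sketch of the idea (illustrative values only, assuming a Paddle build with 0-D tensor support):

    import paddle

    x = paddle.to_tensor([1.0, 2.0, 3.0])
    loss = paddle.mean(x)            # 0-D tensor (shape []) on builds with 0-D support

    # Old pattern: assumes shape [1]; raises IndexError once loss is 0-D.
    # value = loss.numpy()[0]

    # Patterns used in this PR: valid for both shape [] and shape [1].
    value = float(loss)              # plain Python float
    value = loss.numpy().item()      # equivalent, via NumPy scalar extraction
    print(value)                     # 2.0
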
21 changes: 8 additions & 13 deletions python/paddle/fluid/dygraph/learning_rate_scheduler.py
@@ -92,12 +92,10 @@ def state_dict(self):
continue
value = self.__dict__[key]
if isinstance(value, Variable):
assert value.shape == [
1
], "shape of Variable in state_dict must be [1] {}".format(
value.shape
)
value = value.numpy()[0]
assert (
value.size == 1
), "size of Variable in state_dict must be 1"
value = float(value)
state_dict[key] = value

return state_dict
@@ -857,7 +855,7 @@ class ReduceLROnPlateau(LearningRateDecay):
# adjust learning rate according to avg_loss
reduce_lr.step(avg_loss)
lr = adam.current_step_lr()
print("current avg_loss is %s, current lr is %s" % (avg_loss.numpy()[0], lr))
print("current avg_loss is %s, current lr is %s" % (float(avg_loss), lr))

"""

@@ -979,14 +977,11 @@ def step(self, loss):
)
if self.learning_rate - new_lr > self.eps:
if self.verbose:
old_lr = (
self.learning_rate.numpy()[0]
if isinstance(self.learning_rate, Variable)
else self.learning_rate
)
print(
'Epoch {}: reducing learning rate from {} to {}.'.format(
self.epoch_num, old_lr, new_lr.numpy()[0]
self.epoch_num,
float(self.learning_rate),
float(new_lr),
)
)
self.learning_rate = new_lr
4 changes: 2 additions & 2 deletions python/paddle/fluid/layers/control_flow.py
@@ -1150,7 +1150,7 @@ def body(i, ten):
)

if in_dygraph_mode():
now_cond = pre_cond.numpy()[0]
now_cond = pre_cond.numpy().item()
while now_cond:
output_vars = body(*loop_vars)
if not isinstance(output_vars, (list, tuple)):
@@ -1160,7 +1160,7 @@
"body in while_loop should return the same arity "
"(length and structure) and types as loop_vars"
)
now_cond = cond(*output_vars).numpy()[0]
now_cond = cond(*output_vars).numpy().item()
map_structure(assign_skip_lod_tensor_array, output_vars, loop_vars)
return loop_vars
else:
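
In eager (dygraph) mode the loop condition above may now be a 0-D boolean tensor, so it is read with .numpy().item() instead of .numpy()[0]. A small sketch of the difference, assuming a Paddle build with 0-D tensor support:

    import paddle

    i = paddle.full([], 9, dtype='int64')
    ten = paddle.full([], 10, dtype='int64')
    pre_cond = i < ten                  # 0-D bool tensor

    # pre_cond.numpy()[0] would fail here: the NumPy array has shape ().
    now_cond = pre_cond.numpy().item()  # works for shape [] as well as shape [1]
    print(now_cond)                     # True
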
6 changes: 3 additions & 3 deletions python/paddle/fluid/optimizer.py
@@ -596,19 +596,19 @@ def current_step_lr(self):
"""
current_lr = self._global_learning_rate()
if isinstance(current_lr, framework.Variable):
return self._global_learning_rate().numpy()[0]
return float(current_lr)

if isinstance(self._learning_rate, float):
return self._learning_rate
elif isinstance(self._learning_rate, _LearningRateEpochDecay):
step_lr = self._learning_rate()
return step_lr.numpy()[0]
return float(step_lr)
else:
step_lr = self._learning_rate.step()
if isinstance(step_lr, (float, int)):
return step_lr
else:
return step_lr.numpy()[0]
return float(step_lr)

def _global_learning_rate(self, program=None):
"""
@@ -41,7 +41,7 @@ def dyfunc_empty_nonlocal(x):


def dyfunc_with_if_else(x_v, label=None):
if paddle.mean(x_v).numpy()[0] > 5:
if paddle.mean(x_v).numpy() > 5:
x_v = x_v - 1
else:
x_v = x_v + 1
@@ -61,7 +61,7 @@ def dyfunc_with_if_else2(x, col=100):
# `x` is Tensor, `col` is not Tensor, and `col` is the return value of `true_fn` after transformed.
# col = -1
col = fluid.layers.fill_constant(shape=[1], value=-1, dtype="int64")
if paddle.mean(x).numpy()[0] > x.numpy()[row][col]:
if paddle.mean(x).numpy() > x.numpy()[row][col]:
y = paddle.nn.functional.relu(x)
else:
x_pow = paddle.pow(x, 2)
@@ -89,14 +89,14 @@ def false_fn_0(q, x, y):
m = x + 2
n = x + 3
return q, x, y, z
q, x, y, z = paddle.static.nn.cond(paddle.mean(x)[0] < 5, lambda :
q, x, y, z = paddle.static.nn.cond(paddle.mean(x) < 5, lambda :
paddle.jit.dy2static.convert_call(true_fn_0)(q, x, y),
lambda : paddle.jit.dy2static.convert_call(false_fn_0)(q,
x, y))
"""
y = x + 1
# NOTE: x_v[0] < 5 is True
if paddle.mean(x).numpy()[0] < 5:
if paddle.mean(x).numpy() < 5:
x = x + 1
z = x + 2
q = x + 3
@@ -164,7 +164,7 @@ def nested_if_else(x_v):
if y.numpy()[0] < 10:
tmp = y * w
y = paddle.nn.functional.relu(tmp)
if paddle.mean(y).numpy()[0] < batch_size:
if paddle.mean(y).numpy() < batch_size:
y = paddle.abs(y)
else:
tmp = fluid.layers.fill_constant(
@@ -264,7 +264,7 @@ def forward(self, input):
)
# Control flow `if` statement
fc_out = self.fc(input)
if paddle.mean(fc_out).numpy()[0] < 0:
if paddle.mean(fc_out).numpy() < 0:
y = fc_out + self.constant_vars['bias']
self.constant_vars['w'] = fluid.layers.fill_constant(
[5], dtype='float32', value=10
@@ -297,7 +297,7 @@ def if_with_and_or(x_v, label=None):
batch_size = paddle.shape(x_v)
if (
x_v is not None
and (paddle.mean(x_v).numpy()[0] > 0 or label is not None)
and (paddle.mean(x_v).numpy() > 0 or label is not None)
and batch_size[0] > 1
and True
):
@@ -338,10 +338,10 @@ def if_with_and_or_3(x, y=None):
x is not None
and batch_size[0] > 1
and y is not None
and mean_res.numpy()[0] > 0
and mean_res.numpy() > 0
):
x = x + 1
if mean_res.numpy()[0] > 0 and (x is not None and batch_size[0] > 1) and y:
if mean_res.numpy() > 0 and (x is not None and batch_size[0] > 1) and y:
x = x - 1
return x

@@ -350,11 +350,11 @@ def if_with_and_or_4(x, y=None):
batch_size = paddle.shape(x)
mean_res = paddle.mean(x)
if (x is not None and batch_size[0] > 1) or (
y is not None and mean_res.numpy()[0] > 0
y is not None and mean_res.numpy() > 0
):
x = x + 1
if (x is not None or batch_size[0] > 1) and (
y is not None or mean_res.numpy()[0] > 0
y is not None or mean_res.numpy() > 0
):
x = x - 1
return x
32 changes: 16 additions & 16 deletions python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py
@@ -620,19 +620,19 @@ def val_bmn(model, args):
avg_loss = paddle.mean(loss)

loss_data += [
avg_loss.numpy()[0],
tem_loss.numpy()[0],
pem_reg_loss.numpy()[0],
pem_cls_loss.numpy()[0],
float(avg_loss),
float(tem_loss),
float(pem_reg_loss),
float(pem_cls_loss),
]

print(
'[VALID] iter {} '.format(batch_id)
+ '\tLoss = {}, \ttem_loss = {}, \tpem_reg_loss = {}, \tpem_cls_loss = {}'.format(
'%f' % avg_loss.numpy()[0],
'%f' % tem_loss.numpy()[0],
'%f' % pem_reg_loss.numpy()[0],
'%f' % pem_cls_loss.numpy()[0],
'%f' % float(avg_loss),
'%f' % float(tem_loss),
'%f' % float(pem_reg_loss),
'%f' % float(pem_cls_loss),
)
)

@@ -716,10 +716,10 @@ def train_bmn(self, args, place, to_static):
bmn.clear_gradients()
# log loss data to verify correctness
loss_data += [
avg_loss.numpy()[0],
tem_loss.numpy()[0],
pem_reg_loss.numpy()[0],
pem_cls_loss.numpy()[0],
float(avg_loss),
float(tem_loss),
float(pem_reg_loss),
float(pem_cls_loss),
]

if args.log_interval > 0 and (
@@ -728,10 +728,10 @@
print(
'[TRAIN] Epoch {}, iter {} '.format(epoch, batch_id)
+ '\tLoss = {}, \ttem_loss = {}, \tpem_reg_loss = {}, \tpem_cls_loss = {}'.format(
'%f' % avg_loss.numpy()[0],
'%f' % tem_loss.numpy()[0],
'%f' % pem_reg_loss.numpy()[0],
'%f' % pem_cls_loss.numpy()[0],
'%f' % float(avg_loss),
'%f' % float(tem_loss),
'%f' % float(pem_reg_loss),
'%f' % float(pem_cls_loss),
)
)

@@ -32,7 +32,7 @@
# Use a decorator to test exception
@paddle.jit.to_static
def dyfunc_with_if(x_v):
if paddle.mean(x_v).numpy()[0] > 5:
if paddle.mean(x_v).numpy() > 5:
x_v = x_v - 1
else:
x_v = x_v + 1
@@ -53,7 +53,7 @@ def fn1():
@paddle.jit.to_static
def dyfunc_with_third_library_logging(x_v):
logging.info('test dyfunc_with_third_library_logging')
if paddle.mean(x_v).numpy()[0] > 5:
if paddle.mean(x_v).numpy() > 5:
x_v = x_v - 1
else:
x_v = x_v + 1
@@ -669,7 +669,7 @@ def train(args, to_static):
cyc_B_loss,
idt_loss_B,
]
cur_batch_loss = [x.numpy()[0] for x in cur_batch_loss]
cur_batch_loss = [float(x) for x in cur_batch_loss]

batch_time = time.time() - s_time
t_time += batch_time
@@ -75,12 +75,12 @@ def train_net(self, to_static=False):
if to_static:
paddle.jit.save(net, self.path)

return out.numpy()[0]
return float(out)

def load_train(self):
net = paddle.jit.load(self.path)
out = net(self.x)
return out.numpy()[0]
return float(out)

def test_hook(self):
dy_out = self.train_net(to_static=False)
@@ -219,7 +219,7 @@ def train(self, to_static=False):
avg_loss.backward()

adam.minimize(avg_loss)
loss_data.append(avg_loss.numpy()[0])
loss_data.append(float(avg_loss))
# save checkpoint
mnist.clear_gradients()
if batch_id % 10 == 0:
@@ -236,7 +236,7 @@
if batch_id == 50:
mnist.eval()
prediction, acc, avg_loss = mnist(img, label)
loss_data.append(avg_loss.numpy()[0])
loss_data.append(float(avg_loss))
# new save load check
self.check_jit_save_load(
mnist, [dy_x_data], [img], to_static, prediction
@@ -86,7 +86,7 @@ def train(self, to_static=False):
scaled.backward()
scaler.minimize(adam, scaled)

loss_data.append(avg_loss.numpy()[0])
loss_data.append(float(avg_loss))
# save checkpoint
mnist.clear_gradients()
if batch_id % 10 == 0:
@@ -346,25 +346,25 @@ def train(args, fake_data_reader, to_static):
optimizer.minimize(avg_loss)
video_model.clear_gradients()

total_loss += avg_loss.numpy()[0]
total_acc1 += acc_top1.numpy()[0]
total_acc5 += acc_top5.numpy()[0]
total_loss += float(avg_loss)
total_acc1 += float(acc_top1)
total_acc5 += float(acc_top5)
total_sample += 1

print(
'TRAIN Epoch {}, iter {}, loss = {}, acc1 {}, acc5 {}'.format(
epoch,
batch_id,
avg_loss.numpy()[0],
acc_top1.numpy()[0],
acc_top5.numpy()[0],
float(avg_loss),
float(acc_top1),
float(acc_top5),
)
)
ret.extend(
[
avg_loss.numpy()[0],
acc_top1.numpy()[0],
acc_top5.numpy()[0],
float(avg_loss),
float(acc_top1),
float(acc_top5),
]
)

5 changes: 1 addition & 4 deletions python/paddle/fluid/tests/unittests/gradient_checker.py
@@ -25,10 +25,7 @@


def _product(t):
if isinstance(t, int):
return t
else:
return np.product(t)
return int(np.product(t))


def dtype_to_np_dtype(dtype):
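
The change to _product above folds the old isinstance(t, int) branch into a single int(np.product(t)): np.product of a plain int returns that int, and for the empty shape () of a 0-D tensor it returns the float 1.0, so the int(...) wrapper keeps the result an integer in every case. A short illustration:

    import numpy as np

    print(np.product(5))         # 5   -- plain int input, covered by the old isinstance branch
    print(np.product((2, 3)))    # 6   -- element count for shape (2, 3)
    print(np.product(()))        # 1.0 -- empty shape of a 0-D tensor: a float
    print(int(np.product(())))   # 1   -- the int(...) wrapper keeps it an integer
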
20 changes: 18 additions & 2 deletions python/paddle/fluid/tests/unittests/test_zero_dim_tensor.py
@@ -1546,7 +1546,19 @@ def test_argsort(self):
self.assertEqual(x2.grad.numpy(), 0)

def test_lerp(self):
# 0D + 0D
# 0D + 0D, weight is float scalar
x = paddle.rand([])
y = paddle.rand([])
x.stop_gradient = False
y.stop_gradient = False
out = paddle.lerp(x, y, 0.5)
out.backward()

self.assertEqual(out.shape, [])
self.assertEqual(x.grad.shape, [])
self.assertEqual(y.grad.shape, [])

# 0D + 0D, weight is 0D
x0 = paddle.rand([])
y0 = paddle.rand([])
w0 = paddle.rand([])
@@ -2896,11 +2908,15 @@ def test_lerp(self):
[(), (), (), ()],
[(), (64, 64), (), (64, 64)],
[(64, 64), (), (), (64, 64)],
[(64, 64), (), 0.5, (64, 64)],
]
for shape in shapes:
x = paddle.rand(shape[0])
y = paddle.rand(shape[1])
w = paddle.rand(shape[2])
if isinstance(shape[2], float):
w = shape[2]
else:
w = paddle.rand(shape[2])

x.stop_gradient = False
y.stop_gradient = False
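
The new shapes entry [(64, 64), (), 0.5, (64, 64)] covers a float weight combined with broadcasting between a (64, 64) tensor and a 0-D tensor. A minimal sketch of that case (assuming a Paddle build with 0-D tensor support):

    import paddle

    x = paddle.rand([64, 64])
    y = paddle.rand([])            # 0-D tensor, broadcast against x
    out = paddle.lerp(x, y, 0.5)   # float weight instead of a weight tensor
    print(out.shape)               # [64, 64]
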
2 changes: 1 addition & 1 deletion python/paddle/nn/functional/pooling.py
@@ -706,7 +706,7 @@ def _unpool_output_size(x, kernel_size, stride, padding, output_size):
else:
for i, var in enumerate(output_size):
if isinstance(var, Variable):
output_size[i] = var.numpy()[0]
output_size[i] = var.numpy().item()

if len(output_size) == len(kernel_size) + 2:
output_size = output_size[2:]