diff --git a/backends/npu/tests/unittests/test_momentum_op_npu.py b/backends/npu/tests/unittests/test_momentum_op_npu.py
index a3b022c6f..4db2ce76d 100644
--- a/backends/npu/tests/unittests/test_momentum_op_npu.py
+++ b/backends/npu/tests/unittests/test_momentum_op_npu.py
@@ -237,11 +237,11 @@ def _test_momentum_dygraph_common(self, regularization):
         out = linear(inp)
         loss = paddle.mean(out)
         # This can be any optimizer supported by dygraph.
-        momentum = paddle.fluid.contrib.optimizer.Momentum(
+        momentum = paddle.optimizer.Momentum(
             learning_rate=0.01,
             momentum=0.9,
-            parameter_list=linear.parameters(),
-            regularization=regularization,
+            parameters=linear.parameters(),
+            weight_decay=regularization,
         )
         momentum.minimize(loss)
@@ -261,7 +261,7 @@ def test_momentum_static(self):
             cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y)
             avg_cost = paddle.mean(cost)
-            momentum_optimizer = paddle.fluid.contrib.optimizer.Momentum(
+            momentum_optimizer = paddle.optimizer.Momentum(
                 learning_rate=0.1, momentum=0.9
             )
             momentum_optimizer.minimize(avg_cost)
@@ -311,11 +311,11 @@ def __test_vs(self, place=paddle.CustomPlace("npu", 0)):
             weight_attr=paddle.nn.initializer.Constant(value=2.0),
             bias_attr=paddle.nn.initializer.Constant(value=2.0),
         )
-        momentum_new = paddle.fluid.contrib.optimizer.Momentum(
+        momentum_new = paddle.optimizer.Momentum(
             learning_rate=0.01,
             momentum=0.9,
-            parameter_list=linear_new.parameters(),
-            regularization=paddle.regularizer.L2Decay(coeff=0.1),
+            parameters=linear_new.parameters(),
+            weight_decay=paddle.regularizer.L2Decay(coeff=0.1),
         )
         self.__update_params(momentum=momentum_new, linear=linear_new)
diff --git a/backends/npu/tests/unittests/test_rmsprop_op_npu.py b/backends/npu/tests/unittests/test_rmsprop_op_npu.py
index 804c85b26..0777bac26 100644
--- a/backends/npu/tests/unittests/test_rmsprop_op_npu.py
+++ b/backends/npu/tests/unittests/test_rmsprop_op_npu.py
@@ -18,7 +18,6 @@
 import numpy as np

 import paddle
-import paddle.fluid as fluid

 paddle.enable_static()

 SEED = 2021
@@ -49,7 +48,7 @@ def _test(self, run_npu=True):
             cost = paddle.nn.functional.cross_entropy(input=prediction, label=label)
             loss = paddle.mean(cost)
-            rmsprop = fluid.optimizer.RMSProp(learning_rate=0.01)
+            rmsprop = paddle.optimizer.RMSProp(learning_rate=0.01)
             rmsprop.minimize(loss)

             if run_npu:
@@ -110,7 +109,7 @@ def _test(self, run_npu=True):
             cost = paddle.nn.functional.cross_entropy(input=prediction, label=label)
             loss = paddle.mean(cost)
-            rmsprop = fluid.optimizer.RMSProp(learning_rate=0.01, centered=True)
+            rmsprop = paddle.optimizer.RMSProp(learning_rate=0.01, centered=True)
             rmsprop.minimize(loss)

             if run_npu: