diff --git a/python/paddle/fluid/contrib/mixed_precision/bf16/amp_utils.py b/python/paddle/fluid/contrib/mixed_precision/bf16/amp_utils.py
index 4551947e0fad2..0fb86593b2d62 100644
--- a/python/paddle/fluid/contrib/mixed_precision/bf16/amp_utils.py
+++ b/python/paddle/fluid/contrib/mixed_precision/bf16/amp_utils.py
@@ -235,7 +235,7 @@ def bf16_guard():

 def are_post_ops_bf16(post_ops, keep_fp32_ops):
     for post_op in post_ops:
         for op in post_op:
-            if op.type in keep_fp32_ops:
+            if op in keep_fp32_ops:
                 return False
     return True
diff --git a/python/paddle/fluid/tests/book/test_fit_a_line.py b/python/paddle/fluid/tests/book/test_fit_a_line.py
index 65542e2096cc2..a533d1b40cf8d 100644
--- a/python/paddle/fluid/tests/book/test_fit_a_line.py
+++ b/python/paddle/fluid/tests/book/test_fit_a_line.py
@@ -48,7 +48,8 @@ def train(use_cuda, save_dirname, is_local, use_bf16, pure_bf16):
     cost = fluid.layers.square_error_cost(input=y_predict, label=y)
     avg_cost = fluid.layers.mean(cost)

-    sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
+    lr = 5e-3 if use_bf16 else 1e-3
+    sgd_optimizer = fluid.optimizer.SGD(learning_rate=lr)

     if use_bf16:
         sgd_optimizer = amp.bf16.decorate_bf16(
@@ -83,7 +84,7 @@ def train_loop(main_program):
                 avg_loss_value, = exe.run(main_program,
                                           feed=feeder.feed(data),
                                           fetch_list=[avg_cost])
-                if avg_loss_value[0] < 10.0 or pure_bf16:
+                if avg_loss_value[0] < 10.0:
                     if save_dirname is not None:
                         paddle.static.save_inference_model(save_dirname, [x],
                                                            [y_predict], exe)
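
Note (illustrative, not part of the patch): the first hunk implies that post_ops now holds operator-type strings rather than operator objects, so the membership test compares names directly against keep_fp32_ops. A minimal standalone sketch of the corrected check, with hypothetical op names:

    def are_post_ops_bf16(post_ops, keep_fp32_ops):
        # post_ops: nested lists of op-type strings; keep_fp32_ops: the set of
        # op types pinned to fp32. Any hit disqualifies the bf16 path.
        for post_op in post_ops:
            for op in post_op:
                if op in keep_fp32_ops:
                    return False
        return True

    # Hypothetical usage: 'lookup_table' is pinned to fp32.
    assert are_post_ops_bf16([["relu", "scale"]], {"lookup_table"})
    assert not are_post_ops_bf16([["relu", "lookup_table"]], {"lookup_table"})

The two test-side hunks go together: bf16 keeps fewer mantissa bits than fp32, so the bf16 run presumably needs the larger learning rate (5e-3) to clear the 10.0 loss threshold, which in turn allows the unconditional `or pure_bf16` escape hatch in the save condition to be dropped.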