From f8f4d232c2a0f063c3555d6a667e43a3dbe61408 Mon Sep 17 00:00:00 2001
From: Difers <707065510@qq.com>
Date: Wed, 16 Aug 2023 15:22:41 +0800
Subject: [PATCH 01/10] rename fluid to base

---
 .flake8 | 2 +- .gitignore | 2 +- paddle/CMakeLists.txt | 2 +- paddle/cinn/hlir/op/transform.cc | 2 +- paddle/fluid/framework/CMakeLists.txt | 8 +- .../framework/ir/auto_mixed_precision_pass.cc | 2 +- paddle/phi/api/profiler/CMakeLists.txt | 2 +- paddle/scripts/paddle_build.sh | 2 +- pyproject.toml | 4 +- python/CMakeLists.txt | 4 +- python/paddle/_C_ops.py | 2 +- python/paddle/__init__.py | 2 +- python/paddle/_ir_ops.py | 2 +- python/paddle/_legacy_C_ops.py | 2 +- python/paddle/amp/__init__.py | 4 +- python/paddle/amp/auto_cast.py | 6 +- python/paddle/amp/debugging.py | 20 +- python/paddle/amp/grad_scaler.py | 8 +- python/paddle/autograd/__init__.py | 10 +- python/paddle/autograd/autograd.py | 4 +- python/paddle/autograd/backward_mode.py | 4 +- python/paddle/autograd/py_layer.py | 4 +- python/paddle/autograd/saved_tensors_hooks.py | 2 +- python/paddle/{fluid => base}/.gitignore | 0 python/paddle/{fluid => base}/__init__.py | 6 +- python/paddle/{fluid => base}/backward.py | 26 +- python/paddle/{fluid => base}/compiler.py | 6 +- python/paddle/{fluid => base}/core.py | 0 .../paddle/{fluid => base}/data_feed_desc.py | 30 +- python/paddle/{fluid => base}/data_feeder.py | 28 +- python/paddle/{fluid => base}/dataset.py | 220 +++--- .../{fluid => base}/default_scope_funcs.py | 6 +- .../paddle/{fluid => base}/device_worker.py | 0 .../{fluid => base}/dygraph/__init__.py | 0 python/paddle/{fluid => base}/dygraph/base.py | 78 +-- .../{fluid => base}/dygraph/math_op_patch.py | 0 .../dygraph/tensor_patch_methods.py | 20 +- .../paddle/{fluid => base}/dygraph/tracer.py | 4 +- .../paddle/{fluid => base}/dygraph_utils.py | 0 python/paddle/{fluid => base}/executor.py | 40 +- python/paddle/{fluid => base}/framework.py | 170 +++--- .../{fluid => base}/incubate/__init__.py | 2 +- .../incubate/checkpoint/__init__.py | 0 .../incubate/checkpoint/auto_checkpoint.py | 4 +- .../incubate/checkpoint/checkpoint_saver.py | 0 python/paddle/{fluid => base}/initializer.py | 2 +- python/paddle/{fluid => base}/io.py | 12 +- python/paddle/{fluid => base}/layer_helper.py | 0 .../{fluid => base}/layer_helper_base.py | 6 +- .../paddle/{fluid => base}/layers/__init__.py | 0 python/paddle/{fluid => base}/layers/io.py | 0 .../layers/layer_function_generator.py | 2 +- .../{fluid => base}/layers/math_op_patch.py | 18 +- python/paddle/{fluid => base}/lod_tensor.py | 12 +- python/paddle/{fluid => base}/log_helper.py | 0 .../{fluid => base}/multiprocess_utils.py | 0 python/paddle/{fluid => base}/param_attr.py | 2 +- python/paddle/{fluid => base}/reader.py | 94 +-- python/paddle/{fluid => base}/trainer_desc.py | 0 .../paddle/{fluid => base}/trainer_factory.py | 2 +- python/paddle/{fluid => base}/unique_name.py | 0 .../paddle/{fluid => base}/variable_index.py | 22 +- .../{fluid => base}/wrapped_decorator.py | 0 python/paddle/common_ops_import.py | 16 +- python/paddle/cost_model/cost_model.py | 2 +- python/paddle/dataset/uci_housing.py | 6 +- python/paddle/decomposition/decomp.py | 2 +- python/paddle/device/__init__.py | 16 +- python/paddle/device/cuda/__init__.py | 4 +- python/paddle/device/cuda/graphs.py | 8 +- python/paddle/device/cuda/streams.py | 4 +- python/paddle/device/xpu/__init__.py | 2 +- .../auto_parallel/static/auto_align_tool.py | 4 +- .../auto_parallel/static/cluster.py | 2 +- .../auto_parallel/static/completion.py | 2 +- 
.../auto_parallel/static/cost_model.py | 2 +- .../auto_parallel/static/dist_attribute.py | 6 +- .../auto_parallel/static/dist_loader.py | 2 +- .../auto_parallel/static/engine.py | 2 +- .../static/tuner/rule_based_tuner.py | 6 +- .../distributed/auto_parallel/static/utils.py | 8 +- python/paddle/distributed/collective.py | 2 +- .../communication/stream/all_gather.py | 2 +- .../communication/stream/all_reduce.py | 2 +- .../communication/stream/all_to_all.py | 2 +- .../communication/stream/broadcast.py | 2 +- .../distributed/communication/stream/recv.py | 2 +- .../communication/stream/reduce.py | 2 +- .../communication/stream/reduce_scatter.py | 2 +- .../communication/stream/scatter.py | 2 +- .../distributed/communication/stream/send.py | 2 +- .../fleet/base/distributed_strategy.py | 4 +- .../distributed/fleet/base/role_maker.py | 2 +- .../paddle/distributed/fleet/base/topology.py | 2 +- .../distributed/fleet/base/util_factory.py | 8 +- .../distributed/fleet/dataset/dataset.py | 4 +- .../fleet/dataset/index_dataset.py | 2 +- python/paddle/distributed/fleet/fleet.py | 8 +- .../distributed/fleet/layers/mpu/mp_layers.py | 2 +- .../distributed/fleet/layers/mpu/mp_ops.py | 2 +- .../distributed/fleet/layers/mpu/random.py | 4 +- .../fleet/meta_optimizers/dgc_optimizer.py | 6 +- .../meta_optimizers/raw_program_optimizer.py | 2 +- .../fleet/meta_optimizers/sharding/utils.py | 2 +- .../meta_optimizers/sharding_optimizer.py | 2 +- .../fleet/meta_parallel/pipeline_parallel.py | 2 +- .../sharding/group_sharded_stage3.py | 2 +- .../sharding/group_sharded_utils.py | 4 +- .../distributed/fleet/recompute/recompute.py | 2 +- .../fleet/recompute/recompute_hybrid.py | 4 +- .../distributed/fleet/runtime/the_one_ps.py | 28 +- python/paddle/distributed/fleet/scaler.py | 2 +- python/paddle/distributed/fleet/utils/fs.py | 2 +- .../fleet/utils/hybrid_parallel_inference.py | 34 +- .../fleet/utils/hybrid_parallel_util.py | 4 +- .../fleet/utils/mix_precision_utils.py | 6 +- .../fleet/utils/sequence_parallel_utils.py | 2 +- .../fleet/utils/tensor_parallel_utils.py | 2 +- python/paddle/distributed/io.py | 38 +- .../distributed/launch/context/device.py | 2 +- python/paddle/distributed/parallel.py | 2 +- .../paddle/distributed/parallel_with_gloo.py | 2 +- .../distributed/passes/auto_parallel_amp.py | 2 +- .../distributed/passes/auto_parallel_fp16.py | 4 +- .../passes/auto_parallel_pipeline.py | 4 +- .../passes/auto_parallel_recompute.py | 2 +- .../paddle/distributed/passes/pass_utils.py | 6 +- .../distributed/passes/pipeline_pass_base.py | 2 +- .../passes/pipeline_scheduler_pass.py | 2 +- .../distributed/passes/ps_trainer_pass.py | 2 +- .../ps/utils/collective_transpiler.py | 2 +- .../ps/utils/ps_program_builder.py | 12 +- python/paddle/distributed/ps/utils/public.py | 14 +- python/paddle/distributed/rpc/rpc.py | 2 +- python/paddle/distributed/spawn.py | 2 +- .../distributed/transpiler/collective.py | 2 +- .../transpiler/details/vars_distributed.py | 4 +- .../transpiler/distribute_transpiler.py | 10 +- python/paddle/distributed/utils/nccl_utils.py | 2 +- python/paddle/distribution/bernoulli.py | 4 +- python/paddle/distribution/categorical.py | 4 +- python/paddle/distribution/cauchy.py | 2 +- python/paddle/distribution/dirichlet.py | 4 +- python/paddle/distribution/distribution.py | 4 +- python/paddle/distribution/geometric.py | 2 +- python/paddle/distribution/gumbel.py | 2 +- python/paddle/distribution/laplace.py | 2 +- python/paddle/distribution/normal.py | 4 +- python/paddle/distribution/transform.py | 16 +- 
python/paddle/distribution/uniform.py | 4 +- python/paddle/fft.py | 4 +- python/paddle/framework/__init__.py | 76 +-- python/paddle/framework/dtype.py | 6 +- python/paddle/framework/framework.py | 2 +- python/paddle/framework/io.py | 28 +- python/paddle/framework/io_utils.py | 34 +- python/paddle/framework/ir.py | 2 +- python/paddle/framework/random.py | 16 +- python/paddle/geometric/math.py | 4 +- .../geometric/message_passing/send_recv.py | 6 +- .../paddle/geometric/message_passing/utils.py | 4 +- python/paddle/geometric/reindex.py | 6 +- python/paddle/geometric/sampling/neighbors.py | 4 +- python/paddle/hapi/model.py | 88 +-- python/paddle/hapi/model_summary.py | 4 +- python/paddle/hapi/static_flops.py | 2 +- python/paddle/incubate/__init__.py | 2 +- python/paddle/incubate/asp/asp.py | 8 +- .../incubate/asp/supported_layer_list.py | 2 +- .../incubate/autograd/composite_rules.py | 26 +- python/paddle/incubate/autograd/functional.py | 4 +- python/paddle/incubate/autograd/primapi.py | 8 +- python/paddle/incubate/autograd/primops.py | 4 +- python/paddle/incubate/autograd/primx.py | 22 +- python/paddle/incubate/autograd/utils.py | 4 +- python/paddle/incubate/autotune.py | 2 +- python/paddle/incubate/checkpoint/__init__.py | 2 +- .../paddle/incubate/distributed/fleet/base.py | 12 +- .../incubate/distributed/fleet/collective.py | 20 +- .../incubate/distributed/fleet/fleet_util.py | 66 +-- .../distribute_transpiler/__init__.py | 6 +- .../distributed_strategy.py | 12 +- .../fleet/parameter_server/ir/public.py | 2 +- .../parameter_server/ir/vars_metatools.py | 4 +- .../fleet/parameter_server/pslib/__init__.py | 12 +- .../incubate/distributed/fleet/role_maker.py | 22 +- .../incubate/distributed/fleet/utils.py | 38 +- .../distributed/utils/io/dist_load.py | 2 +- .../distributed/utils/io/dist_save.py | 2 +- .../distributed/utils/io/save_for_auto.py | 2 +- python/paddle/incubate/layers/nn.py | 12 +- .../incubate/multiprocessing/reductions.py | 16 +- .../nn/functional/fused_dropout_add.py | 2 +- .../incubate/nn/functional/fused_ec_moe.py | 2 +- .../nn/functional/fused_matmul_bias.py | 2 +- .../nn/functional/fused_transformer.py | 8 +- .../incubate/nn/layer/fused_transformer.py | 10 +- python/paddle/incubate/nn/layer/io.py | 8 +- python/paddle/incubate/nn/loss.py | 4 +- .../incubate/nn/memory_efficient_attention.py | 2 +- .../incubate/operators/graph_khop_sampler.py | 4 +- .../incubate/operators/graph_reindex.py | 4 +- .../operators/graph_sample_neighbors.py | 4 +- .../incubate/operators/graph_send_recv.py | 6 +- .../paddle/incubate/operators/resnet_unit.py | 12 +- .../incubate/operators/softmax_mask_fuse.py | 2 +- .../softmax_mask_fuse_upper_triangle.py | 2 +- python/paddle/incubate/operators/unzip.py | 4 +- .../optimizer/distributed_fused_lamb.py | 8 +- .../incubate/optimizer/functional/utils.py | 4 +- .../incubate/optimizer/gradient_merge.py | 14 +- .../incubate/optimizer/lars_momentum.py | 20 +- python/paddle/incubate/optimizer/lbfgs.py | 8 +- python/paddle/incubate/optimizer/lookahead.py | 12 +- .../paddle/incubate/optimizer/modelaverage.py | 14 +- python/paddle/incubate/optimizer/pipeline.py | 26 +- python/paddle/incubate/optimizer/recompute.py | 26 +- python/paddle/incubate/passes/ir.py | 16 +- python/paddle/incubate/tensor/manipulation.py | 4 +- python/paddle/incubate/tensor/math.py | 4 +- python/paddle/incubate/xpu/resnet_block.py | 14 +- python/paddle/inference/__init__.py | 2 +- .../inference/contrib/utils/__init__.py | 2 +- python/paddle/inference/wrapper.py | 6 +- 
.../paddle/io/dataloader/dataloader_iter.py | 6 +- python/paddle/io/dataloader/flat.py | 4 +- python/paddle/io/dataloader/worker.py | 2 +- python/paddle/io/reader.py | 4 +- python/paddle/ir/__init__.py | 4 +- python/paddle/ir/core.py | 8 +- python/paddle/jit/api.py | 34 +- .../paddle/jit/dy2static/base_transformer.py | 2 +- .../jit/dy2static/basic_api_transformer.py | 2 +- .../dy2static/break_continue_transformer.py | 2 +- .../paddle/jit/dy2static/convert_operators.py | 8 +- python/paddle/jit/dy2static/function_spec.py | 6 +- .../jit/dy2static/ifelse_transformer.py | 2 +- python/paddle/jit/dy2static/logging_utils.py | 2 +- .../paddle/jit/dy2static/loop_transformer.py | 2 +- python/paddle/jit/dy2static/origin_info.py | 4 +- .../paddle/jit/dy2static/partial_program.py | 16 +- .../jit/dy2static/program_translator.py | 10 +- .../jit/dy2static/return_transformer.py | 2 +- python/paddle/jit/dy2static/utils.py | 14 +- python/paddle/jit/dy2static/utils_helper.py | 10 +- .../jit/dy2static/variable_trans_func.py | 2 +- python/paddle/jit/layer.py | 4 +- python/paddle/jit/translated_layer.py | 8 +- python/paddle/metric/metrics.py | 20 +- python/paddle/nn/clip.py | 70 +-- python/paddle/nn/decode.py | 2 +- python/paddle/nn/functional/activation.py | 6 +- python/paddle/nn/functional/common.py | 4 +- python/paddle/nn/functional/conv.py | 6 +- python/paddle/nn/functional/distance.py | 6 +- python/paddle/nn/functional/extension.py | 4 +- .../paddle/nn/functional/flash_attention.py | 4 +- python/paddle/nn/functional/input.py | 4 +- python/paddle/nn/functional/loss.py | 38 +- python/paddle/nn/functional/norm.py | 14 +- python/paddle/nn/functional/pooling.py | 6 +- .../paddle/nn/functional/sparse_attention.py | 2 +- python/paddle/nn/functional/vision.py | 6 +- python/paddle/nn/initializer/Bilinear.py | 4 +- python/paddle/nn/initializer/__init__.py | 2 +- python/paddle/nn/initializer/assign.py | 6 +- python/paddle/nn/initializer/constant.py | 4 +- python/paddle/nn/initializer/dirac.py | 20 +- python/paddle/nn/initializer/initializer.py | 2 +- python/paddle/nn/initializer/kaiming.py | 4 +- python/paddle/nn/initializer/lazy_init.py | 2 +- python/paddle/nn/initializer/normal.py | 6 +- python/paddle/nn/initializer/orthogonal.py | 6 +- python/paddle/nn/initializer/uniform.py | 6 +- python/paddle/nn/initializer/xavier.py | 6 +- python/paddle/nn/layer/container.py | 4 +- python/paddle/nn/layer/layers.py | 24 +- python/paddle/nn/layer/loss.py | 8 +- python/paddle/nn/layer/norm.py | 10 +- python/paddle/nn/layer/rnn.py | 22 +- python/paddle/nn/layer/transformer.py | 4 +- python/paddle/nn/quant/quant_layers.py | 6 +- .../paddle/nn/utils/transform_parameters.py | 10 +- python/paddle/nn/utils/weight_norm_hook.py | 4 +- python/paddle/optimizer/adadelta.py | 12 +- python/paddle/optimizer/adagrad.py | 4 +- python/paddle/optimizer/adam.py | 16 +- python/paddle/optimizer/adamax.py | 14 +- python/paddle/optimizer/adamw.py | 12 +- python/paddle/optimizer/lamb.py | 12 +- python/paddle/optimizer/lbfgs.py | 16 +- python/paddle/optimizer/lr.py | 8 +- python/paddle/optimizer/momentum.py | 10 +- python/paddle/optimizer/optimizer.py | 38 +- python/paddle/optimizer/rmsprop.py | 12 +- python/paddle/optimizer/sgd.py | 14 +- python/paddle/profiler/profiler.py | 2 +- python/paddle/profiler/profiler_statistic.py | 2 +- python/paddle/profiler/utils.py | 4 +- python/paddle/quantization/imperative/qat.py | 2 +- .../paddle/quantization/quanters/abs_max.py | 4 +- python/paddle/reader/decorator.py | 22 +- python/paddle/regularizer.py | 4 +- 
python/paddle/signal.py | 4 +- python/paddle/sparse/binary.py | 4 +- python/paddle/sparse/creation.py | 6 +- python/paddle/sparse/multiary.py | 2 +- .../paddle/sparse/nn/functional/activation.py | 4 +- python/paddle/sparse/nn/functional/conv.py | 2 +- .../sparse/nn/functional/transformer.py | 2 +- python/paddle/sparse/nn/layer/norm.py | 2 +- python/paddle/sparse/unary.py | 4 +- python/paddle/static/__init__.py | 52 +- python/paddle/static/amp/amp_nn.py | 8 +- python/paddle/static/amp/bf16/amp_lists.py | 2 +- python/paddle/static/amp/bf16/amp_utils.py | 10 +- python/paddle/static/amp/bf16/decorator.py | 2 +- python/paddle/static/amp/debugging.py | 16 +- python/paddle/static/amp/decorator.py | 4 +- python/paddle/static/amp/fp16_lists.py | 4 +- python/paddle/static/amp/fp16_utils.py | 10 +- python/paddle/static/amp/function_overload.py | 2 +- python/paddle/static/input.py | 10 +- python/paddle/static/io.py | 66 +-- python/paddle/static/nn/common.py | 24 +- python/paddle/static/nn/control_flow.py | 16 +- python/paddle/static/nn/loss.py | 14 +- python/paddle/static/nn/metric.py | 6 +- python/paddle/static/nn/sequence_lod.py | 44 +- .../post_training_quantization.py | 2 +- .../quantization/quant2_int8_mkldnn_pass.py | 2 +- .../quantization/quant_int8_mkldnn_pass.py | 2 +- python/paddle/static/quantization/quanter.py | 2 +- .../static/quantization/quantization_pass.py | 14 +- python/paddle/static/quantization/utils.py | 2 +- python/paddle/tensor/array.py | 2 +- python/paddle/tensor/attribute.py | 4 +- python/paddle/tensor/creation.py | 6 +- python/paddle/tensor/einsum.py | 6 +- .../paddle/tensor/layer_function_generator.py | 4 +- python/paddle/tensor/linalg.py | 2 +- python/paddle/tensor/logic.py | 6 +- python/paddle/tensor/manipulation.py | 14 +- python/paddle/tensor/math.py | 2 +- python/paddle/tensor/ops.py | 2 +- python/paddle/tensor/random.py | 6 +- python/paddle/tensor/search.py | 6 +- python/paddle/tensor/stat.py | 2 +- python/paddle/tensor/to_string.py | 2 +- python/paddle/text/viterbi_decode.py | 6 +- python/paddle/utils/__init__.py | 2 +- .../utils/cpp_extension/cpp_extension.py | 2 +- .../utils/cpp_extension/extension_utils.py | 34 +- python/paddle/utils/dlpack.py | 12 +- python/paddle/utils/inplace_utils.py | 4 +- python/paddle/utils/layers_utils.py | 6 +- python/paddle/utils/op_version.py | 2 +- python/paddle/utils/unique_name.py | 8 +- python/paddle/vision/models/alexnet.py | 2 +- python/paddle/vision/models/densenet.py | 2 +- python/paddle/vision/models/googlenet.py | 2 +- python/paddle/vision/models/inceptionv3.py | 2 +- python/paddle/vision/models/squeezenet.py | 2 +- python/paddle/vision/ops.py | 8 +- python/paddle/vision/transforms/functional.py | 2 +- .../vision/transforms/functional_cv2.py | 2 +- .../vision/transforms/functional_pil.py | 2 +- .../vision/transforms/functional_tensor.py | 2 +- python/setup.py.in | 36 +- setup.py | 16 +- test/amp/test_amp_decorate.py | 2 +- test/amp/test_amp_list.py | 2 +- test/amp/test_amp_master_grad.py | 4 +- test/amp/test_amp_promote.py | 4 +- test/amp/test_collect_operator_stats.py | 2 +- test/amp/test_compare_accuracy_api.py | 4 +- test/amp/test_layer_convert_dtype.py | 2 +- test/amp/test_model_cast_to_bf16.py | 22 +- test/asp/asp_pruning_base.py | 18 +- test/asp/test_asp_customized_pruning.py | 20 +- test/asp/test_asp_optimize_dynamic.py | 2 +- test/asp/test_asp_optimize_static.py | 30 +- test/asp/test_asp_pruning_dynamic.py | 2 +- test/asp/test_asp_pruning_static.py | 18 +- test/asp/test_asp_save_load.py | 16 +- 
test/asp/test_fleet_with_asp_dynamic.py | 2 +- test/asp/test_fleet_with_asp_sharding.py | 20 +- test/asp/test_fleet_with_asp_static.py | 54 +- test/auto_parallel/1F1B_pass_unittest.py | 6 +- test/auto_parallel/amp_o2_pass.py | 6 +- test/auto_parallel/amp_pass_unittest.py | 6 +- .../auto_parallel_relaunch_model.py | 2 +- .../auto_parallel/clip_grad_by_global_norm.py | 10 +- test/auto_parallel/engine_api.py | 12 +- .../generation_pipeline_pass_unittest.py | 2 +- .../gradient_merge_pass_unittest.py | 6 +- .../pipeline_scheduler_unittest.py | 6 +- test/auto_parallel/random_control_unittest.py | 6 +- test/auto_parallel/recompute_pass_unittest.py | 6 +- test/auto_parallel/reshard_s_to_r.py | 2 +- test/auto_parallel/sharding_newexe.py | 6 +- test/auto_parallel/sharding_pass_unittest.py | 4 +- test/auto_parallel/test_align_tool.py | 10 +- test/auto_parallel/test_dist_assign.py | 4 +- test/auto_parallel/test_dist_attr_v2.py | 4 +- test/auto_parallel/test_dist_context.py | 2 +- test/auto_parallel/test_dist_embedding.py | 6 +- test/auto_parallel/test_dist_matmul.py | 12 +- test/auto_parallel/test_dist_op_cost.py | 4 +- test/auto_parallel/test_dist_pnorm.py | 20 +- test/auto_parallel/test_dist_reshape.py | 4 +- test/auto_parallel/test_dist_scale.py | 4 +- test/auto_parallel/test_dist_shape.py | 4 +- test/auto_parallel/test_dist_slice.py | 8 +- test/auto_parallel/test_dist_split.py | 4 +- test/auto_parallel/test_fp16_assign.py | 4 +- test/auto_parallel/test_fused_linear_pass.py | 6 +- test/auto_parallel/test_interface.py | 2 +- test/auto_parallel/test_pass_base_list.py | 6 +- test/auto_parallel/test_pass_bf16.py | 8 +- test/auto_parallel/test_prim_dist_op.py | 2 +- test/auto_parallel/test_process_mesh.py | 6 +- .../auto_parallel/test_selective_recompute.py | 6 +- test/auto_parallel/test_serialization.py | 6 +- .../auto_parallel/test_while_op_completion.py | 2 +- test/auto_parallel/test_while_op_partition.py | 6 +- .../test_autograd_functional_dynamic.py | 2 +- .../test_autograd_functional_static.py | 58 +- test/autograd/test_jvp_and_transpose.py | 2 +- test/autograd/test_orig2prim.py | 2 +- test/autograd/test_prim2orig.py | 2 +- test/autograd/test_primapi.py | 10 +- test/autograd/utils.py | 4 +- test/book/notest_understand_sentiment.py | 34 +- test/book/test_fit_a_line.py | 38 +- test/book/test_image_classification.py | 34 +- test/book/test_recognize_digits.py | 36 +- test/book/test_recommender_system.py | 38 +- test/book/test_word2vec_book.py | 60 +- test/cinn/op_mappers/op_mapper_test.py | 6 +- test/cinn/ops/test_conv2d_op.py | 2 +- test/cinn/test_computation.py | 8 +- test/cinn/test_efficientnet.py | 8 +- test/cinn/test_facedet.py | 8 +- test/cinn/test_frontend.py | 14 +- test/cinn/test_mobilenetv1.py | 8 +- test/cinn/test_mobilenetv2.py | 8 +- test/cinn/test_resnet.py | 8 +- test/cinn/test_resnet18.py | 8 +- test/cinn/test_resnet50.py | 8 +- test/cinn/test_squeezenet.py | 8 +- test/collective/collective_allgather_api.py | 22 +- .../collective_allgather_api_dygraph.py | 4 +- ...collective_allgather_object_api_dygraph.py | 4 +- test/collective/collective_allreduce_api.py | 8 +- .../collective_allreduce_api_dygraph.py | 4 +- .../collective_allreduce_new_group_api.py | 4 +- test/collective/collective_allreduce_op.py | 6 +- .../collective_allreduce_op_wait.py | 6 +- test/collective/collective_alltoall_api.py | 8 +- .../collective_alltoall_api_dygraph.py | 4 +- .../collective_alltoall_single_api_dygraph.py | 4 +- test/collective/collective_barrier_api.py | 4 +- test/collective/collective_broadcast_api.py 
| 8 +- .../collective_broadcast_api_dygraph.py | 4 +- ...ctive_broadcast_object_list_api_dygraph.py | 4 +- test/collective/collective_broadcast_op.py | 6 +- test/collective/collective_concat_api.py | 6 +- test/collective/collective_concat_op.py | 6 +- .../collective_gather_api_dygraph.py | 4 +- test/collective/collective_global_gather.py | 22 +- .../collective_global_gather_dygraph.py | 4 +- test/collective/collective_global_scatter.py | 18 +- .../collective_global_scatter_dygraph.py | 4 +- test/collective/collective_identity_op.py | 6 +- .../collective_isend_irecv_api_dygraph.py | 4 +- test/collective/collective_reduce_api.py | 8 +- .../collective_reduce_api_dygraph.py | 4 +- test/collective/collective_reduce_op.py | 6 +- .../collective_reduce_op_calc_stream.py | 6 +- .../collective_reduce_scatter_api.py | 4 +- .../collective_reduce_scatter_api_dygraph.py | 4 +- test/collective/collective_scatter_api.py | 4 +- .../collective_scatter_api_dygraph.py | 4 +- ...lective_scatter_object_list_api_dygraph.py | 4 +- test/collective/collective_scatter_op.py | 6 +- test/collective/collective_sendrecv_api.py | 10 +- .../collective_sendrecv_api_dygraph.py | 4 +- test/collective/collective_sendrecv_op.py | 4 +- .../collective_sendrecv_op_array.py | 4 +- .../collective_sendrecv_op_dynamic_shape.py | 4 +- test/collective/collective_split_op.py | 6 +- test/collective/column_parallel_linear_api.py | 8 +- .../fleet/auto_parallel_parallelizer.py | 2 +- test/collective/fleet/c_comm_init_op.py | 12 +- .../fleet/dist_mnist_gradient_merge.py | 8 +- ...dist_mnist_gradient_merge_raw_optimizer.py | 4 +- .../fleet/dygraph_group_sharded_stage3.py | 2 +- .../dygraph_group_sharded_stage3_offload.py | 2 +- .../fleet/fused_attention_pass_with_mp.py | 10 +- .../fleet/hybrid_parallel_inference_helper.py | 16 +- test/collective/fleet/hybrid_parallel_qat.py | 10 +- .../parallel_dygraph_control_flow_same.py | 2 +- .../fleet/parallel_dygraph_no_sync.py | 12 +- .../fleet/parallel_dygraph_se_resnext.py | 8 +- .../fleet/parallel_dygraph_sync_batch_norm.py | 2 +- .../fleet/parallel_dygraph_transformer.py | 12 +- test/collective/fleet/pipeline_mnist.py | 26 +- .../fleet/pipeline_mnist_multi_device.py | 26 +- .../fleet/pipeline_mnist_one_device.py | 22 +- .../fleet/static_model_parallel_by_col.py | 8 +- .../fleet/static_model_parallel_by_row.py | 8 +- .../fleet/static_model_parallel_embedding.py | 8 +- test/collective/fleet/test_auto_checkpoint.py | 4 +- .../fleet/test_auto_checkpoint_dist_basic.py | 6 +- .../fleet/test_auto_checkpoint_multiple.py | 2 +- .../fleet/test_communicator_half_async.py | 10 +- test/collective/fleet/test_dgc_momentum_op.py | 6 +- test/collective/fleet/test_dgc_op.py | 6 +- test/collective/fleet/test_dgc_optimizer.py | 2 +- .../fleet/test_dist_mnist_dgc_nccl.py | 8 +- .../fleet/test_dist_mnist_gradient_merge.py | 8 +- .../fleet/test_dist_se_resnext_dgc.py | 4 +- .../fleet/test_distributed_strategy.py | 10 +- test/collective/fleet/test_fleet_amp_init.py | 6 +- .../fleet/test_fleet_amp_meta_optimizer.py | 24 +- .../collective/fleet/test_fleet_checkpoint.py | 14 +- .../fleet/test_fleet_dgc_meta_optimizer.py | 20 +- .../fleet/test_fleet_distributed_strategy.py | 8 +- ...est_fleet_fp16_allreduce_meta_optimizer.py | 8 +- ...est_fleet_gradient_merge_meta_optimizer.py | 18 +- .../fleet/test_fleet_lamb_meta_optimizer.py | 18 +- .../fleet/test_fleet_lars_meta_optimizer.py | 18 +- .../test_fleet_localsgd_meta_optimizer.py | 10 +- .../fleet/test_fleet_meta_optimizer_base.py | 8 +- 
.../test_fleet_pipeline_meta_optimizer.py | 6 +- ..._pipeline_meta_optimizer_with_recompute.py | 4 +- .../fleet/test_fleet_qat_meta_optimizer.py | 22 +- .../test_fleet_recompute_meta_optimizer.py | 18 +- .../test_fleet_sharding_meta_optimizer.py | 96 ++-- .../test_fleet_tensor_parallel_extra_sync.py | 2 +- test/collective/fleet/test_hdfs1.py | 4 +- ...perative_auto_mixed_precision_for_eager.py | 120 ++-- .../test_parallel_dygraph_control_flow.py | 6 +- .../fleet/test_parallel_dygraph_mnist.py | 12 +- .../fleet/test_parallel_dygraph_no_sync.py | 14 +- .../fleet/test_parallel_dygraph_qat.py | 6 +- .../fleet/test_parallel_dygraph_se_resnext.py | 6 +- .../test_parallel_dygraph_sparse_embedding.py | 8 +- ...el_dygraph_sparse_embedding_over_height.py | 6 +- .../test_parallel_dygraph_sync_batch_norm.py | 4 +- .../test_parallel_dygraph_transformer.py | 6 +- .../test_parallel_dygraph_unused_variables.py | 10 +- test/collective/fleet/test_pipeline.py | 8 +- test/collective/fleet/test_recv_save_op.py | 34 +- .../fleet/test_static_model_parallel.py | 12 +- test/collective/fleet/test_tcp_store.py | 2 +- test/collective/parallel_embedding_api.py | 8 +- test/collective/process_group_gloo.py | 6 +- test/collective/process_group_mpi.py | 4 +- test/collective/row_parallel_linear_api.py | 8 +- .../test_collective_allreduce_api.py | 4 +- .../test_collective_cpu_barrier_with_gloo.py | 12 +- test/collective/test_collective_reduce_api.py | 6 +- .../test_collective_sendrecv_api.py | 4 +- test/collective/test_gen_nccl_id_op.py | 2 +- test/contrib/test_bf16_utils.py | 14 +- test/contrib/test_correlation.py | 16 +- test/contrib/test_fp16_utils.py | 8 +- .../contrib/test_image_classification_fp16.py | 36 +- .../test_multi_precision_fp16_train.py | 36 +- .../custom_kernel_dot_c_setup.py | 2 +- test/custom_kernel/custom_kernel_dot_setup.py | 2 +- .../custom_raw_op_kernel_op_setup.py | 2 +- test/custom_op/test_custom_concat.py | 4 +- test/custom_op/test_custom_conj.py | 4 +- test/custom_runtime/process_group_xccl.py | 24 +- test/custom_runtime/test_custom_op_setup.py | 2 +- .../auto_parallel_pass_test_base.py | 6 +- .../distributed_passes/dist_pass_test_base.py | 2 +- .../test_auto_parallel_gradient_merge_pass.py | 2 +- .../test_dist_inplace_addto_pass.py | 2 +- test/distribution/test_dirichlet_op.py | 2 +- .../test_distribution_bernoulli.py | 28 +- test/distribution/test_distribution_beta.py | 10 +- .../test_distribution_categorical.py | 26 +- test/distribution/test_distribution_cauchy.py | 32 +- .../test_distribution_dirichlet.py | 10 +- .../test_distribution_geometric.py | 16 +- .../test_distribution_lognormal.py | 4 +- test/distribution/test_distribution_normal.py | 32 +- .../distribution/test_distribution_uniform.py | 34 +- test/distribution/test_kl.py | 4 +- test/dygraph_to_static/bert_dygraph_model.py | 22 +- test/dygraph_to_static/darknet.py | 2 +- .../dygraph_to_static_util.py | 2 +- test/dygraph_to_static/ifelse_simple_func.py | 4 +- test/dygraph_to_static/predictor_utils.py | 8 +- .../seq2seq_dygraph_model.py | 14 +- .../dygraph_to_static/simnet_dygraph_model.py | 10 +- test/dygraph_to_static/test_assert.py | 8 +- test/dygraph_to_static/test_ast_util.py | 12 +- .../test_basic_api_transformation.py | 136 ++--- test/dygraph_to_static/test_bert.py | 20 +- test/dygraph_to_static/test_bmn.py | 26 +- test/dygraph_to_static/test_break_continue.py | 32 +- test/dygraph_to_static/test_build_strategy.py | 8 +- test/dygraph_to_static/test_cache_program.py | 20 +- test/dygraph_to_static/test_cast.py | 18 +- 
test/dygraph_to_static/test_cinn_prim.py | 2 +- test/dygraph_to_static/test_cinn_prim_gelu.py | 2 +- .../test_cinn_prim_layer_norm.py | 2 +- test/dygraph_to_static/test_cinn_prim_mean.py | 2 +- test/dygraph_to_static/test_convert_call.py | 24 +- test/dygraph_to_static/test_cycle_gan.py | 40 +- test/dygraph_to_static/test_declarative.py | 18 +- test/dygraph_to_static/test_dict.py | 14 +- .../test_duplicate_output.py | 8 +- test/dygraph_to_static/test_error.py | 14 +- test/dygraph_to_static/test_eval_frame.py | 4 +- test/dygraph_to_static/test_fetch_feed.py | 6 +- test/dygraph_to_static/test_for_enumerate.py | 42 +- .../dygraph_to_static/test_full_name_usage.py | 8 +- test/dygraph_to_static/test_ifelse.py | 26 +- test/dygraph_to_static/test_lac.py | 42 +- test/dygraph_to_static/test_lambda.py | 22 +- test/dygraph_to_static/test_len.py | 32 +- test/dygraph_to_static/test_list.py | 38 +- test/dygraph_to_static/test_logical.py | 6 +- test/dygraph_to_static/test_loop.py | 36 +- test/dygraph_to_static/test_mnist.py | 28 +- test/dygraph_to_static/test_mnist_amp.py | 4 +- .../dygraph_to_static/test_mnist_pure_fp16.py | 6 +- test/dygraph_to_static/test_mobile_net.py | 24 +- .../dygraph_to_static/test_partial_program.py | 22 +- .../test_partial_program_hook.py | 2 +- test/dygraph_to_static/test_print.py | 4 +- .../test_program_translator.py | 18 +- test/dygraph_to_static/test_ptb_lm.py | 22 +- .../test_reinforcement_learning.py | 12 +- test/dygraph_to_static/test_resnet.py | 28 +- test/dygraph_to_static/test_resnet_amp.py | 12 +- .../test_resnet_pure_fp16.py | 12 +- test/dygraph_to_static/test_resnet_v2.py | 12 +- test/dygraph_to_static/test_return.py | 44 +- .../test_save_inference_model.py | 20 +- test/dygraph_to_static/test_save_load.py | 18 +- test/dygraph_to_static/test_se_resnet.py | 28 +- test/dygraph_to_static/test_sentiment.py | 22 +- test/dygraph_to_static/test_seq2seq.py | 16 +- test/dygraph_to_static/test_simnet.py | 14 +- test/dygraph_to_static/test_simnet_v2.py | 2 +- .../dygraph_to_static/test_static_analysis.py | 8 +- .../test_tensor_memcpy_on_cpu.py | 10 +- .../test_tensor_memcpy_on_gpu.py | 12 +- test/dygraph_to_static/test_tensor_shape.py | 46 +- test/dygraph_to_static/test_to_tensor.py | 4 +- test/dygraph_to_static/test_transformer.py | 24 +- test/dygraph_to_static/test_tsm.py | 20 +- test/dygraph_to_static/test_typehint.py | 12 +- .../dygraph_to_static/test_unuseful_inputs.py | 2 +- test/dygraph_to_static/test_word2vec.py | 24 +- test/dygraph_to_static/test_yolov3.py | 12 +- .../transformer_dygraph_model.py | 22 +- test/dygraph_to_static/yolov3.py | 12 +- test/fft/test_fft.py | 70 +-- test/indexing/test_getitem.py | 2 +- test/indexing/test_setitem.py | 2 +- .../custom_ops/test_custom_leaky_relu_ipu.py | 4 +- .../distributed/test_dist_pod128_sample.py | 4 +- test/ipu/distributed/test_dist_sample.py | 4 +- test/ipu/op_test_ipu.py | 6 +- test/ipu/test_dy2static_fp16_ipu.py | 2 +- test/ipu/test_dy2static_ipu.py | 2 +- test/ipu/test_greater_op_ipu.py | 10 +- test/ipu/test_identity_loss_ipu.py | 12 +- test/ipu/test_inference_model_io_ipu.py | 4 +- test/ipu/test_one_hot_v2_op_ipu.py | 2 +- test/ipu/test_save_load_ipu.py | 4 +- test/ipu/test_weight_sharing_ipu.py | 4 +- test/ir/inference/auto_scan_test.py | 2 +- test/ir/inference/inference_pass_test.py | 24 +- test/ir/inference/program_config.py | 18 +- test/ir/inference/quant_dequant_test.py | 48 +- test/ir/inference/test_inplace_op_pass.py | 2 +- .../test_mkldnn_cpu_bfloat16_pass.py | 6 +- .../test_mkldnn_elt_act_fuse_pass.py | 6 +- 
.../test_mkldnn_matmul_op_output_fuse_pass.py | 8 +- ...n_reshape_transpose_matmul_v2_fuse_pass.py | 6 +- .../test_trt_c_allreduce_infer_script.py | 2 +- test/ir/inference/test_trt_conv3d_op.py | 10 +- .../inference/test_trt_conv3d_transpose_op.py | 10 +- test/ir/inference/test_trt_conv_pass.py | 12 +- .../test_trt_conv_quant_dequant_pass.py | 30 +- .../test_trt_convert_preln_residual_bias.py | 2 +- ...test_trt_convert_preln_residual_no_bias.py | 2 +- test/ir/inference/test_trt_deformable_conv.py | 8 +- test/ir/inference/test_trt_dynamic_shape.py | 8 +- test/ir/inference/test_trt_elementwise_op.py | 8 +- test/ir/inference/test_trt_fc_fuse_pass.py | 24 +- .../test_trt_fc_fuse_quant_dequant_pass.py | 30 +- test/ir/inference/test_trt_flatten_op.py | 10 +- test/ir/inference/test_trt_gather_nd_op.py | 10 +- test/ir/inference/test_trt_gather_op.py | 10 +- test/ir/inference/test_trt_inspector.py | 8 +- .../ir/inference/test_trt_instance_norm_op.py | 8 +- test/ir/inference/test_trt_matmul.py | 14 +- .../test_trt_matmul_quant_dequant.py | 30 +- .../inference/test_trt_multiclass_nms3_op.py | 10 +- .../inference/test_trt_nearest_interp_op.py | 8 +- .../test_trt_nearest_interp_v2_op.py | 8 +- test/ir/inference/test_trt_pad_op.py | 8 +- test/ir/inference/test_trt_pool3d_op.py | 12 +- test/ir/inference/test_trt_pool_op.py | 8 +- test/ir/inference/test_trt_reduce_sum_op.py | 10 +- test/ir/inference/test_trt_reshape_op.py | 14 +- test/ir/inference/test_trt_scale_op.py | 10 +- .../test_trt_shuffle_channel_detect_pass.py | 6 +- .../test_trt_skip_layernorm_fuse_pass.py | 14 +- .../test_trt_slice_dynamic_plugin.py | 8 +- test/ir/inference/test_trt_slice_plugin.py | 12 +- test/ir/inference/test_trt_subgraph_pass.py | 30 +- test/ir/inference/test_trt_tile_op.py | 14 +- ..._trt_transpose_flatten_concat_fuse_pass.py | 8 +- .../inference/test_trt_tuned_dynamic_shape.py | 12 +- test/ir/inference/test_trt_yolo_box_op.py | 10 +- test/ir/inference/test_yolo_box_post.py | 2 +- test/ir/new_ir/test_data_op.py | 2 +- test/ir/new_ir/test_ir_pybind.py | 6 +- test/ir/new_ir/test_ir_vjp.py | 2 +- test/ir/new_ir/test_special_op_translator.py | 2 +- test/ir/pass_test.py | 28 +- test/ir/test_fuse_resnet_unit.py | 4 +- ...r_embedding_eltwise_layernorm_fuse_pass.py | 8 +- test/ir/test_ir_fc_fuse_pass.py | 8 +- test/ir/test_ir_fusion_group_pass.py | 28 +- test/ir/test_ir_generate_pass.py | 10 +- test/ir/test_ir_graph_to_program_pass.py | 8 +- test/ir/test_ir_skip_layernorm_pass.py | 8 +- test/ir/test_ir_subgraph_python_interface.py | 10 +- test/ir/test_ir_yolo_box_pass.py | 6 +- test/legacy_test/auto_checkpoint_utils.py | 24 +- test/legacy_test/auto_parallel_gpt_model.py | 2 +- test/legacy_test/check_nan_inf_base.py | 18 +- test/legacy_test/collective_allgather_op.py | 6 +- .../collective_reducescatter_op.py | 6 +- test/legacy_test/decorator_helper.py | 12 +- test/legacy_test/detected_gpu.py | 10 +- test/legacy_test/detected_xpu.py | 8 +- test/legacy_test/dist_allreduce_op.py | 14 +- test/legacy_test/dist_ctr.py | 14 +- test/legacy_test/dist_ctr_reader.py | 2 +- test/legacy_test/dist_fleet_ctr.py | 40 +- test/legacy_test/dist_fleet_ctr_ps_gpu.py | 18 +- test/legacy_test/dist_fleet_debug_gloo.py | 2 +- .../dist_fleet_heter_pipeline_ctr.py | 40 +- .../dist_fleet_raw_program_optimizer.py | 14 +- ...et_raw_program_optimizer_fuse_allreduce.py | 14 +- test/legacy_test/dist_fleet_simnet_bow.py | 34 +- .../dist_fleet_sparse_embedding_ctr.py | 18 +- .../legacy_test/dist_fleet_sync_batch_norm.py | 18 +- 
test/legacy_test/dist_hapi_mnist_dynamic.py | 4 +- test/legacy_test/dist_hapi_mnist_static.py | 4 +- .../legacy_test/dist_hapi_pure_fp16_static.py | 6 +- test/legacy_test/dist_mnist.py | 14 +- test/legacy_test/dist_mnist_batch_merge.py | 4 +- test/legacy_test/dist_mnist_dgc.py | 14 +- test/legacy_test/dist_mnist_fp16_allreduce.py | 8 +- test/legacy_test/dist_mnist_lars.py | 8 +- test/legacy_test/dist_se_resnext.py | 16 +- test/legacy_test/dist_sharding_save.py | 18 +- test/legacy_test/dist_text_classification.py | 12 +- test/legacy_test/dist_word2vec.py | 20 +- .../distributed_fused_lamb_test_base.py | 4 +- test/legacy_test/eager_op_test.py | 102 ++-- test/legacy_test/feed_data_reader.py | 6 +- test/legacy_test/fleet_heter_ps_training.py | 24 +- test/legacy_test/fleet_meta_optimizer_base.py | 22 +- test/legacy_test/fleet_ps_training.py | 8 +- test/legacy_test/gradient_checker.py | 106 ++-- .../ir_memory_optimize_net_base.py | 24 +- test/legacy_test/jit_load_rename_var.py | 4 +- test/legacy_test/nets.py | 36 +- test/legacy_test/nproc_process.py | 4 +- test/legacy_test/op.py | 4 +- test/legacy_test/parallel_dygraph_mnist.py | 2 +- .../parallel_dygraph_shared_unused_var.py | 2 +- .../parallel_dygraph_sparse_embedding.py | 10 +- .../parallel_executor_test_base.py | 44 +- test/legacy_test/prim_op_test.py | 48 +- test/legacy_test/seresnext_net.py | 6 +- test/legacy_test/seresnext_test_base.py | 2 +- test/legacy_test/simple_nets.py | 8 +- .../static_model_parallel_fused_attention.py | 4 +- ...static_model_parallel_fused_feedforward.py | 4 +- ..._model_parallel_fused_multi_transformer.py | 4 +- test/legacy_test/test_accuracy_op.py | 12 +- test/legacy_test/test_activation_nn_grad.py | 100 ++-- test/legacy_test/test_activation_op.py | 366 ++++++------ test/legacy_test/test_activation_sparse_op.py | 2 +- test/legacy_test/test_adadelta_op.py | 14 +- test/legacy_test/test_adagrad_op.py | 2 +- test/legacy_test/test_adam_op.py | 20 +- .../test_adam_optimizer_fp32_fp64.py | 22 +- test/legacy_test/test_adamax_api.py | 14 +- test/legacy_test/test_adamw_op.py | 26 +- test/legacy_test/test_adaptive_avg_pool1d.py | 18 +- test/legacy_test/test_adaptive_avg_pool2d.py | 8 +- test/legacy_test/test_adaptive_avg_pool3d.py | 8 +- test/legacy_test/test_adaptive_max_pool1d.py | 18 +- test/legacy_test/test_adaptive_max_pool2d.py | 8 +- test/legacy_test/test_adaptive_max_pool3d.py | 8 +- .../test_add_position_encoding_op.py | 2 +- .../legacy_test/test_add_reader_dependency.py | 24 +- test/legacy_test/test_addmm_op.py | 24 +- test/legacy_test/test_affine_grid_function.py | 38 +- test/legacy_test/test_affine_grid_op.py | 2 +- test/legacy_test/test_allclose_layer.py | 46 +- test/legacy_test/test_allclose_op.py | 2 +- test/legacy_test/test_angle_op.py | 2 +- .../legacy_test/test_apply_pass_to_program.py | 6 +- test/legacy_test/test_arange.py | 2 +- test/legacy_test/test_arg_min_max_op.py | 2 +- test/legacy_test/test_arg_min_max_v2_op.py | 6 +- test/legacy_test/test_argsort_op.py | 26 +- test/legacy_test/test_array_read_write_op.py | 18 +- test/legacy_test/test_assert_op.py | 10 +- test/legacy_test/test_assign_op.py | 36 +- test/legacy_test/test_assign_pos_op.py | 2 +- test/legacy_test/test_assign_value_op.py | 24 +- test/legacy_test/test_async_read_write.py | 2 +- test/legacy_test/test_atan2_op.py | 2 +- test/legacy_test/test_attribute_var.py | 2 +- test/legacy_test/test_auc_op.py | 4 +- .../test_auto_growth_allocator_gpu.py | 16 +- .../test_auto_parallel_cost_model.py | 2 +- test/legacy_test/test_auto_parallel_mapper.py | 
10 +- test/legacy_test/test_auto_search_dist_op.py | 2 +- .../test_avoid_twice_initialization.py | 4 +- test/legacy_test/test_backward.py | 96 ++-- ...test_backward_infer_var_data_type_shape.py | 6 +- test/legacy_test/test_base_layer.py | 78 +-- test/legacy_test/test_batch_fc_op.py | 2 +- test/legacy_test/test_batch_norm_op.py | 48 +- .../test_batch_norm_op_prim_nchw.py | 2 +- .../test_batch_norm_op_prim_nhwc.py | 2 +- test/legacy_test/test_batch_norm_op_v2.py | 78 +-- test/legacy_test/test_bce_loss.py | 16 +- test/legacy_test/test_bce_with_logits_loss.py | 22 +- .../legacy_test/test_beam_search_decode_op.py | 2 +- test/legacy_test/test_beam_search_op.py | 2 +- test/legacy_test/test_bernoulli_op.py | 2 +- test/legacy_test/test_bicubic_interp_op.py | 38 +- test/legacy_test/test_bicubic_interp_v2_op.py | 40 +- test/legacy_test/test_bilateral_slice_op.py | 12 +- test/legacy_test/test_bilinear_api.py | 12 +- test/legacy_test/test_bilinear_interp_op.py | 2 +- .../legacy_test/test_bilinear_interp_v2_op.py | 22 +- .../test_bilinear_tensor_product_op.py | 10 +- test/legacy_test/test_bincount_op.py | 26 +- test/legacy_test/test_bmm_op.py | 16 +- test/legacy_test/test_boxps.py | 20 +- test/legacy_test/test_broadcast_error.py | 2 +- test/legacy_test/test_broadcast_tensors_op.py | 2 +- test/legacy_test/test_broadcast_to_op.py | 16 +- test/legacy_test/test_bucketize_api.py | 2 +- .../test_buffer_shared_memory_reuse_pass.py | 36 +- .../test_build_strategy_fusion_group_pass.py | 8 +- test/legacy_test/test_c_comm_init_all_op.py | 12 +- test/legacy_test/test_calc_gradient.py | 56 +- test/legacy_test/test_case.py | 50 +- test/legacy_test/test_cast_op.py | 18 +- test/legacy_test/test_channel_shuffle.py | 28 +- test/legacy_test/test_checkpoint_saver.py | 2 +- test/legacy_test/test_cholesky_op.py | 28 +- test/legacy_test/test_cholesky_solve_op.py | 18 +- test/legacy_test/test_chunk_op.py | 22 +- .../test_class_center_sample_op.py | 26 +- test/legacy_test/test_clip_by_norm_op.py | 2 +- test/legacy_test/test_clip_op.py | 22 +- test/legacy_test/test_coalesce_tensor_op.py | 14 +- test/legacy_test/test_collective_api_base.py | 18 +- test/legacy_test/test_collective_base.py | 16 +- test/legacy_test/test_communicator_geo.py | 14 +- test/legacy_test/test_communicator_ps_gpu.py | 4 +- test/legacy_test/test_compare_op.py | 32 +- test/legacy_test/test_compiled_program.py | 38 +- test/legacy_test/test_complex_abs.py | 8 +- .../test_complex_elementwise_layers.py | 6 +- test/legacy_test/test_complex_getitem.py | 10 +- .../test_complex_grad_accumulated.py | 2 +- test/legacy_test/test_complex_kron.py | 6 +- test/legacy_test/test_complex_matmul.py | 10 +- test/legacy_test/test_complex_op.py | 2 +- test/legacy_test/test_complex_reshape.py | 6 +- test/legacy_test/test_complex_simplenet.py | 2 +- test/legacy_test/test_complex_sum_layer.py | 6 +- test/legacy_test/test_complex_trace_layer.py | 10 +- test/legacy_test/test_complex_transpose.py | 6 +- test/legacy_test/test_complex_variable.py | 8 +- test/legacy_test/test_complex_view_op.py | 2 +- test/legacy_test/test_concat_op.py | 60 +- test/legacy_test/test_cond.py | 94 +-- test/legacy_test/test_conditional_block.py | 16 +- test/legacy_test/test_conj_op.py | 6 +- test/legacy_test/test_const_value.py | 2 +- test/legacy_test/test_context_manager.py | 10 +- test/legacy_test/test_conv1d_layer.py | 24 +- .../test_conv1d_transpose_layer.py | 22 +- test/legacy_test/test_conv2d_api.py | 20 +- test/legacy_test/test_conv2d_fusion_op.py | 10 +- test/legacy_test/test_conv2d_layer.py | 38 +- 
test/legacy_test/test_conv2d_op.py | 20 +- .../test_conv2d_op_depthwise_conv.py | 2 +- .../test_conv2d_transpose_layer.py | 38 +- test/legacy_test/test_conv2d_transpose_op.py | 10 +- test/legacy_test/test_conv3d_layer.py | 38 +- test/legacy_test/test_conv3d_op.py | 14 +- .../test_conv3d_transpose_layer.py | 38 +- test/legacy_test/test_conv3d_transpose_op.py | 2 +- .../test_conv3d_transpose_part2_op.py | 10 +- test/legacy_test/test_conv_nn_grad.py | 138 ++--- .../test_conv_transpose_nn_grad.py | 16 +- test/legacy_test/test_corr.py | 14 +- .../legacy_test/test_cosine_similarity_api.py | 2 +- test/legacy_test/test_cost_model.py | 2 +- test/legacy_test/test_count_nonzero_api.py | 2 +- test/legacy_test/test_cov.py | 38 +- test/legacy_test/test_create_global_var.py | 2 +- test/legacy_test/test_create_parameter.py | 2 +- test/legacy_test/test_cross_entropy_loss.py | 536 +++++++++--------- test/legacy_test/test_cross_entropy_op.py | 12 +- test/legacy_test/test_cross_op.py | 20 +- test/legacy_test/test_crypto.py | 2 +- .../test_cuda_graph_static_mode.py | 2 +- .../test_cuda_graph_static_mode_error.py | 2 +- .../test_cuda_max_memory_allocated.py | 2 +- .../test_cuda_max_memory_reserved.py | 2 +- .../legacy_test/test_cuda_memory_allocated.py | 2 +- test/legacy_test/test_cuda_memory_reserved.py | 2 +- test/legacy_test/test_cuda_random_seed.py | 24 +- test/legacy_test/test_cummax_op.py | 20 +- test/legacy_test/test_cummin_op.py | 20 +- test/legacy_test/test_cumprod_op.py | 2 +- test/legacy_test/test_cumsum_op.py | 22 +- test/legacy_test/test_custom_grad_input.py | 2 +- test/legacy_test/test_data.py | 2 +- test/legacy_test/test_data_feeder.py | 8 +- test/legacy_test/test_data_norm_op.py | 26 +- .../test_dataloader_early_reset.py | 20 +- .../legacy_test/test_dataloader_keep_order.py | 24 +- .../test_dataloader_unkeep_order.py | 26 +- test/legacy_test/test_dataset.py | 194 +++---- .../test_dataset_consistency_inspection.py | 6 +- test/legacy_test/test_dataset_dataloader.py | 36 +- test/legacy_test/test_debugger.py | 4 +- test/legacy_test/test_decoupled_py_reader.py | 32 +- .../test_decoupled_py_reader_data_check.py | 22 +- test/legacy_test/test_default_scope_funcs.py | 2 +- test/legacy_test/test_deformable_conv_op.py | 8 +- .../legacy_test/test_deformable_conv_v1_op.py | 6 +- test/legacy_test/test_deg2rad.py | 18 +- test/legacy_test/test_deprecated_decorator.py | 4 +- ...t_deprecated_memory_optimize_interfaces.py | 10 +- test/legacy_test/test_desc_clone.py | 32 +- test/legacy_test/test_detach.py | 18 +- test/legacy_test/test_detection.py | 36 +- test/legacy_test/test_device.py | 12 +- test/legacy_test/test_device_guard.py | 2 +- test/legacy_test/test_diag.py | 2 +- test/legacy_test/test_diag_embed.py | 8 +- test/legacy_test/test_diag_v2.py | 20 +- test/legacy_test/test_diagflat.py | 2 +- test/legacy_test/test_diagonal_op.py | 2 +- test/legacy_test/test_diff_op.py | 16 +- test/legacy_test/test_digamma_op.py | 8 +- test/legacy_test/test_dist_allreduce_op.py | 4 +- test/legacy_test/test_dist_base.py | 120 ++-- .../test_dist_fleet_a_sync_optimizer_async.py | 18 +- .../test_dist_fleet_a_sync_optimizer_auto.py | 8 +- ..._dist_fleet_a_sync_optimizer_auto_async.py | 10 +- ...st_dist_fleet_a_sync_optimizer_auto_geo.py | 8 +- .../test_dist_fleet_a_sync_optimizer_geo.py | 18 +- .../test_dist_fleet_a_sync_optimizer_sync.py | 2 +- test/legacy_test/test_dist_fleet_base.py | 12 +- .../test_dist_fleet_heter_program.py | 22 +- test/legacy_test/test_dist_fleet_minimize.py | 18 +- test/legacy_test/test_dist_fleet_ps.py | 18 
+- test/legacy_test/test_dist_fleet_ps11.py | 18 +- test/legacy_test/test_dist_fleet_ps12.py | 18 +- test/legacy_test/test_dist_fleet_ps13.py | 18 +- test/legacy_test/test_dist_fleet_ps2.py | 18 +- test/legacy_test/test_dist_fleet_ps3.py | 18 +- test/legacy_test/test_dist_fleet_ps4.py | 18 +- test/legacy_test/test_dist_fleet_ps5.py | 18 +- test/legacy_test/test_dist_fleet_ps6.py | 18 +- .../test_dist_fleet_raw_program_optimizer.py | 8 +- ...et_raw_program_optimizer_fuse_allreduce.py | 4 +- .../test_dist_fleet_sparse_embedding_ctr.py | 16 +- test/legacy_test/test_dist_fleet_spmt.py | 26 +- test/legacy_test/test_dist_hapi_model.py | 4 +- .../test_dist_lookup_sparse_table_fuse_ops.py | 26 +- .../test_dist_mnist_backward_deps.py | 4 +- .../legacy_test/test_dist_mnist_fleet_save.py | 40 +- test/legacy_test/test_dist_mnist_fleetapi.py | 8 +- .../test_dist_mnist_fp16_allreduce.py | 4 +- .../legacy_test/test_dist_mnist_hallreduce.py | 4 +- .../legacy_test/test_dist_mnist_multi_comm.py | 4 +- test/legacy_test/test_dist_mnist_pg.py | 4 +- .../test_dist_mnist_ring_allreduce.py | 4 +- .../test_dist_mnist_with_program.py | 8 +- test/legacy_test/test_dist_op.py | 18 +- test/legacy_test/test_dist_se_resnext_nccl.py | 8 +- test/legacy_test/test_dist_sharding_save.py | 4 +- test/legacy_test/test_dist_sparse_load_ps0.py | 22 +- test/legacy_test/test_dist_sparse_load_ps1.py | 8 +- .../test_dist_sparse_tensor_load_adagrad.py | 6 +- .../test_dist_sparse_tensor_load_adam.py | 6 +- .../test_dist_sparse_tensor_load_ftrl.py | 6 +- .../test_dist_sparse_tensor_load_momentum.py | 6 +- .../test_dist_sparse_tensor_load_rmsprop.py | 6 +- .../test_dist_sparse_tensor_load_sgd.py | 18 +- test/legacy_test/test_dist_train.py | 26 +- test/legacy_test/test_dist_transpiler.py | 80 +-- test/legacy_test/test_dlpack.py | 18 +- test/legacy_test/test_dot_op.py | 22 +- test/legacy_test/test_downpoursgd.py | 22 +- test/legacy_test/test_dropout_nd_op.py | 16 +- test/legacy_test/test_dropout_op.py | 152 ++--- test/legacy_test/test_dygraph_mnist_fp16.py | 10 +- .../legacy_test/test_dygraph_multi_forward.py | 18 +- test/legacy_test/test_dygraph_weight_norm.py | 8 +- .../test_dynamic_rnn_stop_gradient.py | 18 +- .../test_eager_deletion_conditional_block.py | 4 +- .../test_eager_deletion_delete_vars.py | 42 +- .../test_eager_deletion_dynamic_rnn_base.py | 22 +- test/legacy_test/test_eager_deletion_mnist.py | 4 +- .../test_eager_deletion_padding_rnn.py | 20 +- .../test_eager_deletion_recurrent_op.py | 4 +- .../test_eager_deletion_while_op.py | 18 +- test/legacy_test/test_eager_run_program.py | 10 +- test/legacy_test/test_eager_trace_op.py | 6 +- test/legacy_test/test_egr_python_api.py | 40 +- .../legacy_test/test_egr_string_tensor_api.py | 2 +- test/legacy_test/test_eig_op.py | 16 +- test/legacy_test/test_eigvals_op.py | 2 +- test/legacy_test/test_einsum.py | 24 +- test/legacy_test/test_einsum_op.py | 2 +- test/legacy_test/test_einsum_v2.py | 24 +- test/legacy_test/test_elementwise_add_op.py | 36 +- test/legacy_test/test_elementwise_div_op.py | 16 +- .../test_elementwise_floordiv_op.py | 14 +- .../test_elementwise_floormod_op.py | 4 +- .../test_elementwise_gradient_op.py | 10 +- .../test_elementwise_heaviside_op.py | 2 +- test/legacy_test/test_elementwise_max_op.py | 2 +- test/legacy_test/test_elementwise_min_op.py | 8 +- test/legacy_test/test_elementwise_mod_op.py | 16 +- test/legacy_test/test_elementwise_mul_op.py | 26 +- test/legacy_test/test_elementwise_nn_grad.py | 52 +- test/legacy_test/test_elementwise_pow_op.py | 16 +- 
test/legacy_test/test_elementwise_sub_op.py | 24 +- test/legacy_test/test_ema.py | 26 +- .../test_embedding_id_stop_gradient.py | 20 +- test/legacy_test/test_empty_like_op.py | 8 +- test/legacy_test/test_empty_op.py | 8 +- test/legacy_test/test_entry_attr.py | 12 +- test/legacy_test/test_entry_attr2.py | 12 +- test/legacy_test/test_erf_op.py | 26 +- test/legacy_test/test_erfinv_op.py | 2 +- test/legacy_test/test_error_clip.py | 18 +- test/legacy_test/test_exception.py | 20 +- test/legacy_test/test_executor_and_mul.py | 2 +- .../test_executor_and_use_program_cache.py | 14 +- test/legacy_test/test_executor_check_feed.py | 32 +- .../test_executor_feed_non_tensor.py | 116 ++-- ..._executor_return_tensor_not_overwriting.py | 30 +- test/legacy_test/test_expand_as_v2_op.py | 10 +- test/legacy_test/test_expand_op.py | 8 +- test/legacy_test/test_expand_v2_op.py | 24 +- test/legacy_test/test_exponential_op.py | 2 +- test/legacy_test/test_eye_op.py | 8 +- test/legacy_test/test_fake_dequantize_op.py | 6 +- test/legacy_test/test_fake_init_op.py | 2 +- test/legacy_test/test_fc_op.py | 10 +- .../test_feed_data_check_shape_type.py | 22 +- test/legacy_test/test_feed_fetch_method.py | 2 +- test/legacy_test/test_fetch_handler.py | 14 +- .../test_fetch_lod_tensor_array.py | 20 +- test/legacy_test/test_fetch_var.py | 8 +- test/legacy_test/test_fill_any_like_op.py | 2 +- test/legacy_test/test_fill_any_op.py | 6 +- test/legacy_test/test_fill_constant_op.py | 22 +- .../test_fill_diagonal_tensor_op.py | 2 +- test/legacy_test/test_fill_op.py | 2 +- test/legacy_test/test_fill_zeros_like2_op.py | 2 +- test/legacy_test/test_flash_attention.py | 8 +- .../test_flatten_contiguous_range_op.py | 2 +- test/legacy_test/test_flatten_op.py | 2 +- test/legacy_test/test_fleet.py | 16 +- test/legacy_test/test_fleet_base.py | 8 +- test/legacy_test/test_fleet_base_2.py | 10 +- test/legacy_test/test_fleet_base_single.py | 16 +- .../test_fleet_exe_dist_model_run.py | 2 +- .../test_fleet_exe_dist_model_tensor.py | 2 +- test/legacy_test/test_fleet_executor.py | 8 +- .../test_fleet_executor_cond_interceptor.py | 4 +- .../test_fleet_executor_multi_devices.py | 6 +- .../test_fleet_executor_origin_scheduler.py | 8 +- .../test_fleet_executor_task_node.py | 2 +- .../test_fleet_executor_with_task_nodes.py | 8 +- test/legacy_test/test_fleet_gradient_scale.py | 6 +- test/legacy_test/test_fleet_metric.py | 18 +- test/legacy_test/test_fleet_nocvm_1.py | 16 +- test/legacy_test/test_fleet_ps.py | 2 +- test/legacy_test/test_fleet_pyramid_hash.py | 6 +- test/legacy_test/test_fleet_rolemaker.py | 14 +- test/legacy_test/test_fleet_rolemaker_2.py | 14 +- test/legacy_test/test_fleet_rolemaker_3.py | 14 +- test/legacy_test/test_fleet_unitaccessor.py | 16 +- test/legacy_test/test_flip.py | 30 +- test/legacy_test/test_fmax_op.py | 2 +- test/legacy_test/test_fmin_op.py | 2 +- test/legacy_test/test_fold_op.py | 14 +- test/legacy_test/test_frac_api.py | 12 +- test/legacy_test/test_frame_op.py | 2 +- test/legacy_test/test_framework_debug_str.py | 2 +- test/legacy_test/test_frexp_api.py | 2 +- test/legacy_test/test_ftrl_op.py | 2 +- test/legacy_test/test_full_like_op.py | 6 +- test/legacy_test/test_full_op.py | 10 +- test/legacy_test/test_functional_conv1d.py | 2 +- .../test_functional_conv1d_transpose.py | 2 +- test/legacy_test/test_functional_conv2d.py | 48 +- .../test_functional_conv2d_transpose.py | 48 +- test/legacy_test/test_functional_conv3d.py | 48 +- .../test_functional_conv3d_transpose.py | 48 +- test/legacy_test/test_fuse_all_reduce_pass.py | 8 +- 
test/legacy_test/test_fuse_bn_act_pass.py | 38 +- test/legacy_test/test_fuse_bn_add_act_pass.py | 58 +- .../test_fuse_elewise_add_act_pass.py | 28 +- .../test_fuse_gemm_epilogue_pass.py | 2 +- test/legacy_test/test_fuse_optimizer_pass.py | 8 +- .../test_fuse_relu_depthwise_conv_pass.py | 2 +- .../test_fused_attention_no_dropout.py | 2 +- test/legacy_test/test_fused_attention_op.py | 2 +- test/legacy_test/test_fused_attention_pass.py | 2 +- test/legacy_test/test_fused_bias_act_op.py | 2 +- ...sed_bias_dropout_residual_layer_norm_op.py | 2 +- test/legacy_test/test_fused_dropout_add_op.py | 6 +- test/legacy_test/test_fused_ec_moe_op.py | 2 +- .../test_fused_elemwise_activation_op.py | 6 +- .../legacy_test/test_fused_emb_seq_pool_op.py | 10 +- .../test_fused_fc_elementwise_layernorm_op.py | 2 +- test/legacy_test/test_fused_feedforward_op.py | 2 +- .../test_fused_feedforward_pass.py | 2 +- .../test_fused_gate_attention_op.py | 2 +- .../test_fused_gemm_epilogue_grad_op.py | 2 +- .../test_fused_gemm_epilogue_op.py | 2 +- test/legacy_test/test_fused_layernorm_op.py | 12 +- test/legacy_test/test_fused_matmul_bias.py | 2 +- .../test_fused_multi_transformer_int8_op.py | 4 +- .../test_fused_multi_transformer_op.py | 8 +- .../test_fused_multihead_matmul_op.py | 2 +- .../test_fused_rotary_position_embedding.py | 2 +- .../test_fused_transformer_encoder_layer.py | 2 +- ...test_fusion_transpose_flatten_concat_op.py | 2 +- test/legacy_test/test_gather_nd_op.py | 16 +- test/legacy_test/test_gather_op.py | 14 +- test/legacy_test/test_gather_tree_op.py | 2 +- test/legacy_test/test_gaussian_nll_loss.py | 2 +- test/legacy_test/test_gaussian_random_op.py | 24 +- test/legacy_test/test_gcd.py | 18 +- test/legacy_test/test_gelu_op.py | 10 +- test/legacy_test/test_generator_dataloader.py | 34 +- .../test_get_all_op_or_kernel_names.py | 16 +- .../legacy_test/test_get_device_properties.py | 2 +- test/legacy_test/test_get_set_flags.py | 16 +- .../test_get_tensor_from_selected_rows_op.py | 2 +- .../test_global_var_getter_setter.py | 4 +- test/legacy_test/test_glu.py | 10 +- .../test_gpu_package_without_gpu_device.py | 2 +- test/legacy_test/test_grad_clip_minimize.py | 10 +- test/legacy_test/test_gradient_clip.py | 88 +-- test/legacy_test/test_graph_khop_sampler.py | 14 +- .../test_graph_sample_neighbors.py | 6 +- .../legacy_test/test_graph_send_ue_recv_op.py | 2 +- test/legacy_test/test_greater_equal_op.py | 2 +- test/legacy_test/test_grid_sample_function.py | 22 +- test/legacy_test/test_grid_sampler_op.py | 2 +- test/legacy_test/test_group_norm_op.py | 48 +- test/legacy_test/test_group_norm_op_v2.py | 12 +- test/legacy_test/test_gru_rnn_op.py | 2 +- test/legacy_test/test_gru_unit_op.py | 8 +- test/legacy_test/test_gumbel_softmax_op.py | 14 +- test/legacy_test/test_hapi_amp.py | 10 +- test/legacy_test/test_histogram_op.py | 30 +- test/legacy_test/test_host_memory_stats.py | 2 +- test/legacy_test/test_hsigmoid_op.py | 26 +- test/legacy_test/test_huber_loss_op.py | 2 +- test/legacy_test/test_i0_op.py | 2 +- test/legacy_test/test_i0e_op.py | 2 +- test/legacy_test/test_i1_op.py | 2 +- test/legacy_test/test_i1e_op.py | 2 +- test/legacy_test/test_identity_loss_op.py | 6 +- .../test_image_classification_layer.py | 12 +- .../legacy_test/test_imperative_auto_prune.py | 102 ++-- test/legacy_test/test_imperative_base.py | 14 +- .../test_imperative_container_layerlist.py | 8 +- ...test_imperative_container_parameterlist.py | 12 +- .../test_imperative_container_sequential.py | 10 +- .../test_imperative_data_loader_base.py | 26 +- 
.../test_imperative_data_loader_exception.py | 26 +- .../test_imperative_data_loader_exit_func.py | 2 +- .../test_imperative_data_loader_fds_clear.py | 14 +- .../test_imperative_data_loader_process.py | 16 +- .../test_imperative_data_parallel.py | 10 +- test/legacy_test/test_imperative_decorator.py | 12 +- test/legacy_test/test_imperative_deepcf.py | 32 +- .../test_imperative_double_grad.py | 42 +- test/legacy_test/test_imperative_framework.py | 6 +- test/legacy_test/test_imperative_gan.py | 28 +- test/legacy_test/test_imperative_gnn.py | 22 +- test/legacy_test/test_imperative_group.py | 4 +- .../test_imperative_hook_for_layer.py | 30 +- .../test_imperative_layer_apply.py | 4 +- .../test_imperative_layer_children.py | 4 +- .../test_imperative_layer_trainable.py | 6 +- .../test_imperative_load_static_param.py | 16 +- ..._imperative_lod_tensor_to_selected_rows.py | 24 +- test/legacy_test/test_imperative_mnist.py | 28 +- .../test_imperative_mnist_sorted_gradient.py | 28 +- .../test_imperative_named_members.py | 12 +- .../test_imperative_numpy_bridge.py | 10 +- .../test_imperative_ocr_attention_model.py | 36 +- test/legacy_test/test_imperative_optimizer.py | 46 +- .../test_imperative_optimizer_v2.py | 50 +- ...test_imperative_parallel_coalesce_split.py | 10 +- .../test_imperative_partitial_backward.py | 6 +- test/legacy_test/test_imperative_ptb_rnn.py | 26 +- ...test_imperative_ptb_rnn_sorted_gradient.py | 18 +- .../test_imperative_recurrent_usage.py | 34 +- .../test_imperative_reinforcement.py | 24 +- test/legacy_test/test_imperative_resnet.py | 26 +- .../test_imperative_resnet_sorted_gradient.py | 24 +- .../test_imperative_save_load_v2.py | 64 +-- .../legacy_test/test_imperative_se_resnext.py | 26 +- .../test_imperative_selected_rows.py | 20 +- ..._imperative_selected_rows_to_lod_tensor.py | 24 +- .../test_imperative_signal_handler.py | 2 +- ...perative_star_gan_with_gradient_penalty.py | 72 +-- .../test_imperative_tensor_clear_gradient.py | 14 +- ...imperative_trace_non_persistable_inputs.py | 12 +- ..._imperative_transformer_sorted_gradient.py | 30 +- .../test_imperative_triple_grad.py | 18 +- .../test_imperative_using_non_zero_gpu.py | 10 +- test/legacy_test/test_increment.py | 14 +- test/legacy_test/test_index_add_op.py | 2 +- test/legacy_test/test_index_put_op.py | 2 +- test/legacy_test/test_index_sample_op.py | 12 +- test/legacy_test/test_index_select_op.py | 24 +- .../test_infer_no_need_buffer_slots.py | 10 +- test/legacy_test/test_infer_shape.py | 2 +- test/legacy_test/test_inference_api.py | 18 +- test/legacy_test/test_inference_model_io.py | 20 +- test/legacy_test/test_initializer.py | 18 +- test/legacy_test/test_initializer_nn.py | 10 +- test/legacy_test/test_inplace.py | 30 +- test/legacy_test/test_inplace_abn_op.py | 18 +- .../test_inplace_addto_strategy.py | 26 +- ...test_inplace_softmax_with_cross_entropy.py | 22 +- test/legacy_test/test_input_spec.py | 4 +- test/legacy_test/test_instance_norm_op.py | 30 +- test/legacy_test/test_instance_norm_op_v2.py | 42 +- test/legacy_test/test_inverse_op.py | 32 +- test/legacy_test/test_io_save_load.py | 18 +- test/legacy_test/test_ir_graph.py | 34 +- test/legacy_test/test_ir_inplace_pass.py | 6 +- .../test_ir_memory_optimize_pass.py | 2 +- .../test_ir_memory_optimize_transformer.py | 2 +- test/legacy_test/test_isclose_op.py | 14 +- test/legacy_test/test_isfinite_op.py | 2 +- test/legacy_test/test_isfinite_v2_op.py | 20 +- test/legacy_test/test_jit_layer.py | 2 +- test/legacy_test/test_jit_save_load.py | 50 +- 
test/legacy_test/test_kldiv_loss_op.py | 4 +- test/legacy_test/test_kron_op.py | 24 +- test/legacy_test/test_kthvalue_op.py | 8 +- test/legacy_test/test_l1_loss.py | 34 +- .../test_label_smooth_functional.py | 38 +- test/legacy_test/test_label_smooth_op.py | 2 +- test/legacy_test/test_lamb_op.py | 2 +- test/legacy_test/test_lambv2_op.py | 28 +- test/legacy_test/test_layer_norm_op.py | 14 +- test/legacy_test/test_layer_norm_op_v2.py | 44 +- test/legacy_test/test_layers.py | 156 ++--- test/legacy_test/test_layout_autotune.py | 12 +- test/legacy_test/test_lazy_init.py | 2 +- test/legacy_test/test_lcm.py | 18 +- test/legacy_test/test_ldexp.py | 2 +- .../test_learning_rate_scheduler.py | 96 ++-- test/legacy_test/test_lerp_op.py | 2 +- test/legacy_test/test_less_equal_op.py | 2 +- test/legacy_test/test_less_than_op.py | 2 +- test/legacy_test/test_lgamma_op.py | 2 +- test/legacy_test/test_limit_by_capacity_op.py | 2 +- test/legacy_test/test_linalg_lstsq_op.py | 12 +- test/legacy_test/test_linalg_pinv_op.py | 14 +- test/legacy_test/test_linear.py | 8 +- test/legacy_test/test_linear_interp_op.py | 8 +- test/legacy_test/test_linear_interp_v2_op.py | 10 +- test/legacy_test/test_linspace.py | 12 +- test/legacy_test/test_listen_and_serv_op.py | 10 +- .../test_load_state_dict_from_old_format.py | 20 +- test/legacy_test/test_lod_array_length_op.py | 4 +- test/legacy_test/test_lod_tensor.py | 50 +- test/legacy_test/test_lod_tensor_array.py | 2 +- test/legacy_test/test_log_softmax.py | 6 +- test/legacy_test/test_logaddexp.py | 2 +- test/legacy_test/test_logcumsumexp_op.py | 28 +- test/legacy_test/test_logit_op.py | 4 +- test/legacy_test/test_logspace.py | 2 +- test/legacy_test/test_logsumexp.py | 10 +- test/legacy_test/test_lookahead.py | 14 +- test/legacy_test/test_lookup_table_bf16_op.py | 16 +- test/legacy_test/test_lookup_table_op.py | 2 +- .../test_lookup_table_v2_bf16_op.py | 16 +- test/legacy_test/test_lookup_table_v2_op.py | 22 +- test/legacy_test/test_lr_scheduler.py | 2 +- test/legacy_test/test_lrn_op.py | 44 +- test/legacy_test/test_lstm_cudnn_op.py | 2 +- test/legacy_test/test_lu_op.py | 20 +- test/legacy_test/test_lu_unpack_op.py | 28 +- test/legacy_test/test_manual_seed.py | 6 +- .../test_margin_cross_entropy_op.py | 18 +- test/legacy_test/test_margin_rank_loss_op.py | 18 +- test/legacy_test/test_masked_select_op.py | 2 +- test/legacy_test/test_math_op_patch.py | 126 ++-- .../test_math_op_patch_var_base.py | 156 ++--- test/legacy_test/test_matmul_int8_op.py | 2 +- test/legacy_test/test_matmul_op.py | 28 +- test/legacy_test/test_matmul_v2_op.py | 46 +- test/legacy_test/test_matrix_nms_op.py | 2 +- test/legacy_test/test_matrix_power_op.py | 30 +- test/legacy_test/test_matrix_rank_op.py | 26 +- test/legacy_test/test_max_min_amax_amin_op.py | 18 +- test/legacy_test/test_max_op.py | 2 +- test/legacy_test/test_maximum_op.py | 2 +- test/legacy_test/test_maxout_op.py | 2 +- test/legacy_test/test_mean_op.py | 26 +- test/legacy_test/test_memcpy_op.py | 22 +- .../test_memory_efficient_attention.py | 2 +- .../test_memory_reuse_exclude_feed_var.py | 26 +- .../legacy_test/test_merge_selectedrows_op.py | 2 +- test/legacy_test/test_merged_adam_op.py | 18 +- test/legacy_test/test_merged_momentum_op.py | 2 +- test/legacy_test/test_meshgrid_op.py | 44 +- test/legacy_test/test_metrics.py | 14 +- test/legacy_test/test_min_op.py | 6 +- test/legacy_test/test_minimum_op.py | 2 +- .../test_mix_precision_all_reduce_fuse.py | 2 +- test/legacy_test/test_mode_op.py | 8 +- test/legacy_test/test_model.py | 84 +-- 
test/legacy_test/test_modelaverage.py | 16 +- test/legacy_test/test_momentum_op.py | 42 +- test/legacy_test/test_monitor.py | 16 +- test/legacy_test/test_mse_loss.py | 70 +-- test/legacy_test/test_mul_nn_grad.py | 8 +- test/legacy_test/test_mul_op.py | 2 +- test/legacy_test/test_multi_dot_op.py | 2 +- .../test_multi_label_soft_margin_loss.py | 2 +- test/legacy_test/test_multiclass_nms_op.py | 4 +- test/legacy_test/test_multinomial_op.py | 18 +- test/legacy_test/test_multiplex_op.py | 6 +- .../test_multiprocess_dataloader_dataset.py | 24 +- .../test_multiprocess_dataloader_dynamic.py | 14 +- .../test_multiprocess_dataloader_exception.py | 16 +- ...ess_dataloader_iterable_dataset_dynamic.py | 14 +- ...ocess_dataloader_iterable_dataset_split.py | 10 +- ...cess_dataloader_iterable_dataset_static.py | 34 +- .../test_multiprocess_dataloader_static.py | 44 +- .../test_multiprocess_reader_exception.py | 26 +- .../test_naive_best_fit_gpu_memory_limit.py | 20 +- test/legacy_test/test_name_scope.py | 14 +- test/legacy_test/test_nan_inf.py | 10 +- test/legacy_test/test_nan_inf_dir.py | 2 +- test/legacy_test/test_nan_to_num_op.py | 4 +- test/legacy_test/test_nanmean_api.py | 2 +- test/legacy_test/test_nanmedian.py | 4 +- test/legacy_test/test_nansum_api.py | 22 +- test/legacy_test/test_nce.py | 44 +- test/legacy_test/test_nearest_interp_op.py | 2 +- test/legacy_test/test_nearest_interp_v2_op.py | 12 +- test/legacy_test/test_neg_op.py | 2 +- test/legacy_test/test_network_with_dtype.py | 18 +- test/legacy_test/test_nll_loss.py | 296 +++++----- .../test_nn_functional_embedding_static.py | 14 +- test/legacy_test/test_nn_functional_hot_op.py | 16 +- test/legacy_test/test_nn_grad.py | 72 +-- test/legacy_test/test_nn_margin_rank_loss.py | 6 +- test/legacy_test/test_nn_matmul_v2_grad.py | 68 +-- test/legacy_test/test_nn_sigmoid_op.py | 16 +- test/legacy_test/test_noamdecay_op.py | 2 +- test/legacy_test/test_nonzero_api.py | 16 +- test/legacy_test/test_norm_all.py | 24 +- test/legacy_test/test_norm_nn_grad.py | 40 +- test/legacy_test/test_norm_op.py | 12 +- test/legacy_test/test_normal.py | 2 +- .../legacy_test/test_normalization_wrapper.py | 12 +- test/legacy_test/test_normalize.py | 20 +- test/legacy_test/test_npair_loss_op.py | 10 +- test/legacy_test/test_number_count_op.py | 2 +- test/legacy_test/test_numel_op.py | 16 +- test/legacy_test/test_one_hot_v2_op.py | 16 +- test/legacy_test/test_ones_like.py | 16 +- test/legacy_test/test_ones_op.py | 2 +- .../legacy_test/test_op_function_generator.py | 24 +- test/legacy_test/test_op_name_conflict.py | 14 +- test/legacy_test/test_op_support_gpu.py | 2 +- test/legacy_test/test_op_version.py | 10 +- test/legacy_test/test_operator.py | 2 +- test/legacy_test/test_operator_desc.py | 4 +- test/legacy_test/test_optimizer.py | 36 +- .../legacy_test/test_optimizer_for_varbase.py | 8 +- test/legacy_test/test_optimizer_grad.py | 26 +- .../test_optimizer_in_control_flow.py | 30 +- test/legacy_test/test_overlap_add_op.py | 2 +- test/legacy_test/test_pad3d_op.py | 2 +- test/legacy_test/test_pad_op.py | 8 +- .../test_paddle_imperative_double_grad.py | 10 +- .../test_paddle_multiprocessing.py | 2 +- test/legacy_test/test_paddle_save_load.py | 84 +-- .../test_paddle_save_load_binary.py | 54 +- test/legacy_test/test_paddlescience.py | 4 +- test/legacy_test/test_pairwise_distance.py | 8 +- .../test_parallel_dygraph_dataparallel.py | 6 +- ...t_parallel_dygraph_dataparallel_cpuonly.py | 4 +- .../test_parallel_dygraph_transformer_gloo.py | 4 +- 
...utor_seresnext_with_fuse_all_reduce_cpu.py | 6 +- ...utor_seresnext_with_fuse_all_reduce_gpu.py | 6 +- ...llel_executor_seresnext_with_reduce_cpu.py | 2 +- .../test_parallel_executor_transformer.py | 2 +- test/legacy_test/test_parameter.py | 8 +- ...test_partial_eager_deletion_transformer.py | 4 +- test/legacy_test/test_pass_builder.py | 16 +- test/legacy_test/test_pixel_shuffle_op.py | 26 +- test/legacy_test/test_pixel_unshuffle.py | 28 +- test/legacy_test/test_poisson_nll_loss.py | 2 +- test/legacy_test/test_poisson_op.py | 4 +- test/legacy_test/test_polar.py | 2 +- test/legacy_test/test_polygamma_op.py | 2 +- test/legacy_test/test_pool1d_api.py | 84 +-- test/legacy_test/test_pool2d_api.py | 120 ++-- test/legacy_test/test_pool2d_op.py | 4 +- test/legacy_test/test_pool3d_api.py | 118 ++-- test/legacy_test/test_pool3d_op.py | 4 +- test/legacy_test/test_pool_max_op.py | 2 +- test/legacy_test/test_pow.py | 2 +- test/legacy_test/test_prelu_op.py | 16 +- test/legacy_test/test_print_op.py | 12 +- test/legacy_test/test_prod_op.py | 2 +- test/legacy_test/test_program.py | 12 +- test/legacy_test/test_program_code.py | 6 +- test/legacy_test/test_program_converter.py | 2 +- .../test_program_prune_backward.py | 46 +- test/legacy_test/test_program_to_string.py | 4 +- test/legacy_test/test_protobuf.py | 2 +- test/legacy_test/test_protobuf_descs.py | 4 +- test/legacy_test/test_prune.py | 172 +++--- .../test_prune_gate_by_capacity_op.py | 2 +- test/legacy_test/test_psroi_pool_op.py | 8 +- test/legacy_test/test_pull_gpups_sparse_op.py | 16 +- test/legacy_test/test_py_func_op.py | 38 +- .../legacy_test/test_py_reader_combination.py | 14 +- .../legacy_test/test_py_reader_return_list.py | 18 +- .../test_py_reader_sample_generator.py | 16 +- test/legacy_test/test_pybind_interface.py | 2 +- test/legacy_test/test_pyramid_hash_op.py | 14 +- .../test_python_operator_overriding.py | 20 +- test/legacy_test/test_qr_op.py | 22 +- test/legacy_test/test_query_op.py | 2 +- test/legacy_test/test_queue.py | 16 +- test/legacy_test/test_rad2deg.py | 18 +- test/legacy_test/test_rand_op.py | 28 +- test/legacy_test/test_randint_op.py | 8 +- test/legacy_test/test_randn_op.py | 2 +- test/legacy_test/test_random_routing_op.py | 2 +- test/legacy_test/test_random_seed.py | 88 +-- test/legacy_test/test_randperm_op.py | 2 +- test/legacy_test/test_rank_attention_op.py | 2 +- .../legacy_test/test_raw_program_optimizer.py | 6 +- test/legacy_test/test_reader_reset.py | 20 +- test/legacy_test/test_real_imag_op.py | 6 +- test/legacy_test/test_reduce_op.py | 74 +-- .../test_registered_phi_kernels.py | 2 +- test/legacy_test/test_registry.py | 6 +- test/legacy_test/test_regularizer.py | 48 +- test/legacy_test/test_regularizer_api.py | 46 +- test/legacy_test/test_renorm_op.py | 10 +- test/legacy_test/test_repeat_interleave_op.py | 46 +- test/legacy_test/test_require_version.py | 114 ++-- test/legacy_test/test_reshape_op.py | 24 +- test/legacy_test/test_retain_graph.py | 4 +- test/legacy_test/test_rms_norm_op.py | 10 +- test/legacy_test/test_rmsprop_op.py | 24 +- test/legacy_test/test_rnn_cell_api.py | 10 +- test/legacy_test/test_rnn_decode_api.py | 22 +- test/legacy_test/test_rnn_memory_helper_op.py | 6 +- test/legacy_test/test_rnn_op.py | 2 +- test/legacy_test/test_roll_op.py | 24 +- test/legacy_test/test_rot90_op.py | 118 ++-- test/legacy_test/test_row_conv_op.py | 14 +- test/legacy_test/test_rrelu_op.py | 38 +- ...est_run_fluid_by_module_or_command_line.py | 4 +- test/legacy_test/test_run_program_op.py | 58 +- 
.../test_save_model_without_var.py | 12 +- test/legacy_test/test_scalar.py | 2 +- test/legacy_test/test_scale_op.py | 12 +- test/legacy_test/test_scatter_nd_op.py | 32 +- test/legacy_test/test_scatter_op.py | 26 +- test/legacy_test/test_scope.py | 14 +- test/legacy_test/test_searchsorted_op.py | 2 +- test/legacy_test/test_segment_ops.py | 10 +- .../test_select_input_output_op.py | 14 +- test/legacy_test/test_selected_rows.py | 2 +- test/legacy_test/test_selu_op.py | 10 +- test/legacy_test/test_set_bool_attr.py | 8 +- test/legacy_test/test_set_value_op.py | 16 +- test/legacy_test/test_sgd_op.py | 14 +- test/legacy_test/test_sgd_op_bf16.py | 20 +- test/legacy_test/test_shape_op.py | 2 +- test/legacy_test/test_share_data_op.py | 2 +- test/legacy_test/test_shuffle_batch_op.py | 4 +- ...st_sigmoid_cross_entropy_with_logits_op.py | 12 +- test/legacy_test/test_sigmoid_focal_loss.py | 8 +- test/legacy_test/test_sign_op.py | 14 +- test/legacy_test/test_simple_rnn_op.py | 2 +- test/legacy_test/test_size_op.py | 14 +- test/legacy_test/test_slice_op.py | 60 +- test/legacy_test/test_slice_var.py | 4 +- test/legacy_test/test_smooth_l1_loss.py | 82 +-- test/legacy_test/test_softmax2d.py | 2 +- test/legacy_test/test_softmax_mask_fuse_op.py | 16 +- ...est_softmax_mask_fuse_upper_triangle_op.py | 14 +- test/legacy_test/test_softmax_op.py | 26 +- .../test_softmax_with_cross_entropy_op.py | 2 +- test/legacy_test/test_solve_op.py | 54 +- test/legacy_test/test_sort_op.py | 12 +- test/legacy_test/test_space_to_depth_op.py | 14 +- test/legacy_test/test_sparse_attention_op.py | 6 +- test/legacy_test/test_sparse_conv_op.py | 2 +- test/legacy_test/test_sparse_embedding_op.py | 2 +- .../test_sparse_fused_attention_op.py | 2 +- test/legacy_test/test_sparse_norm_op.py | 6 +- test/legacy_test/test_sparse_unary_op.py | 2 +- test/legacy_test/test_sparse_utils_op.py | 2 +- .../test_spawn_and_init_parallel_env.py | 2 +- test/legacy_test/test_spectral_norm_op.py | 2 +- test/legacy_test/test_split_op.py | 60 +- test/legacy_test/test_splits_api.py | 2 +- test/legacy_test/test_square_error_cost.py | 10 +- test/legacy_test/test_squared_l2_norm_op.py | 2 +- test/legacy_test/test_squeeze2_op.py | 4 +- test/legacy_test/test_squeeze_op.py | 14 +- test/legacy_test/test_stack_op.py | 52 +- ...t_static_model_parallel_fused_attention.py | 4 +- ...static_model_parallel_fused_feedforward.py | 4 +- ..._model_parallel_fused_multi_transformer.py | 4 +- test/legacy_test/test_static_save_load.py | 274 ++++----- .../legacy_test/test_static_save_load_bf16.py | 22 +- .../test_static_save_load_large.py | 18 +- test/legacy_test/test_std_layer.py | 4 +- test/legacy_test/test_stride.py | 2 +- test/legacy_test/test_strided_slice_op.py | 12 +- test/legacy_test/test_subtract_op.py | 2 +- test/legacy_test/test_sum_op.py | 40 +- test/legacy_test/test_svd_op.py | 16 +- test/legacy_test/test_switch.py | 6 +- test/legacy_test/test_switch_autotune.py | 6 +- test/legacy_test/test_switch_case.py | 44 +- test/legacy_test/test_sync_batch_norm_op.py | 34 +- test/legacy_test/test_take.py | 20 +- test/legacy_test/test_tdm_child_op.py | 10 +- test/legacy_test/test_tdm_sampler_op.py | 14 +- test/legacy_test/test_temporal_shift_op.py | 6 +- test/legacy_test/test_tensor.py | 18 +- .../test_tensor_array_to_tensor.py | 26 +- test/legacy_test/test_tensor_copy_from.py | 2 +- test/legacy_test/test_tensor_fill_.py | 18 +- .../legacy_test/test_tensor_fill_diagonal_.py | 38 +- .../test_tensor_fill_diagonal_tensor.py | 8 +- .../test_tensor_fill_diagonal_tensor_.py | 8 +- 
test/legacy_test/test_tensor_register_hook.py | 6 +- test/legacy_test/test_tensor_to_list.py | 10 +- test/legacy_test/test_tensor_to_numpy.py | 12 +- test/legacy_test/test_tensor_uva.py | 8 +- test/legacy_test/test_tensor_zero_.py | 10 +- test/legacy_test/test_tensordot.py | 6 +- test/legacy_test/test_tf32_cublas.py | 10 +- test/legacy_test/test_tf32_cudnn.py | 2 +- test/legacy_test/test_tile_op.py | 18 +- test/legacy_test/test_top_k_v2_op.py | 6 +- test/legacy_test/test_trace_op.py | 8 +- test/legacy_test/test_traced_layer_err_msg.py | 64 +-- test/legacy_test/test_trainable.py | 16 +- test/legacy_test/test_trainer_desc.py | 6 +- test/legacy_test/test_trans_layout_op.py | 12 +- test/legacy_test/test_transfer_dtype_op.py | 2 +- test/legacy_test/test_transfer_layout_op.py | 14 +- test/legacy_test/test_transformer_api.py | 40 +- test/legacy_test/test_transpose_op.py | 72 +-- test/legacy_test/test_trapezoid.py | 4 +- test/legacy_test/test_triangular_solve_op.py | 18 +- test/legacy_test/test_tril_indices_op.py | 14 +- test/legacy_test/test_tril_triu_op.py | 32 +- test/legacy_test/test_trilinear_interp_op.py | 2 +- .../test_trilinear_interp_v2_op.py | 8 +- test/legacy_test/test_triu_indices_op.py | 14 +- test/legacy_test/test_trunc_op.py | 2 +- .../test_truncated_gaussian_random_op.py | 18 +- test/legacy_test/test_unbind_op.py | 16 +- test/legacy_test/test_unfold_op.py | 12 +- .../test_uniform_random_bf16_op.py | 24 +- .../test_uniform_random_inplace_op.py | 10 +- test/legacy_test/test_uniform_random_op.py | 100 ++-- test/legacy_test/test_unique.py | 2 +- .../legacy_test/test_unique_consecutive_op.py | 40 +- test/legacy_test/test_unique_name.py | 30 +- test/legacy_test/test_unique_with_counts.py | 2 +- test/legacy_test/test_unpool1d_op.py | 12 +- test/legacy_test/test_unpool3d_op.py | 14 +- test/legacy_test/test_unpool_op.py | 30 +- test/legacy_test/test_unsqueeze_op.py | 12 +- test/legacy_test/test_unstack_op.py | 6 +- test/legacy_test/test_unzip_op.py | 10 +- .../test_update_loss_scaling_op.py | 60 +- test/legacy_test/test_vander.py | 2 +- test/legacy_test/test_var_base.py | 142 ++--- test/legacy_test/test_variable.py | 70 +-- ...iable_length_memory_efficient_attention.py | 4 +- test/legacy_test/test_variance_layer.py | 2 +- test/legacy_test/test_version.py | 28 +- .../test_view_op_reuse_allocation.py | 2 +- test/legacy_test/test_viterbi_decode_op.py | 12 +- test/legacy_test/test_warpctc_op.py | 2 +- test/legacy_test/test_warprnnt_op.py | 2 +- test/legacy_test/test_weight_decay.py | 42 +- test/legacy_test/test_weight_normalization.py | 16 +- test/legacy_test/test_where_op.py | 66 +-- test/legacy_test/test_while_loop_op.py | 70 +-- test/legacy_test/test_while_op.py | 36 +- test/legacy_test/test_yolov3_loss_op.py | 2 +- test/legacy_test/test_zero_dim_tensor.py | 2 +- test/legacy_test/test_zeros_like_op.py | 20 +- test/legacy_test/test_zeros_op.py | 10 +- test/legacy_test/testsuite.py | 2 +- test/legacy_test/transformer_model.py | 4 +- test/legacy_test/utils.py | 6 +- test/mkldnn/check_flags_mkldnn_ops_on_off.py | 18 +- test/mkldnn/check_flags_use_mkldnn.py | 16 +- test/mkldnn/mkldnn_op_test.py | 16 +- test/mkldnn/test_activation_bf16_mkldnn_op.py | 2 +- test/mkldnn/test_activation_mkldnn_op.py | 24 +- test/mkldnn/test_batch_norm_mkldnn_op.py | 2 +- test/mkldnn/test_cast_mkldnn_op.py | 2 +- test/mkldnn/test_clip_mkldnn_op.py | 2 +- test/mkldnn/test_concat_bf16_mkldnn_op.py | 2 +- test/mkldnn/test_concat_mkldnn_op.py | 2 +- test/mkldnn/test_conv2d_bf16_mkldnn_op.py | 6 +- 
test/mkldnn/test_conv2d_int8_mkldnn_op.py | 8 +- test/mkldnn/test_conv2d_mkldnn_op.py | 4 +- .../test_conv2d_transpose_bf16_mkldnn_op.py | 6 +- .../mkldnn/test_conv2d_transpose_mkldnn_op.py | 2 +- test/mkldnn/test_dequantize_mkldnn_op.py | 4 +- .../test_elementwise_add_bf16_mkldnn_op.py | 2 +- test/mkldnn/test_elementwise_div_mkldnn_op.py | 8 +- .../test_elementwise_mul_bf16_mkldnn_op.py | 2 +- test/mkldnn/test_elementwise_sub_onednn_op.py | 8 +- test/mkldnn/test_expand_v2_mkldnn_op.py | 2 +- test/mkldnn/test_fc_bf16_mkldnn_op.py | 2 +- test/mkldnn/test_flatten_mkldnn_op.py | 2 +- test/mkldnn/test_fused_vit_attention.py | 4 +- test/mkldnn/test_fusion_gru_bf16_mkldnn_op.py | 2 +- .../mkldnn/test_fusion_lstm_bf16_mkldnn_op.py | 2 +- test/mkldnn/test_layer_norm_bf16_mkldnn_op.py | 10 +- test/mkldnn/test_layer_norm_mkldnn_op.py | 10 +- test/mkldnn/test_log_softmax_mkldnn_op.py | 2 +- test/mkldnn/test_matmul_bf16_mkldnn_op.py | 2 +- test/mkldnn/test_matmul_v2_mkldnn_op.py | 2 +- test/mkldnn/test_mul_int8_mkldnn_op.py | 2 +- test/mkldnn/test_mul_mkldnn_op.py | 2 +- ...st_onnx_format_quantization_mobilenetv1.py | 6 +- test/mkldnn/test_pool2d_bf16_mkldnn_op.py | 2 +- test/mkldnn/test_pool2d_int8_mkldnn_op.py | 4 +- test/mkldnn/test_prelu_mkldnn_op.py | 2 +- test/mkldnn/test_quantize_mkldnn_op.py | 2 +- test/mkldnn/test_reduce_bf16_mkldnn_op.py | 2 +- test/mkldnn/test_requantize_mkldnn_op.py | 12 +- test/mkldnn/test_reshape_bf16_op.py | 2 +- test/mkldnn/test_reshape_mkldnn_op.py | 2 +- test/mkldnn/test_scale_bf16_mkldnn_op.py | 2 +- test/mkldnn/test_shape_mkldnn_op.py | 2 +- test/mkldnn/test_shuffle_channel_mkldnn_op.py | 2 +- test/mkldnn/test_slice_mkldnn_op.py | 2 +- test/mkldnn/test_softmax_bf16_mkldnn_op.py | 2 +- test/mkldnn/test_softmax_mkldnn_op.py | 4 +- test/mkldnn/test_split_bf16_mkldnn_op.py | 2 +- test/mkldnn/test_squeeze2_mkldnn_op.py | 2 +- test/mkldnn/test_stack_mkldnn_op.py | 2 +- test/mkldnn/test_sum_bf16_mkldnn_op.py | 2 +- test/mkldnn/test_sum_mkldnn_op.py | 2 +- test/mkldnn/test_transpose_bf16_mkldnn_op.py | 2 +- test/mkldnn/test_transpose_int8_mkldnn_op.py | 2 +- .../test_composite_batch_norm.py | 2 +- .../test_composite_batch_norm_grad.py | 2 +- .../composite_ops/test_composite_dropout.py | 2 +- .../prim/composite_ops/test_composite_gelu.py | 2 +- .../composite_ops/test_composite_gelu_grad.py | 2 +- .../test_composite_layer_norm.py | 6 +- .../test_composite_layer_norm_grad.py | 2 +- .../prim/composite_ops/test_composite_mean.py | 2 +- .../composite_ops/test_composite_mean_grad.py | 2 +- .../test_composite_relu_custom_vjp.py | 2 +- .../composite_ops/test_composite_softmax.py | 2 +- .../test_composite_softmax_custom_vjp.py | 2 +- .../test_composite_softmax_grad.py | 2 +- test/prim/model/bert.py | 2 +- test/prim/model/test_bert_cinn.py | 6 +- test/prim/model/test_bert_prim.py | 6 +- test/prim/model/test_bert_prim_cinn.py | 6 +- test/prim/model/test_comp_model_simple_net.py | 2 +- test/prim/model/test_prim_simplenet_cinn.py | 2 +- test/prim/model/test_resnet_cinn.py | 8 +- test/prim/model/test_resnet_prim.py | 8 +- test/prim/model/test_resnet_prim_cinn.py | 8 +- test/prim/prim/flags/test_prim_flags.py | 2 +- test/prim/prim/flags/test_prim_flags_case.py | 2 +- .../vjp/eager/test_comp_eager_add_grad.py | 2 +- .../eager/test_comp_eager_batch_norm_grad.py | 2 +- .../vjp/eager/test_comp_eager_cast_grad.py | 4 +- .../vjp/eager/test_comp_eager_div_grad.py | 2 +- .../vjp/eager/test_comp_eager_exp_grad.py | 2 +- .../vjp/eager/test_comp_eager_expand_grad.py | 2 +- 
.../vjp/eager/test_comp_eager_gather_grad.py | 2 +- .../test_comp_eager_matmul_double_grad.py | 2 +- .../eager/test_comp_eager_multiply_grad.py | 2 +- .../vjp/eager/test_comp_eager_reshape_grad.py | 2 +- .../vjp/eager/test_comp_eager_sigmoid_grad.py | 2 +- .../vjp/eager/test_comp_eager_sqrt_grad.py | 2 +- .../vjp/eager/test_comp_eager_sub_grad.py | 2 +- .../vjp/eager/test_comp_eager_sum_grad.py | 2 +- .../vjp/eager/test_comp_eager_tanh_grad.py | 2 +- .../eager/test_comp_eager_transpose_grad.py | 2 +- .../prim/vjp/static/test_comp_add_grad.py | 2 +- .../vjp/static/test_comp_add_tanh_grad.py | 2 +- .../vjp/static/test_comp_batch_norm_grad.py | 2 +- .../prim/vjp/static/test_comp_cast_grad.py | 2 +- .../prim/vjp/static/test_comp_div_grad.py | 2 +- .../prim/vjp/static/test_comp_exp_grad.py | 2 +- .../prim/vjp/static/test_comp_expand_grad.py | 2 +- .../prim/vjp/static/test_comp_gather_grad.py | 2 +- .../static/test_comp_matmul_double_grad.py | 2 +- .../vjp/static/test_comp_multiply_grad.py | 2 +- .../prim/vjp/static/test_comp_reshape_grad.py | 2 +- .../prim/vjp/static/test_comp_sigmoid_grad.py | 2 +- .../prim/vjp/static/test_comp_sqrt_grad.py | 2 +- .../prim/vjp/static/test_comp_sub_grad.py | 2 +- .../prim/vjp/static/test_comp_sum_grad.py | 2 +- .../prim/vjp/static/test_comp_tanh_grad.py | 2 +- .../vjp/static/test_comp_transpose_grad.py | 2 +- test/prim/prim/vjp/test_comp_high_grad.py | 36 +- test/prim/process/test_check_inputs.py | 2 +- test/prim/process/test_copy_op.py | 2 +- test/prim/process/test_prim_amp.py | 2 +- test/prim/test_comp_custom_vjp.py | 2 +- test/prim/test_comp_dispensable.py | 6 +- ...est_comp_get_grad_op_desc_prim_disabled.py | 4 +- ...test_comp_get_grad_op_desc_prim_enabled.py | 4 +- test/prim/test_comp_skip_op_set.py | 2 +- test/ps/fl_ps_trainer.py | 26 +- test/ps/ps_dnn_model.py | 12 +- test/quantization/README.md | 2 +- test/quantization/convert_model2dot.py | 2 +- ...t2_int8_image_classification_comparison.py | 2 +- test/quantization/quant2_int8_lstm_model.py | 2 +- .../quant2_int8_nlp_comparison.py | 4 +- ...nt_int8_image_classification_comparison.py | 2 +- test/quantization/test_graph.py | 4 +- .../quantization/test_imperative_out_scale.py | 6 +- test/quantization/test_imperative_qat.py | 4 +- test/quantization/test_imperative_qat_amp.py | 4 +- ...t_post_training_quantization_lstm_model.py | 6 +- .../test_quant2_int8_mkldnn_pass.py | 2 +- test/quantization/test_quant_aware.py | 4 +- .../test_quant_aware_user_defined.py | 2 +- .../test_quantization_mkldnn_pass.py | 4 +- test/quantization/test_quantization_pass.py | 8 +- .../test_quantization_scale_pass.py | 4 +- .../test_user_defined_quantization.py | 4 +- test/rnn/test_rnn_api.py | 28 +- test/rnn/test_rnn_cells.py | 2 +- test/rnn/test_rnn_cells_static.py | 26 +- test/rnn/test_rnn_cudnn_params_packing.py | 6 +- test/rnn/test_rnn_nets.py | 2 +- test/rnn/test_rnn_nets_static.py | 32 +- test/rnn/test_wrappers.py | 2 +- test/sequence/test_sequence_conv.py | 10 +- test/sequence/test_sequence_expand_as.py | 2 +- test/sequence/test_sequence_first_step.py | 2 +- test/sequence/test_sequence_last_step.py | 2 +- test/sequence/test_sequence_mask.py | 2 +- test/sequence/test_sequence_pad_op.py | 2 +- test/sequence/test_sequence_softmax_op.py | 2 +- .../test_standalone_controlflow.py | 4 +- .../test_standalone_cross_step_overlap.py | 2 +- .../test_standalone_custom_stream.py | 2 +- .../test_standalone_executor.py | 18 +- ...t_standalone_executor_aot_choose_kernel.py | 4 +- ...t_standalone_executor_multi_micro_batch.py | 8 +- 
.../test_standalone_executor_plan.py | 2 +- .../test_standalone_multiply_write.py | 2 +- test/tokenizer/test_faster_tokenizer_op.py | 4 +- test/xpu/collective_allgather_op_xpu.py | 6 +- test/xpu/collective_allreduce_op_xpu.py | 6 +- test/xpu/collective_broadcast_op_xpu.py | 6 +- test/xpu/collective_concat_op.py | 6 +- test/xpu/collective_identity_op_xpu.py | 6 +- test/xpu/collective_split_op.py | 6 +- test/xpu/get_test_cover_info.py | 2 +- test/xpu/op_test_xpu.py | 10 +- test/xpu/test_activation_op_xpu.py | 24 +- test/xpu/test_adadelta_op_xpu.py | 14 +- test/xpu/test_adam_op_xpu.py | 2 +- test/xpu/test_adamw_op_xpu.py | 26 +- test/xpu/test_affine_channel_op_xpu.py | 2 +- test/xpu/test_assign_value_op_xpu.py | 14 +- test/xpu/test_batch_norm_op_xpu.py | 8 +- test/xpu/test_bilinear_interp_op_xpu.py | 16 +- test/xpu/test_bitwise_op_xpu.py | 14 +- test/xpu/test_c_concat.py | 2 +- test/xpu/test_c_split.py | 2 +- test/xpu/test_cast_op_xpu.py | 8 +- test/xpu/test_clip_op_xpu.py | 20 +- test/xpu/test_coalesce_tensor_op_xpu.py | 8 +- test/xpu/test_collective_allgather_xpu.py | 2 +- test/xpu/test_collective_allreduce_xpu.py | 2 +- test/xpu/test_collective_base_xpu.py | 14 +- test/xpu/test_collective_broadcast_xpu.py | 2 +- test/xpu/test_collective_identity_xpu.py | 2 +- test/xpu/test_conv2d_op_xpu.py | 10 +- test/xpu/test_conv3d_op_xpu.py | 8 +- test/xpu/test_deformable_conv_op_xpu.py | 10 +- test/xpu/test_device_guard_xpu.py | 2 +- test/xpu/test_diag_v2_op_xpu.py | 14 +- test/xpu/test_dropout_op_xpu.py | 26 +- test/xpu/test_elementwise_add_op_xpu.py | 20 +- test/xpu/test_elementwise_add_op_xpu_kp.py | 20 +- test/xpu/test_elementwise_div_op_xpu.py | 6 +- test/xpu/test_elementwise_floordiv_op_xpu.py | 4 +- test/xpu/test_elementwise_mod_op_xpu.py | 8 +- test/xpu/test_elementwise_mul_op_xpu.py | 4 +- test/xpu/test_empty_op_xpu.py | 2 +- test/xpu/test_expand_as_v2_op_xpu.py | 6 +- test/xpu/test_expand_v2_op_xpu.py | 10 +- test/xpu/test_fill_any_op_xpu.py | 6 +- test/xpu/test_fill_op_xpu.py | 2 +- test/xpu/test_fleet_exe_dist_model_run_xpu.py | 2 +- test/xpu/test_fused_attention_op_xpu.py | 2 +- test/xpu/test_fused_feedforward_op_xpu.py | 2 +- .../test_fused_gemm_epilogue_grad_op_xpu.py | 2 +- test/xpu/test_fused_gemm_epilogue_op_xpu.py | 2 +- .../test_fused_resnet_basic_block_op_xpu.py | 42 +- test/xpu/test_gaussian_random_op_xpu.py | 18 +- test/xpu/test_gen_bkcl_id_op.py | 2 +- test/xpu/test_generate_proposals_v2_op_xpu.py | 2 +- test/xpu/test_group_norm_op_xpu.py | 6 +- test/xpu/test_huber_loss_op_xpu.py | 4 +- test/xpu/test_index_sample_op_xpu.py | 10 +- test/xpu/test_index_select_op_xpu.py | 20 +- test/xpu/test_instance_norm_op_xpu.py | 8 +- test/xpu/test_kldiv_loss_op_xpu.py | 4 +- test/xpu/test_logical_op_xpu.py | 14 +- test/xpu/test_matmul_op_xpu.py | 28 +- test/xpu/test_mean_op_xpu.py | 2 +- test/xpu/test_merged_momentum_op_xpu.py | 2 +- test/xpu/test_merged_momentum_op_xpu_base.py | 4 +- test/xpu/test_momentum_op_xpu.py | 2 +- test/xpu/test_nearest_interp_op_xpu.py | 6 +- test/xpu/test_one_hot_op_xpu.py | 2 +- test/xpu/test_one_hot_v2_op_xpu.py | 16 +- test/xpu/test_pad3d_op_xpu.py | 2 +- test/xpu/test_pad_op_xpu.py | 8 +- .../xpu/test_parallel_dygraph_dataparallel.py | 6 +- test/xpu/test_pool2d_op_xpu.py | 2 +- test/xpu/test_pool3d_op_xpu.py | 2 +- test/xpu/test_prelu_op_xpu.py | 12 +- test/xpu/test_randperm_op_xpu.py | 2 +- test/xpu/test_refactor_op_xpu.py | 6 +- test/xpu/test_rmsprop_op_xpu.py | 8 +- test/xpu/test_rnn_op_xpu.py | 2 +- test/xpu/test_roi_align_op_xpu.py | 2 +- 
test/xpu/test_scale_op_xpu.py | 2 +- test/xpu/test_sequence_conv_op_xpu.py | 10 +- test/xpu/test_sequence_unpad_op_xpu.py | 8 +- test/xpu/test_set_value_op_xpu.py | 8 +- test/xpu/test_sgd_op_xpu.py | 10 +- test/xpu/test_shape_op_xpu.py | 4 +- test/xpu/test_squeeze_op_xpu.py | 6 +- test/xpu/test_sum_op_xpu.py | 18 +- test/xpu/test_tile_op_xpu.py | 4 +- .../test_truncated_gaussian_random_op_xpu.py | 8 +- test/xpu/test_unbind_op_xpu.py | 14 +- test/xpu/test_unfold_op_xpu.py | 6 +- test/xpu/test_unique_op_xpu.py | 4 +- test/xpu/test_update_loss_scaling_op_xpu.py | 38 +- test/xpu/test_warpctc_op_xpu.py | 2 +- test/xpu/test_where_index_xpu.py | 10 +- test/xpu/test_where_op_xpu.py | 28 +- test/xpu/test_while_op_xpu.py | 20 +- test/xpu/test_xpu_place.py | 8 +- tools/CrossStackProfiler/ProfileFileReader.py | 2 +- tools/check_file_diff_approvals.sh | 34 +- tools/check_op_desc.py | 2 +- tools/check_op_register_type.py | 4 +- .../paddle_benchmark/paddle_test_benchmark.py | 2 +- .../cinn/paddle_benchmark/test_paddle_ops.py | 2 +- tools/count_api_without_core_ops.py | 6 +- tools/diff_use_default_grad_op_maker.py | 4 +- tools/parse_kernel_info.py | 4 +- tools/print_op_desc.py | 2 +- tools/print_signatures.py | 4 +- tools/summary_env.py | 2 +- tools/test_check_api_compatible.py | 2 +- tools/test_runner.py | 16 +- tools/timeline.py | 2 +- 1935 files changed, 13573 insertions(+), 13573 deletions(-) rename python/paddle/{fluid => base}/.gitignore (100%) rename python/paddle/{fluid => base}/__init__.py (97%) rename python/paddle/{fluid => base}/backward.py (99%) rename python/paddle/{fluid => base}/compiler.py (99%) rename python/paddle/{fluid => base}/core.py (100%) rename python/paddle/{fluid => base}/data_feed_desc.py (91%) rename python/paddle/{fluid => base}/data_feeder.py (95%) rename python/paddle/{fluid => base}/dataset.py (85%) rename python/paddle/{fluid => base}/default_scope_funcs.py (94%) rename python/paddle/{fluid => base}/device_worker.py (100%) rename python/paddle/{fluid => base}/dygraph/__init__.py (100%) rename python/paddle/{fluid => base}/dygraph/base.py (93%) rename python/paddle/{fluid => base}/dygraph/math_op_patch.py (100%) rename python/paddle/{fluid => base}/dygraph/tensor_patch_methods.py (98%) rename python/paddle/{fluid => base}/dygraph/tracer.py (99%) rename python/paddle/{fluid => base}/dygraph_utils.py (100%) rename python/paddle/{fluid => base}/executor.py (98%) rename python/paddle/{fluid => base}/framework.py (98%) rename python/paddle/{fluid => base}/incubate/__init__.py (94%) rename python/paddle/{fluid => base}/incubate/checkpoint/__init__.py (100%) rename python/paddle/{fluid => base}/incubate/checkpoint/auto_checkpoint.py (99%) rename python/paddle/{fluid => base}/incubate/checkpoint/checkpoint_saver.py (100%) rename python/paddle/{fluid => base}/initializer.py (97%) rename python/paddle/{fluid => base}/io.py (84%) rename python/paddle/{fluid => base}/layer_helper.py (100%) rename python/paddle/{fluid => base}/layer_helper_base.py (99%) rename python/paddle/{fluid => base}/layers/__init__.py (100%) rename python/paddle/{fluid => base}/layers/io.py (100%) rename python/paddle/{fluid => base}/layers/layer_function_generator.py (99%) rename python/paddle/{fluid => base}/layers/math_op_patch.py (98%) rename python/paddle/{fluid => base}/lod_tensor.py (93%) rename python/paddle/{fluid => base}/log_helper.py (100%) rename python/paddle/{fluid => base}/multiprocess_utils.py (100%) rename python/paddle/{fluid => base}/param_attr.py (99%) rename python/paddle/{fluid => 
base}/reader.py (95%) rename python/paddle/{fluid => base}/trainer_desc.py (100%) rename python/paddle/{fluid => base}/trainer_factory.py (99%) rename python/paddle/{fluid => base}/unique_name.py (100%) rename python/paddle/{fluid => base}/variable_index.py (98%) rename python/paddle/{fluid => base}/wrapped_decorator.py (100%) diff --git a/.flake8 b/.flake8 index ed87df8fb1b6f..9a569f7a6a56e 100644 --- a/.flake8 +++ b/.flake8 @@ -3,7 +3,7 @@ select = C,E,W exclude = ./build, # Exclude fluid directory - ./python/paddle/fluid/**, + ./python/paddle/base/**, # Exclude third-party libraries ./third_party/**, ./python/paddle/utils/gast/**, diff --git a/.gitignore b/.gitignore index 6abc4d02a4392..46b8e3a47c618 100644 --- a/.gitignore +++ b/.gitignore @@ -95,7 +95,7 @@ paddle/fluid/framework/__init__.py paddle/phi/api/profiler/__init__.py python/paddle/incubate/fleet/parameter_server/pslib/ps_pb2.py paddle/phi/kernels/fusion/cutlass/conv2d/generated/* -python/paddle/fluid/incubate/fleet/parameter_server/pslib/ps_pb2.py +python/paddle/base/incubate/fleet/parameter_server/pslib/ps_pb2.py paddle/fluid/ir_adaptor/translator/op_compat_info.cc paddle/fluid/pybind/static_op_function.* paddle/fluid/pybind/ops_api.cc diff --git a/paddle/CMakeLists.txt b/paddle/CMakeLists.txt index 22eac537766c4..92e302eb15acc 100644 --- a/paddle/CMakeLists.txt +++ b/paddle/CMakeLists.txt @@ -1,5 +1,5 @@ set(PYTHON_TESTS_DIR - ${PADDLE_BINARY_DIR}/python/paddle/fluid/tests + ${PADDLE_BINARY_DIR}/python/paddle/base/tests CACHE INTERNAL "python tests directory") add_subdirectory(utils) diff --git a/paddle/cinn/hlir/op/transform.cc b/paddle/cinn/hlir/op/transform.cc index 05cb9adb9778b..a668d3518df30 100644 --- a/paddle/cinn/hlir/op/transform.cc +++ b/paddle/cinn/hlir/op/transform.cc @@ -2044,7 +2044,7 @@ CINN_REGISTER_HELPER(transform_ops) { // pointers, the code generated by operator fusion will have out-of-bounds // access. It should not fuse with any other injective operators, though // scatter_add is injective. turn KNonFusible to kInjective will fail - // /Paddle/python/paddle/fluid/tests/unittests/test_index_select_op.py + // /Paddle/python/paddle/base/tests/unittests/test_index_select_op.py .set_attr( "OpPattern", cinn::hlir::framework::OpPatternKind::kNonFusible) .set_support_level(4); diff --git a/paddle/fluid/framework/CMakeLists.txt b/paddle/fluid/framework/CMakeLists.txt index 055163ed6206b..2baf3ab5e8da1 100755 --- a/paddle/fluid/framework/CMakeLists.txt +++ b/paddle/fluid/framework/CMakeLists.txt @@ -600,8 +600,8 @@ if(WITH_PYTHON) TARGET framework_py_proto POST_BUILD COMMAND ${CMAKE_COMMAND} -E make_directory - ${PADDLE_BINARY_DIR}/python/paddle/fluid/proto - COMMAND cp *.py ${PADDLE_BINARY_DIR}/python/paddle/fluid/proto/ + ${PADDLE_BINARY_DIR}/python/paddle/base/proto + COMMAND cp *.py ${PADDLE_BINARY_DIR}/python/paddle/base/proto/ COMMAND cp distributed_strategy_*.py ${PADDLE_BINARY_DIR}/python/paddle/distributed/fleet/proto COMMENT "Copy generated python proto into directory paddle/fluid/proto." 
@@ -620,7 +620,7 @@ if(WITH_PYTHON) endif() else() string(REPLACE "/" "\\" proto_dstpath - "${PADDLE_BINARY_DIR}/python/paddle/fluid/proto/") + "${PADDLE_BINARY_DIR}/python/paddle/base/proto/") string( REPLACE "/" "\\" fleet_proto_dstpath "${PADDLE_BINARY_DIR}/python/paddle/distributed/fleet/proto/") @@ -628,7 +628,7 @@ if(WITH_PYTHON) TARGET framework_py_proto POST_BUILD COMMAND ${CMAKE_COMMAND} -E make_directory - ${PADDLE_BINARY_DIR}/python/paddle/fluid/proto + ${PADDLE_BINARY_DIR}/python/paddle/base/proto COMMAND copy /Y *.py ${proto_dstpath} COMMAND copy /Y distributed_strategy_*.py ${fleet_proto_dstpath} COMMENT "Copy generated python proto into directory paddle/fluid/proto." diff --git a/paddle/fluid/framework/ir/auto_mixed_precision_pass.cc b/paddle/fluid/framework/ir/auto_mixed_precision_pass.cc index 37f90289c4b53..698de5d90c256 100644 --- a/paddle/fluid/framework/ir/auto_mixed_precision_pass.cc +++ b/paddle/fluid/framework/ir/auto_mixed_precision_pass.cc @@ -175,7 +175,7 @@ bool OpSupportPrecision(const std::string& op_type, // The set of ops that support fp16 calculation and are considered // numerically-dangerous, slower and whose effects may also be observed in // downstream ops. -// ref to python/paddle/fluid/contrib/mixed_precision/fp16_lists.py +// ref to python/paddle/base/contrib/mixed_precision/fp16_lists.py void AutoMixedPrecisionPass::SetDefaultBlacklist() const { black_list_.insert({ // numerically-dangerous diff --git a/paddle/phi/api/profiler/CMakeLists.txt b/paddle/phi/api/profiler/CMakeLists.txt index ec569fe9fbc85..af1aacae63bd5 100644 --- a/paddle/phi/api/profiler/CMakeLists.txt +++ b/paddle/phi/api/profiler/CMakeLists.txt @@ -1,7 +1,7 @@ proto_library(phi_profiler_proto SRCS profiler.proto) if(WITH_PYTHON AND EXISTS ${PADDLE_BINARY_DIR}) - set(FLUID_PATH ${PADDLE_BINARY_DIR}/python/paddle/fluid) + set(FLUID_PATH ${PADDLE_BINARY_DIR}/python/paddle/base) py_proto_compile(profiler_py_proto SRCS profiler.proto) file(TOUCH ${CMAKE_CURRENT_BINARY_DIR}/__init__.py) if(NOT WIN32) diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index f83028c0ffb4f..a788171bdad1e 100644 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -2574,7 +2574,7 @@ set -ex function parallel_test_base_ipu() { mkdir -p ${PADDLE_ROOT}/build - cd ${PADDLE_ROOT}/build/python/paddle/fluid/tests/unittests/ipu + cd ${PADDLE_ROOT}/build/python/paddle/base/tests/unittests/ipu if [ ${WITH_TESTING:-ON} == "ON" ] ; then cat < 0 ): diff --git a/python/paddle/autograd/backward_mode.py b/python/paddle/autograd/backward_mode.py index 4419a60598abc..0e3382dcac700 100644 --- a/python/paddle/autograd/backward_mode.py +++ b/python/paddle/autograd/backward_mode.py @@ -13,8 +13,8 @@ # limitations under the License. import paddle -from paddle.fluid import core, framework -from paddle.fluid.backward import gradients_with_optimizer # noqa: F401 +from paddle.base import core, framework +from paddle.base.backward import gradients_with_optimizer # noqa: F401 __all__ = [] diff --git a/python/paddle/autograd/py_layer.py b/python/paddle/autograd/py_layer.py index fa9243804faf0..5ddf610bb032b 100644 --- a/python/paddle/autograd/py_layer.py +++ b/python/paddle/autograd/py_layer.py @@ -13,7 +13,7 @@ # limitations under the License. 
import paddle -from paddle.fluid import core +from paddle.base import core __all__ = [] @@ -404,7 +404,7 @@ def backward(ctx, *args): def once_differentiable(backward): def wrapper(ctx, *args): - with paddle.fluid.dygraph.no_grad(): + with paddle.base.dygraph.no_grad(): outputs = backward(ctx, *args) return outputs diff --git a/python/paddle/autograd/saved_tensors_hooks.py b/python/paddle/autograd/saved_tensors_hooks.py index 709c646325ed0..5d68a90e78c89 100644 --- a/python/paddle/autograd/saved_tensors_hooks.py +++ b/python/paddle/autograd/saved_tensors_hooks.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from paddle.fluid import core +from paddle.base import core __all__ = [] diff --git a/python/paddle/fluid/.gitignore b/python/paddle/base/.gitignore similarity index 100% rename from python/paddle/fluid/.gitignore rename to python/paddle/base/.gitignore diff --git a/python/paddle/fluid/__init__.py b/python/paddle/base/__init__.py similarity index 97% rename from python/paddle/fluid/__init__.py rename to python/paddle/base/__init__.py index 833576a9c7f8a..07ca385e88bac 100644 --- a/python/paddle/fluid/__init__.py +++ b/python/paddle/base/__init__.py @@ -32,11 +32,11 @@ except Exception as e: raise e -# import all class inside framework into fluid module +# import all class inside framework into base module from . import framework from .framework import * -# import all class inside executor into fluid module +# import all class inside executor into base module from . import executor from .executor import * @@ -73,7 +73,7 @@ from . import unique_name from . import compiler from .compiler import * -from paddle.fluid.layers.math_op_patch import monkey_patch_variable +from paddle.base.layers.math_op_patch import monkey_patch_variable from .dygraph.base import enable_dygraph, disable_dygraph from .dygraph.tensor_patch_methods import monkey_patch_tensor from .core import _cuda_synchronize diff --git a/python/paddle/fluid/backward.py b/python/paddle/base/backward.py similarity index 99% rename from python/paddle/fluid/backward.py rename to python/paddle/base/backward.py index 9b09ec11cd3ab..f468f3a6a74a7 100755 --- a/python/paddle/fluid/backward.py +++ b/python/paddle/base/backward.py @@ -14,15 +14,15 @@ from .proto import framework_pb2 -from paddle.fluid import framework as framework -from paddle.fluid import program_guard +from paddle.base import framework as framework +from paddle.base import program_guard from . import core import collections import copy import logging from . import unique_name from . import log_helper -import paddle.fluid +import paddle.base from .data_feeder import check_type import warnings @@ -1393,7 +1393,7 @@ def update_distop_context( grad_sub_block_list.append(grad_sub_block.desc) # In primitive mode, raw phi GradOp will be split into multiple small # primitive operators, and the split rules are defined in c++ level, - # see details: paddle/fluid/prim/api/manual/backward/composite_backward_api.h + # see details: paddle/base/prim/api/manual/backward/composite_backward_api.h # It means that the output's shape and dtype of previous operators which # maybe used as the input of next operators must be known. Therefore, # we infer shape and dtype in a sandbox block(named composite_block) for @@ -1462,10 +1462,10 @@ def find_op_index(block_desc, cur_op_desc): # Rename internal gradient variables in multiple backward # so that they have different names with previous backward. 
# For example: - # y = x * x, grad = fluid.gradients(fluid.gradients(y, x) + y * y, x) + # y = x * x, grad = base.gradients(base.gradients(y, x) + y * y, x) # In second-time backward, gradient variable names of partial # forward network (y * y) may be have same names with first-time - # fluid.gradients(y, x). + # base.gradients(y, x). # So rename here before _addup_repetitive_outputs_. if program._appending_grad_times > 1: for op_desc in grad_op_desc: @@ -1796,7 +1796,7 @@ def infershape_for_composite(block, grad_op_desc): grad_op_desc.copy_from(op_desc) if not framework.OpProtoHolder.instance().has_op_proto(grad_op_desc.type()): - # NOTE: Some raw fluid grad operators which hadn't been decomposed may not + # NOTE: Some raw base grad operators which hadn't been decomposed may not # implement InferVarType method, such as elementwise_xx_grad, and it will # cause the dtype or shape of corresponding cotangent incorrect. This # patch set the cotangent dtype and shape same with corresponding @@ -1869,7 +1869,7 @@ def _get_no_grad_set_name(no_grad_set): no_grad_set_name.add(no_grad_var) else: raise TypeError( - "The type of no_grad_set's member must be paddle.fluid.Variable or str, but received %s." + "The type of no_grad_set's member must be paddle.base.Variable or str, but received %s." % (type(no_grad_var)) ) else: @@ -2169,7 +2169,7 @@ def append_backward( parameter_list, 'parameter_list', (list, tuple, set), - 'fluid.backward.append_backward', + 'base.backward.append_backward', ) parameters = [] for i, param in enumerate(parameter_list): @@ -2177,7 +2177,7 @@ def append_backward( param, 'parameter_list[%s]' % i, (framework.Variable, str), - 'fluid.backward.append_backward', + 'base.backward.append_backward', ) if isinstance(param, framework.Variable): parameters.append(param.name) @@ -2710,14 +2710,14 @@ def gradients_with_optimizer(program, optimizer, inputs=None, outputs=None): img = static.data(name='image', shape=[None, 784]) pred = static.nn.fc(x=img, size=10, activation='relu') loss = paddle.mean(pred) - opt_ops, pram_grads = paddle.fluid.backward.gradients_with_optimizer(static.default_main_program(), opt) + opt_ops, pram_grads = paddle.base.backward.gradients_with_optimizer(static.default_main_program(), opt) print(opt_ops) """ check_type( program, 'program', - paddle.fluid.Program, + paddle.base.Program, 'paddle.static.gradients_with_optimizer', ) check_type( @@ -2747,7 +2747,7 @@ def gradients_with_optimizer(program, optimizer, inputs=None, outputs=None): pram_grads = [ (pram, grad) for pram, grad in zip(inputs, grads) - if isinstance(pram, paddle.fluid.framework.Parameter) + if isinstance(pram, paddle.base.framework.Parameter) and grad is not None ] diff --git a/python/paddle/fluid/compiler.py b/python/paddle/base/compiler.py similarity index 99% rename from python/paddle/fluid/compiler.py rename to python/paddle/base/compiler.py index 505b7ad566dc6..b1944ac343b8b 100644 --- a/python/paddle/fluid/compiler.py +++ b/python/paddle/base/compiler.py @@ -402,9 +402,9 @@ def convert_concrete_program( """ Convert the ConcreteProgram to IPUConcreteProgram. 
""" - from ..fluid.dygraph.base import switch_to_static_graph - from ..fluid import backward - from ..fluid.framework import device_guard + from ..base.dygraph.base import switch_to_static_graph + from ..base import backward + from ..base.framework import device_guard import paddle inputs = concrete_program.inputs diff --git a/python/paddle/fluid/core.py b/python/paddle/base/core.py similarity index 100% rename from python/paddle/fluid/core.py rename to python/paddle/base/core.py diff --git a/python/paddle/fluid/data_feed_desc.py b/python/paddle/base/data_feed_desc.py similarity index 91% rename from python/paddle/fluid/data_feed_desc.py rename to python/paddle/base/data_feed_desc.py index 9e6257d96ef6a..3987440ecc6da 100644 --- a/python/paddle/fluid/data_feed_desc.py +++ b/python/paddle/base/data_feed_desc.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from paddle.fluid.proto import data_feed_pb2 +from paddle.base.proto import data_feed_pb2 from google.protobuf import text_format __all__ = ['DataFeedDesc'] @@ -28,12 +28,12 @@ class DataFeedDesc: DataFeedDesc shall be initialized from a valid protobuf message from disk. - See :code:`paddle/fluid/framework/data_feed.proto` for message definition. + See :code:`paddle/base/framework/data_feed.proto` for message definition. A typical message might look like: .. code-block:: python - import paddle.fluid as fluid + import paddle.base as base f = open("data.proto", "w") print >> f, 'name: "MultiSlotDataFeed"' print >> f, 'batch_size: 2' @@ -52,7 +52,7 @@ class DataFeedDesc: print >> f, ' }' print >> f, '}' f.close() - data_feed = fluid.DataFeedDesc('data.proto') + data_feed = base.DataFeedDesc('data.proto') However, users usually shouldn't care about the message format; instead, they are encouraged to use :code:`Data Generator` as a tool to generate a @@ -64,8 +64,8 @@ class DataFeedDesc: .. code-block:: python - import paddle.fluid as fluid - data_feed = fluid.DataFeedDesc('data.proto') + import paddle.base as base + data_feed = base.DataFeedDesc('data.proto') data_feed.set_batch_size(128) data_feed.set_dense_slots('wd') # The slot named 'wd' will be dense data_feed.set_use_slots('wd') # The slot named 'wd' will be used @@ -94,12 +94,12 @@ def __init__(self, proto_file): def set_batch_size(self, batch_size): """ - Set :attr:`batch_size` in :ref:`api_fluid_DataFeedDesc` . :attr:`batch_size` can be changed during training. + Set :attr:`batch_size` in :ref:`api_base_DataFeedDesc` . :attr:`batch_size` can be changed during training. Example: .. code-block:: python - import paddle.fluid as fluid + import paddle.base as base f = open("data.proto", "w") print >> f, 'name: "MultiSlotDataFeed"' print >> f, 'batch_size: 2' @@ -118,7 +118,7 @@ def set_batch_size(self, batch_size): print >> f, ' }' print >> f, '}' f.close() - data_feed = fluid.DataFeedDesc('data.proto') + data_feed = base.DataFeedDesc('data.proto') data_feed.set_batch_size(128) Args: @@ -140,7 +140,7 @@ def set_dense_slots(self, dense_slots_name): Example: .. 
code-block:: python - import paddle.fluid as fluid + import paddle.base as base f = open("data.proto", "w") print >> f, 'name: "MultiSlotDataFeed"' print >> f, 'batch_size: 2' @@ -159,7 +159,7 @@ def set_dense_slots(self, dense_slots_name): print >> f, ' }' print >> f, '}' f.close() - data_feed = fluid.DataFeedDesc('data.proto') + data_feed = base.DataFeedDesc('data.proto') data_feed.set_dense_slots(['words']) Args: @@ -187,7 +187,7 @@ def set_use_slots(self, use_slots_name): Example: .. code-block:: python - import paddle.fluid as fluid + import paddle.base as base f = open("data.proto", "w") print >> f, 'name: "MultiSlotDataFeed"' print >> f, 'batch_size: 2' @@ -206,7 +206,7 @@ def set_use_slots(self, use_slots_name): print >> f, ' }' print >> f, '}' f.close() - data_feed = fluid.DataFeedDesc('data.proto') + data_feed = base.DataFeedDesc('data.proto') data_feed.set_use_slots(['words']) Args: @@ -231,7 +231,7 @@ def desc(self): Example: .. code-block:: python - import paddle.fluid as fluid + import paddle.base as base f = open("data.proto", "w") print >> f, 'name: "MultiSlotDataFeed"' print >> f, 'batch_size: 2' @@ -250,7 +250,7 @@ def desc(self): print >> f, ' }' print >> f, '}' f.close() - data_feed = fluid.DataFeedDesc('data.proto') + data_feed = base.DataFeedDesc('data.proto') print(data_feed.desc()) Returns: diff --git a/python/paddle/fluid/data_feeder.py b/python/paddle/base/data_feeder.py similarity index 95% rename from python/paddle/fluid/data_feeder.py rename to python/paddle/base/data_feeder.py index 687504e3670e8..d4800aac174f2 100644 --- a/python/paddle/fluid/data_feeder.py +++ b/python/paddle/base/data_feeder.py @@ -155,7 +155,7 @@ def check_type(input, input_name, expected_type, op_name, extra_message=''): expected_type += (core.eager.Tensor,) elif isinstance(input, core.eager.Tensor): raise TypeError( - "Please use `with fluid.dygraph.guard()` as context or `fluid.enable_dygraph()` to switch to imperative mode firstly. " + "Please use `with base.dygraph.guard()` as context or `base.enable_dygraph()` to switch to imperative mode firstly. " "Because received '{}' in {} is a imperative Variable.".format( input_name, op_name ) @@ -337,12 +337,12 @@ class DataFeeder: Parameters: feed_list (list): Variables or names of Variables that need to feed. - place (:ref:`api_fluid_CPUPlace` | :ref:`api_fluid_CUDAPlace` ): + place (:ref:`api_base_CPUPlace` | :ref:`api_base_CUDAPlace` ): place indicates the device (CPU | GPU) the data will be fed into, if - you want to feed data into GPU, please using :code:`fluid.CUDAPlace(i)` + you want to feed data into GPU, please using :code:`base.CUDAPlace(i)` (:code:`i` represents the GPU id), or if you want to feed data into CPU, - please using :code:`fluid.CPUPlace()`. - program (:ref:`api_fluid_Program` , optional): The Program that will + please using :code:`base.CPUPlace()`. + program (:ref:`api_base_Program` , optional): The Program that will feed data into, if program is None, it will use default_main_program(). Default None. 
@@ -354,24 +354,24 @@ class DataFeeder: import numpy as np import paddle - import paddle.fluid as fluid + import paddle.base as base - place = fluid.CPUPlace() + place = base.CPUPlace() def reader(): for _ in range(4): yield np.random.random([4]).astype('float32'), np.random.random([3]).astype('float32'), - main_program = fluid.Program() - startup_program = fluid.Program() + main_program = base.Program() + startup_program = base.Program() - with fluid.program_guard(main_program, startup_program): + with base.program_guard(main_program, startup_program): data_1 = paddle.static.data(name='data_1', shape=[None, 2, 2], dtype='float32') data_2 = paddle.static.data(name='data_2', shape=[None, 1, 3], dtype='float32') out = paddle.static.nn.fc(x=[data_1, data_2], size=2) # ... - feeder = fluid.DataFeeder([data_1, data_2], place) + feeder = base.DataFeeder([data_1, data_2], place) - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(startup_program) feed_data = feeder.feed(reader()) @@ -426,7 +426,7 @@ def feed(self, iterable): # result['data_1'] a LoD-Tensor with shape of [5, 2, 1, 3]. 5 is batch size, and [2, 1, 3] is the real shape of data_1. # result['data_2'], result['data_3'] are similar. import numpy as np - import paddle.fluid as fluid + import paddle.base as base def reader(limit=5): for i in range(1, limit + 1): @@ -435,7 +435,7 @@ def reader(limit=5): data_1 = paddle.static.data(name='data_1', shape=[None, 2, 1, 3]) data_2 = paddle.static.data(name='data_2', shape=[None, 1], dtype='int64') data_3 = paddle.static.data(name='data_3', shape=[None, 3, 3], dtype='float32') - feeder = fluid.DataFeeder(['data_1','data_2', 'data_3'], fluid.CPUPlace()) + feeder = base.DataFeeder(['data_1','data_2', 'data_3'], base.CPUPlace()) result = feeder.feed(reader()) diff --git a/python/paddle/fluid/dataset.py b/python/paddle/base/dataset.py similarity index 85% rename from python/paddle/fluid/dataset.py rename to python/paddle/base/dataset.py index 6f1ef89c504bd..099dba1e6d755 100644 --- a/python/paddle/fluid/dataset.py +++ b/python/paddle/base/dataset.py @@ -13,7 +13,7 @@ # limitations under the License. """This is definition of dataset class, which is high performance IO.""" -from paddle.fluid.proto import data_feed_pb2 +from paddle.base.proto import data_feed_pb2 from google.protobuf import text_format from . import core from ..utils import deprecated @@ -30,8 +30,8 @@ class DatasetFactory: Example: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + import paddle.base as base + dataset = base.DatasetFactory().create_dataset("InMemoryDataset") """ @@ -51,8 +51,8 @@ def create_dataset(self, datafeed_class="QueueDataset"): Examples: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset() + import paddle.base as base + dataset = base.DatasetFactory().create_dataset() """ try: @@ -87,8 +87,8 @@ def set_pipe_command(self, pipe_command): Examples: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset() + import paddle.base as base + dataset = base.DatasetFactory().create_dataset() dataset.set_pipe_command("python my_script.py") Args: @@ -104,8 +104,8 @@ def set_so_parser_name(self, so_parser_name): Examples: .. 
code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset() + import paddle.base as base + dataset = base.DatasetFactory().create_dataset() dataset.set_so_parser_name("./abc.so") Args: @@ -121,8 +121,8 @@ def set_rank_offset(self, rank_offset): Examples: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset() + import paddle.base as base + dataset = base.DatasetFactory().create_dataset() dataset.set_rank_offset("rank_offset") Args: @@ -145,8 +145,8 @@ def set_fea_eval(self, record_candidate_size, fea_eval=True): Examples: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + import paddle.base as base + dataset = base.DatasetFactory().create_dataset("InMemoryDataset") dataset.set_fea_eval(1000000, True) """ @@ -166,8 +166,8 @@ def slots_shuffle(self, slots): slots(list[string]): the set of slots(string) to do slots shuffle. Examples: - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + import paddle.base as base + dataset = base.DatasetFactory().create_dataset("InMemoryDataset") dataset.set_merge_by_lineid() #suppose there is a slot 0 dataset.slots_shuffle(['0']) @@ -183,8 +183,8 @@ def set_batch_size(self, batch_size): Examples: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset() + import paddle.base as base + dataset = base.DatasetFactory().create_dataset() dataset.set_batch_size(128) Args: @@ -200,8 +200,8 @@ def set_pv_batch_size(self, pv_batch_size): Examples: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset() + import paddle.base as base + dataset = base.DatasetFactory().create_dataset() dataset.set_pv_batch(128) Args: pv_batch_size(int): pv batch size @@ -216,8 +216,8 @@ def set_thread(self, thread_num): Examples: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset() + import paddle.base as base + dataset = base.DatasetFactory().create_dataset() dataset.set_thread(12) Args: @@ -233,8 +233,8 @@ def set_filelist(self, filelist): Examples: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset() + import paddle.base as base + dataset = base.DatasetFactory().create_dataset() dataset.set_filelist(['a.txt', 'b.txt']) Args: @@ -253,8 +253,8 @@ def set_use_var(self, var_list): Examples: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset() + import paddle.base as base + dataset = base.DatasetFactory().create_dataset() dataset.set_use_var([data, label]) Args: @@ -276,7 +276,7 @@ def set_use_var(self, var_list): slot_var.type = "uint32" else: raise ValueError( - "Currently, fluid.dataset only supports dtype=float32, dtype=int32 and dtype=int64" + "Currently, base.dataset only supports dtype=float32, dtype=int32 and dtype=int64" ) def set_hdfs_config(self, fs_name, fs_ugi): @@ -286,8 +286,8 @@ def set_hdfs_config(self, fs_name, fs_ugi): Examples: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset() + import paddle.base as base + dataset = base.DatasetFactory().create_dataset() dataset.set_hdfs_config("my_fs_name", "my_fs_ugi") Args: @@ -303,8 +303,8 @@ def set_download_cmd(self, download_cmd): Examples: .. 
code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset() + import paddle.base as base + dataset = base.DatasetFactory().create_dataset() dataset.set_download_cmd("./read_from_afs") Args: @@ -347,8 +347,8 @@ def desc(self): Examples: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset() + import paddle.base as base + dataset = base.DatasetFactory().create_dataset() print(dataset.desc()) Returns: @@ -370,7 +370,7 @@ class InMemoryDataset(DatasetBase): This class should be created by DatasetFactory Example: - dataset = paddle.fluid.DatasetFactory().create_dataset("InMemoryDataset") + dataset = paddle.base.DatasetFactory().create_dataset("InMemoryDataset") """ @deprecated(since="2.0.0", update_to="paddle.distributed.InMemoryDataset") @@ -465,8 +465,8 @@ def set_queue_num(self, queue_num): Examples: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + import paddle.base as base + dataset = base.DatasetFactory().create_dataset("InMemoryDataset") dataset.set_queue_num(12) """ @@ -487,8 +487,8 @@ def set_parse_ins_id(self, parse_ins_id): Examples: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + import paddle.base as base + dataset = base.DatasetFactory().create_dataset("InMemoryDataset") dataset.set_parse_ins_id(True) """ @@ -508,8 +508,8 @@ def set_parse_content(self, parse_content): Examples: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + import paddle.base as base + dataset = base.DatasetFactory().create_dataset("InMemoryDataset") dataset.set_parse_content(True) """ @@ -525,8 +525,8 @@ def set_parse_logkey(self, parse_logkey): Examples: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + import paddle.base as base + dataset = base.DatasetFactory().create_dataset("InMemoryDataset") dataset.set_parse_logkey(True) """ @@ -542,8 +542,8 @@ def _set_trainer_num(self, trainer_num): Examples: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + import paddle.base as base + dataset = base.DatasetFactory().create_dataset("InMemoryDataset") dataset._set_trainer_num(1) """ @@ -563,8 +563,8 @@ def set_merge_by_sid(self, merge_by_sid): Examples: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + import paddle.base as base + dataset = base.DatasetFactory().create_dataset("InMemoryDataset") dataset.set_merge_by_sid(True) """ @@ -580,8 +580,8 @@ def set_enable_pv_merge(self, enable_pv_merge): Examples: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + import paddle.base as base + dataset = base.DatasetFactory().create_dataset("InMemoryDataset") dataset.set_enable_pv_merge(True) """ @@ -595,8 +595,8 @@ def preprocess_instance(self): Examples: .. 
code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + import paddle.base as base + dataset = base.DatasetFactory().create_dataset("InMemoryDataset") filelist = ["a.txt", "b.txt"] dataset.set_filelist(filelist) dataset.load_into_memory() @@ -613,8 +613,8 @@ def set_current_phase(self, current_phase): Examples: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + import paddle.base as base + dataset = base.DatasetFactory().create_dataset("InMemoryDataset") filelist = ["a.txt", "b.txt"] dataset.set_filelist(filelist) dataset.load_into_memory() @@ -630,8 +630,8 @@ def postprocess_instance(self): Examples: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + import paddle.base as base + dataset = base.DatasetFactory().create_dataset("InMemoryDataset") filelist = ["a.txt", "b.txt"] dataset.set_filelist(filelist) dataset.load_into_memory() @@ -656,8 +656,8 @@ def set_fleet_send_batch_size(self, fleet_send_batch_size=1024): Examples: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + import paddle.base as base + dataset = base.DatasetFactory().create_dataset("InMemoryDataset") dataset.set_fleet_send_batch_size(800) """ @@ -677,8 +677,8 @@ def set_fleet_send_sleep_seconds(self, fleet_send_sleep_seconds=0): Examples: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + import paddle.base as base + dataset = base.DatasetFactory().create_dataset("InMemoryDataset") dataset.set_fleet_send_sleep_seconds(2) """ @@ -699,8 +699,8 @@ def set_merge_by_lineid(self, merge_size=2): Examples: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + import paddle.base as base + dataset = base.DatasetFactory().create_dataset("InMemoryDataset") dataset.set_merge_by_lineid() """ @@ -740,9 +740,9 @@ def set_date(self, date): Examples: .. code-block:: python - import paddle.fluid as fluid + import paddle.base as base - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + dataset = base.DatasetFactory().create_dataset("InMemoryDataset") dataset.set_date("20211111") """ year = int(date[:4]) @@ -766,8 +766,8 @@ def load_into_memory(self, is_shuffle=False): .. code-block:: python # required: skiptest - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + import paddle.base as base + dataset = base.DatasetFactory().create_dataset("InMemoryDataset") filelist = ["a.txt", "b.txt"] dataset.set_filelist(filelist) dataset.load_into_memory() @@ -794,8 +794,8 @@ def preload_into_memory(self, thread_num=None): .. code-block:: python # required: skiptest - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + import paddle.base as base + dataset = base.DatasetFactory().create_dataset("InMemoryDataset") filelist = ["a.txt", "b.txt"] dataset.set_filelist(filelist) dataset.preload_into_memory() @@ -820,8 +820,8 @@ def wait_preload_done(self): .. 
code-block:: python # required: skiptest - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + import paddle.base as base + dataset = base.DatasetFactory().create_dataset("InMemoryDataset") filelist = ["a.txt", "b.txt"] dataset.set_filelist(filelist) dataset.preload_into_memory() @@ -842,8 +842,8 @@ def local_shuffle(self): .. code-block:: python # required: skiptest - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + import paddle.base as base + dataset = base.DatasetFactory().create_dataset("InMemoryDataset") filelist = ["a.txt", "b.txt"] dataset.set_filelist(filelist) dataset.load_into_memory() @@ -866,9 +866,9 @@ def global_shuffle(self, fleet=None, thread_num=12): .. code-block:: python # required: skiptest - import paddle.fluid as fluid + import paddle.base as base from paddle.incubate.distributed.fleet.parameter_server.pslib import fleet - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + dataset = base.DatasetFactory().create_dataset("InMemoryDataset") filelist = ["a.txt", "b.txt"] dataset.set_filelist(filelist) dataset.load_into_memory() @@ -928,16 +928,16 @@ def release_memory(self): .. code-block:: python # required: skiptest - import paddle.fluid as fluid + import paddle.base as base from paddle.incubate.distributed.fleet.parameter_server.pslib import fleet - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + dataset = base.DatasetFactory().create_dataset("InMemoryDataset") filelist = ["a.txt", "b.txt"] dataset.set_filelist(filelist) dataset.load_into_memory() dataset.global_shuffle(fleet) - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) - exe.train_from_dataset(fluid.default_main_program(), dataset) + exe = base.Executor(base.CPUPlace()) + exe.run(base.default_startup_program()) + exe.train_from_dataset(base.default_main_program(), dataset) dataset.release_memory() """ @@ -957,8 +957,8 @@ def get_pv_data_size(self): Examples: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + import paddle.base as base + dataset = base.DatasetFactory().create_dataset("InMemoryDataset") filelist = ["a.txt", "b.txt"] dataset.set_filelist(filelist) dataset.load_into_memory() @@ -992,9 +992,9 @@ def get_memory_data_size(self, fleet=None): .. code-block:: python # required: skiptest - import paddle.fluid as fluid + import paddle.base as base from paddle.incubate.distributed.fleet.parameter_server.pslib import fleet - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + dataset = base.DatasetFactory().create_dataset("InMemoryDataset") filelist = ["a.txt", "b.txt"] dataset.set_filelist(filelist) dataset.load_into_memory() @@ -1036,9 +1036,9 @@ def get_shuffle_data_size(self, fleet=None): .. code-block:: python # required: skiptest - import paddle.fluid as fluid + import paddle.base as base from paddle.incubate.distributed.fleet.parameter_server.pslib import fleet - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + dataset = base.DatasetFactory().create_dataset("InMemoryDataset") filelist = ["a.txt", "b.txt"] dataset.set_filelist(filelist) dataset.load_into_memory() @@ -1083,9 +1083,9 @@ def set_graph_config(self, config): .. 
code-block:: python # required: skiptest - import paddle.fluid as fluid + import paddle.base as base from paddle.incubate.distributed.fleet.parameter_server.pslib import fleet - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + dataset = base.DatasetFactory().create_dataset("InMemoryDataset") graph_config = {"walk_len": 24, "walk_degree": 10, "once_sample_startid_len": 80000, @@ -1145,9 +1145,9 @@ def set_pass_id(self, pass_id): Examples: .. code-block:: python - import paddle.fluid as fluid + import paddle.base as base pass_id = 0 - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + dataset = base.DatasetFactory().create_dataset("InMemoryDataset") dataset.set_pass_id(pass_id) """ self.pass_id = pass_id @@ -1163,8 +1163,8 @@ def get_pass_id(self): Examples: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + import paddle.base as base + dataset = base.DatasetFactory().create_dataset("InMemoryDataset") pass_id = dataset.get_pass_id() """ return self.pass_id @@ -1183,8 +1183,8 @@ class QueueDataset(DatasetBase): Examples: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("QueueDataset") + import paddle.base as base + dataset = base.DatasetFactory().create_dataset("QueueDataset") """ @@ -1224,8 +1224,8 @@ def local_shuffle(self): Examples: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("QueueDataset") + import paddle.base as base + dataset = base.DatasetFactory().create_dataset("QueueDataset") dataset.local_shuffle() Raises: @@ -1250,9 +1250,9 @@ def global_shuffle(self, fleet=None): Examples: .. code-block:: python - import paddle.fluid as fluid + import paddle.base as base from paddle.incubate.distributed.fleet.parameter_server.pslib import fleet - dataset = fluid.DatasetFactory().create_dataset("QueueDataset") + dataset = base.DatasetFactory().create_dataset("QueueDataset") #dataset.global_shuffle(fleet) Raises: @@ -1272,8 +1272,8 @@ class FileInstantDataset(DatasetBase): Examples: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory.create_dataset("FileInstantDataset") + import paddle.base as base + dataset = base.DatasetFactory.create_dataset("FileInstantDataset") """ def __init__(self): @@ -1312,8 +1312,8 @@ class BoxPSDataset(InMemoryDataset): Examples: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("BoxPSDataset") + import paddle.base as base + dataset = base.DatasetFactory().create_dataset("BoxPSDataset") """ def __init__(self): @@ -1342,8 +1342,8 @@ def begin_pass(self): Examples: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("BoxPSDataset") + import paddle.base as base + dataset = base.DatasetFactory().create_dataset("BoxPSDataset") dataset.begin_pass() """ self.boxps.begin_pass() @@ -1355,8 +1355,8 @@ def end_pass(self, need_save_delta): Examples: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("BoxPSDataset") + import paddle.base as base + dataset = base.DatasetFactory().create_dataset("BoxPSDataset") dataset.end_pass(True) """ self.boxps.end_pass(need_save_delta) @@ -1368,8 +1368,8 @@ def wait_preload_done(self): Examples: .. 
code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("BoxPSDataset") + import paddle.base as base + dataset = base.DatasetFactory().create_dataset("BoxPSDataset") filelist = ["a.txt", "b.txt"] dataset.set_filelist(filelist) dataset.preload_into_memory() @@ -1383,8 +1383,8 @@ def load_into_memory(self): Examples: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("BoxPSDataset") + import paddle.base as base + dataset = base.DatasetFactory().create_dataset("BoxPSDataset") filelist = ["a.txt", "b.txt"] dataset.set_filelist(filelist) dataset.load_into_memory() @@ -1398,8 +1398,8 @@ def preload_into_memory(self): Examples: .. code-block:: python - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("BoxPSDataset") + import paddle.base as base + dataset = base.DatasetFactory().create_dataset("BoxPSDataset") filelist = ["a.txt", "b.txt"] dataset.set_filelist(filelist) dataset.preload_into_memory() @@ -1427,8 +1427,8 @@ def slots_shuffle(self, slots): slots(list[string]): the set of slots(string) to do slots shuffle. Examples: - import paddle.fluid as fluid - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + import paddle.base as base + dataset = base.DatasetFactory().create_dataset("InMemoryDataset") dataset.set_merge_by_lineid() #suppose there is a slot 0 dataset.slots_shuffle(['0']) diff --git a/python/paddle/fluid/default_scope_funcs.py b/python/paddle/base/default_scope_funcs.py similarity index 94% rename from python/paddle/fluid/default_scope_funcs.py rename to python/paddle/base/default_scope_funcs.py index f8faf69425246..80cfe40db57ad 100644 --- a/python/paddle/fluid/default_scope_funcs.py +++ b/python/paddle/base/default_scope_funcs.py @@ -26,7 +26,7 @@ invoked in a new local scope. """ -import paddle.fluid.core +import paddle.base.core import threading __tl_scope__ = threading.local() @@ -44,13 +44,13 @@ def get_cur_scope(): """ Get current scope. 
- :rtype: paddle.fluid.core.Scope + :rtype: paddle.base.core.Scope """ cur_scope_stack = getattr(__tl_scope__, 'cur_scope', None) if cur_scope_stack is None: __tl_scope__.cur_scope = list() if len(__tl_scope__.cur_scope) == 0: - __tl_scope__.cur_scope.append(paddle.fluid.core.Scope()) + __tl_scope__.cur_scope.append(paddle.base.core.Scope()) return __tl_scope__.cur_scope[-1] diff --git a/python/paddle/fluid/device_worker.py b/python/paddle/base/device_worker.py similarity index 100% rename from python/paddle/fluid/device_worker.py rename to python/paddle/base/device_worker.py diff --git a/python/paddle/fluid/dygraph/__init__.py b/python/paddle/base/dygraph/__init__.py similarity index 100% rename from python/paddle/fluid/dygraph/__init__.py rename to python/paddle/base/dygraph/__init__.py diff --git a/python/paddle/fluid/dygraph/base.py b/python/paddle/base/dygraph/base.py similarity index 93% rename from python/paddle/fluid/dygraph/base.py rename to python/paddle/base/dygraph/base.py index 5999d323b955c..06922ef9bd281 100644 --- a/python/paddle/fluid/dygraph/base.py +++ b/python/paddle/base/dygraph/base.py @@ -18,10 +18,10 @@ import inspect import sys import numpy as np -from paddle.fluid import core -from paddle.fluid import framework -from paddle.fluid.framework import global_var -from paddle.fluid.multiprocess_utils import CleanupFuncRegistrar +from paddle.base import core +from paddle.base import framework +from paddle.base.framework import global_var +from paddle.base.multiprocess_utils import CleanupFuncRegistrar from .tracer import Tracer import logging from ..data_feeder import convert_dtype @@ -166,13 +166,13 @@ def _convert_into_variable(tensor): def enabled(): """ This function checks whether the program runs in dynamic graph mode or not. - You can enter dynamic graph mode with :ref:`api_fluid_dygraph_guard` api, - or enable and disable dynamic graph mode with :ref:`api_fluid_dygraph_enable_dygraph` - and :ref:`api_fluid_dygraph_disable_dygraph` api . + You can enter dynamic graph mode with :ref:`api_base_dygraph_guard` api, + or enable and disable dynamic graph mode with :ref:`api_base_dygraph_enable_dygraph` + and :ref:`api_base_dygraph_disable_dygraph` api . **Note**: - ``fluid.dygraph.enabled`` is the alias of ``fluid.in_dygraph_mode``, and - ``fluid.in_dygraph_mode`` is recommended to use for now. + ``base.dygraph.enabled`` is the alias of ``base.in_dygraph_mode``, and + ``base.in_dygraph_mode`` is recommended to use for now. Returns: bool: Whether the program is running in dynamic graph mode. @@ -180,12 +180,12 @@ def enabled(): Examples: .. code-block:: python - import paddle.fluid as fluid + import paddle.base as base - fluid.enable_dygraph() # Now we are in dygragh mode - print(fluid.dygraph.enabled()) # True - fluid.disable_dygraph() - print(fluid.dygraph.enabled()) # False + base.enable_dygraph() # Now we are in dygraph mode + print(base.dygraph.enabled()) # True + base.disable_dygraph() + print(base.dygraph.enabled()) # False """ # TODO(jiabin): Make this check as in_dygraph_mode when we support default eager mode. return framework.in_dygraph_mode() @@ -289,18 +289,18 @@ def no_grad(func=None): .. 
code-block:: python import numpy as np - import paddle.fluid as fluid + import paddle.base as base # use as generator data = np.array([[2, 3], [4, 5]]).astype('float32') - with fluid.dygraph.guard(): - l0 = fluid.Linear(2, 2) # l0.weight.gradient() is None - l1 = fluid.Linear(2, 2) - with fluid.dygraph.no_grad(): + with base.dygraph.guard(): + l0 = base.Linear(2, 2) # l0.weight.gradient() is None + l1 = base.Linear(2, 2) + with base.dygraph.no_grad(): # l1.weight.stop_gradient is False tmp = l1.weight * 2 # tmp.stop_gradient is True - x = fluid.dygraph.to_variable(data) + x = base.dygraph.to_variable(data) y = l0(x) + tmp o = l1(y) o.backward() @@ -309,13 +309,13 @@ def no_grad(func=None): # use as decorator - @fluid.dygraph.no_grad + @base.dygraph.no_grad def test_layer(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): inp = np.ones([3, 1024], dtype='float32') - t = fluid.dygraph.base.to_variable(inp) - linear1 = fluid.Linear(1024, 4, bias_attr=False) - linear2 = fluid.Linear(4, 4) + t = base.dygraph.base.to_variable(inp) + linear1 = base.Linear(1024, 4, bias_attr=False) + linear2 = base.Linear(4, 4) ret = linear1(t) dy_ret = linear2(ret) @@ -555,7 +555,7 @@ def guard(place=None): This context will create a dygraph context for dygraph to run, using python ``with`` statement. Parameters: - place(fluid.CPUPlace| fluid.CUDAPlace|str, optional): Place to execute dygraph. + place(base.CPUPlace| base.CUDAPlace|str, optional): Place to execute dygraph. If None, the running place will be determined according to the way of paddle compilation. If ``place`` is string, It can be ``cpu``, ``gpu:x`` and ``xpu:x``, where ``x`` is the index of the GPUs or XPUs. Default: None @@ -568,13 +568,13 @@ def guard(place=None): .. code-block:: python import numpy as np - import paddle.fluid as fluid + import paddle.base as base - with fluid.dygraph.guard(): + with base.dygraph.guard(): inp = np.ones([3, 1024], dtype='float32') - t = fluid.dygraph.base.to_variable(inp) - linear1 = fluid.Linear(1024, 4, bias_attr=False) - linear2 = fluid.Linear(4, 4) + t = base.dygraph.base.to_variable(inp) + linear1 = base.Linear(1024, 4, bias_attr=False) + linear2 = base.Linear(4, 4) ret = linear1(t) dy_ret = linear2(ret) @@ -859,25 +859,25 @@ def to_variable(value, name=None, zero_copy=None, dtype=None): .. code-block:: python import numpy as np - import paddle.fluid as fluid + import paddle.base as base - with fluid.dygraph.guard(fluid.CPUPlace()): + with base.dygraph.guard(base.CPUPlace()): x = np.ones([2, 2], np.float32) - y = fluid.dygraph.to_variable(x, zero_copy=False) + y = base.dygraph.to_variable(x, zero_copy=False) x[0][0] = -1 y[0][0].numpy() # array([1.], dtype=float32) - y = fluid.dygraph.to_variable(x) + y = base.dygraph.to_variable(x) x[0][0] = 0 y[0][0].numpy() # array([0.], dtype=float32) c = np.array([2+1j, 2]) - z = fluid.dygraph.to_variable(c) + z = base.dygraph.to_variable(c) z.numpy() # array([2.+1.j, 2.+0.j]) z.dtype # 'complex128' - y = fluid.dygraph.to_variable([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]]) + y = base.dygraph.to_variable([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]]) y.shape # [3L, 2L] - y = fluid.dygraph.to_variable(((0.1, 1.2), (2.2, 3.1), (4.9, 5.2)), dtype='int32') + y = base.dygraph.to_variable(((0.1, 1.2), (2.2, 3.1), (4.9, 5.2)), dtype='int32') y.shape # [3L, 2L] """ @@ -892,7 +892,7 @@ def to_variable(value, name=None, zero_copy=None, dtype=None): ) if not isinstance(value, support_type): raise TypeError( - "The type of 'value' in fluid.dygraph.to_variable must be %s, but received %s." 
+ "The type of 'value' in base.dygraph.to_variable must be %s, but received %s." % (support_type, type(value)) ) if isinstance(value, (core.eager.Tensor, framework.Variable)): diff --git a/python/paddle/fluid/dygraph/math_op_patch.py b/python/paddle/base/dygraph/math_op_patch.py similarity index 100% rename from python/paddle/fluid/dygraph/math_op_patch.py rename to python/paddle/base/dygraph/math_op_patch.py diff --git a/python/paddle/fluid/dygraph/tensor_patch_methods.py b/python/paddle/base/dygraph/tensor_patch_methods.py similarity index 98% rename from python/paddle/fluid/dygraph/tensor_patch_methods.py rename to python/paddle/base/dygraph/tensor_patch_methods.py index 0ba734237ffc8..2db290e1e1d4e 100644 --- a/python/paddle/fluid/dygraph/tensor_patch_methods.py +++ b/python/paddle/base/dygraph/tensor_patch_methods.py @@ -34,7 +34,7 @@ ) from .base import switch_to_static_graph from .math_op_patch import monkey_patch_math_tensor -from paddle.fluid.data_feeder import ( +from paddle.base.data_feeder import ( convert_uint16_to_float, _PADDLE_DTYPE_2_NUMPY_DTYPE, ) @@ -43,7 +43,7 @@ from paddle.profiler.utils import in_profiler_mode from paddle import _C_ops, _legacy_C_ops from paddle.device import get_all_custom_device_type -from paddle.fluid.framework import _global_flags +from paddle.base.framework import _global_flags _grad_scalar = None @@ -100,12 +100,12 @@ def _to_static_var(self, to_parameter=False, **kwargs): Examples: .. code-block:: python - import paddle.fluid as fluid - from paddle.fluid.dygraph.base import to_variable + import paddle.base as base + from paddle.base.dygraph.base import to_variable import numpy as np data = np.ones([3, 1024], dtype='float32') - with fluid.dygraph.guard(): + with base.dygraph.guard(): tensor = to_variable(data) static_var = tensor._to_static_var() @@ -172,13 +172,13 @@ def set_value(self, value): Examples: .. code-block:: python - import paddle.fluid as fluid - from paddle.fluid.dygraph.base import to_variable + import paddle.base as base + from paddle.base.dygraph.base import to_variable from paddle.nn import Linear import numpy as np data = np.ones([3, 1024], dtype='float32') - with fluid.dygraph.guard(): + with base.dygraph.guard(): linear = Linear(1024, 4) t = to_variable(data) linear(t) # call with default weight @@ -480,7 +480,7 @@ def transform(t, device, dtype, blocking): # 2. cast Tensor to dtype if dtype is not None and dtype != t_used.dtype: - with paddle.fluid.framework._dygraph_place_guard( + with paddle.base.framework._dygraph_place_guard( place=t_used.place ): t_casted = t_used.cast(dtype=dtype) @@ -999,7 +999,7 @@ def dtype_str(dtype): prefix = 'paddle.' 
return prefix + numpy_dtype else: - # for example, paddle.fluid.core.VarDesc.VarType.LOD_TENSOR + # for example, paddle.base.core.VarDesc.VarType.LOD_TENSOR return origin(dtype) setattr(core.VarDesc.VarType, "__str__", dtype_str) diff --git a/python/paddle/fluid/dygraph/tracer.py b/python/paddle/base/dygraph/tracer.py similarity index 99% rename from python/paddle/fluid/dygraph/tracer.py rename to python/paddle/base/dygraph/tracer.py index f079d1946124f..34493a0ffa90a 100644 --- a/python/paddle/fluid/dygraph/tracer.py +++ b/python/paddle/base/dygraph/tracer.py @@ -16,8 +16,8 @@ import numpy as np -from paddle.fluid import core -from paddle.fluid import framework +from paddle.base import core +from paddle.base import framework from paddle import _C_ops, _legacy_C_ops name_mapping = { diff --git a/python/paddle/fluid/dygraph_utils.py b/python/paddle/base/dygraph_utils.py similarity index 100% rename from python/paddle/fluid/dygraph_utils.py rename to python/paddle/base/dygraph_utils.py diff --git a/python/paddle/fluid/executor.py b/python/paddle/base/executor.py similarity index 98% rename from python/paddle/fluid/executor.py rename to python/paddle/base/executor.py index 8ab4c13f40542..e3c20efa706d6 100755 --- a/python/paddle/fluid/executor.py +++ b/python/paddle/base/executor.py @@ -131,14 +131,14 @@ def as_numpy(tensor, copy=False): Examples: .. code-block:: python - import paddle.fluid as fluid + import paddle.base as base import numpy - new_scope = fluid.Scope() - with fluid.scope_guard(new_scope): - fluid.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), fluid.CPUPlace()) + new_scope = base.Scope() + with base.scope_guard(new_scope): + base.global_scope().var("data").get_tensor().set(numpy.ones((2, 2)), base.CPUPlace()) tensor = new_scope.find_var("data").get_tensor() - fluid.executor.as_numpy(tensor) # or numpy.array(new_scope.find_var("data").get_tensor()) + base.executor.as_numpy(tensor) # or numpy.array(new_scope.find_var("data").get_tensor()) Args: tensor(Variable): a instance of Tensor @@ -679,11 +679,11 @@ def _as_lodtensor(data, place, dtype=None): For higher dimensional sequence data, please use LoDTensor directly. Examples: - >>> import paddle.fluid as fluid - >>> place = fluid.CPUPlace() - >>> exe = fluid.executor(place) + >>> import paddle.base as base + >>> place = base.CPUPlace() + >>> exe = base.executor(place) >>> data = np.array(size=(100, 200, 300)) - >>> np_outs = map(lambda x: fluid.executor._as_lodtensor(x, place), data) + >>> np_outs = map(lambda x: base.executor._as_lodtensor(x, place), data) >>> ... Args: @@ -712,7 +712,7 @@ def _as_lodtensor(data, place, dtype=None): raise TypeError( "\n\tFaild to convert input data to a regular ndarray :\n\t* Usually " "this means the input data contains nested lists with different lengths. " - "Please consider using 'fluid.create_lod_tensor' to convert it to a LoD-Tensor." + "Please consider using 'base.create_lod_tensor' to convert it to a LoD-Tensor." ) data = data.astype(dtype) else: @@ -1724,7 +1724,7 @@ def _run_impl( else: error_info = ( "There are no operators in the program to be executed. " - "If you pass Program manually, please use fluid.program_guard " + "If you pass Program manually, please use base.program_guard " "to ensure the current Program is being used." 
) warnings.warn(error_info) @@ -2245,7 +2245,7 @@ def _run_from_dataset( for var in program.global_block().vars.values(): if var.is_data: data_vars.append(var) - dataset = paddle.fluid.DatasetFactory().create_dataset( + dataset = paddle.base.DatasetFactory().create_dataset( 'FileInstantDataset' ) dataset.set_batch_size(1) @@ -2270,7 +2270,7 @@ def _run_from_dataset( for var in program.global_block().vars.values(): if var.is_data: data_vars.append(var) - dataset = paddle.fluid.DatasetFactory().create_dataset( + dataset = paddle.base.DatasetFactory().create_dataset( 'InMemoryDataset' ) dataset.set_batch_size(1) @@ -2417,7 +2417,7 @@ def _get_dataset(): for var in program.global_block().vars.values(): if var.is_data: data_vars.append(var) - dataset = paddle.fluid.DatasetFactory().create_dataset( + dataset = paddle.base.DatasetFactory().create_dataset( 'FileInstantDataset' ) dataset.set_batch_size(1) @@ -2897,7 +2897,7 @@ def infer_from_dataset( fetch_handler=None, ): """ - Infer from a pre-defined Dataset. Dataset is defined in paddle.fluid.dataset. + Infer from a pre-defined Dataset. Dataset is defined in paddle.base.dataset. Given a program, either a program or compiled program, infer_from_dataset will consume all data samples in dataset. Input scope can be given by users. By default, scope is global_scope(). The total number of thread run in training is `thread`. @@ -2912,7 +2912,7 @@ def infer_from_dataset( Args: program(Program|CompiledProgram): the program that needs to be run, if not provided, then default_main_program (not compiled) will be used. - dataset(paddle.fluid.Dataset): dataset created outside this function, + dataset(paddle.base.Dataset): dataset created outside this function, a user should provide a well-defined dataset before calling this function. Please check the document of Dataset if needed. default is None scope(Scope): the scope used to run this program, you can switch it to different scope @@ -2940,7 +2940,7 @@ def infer_from_dataset( exe = paddle.static.Executor(place) x = paddle.static.data(name="x", shape=[None, 10, 10], dtype="int64") y = paddle.static.data(name="y", shape=[None, 1], dtype="int64", lod_level=1) - dataset = paddle.fluid.DatasetFactory().create_dataset() + dataset = paddle.base.DatasetFactory().create_dataset() dataset.set_use_var([x, y]) dataset.set_thread(1) # you should set your own filelist, e.g. filelist = ["dataA.txt"] @@ -3021,7 +3021,7 @@ def train_from_dataset( fetch_handler=None, ): """ - Train from a pre-defined Dataset. Dataset is defined in paddle.fluid.dataset. + Train from a pre-defined Dataset. Dataset is defined in paddle.base.dataset. Given a program, either a program or compiled program, train_from_dataset will consume all data samples in dataset. Input scope can be given by users. By default, scope is global_scope(). The total number of thread run in training is `thread`. @@ -3034,7 +3034,7 @@ def train_from_dataset( Args: program(Program|CompiledProgram): the program that needs to be run, if not provided, then default_main_program (not compiled) will be used. - dataset(paddle.fluid.Dataset): dataset created outside this function, + dataset(paddle.base.Dataset): dataset created outside this function, a user should provide a well-defined dataset before calling this function. Please check the document of Dataset if needed. 
scope(Scope): the scope used to run this program, you can switch it to different scope @@ -3063,7 +3063,7 @@ exe = paddle.static.Executor(place) x = paddle.static.data(name="x", shape=[None, 10, 10], dtype="int64") y = paddle.static.data(name="y", shape=[None, 1], dtype="int64", lod_level=1) - dataset = paddle.fluid.DatasetFactory().create_dataset() + dataset = paddle.base.DatasetFactory().create_dataset() dataset.set_use_var([x, y]) dataset.set_thread(1) # you should set your own filelist, e.g. filelist = ["dataA.txt"] diff --git a/python/paddle/fluid/framework.py b/python/paddle/base/framework.py similarity index 98% rename from python/paddle/fluid/framework.py rename to python/paddle/base/framework.py index b4442bcedf923..476658aa37c75 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/base/framework.py @@ -35,7 +35,7 @@ from . import core from . import unique_name from .. import ir -from paddle.fluid.libpaddle import DataType +from paddle.base.libpaddle import DataType -import paddle.version as fluid_version +import paddle.version as base_version import warnings import functools @@ -409,13 +409,13 @@ def require_version(min_version, max_version=None): Examples: .. code-block:: python - import paddle.fluid as fluid + import paddle.base as base # any version >= 0.1.0 is acceptable. - fluid.require_version('0.1.0') + base.require_version('0.1.0') # if 0.1.0 <= version <= 10.0.0, it is acceptable. - fluid.require_version(min_version='0.1.0', max_version='10.0.0') + base.require_version(min_version='0.1.0', max_version='10.0.0') """ if not isinstance(min_version, str): raise TypeError( @@ -445,10 +445,10 @@ def require_version(min_version, max_version=None): ) version_installed = [ - fluid_version.major, - fluid_version.minor, - fluid_version.patch, - fluid_version.rc, + base_version.major, + base_version.minor, + base_version.patch, + base_version.rc, ] zero_version = ['0', '0', '0', '0'] @@ -466,14 +466,14 @@ def version_cmp(ver_a, ver_b): "PaddlePaddle version in [%s, %s] required, but %s installed. " "Maybe you are using a develop version, " "please make sure the version is good with your code." - % (min_version, max_version, fluid_version.full_version) + % (min_version, max_version, base_version.full_version) ) else: warnings.warn( "PaddlePaddle version %s or higher is required, but %s installed, " "Maybe you are using a develop version, " "please make sure the version is good with your code." - % (min_version, fluid_version.full_version) + % (min_version, base_version.full_version) ) return @@ -494,14 +494,14 @@ def version_cmp(ver_a, ver_b): ): raise Exception( "VersionError: PaddlePaddle version in [%s, %s] required, but %s installed." - % (min_version, max_version, fluid_version.full_version) + % (min_version, max_version, base_version.full_version) ) else: if version_cmp(version_installed, min_version_to_check) < 0: raise Exception( "VersionError: PaddlePaddle version %s or higher is required, but %s installed, " "please upgrade your PaddlePaddle to %s or other higher version." 
- % (min_version, fluid_version.full_version, min_version) + % (min_version, base_version.full_version, min_version) ) @@ -576,7 +576,7 @@ def __impl__(*args, **kwargs): # NOTE(chenweihang): There is argument name typo (stat_dict, correct name is state_dict) -# in fluid api Layer.set_dict, Optimizer.load, in order to correct the argument without +# in base api Layer.set_dict, Optimizer.load, in order to correct the argument without # introducing compatibility issues, add this decorator # NOTE(chenweihang): not using `wrap_decorator` here is because `wrap_decorator` will # move kwargs to args, which doesn't work in this decorate case @@ -722,8 +722,8 @@ def is_compiled_with_xpu(): Examples: .. code-block:: python - import paddle.fluid as fluid - support_xpu = fluid.is_compiled_with_xpu() + import paddle.base as base + support_xpu = base.is_compiled_with_xpu() """ return core.is_compiled_with_xpu() @@ -925,7 +925,7 @@ def cpu_places(device_count=None): def cuda_pinned_places(device_count=None): """ - This function creates a list of :code:`fluid.CUDAPinnedPlace` objects. + This function creates a list of :code:`base.CUDAPinnedPlace` objects. If :code:`device_count` is None, the device count would be determined by environment variable :code:`CPU_NUM`. @@ -938,15 +938,15 @@ def cuda_pinned_places(device_count=None): device_count (int, optional): device number. Default: None. Returns: - list of fluid.CUDAPinnedPlace: Created list of CUDA pinned places. + list of base.CUDAPinnedPlace: Created list of CUDA pinned places. Examples: .. code-block:: python - import paddle.fluid as fluid - cuda_pinned_places_cpu_num = fluid.cuda_pinned_places() + import paddle.base as base + cuda_pinned_places_cpu_num = base.cuda_pinned_places() # or - cuda_pinned_places = fluid.cuda_pinned_places(1) + cuda_pinned_places = base.cuda_pinned_places(1) """ assert core.is_compiled_with_cuda(), "Not compiled with CUDA" @@ -1318,7 +1318,7 @@ class Variable(metaclass=VariableMetaClass): In Static Graph Mode: Please use ** `Block.create_var` ** to create a Static variable which has no data until being feed. - In Dygraph Mode: Please use ** :ref:`api_fluid_dygraph_to_variable` ** to create a dygraph variable with real data. + In Dygraph Mode: Please use ** :ref:`api_base_dygraph_to_variable` ** to create a dygraph variable with real data. In Fluid, every input and output of an OP is a variable. In most cases, variables are used for holding different kinds of data or training @@ -1326,7 +1326,7 @@ class Variable(metaclass=VariableMetaClass): two variables in different :ref:`api_guide_Block_en` could have the same name. There are many kinds of variables. Each kind of them has its own attributes - and usages. Please refer to the `framework.proto `_ for details. + and usages. Please refer to the `framework.proto `_ for details. Most of a Variable's member variables can be set to be None. It mean it is not available or will be specified later. @@ -1337,8 +1337,8 @@ class Variable(metaclass=VariableMetaClass): .. code-block:: python :name: code-example-1 - import paddle.fluid as fluid - cur_program = fluid.Program() + import paddle.base as base + cur_program = base.Program() cur_block = cur_program.current_block() new_variable = cur_block.create_var(name="X", shape=[-1, 23, 48], @@ -1349,11 +1349,11 @@ class Variable(metaclass=VariableMetaClass): .. 
code-block:: python :name: code-example-2 - import paddle.fluid as fluid + import paddle.base as base import numpy as np - with fluid.dygraph.guard(): - new_variable = fluid.dygraph.to_variable(np.arange(10)) + with base.dygraph.guard(): + new_variable = base.dygraph.to_variable(np.arange(10)) """ @@ -1535,13 +1535,13 @@ def numpy(self): Examples: .. code-block:: python - import paddle.fluid as fluid - from paddle.fluid.dygraph.base import to_variable - from paddle.fluid.dygraph import Linear + import paddle.base as base + from paddle.base.dygraph.base import to_variable + from paddle.base.dygraph import Linear import numpy as np data = np.random.uniform(-1, 1, [30, 10, 32]).astype('float32') - with fluid.dygraph.guard(): + with base.dygraph.guard(): linear = Linear(32, 64) data = to_variable(data) x = linear(data) @@ -1614,15 +1614,15 @@ def gradient(self): .. code-block:: python import paddle - import paddle.fluid as fluid + import paddle.base as base import numpy as np # example1: return ndarray x = np.ones([2, 2], np.float32) - with fluid.dygraph.guard(): + with base.dygraph.guard(): inputs2 = [] for _ in range(10): - tmp = fluid.dygraph.base.to_variable(x) + tmp = base.dygraph.base.to_variable(x) tmp.stop_gradient=False inputs2.append(tmp) ret2 = paddle.add_n(inputs2) @@ -1631,7 +1631,7 @@ def gradient(self): print(loss2.gradient()) # example2: return tuple of ndarray - with fluid.dygraph.guard(): + with base.dygraph.guard(): embedding = paddle.nn.Embedding( 20, 32, @@ -1639,7 +1639,7 @@ def gradient(self): sparse=True) x_data = np.arange(12).reshape(4, 3).astype('int64') x_data = x_data.reshape((-1, 3, 1)) - x = fluid.dygraph.base.to_variable(x_data) + x = base.dygraph.base.to_variable(x_data) out = embedding(x) out.backward() print(embedding.weight.gradient()) @@ -1663,14 +1663,14 @@ def clear_gradient(self): .. code-block:: python import paddle - import paddle.fluid as fluid + import paddle.base as base import numpy as np x = np.ones([2, 2], np.float32) - with fluid.dygraph.guard(): + with base.dygraph.guard(): inputs2 = [] for _ in range(10): - tmp = fluid.dygraph.base.to_variable(x) + tmp = base.dygraph.base.to_variable(x) tmp.stop_gradient=False inputs2.append(tmp) ret2 = paddle.add_n(inputs2) @@ -1788,11 +1788,11 @@ def to_string(self, throw_on_error, with_details=False): Examples: .. code-block:: python - import paddle.fluid as fluid + import paddle.base as base import paddle paddle.enable_static() - cur_program = fluid.Program() + cur_program = base.Program() cur_block = cur_program.current_block() new_variable = cur_block.create_var(name="X", shape=[-1, 23, 48], @@ -1853,22 +1853,22 @@ def stop_gradient(self): Examples: .. 
code-block:: python - import paddle.fluid as fluid + import paddle.base as base import numpy as np - with fluid.dygraph.guard(): + with base.dygraph.guard(): value0 = np.arange(26).reshape(2, 13).astype("float32") value1 = np.arange(6).reshape(2, 3).astype("float32") value2 = np.arange(10).reshape(2, 5).astype("float32") - linear = fluid.Linear(13, 5, dtype="float32") - linear2 = fluid.Linear(3, 3, dtype="float32") - a = fluid.dygraph.to_variable(value0) - b = fluid.dygraph.to_variable(value1) - c = fluid.dygraph.to_variable(value2) + linear = base.Linear(13, 5, dtype="float32") + linear2 = base.Linear(3, 3, dtype="float32") + a = base.dygraph.to_variable(value0) + b = base.dygraph.to_variable(value1) + c = base.dygraph.to_variable(value2) out1 = linear(a) out2 = linear2(b) out1.stop_gradient = True - out = fluid.layers.concat(input=[out1, out2, c], axis=1) + out = base.layers.concat(input=[out1, out2, c], axis=1) out.backward() assert linear.weight.gradient() is None @@ -1895,8 +1895,8 @@ def persistable(self): Examples: .. code-block:: python - import paddle.fluid as fluid - cur_program = fluid.Program() + import paddle.base as base + cur_program = base.Program() cur_block = cur_program.current_block() new_variable = cur_block.create_var(name="X", shape=[-1, 23, 48], @@ -1944,8 +1944,8 @@ def name(self): Examples: .. code-block:: python - import paddle.fluid as fluid - cur_program = fluid.Program() + import paddle.base as base + cur_program = base.Program() cur_block = cur_program.current_block() new_variable = cur_block.create_var(name="X", shape=[-1, 23, 48], @@ -1988,8 +1988,8 @@ def shape(self): Examples: .. code-block:: python - import paddle.fluid as fluid - cur_program = fluid.Program() + import paddle.base as base + cur_program = base.Program() cur_block = cur_program.current_block() new_variable = cur_block.create_var(name="X", shape=[-1, 23, 48], @@ -2010,8 +2010,8 @@ def dtype(self): Examples: .. code-block:: python - import paddle.fluid as fluid - cur_program = fluid.Program() + import paddle.base as base + cur_program = base.Program() cur_block = cur_program.current_block() new_variable = cur_block.create_var(name="X", shape=[-1, 23, 48], @@ -2023,7 +2023,7 @@ def dtype(self): @property def lod_level(self): """ - Indicating ``LoD`` info of current Variable, please refer to :ref:`api_fluid_LoDTensor_en` to check the meaning + Indicating ``LoD`` info of current Variable, please refer to :ref:`api_base_LoDTensor_en` to check the meaning of ``LoD`` **Notes**: @@ -2036,10 +2036,10 @@ def lod_level(self): .. code-block:: python import paddle - import paddle.fluid as fluid + import paddle.base as base paddle.enable_static() - cur_program = fluid.Program() + cur_program = base.Program() cur_block = cur_program.current_block() new_variable = cur_block.create_var(name="X", shape=[-1, 23, 48], @@ -2062,8 +2062,8 @@ def type(self): Examples: .. code-block:: python - import paddle.fluid as fluid - cur_program = fluid.Program() + import paddle.base as base + cur_program = base.Program() cur_block = cur_program.current_block() new_variable = cur_block.create_var(name="X", shape=[-1, 23, 48], @@ -2767,8 +2767,8 @@ class Operator: Examples: .. code-block:: python - import paddle.fluid as fluid - cur_program = fluid.Program() + import paddle.base as base + cur_program = base.Program() cur_block = cur_program.current_block() # var1 += var2 + var3 cur_block.append_op(type="sum", @@ -3122,9 +3122,9 @@ def _to_readable_code(self, skip_op_callstack=True): Examples: .. 
code-block:: python - import paddle.fluid as fluid + import paddle.base as base - cur_program = fluid.Program() + cur_program = base.Program() cur_block = cur_program.current_block() var = cur_block.create_var(name="X", shape=[-1, 23, 48], @@ -3653,7 +3653,7 @@ def check_if_to_static_diff_with_dygraph(op_type, inplace_map, outputs): and inplace_map.get("Input", None) == "Out" ): raise ValueError( - 'Sorry about what\'s happend. In to_static mode, %s\'s output variable %s is a viewed Tensor in dygraph. This will result in inconsistent calculation behavior between dynamic and static graphs. If you are sure it is safe, you can call with paddle.fluid.framework._stride_in_no_check_dy2st_diff() in your safe code block.' + 'Sorry about what\'s happend. In to_static mode, %s\'s output variable %s is a viewed Tensor in dygraph. This will result in inconsistent calculation behavior between dynamic and static graphs. If you are sure it is safe, you can call with paddle.base.framework._stride_in_no_check_dy2st_diff() in your safe code block.' % (op_type, k) ) elif isinstance(v, list): @@ -3664,7 +3664,7 @@ def check_if_to_static_diff_with_dygraph(op_type, inplace_map, outputs): and inplace_map.get("Input", None) == "Out" ): raise ValueError( - 'Sorry about what\'s happend. In to_static mode, %s\'s output variable %s is a viewed Tensor in dygraph. This will result in inconsistent calculation behavior between dynamic and static graphs. If you are sure it is safe, you can call with paddle.fluid.framework._stride_in_no_check_dy2st_diff() in your safe code block.' + 'Sorry about what\'s happend. In to_static mode, %s\'s output variable %s is a viewed Tensor in dygraph. This will result in inconsistent calculation behavior between dynamic and static graphs. If you are sure it is safe, you can call with paddle.base.framework._stride_in_no_check_dy2st_diff() in your safe code block.' % (op_type, k) ) @@ -3872,9 +3872,9 @@ class Block: Examples: .. code-block:: python - import paddle.fluid as fluid + import paddle.base as base - cur_program = fluid.Program() + cur_program = base.Program() cur_block = cur_program.current_block() var = cur_block.create_var(name="X", shape=[-1, 23, 48], @@ -3911,9 +3911,9 @@ def _to_readable_code(self, skip_op_callstack=True): Examples: .. code-block:: python - import paddle.fluid as fluid + import paddle.base as base - cur_program = fluid.Program() + cur_program = base.Program() cur_block = cur_program.current_block() new_var = cur_block.create_var(name="X", shape=[-1, 23, 48], @@ -4292,7 +4292,7 @@ def append_op(self, *args, **kwargs): inplace_map, ) else: - from paddle.fluid.dygraph.base import param_guard + from paddle.base.dygraph.base import param_guard from paddle.utils import flatten def pass_stop_gradient(ins, outs): @@ -5534,11 +5534,11 @@ def _update_desc_attr(self, desc, name, val): class Program: """ Create Python Program. It has at least one :ref:`api_guide_Block_en`, when the - control flow op like conditional_block, while :ref:`api_paddle_fluid_layers_While` is included, + control flow op like conditional_block, while :ref:`api_paddle_base_layers_While` is included, it will contain nested block. Please reference the - `framework.proto `_ + `framework.proto `_ for details. A set of Program usually contains startup program and main program. @@ -5551,9 +5551,9 @@ class Program: backward ops and vars. 
**Notes**: - **we have** :ref:`api_paddle_fluid_framework_default_startup_program` **and** :ref:`api_paddle_fluid_framework_default_main_program` - **by default, a pair of them will shared the parameters. The** :ref:`api_paddle_fluid_framework_default_startup_program` **only run once to initialize parameters,** - :ref:`api_paddle_fluid_framework_default_main_program` **run in every mini batch and adjust the weights.** + **we have** :ref:`api_paddle_base_framework_default_startup_program` **and** :ref:`api_paddle_base_framework_default_main_program` + **by default, a pair of them will shared the parameters. The** :ref:`api_paddle_base_framework_default_startup_program` **only run once to initialize parameters,** + :ref:`api_paddle_base_framework_default_main_program` **run in every mini batch and adjust the weights.** Returns: Program: An empty Program. @@ -5857,7 +5857,7 @@ def _optimized_guard(self, param_and_grads): Examples: - >>> import paddle.fluid as fluid + >>> import paddle.base as base >>> p, g = backward(...) >>> with program._optimized_guard([p,g]): >>> p = p - 0.001 * g @@ -5893,7 +5893,7 @@ def _lr_schedule_guard(self, is_with_opt=False): Examples: - >>> import paddle.fluid as fluid + >>> import paddle.base as base >>> p, g = backward(...) >>> with program.lr_schedule_guard(): >>> lr = lr * decay @@ -6062,7 +6062,7 @@ def clone(self, for_test=False): Create a new Program with forward content of original one when ``for_test=True``. Create a new Program as same as the original one when ``for_test=False``. - Some operators, e.g., :ref:`api_paddle_fluid_layers_batch_norm` , behave differently between + Some operators, e.g., :ref:`api_paddle_base_layers_batch_norm` , behave differently between training and testing. They have an attribute, :code:`is_test`, to control this behaviour. This method will change the :code:`is_test` attribute of them to :code:`True` when :code:`for_test=True`. @@ -7252,10 +7252,10 @@ def to_string(self, throw_on_error, with_details=False): Examples: .. code-block:: python - import paddle.fluid as fluid + import paddle.base as base import paddle - prog = fluid.default_main_program() + prog = base.default_main_program() rlt = paddle.static.data("fake_data", shape=[-1,1,1], dtype='float32') debug_str = prog.to_string(throw_on_error=True, with_details=False) print(debug_str) @@ -7466,7 +7466,7 @@ def default_startup_program(): The :code:`startup_program` will initialize the parameters by the OPs. This method will return the default or the current startup program. Users can use - :ref:`api_paddle_fluid_framework_program_guard` to switch :ref:`api_paddle_fluid_framework_Program` . + :ref:`api_paddle_base_framework_program_guard` to switch :ref:`api_paddle_base_framework_Program` . Returns: Program: current default startup program. @@ -7499,7 +7499,7 @@ def default_main_program(): a lot of APIs. For example, the :code:`Executor.run()` will execute the :code:`default_main_program` when the program is not specified. - If you want to switch the ``default main program``, you can use :ref:`api_paddle_fluid_framework_program_guard` . + If you want to switch the ``default main program``, you can use :ref:`api_paddle_base_framework_program_guard` . Returns: Program: A ``Program`` which holding the descriptions of OPs and tensors in the network. 
diff --git a/python/paddle/fluid/incubate/__init__.py b/python/paddle/base/incubate/__init__.py similarity index 94% rename from python/paddle/fluid/incubate/__init__.py rename to python/paddle/base/incubate/__init__.py index 76c5c6391fde3..af8ad3be6630f 100644 --- a/python/paddle/fluid/incubate/__init__.py +++ b/python/paddle/base/incubate/__init__.py @@ -13,5 +13,5 @@ # incubate directory is mainly for internal use # after we have tested incubate APIs in industrial application for a period -# we will move stable functions into fluid +# we will move stable functions into base __version__ = '0.1.0' diff --git a/python/paddle/fluid/incubate/checkpoint/__init__.py b/python/paddle/base/incubate/checkpoint/__init__.py similarity index 100% rename from python/paddle/fluid/incubate/checkpoint/__init__.py rename to python/paddle/base/incubate/checkpoint/__init__.py diff --git a/python/paddle/fluid/incubate/checkpoint/auto_checkpoint.py b/python/paddle/base/incubate/checkpoint/auto_checkpoint.py similarity index 99% rename from python/paddle/fluid/incubate/checkpoint/auto_checkpoint.py rename to python/paddle/base/incubate/checkpoint/auto_checkpoint.py index 58387d37064eb..7d07c4594acb4 100644 --- a/python/paddle/fluid/incubate/checkpoint/auto_checkpoint.py +++ b/python/paddle/base/incubate/checkpoint/auto_checkpoint.py @@ -22,9 +22,9 @@ from threading import Thread, current_thread from contextlib import contextmanager -from paddle.fluid import unique_name, compiler +from paddle.base import unique_name, compiler from .checkpoint_saver import SerializableBase, CheckpointSaver, PaddleModel -from paddle.fluid.framework import in_dygraph_mode, Program +from paddle.base.framework import in_dygraph_mode, Program g_train_epoch_range = None g_checker = None diff --git a/python/paddle/fluid/incubate/checkpoint/checkpoint_saver.py b/python/paddle/base/incubate/checkpoint/checkpoint_saver.py similarity index 100% rename from python/paddle/fluid/incubate/checkpoint/checkpoint_saver.py rename to python/paddle/base/incubate/checkpoint/checkpoint_saver.py diff --git a/python/paddle/fluid/initializer.py b/python/paddle/base/initializer.py similarity index 97% rename from python/paddle/fluid/initializer.py rename to python/paddle/base/initializer.py index 5eead87a995c9..c9d197a4f6840 100644 --- a/python/paddle/fluid/initializer.py +++ b/python/paddle/base/initializer.py @@ -58,7 +58,7 @@ def set_global_initializer(weight_init, bias_init=None): The model parameters include ``weight`` and ``bias`` . In the framework, they correspond to ``paddle.ParamAttr`` , which is inherited from ``paddle.Tensor`` , and is a persistable Variable. This API only takes effect for model parameters, not for variables created through apis such as - :ref:`api_fluid_layers_create_global_var` , :ref:`api_fluid_layers_create_tensor`. + :ref:`api_base_layers_create_global_var` , :ref:`api_base_layers_create_tensor`. If the initializer is also set up by ``param_attr`` or ``bias_attr`` when creating a network layer, the global initializer setting here will not take effect because it has a lower priority. 
diff --git a/python/paddle/fluid/io.py b/python/paddle/base/io.py similarity index 84% rename from python/paddle/fluid/io.py rename to python/paddle/base/io.py index 027e1ad7c438e..80855333b9165 100644 --- a/python/paddle/fluid/io.py +++ b/python/paddle/base/io.py @@ -25,9 +25,9 @@ import numpy as np import math import paddle -from paddle.fluid import layers -from paddle.fluid.executor import Executor, global_scope -from paddle.fluid.framework import ( +from paddle.base import layers +from paddle.base.executor import Executor, global_scope +from paddle.base.framework import ( Program, Parameter, default_main_program, @@ -50,14 +50,14 @@ multiprocess_reader, ) from .wrapped_decorator import signature_safe_contextmanager -from paddle.fluid.compiler import CompiledProgram -from paddle.fluid.log_helper import get_logger +from paddle.base.compiler import CompiledProgram +from paddle.base.log_helper import get_logger from . import reader from . import unique_name from .reader import * from . import core from paddle.utils import deprecated -from paddle.fluid.framework import static_only +from paddle.base.framework import static_only __all__ = reader.__all__ diff --git a/python/paddle/fluid/layer_helper.py b/python/paddle/base/layer_helper.py similarity index 100% rename from python/paddle/fluid/layer_helper.py rename to python/paddle/base/layer_helper.py diff --git a/python/paddle/fluid/layer_helper_base.py b/python/paddle/base/layer_helper_base.py similarity index 99% rename from python/paddle/fluid/layer_helper_base.py rename to python/paddle/base/layer_helper_base.py index 042e33a108eca..02419f474e102 100644 --- a/python/paddle/fluid/layer_helper_base.py +++ b/python/paddle/base/layer_helper_base.py @@ -79,11 +79,11 @@ def to_variable(self, value, name=None): .. code-block:: python import numpy as np - import paddle.fluid as fluid + import paddle.base as base - with fluid.dygraph.guard(): + with base.dygraph.guard(): x = np.ones([2, 2], np.float32) - y = fluid.dygraph.to_variable(x) + y = base.dygraph.to_variable(x) """ if isinstance(value, np.ndarray): diff --git a/python/paddle/fluid/layers/__init__.py b/python/paddle/base/layers/__init__.py similarity index 100% rename from python/paddle/fluid/layers/__init__.py rename to python/paddle/base/layers/__init__.py diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/base/layers/io.py similarity index 100% rename from python/paddle/fluid/layers/io.py rename to python/paddle/base/layers/io.py diff --git a/python/paddle/fluid/layers/layer_function_generator.py b/python/paddle/base/layers/layer_function_generator.py similarity index 99% rename from python/paddle/fluid/layers/layer_function_generator.py rename to python/paddle/base/layers/layer_function_generator.py index f44f00f41a46e..17cf2820f85d2 100644 --- a/python/paddle/fluid/layers/layer_function_generator.py +++ b/python/paddle/base/layers/layer_function_generator.py @@ -349,7 +349,7 @@ def func(x, name=None): func.__name__ = inplace_op_type func.__doc__ = """ Inplace version of ``{0}`` API, the output Tensor will be inplaced with input ``x``. -Please refer to :ref:`api_fluid_layers_{1}`. +Please refer to :ref:`api_base_layers_{1}`. 
""".format( origin_op_type, origin_op_type ) diff --git a/python/paddle/fluid/layers/math_op_patch.py b/python/paddle/base/layers/math_op_patch.py similarity index 98% rename from python/paddle/fluid/layers/math_op_patch.py rename to python/paddle/base/layers/math_op_patch.py index e95fbacf2f66a..d2858430a438d 100644 --- a/python/paddle/fluid/layers/math_op_patch.py +++ b/python/paddle/base/layers/math_op_patch.py @@ -18,7 +18,7 @@ from .. import core from ..framework import Variable, unique_name, static_only from .layer_function_generator import OpProtoHolder -from paddle.fluid.dygraph.base import in_declarative_mode +from paddle.base.dygraph.base import in_declarative_mode _supported_int_dtype_ = [ core.VarDesc.VarType.BOOL, @@ -241,7 +241,7 @@ def place(self): def astype(self, dtype): """ **Notes**: - **The variable must be a** :ref:`api_fluid_Tensor` + **The variable must be a** :ref:`api_base_Tensor` Cast a variable to a specified data type. @@ -259,11 +259,11 @@ def astype(self, dtype): .. code-block:: python import paddle - import paddle.fluid as fluid + import paddle.base as base paddle.enable_static() - startup_prog = fluid.Program() - main_prog = fluid.Program() - with fluid.program_guard(startup_prog, main_prog): + startup_prog = base.Program() + main_prog = base.Program() + with base.program_guard(startup_prog, main_prog): original_variable = paddle.static.data(name = "new_variable", shape=[2,2], dtype='float32') new_variable = original_variable.astype('int64') print("new var's dtype is: {}".format(new_variable.dtype)) @@ -272,12 +272,12 @@ def astype(self, dtype): .. code-block:: python - import paddle.fluid as fluid + import paddle.base as base import numpy as np x = np.ones([2, 2], np.float32) - with fluid.dygraph.guard(): - original_variable = fluid.dygraph.to_variable(x) + with base.dygraph.guard(): + original_variable = base.dygraph.to_variable(x) print("original var's dtype is: {}, numpy dtype is {}".format(original_variable.dtype, original_variable.numpy().dtype)) new_variable = original_variable.astype('int64') print("new var's dtype is: {}, numpy dtype is {}".format(new_variable.dtype, new_variable.numpy().dtype)) diff --git a/python/paddle/fluid/lod_tensor.py b/python/paddle/base/lod_tensor.py similarity index 93% rename from python/paddle/fluid/lod_tensor.py rename to python/paddle/base/lod_tensor.py index d099536fcd784..02058c0f576dc 100644 --- a/python/paddle/fluid/lod_tensor.py +++ b/python/paddle/base/lod_tensor.py @@ -62,10 +62,10 @@ def create_lod_tensor(data, recursive_seq_lens, place): .. code-block:: python - import paddle.fluid as fluid + import paddle.base as base import numpy as np - t = fluid.create_lod_tensor(np.ndarray([5, 30]), [[2, 3]], fluid.CPUPlace()) + t = base.create_lod_tensor(np.ndarray([5, 30]), [[2, 3]], base.CPUPlace()) """ if isinstance(data, core.LoDTensor): return create_lod_tensor(np.array(data), recursive_seq_lens, place) @@ -128,7 +128,7 @@ def create_random_int_lodtensor( :code:`base_shape` . 2. Create a numpy array of random integers, and parse the created numpy - array as parameter :code:`data` of :ref:`api_fluid_create_lod_tensor` to + array as parameter :code:`data` of :ref:`api_base_create_lod_tensor` to create the output LoDTensor. Suppose we want to create a LoDTensor to hold data for 2 sequences, where @@ -156,10 +156,10 @@ def create_random_int_lodtensor( Examples: .. 
code-block:: python - import paddle.fluid as fluid + import paddle.base as base - t = fluid.create_random_int_lodtensor(recursive_seq_lens=[[2, 3]], - base_shape=[30], place=fluid.CPUPlace(), low=0, high=10) + t = base.create_random_int_lodtensor(recursive_seq_lens=[[2, 3]], + base_shape=[30], place=base.CPUPlace(), low=0, high=10) print(t.shape()) # [5, 30] """ assert isinstance(base_shape, list), "base_shape should be a list" diff --git a/python/paddle/fluid/log_helper.py b/python/paddle/base/log_helper.py similarity index 100% rename from python/paddle/fluid/log_helper.py rename to python/paddle/base/log_helper.py diff --git a/python/paddle/fluid/multiprocess_utils.py b/python/paddle/base/multiprocess_utils.py similarity index 100% rename from python/paddle/fluid/multiprocess_utils.py rename to python/paddle/base/multiprocess_utils.py diff --git a/python/paddle/fluid/param_attr.py b/python/paddle/base/param_attr.py similarity index 99% rename from python/paddle/fluid/param_attr.py rename to python/paddle/base/param_attr.py index 10f0523f19b60..15f2f89b12016 100644 --- a/python/paddle/fluid/param_attr.py +++ b/python/paddle/base/param_attr.py @@ -14,7 +14,7 @@ import paddle from paddle.regularizer import WeightDecayRegularizer -from paddle.fluid.data_feeder import check_type +from paddle.base.data_feeder import check_type __all__ = [ 'ParamAttr', diff --git a/python/paddle/fluid/reader.py b/python/paddle/base/reader.py similarity index 95% rename from python/paddle/fluid/reader.py rename to python/paddle/base/reader.py index c08fdedba3bcb..001c96bd7df26 100644 --- a/python/paddle/fluid/reader.py +++ b/python/paddle/base/reader.py @@ -46,7 +46,7 @@ ) from .unique_name import UniqueNameGenerator from .framework import _get_paddle_place, _get_paddle_place_list -from paddle.fluid.framework import _set_expected_place, _current_expected_place +from paddle.base.framework import _set_expected_place, _current_expected_place import logging import warnings @@ -147,7 +147,7 @@ def _check_input_array(cls, item): "this means the input data contains nested lists with different lengths. " "\n\t* Check the reader function passed to 'decorate_batch_generator'" " to locate the data causes this issue.\n\t* Please consider using " - "'fluid.create_lod_tensor' to convert it to a LoD-Tensor." + "'base.create_lod_tensor' to convert it to a LoD-Tensor." 
) return arr @@ -327,7 +327,7 @@ def set_data_source(loader, places): label = static.data(name='label', shape=[None, 1], dtype='int64') # Define DataLoader - loader = paddle.fluid.io.DataLoader.from_generator(feed_list=[image, label], capacity=16, iterable=ITERABLE) + loader = paddle.base.io.DataLoader.from_generator(feed_list=[image, label], capacity=16, iterable=ITERABLE) # Define network loss = simple_net(image, label) @@ -401,7 +401,7 @@ def forward(self, x): adam = opt.Adam(learning_rate=0.001, parameters=dp_layer.parameters()) # create data loader - loader = paddle.fluid.io.DataLoader.from_generator(capacity=5) + loader = paddle.base.io.DataLoader.from_generator(capacity=5) loader.set_batch_generator(random_batch_reader()) for epoch_id in range(EPOCH_NUM): @@ -478,7 +478,7 @@ def from_dataset(dataset, places, drop_last=True): use_var=[image, label]) dataset.set_filelist(['a.txt', 'b.txt', 'c.txt']) - loader = paddle.fluid.io.DataLoader.from_dataset(dataset, static.cpu_places()) + loader = paddle.base.io.DataLoader.from_dataset(dataset, static.cpu_places()) """ return DatasetLoader(dataset, places, drop_last) @@ -1151,9 +1151,9 @@ class PyReader(DataLoaderBase): Examples: 1. If iterable = False, the created PyReader object is almost the - same as :code:`fluid.layers.py_reader()`. Operators would be + same as :code:`base.layers.py_reader()`. Operators would be inserted into the program. User should call :code:`start()` - before each epoch and catch :code:`fluid.core.EOFException` + before each epoch and catch :code:`base.core.EOFException` thrown by :code:`Executor.run()` when epoch ends. Once the exception is caught, user should call :code:`reset()` to reset the reader manually. @@ -1161,7 +1161,7 @@ class PyReader(DataLoaderBase): .. code-block:: python import paddle - import paddle.fluid as fluid + import paddle.base as base import numpy as np paddle.enable_static() @@ -1191,7 +1191,7 @@ def reader(): image = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32') label = paddle.static.data(name='label', shape=[None, 1], dtype='int64') - reader = fluid.io.PyReader(feed_list=[image, label], + reader = base.io.PyReader(feed_list=[image, label], capacity=4, iterable=False) @@ -1199,14 +1199,14 @@ def reader(): reader.decorate_sample_list_generator( paddle.batch(user_defined_reader, batch_size=BATCH_SIZE)) loss = network(image, label) - executor = fluid.Executor(fluid.CPUPlace()) - executor.run(fluid.default_startup_program()) + executor = base.Executor(base.CPUPlace()) + executor.run(base.default_startup_program()) for i in range(EPOCH_NUM): reader.start() while True: try: executor.run(feed=None) - except fluid.core.EOFException: + except base.core.EOFException: reader.reset() break @@ -1220,7 +1220,7 @@ def reader(): .. 
code-block:: python import paddle - import paddle.fluid as fluid + import paddle.base as base import numpy as np paddle.enable_static() @@ -1247,16 +1247,16 @@ def reader(): image = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32') label = paddle.static.data(name='label', shape=[None, 1], dtype='int64') - reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True, return_list=False) + reader = base.io.PyReader(feed_list=[image, label], capacity=4, iterable=True, return_list=False) user_defined_reader = reader_creator_random_image(784, 784) reader.decorate_sample_list_generator( paddle.batch(user_defined_reader, batch_size=BATCH_SIZE), - fluid.core.CPUPlace()) + base.core.CPUPlace()) loss = network(image, label) - executor = fluid.Executor(fluid.CPUPlace()) - executor.run(fluid.default_startup_program()) + executor = base.Executor(base.CPUPlace()) + executor.run(base.default_startup_program()) for _ in range(EPOCH_NUM): for data in reader(): @@ -1269,7 +1269,7 @@ def reader(): .. code-block:: python import paddle - import paddle.fluid as fluid + import paddle.base as base import numpy as np ITER_NUM = 5 @@ -1282,9 +1282,9 @@ def reader(): np.random.random_integers(low=0, high=9, size=[1]) return reader - place = fluid.CPUPlace() - with fluid.dygraph.guard(place): - py_reader = fluid.io.PyReader(capacity=2, return_list=True) + place = base.CPUPlace() + with base.dygraph.guard(place): + py_reader = base.io.PyReader(capacity=2, return_list=True) user_defined_reader = reader_creator_random_image(784, 784) py_reader.decorate_sample_list_generator( paddle.batch(user_defined_reader, batch_size=BATCH_SIZE), @@ -1328,7 +1328,7 @@ def start(self): .. code-block:: python import paddle - import paddle.fluid as fluid + import paddle.base as base import numpy as np BATCH_SIZE = 10 @@ -1338,18 +1338,18 @@ def generator(): yield np.random.uniform(low=0, high=255, size=[784, 784]), image = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32') - reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False) + reader = base.io.PyReader(feed_list=[image], capacity=4, iterable=False) reader.decorate_sample_list_generator( paddle.batch(generator, batch_size=BATCH_SIZE)) - executor = fluid.Executor(fluid.CPUPlace()) - executor.run(fluid.default_startup_program()) + executor = base.Executor(base.CPUPlace()) + executor.run(base.default_startup_program()) for i in range(3): reader.start() while True: try: executor.run(feed=None) - except fluid.core.EOFException: + except base.core.EOFException: reader.reset() break @@ -1358,14 +1358,14 @@ def generator(): def reset(self): ''' - Reset the reader object when :code:`fluid.core.EOFException` raises. + Reset the reader object when :code:`base.core.EOFException` raises. Can only call when the reader object is not iterable. Example: .. 
code-block:: python import paddle - import paddle.fluid as fluid + import paddle.base as base import numpy as np BATCH_SIZE = 10 @@ -1375,18 +1375,18 @@ def generator(): yield np.random.uniform(low=0, high=255, size=[784, 784]), image = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32') - reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False) + reader = base.io.PyReader(feed_list=[image], capacity=4, iterable=False) reader.decorate_sample_list_generator( paddle.batch(generator, batch_size=BATCH_SIZE)) - executor = fluid.Executor(fluid.CPUPlace()) - executor.run(fluid.default_startup_program()) + executor = base.Executor(base.CPUPlace()) + executor.run(base.default_startup_program()) for i in range(3): reader.start() while True: try: executor.run(feed=None) - except fluid.core.EOFException: + except base.core.EOFException: reader.reset() break @@ -1420,7 +1420,7 @@ def decorate_sample_generator( .. code-block:: python import paddle - import paddle.fluid as fluid + import paddle.base as base import numpy as np EPOCH_NUM = 3 @@ -1447,15 +1447,15 @@ def generator(): image = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32') label = paddle.static.data(name='label', shape=[None, 1], dtype='int64') - reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True) + reader = base.io.PyReader(feed_list=[image, label], capacity=4, iterable=True) user_defined_generator = random_image_and_label_generator(784, 784) reader.decorate_sample_generator(user_defined_generator, batch_size=BATCH_SIZE, - places=[fluid.CPUPlace()]) + places=[base.CPUPlace()]) loss = network(image, label) - executor = fluid.Executor(fluid.CPUPlace()) - executor.run(fluid.default_startup_program()) + executor = base.Executor(base.CPUPlace()) + executor.run(base.default_startup_program()) for _ in range(EPOCH_NUM): for data in reader(): @@ -1485,7 +1485,7 @@ def decorate_sample_list_generator(self, reader, places=None): .. code-block:: python import paddle - import paddle.fluid as fluid + import paddle.base as base import numpy as np paddle.enable_static() @@ -1514,16 +1514,16 @@ def generator(): image = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32') label = paddle.static.data(name='label', shape=[None, 1], dtype='int64') - reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True) + reader = base.io.PyReader(feed_list=[image, label], capacity=4, iterable=True) user_defined_generator = random_image_and_label_generator(784, 784) reader.decorate_sample_list_generator( paddle.batch(user_defined_generator, batch_size=BATCH_SIZE), - fluid.core.CPUPlace()) + base.core.CPUPlace()) loss = network(image, label) - executor = fluid.Executor(fluid.core.CPUPlace()) - executor.run(fluid.default_startup_program()) + executor = base.Executor(base.core.CPUPlace()) + executor.run(base.default_startup_program()) for _ in range(EPOCH_NUM): for data in reader(): @@ -1551,7 +1551,7 @@ def decorate_batch_generator(self, reader, places=None): .. 
code-block:: python import paddle - import paddle.fluid as fluid + import paddle.base as base import numpy as np paddle.enable_static() @@ -1582,14 +1582,14 @@ def generator(): image = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32') label = paddle.static.data(name='label', shape=[None, 1], dtype='int64') - reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True) + reader = base.io.PyReader(feed_list=[image, label], capacity=4, iterable=True) user_defined_generator = random_image_and_label_generator(784, 784) - reader.decorate_batch_generator(user_defined_generator, fluid.CPUPlace()) + reader.decorate_batch_generator(user_defined_generator, base.CPUPlace()) loss = network(image, label) - executor = fluid.Executor(fluid.CPUPlace()) - executor.run(fluid.default_startup_program()) + executor = base.Executor(base.CPUPlace()) + executor.run(base.default_startup_program()) for _ in range(EPOCH_NUM): for data in reader(): diff --git a/python/paddle/fluid/trainer_desc.py b/python/paddle/base/trainer_desc.py similarity index 100% rename from python/paddle/fluid/trainer_desc.py rename to python/paddle/base/trainer_desc.py diff --git a/python/paddle/fluid/trainer_factory.py b/python/paddle/base/trainer_factory.py similarity index 99% rename from python/paddle/fluid/trainer_factory.py rename to python/paddle/base/trainer_factory.py index e06b48a079af9..859d8c27f71d2 100644 --- a/python/paddle/fluid/trainer_factory.py +++ b/python/paddle/base/trainer_factory.py @@ -17,7 +17,7 @@ import time import logging import numpy as np -from paddle.fluid.log_helper import get_logger +from paddle.base.log_helper import get_logger local_logger = get_logger( __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s' diff --git a/python/paddle/fluid/unique_name.py b/python/paddle/base/unique_name.py similarity index 100% rename from python/paddle/fluid/unique_name.py rename to python/paddle/base/unique_name.py diff --git a/python/paddle/fluid/variable_index.py b/python/paddle/base/variable_index.py similarity index 98% rename from python/paddle/fluid/variable_index.py rename to python/paddle/base/variable_index.py index bdc3d74050912..2efe308df0b83 100644 --- a/python/paddle/fluid/variable_index.py +++ b/python/paddle/base/variable_index.py @@ -73,10 +73,10 @@ def __init__(self): def update(self, index): if is_list_tuple(index, int) or isinstance( - index, (paddle.fluid.Variable, np.ndarray) + index, (paddle.base.Variable, np.ndarray) ): # convert index to Tensor - if not isinstance(index, paddle.fluid.Variable): + if not isinstance(index, paddle.base.Variable): index = paddle.assign(index) if self.dtype is None: @@ -123,7 +123,7 @@ def numel(self, shape): def get_offset_stride(self, tensor_shape): for index in self.indexes: - if not isinstance(index, paddle.fluid.Variable): + if not isinstance(index, paddle.base.Variable): raise ValueError( "only support list/tensor index, but received {}.".format( type(index) @@ -152,7 +152,7 @@ def get_item(self, tensor): return paddle.gather_nd(tensor, index) def set_item(self, tensor_origin, value): - if not isinstance(value, paddle.fluid.Variable): + if not isinstance(value, paddle.base.Variable): value = paddle.assign(value) tensor_type = None @@ -398,7 +398,7 @@ def _setitem_for_tensor_array(var, item, value): def _setitem_impl_(var, item, value): - from paddle.fluid import core + from paddle.base import core from .framework import default_main_program, Variable if var.type == core.VarDesc.VarType.LOD_TENSOR_ARRAY: @@ 
-565,7 +565,7 @@ def _setitem_impl_(var, item, value): var._bump_inplace_version() output = var else: - helper = paddle.fluid.layer_helper.LayerHelper('set_value', **locals()) + helper = paddle.base.layer_helper.LayerHelper('set_value', **locals()) if helper.main_program.current_block_idx != 0: # not in global block, we should create a global variable. output = helper._create_global_variable_for_type_inference( @@ -786,7 +786,7 @@ def parse_index(x, indices): has_advanced_index = True estimated_dim += 1 - elif isinstance(slice_item, paddle.fluid.Variable): + elif isinstance(slice_item, paddle.base.Variable): # In this case, the Variable is not 0-dim Tensor and will be treated as advanced-indexing. if slice_item.dtype == paddle.bool: if slice_item.ndim == 0: @@ -816,7 +816,7 @@ def parse_index(x, indices): axes.append(dim) use_strided_slice = ( True - if (isinstance(step, paddle.fluid.Variable) or step != 1) + if (isinstance(step, paddle.base.Variable) or step != 1) else use_strided_slice ) return ( @@ -844,7 +844,7 @@ def _setitem_static(x, indices, values): """ from .framework import default_main_program, Variable - if x.type == paddle.fluid.core.VarDesc.VarType.LOD_TENSOR_ARRAY: + if x.type == paddle.base.core.VarDesc.VarType.LOD_TENSOR_ARRAY: return _setitem_for_tensor_array(x, indices, values) # step1: parsing the index and recording them @@ -912,7 +912,7 @@ def _setitem_static(x, indices, values): x._bump_inplace_version() output = x else: - helper = paddle.fluid.layer_helper.LayerHelper( + helper = paddle.base.layer_helper.LayerHelper( 'set_value', **locals() ) if helper.main_program.current_block_idx != 0: @@ -986,7 +986,7 @@ def _setitem_static(x, indices, values): x._bump_inplace_version() output = x else: - helper = paddle.fluid.layer_helper.LayerHelper( + helper = paddle.base.layer_helper.LayerHelper( 'set_value', **locals() ) if helper.main_program.current_block_idx != 0: diff --git a/python/paddle/fluid/wrapped_decorator.py b/python/paddle/base/wrapped_decorator.py similarity index 100% rename from python/paddle/fluid/wrapped_decorator.py rename to python/paddle/base/wrapped_decorator.py diff --git a/python/paddle/common_ops_import.py b/python/paddle/common_ops_import.py index 042934d66f775..3605d71c39e62 100644 --- a/python/paddle/common_ops_import.py +++ b/python/paddle/common_ops_import.py @@ -11,16 +11,16 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from paddle import fluid # noqa: F401 -from paddle.fluid import core, dygraph_utils # noqa: F401 -from paddle.fluid.core import VarDesc # noqa: F401 -from paddle.fluid.data_feeder import ( # noqa: F401 +from paddle import base # noqa: F401 +from paddle.base import core, dygraph_utils # noqa: F401 +from paddle.base.core import VarDesc # noqa: F401 +from paddle.base.data_feeder import ( # noqa: F401 check_dtype, check_type, check_variable_and_dtype, convert_dtype, ) -from paddle.fluid.framework import ( # noqa: F401 +from paddle.base.framework import ( # noqa: F401 OpProtoHolder, Variable, _create_tensor, @@ -31,8 +31,8 @@ dygraph_only, in_dygraph_mode, ) -from paddle.fluid.layer_helper import LayerHelper # noqa: F401 -from paddle.fluid.layers.layer_function_generator import ( # noqa: F401 +from paddle.base.layer_helper import LayerHelper # noqa: F401 +from paddle.base.layers.layer_function_generator import ( # noqa: F401 templatedoc, ) -from paddle.fluid.param_attr import ParamAttr # noqa: F401 +from paddle.base.param_attr import ParamAttr # noqa: F401 diff --git a/python/paddle/cost_model/cost_model.py b/python/paddle/cost_model/cost_model.py index 20f98e906e031..f976bed4704d3 100644 --- a/python/paddle/cost_model/cost_model.py +++ b/python/paddle/cost_model/cost_model.py @@ -19,7 +19,7 @@ import paddle from paddle import static -from paddle.fluid import core +from paddle.base import core class CostModel: diff --git a/python/paddle/dataset/uci_housing.py b/python/paddle/dataset/uci_housing.py index d5e68db9d6954..744f9104c51f5 100644 --- a/python/paddle/dataset/uci_housing.py +++ b/python/paddle/dataset/uci_housing.py @@ -150,9 +150,9 @@ def reader(): return reader -def fluid_model(): +def base_model(): parameter_tar = paddle.dataset.common.download( FLUID_URL_MODEL, 'uci_housing', FLUID_MD5_MODEL, 'fit_a_line.fluid.tar' ) tar = tarfile.TarFile(parameter_tar, mode='r') diff --git a/python/paddle/decomposition/decomp.py b/python/paddle/decomposition/decomp.py index c166c131d10cb..273b11f24c9e5 100644 --- a/python/paddle/decomposition/decomp.py +++ b/python/paddle/decomposition/decomp.py @@ -16,7 +16,7 @@ import typing from paddle import ir -from paddle.fluid.libpaddle.ir import Block, Program +from paddle.base.libpaddle.ir import Block, Program from paddle.framework import core from .
import register diff --git a/python/paddle/device/__init__.py b/python/paddle/device/__init__.py index a7b7dd1be052f..764c84c325572 100644 --- a/python/paddle/device/__init__.py +++ b/python/paddle/device/__init__.py @@ -17,11 +17,11 @@ import os import ctypes import paddle -from paddle.fluid import core -from paddle.fluid import framework -from paddle.fluid.framework import is_compiled_with_cinn # noqa: F401 -from paddle.fluid.framework import is_compiled_with_cuda # noqa: F401 -from paddle.fluid.framework import is_compiled_with_rocm # noqa: F401 +from paddle.base import core +from paddle.base import framework +from paddle.base.framework import is_compiled_with_cinn # noqa: F401 +from paddle.base.framework import is_compiled_with_cuda # noqa: F401 +from paddle.base.framework import is_compiled_with_rocm # noqa: F401 from . import cuda from . import xpu @@ -854,8 +854,8 @@ def __enter__(self): self.src_prev_stream = current_stream(cur_stream.device) if self.src_prev_stream.device != cur_stream.device: - self.tmp_place = paddle.fluid.framework._current_expected_place() - paddle.fluid.framework._set_expected_place(cur_stream.device) + self.tmp_place = paddle.base.framework._current_expected_place() + paddle.base.framework._set_expected_place(cur_stream.device) self.dst_prev_stream = current_stream(cur_stream.device) set_stream(cur_stream) else: @@ -868,7 +868,7 @@ def __exit__(self, *args): if self.src_prev_stream.device != cur_stream.device: set_stream(self.dst_prev_stream) - paddle.fluid.framework._set_expected_place(self.tmp_place) + paddle.base.framework._set_expected_place(self.tmp_place) set_stream(self.src_prev_stream) else: set_stream(self.src_prev_stream) diff --git a/python/paddle/device/cuda/__init__.py b/python/paddle/device/cuda/__init__.py index 7f3ea571fb2a5..d2d7d0e23477a 100644 --- a/python/paddle/device/cuda/__init__.py +++ b/python/paddle/device/cuda/__init__.py @@ -13,8 +13,8 @@ # limitations under the License. 
import paddle -from paddle.fluid import core -from paddle.fluid.wrapped_decorator import signature_safe_contextmanager +from paddle.base import core +from paddle.base.wrapped_decorator import signature_safe_contextmanager from paddle.utils import deprecated from .streams import Stream # noqa: F401 diff --git a/python/paddle/device/cuda/graphs.py b/python/paddle/device/cuda/graphs.py index 2983897381a85..598bf64a10387 100644 --- a/python/paddle/device/cuda/graphs.py +++ b/python/paddle/device/cuda/graphs.py @@ -16,15 +16,15 @@ import warnings import paddle -from paddle.fluid import core -from paddle.fluid.core import ( +from paddle.base import core +from paddle.base.core import ( CUDAPlace, is_compiled_with_cuda, is_compiled_with_rocm, ) if is_compiled_with_cuda() and not is_compiled_with_rocm(): - from paddle.fluid.core import CUDAGraph as CoreCUDAGraph + from paddle.base.core import CUDAGraph as CoreCUDAGraph def is_cuda_graph_supported(): return True @@ -82,7 +82,7 @@ def wrap_cuda_graph(function, mode="thread_local", memory_pool="default"): assert mode in ALL_MODES if not paddle.in_dynamic_mode(): # static graph mode - from paddle.fluid.framework import _cuda_graph_guard + from paddle.base.framework import _cuda_graph_guard global cuda_graph_id graph_id = str(cuda_graph_id) diff --git a/python/paddle/device/cuda/streams.py b/python/paddle/device/cuda/streams.py index 6d716fd9016c5..cdfa8cfe0230b 100644 --- a/python/paddle/device/cuda/streams.py +++ b/python/paddle/device/cuda/streams.py @@ -12,5 +12,5 @@ # See the License for the specific language governing permissions and # limitations under the License. -from paddle.fluid.core import CUDAEvent as Event # noqa: F401 -from paddle.fluid.core import CUDAStream as Stream # noqa: F401 +from paddle.base.core import CUDAEvent as Event # noqa: F401 +from paddle.base.core import CUDAStream as Stream # noqa: F401 diff --git a/python/paddle/device/xpu/__init__.py b/python/paddle/device/xpu/__init__.py index 9d0ff4b3ea63f..1567f0e6658a7 100644 --- a/python/paddle/device/xpu/__init__.py +++ b/python/paddle/device/xpu/__init__.py @@ -13,7 +13,7 @@ # limitations under the License. 
import paddle -from paddle.fluid import core +from paddle.base import core from paddle.utils import deprecated __all__ = [ diff --git a/python/paddle/distributed/auto_parallel/static/auto_align_tool.py b/python/paddle/distributed/auto_parallel/static/auto_align_tool.py index ad345497167cd..1d25a264bf621 100644 --- a/python/paddle/distributed/auto_parallel/static/auto_align_tool.py +++ b/python/paddle/distributed/auto_parallel/static/auto_align_tool.py @@ -30,8 +30,8 @@ is_forward_op, is_loss_op, ) -from paddle.fluid import core -from paddle.fluid.framework import Program +from paddle.base import core +from paddle.base.framework import Program from paddle.static.io import deserialize_program _valid_types = [ diff --git a/python/paddle/distributed/auto_parallel/static/cluster.py b/python/paddle/distributed/auto_parallel/static/cluster.py index 3145153893f47..47d9711367765 100644 --- a/python/paddle/distributed/auto_parallel/static/cluster.py +++ b/python/paddle/distributed/auto_parallel/static/cluster.py @@ -884,7 +884,7 @@ def is_by_json_config(json_config): gpu_name = os.getenv("PADDLE_XCCL_BACKEND", None) gpu_model = gpu_name memory = int( - paddle.fluid.core.libpaddle._get_device_total_memory(gpu_name) + paddle.base.core.libpaddle._get_device_total_memory(gpu_name) ) // (1000**3) else: gpu_info = paddle.device.cuda.get_device_properties() diff --git a/python/paddle/distributed/auto_parallel/static/completion.py b/python/paddle/distributed/auto_parallel/static/completion.py index acc7f512e7f31..cb64025d7db0f 100644 --- a/python/paddle/distributed/auto_parallel/static/completion.py +++ b/python/paddle/distributed/auto_parallel/static/completion.py @@ -16,7 +16,7 @@ import logging from paddle.distributed.fleet.meta_optimizers.common import OpRole -from paddle.fluid.core import get_spmd_rule # noqa: F401 +from paddle.base.core import get_spmd_rule # noqa: F401 from paddle.framework import core from ..process_mesh import ProcessMesh, compute_compatible_process_mesh diff --git a/python/paddle/distributed/auto_parallel/static/cost_model.py b/python/paddle/distributed/auto_parallel/static/cost_model.py index 7a9ccd691d644..b3e5bce8ef58f 100644 --- a/python/paddle/distributed/auto_parallel/static/cost_model.py +++ b/python/paddle/distributed/auto_parallel/static/cost_model.py @@ -164,7 +164,7 @@ def __init__(self, node, node_type, id=None, is_bwd=False, is_optim=False): self.is_optim = is_optim def init_comp_cost(self, cost_data): - # TODO: improve fluid.CostModel for more specific cost_data + # TODO: improve base.CostModel for more specific cost_data op_id = self.node.desc.id() if op_id in cost_data.keys(): self.cost = cost_data[op_id] diff --git a/python/paddle/distributed/auto_parallel/static/dist_attribute.py b/python/paddle/distributed/auto_parallel/static/dist_attribute.py index d31df134d6b6a..a8ee0e313669a 100644 --- a/python/paddle/distributed/auto_parallel/static/dist_attribute.py +++ b/python/paddle/distributed/auto_parallel/static/dist_attribute.py @@ -12,6 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License -from paddle.fluid.core import DistTensorSpec # noqa: F401 -from paddle.fluid.core import OperatorDistAttr # noqa: F401 -from paddle.fluid.core import TensorDistAttr # noqa: F401 +from paddle.base.core import DistTensorSpec # noqa: F401 +from paddle.base.core import OperatorDistAttr # noqa: F401 +from paddle.base.core import TensorDistAttr # noqa: F401 diff --git 
a/python/paddle/distributed/auto_parallel/static/dist_loader.py b/python/paddle/distributed/auto_parallel/static/dist_loader.py index 8db3d21e3ad47..1069fa92f830f 100644 --- a/python/paddle/distributed/auto_parallel/static/dist_loader.py +++ b/python/paddle/distributed/auto_parallel/static/dist_loader.py @@ -199,7 +199,7 @@ def data_generator(): yield partial_data - dataloader = paddle.fluid.io.DataLoader.from_generator( + dataloader = paddle.base.io.DataLoader.from_generator( feed_list=self.feed_list, capacity=self.capacity, use_double_buffer=self.use_double_buffer, diff --git a/python/paddle/distributed/auto_parallel/static/engine.py b/python/paddle/distributed/auto_parallel/static/engine.py index 2b66ac4e16a22..16b452944f932 100644 --- a/python/paddle/distributed/auto_parallel/static/engine.py +++ b/python/paddle/distributed/auto_parallel/static/engine.py @@ -25,7 +25,7 @@ import paddle.distributed.auto_parallel.static.utils as auto_utils from paddle import static, utils from paddle.distributed import fleet -from paddle.fluid.executor import _to_name_str +from paddle.base.executor import _to_name_str from paddle.framework import IrGraph from paddle.framework import _current_expected_place as _get_device from paddle.framework import core, in_dynamic_mode diff --git a/python/paddle/distributed/auto_parallel/static/tuner/rule_based_tuner.py b/python/paddle/distributed/auto_parallel/static/tuner/rule_based_tuner.py index 79d3b7ef974e0..bb5bb4bbc2ac8 100644 --- a/python/paddle/distributed/auto_parallel/static/tuner/rule_based_tuner.py +++ b/python/paddle/distributed/auto_parallel/static/tuner/rule_based_tuner.py @@ -48,9 +48,9 @@ print_program_with_dist_attr, ) from paddle.distributed.fleet.meta_optimizers.common import OpRole -from paddle.fluid import program_guard -from paddle.fluid.backward import append_backward -from paddle.fluid.framework import Parameter, unique_name +from paddle.base import program_guard +from paddle.base.backward import append_backward +from paddle.base.framework import Parameter, unique_name from ....utils.log_utils import get_logger from ..graph import Graph diff --git a/python/paddle/distributed/auto_parallel/static/utils.py b/python/paddle/distributed/auto_parallel/static/utils.py index 3441914518822..fa12cfd68e3b2 100644 --- a/python/paddle/distributed/auto_parallel/static/utils.py +++ b/python/paddle/distributed/auto_parallel/static/utils.py @@ -22,7 +22,7 @@ import numpy as np import paddle -from paddle.fluid.wrapped_decorator import wrap_decorator +from paddle.base.wrapped_decorator import wrap_decorator from paddle.framework import core from paddle.framework.io_utils import is_belong_to_optimizer, is_parameter from paddle.static import Variable @@ -546,7 +546,7 @@ def _check_param_dict(param_dict): "The type of key of 'param_dict' should be 'str', " "but got '{}'.".format(str(type(name))) ) - if not isinstance(value, paddle.fluid.LoDTensor): + if not isinstance(value, paddle.base.LoDTensor): raise TypeError( "The type of value of 'param_dict' should be 'LoDTensor', " "but got '{}'.".format(str(type(value))) @@ -988,7 +988,7 @@ def _merge_parameter_with_dist_attr(param_list, dist_attr): def _slice_parameter_with_dist_attr(param, dist_attr): """Slice parameter with distributed attribute""" param = ( - np.array(param) if isinstance(param, paddle.fluid.LoDTensor) else param + np.array(param) if isinstance(param, paddle.base.LoDTensor) else param ) dims_mapping = dist_attr["dims_mapping"] process_shape = dist_attr["process_shape"] @@ -2401,7 +2401,7 @@ def 
__impl__(*args, **kwargs): if paddle.framework.in_dynamic_mode(): return func(*args, **kwargs) else: - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): return func(*args, **kwargs) return __impl__ diff --git a/python/paddle/distributed/collective.py b/python/paddle/distributed/collective.py index 144be980a7599..5128e2657c605 100644 --- a/python/paddle/distributed/collective.py +++ b/python/paddle/distributed/collective.py @@ -17,7 +17,7 @@ import paddle # (TODO: GhostScreaming) It will be removed later. -from paddle.fluid import core +from paddle.base import core from paddle.framework import in_dynamic_mode from .communication.group import Group, _add_new_group, is_initialized diff --git a/python/paddle/distributed/communication/stream/all_gather.py b/python/paddle/distributed/communication/stream/all_gather.py index fc814956be998..04e0a7d5361d1 100644 --- a/python/paddle/distributed/communication/stream/all_gather.py +++ b/python/paddle/distributed/communication/stream/all_gather.py @@ -16,7 +16,7 @@ import paddle.distributed as dist from paddle import framework from paddle.distributed.communication.group import _get_global_group -from paddle.fluid import data_feeder +from paddle.base import data_feeder def _all_gather_into_tensor_in_dygraph( diff --git a/python/paddle/distributed/communication/stream/all_reduce.py b/python/paddle/distributed/communication/stream/all_reduce.py index 970b060abd805..b10773006a344 100644 --- a/python/paddle/distributed/communication/stream/all_reduce.py +++ b/python/paddle/distributed/communication/stream/all_reduce.py @@ -18,7 +18,7 @@ _warn_cur_rank_not_in_group, ) from paddle.distributed.communication.reduce import ReduceOp, _get_reduce_op -from paddle.fluid import data_feeder +from paddle.base import data_feeder def _all_reduce_in_dygraph(tensor, op, group, sync_op, use_calc_stream): diff --git a/python/paddle/distributed/communication/stream/all_to_all.py b/python/paddle/distributed/communication/stream/all_to_all.py index ae1fad0109d30..7089ec70fc55d 100644 --- a/python/paddle/distributed/communication/stream/all_to_all.py +++ b/python/paddle/distributed/communication/stream/all_to_all.py @@ -19,7 +19,7 @@ _get_global_group, _warn_cur_rank_not_in_group, ) -from paddle.fluid import data_feeder +from paddle.base import data_feeder def _all_to_all_tensor_in_dygraph( diff --git a/python/paddle/distributed/communication/stream/broadcast.py b/python/paddle/distributed/communication/stream/broadcast.py index 573fd9368ffe5..2a671ac7a8700 100644 --- a/python/paddle/distributed/communication/stream/broadcast.py +++ b/python/paddle/distributed/communication/stream/broadcast.py @@ -18,7 +18,7 @@ _get_or_throw_group_rank, _warn_cur_rank_not_in_group, ) -from paddle.fluid import data_feeder +from paddle.base import data_feeder def _broadcast_in_dygraph( diff --git a/python/paddle/distributed/communication/stream/recv.py b/python/paddle/distributed/communication/stream/recv.py index c34e2d790b21a..d77010e79e061 100644 --- a/python/paddle/distributed/communication/stream/recv.py +++ b/python/paddle/distributed/communication/stream/recv.py @@ -18,7 +18,7 @@ _get_or_throw_group_rank, _warn_cur_rank_not_in_group, ) -from paddle.fluid import data_feeder +from paddle.base import data_feeder def _recv_in_dygraph( diff --git a/python/paddle/distributed/communication/stream/reduce.py b/python/paddle/distributed/communication/stream/reduce.py index 5f50cdd45b503..feb5aff33d006 100644 --- a/python/paddle/distributed/communication/stream/reduce.py +++ 
b/python/paddle/distributed/communication/stream/reduce.py @@ -19,7 +19,7 @@ _warn_cur_rank_not_in_group, ) from paddle.distributed.communication.reduce import ReduceOp, _get_reduce_op -from paddle.fluid import data_feeder +from paddle.base import data_feeder def _reduce_in_dygraph( diff --git a/python/paddle/distributed/communication/stream/reduce_scatter.py b/python/paddle/distributed/communication/stream/reduce_scatter.py index 52f9d86f7a298..d071db82b50ec 100644 --- a/python/paddle/distributed/communication/stream/reduce_scatter.py +++ b/python/paddle/distributed/communication/stream/reduce_scatter.py @@ -20,7 +20,7 @@ _warn_cur_rank_not_in_group, ) from paddle.distributed.communication.reduce import ReduceOp, _get_reduce_op -from paddle.fluid import data_feeder +from paddle.base import data_feeder def _reduce_scatter_tensor_in_dygraph( diff --git a/python/paddle/distributed/communication/stream/scatter.py b/python/paddle/distributed/communication/stream/scatter.py index 2d6c4282cb586..fc3a115ff63e4 100644 --- a/python/paddle/distributed/communication/stream/scatter.py +++ b/python/paddle/distributed/communication/stream/scatter.py @@ -22,7 +22,7 @@ _get_or_throw_group_rank, _warn_cur_rank_not_in_group, ) -from paddle.fluid import data_feeder +from paddle.base import data_feeder def _scatter_tensor_in_dygraph( diff --git a/python/paddle/distributed/communication/stream/send.py b/python/paddle/distributed/communication/stream/send.py index 298cd76bbefea..16705f05bbfd4 100644 --- a/python/paddle/distributed/communication/stream/send.py +++ b/python/paddle/distributed/communication/stream/send.py @@ -18,7 +18,7 @@ _get_or_throw_group_rank, _warn_cur_rank_not_in_group, ) -from paddle.fluid import data_feeder +from paddle.base import data_feeder def _send_in_dygraph( diff --git a/python/paddle/distributed/fleet/base/distributed_strategy.py b/python/paddle/distributed/fleet/base/distributed_strategy.py index 5cb9480eca8b6..4fb06eaf16e5d 100755 --- a/python/paddle/distributed/fleet/base/distributed_strategy.py +++ b/python/paddle/distributed/fleet/base/distributed_strategy.py @@ -21,8 +21,8 @@ import paddle from paddle.distributed.fleet.proto import distributed_strategy_pb2 from paddle.distributed.fleet.utils.log_util import logger -from paddle.fluid.framework import _global_flags -from paddle.fluid.wrapped_decorator import wrap_decorator +from paddle.base.framework import _global_flags +from paddle.base.wrapped_decorator import wrap_decorator __all__ = [] diff --git a/python/paddle/distributed/fleet/base/role_maker.py b/python/paddle/distributed/fleet/base/role_maker.py index 0b09fd082ba41..7300791dbcb7a 100755 --- a/python/paddle/distributed/fleet/base/role_maker.py +++ b/python/paddle/distributed/fleet/base/role_maker.py @@ -23,7 +23,7 @@ from paddle.distributed.fleet.base.private_helper_function import ( wait_server_ready, ) -from paddle.fluid import core +from paddle.base import core from ...backup_env import getenv_or_backup diff --git a/python/paddle/distributed/fleet/base/topology.py b/python/paddle/distributed/fleet/base/topology.py index bec592e6bb534..1bacdfb8df02e 100644 --- a/python/paddle/distributed/fleet/base/topology.py +++ b/python/paddle/distributed/fleet/base/topology.py @@ -26,7 +26,7 @@ _HYBRID_PARALLEL_GROUP = None _use_four_directions = os.environ.get( - 'PADDLE_USE_FOUR_DIRECTIONS_P2P', paddle.fluid.core.is_compiled_with_xpu() + 'PADDLE_USE_FOUR_DIRECTIONS_P2P', paddle.base.core.is_compiled_with_xpu() ) diff --git a/python/paddle/distributed/fleet/base/util_factory.py 
b/python/paddle/distributed/fleet/base/util_factory.py index 6415f95793ca1..0f7d37aba8a6b 100755 --- a/python/paddle/distributed/fleet/base/util_factory.py +++ b/python/paddle/distributed/fleet/base/util_factory.py @@ -26,8 +26,8 @@ import paddle from paddle import framework -from paddle.fluid import core -from paddle.fluid.proto import framework_pb2 +from paddle.base import core +from paddle.base.proto import framework_pb2 from paddle.static import Program from ..utils.fs import FS @@ -644,7 +644,7 @@ def check_not_expected_ops(prog, not_expected_op_types): dtype=feed_config.feeded_vars_types[i], ) feed_tensors.append( - paddle.fluid.create_lod_tensor( + paddle.base.create_lod_tensor( t, [[1] * config.batch_size], place ) ) @@ -673,7 +673,7 @@ def check_not_expected_ops(prog, not_expected_op_types): ) for i in range(len(feed_config.feeded_vars_names)) ] - feeder = paddle.fluid.DataFeeder( + feeder = paddle.base.DataFeeder( feed_list=feed_vars, place=place ) batch_feed = feed_gen( diff --git a/python/paddle/distributed/fleet/dataset/dataset.py b/python/paddle/distributed/fleet/dataset/dataset.py index 8cdd57a34248e..bdaa8cbb5ea08 100755 --- a/python/paddle/distributed/fleet/dataset/dataset.py +++ b/python/paddle/distributed/fleet/dataset/dataset.py @@ -15,8 +15,8 @@ from google.protobuf import text_format -from paddle.fluid import core -from paddle.fluid.proto import data_feed_pb2 +from paddle.base import core +from paddle.base.proto import data_feed_pb2 __all__ = [] diff --git a/python/paddle/distributed/fleet/dataset/index_dataset.py b/python/paddle/distributed/fleet/dataset/index_dataset.py index 7df2931b5d87d..4c02cb8d82c7a 100644 --- a/python/paddle/distributed/fleet/dataset/index_dataset.py +++ b/python/paddle/distributed/fleet/dataset/index_dataset.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from paddle.fluid import core +from paddle.base import core __all__ = [] diff --git a/python/paddle/distributed/fleet/fleet.py b/python/paddle/distributed/fleet/fleet.py index df0c39ee119ab..0b873cde8e6d9 100755 --- a/python/paddle/distributed/fleet/fleet.py +++ b/python/paddle/distributed/fleet/fleet.py @@ -16,8 +16,8 @@ import os import paddle -from paddle.fluid import compiler -from paddle.fluid.wrapped_decorator import wrap_decorator +from paddle.base import compiler +from paddle.base.wrapped_decorator import wrap_decorator from paddle.framework import _global_flags, in_dynamic_mode from paddle.framework.ir import apply_build_strategy @@ -1252,9 +1252,9 @@ def minimize( Args: loss (Tensor): A ``Tensor`` containing the value to minimize. - startup_program (Program, optional): :ref:`api_fluid_Program` for + startup_program (Program, optional): :ref:`api_base_Program` for initializing parameters in ``parameter_list``. The default value - is None, at this time :ref:`api_fluid_default_startup_program` will be used. + is None, at this time :ref:`api_base_default_startup_program` will be used. parameter_list (Iterable, optional): Iterable of ``Tensor`` or ``Tensor.name`` to update to minimize ``loss``. The default value is None, at this time all parameters will be updated. 
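The call-site rewrites above are mechanical, so an import smoke test is a quick sanity check; this is an illustrative sketch rather than part of the patch, and it assumes the listed submodules are re-exported under ``paddle.base`` exactly as they were under ``paddle.fluid``:

.. code-block:: python

    # Illustrative check, not part of this patch: every module path touched by
    # the rename should resolve under the new package name.
    import importlib

    renamed_modules = [
        "paddle.base",
        "paddle.base.core",
        "paddle.base.data_feeder",
        "paddle.base.framework",
        "paddle.base.wrapped_decorator",
    ]
    for name in renamed_modules:
        importlib.import_module(name)  # ImportError here means the rename is incomplete
    print("all renamed modules import cleanly")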
diff --git a/python/paddle/distributed/fleet/layers/mpu/mp_layers.py b/python/paddle/distributed/fleet/layers/mpu/mp_layers.py index 61ccbbcc448b9..6a3becc314974 100644 --- a/python/paddle/distributed/fleet/layers/mpu/mp_layers.py +++ b/python/paddle/distributed/fleet/layers/mpu/mp_layers.py @@ -15,7 +15,7 @@ import paddle from paddle.autograd import PyLayer from paddle.distributed import fleet -from paddle.fluid import core +from paddle.base import core from paddle.nn import functional as F from ....communication.reduce import ReduceOp, _get_reduce_op diff --git a/python/paddle/distributed/fleet/layers/mpu/mp_ops.py b/python/paddle/distributed/fleet/layers/mpu/mp_ops.py index ccfba7acafffd..9a82864f719f4 100644 --- a/python/paddle/distributed/fleet/layers/mpu/mp_ops.py +++ b/python/paddle/distributed/fleet/layers/mpu/mp_ops.py @@ -16,7 +16,7 @@ from paddle import _legacy_C_ops from paddle.autograd import PyLayer from paddle.distributed import collective -from paddle.fluid.data_feeder import check_dtype, check_variable_and_dtype +from paddle.base.data_feeder import check_dtype, check_variable_and_dtype from paddle.framework import LayerHelper, _create_tensor, in_dynamic_mode from paddle.nn import Layer from paddle.nn.utils import dygraph_utils diff --git a/python/paddle/distributed/fleet/layers/mpu/random.py b/python/paddle/distributed/fleet/layers/mpu/random.py index c0d4f6320f4b6..22063aa24fa79 100644 --- a/python/paddle/distributed/fleet/layers/mpu/random.py +++ b/python/paddle/distributed/fleet/layers/mpu/random.py @@ -19,8 +19,8 @@ import paddle from paddle import _legacy_C_ops from paddle.common_ops_import import Variable -from paddle.fluid import core -from paddle.fluid.data_feeder import check_variable_and_dtype +from paddle.base import core +from paddle.base.data_feeder import check_variable_and_dtype from paddle.framework import LayerHelper, in_dynamic_mode __all__ = [] diff --git a/python/paddle/distributed/fleet/meta_optimizers/dgc_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/dgc_optimizer.py index 4fa85994269a7..4f74b62a418a5 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/dgc_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/dgc_optimizer.py @@ -20,8 +20,8 @@ import paddle from paddle.common_ops_import import LayerHelper -from paddle.fluid import framework -from paddle.fluid.dygraph import base as imperative_base +from paddle.base import framework +from paddle.base.dygraph import base as imperative_base from paddle.framework import core, in_dynamic_mode from paddle.nn.clip import ClipGradByNorm, append_gradient_clip_ops from paddle.optimizer import Momentum, Optimizer @@ -313,7 +313,7 @@ def _clip_by_norm(self, x, max_norm, name=None): helper = LayerHelper("dgc_clip_by_norm_op", **args) if name is None: - name = paddle.fluid.unique_name.generate_with_ignorable_key( + name = paddle.base.unique_name.generate_with_ignorable_key( ".".join([helper.name, 'tmp']) ) diff --git a/python/paddle/distributed/fleet/meta_optimizers/raw_program_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/raw_program_optimizer.py index a1ab474723527..36061659c9cef 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/raw_program_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/raw_program_optimizer.py @@ -14,7 +14,7 @@ import os from paddle import static -from paddle.fluid import core +from paddle.base import core from paddle.framework.ir import apply_build_strategy from paddle.utils import unique_name diff --git 
a/python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py b/python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py index c15803e583771..1f7a497b3dfea 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py +++ b/python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py @@ -1034,7 +1034,7 @@ def is_gradient_merge_vars(var): def is_trainable(var): return ( - isinstance(var, paddle.fluid.framework.Parameter) and var.trainable + isinstance(var, paddle.base.framework.Parameter) and var.trainable ) def sharding_predicate(var): diff --git a/python/paddle/distributed/fleet/meta_optimizers/sharding_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/sharding_optimizer.py index ad126d5348633..884cd16ecb5da 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/sharding_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/sharding_optimizer.py @@ -14,7 +14,7 @@ import os -from paddle.fluid import core +from paddle.base import core from paddle.incubate.optimizer import PipelineOptimizer from paddle.static import ( create_global_var, diff --git a/python/paddle/distributed/fleet/meta_parallel/pipeline_parallel.py b/python/paddle/distributed/fleet/meta_parallel/pipeline_parallel.py index c194ab8d4e5e2..367169aead956 100755 --- a/python/paddle/distributed/fleet/meta_parallel/pipeline_parallel.py +++ b/python/paddle/distributed/fleet/meta_parallel/pipeline_parallel.py @@ -31,7 +31,7 @@ from .parallel_layers.pp_layers import PipelineLayer _use_four_directions = os.environ.get( - 'PADDLE_USE_FOUR_DIRECTIONS_P2P', paddle.fluid.core.is_compiled_with_xpu() + 'PADDLE_USE_FOUR_DIRECTIONS_P2P', paddle.base.core.is_compiled_with_xpu() ) if _use_four_directions: from .pp_utils import four_directions_p2p_communication as p2p diff --git a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage3.py b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage3.py index b9ca53aeef0a1..3ef5dbb91dd13 100644 --- a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage3.py +++ b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage3.py @@ -23,7 +23,7 @@ from paddle import framework, nn from paddle.autograd import PyLayer from paddle.distributed import collective -from paddle.fluid.framework import EagerParamBase +from paddle.base.framework import EagerParamBase from paddle.framework import core from paddle.nn import ClipGradByGlobalNorm diff --git a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_utils.py b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_utils.py index 723d3a177124e..3e4ba5026b1fa 100644 --- a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_utils.py +++ b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_utils.py @@ -21,8 +21,8 @@ import paddle from paddle import _C_ops, _legacy_C_ops from paddle.common_ops_import import dygraph_only -from paddle.fluid import core -from paddle.fluid.dygraph import to_variable +from paddle.base import core +from paddle.base.dygraph import to_variable from paddle.nn import clip diff --git a/python/paddle/distributed/fleet/recompute/recompute.py b/python/paddle/distributed/fleet/recompute/recompute.py index ba22372e79147..43e4dde69440a 100755 --- a/python/paddle/distributed/fleet/recompute/recompute.py +++ b/python/paddle/distributed/fleet/recompute/recompute.py @@ -170,7 +170,7 @@ def forward(ctx, run_function, preserve_rng_state, *args, 
**kwargs): @staticmethod def backward(ctx, *args): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): # TODO need to check the recompute calling is vaild or not # Restore inputs diff --git a/python/paddle/distributed/fleet/recompute/recompute_hybrid.py b/python/paddle/distributed/fleet/recompute/recompute_hybrid.py index 48dd678a22a9d..5a8ca327ffc51 100644 --- a/python/paddle/distributed/fleet/recompute/recompute_hybrid.py +++ b/python/paddle/distributed/fleet/recompute/recompute_hybrid.py @@ -158,7 +158,7 @@ def forward( # Note: # If not marked non_differentiable, all output tensors' attr `stop gradient` # will be reset to `False` in c++ backend. - # See https://github.com/PaddlePaddle/Paddle/blob/9d62efb0e6e5373823039d9eda96cd5905426c0a/paddle/fluid/pybind/eager_py_layer.cc#L388 + # See https://github.com/PaddlePaddle/Paddle/blob/9d62efb0e6e5373823039d9eda96cd5905426c0a/paddle/base/pybind/eager_py_layer.cc#L388 if framework.in_dynamic_mode() and state: ctx.mark_non_differentiable(arg) else: @@ -175,7 +175,7 @@ def forward( @staticmethod def backward(ctx, *args): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): # Restore inputs inputs = list(ctx.inputs) tensor_indices = ctx.tensor_indices diff --git a/python/paddle/distributed/fleet/runtime/the_one_ps.py b/python/paddle/distributed/fleet/runtime/the_one_ps.py index 8bc7ea5fbf5d3..43440ba37fc3a 100644 --- a/python/paddle/distributed/fleet/runtime/the_one_ps.py +++ b/python/paddle/distributed/fleet/runtime/the_one_ps.py @@ -15,11 +15,11 @@ import os import warnings -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.compiler import CompiledProgram -from paddle.fluid.executor import Executor -from paddle.fluid.framework import Program +from paddle import base +from paddle.base import core +from paddle.base.compiler import CompiledProgram +from paddle.base.executor import Executor +from paddle.base.framework import Program from ..base.private_helper_function import wait_server_ready from .runtime_base import RuntimeBase @@ -676,7 +676,7 @@ def __init__(self): super().__init__() self._communicator = None self._server = None - self._worker = fluid.core.DistFleetWrapper() + self._worker = base.core.DistFleetWrapper() self._server_sub_program = [] self._heter_client = None @@ -772,7 +772,7 @@ def sync_strategy_envs(): string_hosts = [] for idx, ep in enumerate(endpoints): host, port = ep.split(":") - pshost = fluid.core.PSHost(host, int(port), idx) + pshost = base.core.PSHost(host, int(port), idx) string_hosts.append(pshost.serialize_to_string()) dense_map = self.compiled_strategy.get_the_one_recv_context( @@ -816,7 +816,7 @@ def sync_strategy_envs(): trainer_config.mode, kwargs, trainer_config.get_communicator_flags() ) self._communicator.init_with_ctx( - send_ctx, dense_map, proto_txt, string_hosts, fluid.global_scope() + send_ctx, dense_map, proto_txt, string_hosts, base.global_scope() ) from paddle.distributed import fleet @@ -887,12 +887,12 @@ def sync_strategy_envs(): ) def _push_sparse_param( - self, var_name, table_id=-1, scope=fluid.global_scope() + self, var_name, table_id=-1, scope=base.global_scope() ): self._communicator.push_sparse_param(var_name, table_id, scope) def _get_executor(self): - executor = fluid.Executor(fluid.CPUPlace()) + executor = base.Executor(base.CPUPlace()) if self.role_maker._is_heter_parameter_server_mode: if self.role_maker._is_heter_worker(): heter_device_type = self.role_maker._heter_device_type().upper() @@ -904,13 +904,13 @@ def 
_get_executor(self): ) if heter_device_type == "GPU": executor = Executor( - fluid.CUDAPlace( + base.CUDAPlace( int(os.getenv("FLAGS_selected_gpus", "0")) ) ) elif heter_device_type == "XPU": executor = Executor( - fluid.XPUPlace( + base.XPUPlace( int(os.getenv("FLAGS_selected_xpus", "0")) ) ) @@ -1182,10 +1182,10 @@ def _init_server(self, dirname=None, var_names=None, **kwargs): string_hosts = [] for idx, ep in enumerate(endpoints): host, port = ep.split(":") - pshost = fluid.core.PSHost(host, int(port), idx) + pshost = base.core.PSHost(host, int(port), idx) string_hosts.append(pshost.serialize_to_string()) - self._server = fluid.core.DistFleetWrapper() + self._server = base.core.DistFleetWrapper() self._server.init_server( proto_txt, string_hosts, role_id, trainers, self._server_sub_program ) diff --git a/python/paddle/distributed/fleet/scaler.py b/python/paddle/distributed/fleet/scaler.py index 9423c95e99390..a9528210c6223 100755 --- a/python/paddle/distributed/fleet/scaler.py +++ b/python/paddle/distributed/fleet/scaler.py @@ -19,7 +19,7 @@ import paddle from paddle import _C_ops, _legacy_C_ops from paddle.distributed import fleet -from paddle.fluid.dygraph import to_variable +from paddle.base.dygraph import to_variable from paddle.framework import core from .base.topology import ParallelMode diff --git a/python/paddle/distributed/fleet/utils/fs.py b/python/paddle/distributed/fleet/utils/fs.py index a4f95340cb88e..770cef9c551e6 100644 --- a/python/paddle/distributed/fleet/utils/fs.py +++ b/python/paddle/distributed/fleet/utils/fs.py @@ -21,7 +21,7 @@ import time # (TODO: GhostScreaming) It will be removed later. -from paddle.fluid import core +from paddle.base import core from .log_util import logger diff --git a/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py b/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py index 2c49cbd46619f..ae9fccb1be7a6 100644 --- a/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py +++ b/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py @@ -19,7 +19,7 @@ from paddle.distributed import fleet # (TODO: GhostScreaming) It will be removed later. -from paddle.fluid import core +from paddle.base import core from paddle.framework import Block, Program, in_dynamic_mode @@ -47,7 +47,7 @@ class HybridParallelInferenceHelper: :name: bash-example1 # while op pattern - with paddle.fluid.device_guard(f'{device}:all'): + with paddle.base.device_guard(f'{device}:all'): # init global cond max_len = paddle.full(shape=[1], dtype="int64", fill_value=10) step_idx = paddle.full(shape=[1], dtype="int64", fill_value=0) @@ -59,7 +59,7 @@ class HybridParallelInferenceHelper: arr = paddle.tensor.array_write(data, step_idx) with while_op.block(): - with paddle.fluid.device_guard(f'{device}:all'): + with paddle.base.device_guard(f'{device}:all'): # read data from global lod_tensor_array element_in_arr = paddle.tensor.array_read(array=arr, i=step_idx) # write placehold data to global lod_tensor_array, @@ -67,13 +67,13 @@ class HybridParallelInferenceHelper: paddle.increment(x=step_idx, value=1.0) paddle.tensor.array_write(element_in_arr, i=step_idx, array=arr) - with paddle.fluid.device_guard(f'{device}:0'): + with paddle.base.device_guard(f'{device}:0'): ... some code - with paddle.fluid.device_guard(f'{device}:1'): + with paddle.base.device_guard(f'{device}:1'): ... 
some code - with paddle.fluid.device_guard(f'{device}:{num_pp-1}'): + with paddle.base.device_guard(f'{device}:{num_pp-1}'): # generate some data in while block and write to global lod_tensor_array # that they are read in next while step. # we will using send_v2 to send global lod_tensor_array to other pipeline and sync @@ -82,11 +82,11 @@ class HybridParallelInferenceHelper: # update cond and assign to cond_int, we will sync cond_int layers.assign(layers.cast(cond, dtype="int32"), cond_int) - with paddle.fluid.device_guard(f'{model._device}:all'): + with paddle.base.device_guard(f'{model._device}:all'): # the code below must at end of while block and exists in device:all layers.assign(layers.cast(cond_int, dtype='bool'), cond) - with paddle.fluid.device_guard(f'{model._device}:all'): + with paddle.base.device_guard(f'{model._device}:all'): # use a empty lod_tensor_array to clear lod_tensor_array layers.assign(layers.create_array(data.dtype), arr) @@ -100,7 +100,7 @@ class HybridParallelInferenceHelper: import os import numpy as np import paddle - import paddle.fluid.layers as layers + import paddle.base.layers as layers import paddle.distributed.fleet as fleet paddle.enable_static() @@ -119,10 +119,10 @@ class HybridParallelInferenceHelper: device = "gpu" with paddle.static.program_guard(main_program, startup_program): - with paddle.fluid.device_guard(f'{device}:0'): + with paddle.base.device_guard(f'{device}:0'): X = paddle.static.data(name='X', shape=[None, 2], dtype='float32') - with paddle.fluid.device_guard(f'{device}:all'): + with paddle.base.device_guard(f'{device}:all'): max_len = paddle.full( shape=[1], dtype="int64", fill_value=5, name="n") step_idx = paddle.full( @@ -135,18 +135,18 @@ class HybridParallelInferenceHelper: while_op = layers.While(cond, is_test=True) with while_op.block(): - with paddle.fluid.device_guard(f'{device}:all'): + with paddle.base.device_guard(f'{device}:all'): input = paddle.tensor.array_read(array=data, i=step_idx) paddle.increment(x=step_idx, value=1.0) paddle.tensor.array_write(input, i=step_idx, array=data) - with paddle.fluid.device_guard(f'{device}:0'): + with paddle.base.device_guard(f'{device}:0'): param_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(1.0)) weight1 = paddle.static.create_parameter( shape=[2, 5], dtype='float32', attr=param_attr, is_bias=False) hidden1 = paddle.matmul(input, weight1) - with paddle.fluid.device_guard(f'{device}:1'): + with paddle.base.device_guard(f'{device}:1'): param_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(2.0)) weight2 = paddle.static.create_parameter( shape=[5, 2], dtype='float32', attr=param_attr, is_bias=False) @@ -158,15 +158,15 @@ class HybridParallelInferenceHelper: paddle.assign(paddle.less_than(x=step_idx, y=max_len), cond) layers.assign(layers.cast(cond, dtype="int32"), cond_int) - with paddle.fluid.device_guard(f'{device}:all'): + with paddle.base.device_guard(f'{device}:all'): # the code below must at end of while block and exists in device:all layers.assign(layers.cast(cond_int, dtype='bool'), cond) - with paddle.fluid.device_guard(f'{device}:all'): + with paddle.base.device_guard(f'{device}:all'): out = layers.create_array(data.dtype) layers.assign(data, out) - with paddle.fluid.device_guard(f'{device}:all'): + with paddle.base.device_guard(f'{device}:all'): # use a empty lod_tensor_array to clear lod_tensor_array layers.assign(layers.create_array(data.dtype), data) diff --git a/python/paddle/distributed/fleet/utils/hybrid_parallel_util.py 
b/python/paddle/distributed/fleet/utils/hybrid_parallel_util.py index 340ace6ed7b80..f44ba3f41ba05 100644 --- a/python/paddle/distributed/fleet/utils/hybrid_parallel_util.py +++ b/python/paddle/distributed/fleet/utils/hybrid_parallel_util.py @@ -22,7 +22,7 @@ ) # (TODO: GhostScreaming) It will be removed later. -from paddle.fluid import core +from paddle.base import core from .log_util import logger @@ -73,7 +73,7 @@ def _apply_collective_grads(parameters, comm_group, bucket_size, scale=None): # need to div nranks if scale is not None: div_factor = paddle.to_tensor(scale, dtype=coalesced_grad.dtype) - paddle.fluid.framework._dygraph_tracer().trace_op( + paddle.base.framework._dygraph_tracer().trace_op( type="elementwise_div", inputs={'X': coalesced_grad, 'Y': div_factor}, outputs={'Out': coalesced_grad}, diff --git a/python/paddle/distributed/fleet/utils/mix_precision_utils.py b/python/paddle/distributed/fleet/utils/mix_precision_utils.py index 991a1fafc7b0f..37e6a4d32a42e 100644 --- a/python/paddle/distributed/fleet/utils/mix_precision_utils.py +++ b/python/paddle/distributed/fleet/utils/mix_precision_utils.py @@ -24,9 +24,9 @@ from paddle.distributed.fleet.utils.hybrid_parallel_util import ( obtain_optimizer_parameters_list, ) -from paddle.fluid import framework -from paddle.fluid.dygraph import base as imperative_base -from paddle.fluid.dygraph import to_variable +from paddle.base import framework +from paddle.base.dygraph import base as imperative_base +from paddle.base.dygraph import to_variable from paddle.framework import core diff --git a/python/paddle/distributed/fleet/utils/sequence_parallel_utils.py b/python/paddle/distributed/fleet/utils/sequence_parallel_utils.py index 1e7f5e93785ad..c43a339db9f3f 100644 --- a/python/paddle/distributed/fleet/utils/sequence_parallel_utils.py +++ b/python/paddle/distributed/fleet/utils/sequence_parallel_utils.py @@ -22,7 +22,7 @@ from paddle.distributed.fleet.utils.hybrid_parallel_util import ( fused_allreduce_gradients_with_group, ) -from paddle.fluid import core +from paddle.base import core from paddle.nn import Layer from paddle.nn import functional as F diff --git a/python/paddle/distributed/fleet/utils/tensor_parallel_utils.py b/python/paddle/distributed/fleet/utils/tensor_parallel_utils.py index f3026a84f1dd0..0ad0c5024015e 100644 --- a/python/paddle/distributed/fleet/utils/tensor_parallel_utils.py +++ b/python/paddle/distributed/fleet/utils/tensor_parallel_utils.py @@ -23,7 +23,7 @@ logger.addHandler(ch) from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY -from paddle.fluid import core +from paddle.base import core from paddle.static import Parameter _supported_optimizer_type = [ diff --git a/python/paddle/distributed/io.py b/python/paddle/distributed/io.py index bc125cb242a29..69f6e42bf1954 100644 --- a/python/paddle/distributed/io.py +++ b/python/paddle/distributed/io.py @@ -15,7 +15,7 @@ import os import paddle -from paddle.fluid.framework import Program, static_only +from paddle.base.framework import Program, static_only from paddle.framework import core, dygraph_not_support @@ -38,10 +38,10 @@ def _load_distributed_persistables(executor, dirname, main_program=None): .. code-block:: python import paddle - import paddle.fluid as fluid + import paddle.base as base paddle.enable_static() - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) param_path = "./my_paddle_model" t = paddle.distributed.transpiler.DistributeTranspiler() t.transpile(...) 
@@ -161,12 +161,12 @@ def load_persistables(executor, dirname, main_program=None, filename=None): .. code-block:: python import paddle - import paddle.fluid as fluid + import paddle.base as base paddle.enable_static() - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) param_path = "./my_paddle_model" - prog = fluid.default_main_program() + prog = base.default_main_program() paddle.distributed.io.load_persistables(executor=exe, dirname=param_path, main_program=None) """ @@ -367,11 +367,11 @@ def is_persistable(var): .. code-block:: python import paddle - import paddle.fluid as fluid + import paddle.base as base paddle.enable_static() - param = fluid.default_main_program().global_block().var('fc.b') - res = fluid.io.is_persistable(param) + param = base.default_main_program().global_block().var('fc.b') + res = base.io.is_persistable(param) """ if ( var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH @@ -465,7 +465,7 @@ def load_inference_model_distributed( """ Load the inference model from a given directory. By this API, you can get the model structure(Inference Program) and model parameters. If you just want to load - parameters of the pre-trained model, please use the :ref:`api_fluid_io_load_params` API. + parameters of the pre-trained model, please use the :ref:`api_base_io_load_params` API. You can refer to :ref:`api_guide_model_save_reader_en` for more details. Args: @@ -505,26 +505,26 @@ def load_inference_model_distributed( .. code-block:: python import paddle - import paddle.fluid as fluid + import paddle.base as base import numpy as np paddle.enable_static() # Build the model - main_prog = fluid.Program() - startup_prog = fluid.Program() - with fluid.program_guard(main_prog, startup_prog): - data = fluid.layers.data(name="img", shape=[64, 784], append_batch_size=False) + main_prog = base.Program() + startup_prog = base.Program() + with base.program_guard(main_prog, startup_prog): + data = base.layers.data(name="img", shape=[64, 784], append_batch_size=False) w = paddle.create_parameter(shape=[784, 200], dtype='float32') b = paddle.create_parameter(shape=[200], dtype='float32') hidden_w = paddle.matmul(x=data, y=w) - hidden_b = fluid.layers.elementwise_add(hidden_w, b) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + hidden_b = base.layers.elementwise_add(hidden_w, b) + place = base.CPUPlace() + exe = base.Executor(place) exe.run(startup_prog) # Save the inference model path = "./infer_model" - fluid.io.save_inference_model(dirname=path, feeded_var_names=['img'], + base.io.save_inference_model(dirname=path, feeded_var_names=['img'], target_vars=[hidden_b], executor=exe, main_program=main_prog) # Demo one. Not need to set the distributed look up table, because the diff --git a/python/paddle/distributed/launch/context/device.py b/python/paddle/distributed/launch/context/device.py index 7ca3c5f41bff5..dfba7b72149fb 100644 --- a/python/paddle/distributed/launch/context/device.py +++ b/python/paddle/distributed/launch/context/device.py @@ -17,7 +17,7 @@ from paddle.device import get_available_custom_device # (TODO: GhostScreaming) It will be removed later. 
-from paddle.fluid import core +from paddle.base import core class DeviceType: diff --git a/python/paddle/distributed/parallel.py b/python/paddle/distributed/parallel.py index 67452c1a4e1b2..843c9eb5d9c0e 100644 --- a/python/paddle/distributed/parallel.py +++ b/python/paddle/distributed/parallel.py @@ -185,7 +185,7 @@ def sync_params_buffers( for coalesced_var, origin_vars, var_shapes in coalesced_vars: var_len = [np.prod(v_shape) for v_shape in var_shapes] - paddle.fluid.framework._dygraph_tracer().trace_op( + paddle.base.framework._dygraph_tracer().trace_op( type='split', inputs={'X': coalesced_var}, outputs={'Out': origin_vars}, diff --git a/python/paddle/distributed/parallel_with_gloo.py b/python/paddle/distributed/parallel_with_gloo.py index f205a5d261235..1a4bf4f8fbe6e 100755 --- a/python/paddle/distributed/parallel_with_gloo.py +++ b/python/paddle/distributed/parallel_with_gloo.py @@ -21,7 +21,7 @@ # deprecated module import # (TODO: GhostScreaming) It will be removed later. -from paddle.fluid import core +from paddle.base import core __all__ = [] diff --git a/python/paddle/distributed/passes/auto_parallel_amp.py b/python/paddle/distributed/passes/auto_parallel_amp.py index 34ab1c29534a9..76a33d748d86e 100644 --- a/python/paddle/distributed/passes/auto_parallel_amp.py +++ b/python/paddle/distributed/passes/auto_parallel_amp.py @@ -24,7 +24,7 @@ set_var_dist_attr, ) from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY, OpRole -from paddle.fluid.data_feeder import check_type, check_variable_and_dtype +from paddle.base.data_feeder import check_type, check_variable_and_dtype from paddle.framework import core from paddle.static.amp.bf16.amp_utils import ( AutoMixedPrecisionListsBF16, diff --git a/python/paddle/distributed/passes/auto_parallel_fp16.py b/python/paddle/distributed/passes/auto_parallel_fp16.py index c9a0f772db5ce..8f9927ae37c28 100644 --- a/python/paddle/distributed/passes/auto_parallel_fp16.py +++ b/python/paddle/distributed/passes/auto_parallel_fp16.py @@ -839,7 +839,7 @@ def _apply_single_impl(self, main_program, startup_program, context): with main_program._optimized_guard([]): block = main_program.global_block() - # all_infs = paddle.fluid.layers.concat(found_infs) + # all_infs = paddle.base.layers.concat(found_infs) all_infs = block.create_var( name=paddle.utils.unique_name.generate_with_ignorable_key( ".".join(['concat', 'tmp']) @@ -870,7 +870,7 @@ def _apply_single_impl(self, main_program, startup_program, context): self.dist_context, ) - # found_inf = paddle.fluid.layers.reduce_any(all_infs) + # found_inf = paddle.base.layers.reduce_any(all_infs) found_inf = block.create_var( name=paddle.utils.unique_name.generate_with_ignorable_key( ".".join(['find_infinite_scale', 'tmp']) diff --git a/python/paddle/distributed/passes/auto_parallel_pipeline.py b/python/paddle/distributed/passes/auto_parallel_pipeline.py index acc99636a53d9..de7e819635a49 100644 --- a/python/paddle/distributed/passes/auto_parallel_pipeline.py +++ b/python/paddle/distributed/passes/auto_parallel_pipeline.py @@ -24,8 +24,8 @@ is_optimize_op, ) from paddle.distributed.fleet.fleet_executor_utils import TaskNode -from paddle.fluid import core -from paddle.fluid.framework import Program +from paddle.base import core +from paddle.base.framework import Program from .pass_base import PassBase, register_pass from .pass_utils import _create_program, _insert_sync_for_fthenb_1f1b diff --git a/python/paddle/distributed/passes/auto_parallel_recompute.py 
b/python/paddle/distributed/passes/auto_parallel_recompute.py index d69bdb2a44c10..194aeefb6029b 100644 --- a/python/paddle/distributed/passes/auto_parallel_recompute.py +++ b/python/paddle/distributed/passes/auto_parallel_recompute.py @@ -16,7 +16,7 @@ import paddle from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY, OpRole -from paddle.fluid.backward import ( +from paddle.base.backward import ( ProgramStats, _append_grad_suffix_, _find_op_path_, diff --git a/python/paddle/distributed/passes/pass_utils.py b/python/paddle/distributed/passes/pass_utils.py index 89c9c65ba1761..2db43237c8290 100644 --- a/python/paddle/distributed/passes/pass_utils.py +++ b/python/paddle/distributed/passes/pass_utils.py @@ -22,8 +22,8 @@ is_optimize_op, ) from paddle.distributed.fleet.meta_optimizers.common import OpRole -from paddle.fluid import core -from paddle.fluid.framework import Parameter, Program +from paddle.base import core +from paddle.base.framework import Parameter, Program __not_shape_var_type__ = [ core.VarDesc.VarType.READER, @@ -332,7 +332,7 @@ def _create_program(src_block, dst_block, src_op, force_create=False): def _insert_sync_for_fthenb_1f1b(program): """ - This implementation refers to lots of Paddle/python/paddle/fluid/optimizer.py. + This implementation refers to lots of Paddle/python/paddle/base/optimizer.py. The difference between this function with 'PipelineOptimizer' is that 'send_v2' op and 'recv_v2' op have been inserted in program by 'reshard'. """ diff --git a/python/paddle/distributed/passes/pipeline_pass_base.py b/python/paddle/distributed/passes/pipeline_pass_base.py index c18a215012e86..65e09a73327d3 100644 --- a/python/paddle/distributed/passes/pipeline_pass_base.py +++ b/python/paddle/distributed/passes/pipeline_pass_base.py @@ -15,7 +15,7 @@ import logging from paddle.distributed.auto_parallel.static.utils import get_logger -from paddle.fluid import core +from paddle.base import core from .pass_base import PassBase from .pass_utils import get_skip_gc_vars diff --git a/python/paddle/distributed/passes/pipeline_scheduler_pass.py b/python/paddle/distributed/passes/pipeline_scheduler_pass.py index dc45e73f77c4d..509836585a99d 100644 --- a/python/paddle/distributed/passes/pipeline_scheduler_pass.py +++ b/python/paddle/distributed/passes/pipeline_scheduler_pass.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from paddle.fluid import core +from paddle.base import core from .pass_base import PassContext, new_pass, register_pass from .pass_utils import _program_for_fthenb_and_1f1b diff --git a/python/paddle/distributed/passes/ps_trainer_pass.py b/python/paddle/distributed/passes/ps_trainer_pass.py index a1eeffa9abb00..85dc873b7112f 100755 --- a/python/paddle/distributed/passes/ps_trainer_pass.py +++ b/python/paddle/distributed/passes/ps_trainer_pass.py @@ -18,7 +18,7 @@ import paddle from paddle.distributed.passes.pass_base import PassBase, register_pass -from paddle.fluid import framework +from paddle.base import framework from paddle.framework import core from paddle.static import Parameter, Program diff --git a/python/paddle/distributed/ps/utils/collective_transpiler.py b/python/paddle/distributed/ps/utils/collective_transpiler.py index f5278d05367fd..989a97d1d8426 100644 --- a/python/paddle/distributed/ps/utils/collective_transpiler.py +++ b/python/paddle/distributed/ps/utils/collective_transpiler.py @@ -18,7 +18,7 @@ from paddle.distributed.fleet.base.private_helper_function import ( wait_server_ready, ) -from paddle.fluid import unique_name +from paddle.base import unique_name from paddle.framework import core from paddle.static import default_main_program, default_startup_program diff --git a/python/paddle/distributed/ps/utils/ps_program_builder.py b/python/paddle/distributed/ps/utils/ps_program_builder.py index ec2409028f91d..428875fff6f72 100755 --- a/python/paddle/distributed/ps/utils/ps_program_builder.py +++ b/python/paddle/distributed/ps/utils/ps_program_builder.py @@ -13,7 +13,7 @@ # limitations under the License. import paddle -from paddle import fluid +from paddle import base from paddle.distributed.fleet.base.private_helper_function import ( wait_server_ready, ) @@ -78,7 +78,7 @@ def _build_pserver_programs(self): def _build_programs(self): if self.attrs['is_worker']: self._build_trainer_programs() - fluid.framework.switch_startup_program(self.cloned_startup) + base.framework.switch_startup_program(self.cloned_startup) print( "paddle.static.default_startup_program: {}".format( paddle.static.default_startup_program @@ -97,7 +97,7 @@ def _build_programs(self): elif self.attrs['is_server']: self._build_pserver_programs() self.loss.block.program = self.attrs['_main_server'] - fluid.framework.switch_startup_program( + base.framework.switch_startup_program( self.attrs['_startup_server'] ) @@ -372,7 +372,7 @@ def _build_programs(self): elif self.attrs['is_server']: self._build_pserver_programs() self.loss.block.program = self.attrs['_main_server'] - fluid.framework.switch_startup_program( + base.framework.switch_startup_program( self.attrs['_startup_server'] ) @@ -470,7 +470,7 @@ def _build_pserver_programs(self): def _build_programs(self): if not self.is_server: self._build_trainer_programs() - fluid.framework.switch_startup_program(self.cloned_startup) + base.framework.switch_startup_program(self.cloned_startup) paddle.framework.switch_main_program(self.cloned_main) print( "paddle.static.default_startup_program: {}".format( @@ -479,7 +479,7 @@ def _build_programs(self): ) else: self._build_pserver_programs() - fluid.framework.switch_startup_program( + base.framework.switch_startup_program( self.attrs['_startup_server'] ) paddle.framework.switch_main_program(self.attrs['_main_server']) diff --git a/python/paddle/distributed/ps/utils/public.py b/python/paddle/distributed/ps/utils/public.py index 5537f6b4739f9..82d6eccee2e10 100755 --- a/python/paddle/distributed/ps/utils/public.py +++ 
b/python/paddle/distributed/ps/utils/public.py @@ -19,7 +19,7 @@ from functools import reduce from paddle.distributed.io import is_persistable -from paddle.fluid.framework import generate_control_dev_var_name +from paddle.base.framework import generate_control_dev_var_name from paddle.framework import core # logging.basicConfig( @@ -391,7 +391,7 @@ def get_dense_send_context( aggregate = True # print("public get_dense_send_context dense_table:", grad_name, # var_numel, origin_varnames) - from paddle.fluid.core import CommContext + from paddle.base.core import CommContext dense_ctx = CommContext( grad_name, @@ -427,7 +427,7 @@ def get_dense_send_context( aggregate = True # print("public get_dense_send_context data_norm table:", grad_name, # var_numel, origin_varnames) - from paddle.fluid.core import CommContext + from paddle.base.core import CommContext data_norm_ctx = CommContext( grad_name, @@ -455,7 +455,7 @@ def get_dense_send_context( var_numel = reduce(lambda x, y: x * y, var.shape, 1) grad_name = origin_varname aggregate = True - from paddle.fluid.core import CommContext + from paddle.base.core import CommContext dense_ctx = CommContext( grad_name, @@ -504,7 +504,7 @@ def get_geo_trainer_send_context(attrs): ) var = program.global_block().vars[grad.merged_var.name] var_numel = reduce(lambda x, y: x * y, var.shape[1:], 1) - from paddle.fluid.core import CommContext + from paddle.base.core import CommContext print( "public get_the_geo_send_context sparse: ", grad_name, var_numel @@ -544,7 +544,7 @@ def _step_ctx(idx, role_maker): endpoints = get_ps_endpoints(role_maker) sections = [1] * len(endpoints) names = [name] * len(endpoints) - from paddle.fluid.core import CommContext + from paddle.base.core import CommContext ctx = CommContext( name, @@ -602,7 +602,7 @@ def get_the_one_send_context(attrs, split_dense_table=False, ep_list=None): if grad_name in send_ctx: continue - from paddle.fluid.core import CommContext + from paddle.base.core import CommContext print( "public get_the_one_send_context sparse: ", diff --git a/python/paddle/distributed/rpc/rpc.py b/python/paddle/distributed/rpc/rpc.py index e1e2de7c7b04a..ae4c1459d8717 100644 --- a/python/paddle/distributed/rpc/rpc.py +++ b/python/paddle/distributed/rpc/rpc.py @@ -21,7 +21,7 @@ from paddle.distributed.launch.context import Node from paddle.distributed.rpc.internal import PythonFunc, _serialize from paddle.distributed.utils.launch_utils import logger -from paddle.fluid import core +from paddle.base import core WorkerInfo = namedtuple("WorkerInfo", ["name", "rank", "ip", "port"]) diff --git a/python/paddle/distributed/spawn.py b/python/paddle/distributed/spawn.py index 0bc68c34c2086..c3351c5c9436b 100644 --- a/python/paddle/distributed/spawn.py +++ b/python/paddle/distributed/spawn.py @@ -38,7 +38,7 @@ # deprecated module import # (TODO: GhostScreaming) It will be removed later. 
-from paddle.fluid import core +from paddle.base import core from paddle.framework import set_flags __all__ = [] diff --git a/python/paddle/distributed/transpiler/collective.py b/python/paddle/distributed/transpiler/collective.py index 1fb1cf474a701..03b3be36e69b0 100644 --- a/python/paddle/distributed/transpiler/collective.py +++ b/python/paddle/distributed/transpiler/collective.py @@ -18,7 +18,7 @@ from paddle.distributed.fleet.base.private_helper_function import ( wait_server_ready, ) -from paddle.fluid import unique_name +from paddle.base import unique_name from paddle.framework import core from paddle.static import default_main_program, default_startup_program diff --git a/python/paddle/distributed/transpiler/details/vars_distributed.py b/python/paddle/distributed/transpiler/details/vars_distributed.py index e6dc457c5266c..25324598d2547 100644 --- a/python/paddle/distributed/transpiler/details/vars_distributed.py +++ b/python/paddle/distributed/transpiler/details/vars_distributed.py @@ -116,7 +116,7 @@ def equal(var1, var2): def __str__(self): origin_var_str = ( - "{name} : fluid.{type}.shape{shape}.astype({dtype})".format( + "{name} : base.{type}.shape{shape}.astype({dtype})".format( name=self.origin.name, type=self.origin.type, shape=self.origin.shape, @@ -125,7 +125,7 @@ def __str__(self): ) slice_var_str = ( - "{name} : fluid.{type}.shape{shape}.astype({dtype})" + "{name} : base.{type}.shape{shape}.astype({dtype})" ".slice({is_slice}).block({block_id}).offset({offset})".format( name=self.slice.name, type=self.slice.type, diff --git a/python/paddle/distributed/transpiler/distribute_transpiler.py b/python/paddle/distributed/transpiler/distribute_transpiler.py index 38892c1a6e92e..d1059c08cb975 100644 --- a/python/paddle/distributed/transpiler/distribute_transpiler.py +++ b/python/paddle/distributed/transpiler/distribute_transpiler.py @@ -38,7 +38,7 @@ import numpy as np from paddle import framework -from paddle.fluid.framework import grad_var_name +from paddle.base.framework import grad_var_name from paddle.framework import Block, Program, core from paddle.incubate.distributed.fleet.parameter_server.ir.ps_dispatcher import ( PSDispatcher, @@ -157,8 +157,8 @@ class DistributeTranspilerConfig: .. py:attribute:: split_method (PSDispatcher) Methods of dispatching parameters for server, - :ref:`api_fluid_transpiler_RoundRobin` or - :ref:`api_fluid_transpiler_HashName` can be used and default is RoundRobin. + :ref:`api_base_transpiler_RoundRobin` or + :ref:`api_base_transpiler_HashName` can be used and default is RoundRobin. Try to choose the best method to balance loads for parameter servers. .. py:attribute:: min_block_size (int) @@ -266,7 +266,7 @@ class DistributeTranspiler: **DistributeTranspiler** - Convert the fluid program to distributed data-parallelism programs. + Convert the base program to distributed data-parallelism programs. Supports two modes: parameter server(pserver) mode and nccl2 mode. In pserver mode, the main_program will be transformed to use a remote @@ -283,7 +283,7 @@ class DistributeTranspiler: .. code-block:: python import paddle - import paddle.fluid as fluid + import paddle.base as base import paddle.distributed.transpiler as transpiler paddle.enable_static() diff --git a/python/paddle/distributed/utils/nccl_utils.py b/python/paddle/distributed/utils/nccl_utils.py index f9a1b99f91b17..2910bdd0a8d2e 100644 --- a/python/paddle/distributed/utils/nccl_utils.py +++ b/python/paddle/distributed/utils/nccl_utils.py @@ -13,7 +13,7 @@ # limitations under the License. 
-from paddle.fluid import core +from paddle.base import core def get_nccl_version_str(ver): diff --git a/python/paddle/distribution/bernoulli.py b/python/paddle/distribution/bernoulli.py index 1b365bbcd313c..c86bed5a0d78f 100644 --- a/python/paddle/distribution/bernoulli.py +++ b/python/paddle/distribution/bernoulli.py @@ -17,8 +17,8 @@ import paddle from paddle.distribution import exponential_family -from paddle.fluid.data_feeder import check_type, convert_dtype -from paddle.fluid.framework import Variable +from paddle.base.data_feeder import check_type, convert_dtype +from paddle.base.framework import Variable from paddle.framework import in_dynamic_mode from paddle.nn.functional import ( binary_cross_entropy_with_logits, diff --git a/python/paddle/distribution/categorical.py b/python/paddle/distribution/categorical.py index 1af187c2cfed7..110f324db77c7 100644 --- a/python/paddle/distribution/categorical.py +++ b/python/paddle/distribution/categorical.py @@ -16,8 +16,8 @@ import paddle from paddle.distribution import distribution -from paddle.fluid.data_feeder import check_type, convert_dtype -from paddle.fluid.framework import Variable +from paddle.base.data_feeder import check_type, convert_dtype +from paddle.base.framework import Variable from paddle.framework import in_dynamic_mode from paddle.tensor import multinomial diff --git a/python/paddle/distribution/cauchy.py b/python/paddle/distribution/cauchy.py index 818c9d0491d2f..cad5c88753421 100644 --- a/python/paddle/distribution/cauchy.py +++ b/python/paddle/distribution/cauchy.py @@ -18,7 +18,7 @@ import paddle from paddle.distribution import distribution -from paddle.fluid import framework +from paddle.base import framework class Cauchy(distribution.Distribution): diff --git a/python/paddle/distribution/dirichlet.py b/python/paddle/distribution/dirichlet.py index 4e4b002533eb9..a1695f2b36b48 100644 --- a/python/paddle/distribution/dirichlet.py +++ b/python/paddle/distribution/dirichlet.py @@ -14,8 +14,8 @@ import paddle from paddle.distribution import exponential_family -from paddle.fluid.data_feeder import check_variable_and_dtype -from paddle.fluid.layer_helper import LayerHelper +from paddle.base.data_feeder import check_variable_and_dtype +from paddle.base.layer_helper import LayerHelper from paddle.framework import in_dynamic_mode diff --git a/python/paddle/distribution/distribution.py b/python/paddle/distribution/distribution.py index 68d468accee55..0bc1a70a4c854 100644 --- a/python/paddle/distribution/distribution.py +++ b/python/paddle/distribution/distribution.py @@ -25,8 +25,8 @@ import paddle from paddle import _C_ops -from paddle.fluid.data_feeder import check_variable_and_dtype, convert_dtype -from paddle.fluid.framework import Variable +from paddle.base.data_feeder import check_variable_and_dtype, convert_dtype +from paddle.base.framework import Variable from paddle.framework import in_dynamic_mode diff --git a/python/paddle/distribution/geometric.py b/python/paddle/distribution/geometric.py index ae531cf50a873..e4f2795b31f16 100644 --- a/python/paddle/distribution/geometric.py +++ b/python/paddle/distribution/geometric.py @@ -18,7 +18,7 @@ import paddle from paddle.distribution import distribution, uniform -from paddle.fluid import framework +from paddle.base import framework class Geometric(distribution.Distribution): diff --git a/python/paddle/distribution/gumbel.py b/python/paddle/distribution/gumbel.py index 5844548255c29..6c21b8d601a3f 100644 --- a/python/paddle/distribution/gumbel.py +++ 
b/python/paddle/distribution/gumbel.py @@ -19,7 +19,7 @@ import paddle from paddle.distribution.transformed_distribution import TransformedDistribution -from paddle.fluid import framework +from paddle.base import framework class Gumbel(TransformedDistribution): diff --git a/python/paddle/distribution/laplace.py b/python/paddle/distribution/laplace.py index d4af1203fcd35..3d5ef1bbf245a 100644 --- a/python/paddle/distribution/laplace.py +++ b/python/paddle/distribution/laplace.py @@ -18,7 +18,7 @@ import paddle from paddle.distribution import distribution -from paddle.fluid import framework +from paddle.base import framework class Laplace(distribution.Distribution): diff --git a/python/paddle/distribution/normal.py b/python/paddle/distribution/normal.py index b1cec30d2d89e..f522ac65c8a6d 100644 --- a/python/paddle/distribution/normal.py +++ b/python/paddle/distribution/normal.py @@ -19,8 +19,8 @@ import paddle from paddle.distribution import distribution -from paddle.fluid.data_feeder import check_type, convert_dtype -from paddle.fluid.framework import Variable +from paddle.base.data_feeder import check_type, convert_dtype +from paddle.base.framework import Variable from paddle.framework import in_dynamic_mode from paddle.tensor import random diff --git a/python/paddle/distribution/transform.py b/python/paddle/distribution/transform.py index 06735d23b1bdf..92313c9bec58a 100644 --- a/python/paddle/distribution/transform.py +++ b/python/paddle/distribution/transform.py @@ -166,7 +166,7 @@ def forward(self, x): Returns: Tensor: Outcome of forward transformation. """ - if not isinstance(x, paddle.fluid.framework.Variable): + if not isinstance(x, paddle.base.framework.Variable): raise TypeError( f"Expected 'x' is a Tensor or Real, but got {type(x)}." ) @@ -187,7 +187,7 @@ def inverse(self, y): Returns: Tensor: Outcome of inverse transform. """ - if not isinstance(y, paddle.fluid.framework.Variable): + if not isinstance(y, paddle.base.framework.Variable): raise TypeError( f"Expected 'y' is a Tensor or Real, but got {type(y)}." ) @@ -209,12 +209,12 @@ def forward_log_det_jacobian(self, x): Returns: Tensor: The log of the absolute value of Jacobian determinant. """ - if not isinstance(x, paddle.fluid.framework.Variable): + if not isinstance(x, paddle.base.framework.Variable): raise TypeError( f"Expected 'y' is a Tensor or Real, but got {type(x)}." ) if ( - isinstance(x, paddle.fluid.framework.Variable) + isinstance(x, paddle.base.framework.Variable) and x.dim() < self._domain.event_rank ): raise ValueError( @@ -241,7 +241,7 @@ def inverse_log_det_jacobian(self, y): Returns: Tensor: The value of :math:`log|det J_{f^{-1}}(y)|`. 
""" - if not isinstance(y, paddle.fluid.framework.Variable): + if not isinstance(y, paddle.base.framework.Variable): raise TypeError(f"Expected 'y' is a Tensor, but got {type(y)}.") if y.dim() < self._codomain.event_rank: raise ValueError( @@ -441,9 +441,9 @@ class AffineTransform(Transform): _type = Type.BIJECTION def __init__(self, loc, scale): - if not isinstance(loc, paddle.fluid.framework.Variable): + if not isinstance(loc, paddle.base.framework.Variable): raise TypeError(f"Expected 'loc' is a Tensor, but got {type(loc)}") - if not isinstance(scale, paddle.fluid.framework.Variable): + if not isinstance(scale, paddle.base.framework.Variable): raise TypeError( f"Expected scale is a Tensor, but got {type(scale)}" ) @@ -791,7 +791,7 @@ class PowerTransform(Transform): _type = Type.BIJECTION def __init__(self, power): - if not isinstance(power, paddle.fluid.framework.Variable): + if not isinstance(power, paddle.base.framework.Variable): raise TypeError( f"Expected 'power' is a tensor, but got {type(power)}" ) diff --git a/python/paddle/distribution/uniform.py b/python/paddle/distribution/uniform.py index dbd27fd14728f..833195491e038 100644 --- a/python/paddle/distribution/uniform.py +++ b/python/paddle/distribution/uniform.py @@ -17,8 +17,8 @@ import paddle from paddle import _C_ops from paddle.distribution import distribution -from paddle.fluid.data_feeder import check_type, convert_dtype -from paddle.fluid.framework import Variable +from paddle.base.data_feeder import check_type, convert_dtype +from paddle.base.framework import Variable from paddle.framework import in_dynamic_mode from paddle.tensor import random diff --git a/python/paddle/fft.py b/python/paddle/fft.py index 704bc56823ae1..df9cc318830d4 100644 --- a/python/paddle/fft.py +++ b/python/paddle/fft.py @@ -19,8 +19,8 @@ import paddle from . 
import _C_ops -from .fluid.data_feeder import check_variable_and_dtype -from .fluid.layer_helper import LayerHelper +from .base.data_feeder import check_variable_and_dtype +from .base.layer_helper import LayerHelper from .framework import in_dynamic_mode from .tensor.attribute import is_floating_point, is_integer from .tensor.creation import _complex_to_real_dtype, _real_to_complex_dtype diff --git a/python/paddle/framework/__init__.py b/python/paddle/framework/__init__.py index 302acc6b4c4db..110f8cd39b585 100755 --- a/python/paddle/framework/__init__.py +++ b/python/paddle/framework/__init__.py @@ -19,18 +19,18 @@ from .framework import get_default_dtype # noqa: F401 from .framework import set_default_dtype # noqa: F401 -from ..fluid.param_attr import ParamAttr # noqa: F401 -from ..fluid.core import CPUPlace # noqa: F401 -from ..fluid.core import IPUPlace # noqa: F401 -from ..fluid.core import CUDAPlace # noqa: F401 -from ..fluid.core import CUDAPinnedPlace # noqa: F401 -from ..fluid.core import CustomPlace # noqa: F401 -from ..fluid.core import XPUPlace # noqa: F401 +from ..base.param_attr import ParamAttr # noqa: F401 +from ..base.core import CPUPlace # noqa: F401 +from ..base.core import IPUPlace # noqa: F401 +from ..base.core import CUDAPlace # noqa: F401 +from ..base.core import CUDAPinnedPlace # noqa: F401 +from ..base.core import CustomPlace # noqa: F401 +from ..base.core import XPUPlace # noqa: F401 -from ..fluid import core # noqa: F401 -from ..fluid.dygraph import base, to_variable -from ..fluid.dygraph.base import no_grad_ as no_grad # noqa: F401 -from ..fluid.dygraph.base import grad # noqa: F401 +from ..base import core # noqa: F401 +from ..base.dygraph import base, to_variable +from ..base.dygraph.base import no_grad_ as no_grad # noqa: F401 +from ..base.dygraph.base import grad # noqa: F401 from .io import save # noqa: F401 from .io import load # noqa: F401 @@ -47,41 +47,41 @@ # Do the *DUPLICATED* monkey-patch for the tensor object. # We need remove the duplicated code here once we fix # the illogical implement in the monkey-patch methods later. 
-from ..fluid.layers.math_op_patch import monkey_patch_variable -from ..fluid.dygraph.math_op_patch import monkey_patch_math_tensor -from ..fluid.framework import disable_signal_handler # noqa: F401 -from ..fluid.framework import get_flags # noqa: F401 -from ..fluid.framework import set_flags # noqa: F401 -from ..fluid.framework import Parameter -from ..fluid.dygraph.base import enable_dygraph as disable_static # noqa: F401 -from ..fluid.dygraph.base import disable_dygraph as enable_static # noqa: F401 -from ..fluid.framework import in_dygraph_mode as in_dynamic_mode # noqa: F401 -from ..fluid.framework import in_new_ir_mode # noqa: F401 -from ..fluid.framework import in_dynamic_or_new_ir_mode # noqa: F401 -from ..fluid.framework import ( +from ..base.layers.math_op_patch import monkey_patch_variable +from ..base.dygraph.math_op_patch import monkey_patch_math_tensor +from ..base.framework import disable_signal_handler # noqa: F401 +from ..base.framework import get_flags # noqa: F401 +from ..base.framework import set_flags # noqa: F401 +from ..base.framework import Parameter +from ..base.dygraph.base import enable_dygraph as disable_static # noqa: F401 +from ..base.dygraph.base import disable_dygraph as enable_static # noqa: F401 +from ..base.framework import in_dygraph_mode as in_dynamic_mode # noqa: F401 +from ..base.framework import in_new_ir_mode # noqa: F401 +from ..base.framework import in_dynamic_or_new_ir_mode # noqa: F401 +from ..base.framework import ( _current_expected_place, _get_paddle_place, ) # noqa: F401 -from ..fluid.framework import dygraph_only # noqa: F401 -from ..fluid.framework import dygraph_not_support # noqa: F401 -from ..fluid.framework import ( +from ..base.framework import dygraph_only # noqa: F401 +from ..base.framework import dygraph_not_support # noqa: F401 +from ..base.framework import ( convert_np_dtype_to_dtype_, _create_tensor, OpProtoHolder, ) # noqa: F401 -from ..fluid.framework import _dygraph_tracer # noqa: F401 -from ..fluid.framework import generate_control_dev_var_name # noqa: F401 +from ..base.framework import _dygraph_tracer # noqa: F401 +from ..base.framework import generate_control_dev_var_name # noqa: F401 -from ..fluid.layer_helper import LayerHelper # noqa: F401 -from ..fluid.framework import _global_flags # noqa: F401 -from ..fluid.framework import _apply_pass # noqa: F401 -from ..fluid.framework import switch_main_program -from ..fluid.framework import switch_startup_program -from ..fluid.framework import _set_expected_place # noqa: F401 -from ..fluid.framework import Block, Program # noqa: F401 -from ..fluid.framework import IrGraph # noqa: F401 -from ..fluid.framework import deprecate_stat_dict -from ..fluid.framework import ( +from ..base.layer_helper import LayerHelper # noqa: F401 +from ..base.framework import _global_flags # noqa: F401 +from ..base.framework import _apply_pass # noqa: F401 +from ..base.framework import switch_main_program +from ..base.framework import switch_startup_program +from ..base.framework import _set_expected_place # noqa: F401 +from ..base.framework import Block, Program # noqa: F401 +from ..base.framework import IrGraph # noqa: F401 +from ..base.framework import deprecate_stat_dict +from ..base.framework import ( _stride_in_no_check_dy2st_diff as _no_check_dy2st_diff, ) # noqa: F401 diff --git a/python/paddle/framework/dtype.py b/python/paddle/framework/dtype.py index 6640407084785..57a3cb81d00fe 100644 --- a/python/paddle/framework/dtype.py +++ b/python/paddle/framework/dtype.py @@ -12,9 +12,9 @@ # See the 
License for the specific language governing permissions and # limitations under the License. -from ..fluid.core import VarDesc -from ..fluid.core import finfo as core_finfo -from ..fluid.core import iinfo as core_iinfo +from ..base.core import VarDesc +from ..base.core import finfo as core_finfo +from ..base.core import iinfo as core_iinfo dtype = VarDesc.VarType dtype.__qualname__ = "dtype" diff --git a/python/paddle/framework/framework.py b/python/paddle/framework/framework.py index 6f8c7c7555bf0..812c42d956093 100644 --- a/python/paddle/framework/framework.py +++ b/python/paddle/framework/framework.py @@ -16,7 +16,7 @@ import numpy as np # TODO: define framework api -from paddle.fluid.layer_helper_base import LayerHelperBase +from paddle.base.layer_helper_base import LayerHelperBase __all__ = [] diff --git a/python/paddle/framework/io.py b/python/paddle/framework/io.py index cc9ed4768ced9..9acbf62748e5a 100644 --- a/python/paddle/framework/io.py +++ b/python/paddle/framework/io.py @@ -25,9 +25,9 @@ import paddle # deprecated module import -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.framework import ( +from paddle import base +from paddle.base import core +from paddle.base.framework import ( EagerParamBase, Program, Variable, @@ -83,7 +83,7 @@ def _load_state_dict_from_save_inference_model(model_path, config): programs = _construct_program_holders(model_path, config.model_filename) # 2. load layer parameters & buffers - with fluid.dygraph.guard(): + with base.dygraph.guard(): persistable_var_dict = _construct_params_and_buffers( model_path, programs, config.params_filename, append_suffix=False ) @@ -131,7 +131,7 @@ def _load_state_dict_from_save_params(model_path): var_name_list.append(var_name) # 2. create and load Tensor - with fluid.dygraph.guard(): + with base.dygraph.guard(): for name in var_name_list: new_var = _create_tensor(name=name, persistable=True) _dygraph_tracer().trace_op( @@ -160,8 +160,8 @@ def _load_state_dict_from_save_params(model_path): # - paddle.jit.save # - paddle.static.save_inference_model # - need [directory] when loading [compatible for paddle 1.x] -# - paddle.fluid.io.save_inference_model -# - paddle.fluid.io.save_params/save_persistable +# - paddle.base.io.save_inference_model +# - paddle.base.io.save_params/save_persistable # 2. Error cases: # - no error case def _build_load_path_and_config(path, config): @@ -187,7 +187,7 @@ def _build_load_path_and_config(path, config): error_msg = "The ``path`` (%s) to load model not exists." # if current path is a prefix, and the path.pdparams or path.pdopt # is exist, users may want use `paddle.load` load the result of - # `fluid.save_dygraph`, we raise error here for users + # `base.save_dygraph`, we raise error here for users params_file_path = path + ".pdparams" opti_file_path = path + ".pdopt" if os.path.exists(params_file_path) or os.path.exists(opti_file_path): @@ -561,15 +561,15 @@ def _save_lod_tensor(tensor, file_name): def _load_lod_tensor(file_name): - temp_t = paddle.fluid.core.LoDTensor() + temp_t = paddle.base.core.LoDTensor() if _is_file_path(file_name): # '_seek' is the end position of this tensor in the file. 
- _seek = paddle.fluid.core.load_lod_tensor(temp_t, file_name) + _seek = paddle.base.core.load_lod_tensor(temp_t, file_name) elif _is_memory_buffer(file_name): with _open_file_buffer(file_name, 'rb') as f: tensor_bytes = f.read() - paddle.fluid.core.load_lod_tensor_from_memory(temp_t, tensor_bytes) + paddle.base.core.load_lod_tensor_from_memory(temp_t, tensor_bytes) _seek = f.tell() else: @@ -612,7 +612,7 @@ def _load_selected_rows(file_name): elif _is_memory_buffer(file_name): with _open_file_buffer(file_name, 'rb') as f: selected_rows_bytes = f.read() - paddle.fluid.core.load_selected_rows_from_memory( + paddle.base.core.load_selected_rows_from_memory( temp_sr, selected_rows_bytes ) _seek = f.tell() @@ -905,8 +905,8 @@ def load(path, **configs): or ``paddle.Model().save(training=False)`` , ``path`` need to be a file prefix, such as ``model/mnist``, and ``paddle.load`` will get information from ``mnist.pdmodel`` and ``mnist.pdiparams`` ; - 3. loading from paddle 1.x APIs ``paddle.fluid.io.save_inference_model`` or - ``paddle.fluid.io.save_params/save_persistables`` , ``path`` need to be a + 3. loading from paddle 1.x APIs ``paddle.base.io.save_inference_model`` or + ``paddle.base.io.save_params/save_persistables`` , ``path`` need to be a directory, such as ``model`` and model is a directory. Note: diff --git a/python/paddle/framework/io_utils.py b/python/paddle/framework/io_utils.py index 1c72bc2852d0c..a5c46778605cc 100644 --- a/python/paddle/framework/io_utils.py +++ b/python/paddle/framework/io_utils.py @@ -22,10 +22,10 @@ import numpy as np import paddle -from paddle.fluid import core -from paddle.fluid.framework import Parameter, Variable, static_only -from paddle.fluid.log_helper import get_logger -from paddle.fluid.wrapped_decorator import signature_safe_contextmanager +from paddle.base import core +from paddle.base.framework import Parameter, Variable, static_only +from paddle.base.log_helper import get_logger +from paddle.base.wrapped_decorator import signature_safe_contextmanager _logger = get_logger( __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s' @@ -96,11 +96,11 @@ def is_persistable(var): >>> # doctest: +SKIP('ValueError: var fc.b not in this block') >>> import paddle - >>> import paddle.fluid as fluid + >>> import paddle.base as base >>> paddle.enable_static() - >>> param = fluid.default_main_program().global_block().var('fc.b') - >>> res = fluid.io.is_persistable(param) + >>> param = base.default_main_program().global_block().var('fc.b') + >>> res = base.io.is_persistable(param) """ if ( var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH @@ -127,11 +127,11 @@ def is_parameter(var): >>> # doctest: +SKIP('ValueError: var fc.w not in this block') >>> import paddle - >>> import paddle.fluid as fluid + >>> import paddle.base as base >>> paddle.enable_static() - >>> param = fluid.default_main_program().global_block().var('fc.w') - >>> res = fluid.io.is_parameter(param) + >>> param = base.default_main_program().global_block().var('fc.w') + >>> res = base.io.is_parameter(param) """ return isinstance(var, Parameter) @@ -166,13 +166,13 @@ def _clone_var_in_block_(block, var): @signature_safe_contextmanager def _load_program_scope(main=None, startup=None, scope=None): - prog = main if main else paddle.fluid.Program() - startup_prog = startup if startup else paddle.fluid.Program() - scope = scope if scope else paddle.fluid.core.Scope() - with paddle.fluid.scope_guard(scope): - with paddle.fluid.program_guard(prog, startup_prog): - with 
paddle.fluid.unique_name.guard(): - with paddle.fluid.framework._dygraph_guard(None): + prog = main if main else paddle.base.Program() + startup_prog = startup if startup else paddle.base.Program() + scope = scope if scope else paddle.base.core.Scope() + with paddle.base.scope_guard(scope): + with paddle.base.program_guard(prog, startup_prog): + with paddle.base.unique_name.guard(): + with paddle.base.framework._dygraph_guard(None): yield diff --git a/python/paddle/framework/ir.py b/python/paddle/framework/ir.py index 544eff024d750..dff055a1575b1 100644 --- a/python/paddle/framework/ir.py +++ b/python/paddle/framework/ir.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ..fluid.framework import _apply_pass +from ..base.framework import _apply_pass from . import core diff --git a/python/paddle/framework/random.py b/python/paddle/framework/random.py index 9670e79b457bd..8d72d0215e93c 100644 --- a/python/paddle/framework/random.py +++ b/python/paddle/framework/random.py @@ -14,8 +14,8 @@ # TODO: define random api import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core __all__ = [] @@ -49,7 +49,7 @@ def seed(seed): elif core.is_compiled_with_xpu(): for i in range(core.get_xpu_device_count()): core.default_xpu_generator(i).manual_seed(seed) - place = fluid.framework._current_expected_place() + place = base.framework._current_expected_place() if isinstance(place, core.CustomPlace): dev_cnt = sum( [ @@ -80,7 +80,7 @@ def get_rng_state(device=None): """ state_list = [] if device is None: - place = fluid.framework._current_expected_place() + place = base.framework._current_expected_place() else: place = paddle.device._convert_to_place(device) @@ -164,7 +164,7 @@ def set_rng_state(state_list, device=None): """ if device is None: - place = fluid.framework._current_expected_place() + place = base.framework._current_expected_place() else: place = device._convert_to_place(device) @@ -250,9 +250,9 @@ def _manual_program_seed(seed): Returns: None """ - fluid.default_main_program().random_seed = seed - fluid.default_startup_program().random_seed = seed - program = fluid.Program() + base.default_main_program().random_seed = seed + base.default_startup_program().random_seed = seed + program = base.Program() program.global_seed(seed) diff --git a/python/paddle/geometric/math.py b/python/paddle/geometric/math.py index fd8e2dda24484..4ba60f5d63a54 100644 --- a/python/paddle/geometric/math.py +++ b/python/paddle/geometric/math.py @@ -13,8 +13,8 @@ # limitations under the License. 
from paddle import _C_ops -from paddle.fluid.data_feeder import check_variable_and_dtype -from paddle.fluid.layer_helper import LayerHelper +from paddle.base.data_feeder import check_variable_and_dtype +from paddle.base.layer_helper import LayerHelper from paddle.framework import in_dynamic_mode __all__ = [] diff --git a/python/paddle/geometric/message_passing/send_recv.py b/python/paddle/geometric/message_passing/send_recv.py index 9ad69b7cf76b4..e0ec592d7010b 100644 --- a/python/paddle/geometric/message_passing/send_recv.py +++ b/python/paddle/geometric/message_passing/send_recv.py @@ -15,13 +15,13 @@ import numpy as np from paddle import _C_ops -from paddle.fluid.data_feeder import ( +from paddle.base.data_feeder import ( check_dtype, check_type, check_variable_and_dtype, ) -from paddle.fluid.framework import Variable -from paddle.fluid.layer_helper import LayerHelper +from paddle.base.framework import Variable +from paddle.base.layer_helper import LayerHelper from paddle.framework import in_dynamic_mode from .utils import ( diff --git a/python/paddle/geometric/message_passing/utils.py b/python/paddle/geometric/message_passing/utils.py index 5122a4f110ffe..cbcd8478ab3aa 100644 --- a/python/paddle/geometric/message_passing/utils.py +++ b/python/paddle/geometric/message_passing/utils.py @@ -15,8 +15,8 @@ import numpy as np import paddle -from paddle.fluid.data_feeder import check_dtype, convert_dtype -from paddle.fluid.framework import Variable +from paddle.base.data_feeder import check_dtype, convert_dtype +from paddle.base.framework import Variable def convert_out_size_to_list(out_size): diff --git a/python/paddle/geometric/reindex.py b/python/paddle/geometric/reindex.py index 7a38f2cfdca46..164a5af2cbb9f 100644 --- a/python/paddle/geometric/reindex.py +++ b/python/paddle/geometric/reindex.py @@ -14,9 +14,9 @@ import paddle from paddle import _C_ops -from paddle.fluid.data_feeder import check_variable_and_dtype -from paddle.fluid.framework import Variable -from paddle.fluid.layer_helper import LayerHelper +from paddle.base.data_feeder import check_variable_and_dtype +from paddle.base.framework import Variable +from paddle.base.layer_helper import LayerHelper from paddle.framework import in_dynamic_mode __all__ = [] diff --git a/python/paddle/geometric/sampling/neighbors.py b/python/paddle/geometric/sampling/neighbors.py index f3242d02adfdf..8e771f7d081fc 100644 --- a/python/paddle/geometric/sampling/neighbors.py +++ b/python/paddle/geometric/sampling/neighbors.py @@ -13,8 +13,8 @@ # limitations under the License. 
from paddle import _C_ops, _legacy_C_ops -from paddle.fluid.data_feeder import check_variable_and_dtype -from paddle.fluid.layer_helper import LayerHelper +from paddle.base.data_feeder import check_variable_and_dtype +from paddle.base.layer_helper import LayerHelper from paddle.framework import in_dynamic_mode __all__ = [] diff --git a/python/paddle/hapi/model.py b/python/paddle/hapi/model.py index 425ea07ab45c9..9002de1ee1698 100644 --- a/python/paddle/hapi/model.py +++ b/python/paddle/hapi/model.py @@ -24,16 +24,16 @@ import paddle import paddle.distributed as dist -from paddle import fluid +from paddle import base from paddle.autograd import no_grad from paddle.distributed import fleet from paddle.distributed.fleet.base import role_maker -from paddle.fluid import core -from paddle.fluid.dygraph.base import to_variable -from paddle.fluid.executor import global_scope -from paddle.fluid.framework import Variable -from paddle.fluid.framework import _current_expected_place as _get_device -from paddle.fluid.framework import _get_paddle_place +from paddle.base import core +from paddle.base.dygraph.base import to_variable +from paddle.base.executor import global_scope +from paddle.base.framework import Variable +from paddle.base.framework import _current_expected_place as _get_device +from paddle.base.framework import _get_paddle_place from paddle.framework import in_dynamic_mode from paddle.framework.io_utils import is_belong_to_optimizer from paddle.io import DataLoader, Dataset, DistributedBatchSampler @@ -59,9 +59,9 @@ def to_list(value): def to_numpy(var): assert isinstance( - var, (Variable, fluid.core.eager.Tensor) + var, (Variable, base.core.eager.Tensor) ), "not a variable" - if isinstance(var, fluid.core.eager.Tensor): + if isinstance(var, base.core.eager.Tensor): return np.array(var) t = global_scope().find_var(var.name).get_tensor() return np.array(t) @@ -131,9 +131,9 @@ def init_communicator( wait_server_ready(other_endpoints) if core.is_compiled_with_cuda(): nccl_id_var = block.create_var( - name=fluid.unique_name.generate('nccl_id'), + name=base.unique_name.generate('nccl_id'), persistable=True, - type=fluid.core.VarDesc.VarType.RAW, + type=base.core.VarDesc.VarType.RAW, ) block.append_op( @@ -159,9 +159,9 @@ def init_communicator( ) elif core.is_compiled_with_xpu(): bkcl_id_var = block.create_var( - name=fluid.unique_name.generate('bkcl_id'), + name=base.unique_name.generate('bkcl_id'), persistable=True, - type=fluid.core.VarDesc.VarType.RAW, + type=base.core.VarDesc.VarType.RAW, ) block.append_op( @@ -190,9 +190,9 @@ def init_communicator( in paddle.device.get_all_custom_device_type() ): xccl_id_var = block.create_var( - name=fluid.unique_name.generate('xccl_id'), + name=base.unique_name.generate('xccl_id'), persistable=True, - type=fluid.core.VarDesc.VarType.RAW, + type=base.core.VarDesc.VarType.RAW, ) block.append_op( @@ -221,9 +221,9 @@ def init_communicator( def prepare_distributed_context(place=None): if place is None: place = ( - fluid.CUDAPlace(paddle.distributed.ParallelEnv().dev_id) + base.CUDAPlace(paddle.distributed.ParallelEnv().dev_id) if paddle.distributed.ParallelEnv().nranks > 1 - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) place = _get_paddle_place(place) @@ -242,10 +242,10 @@ def prepare_distributed_context(place=None): global _parallel_context_initialized - if not _parallel_context_initialized and isinstance(place, fluid.CUDAPlace): + if not _parallel_context_initialized and isinstance(place, base.CUDAPlace): def _init_context(): - communicator_prog = 
fluid.Program() + communicator_prog = base.Program() init_communicator( communicator_prog, strategy.local_rank, @@ -254,13 +254,13 @@ def _init_context(): strategy.current_endpoint, strategy.trainer_endpoints, ) - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(communicator_prog) if in_dynamic_mode(): - fluid.disable_dygraph() + base.disable_dygraph() _init_context() - fluid.enable_dygraph(place) + base.enable_dygraph(place) else: assert "Only support CUDAPlace for now." @@ -299,8 +299,8 @@ def __init__(self, model): self.model = model # with `_build_once` gone, parameters are now created in `__init__` # so we need to keep track of the parameters already created - self._startup_prog = fluid.default_startup_program() - self._orig_prog = fluid.default_main_program() + self._startup_prog = base.default_startup_program() + self._orig_prog = base.default_main_program() self._label_vars = {} # label variables self._input_vars = {} # label variables @@ -388,12 +388,12 @@ def _save(state, path): # TODO: Support save/load scaler state in static graph def load(self, param_state_pairs, optim_state): if self._executor is None: - executor = fluid.Executor(fluid.CPUPlace())._default_executor + executor = base.Executor(base.CPUPlace())._default_executor else: executor = self._executor._default_executor # restore parameter states - fluid.core._create_loaded_parameter( + base.core._create_loaded_parameter( [param for param, state in param_state_pairs], global_scope(), executor, @@ -413,7 +413,7 @@ def _load_optimizer(self, state, executor): if not optim: return - fluid.core._create_loaded_parameter(optim, global_scope(), executor) + base.core._create_loaded_parameter(optim, global_scope(), executor) converted_state = dict(state) for var in optim: @@ -506,13 +506,13 @@ def _set_var(self, var, ndarray): t = global_scope().find_var(var.name).get_tensor() p = t._place() if p.is_cpu_place(): - place = fluid.CPUPlace() + place = base.CPUPlace() elif p.is_cuda_pinned_place(): - place = fluid.CUDAPinnedPlace() + place = base.CUDAPinnedPlace() else: - p = fluid.core.Place() + p = base.core.Place() p.set_place(t._place()) - place = fluid.CUDAPlace(p.gpu_device_id()) + place = base.CUDAPlace(p.gpu_device_id()) t.set(ndarray, place) @@ -651,7 +651,7 @@ def _make_program(self, mode): losses = [] metrics = [] - with fluid.program_guard(prog, self._startup_prog): + with base.program_guard(prog, self._startup_prog): inputs = self.model._inputs labels = self.model._labels if self.model._labels else [] inputs = [k._create_feed_layer() for k in to_list(inputs)] @@ -732,11 +732,11 @@ def _compile_and_initialize(self, prog, mode): # even if `forward()` may run different code path for different mode # therefore startup program only needs to run once if self._executor is None: - self._executor = fluid.Executor(place) + self._executor = base.Executor(place) # XXX incremental initialization uninitialized = [] for var_py in self._startup_prog.list_vars(): - var = fluid.global_scope().find_var(var_py.name) + var = base.global_scope().find_var(var_py.name) if ( not var_py.name.startswith('nccl_id') and var @@ -766,7 +766,7 @@ def _compile_and_initialize(self, prog, mode): self.model._optimizer.amp_init(place) if self._nranks < 2: - compiled_prog = fluid.CompiledProgram(prog) + compiled_prog = base.CompiledProgram(prog) else: compiled_prog = prog @@ -935,7 +935,7 @@ def predict_batch(self, inputs): inputs = [to_variable(x) for x in to_list(inputs)] self._input_info = _update_input_info(inputs) outputs = 
self.model.network(*inputs) - if self._nranks > 1 and isinstance(self.model._place, fluid.CUDAPlace): + if self._nranks > 1 and isinstance(self.model._place, base.CUDAPlace): outputs = [_all_gather(o) for o in to_list(outputs)] return [to_numpy(o) for o in to_list(outputs)] @@ -1026,7 +1026,7 @@ def load(self, param_state_pairs, optim_state, scaler_state=None): if not hasattr(self.model._optimizer, 'set_state_dict'): warnings.warn( - "paddle.fluid.optimizer is deprecated in API 2.0, please use paddle.optimizer instead." + "paddle.base.optimizer is deprecated in API 2.0, please use paddle.optimizer instead." ) self.model._optimizer.set_dict(converted_state) else: @@ -1708,23 +1708,23 @@ def prepare( """ self._place = _get_device() - if isinstance(self._place, fluid.CUDAPlace): + if isinstance(self._place, base.CUDAPlace): global _parallel_context_initialized if ( paddle.distributed.ParallelEnv().nranks > 1 and not _parallel_context_initialized ): if in_dynamic_mode(): - main_prog_seed = fluid.default_main_program().random_seed + main_prog_seed = base.default_main_program().random_seed startup_prog_seed = ( - fluid.default_startup_program().random_seed + base.default_startup_program().random_seed ) - fluid.disable_dygraph() + base.disable_dygraph() paddle.disable_static(self._place) # enable_dygraph would create and switch to a new program, # thus also copy seed to the new program - fluid.default_main_program().random_seed = main_prog_seed - fluid.default_startup_program().random_seed = ( + base.default_main_program().random_seed = main_prog_seed + base.default_startup_program().random_seed = ( startup_prog_seed ) else: @@ -2233,7 +2233,7 @@ def _save_inference_model(self, path): """ if in_dynamic_mode(): - with fluid.framework._dygraph_guard(None): + with base.framework._dygraph_guard(None): layer = self.network if self._input_info is None: # No provided or inferred raise RuntimeError( diff --git a/python/paddle/hapi/model_summary.py b/python/paddle/hapi/model_summary.py index 262547444d8d2..4a204f64b3d63 100644 --- a/python/paddle/hapi/model_summary.py +++ b/python/paddle/hapi/model_summary.py @@ -240,7 +240,7 @@ def summary(net, input_size=None, dtypes=None, input=None): input_size = [] for key in input.keys(): input_size.append(tuple(input[key].shape)) - elif isinstance(input, paddle.fluid.framework.Variable): + elif isinstance(input, paddle.base.framework.Variable): input_size = tuple(input.shape) else: raise ValueError( @@ -354,7 +354,7 @@ def _build_dtypes(input_size, dtype): def _get_shape_from_tensor(x): if isinstance( - x, (paddle.fluid.Variable, paddle.fluid.core.eager.Tensor) + x, (paddle.base.Variable, paddle.base.core.eager.Tensor) ): return list(x.shape) elif isinstance(x, (list, tuple)): diff --git a/python/paddle/hapi/static_flops.py b/python/paddle/hapi/static_flops.py index c6d099d8b8872..21a4359743685 100644 --- a/python/paddle/hapi/static_flops.py +++ b/python/paddle/hapi/static_flops.py @@ -73,7 +73,7 @@ def outputs(self, name): class GraphWrapper: """ - It is a wrapper of paddle.fluid.framework.IrGraph with some special functions + It is a wrapper of paddle.base.framework.IrGraph with some special functions for paddle slim framework. 
Args: diff --git a/python/paddle/incubate/__init__.py b/python/paddle/incubate/__init__.py index 81f46340d250a..f6eb4377f6033 100644 --- a/python/paddle/incubate/__init__.py +++ b/python/paddle/incubate/__init__.py @@ -16,7 +16,7 @@ from .optimizer import ModelAverage # noqa: F401 from .optimizer import DistributedFusedLamb # noqa: F401 from .checkpoint import auto_checkpoint # noqa: F401 -from ..fluid.layer_helper import LayerHelper # noqa: F401 +from ..base.layer_helper import LayerHelper # noqa: F401 from .operators import softmax_mask_fuse_upper_triangle # noqa: F401 from .operators import softmax_mask_fuse # noqa: F401 from .operators import graph_send_recv diff --git a/python/paddle/incubate/asp/asp.py b/python/paddle/incubate/asp/asp.py index 8ba4966973fed..671bc0251c1e8 100644 --- a/python/paddle/incubate/asp/asp.py +++ b/python/paddle/incubate/asp/asp.py @@ -22,8 +22,8 @@ import numpy as np import paddle -from paddle.fluid import core, global_scope, program_guard -from paddle.fluid.framework import dygraph_only +from paddle.base import core, global_scope, program_guard +from paddle.base.framework import dygraph_only from paddle.incubate import asp from .supported_layer_list import ( @@ -453,7 +453,7 @@ def prune_model(model, n=2, m=4, mask_algo='mask_1d', with_mask=True): if ( hasattr(model, "distributed_info_") and model.distributed_info_["sharding_degree"] > 1 - and paddle.fluid.is_compiled_with_cuda() + and paddle.base.is_compiled_with_cuda() ): gpu_id = int(os.environ.get('FLAGS_selected_gpus', 0)) place = paddle.CUDAPlace(gpu_id) @@ -857,7 +857,7 @@ def _step(cls, optimizer): """ optimizer.step() main_prog = paddle.static.default_main_program() - with paddle.fluid.dygraph.no_grad(): + with paddle.base.dygraph.no_grad(): ASPHelper._insert_sparse_mask_ops( main_prog, optimizer._parameter_list ) diff --git a/python/paddle/incubate/asp/supported_layer_list.py b/python/paddle/incubate/asp/supported_layer_list.py index 432adebacc620..b0d420fa36b03 100644 --- a/python/paddle/incubate/asp/supported_layer_list.py +++ b/python/paddle/incubate/asp/supported_layer_list.py @@ -20,7 +20,7 @@ import numpy as np import paddle -from paddle.fluid.log_helper import get_logger +from paddle.base.log_helper import get_logger from paddle.incubate import asp __all__ = [] diff --git a/python/paddle/incubate/autograd/composite_rules.py b/python/paddle/incubate/autograd/composite_rules.py index 1ca8ab62a8363..9123b98ac2054 100644 --- a/python/paddle/incubate/autograd/composite_rules.py +++ b/python/paddle/incubate/autograd/composite_rules.py @@ -20,7 +20,7 @@ import functools import operator -from paddle.fluid import core +from paddle.base import core from .primitives import * # noqa: F403 from .primreg import REGISTER_COMPOSITE, lookup_composite @@ -35,7 +35,7 @@ def _composite(op, *args): def softmax_composite(x, axis): """define composite rule of op softmax""" is_amp = False - from paddle.fluid.data_feeder import convert_dtype + from paddle.base.data_feeder import convert_dtype # Softmax need fp32 compute since it has sum op in dtype = convert_dtype(x.dtype) @@ -78,7 +78,7 @@ def composite_batchnorm( """ is_amp = False - from paddle.fluid.data_feeder import convert_dtype + from paddle.base.data_feeder import convert_dtype dtype = convert_dtype(x.dtype) if dtype in ["float16", "uint16"]: @@ -152,7 +152,7 @@ def layernorm_composite(x, scale, bias, epsilon, begin_norm_axis): var = mean((x-mean(x))^2) """ is_amp = False - from paddle.fluid.data_feeder import convert_dtype + from paddle.base.data_feeder 
import convert_dtype dtype = convert_dtype(x.dtype) if dtype in ["float16", "uint16"]: @@ -194,7 +194,7 @@ def instancenorm_composite(x, scale, bias, epsilon): var = mean((x-mean(x))^2) """ is_amp = False - from paddle.fluid.data_feeder import convert_dtype + from paddle.base.data_feeder import convert_dtype dtype = convert_dtype(x.dtype) if dtype in ["float16", "uint16"]: @@ -259,7 +259,7 @@ def gelu_composite(x, approximate): def mean_composite(x, axis, keepdim): """define composite rule of op mean""" is_amp = False - from paddle.fluid.data_feeder import convert_dtype + from paddle.base.data_feeder import convert_dtype dtype = convert_dtype(x.dtype) if dtype in ["float16", "uint16"]: @@ -436,7 +436,7 @@ def dropout_composite(x, seed_tensor, p, is_test, mode, seed, fix_seed): def bernoulli(shape, dtype, p, seed=0): - from paddle.fluid.data_feeder import convert_dtype + from paddle.base.data_feeder import convert_dtype # TODO(jiabin) Fix uniform doesn't support float16 error in CINN new_dtype = ( @@ -493,7 +493,7 @@ def sigmoid_composite(x): res = 1 / (1 + exp(-x)) """ is_amp = False - from paddle.fluid.data_feeder import convert_dtype + from paddle.base.data_feeder import convert_dtype dtype = convert_dtype(x.dtype) if dtype in ["float16", "uint16"]: @@ -512,7 +512,7 @@ def silu_composite(x): res = x / (1 + exp(-x)) """ is_amp = False - from paddle.fluid.data_feeder import convert_dtype + from paddle.base.data_feeder import convert_dtype dtype = convert_dtype(x.dtype) if dtype in ["float16", "uint16"]: @@ -592,7 +592,7 @@ def sqrt_composite(x): res = pow(x, 0.5) """ is_amp = False - from paddle.fluid.data_feeder import convert_dtype + from paddle.base.data_feeder import convert_dtype dtype = convert_dtype(x.dtype) if dtype in ["float16", "uint16"]: @@ -611,7 +611,7 @@ def pow_composite(x, y): res = x^y """ is_amp = False - from paddle.fluid.data_feeder import convert_dtype + from paddle.base.data_feeder import convert_dtype dtype = convert_dtype(x.dtype) if dtype in ["float16", "uint16"]: @@ -661,7 +661,7 @@ def rsqrt_composite(x): """define composite rule of op rsqrt.""" # rsqrt(x) = x^(-0.5) is_amp = False - from paddle.fluid.data_feeder import convert_dtype + from paddle.base.data_feeder import convert_dtype dtype = convert_dtype(x.dtype) if dtype in ["float16", "uint16"]: @@ -684,7 +684,7 @@ def group_norm_composite(x, scale, bias, epsilon, groups, data_layout): N, C, H, W = x.shape is_amp = False - from paddle.fluid.data_feeder import convert_dtype + from paddle.base.data_feeder import convert_dtype dtype = convert_dtype(x.dtype) # when inputs are float16 or bfloat16, convert to float32 in computing diff --git a/python/paddle/incubate/autograd/functional.py b/python/paddle/incubate/autograd/functional.py index 116f4e2936fb7..fbf054b05808a 100644 --- a/python/paddle/incubate/autograd/functional.py +++ b/python/paddle/incubate/autograd/functional.py @@ -15,7 +15,7 @@ import typing import paddle -from paddle.fluid import framework +from paddle.base import framework from paddle.incubate.autograd import primapi, utils @@ -582,7 +582,7 @@ def _grad(ys, xs, v=None): # xs_grad when the xs is a signle Tensor. 
xs_grad = paddle.grad(ys, xs, v, create_graph=True, allow_unused=True) if ( - isinstance(xs, paddle.fluid.framework.Variable) + isinstance(xs, paddle.base.framework.Variable) and isinstance(xs_grad, typing.Sequence) and len(xs_grad) > 0 ): diff --git a/python/paddle/incubate/autograd/primapi.py b/python/paddle/incubate/autograd/primapi.py index ee72cc3bd8f18..cc57f930de4a7 100644 --- a/python/paddle/incubate/autograd/primapi.py +++ b/python/paddle/incubate/autograd/primapi.py @@ -16,8 +16,8 @@ import typing import paddle -from paddle.fluid import backward, core, framework -from paddle.fluid.core import prim_config +from paddle.base import backward, core, framework +from paddle.base.core import prim_config from paddle.incubate.autograd import primx, utils @@ -240,12 +240,12 @@ def to_prim( """ if not core._is_fwd_prim_enabled(): return - if isinstance(blocks, paddle.fluid.framework.Block): + if isinstance(blocks, paddle.base.framework.Block): logging.info("Atomize composite op to primitive ops begin.") main_program = blocks.program elif isinstance(blocks, typing.Sequence): for item in blocks: - if not isinstance(item, paddle.fluid.framework.Block): + if not isinstance(item, paddle.base.framework.Block): raise TypeError( f"Expect block or sequence of blocks, but sequence contains {type(item)}." ) diff --git a/python/paddle/incubate/autograd/primops.py b/python/paddle/incubate/autograd/primops.py index 29a17dda1d458..3b628d410f525 100644 --- a/python/paddle/incubate/autograd/primops.py +++ b/python/paddle/incubate/autograd/primops.py @@ -16,7 +16,7 @@ import operator import paddle -from paddle.fluid.layer_helper import LayerHelper +from paddle.base.layer_helper import LayerHelper from .primreg import REGISTER_FN @@ -307,7 +307,7 @@ def split(x, num_or_sections, axis=0, outs=None): @REGISTER_FN('concat_p', 'XS', 'Y') def concat(xs, axis=0, out=None): - if isinstance(xs, paddle.fluid.framework.Variable): + if isinstance(xs, paddle.base.framework.Variable): xs = [xs] attrs = {'axis': axis} helper = LayerHelper('concat_p', **locals()) diff --git a/python/paddle/incubate/autograd/primx.py b/python/paddle/incubate/autograd/primx.py index b61076ed0df22..49c6876640a71 100644 --- a/python/paddle/incubate/autograd/primx.py +++ b/python/paddle/incubate/autograd/primx.py @@ -17,9 +17,9 @@ from collections import OrderedDict import paddle -from paddle.fluid import framework -from paddle.fluid.core import ops_contain_none, prim_config -from paddle.fluid.framework import Operator, default_main_program +from paddle.base import framework +from paddle.base.core import ops_contain_none, prim_config +from paddle.base.framework import Operator, default_main_program from paddle.incubate.autograd.utils import as_tensors from .composite_rules import _composite @@ -141,8 +141,8 @@ def add(self, key_var, value_var): def add_rec(self, key_vars, value_vars): if value_vars is None: return - if isinstance(key_vars, paddle.fluid.framework.Variable): - if not isinstance(value_vars, paddle.fluid.framework.Variable): + if isinstance(key_vars, paddle.base.framework.Variable): + if not isinstance(value_vars, paddle.base.framework.Variable): raise TypeError( f'value_vars must be Variable, but got {type(value_vars)}' ) @@ -212,7 +212,7 @@ def add_vars(self, new_vars): def add_vars_rec(self, new_vars): if new_vars is None: return - if isinstance(new_vars, paddle.fluid.framework.Variable): + if isinstance(new_vars, paddle.base.framework.Variable): self.vars.update({id(new_vars): new_vars}) return if not isinstance(new_vars, list): 
@@ -246,7 +246,7 @@ def erase_dots(self, vars_to_erase): def var2dot_rec(self, vars): """Lookup var2dot recursively.""" - if isinstance(vars, paddle.fluid.framework.Variable): + if isinstance(vars, paddle.base.framework.Variable): dot = self.var2dot.lookup(vars) return dot @@ -254,7 +254,7 @@ def var2dot_rec(self, vars): return dots def dot2bar_rec(self, dots): - if isinstance(dots, paddle.fluid.framework.Variable): + if isinstance(dots, paddle.base.framework.Variable): bar = self.dot2bar.lookup(dots) assert bar is not None, 'bar must be not None' return bar @@ -509,7 +509,7 @@ def expand_nested_list(xs): attrs = {} for name in sorted(op.attr_names): attrs[name] = op.attr(name) - from paddle.fluid.dygraph.base import param_guard + from paddle.base.dygraph.base import param_guard new_op_desc = block.desc.append_op() with param_guard(inputs), param_guard(outputs): @@ -561,7 +561,7 @@ def bind(args, to_bind, value_table): for i in range(len(args)): if isinstance(args[i], list): bind(args[i], to_bind, value_table) - if not isinstance(args[i], paddle.fluid.framework.Variable): + if not isinstance(args[i], paddle.base.framework.Variable): continue elif args[i] is not None and args[i].name in to_bind: args[i] = value_table[to_bind[args[i].name]] @@ -584,7 +584,7 @@ def expand_nested_list(xs): return_list.append(x) return return_list - if isinstance(block, paddle.fluid.framework.Block): + if isinstance(block, paddle.base.framework.Block): logging.info("Atomize composite op to primitive ops begin.") # Step1: Do some preparatory work for lower diff --git a/python/paddle/incubate/autograd/utils.py b/python/paddle/incubate/autograd/utils.py index 79667cafeaf3e..1aa4912a89bf6 100644 --- a/python/paddle/incubate/autograd/utils.py +++ b/python/paddle/incubate/autograd/utils.py @@ -16,7 +16,7 @@ import paddle import paddle.framework.dtype as dtypes -from paddle.fluid import framework +from paddle.base import framework from .phi_ops_map import op_info, op_map @@ -309,7 +309,7 @@ def map_output_for_composite(op): def flatten(inp): - if inp is None or isinstance(inp, paddle.fluid.framework.Variable): + if inp is None or isinstance(inp, paddle.base.framework.Variable): return [inp] flattened = [] for part in inp: diff --git a/python/paddle/incubate/autotune.py b/python/paddle/incubate/autotune.py index a4b9cbe7006bd..745ac9fc69c07 100644 --- a/python/paddle/incubate/autotune.py +++ b/python/paddle/incubate/autotune.py @@ -16,7 +16,7 @@ import warnings import paddle -from paddle.fluid import core +from paddle.base import core __all__ = ['set_config'] diff --git a/python/paddle/incubate/checkpoint/__init__.py b/python/paddle/incubate/checkpoint/__init__.py index 79e6259de0275..115cc0a255a41 100644 --- a/python/paddle/incubate/checkpoint/__init__.py +++ b/python/paddle/incubate/checkpoint/__init__.py @@ -12,6 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from ...fluid.incubate.checkpoint import auto_checkpoint # noqa: F401 +from ...base.incubate.checkpoint import auto_checkpoint # noqa: F401 __all__ = [] diff --git a/python/paddle/incubate/distributed/fleet/base.py b/python/paddle/incubate/distributed/fleet/base.py index 81d071bf98226..1bce9decb5290 100644 --- a/python/paddle/incubate/distributed/fleet/base.py +++ b/python/paddle/incubate/distributed/fleet/base.py @@ -14,9 +14,9 @@ import abc -from paddle import fluid +from paddle import base from paddle.distributed.fleet.base.role_maker import RoleMakerBase -from paddle.fluid.executor import Executor +from paddle.base.executor import Executor from paddle.optimizer import SGD from paddle.static.amp.decorator import OptimizerWithMixedPrecision @@ -200,7 +200,7 @@ def init(self, role_maker=None): Returns: None """ - self._executor = Executor(fluid.CPUPlace()) + self._executor = Executor(base.CPUPlace()) if role_maker and not isinstance(role_maker, RoleMakerBase): from paddle.incubate.distributed.fleet.role_maker import ( @@ -272,8 +272,8 @@ def save_persistables(self, executor, dirname, main_program=None): class DistributedOptimizer(metaclass=abc.ABCMeta): """ - DistributedOptimizer is a wrapper for paddle.fluid.optimizer - A user should pass a paddle.fluid.optimizer to DistributedOptimizer + DistributedOptimizer is a wrapper for paddle.base.optimizer + A user should pass a paddle.base.optimizer to DistributedOptimizer minimize() function is implemented. DistributedOptimizer is the starting point for a user who wants to run distributed training. The optimized information will be stored in @@ -345,7 +345,7 @@ def apply_gradients(self, params_grads): >>> # doctest: +SKIP('The network is not defined.') >>> loss = network() - >>> optimizer = fluid.optimizer.SGD(learning_rate=0.1) + >>> optimizer = base.optimizer.SGD(learning_rate=0.1) >>> params_grads = optimizer.backward(loss) >>> # you may append operations for params_grads here >>> # ... 
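# ---------------------------------------------------------------------------
# Minimal illustrative sketch (hedged; not one of the patch hunks): how
# downstream user code is expected to migrate once `paddle.fluid` becomes
# `paddle.base`. The `paddle.base.*` paths below are taken from the renamed
# imports elsewhere in this patch; the toy program itself is hypothetical.
import numpy as np

import paddle
from paddle import base                     # was: from paddle import fluid
from paddle.base.executor import Executor  # was: from paddle.fluid.executor import Executor

paddle.enable_static()
main_prog = base.Program()                  # was: fluid.Program()
startup_prog = base.Program()
with base.program_guard(main_prog, startup_prog):
    x = paddle.static.data(name="x", shape=[None, 1], dtype="float32")
    y = paddle.scale(x, scale=2.0)

exe = Executor(base.CPUPlace())             # was: fluid.CPUPlace()
exe.run(startup_prog)
(out,) = exe.run(main_prog, feed={"x": np.ones((1, 1), np.float32)}, fetch_list=[y])
# ---------------------------------------------------------------------------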
diff --git a/python/paddle/incubate/distributed/fleet/collective.py b/python/paddle/incubate/distributed/fleet/collective.py index 6b32ef85de3e5..8ac688107e97b 100644 --- a/python/paddle/incubate/distributed/fleet/collective.py +++ b/python/paddle/incubate/distributed/fleet/collective.py @@ -16,12 +16,12 @@ import paddle import paddle.distributed.transpiler.distribute_transpiler as dist_transpiler -from paddle import fluid +from paddle import base from paddle.distributed.fleet.meta_optimizers import RawProgramOptimizer -from paddle.fluid.compiler import CompiledProgram -from paddle.fluid.executor import Executor -from paddle.fluid.framework import Program -from paddle.fluid.incubate.checkpoint.checkpoint_saver import ( +from paddle.base.compiler import CompiledProgram +from paddle.base.executor import Executor +from paddle.base.framework import Program +from paddle.base.incubate.checkpoint.checkpoint_saver import ( CheckpointSaver, PaddleModel, ) @@ -203,7 +203,7 @@ def load_checkpoint( fleet = Collective() -class DistributedStrategy(fluid.BuildStrategy): +class DistributedStrategy(base.BuildStrategy): """ Init function of DistributedStrategy """ @@ -222,7 +222,7 @@ def __init__(self): self.use_amp = False # use mixed precision optimizer self.amp_loss_scaling = 2**15 - self.exec_strategy = fluid.ExecutionStrategy() + self.exec_strategy = base.ExecutionStrategy() # configurations below are used for unit test self._ut4grad_allreduce = False @@ -258,8 +258,8 @@ def apply_gradients(self, params_grads): class CollectiveOptimizer(DistributedOptimizer): """ - DistributedOptimizer is a wrapper for paddle.fluid.optimizer - A user should pass a paddle.fluid.optimizer to DistributedOptimizer + DistributedOptimizer is a wrapper for paddle.base.optimizer + A user should pass a paddle.base.optimizer to DistributedOptimizer minimize() function is implemented. DistributedOptimizer is the starting point for a user who wants to run distributed training. The optimized information will be stored in @@ -550,7 +550,7 @@ def minimize( main_program = loss.block.program if startup_program is None: - startup_program = fluid.default_startup_program() + startup_program = base.default_startup_program() fleet.startup_program = startup_program self._loss = loss diff --git a/python/paddle/incubate/distributed/fleet/fleet_util.py b/python/paddle/incubate/distributed/fleet/fleet_util.py index f809a536ca241..860c37705f4be 100644 --- a/python/paddle/incubate/distributed/fleet/fleet_util.py +++ b/python/paddle/incubate/distributed/fleet/fleet_util.py @@ -24,9 +24,9 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.distributed.fleet.utils.fs import HDFSClient -from paddle.fluid.log_helper import get_logger +from paddle.base.log_helper import get_logger from . 
import utils @@ -132,8 +132,8 @@ def rank0_error(self, s): def set_zero( self, var_name, - scope=fluid.global_scope(), - place=fluid.CPUPlace(), + scope=base.global_scope(), + place=base.CPUPlace(), param_type="int64", ): """ @@ -141,8 +141,8 @@ def set_zero( Args: var_name(str): name of Variable - scope(Scope): Scope object, default is fluid.global_scope() - place(Place): Place object, default is fluid.CPUPlace() + scope(Scope): Scope object, default is base.global_scope() + place(Place): Place object, default is base.CPUPlace() param_type(str): param data type, default is int64 Examples: @@ -159,7 +159,7 @@ def set_zero( def print_global_auc( self, - scope=fluid.global_scope(), + scope=base.global_scope(), stat_pos="_generated_var_2", stat_neg="_generated_var_3", print_prefix="", @@ -168,7 +168,7 @@ def print_global_auc( Print global auc of all distributed workers. Args: - scope(Scope): Scope object, default is fluid.global_scope() + scope(Scope): Scope object, default is base.global_scope() stat_pos(str): name of auc pos bucket Variable stat_neg(str): name of auc neg bucket Variable print_prefix(str): prefix of print auc @@ -183,11 +183,11 @@ def print_global_auc( # below is part of model emb = my_slot_net(slots, label) # emb can be fc layer of size 1 - similarity_norm = fluid.layers.sigmoid(paddle.clip(\ + similarity_norm = base.layers.sigmoid(paddle.clip(\ emb, min=-15.0, max=15.0), name="similarity_norm")\ - binary_predict = fluid.layers.concat(input=[\ + binary_predict = base.layers.concat(input=[\ paddle.subtract(\ - fluid.layers.ceil(similarity_norm), similarity_norm),\ + base.layers.ceil(similarity_norm), similarity_norm),\ similarity_norm], axis=1) auc, batch_auc, [batch_stat_pos, batch_stat_neg, stat_pos, \ stat_neg] = paddle.static.auc(input=binary_predict,\ @@ -200,7 +200,7 @@ def print_global_auc( def get_global_auc( self, - scope=fluid.global_scope(), + scope=base.global_scope(), stat_pos="_generated_var_2", stat_neg="_generated_var_3", ): @@ -208,7 +208,7 @@ def get_global_auc( Get global auc of all distributed workers. Args: - scope(Scope): Scope object, default is fluid.global_scope() + scope(Scope): Scope object, default is base.global_scope() stat_pos(str): name of auc pos bucket Variable stat_neg(str): name of auc neg bucket Variable @@ -869,8 +869,8 @@ def pull_all_dense_params(self, scope, program): pull all dense params in trainer of rank 0 Args: - scope(Scope): fluid Scope - program(Program): fluid Program + scope(Scope): base Scope + program(Program): base Program Examples: .. 
code-block:: python @@ -933,9 +933,9 @@ def save_paddle_inference_model( save paddle inference model, and upload to hdfs dnn_plugin path Args: - executor(Executor): fluid Executor - scope(Scope): fluid Scope - program(Program): fluid Program + executor(Executor): base Executor + scope(Scope): base Scope + program(Program): base Program feeded_vars(list[Variable]): feed vars target_vars(list[variable]): fetch vars output_path(str): hdfs/afs output path @@ -969,7 +969,7 @@ def save_paddle_inference_model( # pull dense before save self.pull_all_dense_params(scope, program) if fleet.worker_index() == 0: - with fluid.scope_guard(scope): + with base.scope_guard(scope): if save_combine: paddle.static.io.save_inference_model( model_name, @@ -1027,9 +1027,9 @@ def save_paddle_params( save paddle model, and upload to hdfs dnn_plugin path Args: - executor(Executor): fluid Executor - scope(Scope): fluid Scope - program(Program): fluid Program + executor(Executor): base Executor + scope(Scope): base Scope + program(Program): base Program model_name(str): save model local dir or filename output_path(str): hdfs/afs output path day(str|int): training day @@ -1084,7 +1084,7 @@ def save_paddle_params( self.pull_all_dense_params(scope, program) if fleet.worker_index() == 0: vars = [program.global_block().var(i) for i in var_names] - with fluid.scope_guard(scope): + with base.scope_guard(scope): if save_combine: paddle.static.io.save_vars( executor, "./", program, vars=vars, filename=model_name @@ -1326,7 +1326,7 @@ def get_online_pass_interval( def get_global_metrics( self, - scope=fluid.global_scope(), + scope=base.global_scope(), stat_pos_name="_generated_var_2", stat_neg_name="_generated_var_3", sqrerr_name="sqrerr", @@ -1341,7 +1341,7 @@ def get_global_metrics( actual_ctr, predicted_ctr, copc, mean_predict_qvalue, total_ins_num. Args: - scope(Scope): Scope object, default is fluid.global_scope() + scope(Scope): Scope object, default is base.global_scope() stat_pos_name(str): name of auc pos bucket Variable stat_neg_name(str): name of auc neg bucket Variable sqrerr_name(str): name of sqrerr Variable @@ -1374,11 +1374,11 @@ def get_global_metrics( label = paddle.static.data(name="click", shape=[-1, 1],\ dtype="int64", lod_level=0) emb = my_slot_net(slots, label) # emb can be fc layer of size 1 - similarity_norm = fluid.layers.sigmoid(paddle.clip(\ + similarity_norm = base.layers.sigmoid(paddle.clip(\ emb, min=-15.0, max=15.0), name="similarity_norm")\ - binary_predict = fluid.layers.concat(input=[\ + binary_predict = base.layers.concat(input=[\ paddle.subtract(\ - fluid.layers.ceil(similarity_norm), similarity_norm),\ + base.layers.ceil(similarity_norm), similarity_norm),\ similarity_norm], axis=1) auc, batch_auc, [batch_stat_pos, batch_stat_neg, stat_pos, \ stat_neg] = paddle.static.auc(input=binary_predict,\ @@ -1528,7 +1528,7 @@ def get_metric(name): def print_global_metrics( self, - scope=fluid.global_scope(), + scope=base.global_scope(), stat_pos_name="_generated_var_2", stat_neg_name="_generated_var_3", sqrerr_name="sqrerr", @@ -1544,7 +1544,7 @@ def print_global_metrics( actual_ctr, predicted_ctr, copc, mean_predict_qvalue, total_ins_num. 
Args: - scope(Scope): Scope object, default is fluid.global_scope() + scope(Scope): Scope object, default is base.global_scope() stat_pos_name(str): name of auc pos bucket Variable stat_neg_name(str): name of auc neg bucket Variable sqrerr_name(str): name of sqrerr Variable @@ -1574,11 +1574,11 @@ def print_global_metrics( label = paddle.static.data(name="click", shape=[-1, 1],\ dtype="int64", lod_level=0) emb = my_slot_net(slots, label) # emb can be fc layer of size 1 - similarity_norm = fluid.layers.sigmoid(paddle.clip(\ + similarity_norm = base.layers.sigmoid(paddle.clip(\ emb, min=-15.0, max=15.0), name="similarity_norm")\ - binary_predict = fluid.layers.concat(input=[\ + binary_predict = base.layers.concat(input=[\ paddle.subtract(\ - fluid.layers.ceil(similarity_norm), similarity_norm),\ + base.layers.ceil(similarity_norm), similarity_norm),\ similarity_norm], axis=1) auc, batch_auc, [batch_stat_pos, batch_stat_neg, stat_pos, \ stat_neg] = paddle.static.auc(input=binary_predict,\ diff --git a/python/paddle/incubate/distributed/fleet/parameter_server/distribute_transpiler/__init__.py b/python/paddle/incubate/distributed/fleet/parameter_server/distribute_transpiler/__init__.py index b3adb88271a75..d590fe145ebc3 100644 --- a/python/paddle/incubate/distributed/fleet/parameter_server/distribute_transpiler/__init__.py +++ b/python/paddle/incubate/distributed/fleet/parameter_server/distribute_transpiler/__init__.py @@ -27,7 +27,7 @@ Program, Executor, ) -from paddle.fluid.compiler import CompiledProgram +from paddle.base.compiler import CompiledProgram from paddle.distributed.transpiler.distribute_transpiler import ( DistributeTranspilerConfig, @@ -812,8 +812,8 @@ def is_valid(var): class ParameterServerOptimizer(DistributedOptimizer): """ - DistributedOptimizer is a wrapper for paddle.fluid.optimizer - A user should pass a paddle.fluid.optimizer to DistributedOptimizer + DistributedOptimizer is a wrapper for paddle.base.optimizer + A user should pass a paddle.base.optimizer to DistributedOptimizer minimize() function is implemented. DistributedOptimizer is the starting point for a user who wants to run distributed training. 
The optimized information will be stored in diff --git a/python/paddle/incubate/distributed/fleet/parameter_server/distribute_transpiler/distributed_strategy.py b/python/paddle/incubate/distributed/fleet/parameter_server/distribute_transpiler/distributed_strategy.py index 87cb5fa1d3b11..b7158f41e9892 100644 --- a/python/paddle/incubate/distributed/fleet/parameter_server/distribute_transpiler/distributed_strategy.py +++ b/python/paddle/incubate/distributed/fleet/parameter_server/distribute_transpiler/distributed_strategy.py @@ -16,7 +16,7 @@ import os -from paddle import fluid +from paddle import base from paddle.distributed.transpiler.distribute_transpiler import ( DistributeTranspilerConfig, ServerRuntimeConfig, @@ -171,13 +171,13 @@ def __init__(self): self._server_runtime_config = ServerRuntimeConfig() num_threads = int(os.getenv("CPU_NUM", "1")) - self._execute_strategy = fluid.ExecutionStrategy() - self._build_strategy = fluid.BuildStrategy() + self._execute_strategy = base.ExecutionStrategy() + self._build_strategy = base.BuildStrategy() self._execute_strategy.num_threads = num_threads if num_threads > 1: self._build_strategy.reduce_strategy = ( - fluid.BuildStrategy.ReduceStrategy.Reduce + base.BuildStrategy.ReduceStrategy.Reduce ) self.debug_opt = None self.use_ps_gpu = False @@ -287,7 +287,7 @@ def get_execute_strategy(self): return self._execute_strategy def set_execute_strategy(self, config): - if isinstance(config, fluid.ExecutionStrategy): + if isinstance(config, base.ExecutionStrategy): self._execute_strategy = config elif isinstance(config, dict): for key in config: @@ -312,7 +312,7 @@ def get_build_strategy(self): return self._build_strategy def set_build_strategy(self, config): - if isinstance(config, fluid.BuildStrategy): + if isinstance(config, base.BuildStrategy): self._build_strategy = config elif isinstance(config, dict): for key in config: diff --git a/python/paddle/incubate/distributed/fleet/parameter_server/ir/public.py b/python/paddle/incubate/distributed/fleet/parameter_server/ir/public.py index 75d65dc079e09..628d4f580bbad 100755 --- a/python/paddle/incubate/distributed/fleet/parameter_server/ir/public.py +++ b/python/paddle/incubate/distributed/fleet/parameter_server/ir/public.py @@ -148,7 +148,7 @@ def __init__(self, main_program, startup_program, strategy, role_maker): self.is_heter_ps_mode = role_maker._is_heter_parameter_server_mode except: warnings.warn( - "Using paddle.distributed.fleet instead of paddle.fluid.incubate.fleet" + "Using paddle.distributed.fleet instead of paddle.base.incubate.fleet" ) self.is_heter_ps_mode = False diff --git a/python/paddle/incubate/distributed/fleet/parameter_server/ir/vars_metatools.py b/python/paddle/incubate/distributed/fleet/parameter_server/ir/vars_metatools.py index 20914fda33705..99f753373ef2f 100644 --- a/python/paddle/incubate/distributed/fleet/parameter_server/ir/vars_metatools.py +++ b/python/paddle/incubate/distributed/fleet/parameter_server/ir/vars_metatools.py @@ -157,7 +157,7 @@ def equal(var1, var2): def __str__(self): origin_var_str = ( - "{name} : fluid.{type}.shape{shape}.astype({dtype})".format( + "{name} : base.{type}.shape{shape}.astype({dtype})".format( name=self.origin.name, type=self.origin.type, shape=self.origin.shape, @@ -166,7 +166,7 @@ def __str__(self): ) slice_var_str = ( - "{name} : fluid.{type}.shape{shape}.astype({dtype})" + "{name} : base.{type}.shape{shape}.astype({dtype})" ".slice({is_slice}).block({block_id}).offset({offset})".format( name=self.slice.name, type=self.slice.type, diff 
--git a/python/paddle/incubate/distributed/fleet/parameter_server/pslib/__init__.py b/python/paddle/incubate/distributed/fleet/parameter_server/pslib/__init__.py index 3f36562b2adc2..5dded6c9b8183 100644 --- a/python/paddle/incubate/distributed/fleet/parameter_server/pslib/__init__.py +++ b/python/paddle/incubate/distributed/fleet/parameter_server/pslib/__init__.py @@ -592,7 +592,7 @@ def shrink_dense_table(self, decay, emb_dim=11, scope=None, table_id=None): Args: decay (float): The decay rate, usually range in (0, 1). emb_dim (int, optional): One element's length in datanorm layer. Default is 11. - scope (Scope, optional): Scope object, default is fluid.global_scope(). Default is None. + scope (Scope, optional): Scope object, default is base.global_scope(). Default is None. table_id (int, optional): Table id of shrinking dense table. None means shrink all, you should specify it when using multiple scopes, default is None. @@ -692,7 +692,7 @@ def load_pslib_whitelist(self, table_id, model_path, **kwargs): # below is how to save proto binary file with open("my_program.bin", "wb") as fout: - my_program = fluid.default_main_program() + my_program = base.default_main_program() fout.write(my_program.desc.serialize_to_string()) """ @@ -736,7 +736,7 @@ def load_one_table(self, table_id, model_path, **kwargs): load_combine = False) # below is how to save proto binary file with open("my_program.bin", "wb") as fout: - my_program = fluid.default_main_program() + my_program = base.default_main_program() fout.write(my_program.desc.serialize_to_string()) """ @@ -1228,7 +1228,7 @@ class fleet_embedding: size=[-1, 11], is_sparse=True, is_distributed=True, - param_attr=fluid.ParamAttr(name="embedding")) + param_attr=base.ParamAttr(name="embedding")) """ def __init__(self, click_name, scale_sparse_grad=True): @@ -1257,8 +1257,8 @@ def __exit__(self, exc_type, exc_val, exc_tb): class DownpourOptimizer(DistributedOptimizer): """ - DistributedOptimizer is a wrapper for paddle.fluid.optimizer - A user should pass a paddle.fluid.optimizer to DistributedOptimizer + DistributedOptimizer is a wrapper for paddle.base.optimizer + A user should pass a paddle.base.optimizer to DistributedOptimizer minimize() function is implemented. DistributedOptimizer is the starting point for a user who wants to run distributed training. 
The optimized information will be stored in diff --git a/python/paddle/incubate/distributed/fleet/role_maker.py b/python/paddle/incubate/distributed/fleet/role_maker.py index f15a5b6598ef9..920f04ac3fa73 100644 --- a/python/paddle/incubate/distributed/fleet/role_maker.py +++ b/python/paddle/incubate/distributed/fleet/role_maker.py @@ -17,7 +17,7 @@ import time from multiprocessing import Manager, Process -from paddle import fluid +from paddle import base __all__ = [] @@ -664,7 +664,7 @@ def generate_role(self): self._node_type = 1 self._cur_endpoint = worker_endpoints[current_id] if self._is_barrier_all: - gloo = fluid.core.Gloo() + gloo = base.core.Gloo() gloo.set_rank(current_id) gloo.set_size(len(worker_endpoints)) gloo.set_prefix(self._prefix) @@ -687,7 +687,7 @@ def generate_role(self): gloo.init() self._node_type_comm = gloo if self._use_ps_gpu or self._use_metric: - Gloo_strategy = fluid.core.GlooParallelStrategy() + Gloo_strategy = base.core.GlooParallelStrategy() Gloo_strategy.rank = current_id Gloo_strategy.rank_num = len(worker_endpoints) Gloo_strategy.ip_address = self._http_ip_port[0] @@ -698,7 +698,7 @@ def generate_role(self): Default_init_timeout_seconds ) Gloo_strategy.run_seconds = Default_run_timeout_seconds - Gloo = fluid.core.GlooParallelContext(Gloo_strategy) + Gloo = base.core.GlooParallelContext(Gloo_strategy) Gloo.init() else: self._all_comm = MockBarrier() @@ -715,7 +715,7 @@ def generate_role(self): current_id = eplist.index(cur_endpoint) self._node_type = 0 self._cur_endpoint = cur_endpoint - gloo = fluid.core.Gloo() + gloo = base.core.Gloo() gloo.set_rank(current_id) gloo.set_size(len(eplist)) gloo.set_prefix(self._prefix) @@ -738,7 +738,7 @@ def generate_role(self): gloo.init() self._node_type_comm = gloo - gloo = fluid.core.Gloo() + gloo = base.core.Gloo() all_list = worker_endpoints + eplist gloo.set_rank(all_list.index(self._cur_endpoint)) gloo.set_size(len(all_list)) @@ -1071,7 +1071,7 @@ def generate_role(self): current_id = int(os.environ["PADDLE_TRAINER_ID"]) self._node_type = 1 self._cur_endpoint = worker_endpoints[current_id] - gloo = fluid.core.Gloo() + gloo = base.core.Gloo() gloo.set_rank(current_id) gloo.set_size(len(worker_endpoints)) @@ -1092,7 +1092,7 @@ def generate_role(self): current_id = int(os.environ["PADDLE_XPU_ID"]) self._node_type = 2 self._cur_endpoint = xpu_endpoints[current_id] - gloo = fluid.core.Gloo() + gloo = base.core.Gloo() gloo.set_rank(current_id) gloo.set_size(len(xpu_endpoints)) @@ -1121,7 +1121,7 @@ def generate_role(self): current_id = eplist.index(cur_endpoint) self._node_type = 0 self._cur_endpoint = cur_endpoint - gloo = fluid.core.Gloo() + gloo = base.core.Gloo() gloo.set_rank(current_id) gloo.set_size(len(eplist)) gloo.set_prefix(self._prefix) @@ -1138,7 +1138,7 @@ def generate_role(self): self._node_type_comm = gloo if training_role == "TRAINER" or training_role == "XPU": - gloo = fluid.core.Gloo() + gloo = base.core.Gloo() heter_list = worker_endpoints + xpu_endpoints gloo.set_rank(heter_list.index(self._cur_endpoint)) @@ -1156,7 +1156,7 @@ def generate_role(self): gloo.init() self._heter_comm = gloo - gloo = fluid.core.Gloo() + gloo = base.core.Gloo() all_list = worker_endpoints + eplist + xpu_endpoints gloo.set_rank(all_list.index(self._cur_endpoint)) diff --git a/python/paddle/incubate/distributed/fleet/utils.py b/python/paddle/incubate/distributed/fleet/utils.py index ca30f1caff128..4a60900f75435 100644 --- a/python/paddle/incubate/distributed/fleet/utils.py +++ 
b/python/paddle/incubate/distributed/fleet/utils.py @@ -21,11 +21,11 @@ from google.protobuf import text_format import paddle -from paddle import fluid +from paddle import base from paddle.distributed.fleet.base.util_factory import draw_block_graphviz -from paddle.fluid import core -from paddle.fluid.framework import Program -from paddle.fluid.proto import framework_pb2 +from paddle.base import core +from paddle.base.framework import Program +from paddle.base.proto import framework_pb2 from paddle.framework import io_utils __all__ = [ @@ -168,21 +168,21 @@ def append_load_op(block, var, path): def save_var(np_array, var_name, shape_list, dtype, save_path): - program = fluid.Program() - place = fluid.CPUPlace() - exe = fluid.Executor(place) + program = base.Program() + place = base.CPUPlace() + exe = base.Executor(place) shape = list(shape_list) - with fluid.program_guard(program): + with base.program_guard(program): d0_data = paddle.static.data(var_name, shape=shape, dtype=dtype) append_save_op(program.global_block(), d0_data, save_path) exe.run(feed={var_name: np_array}, fetch_list=[]) def load_var(var_name, shape_list, dtype, save_path): - program = fluid.Program() - place = fluid.CPUPlace() - exe = fluid.Executor(place) - with fluid.program_guard(program): + program = base.Program() + place = base.CPUPlace() + exe = base.Executor(place) + with base.program_guard(program): d0_data = paddle.static.data(var_name, shape=shape_list, dtype=dtype) append_load_op(program.global_block(), d0_data, save_path) outs = exe.run(feed={}, fetch_list=[d0_data]) @@ -230,10 +230,10 @@ def try_load_model_vars( save_filename, saved_params, ): - place = fluid.CPUPlace() - exe = fluid.Executor(place) - scope = fluid.core.Scope() - with fluid.scope_guard(scope): + place = base.CPUPlace() + exe = base.Executor(place) + scope = base.core.Scope() + with base.scope_guard(scope): if is_text_dump_program: dump_prog_fn = program_type_trans( dump_dir, dump_prog_fn, is_text_dump_program @@ -256,7 +256,7 @@ def try_load_model_vars( for each_var in saved_params } for each_var in saved_params: - var_temp = fluid.global_scope().find_var(each_var.name) + var_temp = base.global_scope().find_var(each_var.name) assert var_temp is not None, "can't not find var: " + each_var.name new_shape = (np.array(var_temp.get_tensor())).shape assert each_var.name in orig_para_shape, ( @@ -381,7 +381,7 @@ def try_load_model_vars( dtype=feed_config.feeded_vars_types[i], ) feed_tensors.append( - fluid.create_lod_tensor(t, [[1] * batch_size], place) + base.create_lod_tensor(t, [[1] * batch_size], place) ) else: raise RuntimeError( @@ -408,7 +408,7 @@ def try_load_model_vars( ) for i in range(len(feed_config.feeded_vars_names)) ] - feeder = fluid.DataFeeder(feed_list=feed_vars, place=place) + feeder = base.DataFeeder(feed_list=feed_vars, place=place) batch_feed = feed_gen( batch_size, feed_config.feeded_vars_dims, diff --git a/python/paddle/incubate/distributed/utils/io/dist_load.py b/python/paddle/incubate/distributed/utils/io/dist_load.py index 5815b0237baef..be37471ec0a60 100644 --- a/python/paddle/incubate/distributed/utils/io/dist_load.py +++ b/python/paddle/incubate/distributed/utils/io/dist_load.py @@ -18,7 +18,7 @@ import paddle import paddle.distributed as dist from paddle.distributed import fleet -from paddle.fluid.framework import dygraph_only +from paddle.base.framework import dygraph_only @dygraph_only diff --git a/python/paddle/incubate/distributed/utils/io/dist_save.py b/python/paddle/incubate/distributed/utils/io/dist_save.py 
index ce0d2d05042d9..f05828ea92bd0 100644 --- a/python/paddle/incubate/distributed/utils/io/dist_save.py +++ b/python/paddle/incubate/distributed/utils/io/dist_save.py @@ -20,7 +20,7 @@ import paddle.distributed as dist from paddle.distributed import fleet from paddle.distributed.fleet.utils.log_util import logger -from paddle.fluid.framework import dygraph_only +from paddle.base.framework import dygraph_only from .save_for_auto import save_for_auto_inference diff --git a/python/paddle/incubate/distributed/utils/io/save_for_auto.py b/python/paddle/incubate/distributed/utils/io/save_for_auto.py index 2a7749947995d..da127303ad1b7 100644 --- a/python/paddle/incubate/distributed/utils/io/save_for_auto.py +++ b/python/paddle/incubate/distributed/utils/io/save_for_auto.py @@ -26,7 +26,7 @@ GroupShardedStage3, ) from paddle.distributed.fleet.utils.log_util import logger -from paddle.fluid.framework import dygraph_only +from paddle.base.framework import dygraph_only __all__ = ["save_for_auto_inference"] diff --git a/python/paddle/incubate/layers/nn.py b/python/paddle/incubate/layers/nn.py index 1777aaa9a4b24..167b52cde2693 100644 --- a/python/paddle/incubate/layers/nn.py +++ b/python/paddle/incubate/layers/nn.py @@ -21,15 +21,15 @@ import paddle from paddle import _legacy_C_ops -from paddle.fluid import core, unique_name -from paddle.fluid.data_feeder import ( +from paddle.base import core, unique_name +from paddle.base.data_feeder import ( check_dtype, check_type, check_variable_and_dtype, ) -from paddle.fluid.framework import Variable, convert_np_dtype_to_dtype_ -from paddle.fluid.layer_helper import LayerHelper -from paddle.fluid.param_attr import ParamAttr +from paddle.base.framework import Variable, convert_np_dtype_to_dtype_ +from paddle.base.layer_helper import LayerHelper +from paddle.base.param_attr import ParamAttr __all__ = [] @@ -1134,7 +1134,7 @@ def bilateral_slice(x, guide, grid, has_offset, name=None): """ :alias_main: paddle.nn.functional.bilateral_slice :alias: paddle.nn.functional.bilateral_slice,paddle.nn.functional.vision.bilateral_slice - :old_api: paddle.fluid.layers.bilateral_slice + :old_api: paddle.base.layers.bilateral_slice This operation implements bilateral slicing on the input according to the guide map. 
For more information of bilateral slicing, please refer to Deep Bilateral Learning for Real-Time Image Enhancement _ diff --git a/python/paddle/incubate/multiprocessing/reductions.py b/python/paddle/incubate/multiprocessing/reductions.py index 5bc1e21186212..520aa0a2b24ee 100644 --- a/python/paddle/incubate/multiprocessing/reductions.py +++ b/python/paddle/incubate/multiprocessing/reductions.py @@ -75,14 +75,14 @@ def _cuda_from_cache(key): def _rebuild_tensor(cls, lodtensor, metadata): - if cls == paddle.fluid.framework.EagerParamBase: - tensor = paddle.fluid.framework.EagerParamBase( + if cls == paddle.base.framework.EagerParamBase: + tensor = paddle.base.framework.EagerParamBase( lodtensor.shape(), lodtensor._dtype(), **metadata ) tensor.value().get_tensor()._share_data_with(lodtensor) else: size, stop_gradient = metadata - tensor = paddle.fluid.core.eager.Tensor() + tensor = paddle.base.core.eager.Tensor() if lodtensor._is_initialized(): tensor.value().get_tensor()._share_data_with(lodtensor) else: @@ -104,7 +104,7 @@ def _reduce_tensor(tensor): or tensor.place.is_gpu_place() or tensor.place.is_cuda_pinned_place() ): - if type(tensor) == paddle.fluid.framework.EagerParamBase: + if type(tensor) == paddle.base.framework.EagerParamBase: metadata = copy.deepcopy(tensor.__dict__) else: metadata = (tensor.size, tensor.stop_gradient) @@ -138,7 +138,7 @@ def _rebuild_cuda_tensor( # you should manualy maintian the lifecycle of ipc tensor shared_cache[(handle, offset_bytes)] = lodtensor else: - lodtensor = paddle.fluid.core.LoDTensor() + lodtensor = paddle.base.core.LoDTensor() lodtensor._share_buffer_with( cache_tensor, (size, type_idx, dims, lod, device_idx) ) @@ -184,8 +184,8 @@ def init_reductions(): return ForkingPickler.register(paddle.Tensor, _reduce_tensor) - ForkingPickler.register(paddle.fluid.core.eager.Tensor, _reduce_tensor) + ForkingPickler.register(paddle.base.core.eager.Tensor, _reduce_tensor) ForkingPickler.register( - paddle.fluid.framework.EagerParamBase, _reduce_tensor + paddle.base.framework.EagerParamBase, _reduce_tensor ) - ForkingPickler.register(paddle.fluid.core.LoDTensor, _reduce_lodtensor) + ForkingPickler.register(paddle.base.core.LoDTensor, _reduce_lodtensor) diff --git a/python/paddle/incubate/nn/functional/fused_dropout_add.py b/python/paddle/incubate/nn/functional/fused_dropout_add.py index 4c47a2302ef6c..c8995bb7334a8 100644 --- a/python/paddle/incubate/nn/functional/fused_dropout_add.py +++ b/python/paddle/incubate/nn/functional/fused_dropout_add.py @@ -15,7 +15,7 @@ from paddle import _C_ops from paddle.common_ops_import import default_main_program -from paddle.fluid import core +from paddle.base import core from paddle.framework import LayerHelper, in_dynamic_mode diff --git a/python/paddle/incubate/nn/functional/fused_ec_moe.py b/python/paddle/incubate/nn/functional/fused_ec_moe.py index 9f067acbb0de0..3b4b85029fadd 100644 --- a/python/paddle/incubate/nn/functional/fused_ec_moe.py +++ b/python/paddle/incubate/nn/functional/fused_ec_moe.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from paddle.fluid.layer_helper import LayerHelper +from paddle.base.layer_helper import LayerHelper def fused_ec_moe( diff --git a/python/paddle/incubate/nn/functional/fused_matmul_bias.py b/python/paddle/incubate/nn/functional/fused_matmul_bias.py index 526f8a3fec05f..83d3b5a91d4ba 100644 --- a/python/paddle/incubate/nn/functional/fused_matmul_bias.py +++ b/python/paddle/incubate/nn/functional/fused_matmul_bias.py @@ -13,7 +13,7 @@ # limitations under the License. from paddle import _legacy_C_ops -from paddle.fluid.layer_helper import LayerHelper +from paddle.base.layer_helper import LayerHelper from paddle.framework import in_dynamic_mode from paddle.tensor.linalg import matmul diff --git a/python/paddle/incubate/nn/functional/fused_transformer.py b/python/paddle/incubate/nn/functional/fused_transformer.py index 469aea26cc600..355b5916b5ddb 100644 --- a/python/paddle/incubate/nn/functional/fused_transformer.py +++ b/python/paddle/incubate/nn/functional/fused_transformer.py @@ -13,10 +13,10 @@ # limitations under the License. from paddle import _legacy_C_ops -from paddle.fluid import core -from paddle.fluid.data_feeder import check_dtype, check_variable_and_dtype -from paddle.fluid.framework import default_main_program -from paddle.fluid.layer_helper import LayerHelper +from paddle.base import core +from paddle.base.data_feeder import check_dtype, check_variable_and_dtype +from paddle.base.framework import default_main_program +from paddle.base.layer_helper import LayerHelper from paddle.framework import in_dynamic_mode __all__ = [] diff --git a/python/paddle/incubate/nn/layer/fused_transformer.py b/python/paddle/incubate/nn/layer/fused_transformer.py index a3f3ea1d83af4..a09cc521a0e79 100644 --- a/python/paddle/incubate/nn/layer/fused_transformer.py +++ b/python/paddle/incubate/nn/layer/fused_transformer.py @@ -14,10 +14,10 @@ import numpy as np import paddle -from paddle.fluid import core -from paddle.fluid.core import VarDesc -from paddle.fluid.dygraph import no_grad -from paddle.fluid.framework import convert_np_dtype_to_dtype_ +from paddle.base import core +from paddle.base.core import VarDesc +from paddle.base.dygraph import no_grad +from paddle.base.framework import convert_np_dtype_to_dtype_ from paddle.framework import in_dynamic_mode from paddle.incubate.nn import functional as incubate_f from paddle.nn import Layer @@ -66,7 +66,7 @@ def _to_dtype(t, dtype): t_used = t if dtype is not None and dtype != t_used.dtype: - with paddle.fluid.framework._dygraph_place_guard(place=t_used.place): + with paddle.base.framework._dygraph_place_guard(place=t_used.place): t_casted = t_used.cast(dtype=dtype) else: t_casted = t_used diff --git a/python/paddle/incubate/nn/layer/io.py b/python/paddle/incubate/nn/layer/io.py index f464d93833b2f..b96b945b7b71e 100644 --- a/python/paddle/incubate/nn/layer/io.py +++ b/python/paddle/incubate/nn/layer/io.py @@ -13,7 +13,7 @@ # limitations under the License. import paddle -from ....fluid.framework import Variable +from ....base.framework import Variable from ....framework import LayerHelper, core @@ -55,9 +55,9 @@ class ListenAndServ: Examples: .. 
code-block:: python - import paddle.fluid as fluid + import paddle.base as base import paddle - with fluid.program_guard(main): + with base.program_guard(main): serv = layers.ListenAndServ( "127.0.0.1:6170", ["X"], optimizer_mode=False) with serv.do(): @@ -68,7 +68,7 @@ class ListenAndServ: paddle.nn.initializer.Constant(value=1.0)(x, main.global_block()) paddle.scale(x=x, scale=10.0, out=out_var) - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(main) """ diff --git a/python/paddle/incubate/nn/loss.py b/python/paddle/incubate/nn/loss.py index d31fe41d8ce3f..0a8043b445b82 100644 --- a/python/paddle/incubate/nn/loss.py +++ b/python/paddle/incubate/nn/loss.py @@ -13,8 +13,8 @@ # limitations under the License. from paddle import _legacy_C_ops -from paddle.fluid.data_feeder import check_variable_and_dtype -from paddle.fluid.layer_helper import LayerHelper +from paddle.base.data_feeder import check_variable_and_dtype +from paddle.base.layer_helper import LayerHelper from paddle.framework import in_dynamic_mode diff --git a/python/paddle/incubate/nn/memory_efficient_attention.py b/python/paddle/incubate/nn/memory_efficient_attention.py index 565809cf7df48..eff81e25ec454 100644 --- a/python/paddle/incubate/nn/memory_efficient_attention.py +++ b/python/paddle/incubate/nn/memory_efficient_attention.py @@ -21,7 +21,7 @@ import paddle from paddle import _C_ops -from paddle.fluid.layer_helper import LayerHelper +from paddle.base.layer_helper import LayerHelper from paddle.framework import in_dynamic_mode from .attn_bias import ( diff --git a/python/paddle/incubate/operators/graph_khop_sampler.py b/python/paddle/incubate/operators/graph_khop_sampler.py index 9cade59f1fff3..45555786eb02d 100644 --- a/python/paddle/incubate/operators/graph_khop_sampler.py +++ b/python/paddle/incubate/operators/graph_khop_sampler.py @@ -13,8 +13,8 @@ # limitations under the License. from paddle import _legacy_C_ops -from paddle.fluid.data_feeder import check_variable_and_dtype -from paddle.fluid.layer_helper import LayerHelper +from paddle.base.data_feeder import check_variable_and_dtype +from paddle.base.layer_helper import LayerHelper from paddle.framework import in_dynamic_mode diff --git a/python/paddle/incubate/operators/graph_reindex.py b/python/paddle/incubate/operators/graph_reindex.py index 2594ed7ce056e..d0967409b72a7 100644 --- a/python/paddle/incubate/operators/graph_reindex.py +++ b/python/paddle/incubate/operators/graph_reindex.py @@ -13,8 +13,8 @@ # limitations under the License. from paddle import _C_ops -from paddle.fluid.data_feeder import check_variable_and_dtype -from paddle.fluid.layer_helper import LayerHelper +from paddle.base.data_feeder import check_variable_and_dtype +from paddle.base.layer_helper import LayerHelper from paddle.framework import in_dynamic_mode from paddle.utils import deprecated diff --git a/python/paddle/incubate/operators/graph_sample_neighbors.py b/python/paddle/incubate/operators/graph_sample_neighbors.py index 169acca5fdc63..e49b125e8fa8d 100644 --- a/python/paddle/incubate/operators/graph_sample_neighbors.py +++ b/python/paddle/incubate/operators/graph_sample_neighbors.py @@ -13,8 +13,8 @@ # limitations under the License. 
from paddle import _legacy_C_ops -from paddle.fluid.data_feeder import check_variable_and_dtype -from paddle.fluid.layer_helper import LayerHelper +from paddle.base.data_feeder import check_variable_and_dtype +from paddle.base.layer_helper import LayerHelper from paddle.framework import in_dynamic_mode from paddle.utils import deprecated diff --git a/python/paddle/incubate/operators/graph_send_recv.py b/python/paddle/incubate/operators/graph_send_recv.py index 7a874f19249e1..6e60d3a2979d5 100644 --- a/python/paddle/incubate/operators/graph_send_recv.py +++ b/python/paddle/incubate/operators/graph_send_recv.py @@ -16,14 +16,14 @@ import paddle from paddle import _C_ops -from paddle.fluid.data_feeder import ( +from paddle.base.data_feeder import ( check_dtype, check_type, check_variable_and_dtype, convert_dtype, ) -from paddle.fluid.framework import Variable -from paddle.fluid.layer_helper import LayerHelper +from paddle.base.framework import Variable +from paddle.base.layer_helper import LayerHelper from paddle.framework import in_dynamic_mode from paddle.utils import deprecated diff --git a/python/paddle/incubate/operators/resnet_unit.py b/python/paddle/incubate/operators/resnet_unit.py index a02201180c762..54d07d3292506 100644 --- a/python/paddle/incubate/operators/resnet_unit.py +++ b/python/paddle/incubate/operators/resnet_unit.py @@ -15,9 +15,9 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid.layer_helper import LayerHelper -from paddle.fluid.param_attr import ParamAttr +from paddle import base +from paddle.base.layer_helper import LayerHelper +from paddle.base.param_attr import ParamAttr from paddle.nn import Layer from paddle.nn import initializer as I @@ -50,8 +50,8 @@ def resnet_unit( act, ): helper = LayerHelper('resnet_unit', **locals()) - bn_param_dtype = fluid.core.VarDesc.VarType.FP32 - bit_mask_dtype = fluid.core.VarDesc.VarType.INT32 + bn_param_dtype = base.core.VarDesc.VarType.FP32 + bit_mask_dtype = base.core.VarDesc.VarType.INT32 out = helper.create_variable_for_type_inference(x.dtype) bit_mask = helper.create_variable_for_type_inference( dtype=bit_mask_dtype, stop_gradient=True @@ -213,7 +213,7 @@ def _get_default_param_initializer(channels): is_nchw = data_format == 'NCHW' # initial filter - bn_param_dtype = fluid.core.VarDesc.VarType.FP32 + bn_param_dtype = base.core.VarDesc.VarType.FP32 if not is_nchw: bn_param_shape = [1, 1, 1, num_filters] filter_x_shape = [ diff --git a/python/paddle/incubate/operators/softmax_mask_fuse.py b/python/paddle/incubate/operators/softmax_mask_fuse.py index 178cfd9a046ce..54106536dc709 100644 --- a/python/paddle/incubate/operators/softmax_mask_fuse.py +++ b/python/paddle/incubate/operators/softmax_mask_fuse.py @@ -13,7 +13,7 @@ # limitations under the License. from paddle import _legacy_C_ops -from paddle.fluid.layer_helper import LayerHelper +from paddle.base.layer_helper import LayerHelper from paddle.framework import in_dynamic_mode diff --git a/python/paddle/incubate/operators/softmax_mask_fuse_upper_triangle.py b/python/paddle/incubate/operators/softmax_mask_fuse_upper_triangle.py index dd8e229a1e9c2..a3f84ea4f0356 100644 --- a/python/paddle/incubate/operators/softmax_mask_fuse_upper_triangle.py +++ b/python/paddle/incubate/operators/softmax_mask_fuse_upper_triangle.py @@ -13,7 +13,7 @@ # limitations under the License. 
from paddle import _legacy_C_ops -from paddle.fluid.layer_helper import LayerHelper +from paddle.base.layer_helper import LayerHelper from paddle.framework import in_dynamic_mode diff --git a/python/paddle/incubate/operators/unzip.py b/python/paddle/incubate/operators/unzip.py index 68a491300c5d2..2edf3a392025d 100644 --- a/python/paddle/incubate/operators/unzip.py +++ b/python/paddle/incubate/operators/unzip.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -from paddle.fluid.data_feeder import check_variable_and_dtype -from paddle.fluid.layer_helper import LayerHelper +from paddle.base.data_feeder import check_variable_and_dtype +from paddle.base.layer_helper import LayerHelper def unzip(input, lod): diff --git a/python/paddle/incubate/optimizer/distributed_fused_lamb.py b/python/paddle/incubate/optimizer/distributed_fused_lamb.py index 9f2873fe81b09..fe6442756e3c7 100644 --- a/python/paddle/incubate/optimizer/distributed_fused_lamb.py +++ b/python/paddle/incubate/optimizer/distributed_fused_lamb.py @@ -15,10 +15,10 @@ import os import paddle -from paddle.fluid import core, unique_name -from paddle.fluid.executor import global_scope -from paddle.fluid.framework import Variable, name_scope -from paddle.fluid.layer_helper import LayerHelper +from paddle.base import core, unique_name +from paddle.base.executor import global_scope +from paddle.base.framework import Variable, name_scope +from paddle.base.layer_helper import LayerHelper from paddle.nn import ClipGradByGlobalNorm from paddle.optimizer import Optimizer diff --git a/python/paddle/incubate/optimizer/functional/utils.py b/python/paddle/incubate/optimizer/functional/utils.py index 674c56c0530fb..52edef7177ae9 100644 --- a/python/paddle/incubate/optimizer/functional/utils.py +++ b/python/paddle/incubate/optimizer/functional/utils.py @@ -13,8 +13,8 @@ # limitations under the License. import paddle -from paddle.fluid.data_feeder import check_type -from paddle.fluid.framework import Variable +from paddle.base.data_feeder import check_type +from paddle.base.framework import Variable def check_input_type(input, name, op_name): diff --git a/python/paddle/incubate/optimizer/gradient_merge.py b/python/paddle/incubate/optimizer/gradient_merge.py index bd99b1aebb779..c407780f50945 100644 --- a/python/paddle/incubate/optimizer/gradient_merge.py +++ b/python/paddle/incubate/optimizer/gradient_merge.py @@ -14,8 +14,8 @@ import paddle -from paddle.fluid import core -from paddle.fluid.framework import ( +from paddle.base import core +from paddle.base.framework import ( Variable, default_main_program, default_startup_program, @@ -51,7 +51,7 @@ class GradientMergeOptimizer: .. 
code-block:: python import paddle - import paddle.fluid as fluid + import paddle.base as base import numpy as np def gen_data(batch_size): @@ -75,13 +75,13 @@ def mlp(input_x, input_y, hid_dim=128, label_dim=2): sgd = paddle.incubate.optimizer.GradientMergeOptimizer(sgd, k_steps=4, avg=True) sgd.minimize(cost) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + place = base.CPUPlace() + exe = base.Executor(place) + exe.run(base.default_startup_program()) for i in range(10): cost_val = exe.run(feed=gen_data(32), - program=fluid.default_main_program(), + program=base.default_main_program(), fetch_list=[cost.name]) print("step=%d, cost=%f" % (i, cost_val[0])) """ diff --git a/python/paddle/incubate/optimizer/lars_momentum.py b/python/paddle/incubate/optimizer/lars_momentum.py index 57055b4a923cc..5df9160d3d11b 100644 --- a/python/paddle/incubate/optimizer/lars_momentum.py +++ b/python/paddle/incubate/optimizer/lars_momentum.py @@ -14,8 +14,8 @@ import warnings from paddle import _legacy_C_ops -from paddle.fluid import framework -from paddle.fluid.framework import in_dygraph_mode +from paddle.base import framework +from paddle.base.framework import in_dygraph_mode from paddle.optimizer import Optimizer @@ -44,14 +44,14 @@ class LarsMomentumOptimizer(Optimizer): This parameter is required in dygraph mode. \ The default value is None in static graph mode, at this time all parameters will be updated. regularization (WeightDecayRegularizer, optional): The strategy of regularization. There are two method: \ - :ref:`api_fluid_regularizer_L1Decay` , :ref:`api_fluid_regularizer_L2Decay` . If a parameter has set \ - regularizer using :ref:`api_fluid_ParamAttr` already, the regularization setting here in optimizer will be \ + :ref:`api_base_regularizer_L1Decay` , :ref:`api_base_regularizer_L2Decay` . If a parameter has set \ + regularizer using :ref:`api_base_ParamAttr` already, the regularization setting here in optimizer will be \ ignored for this parameter. Otherwise, the regularization setting here in optimizer will take effect. \ Default None, meaning there is no regularization. grad_clip (GradientClipBase, optional): Gradient cliping strategy, it's an instance of some derived class of ``GradientClipBase`` . There are three cliping strategies - ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , - :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping. + ( :ref:`api_base_clip_GradientClipByGlobalNorm` , :ref:`api_base_clip_GradientClipByNorm` , + :ref:`api_base_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping. name (str, optional): This parameter is used by developers to print debugging information. \ For details, please refer to :ref:`api_guide_Name`. Default is None. exclude_from_weight_decay (list[str], optional): Name string of layers which will be exclude from lars weight decay. Default is None. @@ -64,7 +64,7 @@ class LarsMomentumOptimizer(Optimizer): .. 
code-block:: python import paddle - import paddle.fluid as fluid + import paddle.base as base import numpy as np paddle.enable_static() @@ -73,11 +73,11 @@ class LarsMomentumOptimizer(Optimizer): name="inp", shape=[2, 2], dtype='float32') out = paddle.static.nn.fc(inp, size=3) out = paddle.sum(out) - optimizer = fluid.optimizer.LarsMomentumOptimizer(learning_rate=0.001, momentum=0.9) + optimizer = base.optimizer.LarsMomentumOptimizer(learning_rate=0.001, momentum=0.9) optimizer.minimize(out) - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) + exe = base.Executor(base.CPUPlace()) + exe.run(base.default_startup_program()) exe.run( feed={"inp": np_inp}, fetch_list=[out.name]) diff --git a/python/paddle/incubate/optimizer/lbfgs.py b/python/paddle/incubate/optimizer/lbfgs.py index 04d90d1f8c271..2aa4ae1e21b7b 100644 --- a/python/paddle/incubate/optimizer/lbfgs.py +++ b/python/paddle/incubate/optimizer/lbfgs.py @@ -57,15 +57,15 @@ class LBFGS(Optimizer): This parameter is required in dygraph mode. The default value is None. weight_decay (float|WeightDecayRegularizer, optional): The strategy of regularization. \ It canbe a float value as coeff of L2 regularization or \ - :ref:`api_fluid_regularizer_L1Decay`, :ref:`api_fluid_regularizer_L2Decay`. - If a parameter has set regularizer using :ref:`api_fluid_ParamAttr` already, \ + :ref:`api_base_regularizer_L1Decay`, :ref:`api_base_regularizer_L2Decay`. + If a parameter has set regularizer using :ref:`api_base_ParamAttr` already, \ the regularization setting here in optimizer will be ignored for this parameter. \ Otherwise, the regularization setting here in optimizer will take effect. \ Default None, meaning there is no regularization. grad_clip (GradientClipBase, optional): Gradient cliping strategy, it's an instance of \ some derived class of ``GradientClipBase`` . There are three cliping strategies \ - ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , \ - :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping. + ( :ref:`api_base_clip_GradientClipByGlobalNorm` , :ref:`api_base_clip_GradientClipByNorm` , \ + :ref:`api_base_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping. name (str, optional): Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. The default value is None. diff --git a/python/paddle/incubate/optimizer/lookahead.py b/python/paddle/incubate/optimizer/lookahead.py index 40e0fd55e4b50..e1def5a9c45f2 100644 --- a/python/paddle/incubate/optimizer/lookahead.py +++ b/python/paddle/incubate/optimizer/lookahead.py @@ -13,10 +13,10 @@ # limitations under the License. import paddle -from paddle.fluid import framework, unique_name -from paddle.fluid.dygraph import base as imperative_base -from paddle.fluid.framework import Variable -from paddle.fluid.layer_helper import LayerHelper +from paddle.base import framework, unique_name +from paddle.base.dygraph import base as imperative_base +from paddle.base.framework import Variable +from paddle.base.layer_helper import LayerHelper from paddle.optimizer import Optimizer __all__ = [] @@ -250,9 +250,9 @@ def minimize( Args: loss (Tensor): A ``Tensor`` containing the value to minimize. - startup_program (Program, optional): :ref:`api_fluid_Program` for + startup_program (Program, optional): :ref:`api_base_Program` for initializing parameters in ``parameters``. 
The default value - is None, at this time :ref:`api_fluid_default_startup_program` will be used. + is None, at this time :ref:`api_base_default_startup_program` will be used. parameters (list, optional): List of ``Tensor`` or ``Tensor.name`` to update to minimize ``loss``. The default value is None, at this time all parameters will be updated. diff --git a/python/paddle/incubate/optimizer/modelaverage.py b/python/paddle/incubate/optimizer/modelaverage.py index 8e182ab9d8284..f66d546a36657 100644 --- a/python/paddle/incubate/optimizer/modelaverage.py +++ b/python/paddle/incubate/optimizer/modelaverage.py @@ -14,11 +14,11 @@ import paddle from paddle import _C_ops -from paddle.fluid import framework -from paddle.fluid.dygraph import base as imperative_base -from paddle.fluid.framework import Program -from paddle.fluid.layer_helper import LayerHelper -from paddle.fluid.wrapped_decorator import signature_safe_contextmanager +from paddle.base import framework +from paddle.base.dygraph import base as imperative_base +from paddle.base.framework import Program +from paddle.base.layer_helper import LayerHelper +from paddle.base.wrapped_decorator import signature_safe_contextmanager from paddle.framework import in_dynamic_mode from paddle.optimizer import Optimizer @@ -300,9 +300,9 @@ def minimize( Args: loss (Tensor): A ``Tensor`` containing the value to minimize. - startup_program (Program, optional): :ref:`api_fluid_Program` for + startup_program (Program, optional): :ref:`api_base_Program` for initializing parameters in ``parameters``. The default value - is None, at this time :ref:`api_fluid_default_startup_program` will be used. + is None, at this time :ref:`api_base_default_startup_program` will be used. parameters (list, optional): List of ``Tensor`` or ``Tensor.name`` to update to minimize ``loss``. The default value is None, at this time all parameters will be updated. diff --git a/python/paddle/incubate/optimizer/pipeline.py b/python/paddle/incubate/optimizer/pipeline.py index 1769ac62a7e02..5487acd5122fc 100644 --- a/python/paddle/incubate/optimizer/pipeline.py +++ b/python/paddle/incubate/optimizer/pipeline.py @@ -20,8 +20,8 @@ import numpy as np import paddle -from paddle.fluid import core, unique_name -from paddle.fluid.framework import ( +from paddle.base import core, unique_name +from paddle.base.framework import ( Parameter, Program, default_startup_program, @@ -49,24 +49,24 @@ class PipelineOptimizer: .. 
code-block:: python import paddle - import paddle.fluid as fluid - import paddle.fluid.layers as layers + import paddle.base as base + import paddle.base.layers as layers import numpy as np paddle.enable_static() - with fluid.device_guard("gpu:0"): + with base.device_guard("gpu:0"): x = paddle.static.data(name='x', shape=[-1, 1], dtype='int64', lod_level=0) y = paddle.static.data(name='y', shape=[-1, 1], dtype='int64', lod_level=0) - data_loader = fluid.io.DataLoader.from_generator( + data_loader = base.io.DataLoader.from_generator( feed_list=[x, y], capacity=64, use_double_buffer=True, iterable=False) - emb_x = layers.embedding(input=x, param_attr=fluid.ParamAttr(name="embx"), size=[10,2], is_sparse=False) - emb_y = layers.embedding(input=y, param_attr=fluid.ParamAttr(name="emby",learning_rate=0.9), size=[10,2], is_sparse=False) + emb_x = layers.embedding(input=x, param_attr=base.ParamAttr(name="embx"), size=[10,2], is_sparse=False) + emb_y = layers.embedding(input=y, param_attr=base.ParamAttr(name="emby",learning_rate=0.9), size=[10,2], is_sparse=False) - with fluid.device_guard("gpu:1"): + with base.device_guard("gpu:1"): concat = layers.concat([emb_x, emb_y], axis=1) fc = paddle.static.nn.fc(x=concat, name="fc", size=1, num_flatten_dims=1, bias_attr=False) loss = paddle.mean(fc) @@ -81,13 +81,13 @@ def train_reader(): yield x, y data_loader.set_sample_generator(train_reader, batch_size=1) - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + place = base.CUDAPlace(0) + exe = base.Executor(place) + exe.run(base.default_startup_program()) batch_size = 1 data_loader.start() exe.train_from_dataset( - fluid.default_main_program()) + base.default_main_program()) data_loader.reset() """ diff --git a/python/paddle/incubate/optimizer/recompute.py b/python/paddle/incubate/optimizer/recompute.py index dde8d723e0d1e..0414e70e17843 100644 --- a/python/paddle/incubate/optimizer/recompute.py +++ b/python/paddle/incubate/optimizer/recompute.py @@ -15,9 +15,9 @@ import logging import paddle -from paddle.fluid import core, framework, unique_name -from paddle.fluid.backward import append_backward -from paddle.fluid.framework import Variable, in_dygraph_mode, program_guard +from paddle.base import core, framework, unique_name +from paddle.base.backward import append_backward +from paddle.base.framework import Variable, in_dygraph_mode, program_guard from paddle.optimizer import Optimizer @@ -50,7 +50,7 @@ class RecomputeOptimizer(Optimizer): .. code-block:: python import paddle - import paddle.fluid as fluid + import paddle.base as base import numpy as np paddle.enable_static() @@ -78,14 +78,14 @@ def mlp(input_x, input_y, hid_dim=128, label_dim=2): sgd.minimize(cost) print("Finished optimize") - place = fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + place = base.CPUPlace() + exe = base.Executor(place) + exe.run(base.default_startup_program()) step = 10 for i in range(step): cost_val = exe.run(feed=gen_data(), - program=fluid.default_main_program(), + program=base.default_main_program(), fetch_list=[cost.name]) print("step=%d cost=%f" % (i, cost_val[0])) @@ -133,7 +133,7 @@ def load(self, state_dict): .. code-block:: python import paddle - import paddle.fluid as fluid + import paddle.base as base paddle.enable_static() def mlp(input_x, input_y, hid_dim=128, label_dim=2): @@ -178,8 +178,8 @@ def apply_gradients(self, params_grads): .. 
code-block:: python import paddle - import paddle.fluid as fluid - import paddle.fluid.framework as framework + import paddle.base as base + import paddle.base.framework as framework paddle.enable_static() @@ -666,7 +666,7 @@ def backward( .. code-block:: python import paddle - import paddle.fluid as fluid + import paddle.base as base paddle.enable_static() @@ -748,7 +748,7 @@ def apply_optimize(self, loss, startup_program, params_grads): Examples: .. code-block:: python import paddle - import paddle.fluid as fluid + import paddle.base as base paddle.enable_static() diff --git a/python/paddle/incubate/passes/ir.py b/python/paddle/incubate/passes/ir.py index c657f20abcb99..8b9d9944d9bee 100644 --- a/python/paddle/incubate/passes/ir.py +++ b/python/paddle/incubate/passes/ir.py @@ -16,19 +16,19 @@ from os import path import paddle -from paddle.fluid.proto import framework_pb2 +from paddle.base.proto import framework_pb2 -from ...fluid import core, unique_name -from ...fluid.framework import OpProtoHolder +from ...base import core, unique_name +from ...base.framework import OpProtoHolder try: - from paddle.fluid.proto import pass_desc_pb2 + from paddle.base.proto import pass_desc_pb2 except ModuleNotFoundError: import sys - fluid_path = path.dirname(__file__) + '/../../fluid' - sys.path.append(path.join(fluid_path, 'proto')) - from paddle.fluid.proto import pass_desc_pb2 + base_path = path.dirname(__file__) + '/../../base' + sys.path.append(path.join(base_path, 'proto')) + from paddle.base.proto import pass_desc_pb2 class RegisterPassHelper: @@ -470,7 +470,7 @@ def RegisterPass(function=None, input_specs={}): .. code-block:: python >>> import paddle - >>> from paddle.fluid.ir import RegisterPass + >>> from paddle.base.ir import RegisterPass >>> @RegisterPass >>> def multi_add_to_addn(): diff --git a/python/paddle/incubate/tensor/manipulation.py b/python/paddle/incubate/tensor/manipulation.py index 70c017a8eec36..a1615fd2b8d20 100644 --- a/python/paddle/incubate/tensor/manipulation.py +++ b/python/paddle/incubate/tensor/manipulation.py @@ -13,8 +13,8 @@ # limitations under the License. from paddle import _C_ops -from paddle.fluid.data_feeder import check_variable_and_dtype -from paddle.fluid.layer_helper import LayerHelper +from paddle.base.data_feeder import check_variable_and_dtype +from paddle.base.layer_helper import LayerHelper from paddle.framework import in_dynamic_mode __all__ = [] diff --git a/python/paddle/incubate/tensor/math.py b/python/paddle/incubate/tensor/math.py index 04ed1da3e1980..649b1a74fcb08 100644 --- a/python/paddle/incubate/tensor/math.py +++ b/python/paddle/incubate/tensor/math.py @@ -13,8 +13,8 @@ # limitations under the License. 
from paddle import _C_ops -from paddle.fluid.data_feeder import check_variable_and_dtype -from paddle.fluid.layer_helper import LayerHelper +from paddle.base.data_feeder import check_variable_and_dtype +from paddle.base.layer_helper import LayerHelper from paddle.framework import in_dynamic_mode from paddle.utils import deprecated diff --git a/python/paddle/incubate/xpu/resnet_block.py b/python/paddle/incubate/xpu/resnet_block.py index 081583b6a8c40..7c24943f35b11 100644 --- a/python/paddle/incubate/xpu/resnet_block.py +++ b/python/paddle/incubate/xpu/resnet_block.py @@ -15,9 +15,9 @@ import numpy as np import paddle -from paddle import _legacy_C_ops, fluid -from paddle.fluid.layer_helper import LayerHelper -from paddle.fluid.param_attr import ParamAttr +from paddle import _legacy_C_ops, base +from paddle.base.layer_helper import LayerHelper +from paddle.base.param_attr import ParamAttr from paddle.nn import Layer from paddle.nn import initializer as I @@ -60,7 +60,7 @@ def resnet_basic_block( trainable_statistics=False, find_conv_max=True, ): - if fluid.framework.in_dygraph_mode(): + if base.framework.in_dygraph_mode(): attrs = ( 'stride1', stride1, @@ -153,8 +153,8 @@ def resnet_basic_block( ) return out helper = LayerHelper('resnet_basic_block', **locals()) - bn_param_dtype = fluid.core.VarDesc.VarType.FP32 - max_dtype = fluid.core.VarDesc.VarType.FP32 + bn_param_dtype = base.core.VarDesc.VarType.FP32 + max_dtype = base.core.VarDesc.VarType.FP32 out = helper.create_variable_for_type_inference( dtype=x.dtype, stop_gradient=True @@ -526,7 +526,7 @@ def _get_default_param_initializer(channels, kernel_size): return I.Normal(0.0, std) # init filter - bn_param_dtype = fluid.core.VarDesc.VarType.FP32 + bn_param_dtype = base.core.VarDesc.VarType.FP32 bn1_param_shape = [1, 1, num_filter1] bn2_param_shape = [1, 1, num_filter2] filter1_shape = [num_filter1, num_channels1, filter1_size, filter1_size] diff --git a/python/paddle/inference/__init__.py b/python/paddle/inference/__init__.py index e7120bdf7a4c3..f59c5990573db 100644 --- a/python/paddle/inference/__init__.py +++ b/python/paddle/inference/__init__.py @@ -22,7 +22,7 @@ convert_to_mixed_precision, ) -from paddle.fluid.core import ( +from paddle.base.core import ( create_predictor, get_version, _get_phi_kernel_name, diff --git a/python/paddle/inference/contrib/utils/__init__.py b/python/paddle/inference/contrib/utils/__init__.py index 5a52525049250..aef5702903e0d 100644 --- a/python/paddle/inference/contrib/utils/__init__.py +++ b/python/paddle/inference/contrib/utils/__init__.py @@ -12,4 +12,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from ....fluid.core import copy_tensor # noqa: F401 +from ....base.core import copy_tensor # noqa: F401 diff --git a/python/paddle/inference/wrapper.py b/python/paddle/inference/wrapper.py index 7163bdee25e36..51095647fee2f 100644 --- a/python/paddle/inference/wrapper.py +++ b/python/paddle/inference/wrapper.py @@ -18,8 +18,8 @@ import numpy as np import paddle -from paddle.fluid import core -from paddle.fluid.core import ( +from paddle.base import core +from paddle.base.core import ( AnalysisConfig, PaddleDType, PaddleInferPredictor, @@ -58,7 +58,7 @@ def tensor_share_external_data(self, data): self._share_external_data_bind(data) elif isinstance(data, paddle.Tensor): self._share_external_data_paddle_tensor_bind(data) - elif isinstance(data, paddle.fluid.framework.Variable): + elif isinstance(data, paddle.base.framework.Variable): raise TypeError( "The interface 'share_external_data' can only be used in dynamic graph mode. " "Maybe you called 'paddle.enable_static()' and you are in static graph mode now. " diff --git a/python/paddle/io/dataloader/dataloader_iter.py b/python/paddle/io/dataloader/dataloader_iter.py index 0ffe7c46e77c9..7ddec5c44b83e 100644 --- a/python/paddle/io/dataloader/dataloader_iter.py +++ b/python/paddle/io/dataloader/dataloader_iter.py @@ -25,7 +25,7 @@ import paddle from paddle import profiler -from paddle.fluid.framework import _current_expected_place, _set_expected_place +from paddle.base.framework import _current_expected_place, _set_expected_place from paddle.profiler.timer import benchmark from paddle.profiler.utils import in_profiler_mode @@ -230,7 +230,7 @@ def _thread_loop(self, legacy_expected_place): indices = next(self._sampler_iter) # read data from dataset in mini-batch - # with paddle.fluid.dygraph.guard(place=paddle.CPUPlace()): + # with paddle.base.dygraph.guard(place=paddle.CPUPlace()): # read data from dataset in mini-batch batch = self._dataset_fetcher.fetch( indices, self._thread_done_event @@ -397,7 +397,7 @@ def __init__(self, loader): # Note(zhangbo): shm_buffer_size is used for MemoryMapAllocationPool. # MemoryMapAllocationPool is used to cache and reuse shm, thus reducing munmap in dataloader. 
- # For more details, please see: paddle/fluid/memory/allocation/mmap_allocator.h + # For more details, please see: paddle/base/memory/allocation/mmap_allocator.h if os.environ.get('FLAGS_use_shm_cache', False) in [ 1, '1', diff --git a/python/paddle/io/dataloader/flat.py b/python/paddle/io/dataloader/flat.py index f674d7fb2b4b9..87c35e6dedd38 100644 --- a/python/paddle/io/dataloader/flat.py +++ b/python/paddle/io/dataloader/flat.py @@ -36,7 +36,7 @@ def _flatten(batch, flat_batch, structure, field_idx): for field in batch: if isinstance( field, - (np.ndarray, paddle.Tensor, paddle.fluid.core.eager.Tensor), + (np.ndarray, paddle.Tensor, paddle.base.core.eager.Tensor), ): structure.append(f'{FIELD_PREFIX}{field_idx}') flat_batch.append(field) @@ -59,7 +59,7 @@ def _flatten(batch, flat_batch, structure, field_idx): for k, field in batch.items(): if isinstance( field, - (np.ndarray, paddle.Tensor, paddle.fluid.core.eager.Tensor), + (np.ndarray, paddle.Tensor, paddle.base.core.eager.Tensor), ): structure[k] = f'{FIELD_PREFIX}{field_idx}' flat_batch.append(field) diff --git a/python/paddle/io/dataloader/worker.py b/python/paddle/io/dataloader/worker.py index 4a1667483da64..814dc667a7cf3 100644 --- a/python/paddle/io/dataloader/worker.py +++ b/python/paddle/io/dataloader/worker.py @@ -370,7 +370,7 @@ def _worker_loop( # may copy CPU tensor to GPU even if users want to use # CPU tensor operation, so we add CPUPlace guard here # to make sure tensor will be operated only on CPU - with paddle.fluid.dygraph.guard(place=paddle.CPUPlace()): + with paddle.base.dygraph.guard(place=paddle.CPUPlace()): batch = fetcher.fetch(indices) except Exception as e: if ( diff --git a/python/paddle/io/reader.py b/python/paddle/io/reader.py index d8db6cc2ab012..81483712bf453 100644 --- a/python/paddle/io/reader.py +++ b/python/paddle/io/reader.py @@ -19,9 +19,9 @@ import warnings import paddle -from paddle.fluid.framework import logging +from paddle.base.framework import logging -from ..fluid.framework import ( +from ..base.framework import ( _current_expected_place, _get_paddle_place, _get_paddle_place_list, diff --git a/python/paddle/ir/__init__.py b/python/paddle/ir/__init__.py index 4fee1c1a064c5..df98fd79fe2a3 100644 --- a/python/paddle/ir/__init__.py +++ b/python/paddle/ir/__init__.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from paddle.fluid.libpaddle.ir import ( +from paddle.base.libpaddle.ir import ( Program, Block, Operation, @@ -21,7 +21,7 @@ OpResult, Type, ) # noqa: F401 -from paddle.fluid.libpaddle.ir import ( +from paddle.base.libpaddle.ir import ( translate_to_new_ir, set_global_program, set_insertion_point, diff --git a/python/paddle/ir/core.py b/python/paddle/ir/core.py index c7f2a73f2ad5e..2a95411292d60 100644 --- a/python/paddle/ir/core.py +++ b/python/paddle/ir/core.py @@ -15,10 +15,10 @@ import numpy as np -from paddle.fluid.libpaddle import DataType -from paddle.fluid.libpaddle.ir import Program, set_global_program +from paddle.base.libpaddle import DataType +from paddle.base.libpaddle.ir import Program, set_global_program -from ..fluid.wrapped_decorator import signature_safe_contextmanager +from ..base.wrapped_decorator import signature_safe_contextmanager np_type_to_paddle_type = { np.dtype("float32"): DataType.FLOAT32, @@ -229,7 +229,7 @@ def program_guard(main_program, startup_program=None): data = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32') """ - from ..fluid.data_feeder import check_type + from ..base.data_feeder import check_type check_type( main_program, 'main_program', Program, 'paddle.static.program_guard' diff --git a/python/paddle/jit/api.py b/python/paddle/jit/api.py index dd96a1001eae5..b1947e9017716 100644 --- a/python/paddle/jit/api.py +++ b/python/paddle/jit/api.py @@ -28,14 +28,14 @@ import types import paddle -from paddle.fluid import core, dygraph -from paddle.fluid.compiler import ( +from paddle.base import core, dygraph +from paddle.base.compiler import ( BuildStrategy, CompiledProgram, ExecutionStrategy, ) -from paddle.fluid.data_feeder import check_type -from paddle.fluid.dygraph.base import ( +from paddle.base.data_feeder import check_type +from paddle.base.dygraph.base import ( program_desc_tracing_guard, switch_to_static_graph, ) @@ -59,21 +59,21 @@ INFER_PROPERTY_SUFFIX, ) from paddle.nn import Layer -from paddle.fluid.executor import Executor, scope_guard -from paddle.fluid.framework import ( +from paddle.base.executor import Executor, scope_guard +from paddle.base.framework import ( Block, Program, Variable, Parameter, EagerParamBase, ) -from paddle.fluid.framework import ( +from paddle.base.framework import ( _current_expected_place, _dygraph_guard, _dygraph_tracer, ) -from paddle.fluid.framework import dygraph_only -from paddle.fluid.wrapped_decorator import wrap_decorator +from paddle.base.framework import dygraph_only +from paddle.base.wrapped_decorator import wrap_decorator from paddle.static.io import save_inference_model from paddle.framework import in_dynamic_mode @@ -94,7 +94,7 @@ def _extract_vars(inputs, result_list, err_tag='inputs'): _extract_vars(var, result_list, err_tag) else: raise TypeError( - "The type of 'each element of {}' in paddle.jit.TracedLayer.trace must be fluid.Variable, but received {}.".format( + "The type of 'each element of {}' in paddle.jit.TracedLayer.trace must be base.Variable, but received {}.".format( err_tag, type(inputs) ) ) @@ -647,11 +647,11 @@ def _get_output_vars(outputs, output_spec, with_hook=False): # 1. Expected cases: # - paddle.jit.save # - paddle.static.save_inference_model -# - paddle.fluid.io.save_inference_model +# - paddle.base.io.save_inference_model # 2. 
Error cases: # - paddle.save: no .pdmodel for prefix # - paddle.static.save: no .pdiparams but .pdparams exists -# - paddle.fluid.io.save_params/save_persistables: no __model__ +# - paddle.base.io.save_params/save_persistables: no __model__ # TODO(chenweihang): polish error message in above error cases def _build_load_path_and_config(path, config): # NOTE(chenweihang): If both [prefix save format] and [directory save format] exist, @@ -1336,7 +1336,7 @@ def load(path, **configs): :api_attr: imperative Load model saved by ``paddle.jit.save`` or ``paddle.static.save_inference_model`` or - paddle 1.x API ``paddle.fluid.io.save_inference_model`` as ``paddle.jit.TranslatedLayer``, + paddle 1.x API ``paddle.base.io.save_inference_model`` as ``paddle.jit.TranslatedLayer``, then performing inference or fine-tune training. .. note:: @@ -1455,7 +1455,7 @@ def load(path, **configs): >>> train(loaded_layer, loader, loss_fn, adam) - 2. Load model saved by ``paddle.fluid.io.save_inference_model`` then performing and fine-tune training. + 2. Load model saved by ``paddle.base.io.save_inference_model`` then performing and fine-tune training. .. code-block:: python :name: code-example2 @@ -1523,7 +1523,7 @@ def load(path, **configs): ... ) >>> model_path = "fc.example.model" - >>> paddle.fluid.io.save_inference_model( + >>> paddle.base.io.save_inference_model( >>> model_path, ["image"], [pred], exe) >>> # 2. load model @@ -1755,12 +1755,12 @@ def set_strategy(self, build_strategy=None, exec_strategy=None): assert self._compiled_program is None, "Cannot set strategy after run" assert isinstance( build_strategy, (type(None), BuildStrategy) - ), "The type of 'build_strategy' in paddle.jit.TracedLayer.set_strategy must be fluid.BuildStrategy, but received {}.".format( + ), "The type of 'build_strategy' in paddle.jit.TracedLayer.set_strategy must be base.BuildStrategy, but received {}.".format( type(build_strategy) ) assert isinstance( exec_strategy, (type(None), ExecutionStrategy) - ), "The type of 'exec_strategy' in paddle.jit.TracedLayer.set_strategy must be fluid.ExecutionStrategy, but received {}.".format( + ), "The type of 'exec_strategy' in paddle.jit.TracedLayer.set_strategy must be base.ExecutionStrategy, but received {}.".format( type(exec_strategy) ) self._build_strategy = build_strategy diff --git a/python/paddle/jit/dy2static/base_transformer.py b/python/paddle/jit/dy2static/base_transformer.py index cddea92376007..e3e2dc7b39941 100644 --- a/python/paddle/jit/dy2static/base_transformer.py +++ b/python/paddle/jit/dy2static/base_transformer.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from paddle.fluid import unique_name +from paddle.base import unique_name from paddle.jit.dy2static.utils import ( FOR_ITER_INDEX_PREFIX, FOR_ITER_ITERATOR_PREFIX, diff --git a/python/paddle/jit/dy2static/basic_api_transformer.py b/python/paddle/jit/dy2static/basic_api_transformer.py index 30af698923de3..af111b55e79a6 100644 --- a/python/paddle/jit/dy2static/basic_api_transformer.py +++ b/python/paddle/jit/dy2static/basic_api_transformer.py @@ -230,7 +230,7 @@ def is_to_variable(node): def to_assign_node(node): - # Transform dygraph api `fluid.dygraph.to_variable` alias `paddle.to_tensor` to static api `paddle.assign`. + # Transform dygraph api `base.dygraph.to_variable` alias `paddle.to_tensor` to static api `paddle.assign`. # NOTE: # 1. 
Api `to_variable` supports data type {float16, float32, float64, int16, int32, int64, uint8, uint16}, # but api `assign` only supports {float32, float64, int32, int64, bool}; diff --git a/python/paddle/jit/dy2static/break_continue_transformer.py b/python/paddle/jit/dy2static/break_continue_transformer.py index 6e1199bf0ede1..4c6cad0e788e3 100644 --- a/python/paddle/jit/dy2static/break_continue_transformer.py +++ b/python/paddle/jit/dy2static/break_continue_transformer.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from paddle.fluid import unique_name +from paddle.base import unique_name from paddle.jit.dy2static.utils import BaseNodeVisitor, index_in_list from paddle.jit.dy2static.variable_trans_func import create_bool_node from paddle.utils import gast diff --git a/python/paddle/jit/dy2static/convert_operators.py b/python/paddle/jit/dy2static/convert_operators.py index 5ffb3ebfce978..700c23189b079 100644 --- a/python/paddle/jit/dy2static/convert_operators.py +++ b/python/paddle/jit/dy2static/convert_operators.py @@ -15,12 +15,12 @@ import re import paddle -from paddle.fluid.data_feeder import convert_dtype -from paddle.fluid.dygraph.base import ( +from paddle.base.data_feeder import convert_dtype +from paddle.base.dygraph.base import ( _convert_into_variable, in_declarative_mode, ) -from paddle.fluid.framework import Variable, core, default_main_program +from paddle.base.framework import Variable, core, default_main_program from .utils import ( RETURN_NO_VALUE_VAR_NAME, @@ -41,7 +41,7 @@ def convert_attr(x, attr): def convert_load(x): - if in_declarative_mode() and isinstance(x, paddle.fluid.core.eager.Tensor): + if in_declarative_mode() and isinstance(x, paddle.base.core.eager.Tensor): """ TODO:(@xiongkun) may run convert_load in dygraph mode, which should be fixed. 
""" diff --git a/python/paddle/jit/dy2static/function_spec.py b/python/paddle/jit/dy2static/function_spec.py index 65978176cb177..a38cca8684f45 100644 --- a/python/paddle/jit/dy2static/function_spec.py +++ b/python/paddle/jit/dy2static/function_spec.py @@ -18,8 +18,8 @@ import numpy as np import paddle -from paddle.fluid import core -from paddle.fluid.dygraph.base import switch_to_static_graph +from paddle.base import core +from paddle.base.dygraph.base import switch_to_static_graph from paddle.jit.translated_layer import TranslatedLayer from paddle.nn.layer import layers @@ -299,7 +299,7 @@ def _replace_value_with_input_spec(args): stop_gradient = input_var.stop_gradient input_var = paddle.static.InputSpec.from_tensor(input_var) input_var.stop_gradient = stop_gradient - elif isinstance(input_var, paddle.fluid.framework.Variable): + elif isinstance(input_var, paddle.base.framework.Variable): stop_gradient = input_var.stop_gradient input_var = paddle.static.InputSpec( input_var.shape, input_var.dtype, input_var.name diff --git a/python/paddle/jit/dy2static/ifelse_transformer.py b/python/paddle/jit/dy2static/ifelse_transformer.py index 0986bc1933dad..8da098959aa6e 100644 --- a/python/paddle/jit/dy2static/ifelse_transformer.py +++ b/python/paddle/jit/dy2static/ifelse_transformer.py @@ -15,7 +15,7 @@ import copy from collections import defaultdict -from paddle.fluid import unique_name +from paddle.base import unique_name from paddle.jit.dy2static.utils import ( FOR_ITER_INDEX_PREFIX, FOR_ITER_ITERATOR_PREFIX, diff --git a/python/paddle/jit/dy2static/logging_utils.py b/python/paddle/jit/dy2static/logging_utils.py index 7b45ab82d03ef..23d0435415493 100644 --- a/python/paddle/jit/dy2static/logging_utils.py +++ b/python/paddle/jit/dy2static/logging_utils.py @@ -15,7 +15,7 @@ import os import threading -from paddle.fluid import log_helper +from paddle.base import log_helper from .ast_utils import ast_to_source_code diff --git a/python/paddle/jit/dy2static/loop_transformer.py b/python/paddle/jit/dy2static/loop_transformer.py index 043d5be4b76cf..e96cfa943abca 100644 --- a/python/paddle/jit/dy2static/loop_transformer.py +++ b/python/paddle/jit/dy2static/loop_transformer.py @@ -15,7 +15,7 @@ import copy from collections import defaultdict -from paddle.fluid import unique_name +from paddle.base import unique_name from paddle.utils import gast from .base_transformer import ( diff --git a/python/paddle/jit/dy2static/origin_info.py b/python/paddle/jit/dy2static/origin_info.py index 42e5f0ea4d3c9..d6fee112ded91 100644 --- a/python/paddle/jit/dy2static/origin_info.py +++ b/python/paddle/jit/dy2static/origin_info.py @@ -15,8 +15,8 @@ import inspect from collections.abc import Sequence -from paddle.fluid import core -from paddle.fluid.framework import Program +from paddle.base import core +from paddle.base.framework import Program from paddle.utils import gast from .utils import ORIGI_INFO, unwrap diff --git a/python/paddle/jit/dy2static/partial_program.py b/python/paddle/jit/dy2static/partial_program.py index 2c44346054cd6..3bf82cbd68c9c 100644 --- a/python/paddle/jit/dy2static/partial_program.py +++ b/python/paddle/jit/dy2static/partial_program.py @@ -20,12 +20,12 @@ import paddle from paddle import _legacy_C_ops from paddle.amp.auto_cast import _in_amp_guard, _in_pure_fp16_guard -from paddle.fluid import backward, core, framework, program_guard -from paddle.fluid.compiler import BuildStrategy -from paddle.fluid.data_feeder import check_type, convert_dtype -from paddle.fluid.dygraph.base import 
switch_to_static_graph -from paddle.fluid.framework import _apply_pass -from paddle.fluid.unique_name import guard as UniqueNameGuard +from paddle.base import backward, core, framework, program_guard +from paddle.base.compiler import BuildStrategy +from paddle.base.data_feeder import check_type, convert_dtype +from paddle.base.dygraph.base import switch_to_static_graph +from paddle.base.framework import _apply_pass +from paddle.base.unique_name import guard as UniqueNameGuard from paddle.optimizer.lr import LRScheduler from . import logging_utils @@ -869,10 +869,10 @@ def _inout_var_names(self): """ var_names = [] for var in self._inputs: - if isinstance(var, paddle.fluid.framework.Variable): + if isinstance(var, paddle.base.framework.Variable): var_names.append(var.desc.name()) for var in self._outputs: - if isinstance(var, paddle.fluid.framework.Variable): + if isinstance(var, paddle.base.framework.Variable): var_names.append(var.desc.name()) return var_names diff --git a/python/paddle/jit/dy2static/program_translator.py b/python/paddle/jit/dy2static/program_translator.py index ed044d1fd2293..7fba47f400435 100644 --- a/python/paddle/jit/dy2static/program_translator.py +++ b/python/paddle/jit/dy2static/program_translator.py @@ -20,9 +20,9 @@ import warnings import weakref -from paddle.fluid import core, framework -from paddle.fluid.data_feeder import check_type -from paddle.fluid.dygraph.base import ( +from paddle.base import core, framework +from paddle.base.data_feeder import check_type +from paddle.base.dygraph.base import ( _switch_declarative_mode_guard_, param_guard, switch_to_static_graph, @@ -1182,8 +1182,8 @@ def from_func_spec( main_program, startup_program = framework.Program(), framework.Program() # Note: The random seed should be synchronized into cached program - # if set in `fluid.dygraph_guard` because some ops rely on it, such as - # `fluid.layers.dropout`. + # if set in `base.dygraph_guard` because some ops rely on it, such as + # `base.layers.dropout`. main_program.random_seed = framework.default_main_program().random_seed startup_program.random_seed = ( framework.default_startup_program().random_seed diff --git a/python/paddle/jit/dy2static/return_transformer.py b/python/paddle/jit/dy2static/return_transformer.py index 3b04d39e521e7..3fc259e0a303a 100644 --- a/python/paddle/jit/dy2static/return_transformer.py +++ b/python/paddle/jit/dy2static/return_transformer.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from paddle.fluid import unique_name +from paddle.base import unique_name from paddle.utils import gast from .base_transformer import BaseTransformer diff --git a/python/paddle/jit/dy2static/utils.py b/python/paddle/jit/dy2static/utils.py index eaa47258b1074..b66d97efa6e1c 100644 --- a/python/paddle/jit/dy2static/utils.py +++ b/python/paddle/jit/dy2static/utils.py @@ -31,11 +31,11 @@ import numpy as np import paddle -from paddle import fluid # noqa: F401 -from paddle.fluid import backward, core, framework, unique_name -from paddle.fluid.data_feeder import convert_dtype -from paddle.fluid.layer_helper import LayerHelper -from paddle.fluid.wrapped_decorator import signature_safe_contextmanager +from paddle import base # noqa: F401 +from paddle.base import backward, core, framework, unique_name +from paddle.base.data_feeder import convert_dtype +from paddle.base.layer_helper import LayerHelper +from paddle.base.wrapped_decorator import signature_safe_contextmanager from paddle.utils import gast from .ast_utils import ast_to_source_code @@ -376,7 +376,7 @@ def to_static_ast(node, class_node): attr='layers', ctx=gast.Load(), value=gast.Name( - ctx=gast.Load(), id='fluid', annotation=None, type_comment=None + ctx=gast.Load(), id='base', annotation=None, type_comment=None ), ), ) @@ -609,7 +609,7 @@ def _inject_import_statements(): import_statements = [ "import paddle", "from paddle import Tensor", - "import paddle.fluid as fluid", + "import paddle.base as base", "import paddle.jit.dy2static as _jst", "from typing import *", "import numpy as np", diff --git a/python/paddle/jit/dy2static/utils_helper.py b/python/paddle/jit/dy2static/utils_helper.py index 4899b8185169c..06f96d2094a1e 100644 --- a/python/paddle/jit/dy2static/utils_helper.py +++ b/python/paddle/jit/dy2static/utils_helper.py @@ -19,10 +19,10 @@ import numpy as np # noqa: F401 import paddle # noqa: F401 -from paddle import fluid # noqa: F401 -from paddle.fluid import dygraph # noqa: F401 -from paddle.fluid import layers # noqa: F401 -from paddle.fluid.dygraph import to_variable # noqa: F401 +from paddle import base # noqa: F401 +from paddle.base import dygraph # noqa: F401 +from paddle.base import layers # noqa: F401 +from paddle.base.dygraph import to_variable # noqa: F401 from paddle.utils import gast from .ast_utils import ast_to_source_code @@ -41,7 +41,7 @@ def index_in_list(array_list, item): # module such as paddlenlp. PADDLE_MODULE_PREFIX = 'paddle.' DYGRAPH_TO_STATIC_MODULE_PREFIX = 'paddle.jit.dy2static' -DYGRAPH_MODULE_PREFIX = 'paddle.fluid.dygraph' +DYGRAPH_MODULE_PREFIX = 'paddle.base.dygraph' def is_dygraph_api(node): diff --git a/python/paddle/jit/dy2static/variable_trans_func.py b/python/paddle/jit/dy2static/variable_trans_func.py index 80c4487dc29c6..ee358d57ee019 100644 --- a/python/paddle/jit/dy2static/variable_trans_func.py +++ b/python/paddle/jit/dy2static/variable_trans_func.py @@ -13,7 +13,7 @@ # limitations under the License. import paddle -from paddle.fluid.framework import Variable +from paddle.base.framework import Variable from paddle.utils import gast, is_sequence, map_structure from .utils import UndefinedVar, create_undefined_variable diff --git a/python/paddle/jit/layer.py b/python/paddle/jit/layer.py index e3204ab65df02..fca710e87ac1f 100644 --- a/python/paddle/jit/layer.py +++ b/python/paddle/jit/layer.py @@ -13,8 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from paddle.fluid import core -from paddle.fluid.core import Load +from paddle.base import core +from paddle.base.core import Load class Layer: diff --git a/python/paddle/jit/translated_layer.py b/python/paddle/jit/translated_layer.py index edb0b8bd355a9..a7f51c1a8c164 100644 --- a/python/paddle/jit/translated_layer.py +++ b/python/paddle/jit/translated_layer.py @@ -19,10 +19,10 @@ import paddle from paddle import _legacy_C_ops -from paddle.fluid import backward, core, framework, unique_name -from paddle.fluid.data_feeder import check_type -from paddle.fluid.dygraph.base import switch_to_static_graph -from paddle.fluid.framework import OpProtoHolder +from paddle.base import backward, core, framework, unique_name +from paddle.base.data_feeder import check_type +from paddle.base.dygraph.base import switch_to_static_graph +from paddle.base.framework import OpProtoHolder from paddle.framework import in_dynamic_mode from paddle.jit.dy2static.partial_program import ( LazyInitialized, diff --git a/python/paddle/metric/metrics.py b/python/paddle/metric/metrics.py index 21064b9bb0e90..9cac2ff006c72 100644 --- a/python/paddle/metric/metrics.py +++ b/python/paddle/metric/metrics.py @@ -19,9 +19,9 @@ import paddle from paddle import _legacy_C_ops -from ..fluid.data_feeder import check_variable_and_dtype -from ..fluid.framework import _create_tensor -from ..fluid.layer_helper import LayerHelper +from ..base.data_feeder import check_variable_and_dtype +from ..base.framework import _create_tensor +from ..base.layer_helper import LayerHelper from ..framework import in_dynamic_mode __all__ = [] @@ -293,7 +293,7 @@ def update(self, correct, *args): Return: Tensor: the accuracy of current step. """ - if isinstance(correct, (paddle.Tensor, paddle.fluid.core.eager.Tensor)): + if isinstance(correct, (paddle.Tensor, paddle.base.core.eager.Tensor)): correct = np.array(correct) num_samples = np.prod(np.array(correct.shape[:-1])) accs = [] @@ -420,12 +420,12 @@ def update(self, preds, labels): the shape should keep the same as preds. The data type is 'int32' or 'int64'. """ - if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)): + if isinstance(preds, (paddle.Tensor, paddle.base.core.eager.Tensor)): preds = np.array(preds) elif not _is_numpy_(preds): raise ValueError("The 'preds' must be a numpy ndarray or Tensor.") - if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)): + if isinstance(labels, (paddle.Tensor, paddle.base.core.eager.Tensor)): labels = np.array(labels) elif not _is_numpy_(labels): raise ValueError("The 'labels' must be a numpy ndarray or Tensor.") @@ -552,12 +552,12 @@ def update(self, preds, labels): the shape should keep the same as preds. Shape: [batch_size, 1], Dtype: 'int32' or 'int64'. """ - if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)): + if isinstance(preds, (paddle.Tensor, paddle.base.core.eager.Tensor)): preds = np.array(preds) elif not _is_numpy_(preds): raise ValueError("The 'preds' must be a numpy ndarray or Tensor.") - if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)): + if isinstance(labels, (paddle.Tensor, paddle.base.core.eager.Tensor)): labels = np.array(labels) elif not _is_numpy_(labels): raise ValueError("The 'labels' must be a numpy ndarray or Tensor.") @@ -703,12 +703,12 @@ def update(self, preds, labels): (batch_size, 1), labels[i] is either o or 1, representing the label of the instance i. 
""" - if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)): + if isinstance(labels, (paddle.Tensor, paddle.base.core.eager.Tensor)): labels = np.array(labels) elif not _is_numpy_(labels): raise ValueError("The 'labels' must be a numpy ndarray or Tensor.") - if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)): + if isinstance(preds, (paddle.Tensor, paddle.base.core.eager.Tensor)): preds = np.array(preds) elif not _is_numpy_(preds): raise ValueError("The 'preds' must be a numpy ndarray or Tensor.") diff --git a/python/paddle/nn/clip.py b/python/paddle/nn/clip.py index f8c674c8a0be4..2c27abbdc667f 100644 --- a/python/paddle/nn/clip.py +++ b/python/paddle/nn/clip.py @@ -20,8 +20,8 @@ import paddle.autograd as imperative_base from paddle import _C_ops from paddle.common_ops_import import Variable, check_type, default_main_program -from paddle.fluid import core, framework, unique_name -from paddle.fluid.data_feeder import check_variable_and_dtype +from paddle.base import core, framework, unique_name +from paddle.base.data_feeder import check_variable_and_dtype from paddle.framework import LayerHelper, in_dynamic_mode from paddle.tensor.layer_function_generator import templatedoc @@ -100,11 +100,11 @@ def merge_selected_rows(x, name=None): Examples: .. code-block:: python - import paddle.fluid as fluid - b = fluid.default_main_program().global_block() + import paddle.base as base + b = base.default_main_program().global_block() var = b.create_var( name="X", dtype="float32", persistable=True, - type=fluid.core.VarDesc.VarType.SELECTED_ROWS) + type=base.core.VarDesc.VarType.SELECTED_ROWS) y = nn.merge_selected_rows(var) """ if in_dynamic_mode(): @@ -153,8 +153,8 @@ def get_tensor_from_selected_rows(x, name=None): .. code-block:: python from paddle import nnp.py - b = fluid.default_main_program().global_block() - input = b.create_var(name="X", dtype="float32", persistable=True, type=fluid.core.VarDesc.VarType.SELECTED_ROWS) + b = base.default_main_program().global_block() + input = b.create_var(name="X", dtype="float32", persistable=True, type=base.core.VarDesc.VarType.SELECTED_ROWS) out = nn.get_tensor_from_selected_rows(input) """ @@ -253,21 +253,21 @@ class ErrorClipByValue(BaseErrorClipAttr): Examples: .. code-block:: python - import paddle.fluid as fluid + import paddle.base as base import paddle paddle.enable_static() BATCH_SIZE = 128 CLIP_MAX = 2e-6 CLIP_MIN = -1e-6 - prog = fluid.framework.Program() - with fluid.program_guard(main_program=prog): - image = fluid.layers.data( + prog = base.framework.Program() + with base.program_guard(main_program=prog): + image = base.layers.data( name='x', shape=[784], dtype='float32') - hidden1 = fluid.layers.fc(input=image, size=128, act='relu') - hidden2 = fluid.layers.fc(input=hidden1, size=64, act='relu') - predict = fluid.layers.fc( + hidden1 = base.layers.fc(input=image, size=128, act='relu') + hidden2 = base.layers.fc(input=hidden1, size=64, act='relu') + predict = base.layers.fc( input=hidden2, size=10, act='softmax') - label = fluid.layers.data(name='y', shape=[1], dtype='int64') + label = base.layers.data(name='y', shape=[1], dtype='int64') cost = paddle.nn.functional.cross_entropy(input=predict, label=label) avg_cost = paddle.mean(cost) prog_clip = prog.clone() @@ -937,22 +937,22 @@ def set_gradient_clip(clip, param_list=None, program=None): and it may be removed in future releases, so it is not recommended. 
It is recommended to set ``grad_clip`` when initializing the ``optimizer`` , this is a better method to clip gradient. There are three clipping strategies: - :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , - :ref:`api_fluid_clip_GradientClipByValue` . + :ref:`api_base_clip_GradientClipByGlobalNorm` , :ref:`api_base_clip_GradientClipByNorm` , + :ref:`api_base_clip_GradientClipByValue` . To specify parameters that require gradient clip. Args: grad_clip (GradientClipBase, optional): Gradient cliping strategy, it's an instance of some derived class of ``GradientClipBase`` . There are three cliping strategies - ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , - :ref:`api_fluid_clip_GradientClipByValue` ). Default value: None, and there is no + ( :ref:`api_base_clip_GradientClipByGlobalNorm` , :ref:`api_base_clip_GradientClipByNorm` , + :ref:`api_base_clip_GradientClipByValue` ). Default value: None, and there is no gradient clipping. param_list (list(Variable), optional): Parameters that require gradient clip. It can be a list of parameter or a list of parameter's name. Default None, meaning that all parameters in the program will be included. program (Program, optional): The program where parameters are located. - Default None, meaning that using :ref:`api_fluid_default_main_program` . + Default None, meaning that using :ref:`api_base_default_main_program` . Returns: None @@ -961,58 +961,58 @@ def set_gradient_clip(clip, param_list=None, program=None): .. code-block:: python import paddle - import paddle.fluid as fluid + import paddle.base as base paddle.enable_static() def network(): image = paddle.static.data(name='image', shape=[ None, 28], dtype='float32') - param_attr1 = fluid.ParamAttr("fc1_param") - fc1 = fluid.layers.fc(image, size=10, param_attr=param_attr1) - param_attr2 = fluid.ParamAttr("fc2_param") - fc2 = fluid.layers.fc(fc1, size=10, param_attr=param_attr2) + param_attr1 = base.ParamAttr("fc1_param") + fc1 = base.layers.fc(image, size=10, param_attr=param_attr1) + param_attr2 = base.ParamAttr("fc2_param") + fc2 = base.layers.fc(fc1, size=10, param_attr=param_attr2) loss = paddle.mean(fc2) return loss # network 1: clip all parameter gradient - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): loss = network() paddle.nn.clip.set_gradient_clip( paddle.nn.ClipGradByGlobalNorm(clip_norm=2.0)) - sgd = fluid.optimizer.SGD(learning_rate=1e-3) + sgd = base.optimizer.SGD(learning_rate=1e-3) sgd.minimize(loss) # network 2: clip parameter gradient by name - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): loss = network() paddle.nn.clip.set_gradient_clip( paddle.nn.ClipGradByValue(min=-1.0, max=1.0), param_list=["fc1_param", "fc2_param"]) - sgd = fluid.optimizer.SGD(learning_rate=1e-3) + sgd = base.optimizer.SGD(learning_rate=1e-3) sgd.minimize(loss) # network 3: clip parameter gradient by value - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): loss = network() - param_var1 = fluid.default_main_program().global_block().var("fc1_param") - param_var2 = fluid.default_main_program().global_block().var("fc2_param") + param_var1 = base.default_main_program().global_block().var("fc1_param") + param_var2 = base.default_main_program().global_block().var("fc2_param") paddle.nn.clip.set_gradient_clip( 
paddle.nn.ClipGradByValue(min=-1.0, max=1.0), param_list=[param_var1, param_var2]) - sgd = fluid.optimizer.SGD(learning_rate=1e-3) + sgd = base.optimizer.SGD(learning_rate=1e-3) sgd.minimize(loss) # network 4: use 'set_gradient_clip' and 'optimize(grad_clip=clip)' together - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): loss = network() clip1 = paddle.nn.ClipGradByValue(min=-1.0, max=1.0) clip2 = paddle.nn.ClipGradByNorm(clip_norm=1.0) # Set the gradient clipping strategy: clip1 paddle.nn.clip.set_gradient_clip(clip1) # Set the gradient clipping strategy: clip2 - sgd = fluid.optimizer.SGD(learning_rate=1e-3, grad_clip=clip2) + sgd = base.optimizer.SGD(learning_rate=1e-3, grad_clip=clip2) sgd.minimize(loss) # 'set_gradient_clip' will not take effect when setting has a conflict, # and the gradient clipping strategy will be 'clip2' diff --git a/python/paddle/nn/decode.py b/python/paddle/nn/decode.py index 4563df700e411..be0a9fcc16dfa 100644 --- a/python/paddle/nn/decode.py +++ b/python/paddle/nn/decode.py @@ -22,7 +22,7 @@ from paddle.common_ops_import import default_main_program from paddle.framework import in_dynamic_mode -from ..fluid.data_feeder import convert_dtype +from ..base.data_feeder import convert_dtype __all__ = [] diff --git a/python/paddle/nn/functional/activation.py b/python/paddle/nn/functional/activation.py index bc8a49d3e8c3e..fd136e857dd56 100644 --- a/python/paddle/nn/functional/activation.py +++ b/python/paddle/nn/functional/activation.py @@ -17,9 +17,9 @@ from paddle.framework import core from paddle.utils.inplace_utils import inplace_apis_in_dygraph_only -from ...fluid.data_feeder import check_dtype, check_variable_and_dtype -from ...fluid.framework import convert_np_dtype_to_dtype_ -from ...fluid.layer_helper import LayerHelper +from ...base.data_feeder import check_dtype, check_variable_and_dtype +from ...base.framework import convert_np_dtype_to_dtype_ +from ...base.layer_helper import LayerHelper from ...tensor.manipulation import chunk from ...tensor.math import tanh # noqa: F401 from ...tensor.math import tanh_ # noqa: F401 diff --git a/python/paddle/nn/functional/common.py b/python/paddle/nn/functional/common.py index bc43e73c4163d..e231ea252f3f5 100644 --- a/python/paddle/nn/functional/common.py +++ b/python/paddle/nn/functional/common.py @@ -17,11 +17,11 @@ import paddle from paddle import _C_ops from paddle.common_ops_import import Variable, default_main_program -from paddle.fluid.layer_helper import LayerHelper +from paddle.base.layer_helper import LayerHelper from paddle.framework import core, in_dynamic_mode from paddle.tensor.creation import full -from ...fluid.data_feeder import ( +from ...base.data_feeder import ( check_dtype, check_type, check_variable_and_dtype, diff --git a/python/paddle/nn/functional/conv.py b/python/paddle/nn/functional/conv.py index 81af0d9be5f4f..5765a8d36b74c 100644 --- a/python/paddle/nn/functional/conv.py +++ b/python/paddle/nn/functional/conv.py @@ -18,14 +18,14 @@ is_compiled_with_cuda, is_compiled_with_rocm, ) -from paddle.fluid.framework import _global_flags +from paddle.base.framework import _global_flags from paddle.tensor.manipulation import reshape from paddle.tensor.math import _add_with_axis from ...common_ops_import import Variable from ...device import get_cudnn_version -from ...fluid.data_feeder import check_dtype, check_variable_and_dtype -from ...fluid.layer_helper import LayerHelper +from ...base.data_feeder import check_dtype, 
check_variable_and_dtype +from ...base.layer_helper import LayerHelper from ...framework import no_grad from ...tensor.manipulation import squeeze, unsqueeze from ...utils import ( diff --git a/python/paddle/nn/functional/distance.py b/python/paddle/nn/functional/distance.py index 64352cd051dea..dc69092daed08 100644 --- a/python/paddle/nn/functional/distance.py +++ b/python/paddle/nn/functional/distance.py @@ -16,8 +16,8 @@ from paddle import _C_ops from paddle.framework import in_dynamic_mode -from ...fluid.data_feeder import check_type, check_variable_and_dtype -from ...fluid.layer_helper import LayerHelper +from ...base.data_feeder import check_type, check_variable_and_dtype +from ...base.layer_helper import LayerHelper __all__ = [] @@ -71,7 +71,7 @@ def pairwise_distance(x, y, p=2.0, epsilon=1e-6, keepdim=False, name=None): sub = _C_ops.subtract(x, y) # p_norm op has not used epsilon, so change it to the following. if epsilon != 0.0: - epsilon = paddle.fluid.dygraph.base.to_variable( + epsilon = paddle.base.dygraph.base.to_variable( [epsilon], dtype=sub.dtype ) sub = _C_ops.add(sub, epsilon) diff --git a/python/paddle/nn/functional/extension.py b/python/paddle/nn/functional/extension.py index adfb976293987..9963c454720d6 100644 --- a/python/paddle/nn/functional/extension.py +++ b/python/paddle/nn/functional/extension.py @@ -19,12 +19,12 @@ from paddle import _C_ops, _legacy_C_ops, in_dynamic_mode from ...common_ops_import import Variable -from ...fluid.data_feeder import ( +from ...base.data_feeder import ( check_dtype, check_type, check_variable_and_dtype, ) -from ...fluid.layer_helper import LayerHelper +from ...base.layer_helper import LayerHelper from ...framework import convert_np_dtype_to_dtype_, core from ...tensor.creation import assign diff --git a/python/paddle/nn/functional/flash_attention.py b/python/paddle/nn/functional/flash_attention.py index 822348d5be852..11b85df5d1377 100644 --- a/python/paddle/nn/functional/flash_attention.py +++ b/python/paddle/nn/functional/flash_attention.py @@ -15,8 +15,8 @@ import paddle import paddle.nn.functional as F from paddle import _C_ops, in_dynamic_mode -from paddle.fluid.layer_helper import LayerHelper -from paddle.fluid.wrapped_decorator import signature_safe_contextmanager +from paddle.base.layer_helper import LayerHelper +from paddle.base.wrapped_decorator import signature_safe_contextmanager g_enable_math = None g_enable_flash = None diff --git a/python/paddle/nn/functional/input.py b/python/paddle/nn/functional/input.py index e9bb3d19fce32..57175ae79434d 100644 --- a/python/paddle/nn/functional/input.py +++ b/python/paddle/nn/functional/input.py @@ -15,8 +15,8 @@ from paddle import _C_ops from ...common_ops_import import Variable -from ...fluid.data_feeder import check_variable_and_dtype -from ...fluid.layer_helper import LayerHelper +from ...base.data_feeder import check_variable_and_dtype +from ...base.layer_helper import LayerHelper from ...framework import in_dynamic_mode __all__ = [] diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py index f0411d096dee4..d66381996a3cb 100644 --- a/python/paddle/nn/functional/loss.py +++ b/python/paddle/nn/functional/loss.py @@ -16,15 +16,15 @@ # TODO: define loss functions of neural network import paddle -from paddle import _C_ops, fluid, in_dynamic_mode +from paddle import _C_ops, base, in_dynamic_mode from paddle.framework import core from paddle.static.nn.control_flow import Assert from paddle.utils import deprecated from ...common_ops_import import 
Variable -from ...fluid.data_feeder import check_variable_and_dtype -from ...fluid.framework import _current_expected_place -from ...fluid.layer_helper import LayerHelper +from ...base.data_feeder import check_variable_and_dtype +from ...base.framework import _current_expected_place +from ...base.layer_helper import LayerHelper from ...tensor.manipulation import reshape __all__ = [] @@ -161,7 +161,7 @@ def log_loss(input, label, epsilon=1e-4, name=None): return loss -def fluid_softmax_with_cross_entropy( +def base_softmax_with_cross_entropy( logits, label, soft_label=False, @@ -380,7 +380,7 @@ def npair_loss(anchor, positive, labels, l2_reg=0.002): similarity_matrix = paddle.matmul( anchor, positive, transpose_x=False, transpose_y=True ) - softmax_ce = fluid_softmax_with_cross_entropy( + softmax_ce = base_softmax_with_cross_entropy( logits=similarity_matrix, label=labels, soft_label=True ) cross_entropy = paddle.sum(labels * softmax_ce, 0) @@ -1199,7 +1199,7 @@ def margin_ranking_loss( out = _C_ops.subtract(other, input) out = _C_ops.multiply(out, label) if margin != 0.0: - margin = fluid.dygraph.base.to_variable([margin], dtype=out.dtype) + margin = base.dygraph.base.to_variable([margin], dtype=out.dtype) out = _C_ops.add(out, margin) out = _C_ops.relu(out) if reduction == 'sum': @@ -1679,13 +1679,13 @@ def kl_div(input, label, reduction='mean', name=None): """ # ugly type promotion if ( - fluid.data_feeder.convert_dtype(input.dtype) == 'float32' - and fluid.data_feeder.convert_dtype(label.dtype) == 'float64' + base.data_feeder.convert_dtype(input.dtype) == 'float32' + and base.data_feeder.convert_dtype(label.dtype) == 'float64' ): input = paddle.cast(input, 'float64') elif ( - fluid.data_feeder.convert_dtype(input.dtype) == 'float64' - and fluid.data_feeder.convert_dtype(label.dtype) == 'float32' + base.data_feeder.convert_dtype(input.dtype) == 'float64' + and base.data_feeder.convert_dtype(label.dtype) == 'float32' ): label = paddle.cast(label, 'float64') @@ -1709,7 +1709,7 @@ def kl_div(input, label, reduction='mean', name=None): check_variable_and_dtype( label, 'label', ['float32', 'float64'], 'kl_div' ) - fluid.data_feeder.check_type(reduction, 'reduction', str, 'kl_div') + base.data_feeder.check_type(reduction, 'reduction', str, 'kl_div') loss = helper.create_variable_for_type_inference(dtype=input.dtype) helper.append_op( @@ -2483,7 +2483,7 @@ def softmax_with_cross_entropy( [1.15328646]) """ - return fluid_softmax_with_cross_entropy( + return base_softmax_with_cross_entropy( logits, label, soft_label, @@ -2823,13 +2823,13 @@ def cross_entropy( out = _C_ops.multiply(out, weight_gather_reshape) if reduction == "sum": - # because of fluid_softmax_with_cross_entropy op's inner logic, + # because of base_softmax_with_cross_entropy op's inner logic, # in the out tensor of this op, the loss of sample with class_index==ignore_index is 0 # so, reduce_sum all directly is ok return _C_ops.sum(out, [], None, False) elif reduction == "mean": # 1. if weight==none, - # numerator: reduce_sum all loss directly is ok causeof fluid_softmax_with_cross_entropy's inner logic + # numerator: reduce_sum all loss directly is ok causeof base_softmax_with_cross_entropy's inner logic # denominator: count sample num with class_index!=ignore_index # 2. 
else # numerator: loss's weighted sum @@ -3126,7 +3126,7 @@ def sigmoid_focal_loss( ), ) - alpha = fluid.dygraph.base.to_variable([alpha], dtype=loss.dtype) + alpha = base.dygraph.base.to_variable([alpha], dtype=loss.dtype) alpha_t = _C_ops.add( _C_ops.multiply(alpha, label), _C_ops.multiply( @@ -3135,7 +3135,7 @@ def sigmoid_focal_loss( ) loss = _C_ops.multiply(alpha_t, loss) - gamma = fluid.dygraph.base.to_variable([gamma], dtype=loss.dtype) + gamma = base.dygraph.base.to_variable([gamma], dtype=loss.dtype) gamma_t = _C_ops.pow(_C_ops.subtract(one, p_t), gamma) loss = _C_ops.multiply(gamma_t, loss) @@ -3993,10 +3993,10 @@ def soft_margin_loss(input, label, reduction='mean', name=None): ) if not in_dynamic_mode(): - fluid.data_feeder.check_variable_and_dtype( + base.data_feeder.check_variable_and_dtype( input, 'input', ['float32', 'float64'], 'soft_margin_loss' ) - fluid.data_feeder.check_variable_and_dtype( + base.data_feeder.check_variable_and_dtype( label, 'label', ['int32', 'int64', 'float32', 'float64'], diff --git a/python/paddle/nn/functional/norm.py b/python/paddle/nn/functional/norm.py index 81ae8efd37b05..a510c1ac625f6 100644 --- a/python/paddle/nn/functional/norm.py +++ b/python/paddle/nn/functional/norm.py @@ -16,11 +16,11 @@ # TODO: define normalization api import paddle -from paddle import _C_ops, fluid, in_dynamic_mode -from paddle.fluid.framework import in_dygraph_mode +from paddle import _C_ops, base, in_dynamic_mode +from paddle.base.framework import in_dygraph_mode -from ...fluid.data_feeder import check_type, check_variable_and_dtype -from ...fluid.layer_helper import LayerHelper +from ...base.data_feeder import check_type, check_variable_and_dtype +from ...base.layer_helper import LayerHelper __all__ = [] @@ -79,7 +79,7 @@ def normalize(x, p=2, axis=1, epsilon=1e-12, name=None): """ if in_dygraph_mode(): - eps = fluid.dygraph.base.to_variable([epsilon], dtype=x.dtype) + eps = base.dygraph.base.to_variable([epsilon], dtype=x.dtype) out = _C_ops.p_norm(x, float(p), axis, epsilon, True, False) return x / _C_ops.maximum(out, eps) @@ -236,7 +236,7 @@ def batch_norm( } helper = LayerHelper('batch_norm', **locals()) - from paddle.fluid.data_feeder import convert_dtype + from paddle.base.data_feeder import convert_dtype param_dtype = ( x.dtype @@ -361,7 +361,7 @@ def layer_norm( # create output helper = LayerHelper('layer_norm', **locals()) - from paddle.fluid.data_feeder import convert_dtype + from paddle.base.data_feeder import convert_dtype param_dtype = ( x.dtype if convert_dtype(x.dtype) != 'float16' else 'float32' diff --git a/python/paddle/nn/functional/pooling.py b/python/paddle/nn/functional/pooling.py index 955d63469d399..2f4ac35cbb078 100755 --- a/python/paddle/nn/functional/pooling.py +++ b/python/paddle/nn/functional/pooling.py @@ -15,10 +15,10 @@ import numpy as np from paddle import _C_ops, _legacy_C_ops, in_dynamic_mode -from paddle.fluid.framework import Variable, in_dygraph_mode +from paddle.base.framework import Variable, in_dygraph_mode -from ...fluid.data_feeder import check_type, check_variable_and_dtype -from ...fluid.layers import LayerHelper +from ...base.data_feeder import check_type, check_variable_and_dtype +from ...base.layers import LayerHelper from ...tensor.manipulation import squeeze, unsqueeze # TODO: define pooling functions diff --git a/python/paddle/nn/functional/sparse_attention.py b/python/paddle/nn/functional/sparse_attention.py index bef511a3fa859..1d5f5013435bb 100644 --- a/python/paddle/nn/functional/sparse_attention.py +++ 
b/python/paddle/nn/functional/sparse_attention.py @@ -13,7 +13,7 @@ # limitations under the License. from paddle import _legacy_C_ops, in_dynamic_mode -from paddle.fluid.layer_helper import LayerHelper +from paddle.base.layer_helper import LayerHelper def sparse_attention( diff --git a/python/paddle/nn/functional/vision.py b/python/paddle/nn/functional/vision.py index 37975595bd61e..c382349d4622e 100644 --- a/python/paddle/nn/functional/vision.py +++ b/python/paddle/nn/functional/vision.py @@ -13,12 +13,12 @@ # limitations under the License. from paddle import _C_ops, _legacy_C_ops, in_dynamic_mode -from paddle.fluid.framework import in_dygraph_mode +from paddle.base.framework import in_dygraph_mode from ...common_ops_import import Variable from ...device import get_cudnn_version, is_compiled_with_rocm -from ...fluid.data_feeder import check_variable_and_dtype -from ...fluid.layer_helper import LayerHelper +from ...base.data_feeder import check_variable_and_dtype +from ...base.layer_helper import LayerHelper __all__ = [] diff --git a/python/paddle/nn/initializer/Bilinear.py b/python/paddle/nn/initializer/Bilinear.py index 403f8773f15b1..c0a1fe4b31ab8 100644 --- a/python/paddle/nn/initializer/Bilinear.py +++ b/python/paddle/nn/initializer/Bilinear.py @@ -16,8 +16,8 @@ from paddle import _C_ops -from ...fluid import core, framework, unique_name -from ...fluid.framework import _current_expected_place, in_dygraph_mode +from ...base import core, framework, unique_name +from ...base.framework import _current_expected_place, in_dygraph_mode from .initializer import Initializer __all__ = [] diff --git a/python/paddle/nn/initializer/__init__.py b/python/paddle/nn/initializer/__init__.py index 6ef516c8b6af5..adc81e5bbfd5d 100644 --- a/python/paddle/nn/initializer/__init__.py +++ b/python/paddle/nn/initializer/__init__.py @@ -13,7 +13,7 @@ # limitations under the License. 
# TODO: define the initializers to create a Parameter in neural network -from ...fluid.initializer import set_global_initializer # noqa: F401 +from ...base.initializer import set_global_initializer # noqa: F401 from .Bilinear import Bilinear # noqa: F401 diff --git a/python/paddle/nn/initializer/assign.py b/python/paddle/nn/initializer/assign.py index b85f3e7509fa8..a1cd06cab59b4 100644 --- a/python/paddle/nn/initializer/assign.py +++ b/python/paddle/nn/initializer/assign.py @@ -14,9 +14,9 @@ import paddle from paddle import _C_ops -from ...fluid import core, framework, unique_name -from ...fluid.data_feeder import check_type -from ...fluid.framework import _current_expected_place, in_dygraph_mode +from ...base import core, framework, unique_name +from ...base.data_feeder import check_type +from ...base.framework import _current_expected_place, in_dygraph_mode from .initializer import Initializer __all__ = [] diff --git a/python/paddle/nn/initializer/constant.py b/python/paddle/nn/initializer/constant.py index 355bac8b784e3..745ac44ac2273 100644 --- a/python/paddle/nn/initializer/constant.py +++ b/python/paddle/nn/initializer/constant.py @@ -14,8 +14,8 @@ from paddle import _C_ops -from ...fluid import core, framework -from ...fluid.framework import _current_expected_place, in_dygraph_mode +from ...base import core, framework +from ...base.framework import _current_expected_place, in_dygraph_mode # TODO: define the initializers of Constant in neural network from .initializer import Initializer diff --git a/python/paddle/nn/initializer/dirac.py b/python/paddle/nn/initializer/dirac.py index cb1c5efbf3a91..3673a20ddbf61 100644 --- a/python/paddle/nn/initializer/dirac.py +++ b/python/paddle/nn/initializer/dirac.py @@ -15,11 +15,11 @@ from paddle import _C_ops, in_dynamic_mode from paddle.utils import unique_name -from ... import fluid -from ...fluid import framework -from ...fluid.core import VarDesc -from ...fluid.data_feeder import check_variable_and_dtype -from ...fluid.framework import _current_expected_place +from ... 
import base +from ...base import framework +from ...base.core import VarDesc +from ...base.data_feeder import check_variable_and_dtype +from ...base.framework import _current_expected_place from .initializer import Initializer __all__ = [] @@ -133,7 +133,7 @@ def __call__(self, var, block=None): out_var = var op = None if framework.in_dygraph_mode(): - with fluid.dygraph.no_grad(): + with base.dygraph.no_grad(): place = _current_expected_place() _C_ops.full_( out_var, out_var.shape, str(float(0)), out_var.dtype, place @@ -176,7 +176,7 @@ def __call__(self, var, block=None): offset += origin_shape[k] // 2 * stride idx_list.append(offset) if framework.in_dygraph_mode(): - with fluid.dygraph.no_grad(): + with base.dygraph.no_grad(): tmp_out = _C_ops.reshape(out_var, [-1]) tmp_out._share_underline_tensor_to(out_var) else: @@ -203,7 +203,7 @@ def __call__(self, var, block=None): ) if framework.in_dygraph_mode(): - with fluid.dygraph.no_grad(): + with base.dygraph.no_grad(): tmp_tensor = framework._create_tensor() _C_ops.assign_value_( tmp_tensor, @@ -232,7 +232,7 @@ def __call__(self, var, block=None): ) if framework.in_dygraph_mode(): - with fluid.dygraph.no_grad(): + with base.dygraph.no_grad(): tmp_tensor = framework._create_tensor() _C_ops.assign_value_( tmp_tensor, @@ -256,7 +256,7 @@ def __call__(self, var, block=None): ) if framework.in_dygraph_mode(): - with fluid.dygraph.no_grad(): + with base.dygraph.no_grad(): tmp_out = _C_ops.scatter( out_var, index_tensor, value_tensor, True ) diff --git a/python/paddle/nn/initializer/initializer.py b/python/paddle/nn/initializer/initializer.py index 9d5880aa09561..c77d6ae466637 100644 --- a/python/paddle/nn/initializer/initializer.py +++ b/python/paddle/nn/initializer/initializer.py @@ -17,7 +17,7 @@ import numpy as np -from ...fluid.framework import default_main_program, in_dygraph_mode +from ...base.framework import default_main_program, in_dygraph_mode from .lazy_init import lazy_init_helper __all__ = [] diff --git a/python/paddle/nn/initializer/kaiming.py b/python/paddle/nn/initializer/kaiming.py index 1f519a31f77c3..752185ea819a1 100644 --- a/python/paddle/nn/initializer/kaiming.py +++ b/python/paddle/nn/initializer/kaiming.py @@ -17,8 +17,8 @@ from paddle import _C_ops -from ...fluid import core, framework, unique_name -from ...fluid.framework import _current_expected_place, in_dygraph_mode +from ...base import core, framework, unique_name +from ...base.framework import _current_expected_place, in_dygraph_mode from .initializer import Initializer, calculate_gain __all__ = [] diff --git a/python/paddle/nn/initializer/lazy_init.py b/python/paddle/nn/initializer/lazy_init.py index e2321f682f77e..7c67649738203 100644 --- a/python/paddle/nn/initializer/lazy_init.py +++ b/python/paddle/nn/initializer/lazy_init.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from ...fluid import framework +from ...base import framework __all__ = ["LazyGuard"] diff --git a/python/paddle/nn/initializer/normal.py b/python/paddle/nn/initializer/normal.py index d735782b3090e..c1bcb89f676f7 100644 --- a/python/paddle/nn/initializer/normal.py +++ b/python/paddle/nn/initializer/normal.py @@ -14,9 +14,9 @@ from paddle import _C_ops -from ...fluid import core, framework, unique_name -from ...fluid.data_feeder import check_variable_and_dtype -from ...fluid.framework import _current_expected_place, in_dygraph_mode +from ...base import core, framework, unique_name +from ...base.data_feeder import check_variable_and_dtype +from ...base.framework import _current_expected_place, in_dygraph_mode from .initializer import Initializer __all__ = [] diff --git a/python/paddle/nn/initializer/orthogonal.py b/python/paddle/nn/initializer/orthogonal.py index 035e13d5c7449..ed16e75827e14 100644 --- a/python/paddle/nn/initializer/orthogonal.py +++ b/python/paddle/nn/initializer/orthogonal.py @@ -15,9 +15,9 @@ from paddle import _C_ops from paddle.utils import unique_name -from ...fluid import framework -from ...fluid.data_feeder import check_variable_and_dtype -from ...fluid.dygraph import no_grad +from ...base import framework +from ...base.data_feeder import check_variable_and_dtype +from ...base.dygraph import no_grad from .initializer import Initializer __all__ = [] diff --git a/python/paddle/nn/initializer/uniform.py b/python/paddle/nn/initializer/uniform.py index a46ae2a41e3cd..f5dfa1fb00310 100644 --- a/python/paddle/nn/initializer/uniform.py +++ b/python/paddle/nn/initializer/uniform.py @@ -14,9 +14,9 @@ from paddle import _C_ops -from ...fluid import core, framework, unique_name -from ...fluid.data_feeder import check_variable_and_dtype -from ...fluid.framework import _current_expected_place, in_dygraph_mode +from ...base import core, framework, unique_name +from ...base.data_feeder import check_variable_and_dtype +from ...base.framework import _current_expected_place, in_dygraph_mode from .initializer import Initializer __all__ = [] diff --git a/python/paddle/nn/initializer/xavier.py b/python/paddle/nn/initializer/xavier.py index 60242ecf5b27c..147328c222c46 100644 --- a/python/paddle/nn/initializer/xavier.py +++ b/python/paddle/nn/initializer/xavier.py @@ -16,9 +16,9 @@ from paddle import _C_ops -from ...fluid import core, framework, unique_name -from ...fluid.data_feeder import check_variable_and_dtype -from ...fluid.framework import _current_expected_place, in_dygraph_mode +from ...base import core, framework, unique_name +from ...base.data_feeder import check_variable_and_dtype +from ...base.framework import _current_expected_place, in_dygraph_mode from .initializer import Initializer __all__ = [] diff --git a/python/paddle/nn/layer/container.py b/python/paddle/nn/layer/container.py index 750e65e349d74..4768ecf08a140 100644 --- a/python/paddle/nn/layer/container.py +++ b/python/paddle/nn/layer/container.py @@ -15,8 +15,8 @@ from collections import OrderedDict from collections.abc import Iterable, Mapping -from ...fluid.dygraph.base import param_guard -from ...fluid.framework import Parameter +from ...base.dygraph.base import param_guard +from ...base.framework import Parameter from .layers import Layer __all__ = [] diff --git a/python/paddle/nn/layer/layers.py b/python/paddle/nn/layer/layers.py index 2e3bd18012955..8908b38defec4 100644 --- a/python/paddle/nn/layer/layers.py +++ b/python/paddle/nn/layer/layers.py @@ -23,26 +23,26 @@ import paddle from paddle import nn, 
profiler -from paddle.fluid import core, framework, unique_name -from paddle.fluid.core import VarDesc -from paddle.fluid.dygraph import no_grad -from paddle.fluid.dygraph.base import ( +from paddle.base import core, framework, unique_name +from paddle.base.core import VarDesc +from paddle.base.dygraph import no_grad +from paddle.base.dygraph.base import ( _convert_into_variable, in_declarative_mode, program_desc_tracing_guard, ) -from paddle.fluid.dygraph_utils import _append_activation_in_dygraph -from paddle.fluid.executor import Executor, global_scope -from paddle.fluid.framework import Parameter, Program -from paddle.fluid.framework import _current_expected_place as _get_device -from paddle.fluid.framework import ( +from paddle.base.dygraph_utils import _append_activation_in_dygraph +from paddle.base.executor import Executor, global_scope +from paddle.base.framework import Parameter, Program +from paddle.base.framework import _current_expected_place as _get_device +from paddle.base.framework import ( _global_flags, convert_np_dtype_to_dtype_, default_main_program, in_dygraph_mode, ) -from paddle.fluid.layer_helper_base import LayerHelperBase -from paddle.fluid.param_attr import ParamAttr +from paddle.base.layer_helper_base import LayerHelperBase +from paddle.base.param_attr import ParamAttr from paddle.profiler.utils import in_profiler_mode from paddle.utils import deprecated @@ -2164,7 +2164,7 @@ def _transform(self, t, device, dtype, blocking): # 2. cast param / Tensor to dtype if dtype is not None and dtype != t_used.dtype: - with paddle.fluid.framework._dygraph_place_guard( + with paddle.base.framework._dygraph_place_guard( place=t_used.place ): t_casted = t_used.cast(dtype=dtype) diff --git a/python/paddle/nn/layer/loss.py b/python/paddle/nn/layer/loss.py index b4da52f4ad2ad..eb2a66caedf77 100644 --- a/python/paddle/nn/layer/loss.py +++ b/python/paddle/nn/layer/loss.py @@ -15,8 +15,8 @@ import paddle # TODO: define loss functions of neural network -from paddle import fluid, in_dynamic_mode -from paddle.fluid.framework import in_dygraph_mode +from paddle import base, in_dynamic_mode +from paddle.base.framework import in_dygraph_mode from .. import functional as F from .layers import Layer @@ -583,10 +583,10 @@ def __init__(self, reduction='mean'): def forward(self, input, label): if not in_dynamic_mode(): - fluid.data_feeder.check_variable_and_dtype( + base.data_feeder.check_variable_and_dtype( input, 'input', ['float32', 'float64'], 'MSELoss' ) - fluid.data_feeder.check_variable_and_dtype( + base.data_feeder.check_variable_and_dtype( label, 'label', ['float32', 'float64'], 'MSELoss' ) diff --git a/python/paddle/nn/layer/norm.py b/python/paddle/nn/layer/norm.py index e01e426a75ff2..330e468afcf68 100644 --- a/python/paddle/nn/layer/norm.py +++ b/python/paddle/nn/layer/norm.py @@ -35,8 +35,8 @@ from paddle import _C_ops, in_dynamic_mode from paddle.device import get_all_custom_device_type -from ...fluid import dygraph_utils -from ...fluid.data_feeder import check_variable_and_dtype +from ...base import dygraph_utils +from ...base.data_feeder import check_variable_and_dtype from ...framework import ParamAttr, _global_flags, get_default_dtype, no_grad from .. import functional as F from ..functional import batch_norm, instance_norm, layer_norm @@ -955,14 +955,14 @@ class BatchNorm(Layer): Examples: .. 
code-block:: python - >>> import paddle.fluid as fluid + >>> import paddle.base as base >>> import paddle.nn as nn - >>> from paddle.fluid.dygraph.base import to_variable + >>> from paddle.base.dygraph.base import to_variable >>> import numpy as np >>> x = np.random.random(size=(3, 10, 3, 7)).astype('float32') - >>> with fluid.dygraph.guard(): + >>> with base.dygraph.guard(): ... x = to_variable(x) ... batch_norm = nn.layer.norm.BatchNorm(10) ... hidden1 = batch_norm(x) diff --git a/python/paddle/nn/layer/rnn.py b/python/paddle/nn/layer/rnn.py index a2122c2dab3b7..df8800555992e 100644 --- a/python/paddle/nn/layer/rnn.py +++ b/python/paddle/nn/layer/rnn.py @@ -21,9 +21,9 @@ import paddle from paddle import _C_ops, _legacy_C_ops, framework, in_dynamic_mode from paddle.common_ops_import import Variable -from paddle.fluid.data_feeder import check_type, check_variable_and_dtype -from paddle.fluid.dygraph.base import NON_PERSISTABLE_VAR_NAME_SUFFIX -from paddle.fluid.framework import ( +from paddle.base.data_feeder import check_type, check_variable_and_dtype +from paddle.base.dygraph.base import NON_PERSISTABLE_VAR_NAME_SUFFIX +from paddle.base.framework import ( default_startup_program, in_dygraph_mode, program_guard, @@ -272,7 +272,7 @@ def _switch_grad(x, stop=False): ) mask = paddle.reverse(mask, axis=[0]) if sequence_length else None - with paddle.fluid.framework.device_guard("cpu"): + with paddle.base.framework.device_guard("cpu"): start_i = paddle.zeros([], dtype="int64") end = max_seq_len @@ -296,12 +296,12 @@ def _switch_grad(x, stop=False): with while_op.block(): step_in = inputs[start_i] - # step_in = paddle.fluid.layers.Print( step_in, message="step in") + # step_in = paddle.base.layers.Print( step_in, message="step in") pre_state = paddle.utils.map_structure( lambda x: paddle.tensor.array_read(x, start_i), init_array ) outputs, new_states = cell(step_in, pre_state, **kwargs) - assert isinstance(outputs, paddle.fluid.framework.Variable) + assert isinstance(outputs, paddle.base.framework.Variable) paddle.utils.assert_same_structure(new_states, pre_state) if sequence_length: step_mask = paddle.unsqueeze(mask[start_i], 1) @@ -317,7 +317,7 @@ def _switch_grad(x, stop=False): paddle.tensor.array_write(outputs, start_i, out_array) - with paddle.fluid.framework.device_guard("cpu"): + with paddle.base.framework.device_guard("cpu"): start_i = paddle.tensor.increment(x=start_i, value=1) paddle.utils.map_structure( lambda x, y: paddle.tensor.array_write(x, start_i, y), @@ -325,7 +325,7 @@ def _switch_grad(x, stop=False): init_array, ) - with paddle.fluid.framework.device_guard("cpu"): + with paddle.base.framework.device_guard("cpu"): new_cond = paddle.tensor.less_than(start_i, end) paddle.assign(new_cond, cond) @@ -740,7 +740,7 @@ class SimpleRNNCell(RNNCellBase): - **states** (Tensor): shape `[batch_size, hidden_size]`, the new hidden state, corresponding to :math:`h_{t}` in the formula. Notes: - All the weights and bias are initialized with `Uniform(-std, std)` by default. Where std = :math:`\frac{1}{\sqrt{hidden\_size}}`. For more information about parameter initialization, please refer to :ref:`api_fluid_ParamAttr`. + All the weights and bias are initialized with `Uniform(-std, std)` by default. Where std = :math:`\frac{1}{\sqrt{hidden\_size}}`. For more information about parameter initialization, please refer to :ref:`api_base_ParamAttr`. Examples: @@ -893,7 +893,7 @@ class LSTMCell(RNNCellBase): Notes: All the weights and bias are initialized with `Uniform(-std, std)` by default. 
Where std = :math:`\frac{1}{\sqrt{hidden\_size}}`. For more - information about parameter initialization, please refer to :ref:`api_fluid_ParamAttr`. + information about parameter initialization, please refer to :ref:`api_base_ParamAttr`. Examples: @@ -1054,7 +1054,7 @@ class GRUCell(RNNCellBase): Notes: All the weights and bias are initialized with `Uniform(-std, std)` by default. Where std = :math:`\frac{1}{\sqrt{hidden\_size}}`. For more - information about parameter initialization, please refer to s:ref:`api_fluid_ParamAttr`. + information about parameter initialization, please refer to :ref:`api_base_ParamAttr`. Examples: diff --git a/python/paddle/nn/layer/transformer.py b/python/paddle/nn/layer/transformer.py index e2e3f052240a4..d9260f9b22911 100644 --- a/python/paddle/nn/layer/transformer.py +++ b/python/paddle/nn/layer/transformer.py @@ -20,7 +20,7 @@ import numpy as np import paddle -from paddle.fluid.data_feeder import convert_dtype +from paddle.base.data_feeder import convert_dtype from ... import tensor from ...framework import ParamAttr @@ -800,7 +800,7 @@ class TransformerDecoderLayer(Layer): for linear in FFN. Otherwise, the three sub-layers all uses it as `weight_attr` to create parameters. Default: None, which means the default weight parameter property is used. See usage for details - in :ref:`api_paddle_fluid_param_attr_ParamAttr` . + in :ref:`api_paddle_base_param_attr_ParamAttr` . bias_attr (ParamAttr|list|tuple|bool, optional): To specify the bias parameter property. If it is a list/tuple, `bias_attr[0]` would be used as `bias_attr` for self attention, `bias_attr[1]` would be used as `bias_attr` for diff --git a/python/paddle/nn/quant/quant_layers.py b/python/paddle/nn/quant/quant_layers.py index 0df2c1c2c5943..a83cbef801f94 100644 --- a/python/paddle/nn/quant/quant_layers.py +++ b/python/paddle/nn/quant/quant_layers.py @@ -16,9 +16,9 @@ import paddle from paddle import _legacy_C_ops, in_dynamic_mode -from paddle.fluid.data_feeder import check_variable_and_dtype -from paddle.fluid.framework import _create_tensor -from paddle.fluid.log_helper import get_logger +from paddle.base.data_feeder import check_variable_and_dtype +from paddle.base.framework import _create_tensor +from paddle.base.log_helper import get_logger from paddle.framework import ParamAttr, core from paddle.nn import Layer from paddle.nn import functional as F diff --git a/python/paddle/nn/utils/transform_parameters.py b/python/paddle/nn/utils/transform_parameters.py index 16a5045bb05a0..c3f9562e544e8 100644 --- a/python/paddle/nn/utils/transform_parameters.py +++ b/python/paddle/nn/utils/transform_parameters.py @@ -16,7 +16,7 @@ import paddle from paddle import _C_ops -from paddle.fluid.framework import ( +from paddle.base.framework import ( _create_tensor, _dygraph_tracer, dygraph_only, @@ -28,7 +28,7 @@ def _inplace_reshape_dygraph(x, shape): x_shape = _create_tensor(dtype='int64') if in_dygraph_mode(): - with paddle.fluid.dygraph.no_grad(): + with paddle.base.dygraph.no_grad(): tmp_out = _C_ops.reshape(x, shape) tmp_out._share_underline_tensor_to(x) else: @@ -68,7 +68,7 @@ def _stride_column(param): """ assert len(param.shape) == 2 shape = [param.shape[1], param.shape[0]] - with paddle.fluid.dygraph.no_grad(): + with paddle.base.dygraph.no_grad(): reshape_var = paddle.reshape(param, shape) transpose_var = paddle.transpose(reshape_var, [1, 0]) transpose_var._share_underline_tensor_to(param) @@ -108,7 +108,7 @@ def parameters_to_vector(parameters, name=None): out = _create_tensor(dtype=dtype) if
in_dygraph_mode(): - with paddle.fluid.dygraph.no_grad(): + with paddle.base.dygraph.no_grad(): tmp = _C_ops.concat(parameters, 0) tmp._share_underline_tensor_to(out) else: @@ -163,7 +163,7 @@ def vector_to_parameters(vec, parameters, name=None): sections.append(0) if in_dygraph_mode(): - with paddle.fluid.dygraph.no_grad(): + with paddle.base.dygraph.no_grad(): res = _C_ops.split(vec, sections, 0) for i in range(0, len(parameters)): res[i]._share_underline_tensor_to(parameters[i]) diff --git a/python/paddle/nn/utils/weight_norm_hook.py b/python/paddle/nn/utils/weight_norm_hook.py index 3747a14cc118a..4ef5fdf2deefd 100644 --- a/python/paddle/nn/utils/weight_norm_hook.py +++ b/python/paddle/nn/utils/weight_norm_hook.py @@ -14,8 +14,8 @@ import paddle from paddle import _C_ops -from ...fluid.data_feeder import check_variable_and_dtype -from ...fluid.layer_helper import LayerHelper +from ...base.data_feeder import check_variable_and_dtype +from ...base.layer_helper import LayerHelper from ...framework import in_dynamic_mode __all__ = [] diff --git a/python/paddle/optimizer/adadelta.py b/python/paddle/optimizer/adadelta.py index e4826997761c1..3523ece1e831f 100644 --- a/python/paddle/optimizer/adadelta.py +++ b/python/paddle/optimizer/adadelta.py @@ -16,8 +16,8 @@ from paddle import _C_ops -from ..fluid import framework -from ..fluid.dygraph import no_grad +from ..base import framework +from ..base.dygraph import no_grad from ..framework import in_dynamic_mode from .optimizer import Optimizer @@ -54,15 +54,15 @@ class Adadelta(Optimizer): The default value is None in static graph mode, at this time all parameters will be updated. weight_decay (float|WeightDecayRegularizer, optional): The strategy of regularization. \ It canbe a float value as coeff of L2 regularization or \ - :ref:`api_fluid_regularizer_L1Decay`, :ref:`api_fluid_regularizer_L2Decay`. - If a parameter has set regularizer using :ref:`api_fluid_ParamAttr` already, \ + :ref:`api_base_regularizer_L1Decay`, :ref:`api_base_regularizer_L2Decay`. + If a parameter has set regularizer using :ref:`api_base_ParamAttr` already, \ the regularization setting here in optimizer will be ignored for this parameter. \ Otherwise, the regularization setting here in optimizer will take effect. \ Default None, meaning there is no regularization. grad_clip (GradientClipBase, optional): Gradient cliping strategy, it's an instance of some derived class of ``GradientClipBase`` . There are three cliping strategies - ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , - :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping. + ( :ref:`api_base_clip_GradientClipByGlobalNorm` , :ref:`api_base_clip_GradientClipByNorm` , + :ref:`api_base_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping. name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` . diff --git a/python/paddle/optimizer/adagrad.py b/python/paddle/optimizer/adagrad.py index 3373866ba048a..eae910870ae53 100644 --- a/python/paddle/optimizer/adagrad.py +++ b/python/paddle/optimizer/adagrad.py @@ -13,7 +13,7 @@ # limitations under the License. 
import warnings -from ..fluid import framework +from ..base import framework from .optimizer import Optimizer __all__ = [] @@ -53,7 +53,7 @@ class Adagrad(Optimizer): weight_decay (float|WeightDecayRegularizer, optional): The strategy of regularization. It canbe a float value as coeff of L2 regularization or :ref:`api_paddle_regularizer_L1Decay`, :ref:`api_paddle_regularizer_L2Decay`. - If a parameter has set regularizer using :ref:`api_paddle_fluid_param_attr_aramAttr` already, + If a parameter has set regularizer using :ref:`api_paddle_base_param_attr_ParamAttr` already, the regularization setting here in optimizer will be ignored for this parameter. Otherwise, the regularization setting here in optimizer will take effect. Default None, meaning there is no regularization. diff --git a/python/paddle/optimizer/adam.py b/python/paddle/optimizer/adam.py index f58b82a13188c..75a1415690445 100644 --- a/python/paddle/optimizer/adam.py +++ b/python/paddle/optimizer/adam.py @@ -18,9 +18,9 @@ import paddle from paddle import _C_ops -from ..fluid import core, framework -from ..fluid.dygraph import base as imperative_base -from ..fluid.framework import Variable, in_dygraph_mode +from ..base import core, framework +from ..base.dygraph import base as imperative_base +from ..base.framework import Variable, in_dygraph_mode from .optimizer import Optimizer __all__ = [] @@ -72,15 +72,15 @@ class Adam(Optimizer): The default value is None in static graph mode, at this time all parameters will be updated. weight_decay (float|WeightDecayRegularizer, optional): The strategy of regularization. It canbe a float value as coeff of L2 regularization or - :ref:`api_fluid_regularizer_L1Decay`, :ref:`api_fluid_regularizer_L2Decay`. - If a parameter has set regularizer using :ref:`api_fluid_ParamAttr` already, + :ref:`api_base_regularizer_L1Decay`, :ref:`api_base_regularizer_L2Decay`. + If a parameter has set regularizer using :ref:`api_base_ParamAttr` already, the regularization setting here in optimizer will be ignored for this parameter. Otherwise, the regularization setting here in optimizer will take effect. Default None, meaning there is no regularization. grad_clip (GradientClipBase, optional): Gradient cliping strategy, it's an instance of some derived class of ``GradientClipBase`` . There are three cliping strategies - ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , - :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping. + ( :ref:`api_base_clip_GradientClipByGlobalNorm` , :ref:`api_base_clip_GradientClipByNorm` , + :ref:`api_base_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping. lazy_mode (bool, optional): The official Adam algorithm has two moving-average accumulators. The accumulators are updated at every step. Every element of the two moving-average is updated in both dense mode and sparse mode.
If the size of parameter is very large, @@ -419,7 +419,7 @@ def step(self): >>> adam.step() >>> adam.clear_grad() """ - if paddle.fluid.dygraph.base.in_declarative_mode(): + if paddle.base.dygraph.base.in_declarative_mode(): self._declarative_step() return diff --git a/python/paddle/optimizer/adamax.py b/python/paddle/optimizer/adamax.py index 26988e9d3c96a..ff7ce4a232224 100644 --- a/python/paddle/optimizer/adamax.py +++ b/python/paddle/optimizer/adamax.py @@ -16,9 +16,9 @@ from paddle import _C_ops -from ..fluid import core, framework -from ..fluid.dygraph import no_grad -from ..fluid.framework import name_scope +from ..base import core, framework +from ..base.dygraph import no_grad +from ..base.framework import name_scope from .optimizer import Optimizer __all__ = [] @@ -67,15 +67,15 @@ class Adamax(Optimizer): The default value is None in static graph mode, at this time all parameters will be updated. weight_decay (float|WeightDecayRegularizer, optional): The strategy of regularization. It can be a float value as coeff of L2 regularization or - :ref:`api_fluid_regularizer_L1Decay`, :ref:`api_fluid_regularizer_L2Decay`. - If a parameter has set regularizer using :ref:`api_fluid_ParamAttr` already, + :ref:`api_base_regularizer_L1Decay`, :ref:`api_base_regularizer_L2Decay`. + If a parameter has set regularizer using :ref:`api_base_ParamAttr` already, the regularization setting here in optimizer will be ignored for this parameter. Otherwise, the regularization setting here in optimizer will take effect. Default None, meaning there is no regularization. grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of some derived class of ``GradientClipBase`` . There are three clipping strategies - ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , - :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping. + ( :ref:`api_base_clip_GradientClipByGlobalNorm` , :ref:`api_base_clip_GradientClipByNorm` , + :ref:`api_base_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping. name (str, optional): Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. The default value is None. diff --git a/python/paddle/optimizer/adamw.py b/python/paddle/optimizer/adamw.py index 6f61e5ddd5b7c..823358bc1a717 100644 --- a/python/paddle/optimizer/adamw.py +++ b/python/paddle/optimizer/adamw.py @@ -19,9 +19,9 @@ import paddle from .. import _C_ops -from ..fluid import core, framework -from ..fluid.dygraph import base as imperative_base -from ..fluid.framework import Parameter, Variable +from ..base import core, framework +from ..base.dygraph import base as imperative_base +from ..base.framework import Parameter, Variable from ..nn.clip import GradientClipBase from .lr import LRScheduler from .optimizer import Optimizer @@ -77,8 +77,8 @@ class AdamW(Optimizer): Default: None. grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of some derived class of ``GradientClipBase`` . There are three clipping strategies - ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , - :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping. + ( :ref:`api_base_clip_GradientClipByGlobalNorm` , :ref:`api_base_clip_GradientClipByNorm` , + :ref:`api_base_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping. 
lazy_mode (bool, optional): The official Adam algorithm has two moving-average accumulators. The accumulators are updated at every step. Every element of the two moving-average is updated in both dense mode and sparse mode. If the size of parameter is very large, @@ -557,7 +557,7 @@ def step(self): >>> opt.step() >>> opt.clear_grad() """ - if paddle.fluid.dygraph.base.in_declarative_mode(): + if paddle.base.dygraph.base.in_declarative_mode(): self._declarative_step() return diff --git a/python/paddle/optimizer/lamb.py b/python/paddle/optimizer/lamb.py index f8662941c78d6..b409e88e338e9 100644 --- a/python/paddle/optimizer/lamb.py +++ b/python/paddle/optimizer/lamb.py @@ -13,10 +13,10 @@ # limitations under the License. from paddle import _C_ops -from paddle.fluid.executor import global_scope +from paddle.base.executor import global_scope -from ..fluid import core, framework -from ..fluid.framework import Variable +from ..base import core, framework +from ..base.framework import Variable from .optimizer import Optimizer __all__ = [] @@ -68,9 +68,9 @@ class Lamb(Optimizer): The default value is None in static graph mode, at this time all parameters will be updated. grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of some derived class of ``GradientClipBase`` . There are three clipping strategies - ( :ref:`api_paddle_fluid_clip_ClipGradByGlobalNorm` , :ref:`api_paddle_fluid_clip_ClipGradByNorm` , - :ref:`api_paddle_fluid_clip_ClipGradByValue` ). If you want better convergence, it is recommended - to use :ref:`api_paddle_fluid_clip_ClipGradByGlobalNorm` . Default None, meaning there is no gradient clipping. + ( :ref:`api_paddle_base_clip_ClipGradByGlobalNorm` , :ref:`api_paddle_base_clip_ClipGradByNorm` , + :ref:`api_paddle_base_clip_ClipGradByValue` ). If you want better convergence, it is recommended + to use :ref:`api_paddle_base_clip_ClipGradByGlobalNorm` . Default None, meaning there is no gradient clipping. exclude_from_weight_decay_fn (function, optional): whether to skip weight decay for a parameter when this function returns True while take the parameter as input. always_adapt (bool, optional): whether to use Layer-wise LR adaptation. By default, skip adaptation on parameters that are excluded from weight decay, unless always_adapt == True, then always enable LR adaptation. diff --git a/python/paddle/optimizer/lbfgs.py b/python/paddle/optimizer/lbfgs.py index a3efa46ea5ef5..f552f2d67ab74 100644 --- a/python/paddle/optimizer/lbfgs.py +++ b/python/paddle/optimizer/lbfgs.py @@ -17,7 +17,7 @@ import paddle -from ..fluid import framework +from ..base import framework from .optimizer import Optimizer __all__ = [] @@ -338,16 +338,16 @@ class LBFGS(Optimizer): parameters (list|tuple, optional): List/Tuple of ``Tensor`` names to update to minimize ``loss``. \ This parameter is required in dygraph mode. The default value is None. weight_decay (float|WeightDecayRegularizer, optional): The strategy of regularization. \ - It can be a float value as coeff of L2 regularization or \ - :ref:`api_fluid_regularizer_L1Decay`, :ref:`api_fluid_regularizer_L2Decay`. - If a parameter has set regularizer using :ref:`api_fluid_ParamAttr` already, \ + It can be a float value as coeff of L2 regularization or \ + :ref:`api_base_regularizer_L1Decay`, :ref:`api_base_regularizer_L2Decay`. + If a parameter has set regularizer using :ref:`api_base_ParamAttr` already, \ the regularization setting here in optimizer will be ignored for this parameter.
\ Otherwise, the regularization setting here in optimizer will take effect. \ Default None, meaning there is no regularization. - grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of \ - some derived class of ``GradientClipBase`` . There are three clipping strategies \ - ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , \ - :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping. + grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of \ + some derived class of ``GradientClipBase`` . There are three clipping strategies \ + ( :ref:`api_base_clip_GradientClipByGlobalNorm` , :ref:`api_base_clip_GradientClipByNorm` , \ + :ref:`api_base_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping. name (str, optional): Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. The default value is None. diff --git a/python/paddle/optimizer/lr.py b/python/paddle/optimizer/lr.py index 5915be7cf526a..113b95eec2598 100644 --- a/python/paddle/optimizer/lr.py +++ b/python/paddle/optimizer/lr.py @@ -19,14 +19,14 @@ import paddle from paddle import Tensor -from paddle.fluid import core -from paddle.fluid.data_feeder import check_type -from paddle.fluid.framework import ( +from paddle.base import core +from paddle.base.data_feeder import check_type +from paddle.base.framework import ( Variable, default_main_program, in_dygraph_mode, ) -from paddle.fluid.layer_helper import LayerHelper +from paddle.base.layer_helper import LayerHelper __all__ = [ # noqa 'LRScheduler', diff --git a/python/paddle/optimizer/momentum.py b/python/paddle/optimizer/momentum.py index 59b9657aa43bb..36d09f00c0e61 100644 --- a/python/paddle/optimizer/momentum.py +++ b/python/paddle/optimizer/momentum.py @@ -19,7 +19,7 @@ from paddle.framework import in_dynamic_mode from paddle.regularizer import L2Decay -from ..fluid import core, framework +from ..base import core, framework from .optimizer import Optimizer __all__ = [] @@ -59,15 +59,15 @@ class Momentum(Optimizer): The default value is None in static graph mode, at this time all parameters will be updated. weight_decay (float|WeightDecayRegularizer, optional): The strategy of regularization. \ It can be a float value as coeff of L2 regularization or \ - :ref:`api_fluid_regularizer_L1Decay`, :ref:`api_fluid_regularizer_L2Decay`. - If a parameter has set regularizer using :ref:`api_fluid_ParamAttr` already, \ + :ref:`api_base_regularizer_L1Decay`, :ref:`api_base_regularizer_L2Decay`. + If a parameter has set regularizer using :ref:`api_base_ParamAttr` already, \ the regularization setting here in optimizer will be ignored for this parameter. \ Otherwise, the regularization setting here in optimizer will take effect. \ Default None, meaning there is no regularization. grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of some derived class of ``GradientClipBase`` . There are three clipping strategies - ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , - :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping. + ( :ref:`api_base_clip_GradientClipByGlobalNorm` , :ref:`api_base_clip_GradientClipByNorm` , + :ref:`api_base_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping.
multi_precision (bool, optional): Whether to use multi-precision during weight updating. Default is false. rescale_grad (float, optional): Multiply the gradient with `rescale_grad` before updating. \ Often choose to be ``1.0/batch_size``. diff --git a/python/paddle/optimizer/optimizer.py b/python/paddle/optimizer/optimizer.py index 146413b84572d..f95db125adb2a 100644 --- a/python/paddle/optimizer/optimizer.py +++ b/python/paddle/optimizer/optimizer.py @@ -20,8 +20,8 @@ import paddle import paddle.autograd as imperative_base from paddle import _C_ops -from paddle.fluid import core -from paddle.fluid.framework import ( +from paddle.base import core +from paddle.base.framework import ( Variable, _current_expected_place, default_main_program, @@ -31,10 +31,10 @@ ) from paddle.regularizer import L2Decay -from ..fluid import framework, unique_name -from ..fluid.backward import _get_no_grad_set_name, append_backward -from ..fluid.framework import Parameter, program_guard -from ..fluid.layer_helper import LayerHelper +from ..base import framework, unique_name +from ..base.backward import _get_no_grad_set_name, append_backward +from ..base.framework import Parameter, program_guard +from ..base.layer_helper import LayerHelper from .lr import LRScheduler __all__ = [] @@ -106,15 +106,15 @@ class Optimizer: The default value is None in static graph mode, at this time all parameters will be updated. weight_decay (float|WeightDecayRegularizer, optional): The strategy of regularization. \ It canbe a float value as coeff of L2 regularization or \ - :ref:`api_fluid_regularizer_L1Decay`, :ref:`api_fluid_regularizer_L2Decay`. - If a parameter has set regularizer using :ref:`api_fluid_ParamAttr` already, \ + :ref:`api_base_regularizer_L1Decay`, :ref:`api_base_regularizer_L2Decay`. + If a parameter has set regularizer using :ref:`api_base_ParamAttr` already, \ the regularization setting here in optimizer will be ignored for this parameter. \ Otherwise, the regularization setting here in optimizer will take effect. \ Default None, meaning there is no regularization. grad_clip (GradientClipBase, optional): Gradient cliping strategy, it's an instance of \ some derived class of ``GradientClipBase`` . There are three cliping strategies \ - ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , \ - :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping. + ( :ref:`api_base_clip_GradientClipByGlobalNorm` , :ref:`api_base_clip_GradientClipByNorm` , \ + :ref:`api_base_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping. name (str, optional): Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. The default value is None. 
@@ -480,7 +480,7 @@ def do_create(): persistable=True, ) - with paddle.fluid.framework.dygraph_guard_if_declarative(): + with paddle.base.framework.dygraph_guard_if_declarative(): do_create() @framework.dygraph_only @@ -1067,7 +1067,7 @@ def _create_optimization_pass( ) if isinstance(parameters_and_grads, list): - with paddle.fluid.framework.dygraph_guard_if_declarative(): + with paddle.base.framework.dygraph_guard_if_declarative(): self._create_accumulators( target_block, [ @@ -1083,7 +1083,7 @@ def _create_optimization_pass( for p in params_acc_dict['params'] if not p[0].stop_gradient ] - with paddle.fluid.framework.dygraph_guard_if_declarative(): + with paddle.base.framework.dygraph_guard_if_declarative(): self._create_accumulators(target_block, params_acc_dict) if framework.in_dygraph_mode(): @@ -1156,9 +1156,9 @@ def backward( Args: loss (Tensor): ``loss`` tensor to run optimizations. - startup_program (Program, optional): :ref:`api_fluid_Program` for + startup_program (Program, optional): :ref:`api_base_Program` for initializing parameters in ``parameters``. The default value - is None, at this time :ref:`api_fluid_default_startup_program` will be used. + is None, at this time :ref:`api_base_default_startup_program` will be used. parameters (list, optional): List of ``Tensor`` or ``Tensor.name`` to update to minimize ``loss``. The default value is None, at this time all parameters will be updated. @@ -1425,7 +1425,7 @@ def append_regularization_ops( ): repeate_regularizer = True logging.info( - "If regularizer of a Parameter has been set by 'fluid.ParamAttr' or 'fluid.WeightNormParamAttr' already. " + "If regularizer of a Parameter has been set by 'base.ParamAttr' or 'base.WeightNormParamAttr' already. " "The Regularization[%s] in Optimizer will not take effect, and it will only be applied to other Parameters!" % regularization.__str__() ) @@ -1503,9 +1503,9 @@ def minimize( Args: loss (Tensor): A ``Tensor`` containing the value to minimize. - startup_program (Program, optional): :ref:`api_fluid_Program` for + startup_program (Program, optional): :ref:`api_base_Program` for initializing parameters in ``parameters``. The default value - is None, at this time :ref:`api_fluid_default_startup_program` will be used. + is None, at this time :ref:`api_base_default_startup_program` will be used. parameters (list, optional): List of ``Tensor`` or ``Tensor.name`` to update to minimize ``loss``. The default value is None, at this time all parameters will be updated. @@ -1603,7 +1603,7 @@ def step(self): >>> adam.step() >>> adam.clear_grad() """ - if paddle.fluid.dygraph.base.in_declarative_mode(): + if paddle.base.dygraph.base.in_declarative_mode(): self._declarative_step() return diff --git a/python/paddle/optimizer/rmsprop.py b/python/paddle/optimizer/rmsprop.py index 3bab1f25f00f2..13537b7683387 100644 --- a/python/paddle/optimizer/rmsprop.py +++ b/python/paddle/optimizer/rmsprop.py @@ -16,8 +16,8 @@ from paddle import _C_ops -from ..fluid import framework -from ..fluid.framework import in_dygraph_mode +from ..base import framework +from ..base.framework import in_dygraph_mode from .optimizer import Optimizer __all__ = [] @@ -91,15 +91,15 @@ class RMSProp(Optimizer): The default value is None in static graph mode, at this time all parameters will be updated. weight_decay (float|WeightDecayRegularizer, optional): The strategy of regularization. It can be a float value as coeff of L2 regularization or \ - :ref:`api_fluid_regularizer_L1Decay`, :ref:`api_fluid_regularizer_L2Decay`. 
- If a parameter has set regularizer using :ref:`api_fluid_ParamAttr` already, + :ref:`api_base_regularizer_L1Decay`, :ref:`api_base_regularizer_L2Decay`. + If a parameter has set regularizer using :ref:`api_base_ParamAttr` already, the regularization setting here in optimizer will be ignored for this parameter. Otherwise, the regularization setting here in optimizer will take effect. Default None, meaning there is no regularization. grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of some derived class of ``GradientClipBase`` . There are three clipping strategies - ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , - :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping. + ( :ref:`api_base_clip_GradientClipByGlobalNorm` , :ref:`api_base_clip_GradientClipByNorm` , + :ref:`api_base_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping. name (str, optional): This parameter is used by developers to print debugging information. For details, please refer to :ref:`api_guide_Name`. Default is None. diff --git a/python/paddle/optimizer/sgd.py b/python/paddle/optimizer/sgd.py index 29de80b59f20d..19851c7386821 100644 --- a/python/paddle/optimizer/sgd.py +++ b/python/paddle/optimizer/sgd.py @@ -16,9 +16,9 @@ from paddle import _C_ops -from ..fluid import framework -from ..fluid.dygraph import no_grad -from ..fluid.framework import in_dygraph_mode +from ..base import framework +from ..base.dygraph import no_grad +from ..base.framework import in_dygraph_mode from .optimizer import Optimizer __all__ = [] @@ -40,15 +40,15 @@ class SGD(Optimizer): The default value is None in static graph mode, at this time all parameters will be updated. weight_decay (float|WeightDecayRegularizer, optional): The strategy of regularization. \ It can be a float value as coeff of L2 regularization or \ - :ref:`api_fluid_regularizer_L1Decay`, :ref:`api_fluid_regularizer_L2Decay`. - If a parameter has set regularizer using :ref:`api_fluid_ParamAttr` already, \ + :ref:`api_base_regularizer_L1Decay`, :ref:`api_base_regularizer_L2Decay`. + If a parameter has set regularizer using :ref:`api_base_ParamAttr` already, \ the regularization setting here in optimizer will be ignored for this parameter. \ Otherwise, the regularization setting here in optimizer will take effect. \ Default None, meaning there is no regularization. grad_clip (GradientClipBase, optional): Gradient clipping strategy, it's an instance of some derived class of ``GradientClipBase`` . There are three clipping strategies - ( :ref:`api_fluid_clip_GradientClipByGlobalNorm` , :ref:`api_fluid_clip_GradientClipByNorm` , - :ref:`api_fluid_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping. + ( :ref:`api_base_clip_GradientClipByGlobalNorm` , :ref:`api_base_clip_GradientClipByNorm` , + :ref:`api_base_clip_GradientClipByValue` ). Default None, meaning there is no gradient clipping. name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` . 
diff --git a/python/paddle/profiler/profiler.py b/python/paddle/profiler/profiler.py
index 87b63336d04a7..ff82fb912da4f 100644
--- a/python/paddle/profiler/profiler.py
+++ b/python/paddle/profiler/profiler.py
@@ -22,7 +22,7 @@
 from warnings import warn
 import paddle
-from paddle.fluid.core import (
+from paddle.base.core import (
     ProfilerOptions,
     TracerEventType,
     _Profiler,
diff --git a/python/paddle/profiler/profiler_statistic.py b/python/paddle/profiler/profiler_statistic.py
index 1a22f9b08f80c..23c38d804f1a1 100755
--- a/python/paddle/profiler/profiler_statistic.py
+++ b/python/paddle/profiler/profiler_statistic.py
@@ -15,7 +15,7 @@
 import re
 from enum import Enum
-from paddle.fluid.core import TracerEventType, TracerMemEventType
+from paddle.base.core import TracerEventType, TracerMemEventType
 from paddle.utils.flops import flops
 from .statistic_helper import (
diff --git a/python/paddle/profiler/utils.py b/python/paddle/profiler/utils.py
index 851d34d5b7aa5..62e57b515c520 100644
--- a/python/paddle/profiler/utils.py
+++ b/python/paddle/profiler/utils.py
@@ -18,8 +18,8 @@
 from typing import Any
 from warnings import warn
-from paddle.fluid import core
-from paddle.fluid.core import TracerEventType, _RecordEvent
+from paddle.base import core
+from paddle.base.core import TracerEventType, _RecordEvent
 _is_profiler_used = False
 _has_optimizer_wrapped = False
diff --git a/python/paddle/quantization/imperative/qat.py b/python/paddle/quantization/imperative/qat.py
index f261f4cabe42a..871b67d51c7ec 100644
--- a/python/paddle/quantization/imperative/qat.py
+++ b/python/paddle/quantization/imperative/qat.py
@@ -15,7 +15,7 @@
 import os
 import paddle
-from paddle.fluid.framework import IrGraph
+from paddle.base.framework import IrGraph
 from paddle.framework import core
 from paddle.nn.quant import quant_layers
diff --git a/python/paddle/quantization/quanters/abs_max.py b/python/paddle/quantization/quanters/abs_max.py
index 455f918643425..a8f3792463edb 100644
--- a/python/paddle/quantization/quanters/abs_max.py
+++ b/python/paddle/quantization/quanters/abs_max.py
@@ -14,8 +14,8 @@
 import paddle
 from paddle import _legacy_C_ops
-from paddle.fluid.data_feeder import check_variable_and_dtype
-from paddle.fluid.framework import _create_tensor
+from paddle.base.data_feeder import check_variable_and_dtype
+from paddle.base.framework import _create_tensor
 from paddle.framework import ParamAttr, core
 from paddle.nn.initializer import Constant
 from paddle.utils import unique_name
diff --git a/python/paddle/reader/decorator.py b/python/paddle/reader/decorator.py
index fba81b96f6355..cd05182f5a7f8 100644
--- a/python/paddle/reader/decorator.py
+++ b/python/paddle/reader/decorator.py
@@ -22,7 +22,7 @@
 from queue import Queue
 from threading import Thread
-from paddle.fluid.reader import QUEUE_GET_TIMEOUT
+from paddle.base.reader import QUEUE_GET_TIMEOUT
 __all__ = []
@@ -67,7 +67,7 @@ def cache(reader):
             ...     yield i
             ...
             >>> # All data is cached into memory
-            >>> cached_reader = paddle.fluid.io.cache(reader)
+            >>> cached_reader = paddle.base.io.cache(reader)
             >>> for i in cached_reader():
             ...     print(i)
@@ -126,7 +126,7 @@ def reader():
 def shuffle(reader, buf_size):
     """
-    paddle.fluid.io.shuffle ( :ref:`api_fluid_io_shuffle` ) is recommended to use,
+    paddle.base.io.shuffle ( :ref:`api_base_io_shuffle` ) is recommended to use,
     and paddle.reader.shuffle is an alias.
     This API creates a decorated reader that outputs the shuffled data.
@@ -179,8 +179,8 @@ def chain(*readers):
     the format of the outputs.
**Note**: - ``paddle.reader.chain`` is the alias of ``paddle.fluid.io.chain``, and - ``paddle.fluid.io.chain`` is recommended to use. + ``paddle.reader.chain`` is the alias of ``paddle.base.io.chain``, and + ``paddle.base.io.chain`` is recommended to use. For example, if three input readers' outputs are as follows: [0, 0, 0], @@ -359,7 +359,7 @@ def data_reader(): def firstn(reader, n): """ - paddle.fluid.io.firstn ( :ref:`api_fluid_io_firstn` ) is recommended to use, + paddle.base.io.firstn ( :ref:`api_base_io_firstn` ) is recommended to use, and paddle.reader.firstn is an alias. This API creates a decorated reader, and limits the max number of @@ -553,23 +553,23 @@ def multiprocess_reader(readers, use_pipe=True, queue_size=1000): ... # generate sample input files ... fake_input_files() ... - ... with fluid.program_guard(fluid.Program(), fluid.Program()): - ... place = fluid.CPUPlace() + ... with base.program_guard(base.Program(), base.Program()): + ... place = base.CPUPlace() ... # the 1st 2 is batch size ... ... image = paddle.static.data(name='image', dtype='int64', shape=[2, 1, 2]) ... paddle.static.Print(image) ... # print detailed tensor info of image variable ... - ... reader = fluid.io.PyReader(feed_list=[image], capacity=2) + ... reader = base.io.PyReader(feed_list=[image], capacity=2) ... ... decorated_reader = paddle.reader.multiprocess_reader( ... [generate_reader(sample_files[0]), generate_reader(sample_files[1])], False) ... ... reader.decorate_sample_generator(decorated_reader, batch_size=2, places=[place]) ... - ... exe = fluid.Executor(place) - ... exe.run(fluid.default_startup_program()) + ... exe = base.Executor(place) + ... exe.run(base.default_startup_program()) ... ... for data in reader(): ... res = exe.run(feed=data, fetch_list=[image]) diff --git a/python/paddle/regularizer.py b/python/paddle/regularizer.py index 501a06e32f89e..dabf365e52681 100644 --- a/python/paddle/regularizer.py +++ b/python/paddle/regularizer.py @@ -14,8 +14,8 @@ from paddle import _C_ops -from paddle.fluid import framework -from paddle.fluid.framework import in_dygraph_mode +from paddle.base import framework +from paddle.base.framework import in_dygraph_mode __all__ = ['L1Decay', 'L2Decay'] diff --git a/python/paddle/signal.py b/python/paddle/signal.py index d1dc910f043ea..4529f86fbea7a 100644 --- a/python/paddle/signal.py +++ b/python/paddle/signal.py @@ -17,8 +17,8 @@ from paddle.framework import in_dynamic_mode from .fft import fft_c2c, fft_c2r, fft_r2c -from .fluid.data_feeder import check_variable_and_dtype -from .fluid.layer_helper import LayerHelper +from .base.data_feeder import check_variable_and_dtype +from .base.layer_helper import LayerHelper from .tensor.attribute import is_complex __all__ = [ diff --git a/python/paddle/sparse/binary.py b/python/paddle/sparse/binary.py index 9de436190972a..55efa6b4210cf 100644 --- a/python/paddle/sparse/binary.py +++ b/python/paddle/sparse/binary.py @@ -13,8 +13,8 @@ # limitations under the License. 
 from paddle import _C_ops, in_dynamic_mode
-from paddle.fluid.framework import core, dygraph_only
-from paddle.fluid.layer_helper import LayerHelper
+from paddle.base.framework import core, dygraph_only
+from paddle.base.layer_helper import LayerHelper
 from .unary import cast
diff --git a/python/paddle/sparse/creation.py b/python/paddle/sparse/creation.py
index 62a83aab39834..4f8b2a17ce15b 100644
--- a/python/paddle/sparse/creation.py
+++ b/python/paddle/sparse/creation.py
@@ -16,14 +16,14 @@
 import paddle
 from paddle import _C_ops, in_dynamic_mode
-from paddle.fluid.data_feeder import convert_dtype
-from paddle.fluid.framework import (
+from paddle.base.data_feeder import convert_dtype
+from paddle.base.framework import (
     _current_expected_place,
     _get_paddle_place,
     core,
     dygraph_only,
 )
-from paddle.fluid.layer_helper import LayerHelper
+from paddle.base.layer_helper import LayerHelper
 from paddle.tensor import max, to_tensor
 __all__ = [
diff --git a/python/paddle/sparse/multiary.py b/python/paddle/sparse/multiary.py
index 8e3179f0bdcc0..2aa25e8473ba9 100644
--- a/python/paddle/sparse/multiary.py
+++ b/python/paddle/sparse/multiary.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 from paddle import _C_ops
-from paddle.fluid.framework import dygraph_only
+from paddle.base.framework import dygraph_only
 __all__ = []
diff --git a/python/paddle/sparse/nn/functional/activation.py b/python/paddle/sparse/nn/functional/activation.py
index db8e5891964cd..966ea27b1368d 100644
--- a/python/paddle/sparse/nn/functional/activation.py
+++ b/python/paddle/sparse/nn/functional/activation.py
@@ -15,8 +15,8 @@
 __all__ = []
 from paddle import _C_ops, in_dynamic_mode
-from paddle.fluid.framework import dygraph_only
-from paddle.fluid.layer_helper import LayerHelper
+from paddle.base.framework import dygraph_only
+from paddle.base.layer_helper import LayerHelper
 def relu(x, name=None):
diff --git a/python/paddle/sparse/nn/functional/conv.py b/python/paddle/sparse/nn/functional/conv.py
index cbe56a88067a4..c3f51421408bf 100644
--- a/python/paddle/sparse/nn/functional/conv.py
+++ b/python/paddle/sparse/nn/functional/conv.py
@@ -15,7 +15,7 @@
 __all__ = []
 from paddle import _C_ops, in_dynamic_mode
-from paddle.fluid.layer_helper import LayerHelper
+from paddle.base.layer_helper import LayerHelper
 from paddle.nn.functional.conv import _update_padding_nd
 from paddle.utils import convert_to_list
diff --git a/python/paddle/sparse/nn/functional/transformer.py b/python/paddle/sparse/nn/functional/transformer.py
index bed15cd42f342..ff75ec9028df0 100644
--- a/python/paddle/sparse/nn/functional/transformer.py
+++ b/python/paddle/sparse/nn/functional/transformer.py
@@ -15,7 +15,7 @@
 __all__ = []
 from paddle import _C_ops
-from paddle.fluid.framework import dygraph_only
+from paddle.base.framework import dygraph_only
 @dygraph_only
diff --git a/python/paddle/sparse/nn/layer/norm.py b/python/paddle/sparse/nn/layer/norm.py
index dc8c2713f45a4..ebb4b68930ece 100644
--- a/python/paddle/sparse/nn/layer/norm.py
+++ b/python/paddle/sparse/nn/layer/norm.py
@@ -16,7 +16,7 @@
 import paddle
 from paddle import _C_ops, in_dynamic_mode
-from paddle.fluid.layer_helper import LayerHelper
+from paddle.base.layer_helper import LayerHelper
 from paddle.framework import no_grad
 from paddle.nn.layer.norm import _BatchNormBase
diff --git a/python/paddle/sparse/unary.py b/python/paddle/sparse/unary.py
index 8b798bb3bd03c..4fb2a8d32e1a0 100644
--- a/python/paddle/sparse/unary.py
+++ b/python/paddle/sparse/unary.py
@@ -17,8 +17,8 @@
 import
paddle from paddle import _C_ops, in_dynamic_mode from paddle.common_ops_import import Variable -from paddle.fluid.data_feeder import check_type, check_variable_and_dtype -from paddle.fluid.framework import ( +from paddle.base.data_feeder import check_type, check_variable_and_dtype +from paddle.base.framework import ( convert_np_dtype_to_dtype_, core, dygraph_only, diff --git a/python/paddle/static/__init__.py b/python/paddle/static/__init__.py index 361a6fc4ee3f5..5cf44a3efc7c0 100644 --- a/python/paddle/static/__init__.py +++ b/python/paddle/static/__init__.py @@ -37,7 +37,7 @@ from .io import load # noqa: F401 from .io import load_program_state # noqa: F401 from .io import set_program_state # noqa: F401 -from ..fluid import Scope # noqa: F401 +from ..base import Scope # noqa: F401 from .input import data # noqa: F401 from .input import InputSpec # noqa: F401 from .input import setitem # noqa: F401 @@ -45,39 +45,39 @@ from ..tensor.creation import create_parameter # noqa: F401 from ..tensor.creation import create_global_var # noqa: F401 -from ..fluid.executor import Executor # noqa: F401 -from ..fluid.executor import global_scope # noqa: F401 -from ..fluid.executor import scope_guard # noqa: F401 -from ..fluid.backward import append_backward # noqa: F401 -from ..fluid.backward import gradients # noqa: F401 -from ..fluid.compiler import BuildStrategy # noqa: F401 -from ..fluid.compiler import CompiledProgram # noqa: F401 -from ..fluid.compiler import IpuCompiledProgram # noqa: F401 -from ..fluid.compiler import IpuStrategy # noqa: F401 -from ..fluid.compiler import ExecutionStrategy # noqa: F401 -from ..fluid.framework import default_main_program # noqa: F401 -from ..fluid.framework import default_startup_program # noqa: F401 -from ..fluid.framework import device_guard # noqa: F401 +from ..base.executor import Executor # noqa: F401 +from ..base.executor import global_scope # noqa: F401 +from ..base.executor import scope_guard # noqa: F401 +from ..base.backward import append_backward # noqa: F401 +from ..base.backward import gradients # noqa: F401 +from ..base.compiler import BuildStrategy # noqa: F401 +from ..base.compiler import CompiledProgram # noqa: F401 +from ..base.compiler import IpuCompiledProgram # noqa: F401 +from ..base.compiler import IpuStrategy # noqa: F401 +from ..base.compiler import ExecutionStrategy # noqa: F401 +from ..base.framework import default_main_program # noqa: F401 +from ..base.framework import default_startup_program # noqa: F401 +from ..base.framework import device_guard # noqa: F401 -from ..fluid.framework import name_scope # noqa: F401 -from ..fluid.framework import cpu_places # noqa: F401 -from ..fluid.framework import cuda_places # noqa: F401 -from ..fluid.framework import xpu_places # noqa: F401 -from ..fluid.framework import Variable # noqa: F401 -from ..fluid.framework import Operator # noqa: F401 -from ..fluid.framework import Parameter # noqa: F401 -from ..fluid.framework import ipu_shard_guard # noqa: F401 -from ..fluid.framework import set_ipu_shard # noqa: F401 +from ..base.framework import name_scope # noqa: F401 +from ..base.framework import cpu_places # noqa: F401 +from ..base.framework import cuda_places # noqa: F401 +from ..base.framework import xpu_places # noqa: F401 +from ..base.framework import Variable # noqa: F401 +from ..base.framework import Operator # noqa: F401 +from ..base.framework import Parameter # noqa: F401 +from ..base.framework import ipu_shard_guard # noqa: F401 +from ..base.framework import set_ipu_shard # noqa: F401 from 
.nn.control_flow import Print # noqa: F401 -from ..fluid.param_attr import WeightNormParamAttr # noqa: F401 +from ..base.param_attr import WeightNormParamAttr # noqa: F401 from .nn.metric import auc # noqa: F401 from .nn.metric import accuracy # noqa: F401 from .nn.metric import ctr_metric_bundle # noqa: F401 -from ..fluid.framework import program_guard # noqa: F401 -from ..fluid.framework import Program # noqa: F401 +from ..base.framework import program_guard # noqa: F401 +from ..base.framework import Program # noqa: F401 __all__ = [ # noqa 'append_backward', diff --git a/python/paddle/static/amp/amp_nn.py b/python/paddle/static/amp/amp_nn.py index 6e56f253fbd4b..304ad8d7d6c40 100644 --- a/python/paddle/static/amp/amp_nn.py +++ b/python/paddle/static/amp/amp_nn.py @@ -13,10 +13,10 @@ # limitations under the License. from paddle import _C_ops -from paddle.fluid import core -from paddle.fluid.data_feeder import check_type, check_variable_and_dtype -from paddle.fluid.framework import Variable, in_dygraph_mode -from paddle.fluid.layer_helper import LayerHelper +from paddle.base import core +from paddle.base.data_feeder import check_type, check_variable_and_dtype +from paddle.base.framework import Variable, in_dygraph_mode +from paddle.base.layer_helper import LayerHelper def check_finite_and_unscale(x, scale, name=None, float_status=None): diff --git a/python/paddle/static/amp/bf16/amp_lists.py b/python/paddle/static/amp/bf16/amp_lists.py index 0d6d3997aebd5..4020b4f5261a1 100644 --- a/python/paddle/static/amp/bf16/amp_lists.py +++ b/python/paddle/static/amp/bf16/amp_lists.py @@ -15,7 +15,7 @@ import copy from paddle.amp.amp_lists import BF16_WHITE_LIST -from paddle.fluid import core +from paddle.base import core from ..fp16_lists import black_list as black_list_fp16 from ..fp16_lists import gray_list as gray_list_fp16 diff --git a/python/paddle/static/amp/bf16/amp_utils.py b/python/paddle/static/amp/bf16/amp_utils.py index 55ca4a5d06c86..071328435e939 100644 --- a/python/paddle/static/amp/bf16/amp_utils.py +++ b/python/paddle/static/amp/bf16/amp_utils.py @@ -19,9 +19,9 @@ import numpy as np -from paddle.fluid import core, framework, global_scope -from paddle.fluid.log_helper import get_logger -from paddle.fluid.wrapped_decorator import signature_safe_contextmanager +from paddle.base import core, framework, global_scope +from paddle.base.log_helper import get_logger +from paddle.base.wrapped_decorator import signature_safe_contextmanager from ..fp16_utils import ( _rename_arg, @@ -483,9 +483,9 @@ def cast_parameters_to_bf16(place, program, scope=None, to_bf16_var_names=None): Traverse all parameters in the whole model and set them to the BF16 data type. Whereas, this function will keep parameters of batchnorms in FP32. Args: - place(fluid.CPUPlace|fluid.CUDAPlace): `place` is used to restore the BF16 weight tensors. + place(base.CPUPlace|base.CUDAPlace): `place` is used to restore the BF16 weight tensors. program (Program): The used program. - scope(fluid.Scope, optional): `scope` is used to get the FP32 weight tensor values. + scope(base.Scope, optional): `scope` is used to get the FP32 weight tensor values. Default is None. to_bf16_var_names(set|list, optional): The data types of vars in `to_bf16_var_names` will be set to BF16. 
Usually, it is the returned diff --git a/python/paddle/static/amp/bf16/decorator.py b/python/paddle/static/amp/bf16/decorator.py index 47b19b697d1ec..74e896bc7f790 100644 --- a/python/paddle/static/amp/bf16/decorator.py +++ b/python/paddle/static/amp/bf16/decorator.py @@ -16,7 +16,7 @@ import warnings import paddle -from paddle.fluid import core, default_main_program, program_guard, unique_name +from paddle.base import core, default_main_program, program_guard, unique_name from .amp_lists import AutoMixedPrecisionListsBF16 from .amp_utils import ( diff --git a/python/paddle/static/amp/debugging.py b/python/paddle/static/amp/debugging.py index 38e3764203ab1..9dcef221fbfc0 100644 --- a/python/paddle/static/amp/debugging.py +++ b/python/paddle/static/amp/debugging.py @@ -16,7 +16,7 @@ import logging import paddle -from paddle.fluid.log_helper import get_logger +from paddle.base.log_helper import get_logger _logger = get_logger( __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s' @@ -35,11 +35,11 @@ def update(self, dtype): if dtype is None: self.other_calls = self.other_calls + 1 else: - if dtype == paddle.fluid.core.VarDesc.VarType.FP32: + if dtype == paddle.base.core.VarDesc.VarType.FP32: self.fp32_calls = self.fp32_calls + 1 - elif dtype == paddle.fluid.core.VarDesc.VarType.FP16: + elif dtype == paddle.base.core.VarDesc.VarType.FP16: self.fp16_calls = self.fp16_calls + 1 - elif dtype == paddle.fluid.core.VarDesc.VarType.BF16: + elif dtype == paddle.base.core.VarDesc.VarType.BF16: self.bf16_calls = self.bf16_calls + 1 else: self.other_calls = self.other_calls + 1 @@ -61,10 +61,10 @@ def convert_to_list(self): def _is_floating_point(dtype): if dtype in [ - paddle.fluid.core.VarDesc.VarType.FP64, - paddle.fluid.core.VarDesc.VarType.FP32, - paddle.fluid.core.VarDesc.VarType.FP16, - paddle.fluid.core.VarDesc.VarType.BF16, + paddle.base.core.VarDesc.VarType.FP64, + paddle.base.core.VarDesc.VarType.FP32, + paddle.base.core.VarDesc.VarType.FP16, + paddle.base.core.VarDesc.VarType.BF16, ]: return True else: diff --git a/python/paddle/static/amp/decorator.py b/python/paddle/static/amp/decorator.py index 440048f70209b..0d51bdf1a5245 100644 --- a/python/paddle/static/amp/decorator.py +++ b/python/paddle/static/amp/decorator.py @@ -16,7 +16,7 @@ import warnings import paddle -from paddle.fluid import ( +from paddle.base import ( core, default_main_program, default_startup_program, @@ -415,7 +415,7 @@ def _append_cast_to_master_grad_op(self, param_grads): ] params_master_grads = [] - assert isinstance(target_block, paddle.fluid.framework.Block) + assert isinstance(target_block, paddle.base.framework.Block) # create for p, g in param_grads: if g.name not in self._optimizer._master_grads.keys(): diff --git a/python/paddle/static/amp/fp16_lists.py b/python/paddle/static/amp/fp16_lists.py index 3a0f407198797..3023628e9a389 100644 --- a/python/paddle/static/amp/fp16_lists.py +++ b/python/paddle/static/amp/fp16_lists.py @@ -21,8 +21,8 @@ FP16_BLACK_LIST, FP16_WHITE_LIST, ) -from paddle.fluid import core -from paddle.fluid.log_helper import get_logger +from paddle.base import core +from paddle.base.log_helper import get_logger _logger = get_logger( __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s' diff --git a/python/paddle/static/amp/fp16_utils.py b/python/paddle/static/amp/fp16_utils.py index 46c669ba54e46..8744c16f04d37 100644 --- a/python/paddle/static/amp/fp16_utils.py +++ b/python/paddle/static/amp/fp16_utils.py @@ -17,9 +17,9 @@ import numpy as np import paddle -from 
paddle.fluid import core, framework, global_scope -from paddle.fluid.log_helper import get_logger -from paddle.fluid.wrapped_decorator import signature_safe_contextmanager +from paddle.base import core, framework, global_scope +from paddle.base.log_helper import get_logger +from paddle.base.wrapped_decorator import signature_safe_contextmanager from .fp16_lists import ( AutoMixedPrecisionLists, @@ -795,9 +795,9 @@ def cast_parameters_to_fp16( Traverse all parameters in the whole model and set them to the FP16 data type. Whereas, this function will keep parameters of batchnorms in FP32. Args: - place(fluid.CPUPlace|fluid.CUDAPlace): `place` is used to restore the FP16 weight tensors. + place(base.CPUPlace|base.CUDAPlace): `place` is used to restore the FP16 weight tensors. program (Program): The used program. - scope(fluid.Scope, optional): `scope` is used to get the FP32 weight tensor values. + scope(base.Scope, optional): `scope` is used to get the FP32 weight tensor values. Default is None. to_fp16_var_names(set|list, optional): The data types of vars in `to_fp16_var_names` will be set to FP16. Usually, it is the returned diff --git a/python/paddle/static/amp/function_overload.py b/python/paddle/static/amp/function_overload.py index 8139401c21db1..0767873dd6d99 100644 --- a/python/paddle/static/amp/function_overload.py +++ b/python/paddle/static/amp/function_overload.py @@ -19,7 +19,7 @@ import logging from enum import Enum -from paddle.fluid.log_helper import get_logger +from paddle.base.log_helper import get_logger _logger = get_logger( __name__, logging.INFO, fmt='%(asctime)s-%(levelname)s: %(message)s' diff --git a/python/paddle/static/input.py b/python/paddle/static/input.py index 30a853336c976..a2e43e18361c8 100644 --- a/python/paddle/static/input.py +++ b/python/paddle/static/input.py @@ -15,12 +15,12 @@ import os import paddle -from paddle.fluid import Variable, core -from paddle.fluid.data_feeder import check_type -from paddle.fluid.framework import convert_np_dtype_to_dtype_, static_only -from paddle.fluid.layer_helper import LayerHelper +from paddle.base import Variable, core +from paddle.base.data_feeder import check_type +from paddle.base.framework import convert_np_dtype_to_dtype_, static_only +from paddle.base.layer_helper import LayerHelper -from ..fluid.variable_index import _setitem_impl_, _setitem_static +from ..base.variable_index import _setitem_impl_, _setitem_static __all__ = [] diff --git a/python/paddle/static/io.py b/python/paddle/static/io.py index 1b6f65e28e5ed..27682416f8c1b 100644 --- a/python/paddle/static/io.py +++ b/python/paddle/static/io.py @@ -23,7 +23,7 @@ import numpy as np import paddle -from paddle.fluid import ( +from paddle.base import ( CompiledProgram, Program, Variable, @@ -32,9 +32,9 @@ program_guard, unique_name, ) -from paddle.fluid.executor import Executor, global_scope -from paddle.fluid.framework import Parameter, dygraph_not_support, static_only -from paddle.fluid.log_helper import get_logger +from paddle.base.executor import Executor, global_scope +from paddle.base.framework import Parameter, dygraph_not_support, static_only +from paddle.base.log_helper import get_logger from paddle.framework.io_utils import ( _clone_var_in_block_, _load_program_scope, @@ -110,7 +110,7 @@ def _get_valid_program(program=None): ) if not isinstance(program, Program): raise TypeError( - "The type of input program is invalid, expected type is fluid.Program, but received %s" + "The type of input program is invalid, expected type is base.Program, but received 
%s" % type(program) ) return program @@ -228,7 +228,7 @@ def normalize_program(program, feed_vars, fetch_vars, **kwargs): """ if not isinstance(program, Program): raise TypeError( - "program type must be `fluid.Program`, but received `%s`" + "program type must be `base.Program`, but received `%s`" % type(program) ) if not isinstance(feed_vars, list): @@ -700,7 +700,7 @@ def deserialize_persistables(program, data, executor): """ if not isinstance(program, Program): raise TypeError( - "program type must be `fluid.Program`, but received `%s`" + "program type must be `base.Program`, but received `%s`" % type(program) ) # load params to a tmp program @@ -747,7 +747,7 @@ def deserialize_persistables(program, data, executor): for var in check_vars: if not isinstance(var, Parameter): continue - var_tmp = paddle.fluid.global_scope().find_var(var.name) + var_tmp = paddle.base.global_scope().find_var(var.name) assert var_tmp is not None, "can't not find var: " + var.name new_shape = (np.array(var_tmp.get_tensor())).shape assert var.name in origin_shape_map, var.name + " MUST in var list." @@ -1228,7 +1228,7 @@ def load_vars( main_program = default_main_program() if not isinstance(main_program, Program): raise TypeError( - "The type of input main_program is invalid, expected type is fluid.Program, but received %s" + "The type of input main_program is invalid, expected type is base.Program, but received %s" % type(main_program) ) @@ -1248,7 +1248,7 @@ def load_vars( if not isinstance(main_program, Program): raise TypeError( - "The type of input main_program is invalid, expected type is fluid.Program, but received %s" + "The type of input main_program is invalid, expected type is base.Program, but received %s" % type(main_program) ) @@ -1373,7 +1373,7 @@ def load_vars( for each_var in check_vars: if not isinstance(each_var, Parameter): continue - var_temp = paddle.fluid.global_scope().find_var(each_var.name) + var_temp = paddle.base.global_scope().find_var(each_var.name) assert var_temp is not None, "can't not find var: " + each_var.name new_shape = (np.array(var_temp.get_tensor())).shape assert each_var.name in orig_para_shape, ( @@ -1548,7 +1548,7 @@ def load(program, model_path, executor=None, var_list=None): parameter_file_name = model_prefix + ".pdparams" if not os.path.exists(parameter_file_name): - # model file save by fluid.save not found, try to load model file saved with + # model file save by base.save not found, try to load model file saved with # [save_vars, save_params, save_persistables] _logger.debug( "{} not found, try to load model file saved with [ save_params, save_persistables, save_vars ]".format( @@ -1641,30 +1641,30 @@ def set_var(var, ndarray): t = global_scope().find_var(var.name).get_tensor() p = t._place() if p.is_cpu_place(): - place = paddle.fluid.CPUPlace() + place = paddle.base.CPUPlace() elif p.is_cuda_pinned_place(): - place = paddle.fluid.CUDAPinnedPlace() + place = paddle.base.CUDAPinnedPlace() elif p.is_xpu_place(): - p = paddle.fluid.core.Place() + p = paddle.base.core.Place() p.set_place(t._place()) - place = paddle.fluid.XPUPlace(p.xpu_device_id()) + place = paddle.base.XPUPlace(p.xpu_device_id()) elif p.is_custom_place(): - p = paddle.fluid.core.Place() + p = paddle.base.core.Place() p.set_place(t._place()) - place = paddle.fluid.CustomPlace( + place = paddle.base.CustomPlace( paddle.device.get_device().split(':')[0], p.custom_device_id() ) else: - p = paddle.fluid.core.Place() + p = paddle.base.core.Place() p.set_place(t._place()) - place = 
paddle.fluid.CUDAPlace(p.gpu_device_id()) + place = paddle.base.CUDAPlace(p.gpu_device_id()) t.set(ndarray, place) parameter_list = list(filter(is_parameter, program.list_vars())) if executor: - paddle.fluid.core._create_loaded_parameter( + paddle.base.core._create_loaded_parameter( parameter_list, global_scope(), executor._default_executor ) with open(parameter_file_name, 'rb') as f: @@ -1693,7 +1693,7 @@ def set_var(var, ndarray): ), f"Optimizer file [{opt_file_name}] not exits" if executor: - paddle.fluid.core._create_loaded_parameter( + paddle.base.core._create_loaded_parameter( optimizer_var_list, global_scope(), executor._default_executor ) @@ -1750,7 +1750,7 @@ def set_program_state(program, state_dict): used_para_list = {} for para in parameter_list: - var_temp = paddle.fluid.global_scope().find_var(para.name) + var_temp = paddle.base.global_scope().find_var(para.name) assert ( var_temp is not None ), "Variable [ {} ] Not found, Please make sure run startup program".format( @@ -1778,17 +1778,17 @@ def set_program_state(program, state_dict): # assert ten_place.is_gpu_place() or ten_place.is_cpu_place(), \ # "Place not support, only support CPUPlace and GPUPlace, now is {}".format(str(ten_place)) - py_place = paddle.fluid.CPUPlace() + py_place = paddle.base.CPUPlace() if ten_place.is_cuda_pinned_place(): - place = paddle.fluid.CUDAPinnedPlace() + place = paddle.base.CUDAPinnedPlace() elif ten_place.is_gpu_place(): - p = paddle.fluid.core.Place() + p = paddle.base.core.Place() p.set_place(ten_place) - py_place = paddle.fluid.CUDAPlace(p.gpu_device_id()) + py_place = paddle.base.CUDAPlace(p.gpu_device_id()) elif ten_place.is_xpu_place(): - p = paddle.fluid.core.Place() + p = paddle.base.core.Place() p.set_place(ten_place) - py_place = paddle.fluid.XPUPlace(p.xpu_device_id()) + py_place = paddle.base.XPUPlace(p.xpu_device_id()) ten.set(new_para_np, py_place) @@ -1874,7 +1874,7 @@ def load_program_state(model_path, var_list=None): parameter_file_name = model_prefix + ".pdparams" if not os.path.exists(parameter_file_name): - # model file saved with fluid.save is not found, try to load model file saved with + # model file saved with base.save is not found, try to load model file saved with # [save_vars, save_params, save_persistables] _logger.debug( "{} not found, try to load model file saved with [ save_params, save_persistables, save_vars ]".format( @@ -1941,8 +1941,8 @@ def _load_vars_with_try_catch( warnings.warn(error_str % filenames, RuntimeWarning) return False - place = paddle.fluid.CPUPlace() - exe = paddle.fluid.Executor(place) + place = paddle.base.CPUPlace() + exe = paddle.base.Executor(place) loaded_var_list = [] @@ -1982,7 +1982,7 @@ def _load_vars_with_try_catch( res_dict = {} for var in loaded_var_list: res_dict[var.name] = np.asarray( - paddle.fluid.global_scope().find_var(var.name).get_tensor() + paddle.base.global_scope().find_var(var.name).get_tensor() ) return res_dict diff --git a/python/paddle/static/nn/common.py b/python/paddle/static/nn/common.py index 5994742bffbfe..c424066c554ae 100644 --- a/python/paddle/static/nn/common.py +++ b/python/paddle/static/nn/common.py @@ -24,9 +24,9 @@ check_type, check_variable_and_dtype, ) -from paddle.fluid import core, unique_name -from paddle.fluid.data_feeder import check_dtype -from paddle.fluid.framework import ( +from paddle.base import core, unique_name +from paddle.base.data_feeder import check_dtype +from paddle.base.framework import ( Program, Variable, default_main_program, @@ -35,9 +35,9 @@ program_guard, static_only, 
) -from paddle.fluid.layers.layer_function_generator import templatedoc -from paddle.fluid.param_attr import ParamAttr -from paddle.fluid.wrapped_decorator import signature_safe_contextmanager +from paddle.base.layers.layer_function_generator import templatedoc +from paddle.base.param_attr import ParamAttr +from paddle.base.wrapped_decorator import signature_safe_contextmanager from paddle.nn.initializer import Constant, Normal __all__ = [] @@ -180,7 +180,7 @@ def fc( """ - def fc_fluid( + def fc_base( input, size, num_flatten_dims=1, @@ -236,7 +236,7 @@ def fc_fluid( # add activation return helper.append_activation(pre_activation) - return fc_fluid( + return fc_base( input=x, size=size, num_flatten_dims=num_flatten_dims, @@ -1037,7 +1037,7 @@ def _get_default_param_initializer(): if ( core.is_compiled_with_cuda() - and paddle.fluid.get_flags("FLAGS_conv2d_disable_cudnn")[ + and paddle.base.get_flags("FLAGS_conv2d_disable_cudnn")[ "FLAGS_conv2d_disable_cudnn" ] ): @@ -2566,10 +2566,10 @@ def bilinear_tensor_product( :ref:`api_guide_Name` . Usually name is no need to set and None by default. param_attr (ParamAttr|None): To specify the weight parameter attribute. Default: None, which means the default weight parameter property is - used. See usage for details in :ref:`api_fluid_ParamAttr` . + used. See usage for details in :ref:`api_base_ParamAttr` . bias_attr (ParamAttr|None): To specify the bias parameter attribute. Default: None, which means the default bias parameter property is - used. See usage for details in :ref:`api_fluid_ParamAttr` . + used. See usage for details in :ref:`api_base_ParamAttr` . Returns: Tensor, A 2-D Tensor of shape [batch_size, size]. Data type is the same as input **x**. @@ -2887,7 +2887,7 @@ def batch_norm( *attrs_, ) - return paddle.fluid.dygraph_utils._append_activation_in_dygraph( + return paddle.base.dygraph_utils._append_activation_in_dygraph( batch_norm_out, act=act, use_mkldnn=False ) diff --git a/python/paddle/static/nn/control_flow.py b/python/paddle/static/nn/control_flow.py index 39a95f42a7018..6226270e597e4 100644 --- a/python/paddle/static/nn/control_flow.py +++ b/python/paddle/static/nn/control_flow.py @@ -23,9 +23,9 @@ convert_dtype, in_dygraph_mode, ) -from paddle.fluid import core -from paddle.fluid.backward import _infer_var_data_type_shape_ -from paddle.fluid.framework import Operator, Program, Variable, static_only +from paddle.base import core +from paddle.base.backward import _infer_var_data_type_shape_ +from paddle.base.framework import Operator, Program, Variable, static_only from paddle.utils import ( assert_same_structure, copy_mutable_vars, @@ -415,8 +415,8 @@ class While: while loop control flow. Repeat while body until cond is False. Note: - A new OP :ref:`api_fluid_layers_while_loop` is highly recommended instead of ``While`` if the shape of parameter ``cond`` is [1]. - OP :ref:`api_fluid_layers_while_loop` is easier to use and is called with less code but does the same thing as ``While`` . + A new OP :ref:`api_base_layers_while_loop` is highly recommended instead of ``While`` if the shape of parameter ``cond`` is [1]. + OP :ref:`api_base_layers_while_loop` is easier to use and is called with less code but does the same thing as ``While`` . Notice: Local variables created in ``While`` are similar to that created in while of C++, and cannot be referenced externally. 
@@ -1213,8 +1213,8 @@ def cond(pred, true_fn=None, false_fn=None, name=None, return_names=None): return false_fn() return None - check_variable_and_dtype(pred, "pred", ['bool'], "fluid.layers.cond") - check_type(name, "name", (str, type(None)), "fluid.layers.cond") + check_variable_and_dtype(pred, "pred", ['bool'], "base.layers.cond") + check_type(name, "name", (str, type(None)), "base.layers.cond") helper = LayerHelper('cond', **locals()) true_output = None false_output = None @@ -1758,7 +1758,7 @@ def case(self, condition): condition, 'condition', ['bool'], - 'the member function case of fluid.layers.Switch', + 'the member function case of base.layers.Switch', ) if len(self.pre_not_conditions) == 0: diff --git a/python/paddle/static/nn/loss.py b/python/paddle/static/nn/loss.py index cbc099d963a23..7f1ef25622612 100644 --- a/python/paddle/static/nn/loss.py +++ b/python/paddle/static/nn/loss.py @@ -14,15 +14,15 @@ import numpy as np -from paddle.fluid.framework import static_only +from paddle.base.framework import static_only # TODO: define loss functions of neural network -from paddle.fluid.layer_helper import LayerHelper -from paddle.fluid.layers.layer_function_generator import templatedoc -from paddle.fluid.param_attr import ParamAttr +from paddle.base.layer_helper import LayerHelper +from paddle.base.layers.layer_function_generator import templatedoc +from paddle.base.param_attr import ParamAttr from paddle.nn.initializer import Assign -from ...fluid.data_feeder import check_variable_and_dtype +from ...base.data_feeder import check_variable_and_dtype __all__ = [] @@ -62,10 +62,10 @@ def nce( sample is 1.0. param_attr (ParamAttr|None): To specify the weight parameter attribute. Default: None, which means the default weight parameter property is - used. See usage for details in :ref:`api_fluid_ParamAttr` . + used. See usage for details in :ref:`api_base_ParamAttr` . bias_attr (ParamAttr|None): To specify the bias parameter attribute. Default: None, which means the default bias parameter property is - used. See usage for details in :ref:`api_fluid_ParamAttr` . + used. See usage for details in :ref:`api_base_ParamAttr` . num_neg_samples (int): ${num_neg_samples_comment}. name(str|None): For detailed information, please refer to :ref:`api_guide_Name` . Usually name is no need to set and None by default. diff --git a/python/paddle/static/nn/metric.py b/python/paddle/static/nn/metric.py index c4fc0a9b4f240..74db729843327 100644 --- a/python/paddle/static/nn/metric.py +++ b/python/paddle/static/nn/metric.py @@ -18,9 +18,9 @@ import paddle from paddle import _legacy_C_ops -from paddle.fluid.data_feeder import check_variable_and_dtype -from paddle.fluid.framework import Variable, _create_tensor, in_dygraph_mode -from paddle.fluid.layer_helper import LayerHelper +from paddle.base.data_feeder import check_variable_and_dtype +from paddle.base.framework import Variable, _create_tensor, in_dygraph_mode +from paddle.base.layer_helper import LayerHelper from paddle.nn.initializer import ConstantInitializer __all__ = [] diff --git a/python/paddle/static/nn/sequence_lod.py b/python/paddle/static/nn/sequence_lod.py index f9b9ea355e262..653541d92551c 100644 --- a/python/paddle/static/nn/sequence_lod.py +++ b/python/paddle/static/nn/sequence_lod.py @@ -13,11 +13,11 @@ # limitations under the License. 
import paddle -from paddle.fluid.core import VarDesc -from paddle.fluid.data_feeder import check_type, check_variable_and_dtype -from paddle.fluid.framework import Variable, in_dygraph_mode -from paddle.fluid.layer_helper import LayerHelper -from paddle.fluid.layers.layer_function_generator import templatedoc +from paddle.base.core import VarDesc +from paddle.base.data_feeder import check_type, check_variable_and_dtype +from paddle.base.framework import Variable, in_dygraph_mode +from paddle.base.layer_helper import LayerHelper +from paddle.base.layers.layer_function_generator import templatedoc __all__ = [] @@ -38,7 +38,7 @@ def sequence_conv( r""" Note: - Only receives Tensor as input. If your input is Tensor, please use conv2d Op.(fluid.layers.** :ref:`api_fluid_layers_conv2d` ). + Only receives Tensor as input. If your input is Tensor, please use conv2d Op.(base.layers.** :ref:`api_base_layers_conv2d` ). This operator receives input sequences with variable length and other convolutional configuration parameters(num_filters, filter_size) to apply the convolution operation. @@ -108,9 +108,9 @@ def sequence_conv( on both sides of the sequence. If set 0, the length of :math:`filter\_size - 1` data is padded at the end of each input sequence. Default: None. bias_attr (ParamAttr): To specify the bias parameter property. Default: None, which means the - default bias parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` . + default bias parameter property is used. See usage for details in :ref:`api_base_ParamAttr` . param_attr (ParamAttr): To specify the weight parameter property. Default: None, which means the - default weight parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` . + default weight parameter property is used. See usage for details in :ref:`api_base_ParamAttr` . act (str): Activation to be applied to the output of this layer, such as tanh, softmax, sigmoid, relu. For more information, please refer to :ref:`api_guide_activations_en` . Default: None. name (str, optional): The default value is None. Normally there is no need for user to set this property. @@ -730,7 +730,7 @@ def sequence_expand(x, y, ref_level=-1, name=None): .. 
code-block:: python import paddle - from paddle import fluid + from paddle import base paddle.enable_static() import numpy as np @@ -739,11 +739,11 @@ def sequence_expand(x, y, ref_level=-1, name=None): dtype='float32', lod_level=1) out = paddle.static.nn.sequence_expand(x=x, y=y, ref_level=0) - exe = paddle.static.Executor(fluid.CPUPlace()) + exe = paddle.static.Executor(base.CPUPlace()) place = paddle.CPUPlace() np_data = np.array([[1], [2], [3], [4]]).astype('float32') - x_lod_tensor = fluid.create_lod_tensor(np_data, [[2, 2]], place) + x_lod_tensor = base.create_lod_tensor(np_data, [[2, 2]], place) print(x_lod_tensor) #lod: [[0, 2, 4]] # dim: 4, 1 @@ -752,7 +752,7 @@ def sequence_expand(x, y, ref_level=-1, name=None): # data: [1 2 3 4] np_data = np.array([[1], [2], [3], [4], [5], [6], [7], [8]]).astype('float32') - y_lod_tensor = fluid.create_lod_tensor(np_data, [[2, 2], [3,3,1,1]], place) + y_lod_tensor = base.create_lod_tensor(np_data, [[2, 2], [3,3,1,1]], place) print(y_lod_tensor) #lod: [[0, 2, 4][0, 3, 6, 7, 8]] # dim: 8, 1 @@ -760,7 +760,7 @@ def sequence_expand(x, y, ref_level=-1, name=None): # dtype: int64_t # data: [0 0 1 1 1 1 1 0] - out_main = exe.run(fluid.default_main_program(), + out_main = exe.run(base.default_main_program(), feed={'x': x_lod_tensor, 'y': y_lod_tensor}, fetch_list=[out], return_numpy=False) print(out_main[0]) @@ -853,7 +853,7 @@ def sequence_expand_as(x, y, name=None): .. code-block:: python import paddle - import paddle.fluid as fluid + import paddle.base as base paddle.enable_static() import numpy as np @@ -861,11 +861,11 @@ def sequence_expand_as(x, y, name=None): y = paddle.static.data(name='y', shape=[8, 1], dtype='float32', lod_level=1) out = paddle.static.nn.sequence_expand_as(x=x, y=y) - exe = fluid.Executor(fluid.CPUPlace()) - place = fluid.CPUPlace() + exe = base.Executor(base.CPUPlace()) + place = base.CPUPlace() np_data = np.array([[1], [2], [3], [4]]).astype('float32') - x_lod_tensor = fluid.create_lod_tensor(np_data, [[2, 2]], place) + x_lod_tensor = base.create_lod_tensor(np_data, [[2, 2]], place) print(x_lod_tensor) #lod: [[0, 2, 4]] # dim: 4, 1 @@ -874,7 +874,7 @@ def sequence_expand_as(x, y, name=None): # data: [1 2 3 4] np_data = np.array([[1], [2], [3], [4], [5], [6], [7], [8]]).astype('float32') - y_lod_tensor = fluid.create_lod_tensor(np_data, [[3,3,1,1]], place) + y_lod_tensor = base.create_lod_tensor(np_data, [[3,3,1,1]], place) print(y_lod_tensor) #lod: [[0, 3, 6, 7, 8]] # dim: 8, 1 @@ -882,7 +882,7 @@ def sequence_expand_as(x, y, name=None): # dtype: int64_t # data: [0 0 1 0 1 1 1 0] - out_main = exe.run(fluid.default_main_program(), + out_main = exe.run(base.default_main_program(), feed={'x': x_lod_tensor, 'y': y_lod_tensor}, fetch_list=[out], return_numpy=False) print(out_main[0]) @@ -986,7 +986,7 @@ def sequence_pad(x, pad_value, maxlen=None, name=None): import paddle paddle.enable_static() - import paddle.fluid as fluid + import paddle.base as base import numpy x = paddle.static.data(name='x', shape=[10, 5], dtype='float32', lod_level=1) @@ -1072,7 +1072,7 @@ def sequence_unpad(x, length, name=None): import paddle paddle.enable_static() - import paddle.fluid as fluid + import paddle.base as base import numpy # pad data @@ -1418,7 +1418,7 @@ def sequence_reverse(x, name=None): Only supports Tensor as input. It will reverse each sequence for input Tensor. Currently it only supports 1-level Tensor. This operator is very useful when building a - reverse :ref:`api_fluid_layers_DynamicRNN` network. 
+ reverse :ref:`api_base_layers_DynamicRNN` network. .. code-block:: text diff --git a/python/paddle/static/quantization/post_training_quantization.py b/python/paddle/static/quantization/post_training_quantization.py index 61f1cdf6fe5ff..7234fdfb62227 100644 --- a/python/paddle/static/quantization/post_training_quantization.py +++ b/python/paddle/static/quantization/post_training_quantization.py @@ -24,7 +24,7 @@ from .utils import tqdm -from paddle.fluid.framework import IrGraph, _get_var +from paddle.base.framework import IrGraph, _get_var from ... import io, static from ...framework import core diff --git a/python/paddle/static/quantization/quant2_int8_mkldnn_pass.py b/python/paddle/static/quantization/quant2_int8_mkldnn_pass.py index 990f11220927b..cbb46200a6029 100644 --- a/python/paddle/static/quantization/quant2_int8_mkldnn_pass.py +++ b/python/paddle/static/quantization/quant2_int8_mkldnn_pass.py @@ -14,7 +14,7 @@ import numpy as np -from ...fluid.framework import IrGraph +from ...base.framework import IrGraph from ...framework import _get_paddle_place, core OpRole = core.op_proto_and_checker_maker.OpRole diff --git a/python/paddle/static/quantization/quant_int8_mkldnn_pass.py b/python/paddle/static/quantization/quant_int8_mkldnn_pass.py index d11e7d212686b..2d4cc4bfd8364 100644 --- a/python/paddle/static/quantization/quant_int8_mkldnn_pass.py +++ b/python/paddle/static/quantization/quant_int8_mkldnn_pass.py @@ -14,7 +14,7 @@ import numpy as np -from ...fluid.framework import IrGraph +from ...base.framework import IrGraph from ...framework import _get_paddle_place diff --git a/python/paddle/static/quantization/quanter.py b/python/paddle/static/quantization/quanter.py index a5baf899060c4..218f5786724ce 100644 --- a/python/paddle/static/quantization/quanter.py +++ b/python/paddle/static/quantization/quanter.py @@ -20,7 +20,7 @@ import paddle -from ...fluid.framework import IrGraph, core +from ...base.framework import IrGraph, core from ..log_helper import get_logger from .quantization_pass import ( AddQuantDequantPass, diff --git a/python/paddle/static/quantization/quantization_pass.py b/python/paddle/static/quantization/quantization_pass.py index fdb721681f681..4387732362f9a 100644 --- a/python/paddle/static/quantization/quantization_pass.py +++ b/python/paddle/static/quantization/quantization_pass.py @@ -24,7 +24,7 @@ import paddle -from ...fluid.framework import IrGraph, IrNode +from ...base.framework import IrGraph, IrNode from ...framework import _get_paddle_place, core from ...static import Program, data, program_guard, scope_guard from ...utils import unique_name @@ -168,7 +168,7 @@ def __init__( optimizer_func(function): Fuction return a optimizer. When 'is_test' is False and user want to use self-defined quantization function and preprocess function, this function must be set. Default is None. - executor(Fluid.Executor): If user want to use self-defined quantization + executor(base.Executor): If user want to use self-defined quantization function and preprocess function, executor must be set for initialization. Default is None. @@ -179,7 +179,7 @@ def __init__( >>> # The original graph will be rewrite. 
>>> import paddle.static as static >>> from paddle.static.quantization import QuantizationTransformPass - >>> from paddle.fluid.framework import IrGraph + >>> from paddle.base.framework import IrGraph >>> from paddle.framework import core >>> graph = IrGraph(core.Graph(static.Program().desc), for_test=False) @@ -2449,7 +2449,7 @@ def __init__( >>> import paddle >>> import paddle.static as static >>> from paddle.static.quantization import QuantizationTransformPassV2 - >>> from paddle.fluid.framework import IrGraph + >>> from paddle.base.framework import IrGraph >>> from paddle.framework import core >>> graph = IrGraph(core.Graph(static.Program().desc), for_test=False) @@ -2842,7 +2842,7 @@ def __init__( >>> import paddle >>> import paddle.static as static >>> from paddle.static.quantization import AddQuantDequantPassV2 - >>> from paddle.fluid.framework import IrGraph + >>> from paddle.base.framework import IrGraph >>> from paddle.framework import core >>> graph = IrGraph(core.Graph(static.Program().desc), for_test=False) @@ -3025,7 +3025,7 @@ def __init__(self, scope, place, quant_bits=8): >>> import paddle >>> import paddle.static as static >>> from paddle.static.quantization import ReplaceFakeQuantDequantPass - >>> from paddle.fluid.framework import IrGraph + >>> from paddle.base.framework import IrGraph >>> from paddle.framework import core >>> graph = IrGraph(core.Graph(static.Program().desc), for_test=False) @@ -3182,7 +3182,7 @@ class QuantWeightPass: >>> import paddle >>> import paddle.static as static >>> from paddle.static.quantization import QuantWeightPass - >>> from paddle.fluid.framework import IrGraph + >>> from paddle.base.framework import IrGraph >>> from paddle.framework import core >>> graph = IrGraph(core.Graph(paddle.static.Program().desc), for_test=False) diff --git a/python/paddle/static/quantization/utils.py b/python/paddle/static/quantization/utils.py index 7397ff3fe01d2..29317795c05ba 100644 --- a/python/paddle/static/quantization/utils.py +++ b/python/paddle/static/quantization/utils.py @@ -16,7 +16,7 @@ import numpy as np -from ...fluid.framework import IrNode, Operator +from ...base.framework import IrNode, Operator from .quant_config import SUPPORT_QUANTIZATION_OP_DICT _channelwise_quant_axis1_ops = [ diff --git a/python/paddle/tensor/array.py b/python/paddle/tensor/array.py index 801aa78d4929a..752eb848aae74 100644 --- a/python/paddle/tensor/array.py +++ b/python/paddle/tensor/array.py @@ -15,7 +15,7 @@ # Define functions about array. 
 from ..common_ops_import import Variable
-from ..fluid.data_feeder import check_type, check_variable_and_dtype
+from ..base.data_feeder import check_type, check_variable_and_dtype
 from ..framework import LayerHelper, core, in_dynamic_mode
 __all__ = []
diff --git a/python/paddle/tensor/attribute.py b/python/paddle/tensor/attribute.py
index a7f9e5cef2297..561681416f5c3 100644
--- a/python/paddle/tensor/attribute.py
+++ b/python/paddle/tensor/attribute.py
@@ -20,8 +20,8 @@
 from paddle import _C_ops
 from ..common_ops_import import Variable
-from ..fluid.data_feeder import check_type, check_variable_and_dtype
-from ..fluid.framework import in_dygraph_mode
+from ..base.data_feeder import check_type, check_variable_and_dtype
+from ..base.framework import in_dygraph_mode
 from ..framework import LayerHelper, core
 from .creation import _complex_to_real_dtype, assign
diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py
index 528a3c40b9a4b..8382c510999aa 100644
--- a/python/paddle/tensor/creation.py
+++ b/python/paddle/tensor/creation.py
@@ -24,15 +24,15 @@
 from paddle import _C_ops
 from paddle.utils.inplace_utils import inplace_apis_in_dygraph_only
-from ..fluid.data_feeder import (
+from ..base.data_feeder import (
     check_dtype,
     check_type,
     check_variable_and_dtype,
     convert_dtype,
     convert_float_to_uint16,
 )
-from ..fluid.framework import Variable, device_guard
-from ..fluid.param_attr import ParamAttr
+from ..base.framework import Variable, device_guard
+from ..base.param_attr import ParamAttr
 from ..framework import (
     LayerHelper,
     _current_expected_place,
diff --git a/python/paddle/tensor/einsum.py b/python/paddle/tensor/einsum.py
index 11fb7e6f47607..955d104804bc8 100644
--- a/python/paddle/tensor/einsum.py
+++ b/python/paddle/tensor/einsum.py
@@ -22,9 +22,9 @@
 from paddle import _C_ops
-from ..fluid.data_feeder import check_type, check_variable_and_dtype
-from ..fluid.framework import in_dygraph_mode
-from ..fluid.layer_helper import LayerHelper
+from ..base.data_feeder import check_type, check_variable_and_dtype
+from ..base.framework import in_dygraph_mode
+from ..base.layer_helper import LayerHelper
 from .linalg import matmul, transpose
 from .manipulation import reshape, squeeze, unsqueeze
 from .math import multiply
diff --git a/python/paddle/tensor/layer_function_generator.py b/python/paddle/tensor/layer_function_generator.py
index 955e2b13ec548..a1856cea7c4ed 100644
--- a/python/paddle/tensor/layer_function_generator.py
+++ b/python/paddle/tensor/layer_function_generator.py
@@ -19,8 +19,8 @@
 from paddle import _C_ops, _legacy_C_ops
 from ..common_ops_import import Variable
-from ..fluid.data_feeder import check_variable_and_dtype
-from ..fluid.proto import framework_pb2
+from ..base.data_feeder import check_variable_and_dtype
+from ..base.proto import framework_pb2
 from ..framework import (
     LayerHelper,
     OpProtoHolder,
diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py
index c8ed387c5b71b..7b916a398a67e 100644
--- a/python/paddle/tensor/linalg.py
+++ b/python/paddle/tensor/linalg.py
@@ -19,7 +19,7 @@
 from paddle.common_ops_import import VarDesc
 from ..common_ops_import import Variable
-from ..fluid.data_feeder import (
+from ..base.data_feeder import (
     check_dtype,
     check_type,
     check_variable_and_dtype,
diff --git a/python/paddle/tensor/logic.py b/python/paddle/tensor/logic.py
index 7bf39eb2e59df..8723517255052 100755
--- a/python/paddle/tensor/logic.py
+++ b/python/paddle/tensor/logic.py
@@ -17,10 +17,10 @@
 import paddle
 from
..common_ops_import import Variable -from ..fluid.data_feeder import check_type, check_variable_and_dtype +from ..base.data_feeder import check_type, check_variable_and_dtype from .layer_function_generator import templatedoc -Tensor = paddle.fluid.framework.core.eager.Tensor +Tensor = paddle.base.framework.core.eager.Tensor from paddle import _C_ops from paddle.tensor.creation import full @@ -1073,7 +1073,7 @@ def is_tensor(x): """ if in_dynamic_mode(): - return isinstance(x, (Tensor, paddle.fluid.core.eager.Tensor)) + return isinstance(x, (Tensor, paddle.base.core.eager.Tensor)) else: return isinstance(x, Variable) diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py index d44810aea1aaf..a6262b604a52f 100644 --- a/python/paddle/tensor/manipulation.py +++ b/python/paddle/tensor/manipulation.py @@ -21,13 +21,13 @@ from paddle.tensor import fill_constant from paddle.utils.inplace_utils import inplace_apis_in_dygraph_only -from ..fluid.data_feeder import ( +from ..base.data_feeder import ( check_dtype, check_type, check_variable_and_dtype, convert_dtype, ) -from ..fluid.framework import Variable +from ..base.framework import Variable from ..framework import ( LayerHelper, convert_np_dtype_to_dtype_, @@ -5033,7 +5033,7 @@ def as_strided(x, shape, stride, offset=0, name=None): .. code-block:: python import paddle - paddle.fluid.set_flags({"FLAGS_use_stride_kernel": True}) + paddle.base.set_flags({"FLAGS_use_stride_kernel": True}) x = paddle.rand([2, 4, 6], dtype="float32") @@ -5065,7 +5065,7 @@ def view(x, shape_or_dtype, name=None): .. code-block:: python import paddle - paddle.fluid.set_flags({"FLAGS_use_stride_kernel": True}) + paddle.base.set_flags({"FLAGS_use_stride_kernel": True}) x = paddle.rand([2, 4, 6], dtype="float32") @@ -5074,7 +5074,7 @@ def view(x, shape_or_dtype, name=None): import paddle - paddle.fluid.set_flags({"FLAGS_use_stride_kernel": True}) + paddle.base.set_flags({"FLAGS_use_stride_kernel": True}) x = paddle.rand([2, 4, 6], dtype="float32") @@ -5109,7 +5109,7 @@ def view_as(x, other, name=None): .. code-block:: python import paddle - paddle.fluid.set_flags({"FLAGS_use_stride_kernel": True}) + paddle.base.set_flags({"FLAGS_use_stride_kernel": True}) x = paddle.rand([2, 4, 6], dtype="float32") y = paddle.rand([8, 6], dtype="float32") @@ -5142,7 +5142,7 @@ def unfold(x, axis, size, step, name=None): .. code-block:: python import paddle - paddle.fluid.set_flags({"FLAGS_use_stride_kernel": True}) + paddle.base.set_flags({"FLAGS_use_stride_kernel": True}) x = paddle.arange(9, dtype="float64") diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py index 02ac8b4669e7e..63e21bc98fca0 100644 --- a/python/paddle/tensor/math.py +++ b/python/paddle/tensor/math.py @@ -23,7 +23,7 @@ from paddle.utils.inplace_utils import inplace_apis_in_dygraph_only from ..common_ops_import import Variable -from ..fluid.data_feeder import ( +from ..base.data_feeder import ( check_dtype, check_type, check_variable_and_dtype, diff --git a/python/paddle/tensor/ops.py b/python/paddle/tensor/ops.py index e4d73408c367e..e15bf17beb646 100644 --- a/python/paddle/tensor/ops.py +++ b/python/paddle/tensor/ops.py @@ -16,7 +16,7 @@ from paddle.utils.inplace_utils import inplace_apis_in_dygraph_only from .. 
import _C_ops -from ..fluid.data_feeder import check_variable_and_dtype +from ..base.data_feeder import check_variable_and_dtype from ..framework import LayerHelper, in_dynamic_mode from .layer_function_generator import ( add_sample_code, diff --git a/python/paddle/tensor/random.py b/python/paddle/tensor/random.py index f92d714dccb0b..e2b71457bbdde 100644 --- a/python/paddle/tensor/random.py +++ b/python/paddle/tensor/random.py @@ -17,10 +17,10 @@ import paddle from paddle import _C_ops, _legacy_C_ops from paddle.common_ops_import import Variable -from paddle.fluid.framework import _current_expected_place +from paddle.base.framework import _current_expected_place from paddle.framework import in_dynamic_mode -from ..fluid.data_feeder import ( +from ..base.data_feeder import ( check_dtype, check_shape, check_type, @@ -255,7 +255,7 @@ def uniform_random_batch_size_like( Examples: .. code-block:: python import paddle - import paddle.fluid as fluid + import paddle.base as base from paddle.tensor import random paddle.enable_static() # example 1: diff --git a/python/paddle/tensor/search.py b/python/paddle/tensor/search.py index b7fe99f477ee8..5a90657016067 100755 --- a/python/paddle/tensor/search.py +++ b/python/paddle/tensor/search.py @@ -21,7 +21,7 @@ from paddle.common_ops_import import VarDesc, Variable from paddle.utils.inplace_utils import inplace_apis_in_dygraph_only -from ..fluid.data_feeder import check_dtype, check_variable_and_dtype +from ..base.data_feeder import check_dtype, check_variable_and_dtype from ..framework import ( LayerHelper, convert_np_dtype_to_dtype_, @@ -29,8 +29,8 @@ in_dynamic_mode, ) -# from ..fluid.layers import has_inf #DEFINE_ALIAS -# from ..fluid.layers import has_nan #DEFINE_ALIAS +# from ..base.layers import has_inf #DEFINE_ALIAS +# from ..base.layers import has_nan #DEFINE_ALIAS __all__ = [] diff --git a/python/paddle/tensor/stat.py b/python/paddle/tensor/stat.py index 04fe17d85546b..6b1b54a7d0bdf 100644 --- a/python/paddle/tensor/stat.py +++ b/python/paddle/tensor/stat.py @@ -19,7 +19,7 @@ from paddle.framework import in_dynamic_mode, in_dynamic_or_new_ir_mode from ..common_ops_import import Variable -from ..fluid.data_feeder import check_type, check_variable_and_dtype +from ..base.data_feeder import check_type, check_variable_and_dtype from ..framework import LayerHelper, core from .math import _get_reduce_axis_with_tensor from .search import where diff --git a/python/paddle/tensor/to_string.py b/python/paddle/tensor/to_string.py index 9875263b45ab7..a9b5740b2fc30 100644 --- a/python/paddle/tensor/to_string.py +++ b/python/paddle/tensor/to_string.py @@ -14,7 +14,7 @@ import numpy as np -from paddle.fluid.data_feeder import check_type, convert_dtype +from paddle.base.data_feeder import check_type, convert_dtype from ..framework import core diff --git a/python/paddle/text/viterbi_decode.py b/python/paddle/text/viterbi_decode.py index 591f7ae6033e5..0091395bd26a9 100644 --- a/python/paddle/text/viterbi_decode.py +++ b/python/paddle/text/viterbi_decode.py @@ -14,9 +14,9 @@ from paddle import _C_ops -from ..fluid.data_feeder import check_type, check_variable_and_dtype -from ..fluid.framework import in_dygraph_mode -from ..fluid.layer_helper import LayerHelper +from ..base.data_feeder import check_type, check_variable_and_dtype +from ..base.framework import in_dygraph_mode +from ..base.layer_helper import LayerHelper from ..nn import Layer __all__ = ['viterbi_decode', 'ViterbiDecoder'] diff --git a/python/paddle/utils/__init__.py b/python/paddle/utils/__init__.py 
index df62d9982f6f4..630af97f280f5 100644 --- a/python/paddle/utils/__init__.py +++ b/python/paddle/utils/__init__.py @@ -18,7 +18,7 @@ from .op_version import OpLastCheckpointChecker # noqa: F401 from .install_check import run_check # noqa: F401 from . import unique_name # noqa: F401 -from ..fluid.framework import require_version # noqa: F401 +from ..base.framework import require_version # noqa: F401 from . import download # noqa: F401 from . import image_util # noqa: F401 diff --git a/python/paddle/utils/cpp_extension/cpp_extension.py b/python/paddle/utils/cpp_extension/cpp_extension.py index a1c1f794e4f6f..4ea6c9ad591d6 100644 --- a/python/paddle/utils/cpp_extension/cpp_extension.py +++ b/python/paddle/utils/cpp_extension/cpp_extension.py @@ -59,7 +59,7 @@ ) from .extension_utils import CLANG_COMPILE_FLAGS, CLANG_LINK_FLAGS -from ...fluid import core +from ...base import core # Note(zhouwei): On windows, it will export function 'PyInit_[name]' by default, # The solution is: 1.User add function PyInit_[name] 2. set not to export diff --git a/python/paddle/utils/cpp_extension/extension_utils.py b/python/paddle/utils/cpp_extension/extension_utils.py index d98d2c8e7aed3..df2fd45b345ea 100644 --- a/python/paddle/utils/cpp_extension/extension_utils.py +++ b/python/paddle/utils/cpp_extension/extension_utils.py @@ -38,8 +38,8 @@ except ImportError: DEVNULL = open(os.devnull, 'wb') -from ...fluid import core -from ...fluid.framework import OpProtoHolder +from ...base import core +from ...base.framework import OpProtoHolder from ...sysconfig import get_include, get_lib logger = logging.getLogger("utils.cpp_extension") @@ -448,13 +448,13 @@ def get_rocm_arch_flags(cflags): return cflags -def _get_fluid_path(): +def _get_base_path(): """ - Return installed fluid dir path. + Return installed base dir path. """ import paddle - return os.path.join(os.path.dirname(paddle.__file__), 'fluid') + return os.path.join(os.path.dirname(paddle.__file__), 'base') def _get_core_name(): @@ -471,7 +471,7 @@ def _get_lib_core_path(): """ raw_core_name = _get_core_name() lib_core_name = f"lib{raw_core_name[:-3]}.dylib" - return os.path.join(_get_fluid_path(), lib_core_name) + return os.path.join(_get_base_path(), lib_core_name) def _get_dll_core_path(): @@ -480,17 +480,17 @@ def _get_dll_core_path(): """ raw_core_name = _get_core_name() dll_core_name = "libpaddle.dll" - return os.path.join(_get_fluid_path(), dll_core_name) + return os.path.join(_get_base_path(), dll_core_name) def _reset_so_rpath(so_path): """ NOTE(Aurelius84): Runtime path of libpaddle.so is modified into `@loader_path/../libs` in setup.py.in. While loading custom op, `@loader_path` is the dirname of custom op - instead of `paddle/fluid`. So we modify `@loader_path` from custom dylib into `@rpath` + instead of `paddle/base`. So we modify `@loader_path` from custom dylib into `@rpath` to ensure dynamic loader find it correctly. - Moreover, we will add `-rpath site-packages/paddle/fluid` while linking the dylib so + Moreover, we will add `-rpath site-packages/paddle/base` while linking the dylib so that we don't need to set `LD_LIBRARY_PATH` any more. 
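The NOTE above summarizes the macOS loading scheme for custom operators: the custom-op dylib records a reference to libpaddle.so, that reference is rewritten from a `@loader_path`-relative form to `@rpath`, and an rpath entry pointing at the installed `paddle/base` directory is added while linking so `LD_LIBRARY_PATH` is not required. A rough sketch of that kind of rewrite, assuming macOS with `install_name_tool` on the PATH and using hypothetical paths; it illustrates the mechanism and is not the helper's actual implementation:

    import subprocess

    def rewrite_loader_path(custom_op_dylib: str, core_name: str = "libpaddle.so") -> None:
        # Rewrite the recorded dependency so it is resolved through @rpath
        # instead of relative to the loading dylib's own directory.
        subprocess.check_call([
            "install_name_tool", "-change",
            f"@loader_path/../libs/{core_name}",  # hypothetical old reference
            f"@rpath/{core_name}",
            custom_op_dylib,
        ])
        # Add an rpath entry for the installed paddle/base directory,
        # mirroring the `-Wl,-rpath,...` flag used while linking the dylib.
        subprocess.check_call([
            "install_name_tool", "-add_rpath",
            "/path/to/site-packages/paddle/base",  # hypothetical install dir
            custom_op_dylib,
        ])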
""" assert os.path.exists(so_path) @@ -518,7 +518,7 @@ def _get_include_dirs_when_compiling(compile_dir): with open(include_dirs_file, 'r') as f: include_dirs = [line.strip() for line in f.readlines() if line.strip()] - extra_dirs = ['paddle/fluid/platform'] + extra_dirs = ['paddle/base/platform'] all_include_dirs = list(include_dirs) for extra_dir in extra_dirs: for include_dir in include_dirs: @@ -583,7 +583,7 @@ def normalize_extension_kwargs(kwargs, use_cuda=False): # ----------------------- MacOS Platform ----------------------- # else: # See _reset_so_rpath for details. - extra_link_args.append(f'-Wl,-rpath,{_get_fluid_path()}') + extra_link_args.append(f'-Wl,-rpath,{_get_base_path()}') # On MacOS, ld don't support `-l:xx`, so we create a # liblibpaddle.dylib symbol link. lib_core_name = create_sym_link_if_not_exist() @@ -606,7 +606,7 @@ def normalize_extension_kwargs(kwargs, use_cuda=False): kwargs['runtime_library_dirs'] = runtime_library_dirs if compile_dir is None: - # Add this compile option to isolate fluid headers + # Add this compile option to isolate base headers add_compile_flag(extra_compile_args, ['-DPADDLE_WITH_CUSTOM_KERNEL']) kwargs['extra_compile_args'] = extra_compile_args @@ -621,7 +621,7 @@ def create_sym_link_if_not_exist(): assert OS_NAME.startswith('darwin') or IS_WINDOWS raw_core_name = _get_core_name() - core_path = os.path.join(_get_fluid_path(), raw_core_name) + core_path = os.path.join(_get_base_path(), raw_core_name) if IS_WINDOWS: new_dll_core_path = _get_dll_core_path() # create symbol link on windows @@ -871,8 +871,8 @@ def find_paddle_libraries(use_cuda=False): cuda_lib_dir = find_cuda_libraries() paddle_lib_dirs.extend(cuda_lib_dir) - # add `paddle/fluid` to search `libpaddle.so` - paddle_lib_dirs.append(_get_fluid_path()) + # add `paddle/base` to search `libpaddle.so` + paddle_lib_dirs.append(_get_base_path()) return paddle_lib_dirs @@ -1142,9 +1142,9 @@ def _custom_api_content(op_name): ) API_TEMPLATE = textwrap.dedent( """ - import paddle.fluid.core as core + import paddle.base.core as core from paddle.framework import in_dynamic_mode - from paddle.fluid.layer_helper import LayerHelper + from paddle.base.layer_helper import LayerHelper def {op_name}({params_list}): # The output variable's dtype use default value 'float32', diff --git a/python/paddle/utils/dlpack.py b/python/paddle/utils/dlpack.py index c818d16125eed..ed196beedb356 100644 --- a/python/paddle/utils/dlpack.py +++ b/python/paddle/utils/dlpack.py @@ -14,9 +14,9 @@ import paddle -from ..fluid.core import LoDTensor -from ..fluid.data_feeder import check_type -from ..fluid.framework import in_dygraph_mode +from ..base.core import LoDTensor +from ..base.data_feeder import check_type +from ..base.framework import in_dygraph_mode __all__ = [ 'to_dlpack', @@ -50,7 +50,7 @@ def to_dlpack(x): """ if in_dygraph_mode(): - if not isinstance(x, (paddle.Tensor, paddle.fluid.core.eager.Tensor)): + if not isinstance(x, (paddle.Tensor, paddle.base.core.eager.Tensor)): raise TypeError( "The type of 'x' in to_dlpack must be paddle.Tensor," " but received {}.".format(type(x)) @@ -98,9 +98,9 @@ def from_dlpack(dlpack): ) if in_dygraph_mode(): - out = paddle.fluid.core.from_dlpack(dlpack) + out = paddle.base.core.from_dlpack(dlpack) out = paddle.to_tensor(out) return out - out = paddle.fluid.core.from_dlpack(dlpack) + out = paddle.base.core.from_dlpack(dlpack) return out diff --git a/python/paddle/utils/inplace_utils.py b/python/paddle/utils/inplace_utils.py index bee68e1212598..ee5032d0657fe 100644 --- 
a/python/paddle/utils/inplace_utils.py +++ b/python/paddle/utils/inplace_utils.py @@ -15,7 +15,7 @@ import warnings import paddle # noqa: F401 -from paddle.fluid.wrapped_decorator import wrap_decorator +from paddle.base.wrapped_decorator import wrap_decorator from paddle.framework import in_dynamic_mode @@ -33,7 +33,7 @@ def __impl__(*args, **kwargs): func.__name__, origin_api_name ) ) - from ..fluid.dygraph.base import in_declarative_mode + from ..base.dygraph.base import in_declarative_mode if in_declarative_mode(): for arg in args: diff --git a/python/paddle/utils/layers_utils.py b/python/paddle/utils/layers_utils.py index 57d014c40e0ab..d7ac845926875 100644 --- a/python/paddle/utils/layers_utils.py +++ b/python/paddle/utils/layers_utils.py @@ -20,8 +20,8 @@ import paddle -from ..fluid.data_feeder import check_dtype, convert_dtype -from ..fluid.framework import Block, Variable, in_dygraph_mode +from ..base.data_feeder import check_dtype, convert_dtype +from ..base.framework import Block, Variable, in_dygraph_mode def convert_to_list(value, n, name, dtype=int): @@ -175,7 +175,7 @@ def flatten(nest): """ :alias_main: paddle.flatten :alias: paddle.flatten,paddle.tensor.flatten,paddle.tensor.manipulation.flatten - :old_api: paddle.fluid.layers.flatten + :old_api: paddle.base.layers.flatten Traverse all entries in the nested structure and put them into an list. """ diff --git a/python/paddle/utils/op_version.py b/python/paddle/utils/op_version.py index 793e0b621990c..6d70215f40cbd 100644 --- a/python/paddle/utils/op_version.py +++ b/python/paddle/utils/op_version.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from ..fluid import core +from ..base import core __all__ = [] diff --git a/python/paddle/utils/unique_name.py b/python/paddle/utils/unique_name.py index 0aae339f29545..bfd26da255fa1 100644 --- a/python/paddle/utils/unique_name.py +++ b/python/paddle/utils/unique_name.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from ..fluid.unique_name import generate # noqa: F401 -from ..fluid.unique_name import generate_with_ignorable_key # noqa: F401 -from ..fluid.unique_name import guard # noqa: F401 -from ..fluid.unique_name import switch # noqa: F401 +from ..base.unique_name import generate # noqa: F401 +from ..base.unique_name import generate_with_ignorable_key # noqa: F401 +from ..base.unique_name import guard # noqa: F401 +from ..base.unique_name import switch # noqa: F401 __all__ = ['generate', 'switch', 'guard'] # noqa diff --git a/python/paddle/vision/models/alexnet.py b/python/paddle/vision/models/alexnet.py index 7a6e2b0328ae5..26282bf88f7a1 100644 --- a/python/paddle/vision/models/alexnet.py +++ b/python/paddle/vision/models/alexnet.py @@ -17,7 +17,7 @@ import paddle import paddle.nn.functional as F from paddle import nn -from paddle.fluid.param_attr import ParamAttr +from paddle.base.param_attr import ParamAttr from paddle.nn import Conv2D, Dropout, Linear, MaxPool2D, ReLU from paddle.nn.initializer import Uniform from paddle.utils.download import get_weights_path_from_url diff --git a/python/paddle/vision/models/densenet.py b/python/paddle/vision/models/densenet.py index ccf6573f5588a..90346d4ff7493 100644 --- a/python/paddle/vision/models/densenet.py +++ b/python/paddle/vision/models/densenet.py @@ -16,7 +16,7 @@ import paddle from paddle import nn -from paddle.fluid.param_attr import ParamAttr +from paddle.base.param_attr import ParamAttr from paddle.nn import ( AdaptiveAvgPool2D, AvgPool2D, diff --git a/python/paddle/vision/models/googlenet.py b/python/paddle/vision/models/googlenet.py index 617ce182c5039..174ddb144fe3d 100644 --- a/python/paddle/vision/models/googlenet.py +++ b/python/paddle/vision/models/googlenet.py @@ -15,7 +15,7 @@ import paddle import paddle.nn.functional as F from paddle import nn -from paddle.fluid.param_attr import ParamAttr +from paddle.base.param_attr import ParamAttr from paddle.nn import ( AdaptiveAvgPool2D, AvgPool2D, diff --git a/python/paddle/vision/models/inceptionv3.py b/python/paddle/vision/models/inceptionv3.py index 9482d7b12e208..24249fbf98aee 100644 --- a/python/paddle/vision/models/inceptionv3.py +++ b/python/paddle/vision/models/inceptionv3.py @@ -16,7 +16,7 @@ import paddle from paddle import nn -from paddle.fluid.param_attr import ParamAttr +from paddle.base.param_attr import ParamAttr from paddle.nn import AdaptiveAvgPool2D, AvgPool2D, Dropout, Linear, MaxPool2D from paddle.nn.initializer import Uniform from paddle.utils.download import get_weights_path_from_url diff --git a/python/paddle/vision/models/squeezenet.py b/python/paddle/vision/models/squeezenet.py index 08cc1a6463324..9133a19993421 100644 --- a/python/paddle/vision/models/squeezenet.py +++ b/python/paddle/vision/models/squeezenet.py @@ -15,7 +15,7 @@ import paddle import paddle.nn.functional as F from paddle import nn -from paddle.fluid.param_attr import ParamAttr +from paddle.base.param_attr import ParamAttr from paddle.nn import AdaptiveAvgPool2D, Conv2D, Dropout, MaxPool2D from paddle.utils.download import get_weights_path_from_url diff --git a/python/paddle/vision/ops.py b/python/paddle/vision/ops.py index 0f73f950d1548..ccec5c41a549b 100755 --- a/python/paddle/vision/ops.py +++ b/python/paddle/vision/ops.py @@ -18,10 +18,10 @@ from paddle.tensor.math import _add_with_axis from paddle.utils import convert_to_list -from ..fluid import core -from ..fluid.data_feeder import check_type, check_variable_and_dtype -from ..fluid.framework import Variable, in_dygraph_mode -from 
..fluid.layer_helper import LayerHelper +from ..base import core +from ..base.data_feeder import check_type, check_variable_and_dtype +from ..base.framework import Variable, in_dygraph_mode +from ..base.layer_helper import LayerHelper from ..framework import _current_expected_place from ..nn import BatchNorm2D, Conv2D, Layer, ReLU, Sequential from ..nn.initializer import Normal diff --git a/python/paddle/vision/transforms/functional.py b/python/paddle/vision/transforms/functional.py index 40a708da5b1f6..eed38fadc7734 100644 --- a/python/paddle/vision/transforms/functional.py +++ b/python/paddle/vision/transforms/functional.py @@ -20,7 +20,7 @@ import paddle -from ...fluid.framework import Variable +from ...base.framework import Variable from . import functional_cv2 as F_cv2 from . import functional_pil as F_pil from . import functional_tensor as F_t diff --git a/python/paddle/vision/transforms/functional_cv2.py b/python/paddle/vision/transforms/functional_cv2.py index 1f76b35d201fe..0c4f70aad78c8 100644 --- a/python/paddle/vision/transforms/functional_cv2.py +++ b/python/paddle/vision/transforms/functional_cv2.py @@ -50,7 +50,7 @@ def to_tensor(pic, data_format='CHW'): else: img = paddle.to_tensor(pic) - if paddle.fluid.data_feeder.convert_dtype(img.dtype) == 'uint8': + if paddle.base.data_feeder.convert_dtype(img.dtype) == 'uint8': return paddle.cast(img, np.float32) / 255.0 else: return img diff --git a/python/paddle/vision/transforms/functional_pil.py b/python/paddle/vision/transforms/functional_pil.py index 3d816c25742f9..6f1a8b9860e79 100644 --- a/python/paddle/vision/transforms/functional_pil.py +++ b/python/paddle/vision/transforms/functional_pil.py @@ -81,7 +81,7 @@ def to_tensor(pic, data_format='CHW'): else: nchannel = len(pic.mode) - dtype = paddle.fluid.data_feeder.convert_dtype(img.dtype) + dtype = paddle.base.data_feeder.convert_dtype(img.dtype) if dtype == 'uint8': img = paddle.cast(img, np.float32) / 255.0 diff --git a/python/paddle/vision/transforms/functional_tensor.py b/python/paddle/vision/transforms/functional_tensor.py index e7b57a011baeb..c22ff07e922ed 100644 --- a/python/paddle/vision/transforms/functional_tensor.py +++ b/python/paddle/vision/transforms/functional_tensor.py @@ -20,7 +20,7 @@ import paddle import paddle.nn.functional as F -from ...fluid.framework import Variable +from ...base.framework import Variable __all__ = [] diff --git a/python/setup.py.in b/python/setup.py.in index a0e665526dc68..71be9a303dae3 100644 --- a/python/setup.py.in +++ b/python/setup.py.in @@ -421,14 +421,14 @@ packages=['paddle', 'paddle.inference', 'paddle.inference.contrib', 'paddle.inference.contrib.utils', - 'paddle.fluid', - 'paddle.fluid.dygraph', - 'paddle.fluid.proto', - 'paddle.fluid.proto.profiler', - 'paddle.fluid.layers', - 'paddle.fluid.incubate', + 'paddle.base', + 'paddle.base.dygraph', + 'paddle.base.proto', + 'paddle.base.proto.profiler', + 'paddle.base.layers', + 'paddle.base.incubate', 'paddle.incubate.distributed.fleet', - 'paddle.fluid.incubate.checkpoint', + 'paddle.base.incubate.checkpoint', 'paddle.amp', 'paddle.cost_model', 'paddle.hapi', @@ -523,19 +523,19 @@ if not '${WIN32}': paddle_bins = ['${PADDLE_BINARY_DIR}/paddle/scripts/paddle'] if os.name != 'nt': - package_data={'paddle.fluid': ['${FLUID_CORE_NAME}' + '.so']} + package_data={'paddle.base': ['${FLUID_CORE_NAME}' + '.so']} else: - package_data={'paddle.fluid': ['${FLUID_CORE_NAME}' + '.pyd', '${FLUID_CORE_NAME}' + '.lib']} + package_data={'paddle.base': ['${FLUID_CORE_NAME}' + '.pyd', 
'${FLUID_CORE_NAME}' + '.lib']} -package_data['paddle.fluid'] += ['${PADDLE_BINARY_DIR}/python/paddle/cost_model/static_op_benchmark.json'] +package_data['paddle.base'] += ['${PADDLE_BINARY_DIR}/python/paddle/cost_model/static_op_benchmark.json'] package_dir={ '': '${PADDLE_BINARY_DIR}/python', - # The paddle.fluid.proto will be generated while compiling. + # The paddle.base.proto will be generated while compiling. # So that package points to other directory. - 'paddle.fluid.proto.profiler': '${PADDLE_BINARY_DIR}/paddle/fluid/platform', - 'paddle.fluid.proto': '${PADDLE_BINARY_DIR}/paddle/fluid/framework', - 'paddle.fluid': '${PADDLE_BINARY_DIR}/python/paddle/fluid', + 'paddle.base.proto.profiler': '${PADDLE_BINARY_DIR}/paddle/fluid/platform', + 'paddle.base.proto': '${PADDLE_BINARY_DIR}/paddle/fluid/framework', + 'paddle.base': '${PADDLE_BINARY_DIR}/python/paddle/base', } # put all thirdparty libraries in paddle.libs @@ -705,14 +705,14 @@ package_dir['paddle.libs']=libs_path # change rpath of ${FLUID_CORE_NAME}.ext, add $ORIGIN/../libs/ to it. # The reason is that libwarpctc.ext, libiomp5.ext etc are in paddle.libs, and -# ${FLUID_CORE_NAME}.ext is in paddle.fluid, thus paddle/fluid/../libs will pointer to above libraries. +# ${FLUID_CORE_NAME}.ext is in paddle.base, thus paddle/base/../libs will point to the above libraries. # This operation will fix https://github.com/PaddlePaddle/Paddle/issues/3213 if '${CMAKE_BUILD_TYPE}' == 'Release': if os.name != 'nt': # only change rpath in Release mode, since in Debug mode, ${FLUID_CORE_NAME}.xx is too large to be changed. if "@APPLE@" == "1": - commands = ["install_name_tool -id '@loader_path/../libs/' ${PADDLE_BINARY_DIR}/python/paddle/fluid/${FLUID_CORE_NAME}" + '.so'] - commands.append("install_name_tool -add_rpath '@loader_path/../libs/' ${PADDLE_BINARY_DIR}/python/paddle/fluid/${FLUID_CORE_NAME}" + '.so') + commands = ["install_name_tool -id '@loader_path/../libs/' ${PADDLE_BINARY_DIR}/python/paddle/base/${FLUID_CORE_NAME}" + '.so'] + commands.append("install_name_tool -add_rpath '@loader_path/../libs/' ${PADDLE_BINARY_DIR}/python/paddle/base/${FLUID_CORE_NAME}" + '.so') if('${WITH_SHARED_PHI}' == 'ON'): # change rpath of phi.ext for loading 3rd party libb commands.append("install_name_tool -add_rpath '@loader_path' ${PADDLE_BINARY_DIR}/python/paddle/libs/${PHI_NAME}") @@ -720,7 +720,7 @@ if '${CMAKE_BUILD_TYPE}' == 'Release': # change rpath of ir.ext for loading 3rd party libb commands.append("install_name_tool -add_rpath '@loader_path' ${PADDLE_BINARY_DIR}/python/paddle/libs/${IR_NAME}") else: - commands = ["patchelf --set-rpath '$ORIGIN/../libs/' ${PADDLE_BINARY_DIR}/python/paddle/fluid/${FLUID_CORE_NAME}" + '.so'] + commands = ["patchelf --set-rpath '$ORIGIN/../libs/' ${PADDLE_BINARY_DIR}/python/paddle/base/${FLUID_CORE_NAME}" + '.so'] if('${WITH_SHARED_PHI}' == 'ON'): # change rpath of phi.ext for loading 3rd party lib commands.append("patchelf --set-rpath '$ORIGIN' ${PADDLE_BINARY_DIR}/python/paddle/libs/${PHI_NAME}") diff --git a/setup.py b/setup.py index f2b1df02d8c1e..58d2a6ac93553 100644 --- a/setup.py +++ b/setup.py @@ -219,10 +219,10 @@ class DevelopCommand(DevelopCommandBase): def run(self): # copy proto and .so to python_source_dir fluid_proto_binary_path = ( - paddle_binary_dir + '/python/paddle/fluid/proto/' + paddle_binary_dir + '/python/paddle/base/proto/' ) fluid_proto_source_path = ( - paddle_source_dir + '/python/paddle/fluid/proto/' + paddle_source_dir + '/python/paddle/base/proto/' ) distributed_proto_binary_path = (
paddle_binary_dir + '/python/paddle/distributed/fleet/proto/' @@ -237,8 +237,8 @@ def run(self): distributed_proto_binary_path, distributed_proto_source_path ) shutil.copy( - paddle_binary_dir + '/python/paddle/fluid/libpaddle.so', - paddle_source_dir + '/python/paddle/fluid/', + paddle_binary_dir + '/python/paddle/base/libpaddle.so', + paddle_source_dir + '/python/paddle/base/', ) dynamic_library_binary_path = paddle_binary_dir + '/python/paddle/libs/' dynamic_library_source_path = paddle_source_dir + '/python/paddle/libs/' @@ -913,7 +913,7 @@ def get_package_data_and_package_dir(): 'paddle.fluid.proto': env_dict.get("PADDLE_BINARY_DIR") + '/paddle/fluid/framework', 'paddle.fluid': env_dict.get("PADDLE_BINARY_DIR") - + '/python/paddle/fluid', + + '/python/paddle/base', } # put all thirdparty libraries in paddle.libs libs_path = paddle_binary_dir + '/python/paddle/libs' @@ -1159,14 +1159,14 @@ def get_package_data_and_package_dir(): commands = [ "install_name_tool -id '@loader_path/../libs/' " + env_dict.get("PADDLE_BINARY_DIR") - + '/python/paddle/fluid/' + + '/python/paddle/base/' + env_dict.get("FLUID_CORE_NAME") + '.so' ] commands.append( "install_name_tool -add_rpath '@loader_path/../libs/' " + env_dict.get("PADDLE_BINARY_DIR") - + '/python/paddle/fluid/' + + '/python/paddle/base/' + env_dict.get("FLUID_CORE_NAME") + '.so' ) @@ -1188,7 +1188,7 @@ def get_package_data_and_package_dir(): commands = [ "patchelf --set-rpath '$ORIGIN/../libs/' " + env_dict.get("PADDLE_BINARY_DIR") - + '/python/paddle/fluid/' + + '/python/paddle/base/' + env_dict.get("FLUID_CORE_NAME") + '.so' ] diff --git a/test/amp/test_amp_decorate.py b/test/amp/test_amp_decorate.py index 58989571aad8b..f956d37c63b39 100644 --- a/test/amp/test_amp_decorate.py +++ b/test/amp/test_amp_decorate.py @@ -16,7 +16,7 @@ import paddle import paddle.nn.functional as F -from paddle.fluid import core +from paddle.base import core class ConvBNLayer(paddle.nn.Layer): diff --git a/test/amp/test_amp_list.py b/test/amp/test_amp_list.py index 1eb5b1fb7b5b3..20a7a45e95784 100644 --- a/test/amp/test_amp_list.py +++ b/test/amp/test_amp_list.py @@ -15,7 +15,7 @@ import unittest import paddle -from paddle.fluid import core +from paddle.base import core from paddle.static.amp import AutoMixedPrecisionLists, fp16_lists diff --git a/test/amp/test_amp_master_grad.py b/test/amp/test_amp_master_grad.py index 3eaf6546009d0..3a30e5d4a712d 100644 --- a/test/amp/test_amp_master_grad.py +++ b/test/amp/test_amp_master_grad.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core class SimpleNet(paddle.nn.Layer): @@ -80,7 +80,7 @@ def run_dygraph(self, total_steps, accumulate_batchs_num, model, optimizer): scaler.update() opt.clear_grad() paddle.amp.debugging.disable_operator_stats_collection() - op_list = paddle.fluid.core.get_low_precision_op_list() + op_list = paddle.base.core.get_low_precision_op_list() return fp32_grads, op_list def test_adam_master_grad(self): diff --git a/test/amp/test_amp_promote.py b/test/amp/test_amp_promote.py index 95017df905fc9..52cda97d15fbb 100644 --- a/test/amp/test_amp_promote.py +++ b/test/amp/test_amp_promote.py @@ -18,7 +18,7 @@ from amp_base_models import AmpTestBase, build_conv_model import paddle -from paddle.fluid import core +from paddle.base import core from paddle.static import amp @@ -138,7 +138,7 @@ def check_promote_results( scaler.minimize(optimizer, scaled) optimizer.clear_grad() paddle.amp.debugging.disable_operator_stats_collection() - op_stats = 
paddle.fluid.core.get_low_precision_op_list() + op_stats = paddle.base.core.get_low_precision_op_list() self._check_op_calls( op_stats, diff --git a/test/amp/test_collect_operator_stats.py b/test/amp/test_collect_operator_stats.py index 157a1100df302..d17ece43727f4 100644 --- a/test/amp/test_collect_operator_stats.py +++ b/test/amp/test_collect_operator_stats.py @@ -22,7 +22,7 @@ class TestOpStatsEager(unittest.TestCase): def _check_result(self, dtype): # Returned the dict. - op_list = paddle.fluid.core.get_low_precision_op_list() + op_list = paddle.base.core.get_low_precision_op_list() self.assertTrue('elementwise_add' in op_list) self.assertTrue('conv2d' in op_list) diff --git a/test/amp/test_compare_accuracy_api.py b/test/amp/test_compare_accuracy_api.py index 83eb7af8df68e..ec35288652fa7 100644 --- a/test/amp/test_compare_accuracy_api.py +++ b/test/amp/test_compare_accuracy_api.py @@ -15,7 +15,7 @@ import unittest import paddle -from paddle.fluid import core +from paddle.base import core @unittest.skipIf( @@ -23,7 +23,7 @@ ) class TestCompareAccuracyApi(unittest.TestCase): def calc(self, path, dtype): - paddle.fluid.core.set_nan_inf_debug_path(path) + paddle.base.core.set_nan_inf_debug_path(path) x = paddle.to_tensor( [2000, 3000, 4, 0], place=core.CUDAPlace(0), dtype=dtype ) diff --git a/test/amp/test_layer_convert_dtype.py b/test/amp/test_layer_convert_dtype.py index 31332e1d47f0a..4c8e7fe9cd2ae 100644 --- a/test/amp/test_layer_convert_dtype.py +++ b/test/amp/test_layer_convert_dtype.py @@ -17,7 +17,7 @@ import paddle import paddle.nn.functional as F from paddle import nn -from paddle.fluid import core +from paddle.base import core class MyModel(paddle.nn.Layer): diff --git a/test/amp/test_model_cast_to_bf16.py b/test/amp/test_model_cast_to_bf16.py index 79751b3d99d0c..da1e82dddc7dd 100644 --- a/test/amp/test_model_cast_to_bf16.py +++ b/test/amp/test_model_cast_to_bf16.py @@ -25,8 +25,8 @@ ) import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.static import amp paddle.enable_static() @@ -55,23 +55,23 @@ def static_graph(self): @contextlib.contextmanager def scope_prog_guard(self): - prog = fluid.Program() - startup_prog = fluid.Program() - scope = fluid.core.Scope() - with fluid.scope_guard(scope): - with fluid.program_guard(prog, startup_prog): + prog = base.Program() + startup_prog = base.Program() + scope = base.core.Scope() + with base.scope_guard(scope): + with base.program_guard(prog, startup_prog): yield def get_static_graph_result( self, feed, fetch_list, amp_fun, with_lod=False, startup_prog=None ): - exe = fluid.Executor(core.CPUPlace()) + exe = base.Executor(core.CPUPlace()) exe.run( - fluid.default_startup_program() + base.default_startup_program() if startup_prog is None else startup_prog ) - prog = fluid.default_main_program() + prog = base.default_main_program() if amp_fun is not None: if startup_prog is not None: amp_fun(prog, startup_prog) @@ -193,7 +193,7 @@ def test_graph_cast(self): ), use_bf16_guard=True, ), - startup_prog=fluid.default_startup_program(), + startup_prog=base.default_startup_program(), ) diff --git a/test/asp/asp_pruning_base.py b/test/asp/asp_pruning_base.py index c6d7ff440cc11..5160d3a9652de 100644 --- a/test/asp/asp_pruning_base.py +++ b/test/asp/asp_pruning_base.py @@ -18,8 +18,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.incubate.asp import 
ASPHelper paddle.enable_static() @@ -27,8 +27,8 @@ class TestASPHelperPruningBase(unittest.TestCase): def setUp(self): - self.main_program = fluid.Program() - self.startup_program = fluid.Program() + self.main_program = base.Program() + self.startup_program = base.Program() def build_model(): img = paddle.static.data( @@ -46,7 +46,7 @@ def build_model(): ) return img, label, prediction - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): self.img, self.label, self.predict = build_model() def run_inference_pruning_test( @@ -55,14 +55,14 @@ def run_inference_pruning_test( place = paddle.CPUPlace() if core.is_compiled_with_cuda(): place = paddle.CUDAPlace(0) - exe = fluid.Executor(place) + exe = base.Executor(place) self.__pruning_and_checking( exe, place, get_mask_gen_func, get_mask_check_func, False ) def run_training_pruning_test(self, get_mask_gen_func, get_mask_check_func): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): loss = paddle.mean( paddle.nn.functional.cross_entropy( input=self.predict, @@ -79,7 +79,7 @@ def run_training_pruning_test(self, get_mask_gen_func, get_mask_check_func): place = paddle.CPUPlace() if core.is_compiled_with_cuda(): place = paddle.CUDAPlace(0) - exe = fluid.Executor(place) + exe = base.Executor(place) self.__pruning_and_checking( exe, place, get_mask_gen_func, get_mask_check_func, True @@ -95,7 +95,7 @@ def __pruning_and_checking( for param in self.main_program.global_block().all_parameters(): if ASPHelper._is_supported_layer(self.main_program, param.name): mat = np.array( - fluid.global_scope().find_var(param.name).get_tensor() + base.global_scope().find_var(param.name).get_tensor() ) self.assertTrue( paddle.incubate.asp.check_sparsity( diff --git a/test/asp/test_asp_customized_pruning.py b/test/asp/test_asp_customized_pruning.py index c63d12449c03d..e7933c9b7142e 100644 --- a/test/asp/test_asp_customized_pruning.py +++ b/test/asp/test_asp_customized_pruning.py @@ -18,8 +18,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.incubate import asp as sparsity from paddle.incubate.asp.supported_layer_list import ( supported_layers_and_prune_func_map, @@ -188,8 +188,8 @@ class TestASPStaticCustomerizedPruneFunc(unittest.TestCase): def setUp(self): paddle.enable_static() - self.main_program = fluid.Program() - self.startup_program = fluid.Program() + self.main_program = base.Program() + self.startup_program = base.Program() self.customer_prefix = "customer_layer" @@ -215,14 +215,14 @@ def build_model(): ) return img, label, prediction - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): self.img, self.label, self.predict = build_model() self.supported_layer_count_ref = 5 self.place = paddle.CPUPlace() if core.is_compiled_with_cuda(): self.place = paddle.CUDAPlace(0) - self.exe = fluid.Executor(self.place) + self.exe = base.Executor(self.place) sparsity.add_supported_layer(self.customer_prefix, my_own_pruning) @@ -236,7 +236,7 @@ def test_inference_pruning(self): supported_layer_count = 0 for param in self.main_program.global_block().all_parameters(): mat = np.array( - fluid.global_scope().find_var(param.name).get_tensor() + base.global_scope().find_var(param.name).get_tensor() ) if 
sparsity.asp.ASPHelper._is_supported_layer( self.main_program, param.name @@ -265,7 +265,7 @@ def test_inference_pruning(self): self.assertEqual(supported_layer_count, self.supported_layer_count_ref) def test_training_pruning(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): loss = paddle.mean( paddle.nn.functional.cross_entropy( input=self.predict, @@ -288,13 +288,13 @@ def test_training_pruning(self): supported_layer_count = 0 for param in self.main_program.global_block().all_parameters(): mat = np.array( - fluid.global_scope().find_var(param.name).get_tensor() + base.global_scope().find_var(param.name).get_tensor() ) if sparsity.asp.ASPHelper._is_supported_layer( self.main_program, param.name ): mat_mask = np.array( - fluid.global_scope() + base.global_scope() .find_var(sparsity.asp.ASPHelper._get_mask_name(param.name)) .get_tensor() ) diff --git a/test/asp/test_asp_optimize_dynamic.py b/test/asp/test_asp_optimize_dynamic.py index 79aada06ac562..293a5bbe7e15c 100644 --- a/test/asp/test_asp_optimize_dynamic.py +++ b/test/asp/test_asp_optimize_dynamic.py @@ -18,7 +18,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core from paddle.incubate.asp import ASPHelper diff --git a/test/asp/test_asp_optimize_static.py b/test/asp/test_asp_optimize_static.py index 863bbe4f0c04e..6074bfd7c8310 100644 --- a/test/asp/test_asp_optimize_static.py +++ b/test/asp/test_asp_optimize_static.py @@ -18,8 +18,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.incubate.asp import ASPHelper paddle.enable_static() @@ -27,8 +27,8 @@ class TestASPStaticOptimize(unittest.TestCase): def setUp(self): - self.main_program = fluid.Program() - self.startup_program = fluid.Program() + self.main_program = base.Program() + self.startup_program = base.Program() def build_model(): img = paddle.static.data( @@ -46,7 +46,7 @@ def build_model(): ) return img, label, prediction - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): self.img, self.label, predict = build_model() self.loss = paddle.mean( paddle.nn.functional.cross_entropy( @@ -74,7 +74,7 @@ def check_params(params, params_from_asp): ) self.assertTrue(check_params(params, params_from_asp)) - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): ASPHelper._minimize( self.optimizer, self.loss, @@ -166,7 +166,7 @@ def test_decorate(self): param_names = self.__get_param_names( self.main_program.global_block().all_parameters() ) - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): self.optimizer = paddle.incubate.asp.decorate(self.optimizer) self.optimizer.minimize(self.loss, self.startup_program) param_names_after_minimize = self.__get_param_names( @@ -178,15 +178,15 @@ def test_decorate(self): ) def test_asp_training(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): self.optimizer = paddle.incubate.asp.decorate(self.optimizer) self.optimizer.minimize(self.loss, self.startup_program) place = paddle.CPUPlace() if core.is_compiled_with_cuda(): place = paddle.CUDAPlace(0) - exe = 
fluid.Executor(place) - feeder = fluid.DataFeeder(feed_list=[self.img, self.label], place=place) + exe = base.Executor(place) + feeder = base.DataFeeder(feed_list=[self.img, self.label], place=place) exe.run(self.startup_program) paddle.incubate.asp.prune_model(self.main_program) @@ -200,7 +200,7 @@ def test_asp_training(self): for param in self.main_program.global_block().all_parameters(): if ASPHelper._is_supported_layer(self.main_program, param.name): mat = np.array( - fluid.global_scope().find_var(param.name).get_tensor() + base.global_scope().find_var(param.name).get_tensor() ) if (len(param.shape) == 4 and param.shape[1] < 4) or ( len(param.shape) == 2 and param.shape[0] < 4 @@ -216,13 +216,13 @@ def test_asp_training(self): def test_asp_training_with_amp(self): if core.is_compiled_with_cuda(): place = paddle.CUDAPlace(0) - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): self.optimizer = paddle.static.amp.decorate(self.optimizer) self.optimizer = paddle.incubate.asp.decorate(self.optimizer) self.optimizer.minimize(self.loss, self.startup_program) - exe = fluid.Executor(place) - feeder = fluid.DataFeeder( + exe = base.Executor(place) + feeder = base.DataFeeder( feed_list=[self.img, self.label], place=place ) @@ -238,7 +238,7 @@ def test_asp_training_with_amp(self): for param in self.main_program.global_block().all_parameters(): if ASPHelper._is_supported_layer(self.main_program, param.name): mat = np.array( - fluid.global_scope().find_var(param.name).get_tensor() + base.global_scope().find_var(param.name).get_tensor() ) if (len(param.shape) == 4 and param.shape[1] < 4) or ( len(param.shape) == 2 and param.shape[0] < 4 diff --git a/test/asp/test_asp_pruning_dynamic.py b/test/asp/test_asp_pruning_dynamic.py index 095180ff3f346..b41f52b7c1050 100644 --- a/test/asp/test_asp_pruning_dynamic.py +++ b/test/asp/test_asp_pruning_dynamic.py @@ -18,7 +18,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core from paddle.incubate.asp import ASPHelper diff --git a/test/asp/test_asp_pruning_static.py b/test/asp/test_asp_pruning_static.py index 3f87a2174eba8..2db7d8d42f6ab 100644 --- a/test/asp/test_asp_pruning_static.py +++ b/test/asp/test_asp_pruning_static.py @@ -18,8 +18,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.incubate.asp import ASPHelper paddle.enable_static() @@ -27,8 +27,8 @@ class TestASPStaticPruningBase(unittest.TestCase): def setUp(self): - self.main_program = fluid.Program() - self.startup_program = fluid.Program() + self.main_program = base.Program() + self.startup_program = base.Program() def build_model(): img = paddle.static.data( @@ -49,7 +49,7 @@ def build_model(): ) return img, label, prediction - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): self.img, self.label, self.predict = build_model() self.set_config() @@ -62,12 +62,12 @@ def test_inference_pruning(self): place = paddle.CPUPlace() if core.is_compiled_with_cuda(): place = paddle.CUDAPlace(0) - exe = fluid.Executor(place) + exe = base.Executor(place) self.__pruning_and_checking(exe, place, False) def test_training_pruning(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): loss = paddle.mean( 
paddle.nn.functional.cross_entropy( input=self.predict, @@ -84,7 +84,7 @@ def test_training_pruning(self): place = paddle.CPUPlace() if core.is_compiled_with_cuda(): place = paddle.CUDAPlace(0) - exe = fluid.Executor(place) + exe = base.Executor(place) self.__pruning_and_checking(exe, place, True) @@ -96,7 +96,7 @@ def __pruning_and_checking(self, exe, place, with_mask): for param in self.main_program.global_block().all_parameters(): if ASPHelper._is_supported_layer(self.main_program, param.name): mat = np.array( - fluid.global_scope().find_var(param.name).get_tensor() + base.global_scope().find_var(param.name).get_tensor() ) if (len(param.shape) == 4 and param.shape[1] < 4) or ( len(param.shape) == 2 and param.shape[0] < 4 diff --git a/test/asp/test_asp_save_load.py b/test/asp/test_asp_save_load.py index 698aacbd9b401..f9966c321b37e 100644 --- a/test/asp/test_asp_save_load.py +++ b/test/asp/test_asp_save_load.py @@ -18,8 +18,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.incubate.asp import ASPHelper @@ -124,8 +124,8 @@ class TestASPStaticOptimize(unittest.TestCase): def setUp(self): paddle.enable_static() - self.main_program = fluid.Program() - self.startup_program = fluid.Program() + self.main_program = base.Program() + self.startup_program = base.Program() def build_model(): img = paddle.static.data( @@ -143,7 +143,7 @@ def build_model(): ) return img, label, prediction - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): self.img, self.label, predict = build_model() self.loss = paddle.mean( paddle.nn.functional.cross_entropy( @@ -160,7 +160,7 @@ def build_model(): self.place = paddle.CPUPlace() if core.is_compiled_with_cuda(): self.place = paddle.CUDAPlace(0) - self.exe = fluid.Executor(self.place) + self.exe = base.Executor(self.place) self.exe.run(self.startup_program) paddle.incubate.asp.prune_model(self.main_program) @@ -178,7 +178,7 @@ def test_save_and_load(self): state_dict = paddle.load(param_path) prog.set_state_dict(state_dict) - feeder = fluid.DataFeeder( + feeder = base.DataFeeder( feed_list=[self.img, self.label], place=self.place ) @@ -191,7 +191,7 @@ def test_save_and_load(self): for param in prog.global_block().all_parameters(): if ASPHelper._is_supported_layer(prog, param.name): mat = np.array( - fluid.global_scope().find_var(param.name).get_tensor() + base.global_scope().find_var(param.name).get_tensor() ) if (len(param.shape) == 4 and param.shape[1] < 4) or ( len(param.shape) == 2 and param.shape[0] < 4 diff --git a/test/asp/test_fleet_with_asp_dynamic.py b/test/asp/test_fleet_with_asp_dynamic.py index ee2261aaf5248..6e46785ae2b3c 100644 --- a/test/asp/test_fleet_with_asp_dynamic.py +++ b/test/asp/test_fleet_with_asp_dynamic.py @@ -20,7 +20,7 @@ import paddle from paddle.distributed import fleet -from paddle.fluid import core +from paddle.base import core from paddle.incubate.asp import ASPHelper cuda_visible_devices = os.getenv('CUDA_VISIBLE_DEVICES') diff --git a/test/asp/test_fleet_with_asp_sharding.py b/test/asp/test_fleet_with_asp_sharding.py index 4dabb3549df33..59cf1d575d33d 100644 --- a/test/asp/test_fleet_with_asp_sharding.py +++ b/test/asp/test_fleet_with_asp_sharding.py @@ -19,7 +19,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet from paddle.incubate import asp as sparsity from 
paddle.incubate.asp import ASPHelper @@ -48,7 +48,7 @@ def setUp(self): os.environ['FLAGS_check_nan_inf'] = "0" def net(self, main_prog, startup_prog): - with fluid.program_guard(main_prog, startup_prog): + with base.program_guard(main_prog, startup_prog): input_x = paddle.static.data( name="x", shape=[-1, 32], dtype='float32' ) @@ -86,27 +86,27 @@ def net(self, main_prog, startup_prog): def test_with_asp_sharding(self): fleet.init(is_collective=True) - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() avg_cost, strategy, input_x, input_y = self.net( train_prog, startup_prog ) - with fluid.program_guard(train_prog, startup_prog): + with base.program_guard(train_prog, startup_prog): optimizer = paddle.optimizer.SGD(learning_rate=0.01) optimizer = fleet.distributed_optimizer( optimizer, strategy=strategy ) optimizer.minimize(avg_cost) - if paddle.fluid.is_compiled_with_cuda(): - place = fluid.CUDAPlace( + if paddle.base.is_compiled_with_cuda(): + place = base.CUDAPlace( int(os.environ.get('FLAGS_selected_gpus', 0)) ) else: - place = fluid.CPUPlace() + place = base.CPUPlace() - exe = fluid.Executor(place) - feeder = fluid.DataFeeder(feed_list=[input_x, input_y], place=place) + exe = base.Executor(place) + feeder = base.DataFeeder(feed_list=[input_x, input_y], place=place) exe.run(startup_prog) sparsity.prune_model(train_prog) @@ -117,7 +117,7 @@ def test_with_asp_sharding(self): for param in train_prog.global_block().all_parameters(): if ASPHelper._is_supported_layer(train_prog, param.name): mat = np.array( - fluid.global_scope().find_var(param.name).get_tensor() + base.global_scope().find_var(param.name).get_tensor() ) if (len(param.shape) == 4 and param.shape[1] < 4) or ( len(param.shape) == 2 and param.shape[0] < 4 diff --git a/test/asp/test_fleet_with_asp_static.py b/test/asp/test_fleet_with_asp_static.py index c0763f309e7a4..fc67b1f249f03 100644 --- a/test/asp/test_fleet_with_asp_static.py +++ b/test/asp/test_fleet_with_asp_static.py @@ -19,7 +19,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet from paddle.incubate import asp as sparsity from paddle.incubate.asp import ASPHelper @@ -41,7 +41,7 @@ def setUp(self): os.environ["PADDLE_TRAINER_ID"] = "0" def net(self, main_prog, startup_prog): - with fluid.program_guard(main_prog, startup_prog): + with base.program_guard(main_prog, startup_prog): input_x = paddle.static.data( name="x", shape=[-1, 32], dtype='float32' ) @@ -65,12 +65,12 @@ def net(self, main_prog, startup_prog): def test_with_asp(self): fleet.init(is_collective=True) - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() avg_cost, strategy, input_x, input_y = self.net( train_prog, startup_prog ) - with fluid.program_guard(train_prog, startup_prog): + with base.program_guard(train_prog, startup_prog): optimizer = paddle.optimizer.SGD(learning_rate=0.01) optimizer = fleet.distributed_optimizer( optimizer, strategy=strategy @@ -78,13 +78,13 @@ def test_with_asp(self): optimizer.minimize(avg_cost) place = ( - fluid.CUDAPlace(0) - if paddle.fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if paddle.base.is_compiled_with_cuda() + else base.CPUPlace() ) - exe = fluid.Executor(place) - feeder = fluid.DataFeeder(feed_list=[input_x, input_y], place=place) + exe = base.Executor(place) + feeder = base.DataFeeder(feed_list=[input_x, input_y], 
place=place) exe.run(startup_prog) sparsity.prune_model(train_prog) @@ -95,7 +95,7 @@ def test_with_asp(self): for param in train_prog.global_block().all_parameters(): if ASPHelper._is_supported_layer(train_prog, param.name): mat = np.array( - fluid.global_scope().find_var(param.name).get_tensor() + base.global_scope().find_var(param.name).get_tensor() ) if (len(param.shape) == 4 and param.shape[1] < 4) or ( len(param.shape) == 2 and param.shape[0] < 4 @@ -117,7 +117,7 @@ def setUp(self): os.environ["PADDLE_TRAINER_ID"] = "0" def net(self, main_prog, startup_prog): - with fluid.program_guard(main_prog, startup_prog): + with base.program_guard(main_prog, startup_prog): input_x = paddle.static.data( name="x", shape=[-1, 32], dtype='float32' ) @@ -141,13 +141,13 @@ def net(self, main_prog, startup_prog): def test_with_asp_and_amp(self): fleet.init(is_collective=True) - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() avg_cost, strategy, input_x, input_y = self.net( train_prog, startup_prog ) strategy.amp = True - with fluid.program_guard(train_prog, startup_prog): + with base.program_guard(train_prog, startup_prog): optimizer = paddle.optimizer.SGD(learning_rate=0.01) optimizer = fleet.distributed_optimizer( optimizer, strategy=strategy @@ -155,13 +155,13 @@ def test_with_asp_and_amp(self): optimizer.minimize(avg_cost) place = ( - fluid.CUDAPlace(0) - if paddle.fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if paddle.base.is_compiled_with_cuda() + else base.CPUPlace() ) - exe = fluid.Executor(place) - feeder = fluid.DataFeeder(feed_list=[input_x, input_y], place=place) + exe = base.Executor(place) + feeder = base.DataFeeder(feed_list=[input_x, input_y], place=place) exe.run(startup_prog) optimizer.amp_init(place) @@ -174,7 +174,7 @@ def test_with_asp_and_amp(self): for param in train_prog.global_block().all_parameters(): if ASPHelper._is_supported_layer(train_prog, param.name): mat = np.array( - fluid.global_scope().find_var(param.name).get_tensor() + base.global_scope().find_var(param.name).get_tensor() ) if (len(param.shape) == 4 and param.shape[1] < 4) or ( len(param.shape) == 2 and param.shape[0] < 4 @@ -189,7 +189,7 @@ def test_with_asp_and_amp(self): def test_with_asp_and_pure_fp16(self): fleet.init(is_collective=True) - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() with paddle.static.amp.fp16_guard(): avg_cost, strategy, input_x, input_y = self.net( train_prog, startup_prog @@ -197,7 +197,7 @@ def test_with_asp_and_pure_fp16(self): strategy.amp = True strategy.amp_configs = {'use_pure_fp16': True} - with fluid.program_guard(train_prog, startup_prog): + with base.program_guard(train_prog, startup_prog): with paddle.static.amp.fp16_guard(): optimizer = optimizer = paddle.optimizer.Momentum( learning_rate=0.01, multi_precision=True @@ -208,13 +208,13 @@ def test_with_asp_and_pure_fp16(self): optimizer.minimize(avg_cost) place = ( - fluid.CUDAPlace(0) - if paddle.fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if paddle.base.is_compiled_with_cuda() + else base.CPUPlace() ) - exe = fluid.Executor(place) - feeder = fluid.DataFeeder(feed_list=[input_x, input_y], place=place) + exe = base.Executor(place) + feeder = base.DataFeeder(feed_list=[input_x, input_y], place=place) exe.run(startup_prog) optimizer.amp_init(place) @@ -227,7 +227,7 @@ def test_with_asp_and_pure_fp16(self): for param in 
train_prog.global_block().all_parameters(): if ASPHelper._is_supported_layer(train_prog, param.name): mat = np.array( - fluid.global_scope().find_var(param.name).get_tensor() + base.global_scope().find_var(param.name).get_tensor() ) if (len(param.shape) == 4 and param.shape[1] < 4) or ( len(param.shape) == 2 and param.shape[0] < 4 diff --git a/test/auto_parallel/1F1B_pass_unittest.py b/test/auto_parallel/1F1B_pass_unittest.py index 4f55031c84f05..8d5b9940aec6d 100644 --- a/test/auto_parallel/1F1B_pass_unittest.py +++ b/test/auto_parallel/1F1B_pass_unittest.py @@ -45,8 +45,8 @@ def apply_pass(use_1f1b=False): def reset_prog(): - paddle.fluid.framework.switch_main_program(paddle.static.Program()) - paddle.fluid.framework.switch_startup_program(paddle.static.Program()) + paddle.base.framework.switch_main_program(paddle.static.Program()) + paddle.base.framework.switch_startup_program(paddle.static.Program()) class Test1F1BPass(unittest.TestCase): @@ -63,7 +63,7 @@ def init(self, engine): np.random.seed(2021) random.seed(2021) paddle.distributed.fleet.init(is_collective=True) - place = paddle.fluid.CUDAPlace(ParallelEnv().dev_id) + place = paddle.base.CUDAPlace(ParallelEnv().dev_id) engine._executor = paddle.static.Executor(place) def get_engine(self, use_1f1b=False): diff --git a/test/auto_parallel/amp_o2_pass.py b/test/auto_parallel/amp_o2_pass.py index 04af0112e31cc..a770be6d1e428 100644 --- a/test/auto_parallel/amp_o2_pass.py +++ b/test/auto_parallel/amp_o2_pass.py @@ -59,8 +59,8 @@ def apply_pass(use_amp=False, amp_dtype="bfloat16"): def reset_prog(): - paddle.fluid.framework.switch_main_program(paddle.static.Program()) - paddle.fluid.framework.switch_startup_program(paddle.static.Program()) + paddle.base.framework.switch_main_program(paddle.static.Program()) + paddle.base.framework.switch_startup_program(paddle.static.Program()) class TestShardingStage2WithNewEXE(unittest.TestCase): @@ -74,7 +74,7 @@ def init(self, engine): paddle.seed(2022) np.random.seed(2022) random.seed(2022) - place = paddle.fluid.CUDAPlace(paddle.distributed.ParallelEnv().dev_id) + place = paddle.base.CUDAPlace(paddle.distributed.ParallelEnv().dev_id) engine._executor = paddle.static.Executor(place) def get_engine(self, use_amp=False, amp_dtype="bfloat16"): diff --git a/test/auto_parallel/amp_pass_unittest.py b/test/auto_parallel/amp_pass_unittest.py index f8b422d5281a4..5d326936eb28e 100644 --- a/test/auto_parallel/amp_pass_unittest.py +++ b/test/auto_parallel/amp_pass_unittest.py @@ -44,8 +44,8 @@ def apply_pass(use_amp=False, level=None): def reset_prog(): - paddle.fluid.framework.switch_main_program(paddle.static.Program()) - paddle.fluid.framework.switch_startup_program(paddle.static.Program()) + paddle.base.framework.switch_main_program(paddle.static.Program()) + paddle.base.framework.switch_startup_program(paddle.static.Program()) class TestAMPPass(unittest.TestCase): @@ -61,7 +61,7 @@ def init(self, engine): paddle.seed(2021) np.random.seed(2021) random.seed(2021) - place = paddle.fluid.CUDAPlace(paddle.distributed.ParallelEnv().dev_id) + place = paddle.base.CUDAPlace(paddle.distributed.ParallelEnv().dev_id) engine._executor = paddle.static.Executor(place) def get_engine(self, use_amp=False, level=None): diff --git a/test/auto_parallel/auto_parallel_relaunch_model.py b/test/auto_parallel/auto_parallel_relaunch_model.py index 65fc730a1dab7..25397e63b654c 100644 --- a/test/auto_parallel/auto_parallel_relaunch_model.py +++ b/test/auto_parallel/auto_parallel_relaunch_model.py @@ -109,7 +109,7 @@ def 
mlp_pretrain_forward(train_program, start_program): error_cost = paddle.nn.functional.square_error_cost(predict, label) loss = paddle.mean(error_cost) - loader = paddle.fluid.io.DataLoader.from_generator( + loader = paddle.base.io.DataLoader.from_generator( feed_list=[input, label], capacity=4 * batch_size, iterable=True ) diff --git a/test/auto_parallel/clip_grad_by_global_norm.py b/test/auto_parallel/clip_grad_by_global_norm.py index 11fe954b7b51a..071d9c52c7891 100644 --- a/test/auto_parallel/clip_grad_by_global_norm.py +++ b/test/auto_parallel/clip_grad_by_global_norm.py @@ -37,13 +37,13 @@ def apply_pass(use_sharding=False): def get_parameter_value(program): - from paddle.fluid.framework import Parameter + from paddle.base.framework import Parameter def is_parameter(var): return isinstance(var, Parameter) def get_tensor(var): - t = paddle.fluid.global_scope().find_var(var.name).get_tensor() + t = paddle.base.global_scope().find_var(var.name).get_tensor() return np.array(t) def get_name(var): @@ -57,8 +57,8 @@ def get_name(var): def reset_prog(): - paddle.fluid.framework.switch_main_program(paddle.static.Program()) - paddle.fluid.framework.switch_startup_program(paddle.static.Program()) + paddle.base.framework.switch_main_program(paddle.static.Program()) + paddle.base.framework.switch_startup_program(paddle.static.Program()) class TestGradientClipByGlobalNorm(unittest.TestCase): @@ -72,7 +72,7 @@ def init(self, engine): paddle.seed(2022) np.random.seed(2022) random.seed(2022) - place = paddle.fluid.CUDAPlace(paddle.distributed.ParallelEnv().dev_id) + place = paddle.base.CUDAPlace(paddle.distributed.ParallelEnv().dev_id) engine._executor = paddle.static.Executor(place) def get_engine(self, use_sharding=False): diff --git a/test/auto_parallel/engine_api.py b/test/auto_parallel/engine_api.py index 38e19468a6bf3..cc921d41a74a9 100644 --- a/test/auto_parallel/engine_api.py +++ b/test/auto_parallel/engine_api.py @@ -306,7 +306,7 @@ def train_builtin_data_vars(): with static.program_guard(engine.main_program, engine.startup_program): feed_list = engine.inputs + engine.labels print(feed_list) - loader = paddle.fluid.io.DataLoader.from_generator( + loader = paddle.base.io.DataLoader.from_generator( feed_list=feed_list, capacity=4 * batch_size, iterable=False ) @@ -318,7 +318,7 @@ def train_builtin_data_vars(): try: while True: engine.run() - except paddle.fluid.core.EOFException: + except paddle.base.core.EOFException: loader.reset() # call DataLoader.reset() after catching EOFException @@ -336,7 +336,7 @@ def train_non_builtin_data_vars(): ) label = static.data(name="label", shape=[batch_size, 1], dtype='int64') - loader = paddle.fluid.io.DataLoader.from_generator( + loader = paddle.base.io.DataLoader.from_generator( feed_list=[input, label], capacity=4 * batch_size, iterable=False ) places = static.cuda_places() @@ -380,7 +380,7 @@ def train_non_builtin_data_vars(): try: while True: engine.run() - except paddle.fluid.core.EOFException: + except paddle.base.core.EOFException: loader.reset() # call DataLoader.reset() after catching EOFException @@ -398,7 +398,7 @@ def get_cost(): ) label = static.data(name="label", shape=[batch_size, 1], dtype='int64') - loader = paddle.fluid.io.DataLoader.from_generator( + loader = paddle.base.io.DataLoader.from_generator( feed_list=[input, label], capacity=4 * batch_size, iterable=False ) places = static.cuda_places() @@ -455,7 +455,7 @@ def get_cost_by_default_program(): input, process_mesh=PP_MESH_0, shard_spec=[None, None] ) - loader = 
paddle.fluid.io.DataLoader.from_generator( + loader = paddle.base.io.DataLoader.from_generator( feed_list=[input, label], capacity=4 * batch_size, iterable=False ) places = static.cuda_places() diff --git a/test/auto_parallel/generation_pipeline_pass_unittest.py b/test/auto_parallel/generation_pipeline_pass_unittest.py index e2ad275805df4..9b4a74253fdad 100644 --- a/test/auto_parallel/generation_pipeline_pass_unittest.py +++ b/test/auto_parallel/generation_pipeline_pass_unittest.py @@ -159,7 +159,7 @@ def test_pp2(self): engine._executor.run( engine.main_program, use_program_cache=False, return_numpy=False ) - except paddle.fluid.core.EOFException: + except paddle.base.core.EOFException: print("test done") train_dataloader._inner_dataloader.reset() diff --git a/test/auto_parallel/gradient_merge_pass_unittest.py b/test/auto_parallel/gradient_merge_pass_unittest.py index adf40a236a852..048016be0c702 100644 --- a/test/auto_parallel/gradient_merge_pass_unittest.py +++ b/test/auto_parallel/gradient_merge_pass_unittest.py @@ -38,8 +38,8 @@ def apply_pass(use_gradient_merge=False): def reset_prog(): - paddle.fluid.framework.switch_main_program(paddle.static.Program()) - paddle.fluid.framework.switch_startup_program(paddle.static.Program()) + paddle.base.framework.switch_main_program(paddle.static.Program()) + paddle.base.framework.switch_startup_program(paddle.static.Program()) class TestGradientMergePass(unittest.TestCase): @@ -55,7 +55,7 @@ def init(self, engine): paddle.seed(2021) np.random.seed(2021) random.seed(2021) - place = paddle.fluid.CUDAPlace(paddle.distributed.ParallelEnv().dev_id) + place = paddle.base.CUDAPlace(paddle.distributed.ParallelEnv().dev_id) engine._executor = paddle.static.Executor(place) def get_engine(self, use_gradient_merge=False): diff --git a/test/auto_parallel/pipeline_scheduler_unittest.py b/test/auto_parallel/pipeline_scheduler_unittest.py index f19666d7aff5f..1617854ac446e 100644 --- a/test/auto_parallel/pipeline_scheduler_unittest.py +++ b/test/auto_parallel/pipeline_scheduler_unittest.py @@ -40,8 +40,8 @@ def apply_pass(schedule_mode="FThenB"): def reset_prog(): - paddle.fluid.framework.switch_main_program(paddle.static.Program()) - paddle.fluid.framework.switch_startup_program(paddle.static.Program()) + paddle.base.framework.switch_main_program(paddle.static.Program()) + paddle.base.framework.switch_startup_program(paddle.static.Program()) class Test1F1BPass(unittest.TestCase): @@ -58,7 +58,7 @@ def init(self, engine): np.random.seed(2021) random.seed(2021) paddle.distributed.fleet.init(is_collective=True) - place = paddle.fluid.CUDAPlace(ParallelEnv().dev_id) + place = paddle.base.CUDAPlace(ParallelEnv().dev_id) engine._executor = paddle.static.Executor(place) def get_engine(self, schedule_mode="FThenB"): diff --git a/test/auto_parallel/random_control_unittest.py b/test/auto_parallel/random_control_unittest.py index addc384699885..0ed0857add7ef 100644 --- a/test/auto_parallel/random_control_unittest.py +++ b/test/auto_parallel/random_control_unittest.py @@ -44,8 +44,8 @@ def apply_pass(use_recompute=False, no_recompute_segments=[]): def reset_prog(): - paddle.fluid.framework.switch_main_program(paddle.static.Program()) - paddle.fluid.framework.switch_startup_program(paddle.static.Program()) + paddle.base.framework.switch_main_program(paddle.static.Program()) + paddle.base.framework.switch_startup_program(paddle.static.Program()) class TestRandomControl(unittest.TestCase): @@ -62,7 +62,7 @@ def init(self, engine): paddle.seed(2022) np.random.seed(2022) 
random.seed(2022) - place = paddle.fluid.CUDAPlace(paddle.distributed.ParallelEnv().dev_id) + place = paddle.base.CUDAPlace(paddle.distributed.ParallelEnv().dev_id) engine._executor = paddle.static.Executor(place) def get_engine(self, use_recompute=False, no_recompute_segments=[]): diff --git a/test/auto_parallel/recompute_pass_unittest.py b/test/auto_parallel/recompute_pass_unittest.py index 70350bd2d184f..3888ad9597329 100644 --- a/test/auto_parallel/recompute_pass_unittest.py +++ b/test/auto_parallel/recompute_pass_unittest.py @@ -34,8 +34,8 @@ def apply_pass(use_recompute=False, no_recompute_segments=[]): def reset_prog(): - paddle.fluid.framework.switch_main_program(paddle.static.Program()) - paddle.fluid.framework.switch_startup_program(paddle.static.Program()) + paddle.base.framework.switch_main_program(paddle.static.Program()) + paddle.base.framework.switch_startup_program(paddle.static.Program()) class TestRecomputePass(unittest.TestCase): @@ -51,7 +51,7 @@ def init(self, engine): paddle.seed(2022) np.random.seed(2022) random.seed(2022) - place = paddle.fluid.CUDAPlace(paddle.distributed.ParallelEnv().dev_id) + place = paddle.base.CUDAPlace(paddle.distributed.ParallelEnv().dev_id) engine._executor = paddle.static.Executor(place) def get_engine(self, use_recompute=False, no_recompute_segments=[]): diff --git a/test/auto_parallel/reshard_s_to_r.py b/test/auto_parallel/reshard_s_to_r.py index 90ba0cc655f36..d3aed1472ffe8 100644 --- a/test/auto_parallel/reshard_s_to_r.py +++ b/test/auto_parallel/reshard_s_to_r.py @@ -18,7 +18,7 @@ import paddle import paddle.distributed as dist -from paddle.fluid import core +from paddle.base import core class TestReshardSToR: diff --git a/test/auto_parallel/sharding_newexe.py b/test/auto_parallel/sharding_newexe.py index 48690f585cbeb..a8d30772cfbae 100644 --- a/test/auto_parallel/sharding_newexe.py +++ b/test/auto_parallel/sharding_newexe.py @@ -67,8 +67,8 @@ def apply_pass(use_sharding=False, use_amp=False, use_recompute=False): def reset_prog(): - paddle.fluid.framework.switch_main_program(paddle.static.Program()) - paddle.fluid.framework.switch_startup_program(paddle.static.Program()) + paddle.base.framework.switch_main_program(paddle.static.Program()) + paddle.base.framework.switch_startup_program(paddle.static.Program()) class TestShardingStage2WithNewEXE(unittest.TestCase): @@ -82,7 +82,7 @@ def init(self, engine): paddle.seed(2022) np.random.seed(2022) random.seed(2022) - place = paddle.fluid.CUDAPlace(paddle.distributed.ParallelEnv().dev_id) + place = paddle.base.CUDAPlace(paddle.distributed.ParallelEnv().dev_id) engine._executor = paddle.static.Executor(place) def get_engine( diff --git a/test/auto_parallel/sharding_pass_unittest.py b/test/auto_parallel/sharding_pass_unittest.py index a39c455a3cfd0..b0d0a39df3b47 100644 --- a/test/auto_parallel/sharding_pass_unittest.py +++ b/test/auto_parallel/sharding_pass_unittest.py @@ -48,8 +48,8 @@ def apply_pass(use_sharding=False, stage=None): def reset_prog(): - paddle.fluid.framework.switch_main_program(paddle.static.Program()) - paddle.fluid.framework.switch_startup_program(paddle.static.Program()) + paddle.base.framework.switch_main_program(paddle.static.Program()) + paddle.base.framework.switch_startup_program(paddle.static.Program()) paddle.utils.unique_name.switch() diff --git a/test/auto_parallel/test_align_tool.py b/test/auto_parallel/test_align_tool.py index 500b11c78916c..85e5482ae5e0c 100644 --- a/test/auto_parallel/test_align_tool.py +++ b/test/auto_parallel/test_align_tool.py @@ -19,7 
+19,7 @@ import numpy as np import paddle -from paddle import fluid, nn, optimizer, static +from paddle import base, nn, optimizer, static from paddle.distributed.auto_parallel.static.auto_align_tool import ( AutoAlignTool, ) @@ -29,8 +29,8 @@ paddle.enable_static() paddle.set_device("gpu") -startup_program = fluid.default_startup_program() -main_program = fluid.default_main_program() +startup_program = base.default_startup_program() +main_program = base.default_main_program() class MnistDataset(MNIST): @@ -50,7 +50,7 @@ def __len__(self): dataset = MnistDataset("train") place = paddle.CUDAPlace(0) -with fluid.program_guard(main_program, startup_program): +with base.program_guard(main_program, startup_program): inputs = static.data(name="image", shape=[-1, 1, 28, 28], dtype="float32") labels = static.data(name="label", shape=[-1, 1], dtype="int64") z = nn.Conv2D(1, 6, 3, 1, 1).forward(inputs) @@ -71,7 +71,7 @@ def __len__(self): class TestAlignTool(unittest.TestCase): def test_align_tool(self): - executor = fluid.Executor() + executor = base.Executor() executor.run(startup_program) align_tool = AutoAlignTool(main_program, 1, [losses.name]) diff --git a/test/auto_parallel/test_dist_assign.py b/test/auto_parallel/test_dist_assign.py index bb3558ae11a95..030a6b1513888 100644 --- a/test/auto_parallel/test_dist_assign.py +++ b/test/auto_parallel/test_dist_assign.py @@ -21,8 +21,8 @@ def make_program(): - main_program = paddle.fluid.Program() - start_program = paddle.fluid.Program() + main_program = paddle.base.Program() + start_program = paddle.base.Program() with paddle.static.program_guard(main_program, start_program): x = paddle.static.data(name='x', shape=[4, 4, 8], dtype='float32') y = paddle.static.data(name='y', shape=[4, 4, 8], dtype='float32') diff --git a/test/auto_parallel/test_dist_attr_v2.py b/test/auto_parallel/test_dist_attr_v2.py index 07041f478a091..7e0fc4a3be59e 100644 --- a/test/auto_parallel/test_dist_attr_v2.py +++ b/test/auto_parallel/test_dist_attr_v2.py @@ -33,7 +33,7 @@ _copy_dist_attr_to_cpp_for_graph, ) from paddle.distributed.fleet import auto -from paddle.fluid.core import OperatorDistAttr, TensorDistAttr +from paddle.base.core import OperatorDistAttr, TensorDistAttr paddle.enable_static() @@ -129,7 +129,7 @@ def get_program(): ) data_holder = [input, label] # dataloader - dataloader = paddle.fluid.io.DataLoader.from_generator( + dataloader = paddle.base.io.DataLoader.from_generator( feed_list=data_holder, capacity=4 * batch_size, iterable=False ) dataloader.set_batch_generator( diff --git a/test/auto_parallel/test_dist_context.py b/test/auto_parallel/test_dist_context.py index 695949fd698c0..3bc419482374e 100644 --- a/test/auto_parallel/test_dist_context.py +++ b/test/auto_parallel/test_dist_context.py @@ -114,7 +114,7 @@ def get_program(): ) data_holder = [input, label] # dataloader - dataloader = paddle.fluid.io.DataLoader.from_generator( + dataloader = paddle.base.io.DataLoader.from_generator( feed_list=data_holder, capacity=4 * batch_size, iterable=False ) dataloader.set_batch_generator( diff --git a/test/auto_parallel/test_dist_embedding.py b/test/auto_parallel/test_dist_embedding.py index bdfdc0ef32a78..691d9e4b3c3ba 100644 --- a/test/auto_parallel/test_dist_embedding.py +++ b/test/auto_parallel/test_dist_embedding.py @@ -23,8 +23,8 @@ def make_program_lookup_table_v1_mp_dp(): - main_program = paddle.fluid.Program() - start_program = paddle.fluid.Program() + main_program = paddle.base.Program() + start_program = paddle.base.Program() block = 
main_program.global_block() with paddle.static.program_guard(main_program, start_program): src_ids = paddle.static.data( @@ -34,7 +34,7 @@ def make_program_lookup_table_v1_mp_dp(): emb_out = block.create_var(name='emb_out', dtype='float32') w = paddle.create_parameter( - attr=paddle.fluid.ParamAttr(name="emb_weight"), + attr=paddle.base.ParamAttr(name="emb_weight"), shape=[64, 128], dtype='float32', is_bias=False, diff --git a/test/auto_parallel/test_dist_matmul.py b/test/auto_parallel/test_dist_matmul.py index 77c15942709c2..892ed45ce92a0 100644 --- a/test/auto_parallel/test_dist_matmul.py +++ b/test/auto_parallel/test_dist_matmul.py @@ -16,8 +16,8 @@ import paddle from paddle.distributed.fleet import auto -from paddle.fluid import program_guard -from paddle.fluid.backward import append_backward +from paddle.base import program_guard +from paddle.base.backward import append_backward paddle.enable_static() @@ -77,8 +77,8 @@ def init_y_col(trans_y): def matmul_dp2mp2(init_x, init_y, trans_x, trans_y): - main_program = paddle.fluid.Program() - start_program = paddle.fluid.Program() + main_program = paddle.base.Program() + start_program = paddle.base.Program() with paddle.static.program_guard(main_program, start_program): x = init_x(trans_x) y = init_y(trans_y) @@ -90,8 +90,8 @@ def matmul_dp2mp2(init_x, init_y, trans_x, trans_y): def matmulv2_dp2mp2(init_x, init_y, trans_x, trans_y): - main_program = paddle.fluid.Program() - start_program = paddle.fluid.Program() + main_program = paddle.base.Program() + start_program = paddle.base.Program() with paddle.static.program_guard(main_program, start_program): x = init_x(trans_x) y = init_y(trans_y) diff --git a/test/auto_parallel/test_dist_op_cost.py b/test/auto_parallel/test_dist_op_cost.py index b5ac224987351..b025eceb817c0 100644 --- a/test/auto_parallel/test_dist_op_cost.py +++ b/test/auto_parallel/test_dist_op_cost.py @@ -22,8 +22,8 @@ is_elementwise_op, ) from paddle.distributed.fleet import auto -from paddle.fluid import program_guard -from paddle.fluid.backward import append_backward +from paddle.base import program_guard +from paddle.base.backward import append_backward paddle.enable_static() diff --git a/test/auto_parallel/test_dist_pnorm.py b/test/auto_parallel/test_dist_pnorm.py index 623114208150c..eab9e7fabef0a 100644 --- a/test/auto_parallel/test_dist_pnorm.py +++ b/test/auto_parallel/test_dist_pnorm.py @@ -16,15 +16,15 @@ import paddle from paddle.distributed.fleet import auto -from paddle.fluid import program_guard -from paddle.fluid.backward import append_backward +from paddle.base import program_guard +from paddle.base.backward import append_backward paddle.enable_static() def make_program_dp2_axis_None(): - main_program = paddle.fluid.Program() - start_program = paddle.fluid.Program() + main_program = paddle.base.Program() + start_program = paddle.base.Program() with paddle.static.program_guard(main_program, start_program): x = paddle.static.data(name='x', shape=[4, 5, 6], dtype='float32') x.stop_gradient = False @@ -36,8 +36,8 @@ def make_program_dp2_axis_None(): def make_program_dp2_axis_0(): - main_program = paddle.fluid.Program() - start_program = paddle.fluid.Program() + main_program = paddle.base.Program() + start_program = paddle.base.Program() with paddle.static.program_guard(main_program, start_program): x = paddle.static.data(name='x', shape=[4, 5, 6], dtype='float32') x.stop_gradient = False @@ -49,8 +49,8 @@ def make_program_dp2_axis_0(): def make_program_dp2_axis_1(): - main_program = paddle.fluid.Program() - 
start_program = paddle.fluid.Program() + main_program = paddle.base.Program() + start_program = paddle.base.Program() with paddle.static.program_guard(main_program, start_program): x = paddle.static.data(name='x', shape=[4, 5, 6], dtype='float32') x.stop_gradient = False @@ -62,8 +62,8 @@ def make_program_dp2_axis_1(): def make_program_serial(): - main_program = paddle.fluid.Program() - start_program = paddle.fluid.Program() + main_program = paddle.base.Program() + start_program = paddle.base.Program() with paddle.static.program_guard(main_program, start_program): x = paddle.static.data(name='x', shape=[4, 5, 6], dtype='float32') x.stop_gradient = False diff --git a/test/auto_parallel/test_dist_reshape.py b/test/auto_parallel/test_dist_reshape.py index 8df5f25edddb4..adeb8ee906f0b 100644 --- a/test/auto_parallel/test_dist_reshape.py +++ b/test/auto_parallel/test_dist_reshape.py @@ -21,8 +21,8 @@ def make_program_dp2(): - main_program = paddle.fluid.Program() - start_program = paddle.fluid.Program() + main_program = paddle.base.Program() + start_program = paddle.base.Program() with paddle.static.program_guard(main_program, start_program): x = paddle.static.data(name='x', shape=[4, 4, 8], dtype='float32') x.stop_gradient = False diff --git a/test/auto_parallel/test_dist_scale.py b/test/auto_parallel/test_dist_scale.py index 0d0b591d84d40..e367e2b579494 100644 --- a/test/auto_parallel/test_dist_scale.py +++ b/test/auto_parallel/test_dist_scale.py @@ -21,8 +21,8 @@ def make_program(): - main_program = paddle.fluid.Program() - start_program = paddle.fluid.Program() + main_program = paddle.base.Program() + start_program = paddle.base.Program() with paddle.static.program_guard(main_program, start_program): x = paddle.static.data(name='x', shape=[4, 4, 8], dtype='float32') x.stop_gradient = False diff --git a/test/auto_parallel/test_dist_shape.py b/test/auto_parallel/test_dist_shape.py index 5d510f0a586d4..e048af0680153 100644 --- a/test/auto_parallel/test_dist_shape.py +++ b/test/auto_parallel/test_dist_shape.py @@ -21,8 +21,8 @@ def make_program(): - main_program = paddle.fluid.Program() - start_program = paddle.fluid.Program() + main_program = paddle.base.Program() + start_program = paddle.base.Program() with paddle.static.program_guard(main_program, start_program): x = paddle.static.data(name='x', shape=[4, 4, 8], dtype='float32') x.stop_gradient = False diff --git a/test/auto_parallel/test_dist_slice.py b/test/auto_parallel/test_dist_slice.py index e94dcf32f7bf9..211c3f5a2c9fe 100644 --- a/test/auto_parallel/test_dist_slice.py +++ b/test/auto_parallel/test_dist_slice.py @@ -21,8 +21,8 @@ def make_program_dp2(): - main_program = paddle.fluid.Program() - start_program = paddle.fluid.Program() + main_program = paddle.base.Program() + start_program = paddle.base.Program() with paddle.static.program_guard(main_program, start_program): x = paddle.static.data(name='x', shape=[4, 5, 6], dtype='float32') auto.shard_tensor( @@ -38,8 +38,8 @@ def make_program_dp2(): def make_program_serial(): - main_program = paddle.fluid.Program() - start_program = paddle.fluid.Program() + main_program = paddle.base.Program() + start_program = paddle.base.Program() with paddle.static.program_guard(main_program, start_program): x = paddle.static.data(name='x', shape=[4, 5, 6], dtype='float32') auto.shard_tensor( diff --git a/test/auto_parallel/test_dist_split.py b/test/auto_parallel/test_dist_split.py index 9b2bcf9c9de07..131d6d4d845f9 100644 --- a/test/auto_parallel/test_dist_split.py +++ 
b/test/auto_parallel/test_dist_split.py @@ -21,8 +21,8 @@ def make_program_dp2(): - main_program = paddle.fluid.Program() - start_program = paddle.fluid.Program() + main_program = paddle.base.Program() + start_program = paddle.base.Program() with paddle.static.program_guard(main_program, start_program): x = paddle.static.data(name='x', shape=[4, 12, 16], dtype='float32') x.stop_gradient = False diff --git a/test/auto_parallel/test_fp16_assign.py b/test/auto_parallel/test_fp16_assign.py index a7257cb025470..f2d50708c6a0f 100644 --- a/test/auto_parallel/test_fp16_assign.py +++ b/test/auto_parallel/test_fp16_assign.py @@ -23,8 +23,8 @@ def make_program(): - main_program = paddle.fluid.Program() - start_program = paddle.fluid.Program() + main_program = paddle.base.Program() + start_program = paddle.base.Program() with paddle.static.program_guard(main_program, start_program): x = paddle.static.data(name='x', shape=[4, 6, 8], dtype='float32') y = paddle.static.data(name='y', shape=[4, 6, 6], dtype='float32') diff --git a/test/auto_parallel/test_fused_linear_pass.py b/test/auto_parallel/test_fused_linear_pass.py index aa8a303cd2f91..575b83d0df5fb 100644 --- a/test/auto_parallel/test_fused_linear_pass.py +++ b/test/auto_parallel/test_fused_linear_pass.py @@ -34,8 +34,8 @@ def apply_pass(use_fused_passes=False, fused_passes_list=[]): def reset_prog(): - paddle.fluid.framework.switch_main_program(paddle.static.Program()) - paddle.fluid.framework.switch_startup_program(paddle.static.Program()) + paddle.base.framework.switch_main_program(paddle.static.Program()) + paddle.base.framework.switch_startup_program(paddle.static.Program()) class TestFusedLinearPass(unittest.TestCase): @@ -51,7 +51,7 @@ def init(self, engine): paddle.seed(2021) np.random.seed(2021) random.seed(2021) - place = paddle.fluid.CUDAPlace(paddle.distributed.ParallelEnv().dev_id) + place = paddle.base.CUDAPlace(paddle.distributed.ParallelEnv().dev_id) engine._executor = paddle.static.Executor(place) def get_engine(self, use_fused_passes=False, fused_passes_list=[]): diff --git a/test/auto_parallel/test_interface.py b/test/auto_parallel/test_interface.py index 5ea4209a6253b..38a3c3bb78daa 100644 --- a/test/auto_parallel/test_interface.py +++ b/test/auto_parallel/test_interface.py @@ -110,7 +110,7 @@ def test_api(self): with ProcessMesh(process_mesh1.mesh, process_mesh1.dim_names): linear0, linear1, linear0_out, gelu_out, linear1_out = mlp(input) - default_program = paddle.fluid.default_main_program() + default_program = paddle.base.default_main_program() default_dist_context = get_default_distributed_context() self.assertEqual(len(default_program.blocks[0].ops), 5) diff --git a/test/auto_parallel/test_pass_base_list.py b/test/auto_parallel/test_pass_base_list.py index f72aceb8bff6e..da7df4ad6fc85 100644 --- a/test/auto_parallel/test_pass_base_list.py +++ b/test/auto_parallel/test_pass_base_list.py @@ -34,8 +34,8 @@ def apply_pass(use_fused_passes=False, fused_passes_list=[]): def reset_prog(): - paddle.fluid.framework.switch_main_program(paddle.static.Program()) - paddle.fluid.framework.switch_startup_program(paddle.static.Program()) + paddle.base.framework.switch_main_program(paddle.static.Program()) + paddle.base.framework.switch_startup_program(paddle.static.Program()) class TestFusedPassBaseList(unittest.TestCase): @@ -51,7 +51,7 @@ def init(self, engine): paddle.seed(2021) np.random.seed(2021) random.seed(2021) - place = paddle.fluid.CUDAPlace(paddle.distributed.ParallelEnv().dev_id) + place = 
paddle.base.CUDAPlace(paddle.distributed.ParallelEnv().dev_id) engine._executor = paddle.static.Executor(place) def get_engine(self, use_fused_passes=False, fused_passes_list=[]): diff --git a/test/auto_parallel/test_pass_bf16.py b/test/auto_parallel/test_pass_bf16.py index 411cee39eca54..ce9841ced88c7 100644 --- a/test/auto_parallel/test_pass_bf16.py +++ b/test/auto_parallel/test_pass_bf16.py @@ -20,7 +20,7 @@ import paddle from paddle import nn from paddle.distributed.fleet import auto -from paddle.fluid import core +from paddle.base import core from paddle.static import InputSpec from paddle.static.amp.bf16.amp_utils import _valid_types from paddle.static.amp.fp16_utils import find_true_prev_op @@ -57,8 +57,8 @@ def __len__(self): def reset_prog(): - paddle.fluid.framework.switch_main_program(paddle.static.Program()) - paddle.fluid.framework.switch_startup_program(paddle.static.Program()) + paddle.base.framework.switch_main_program(paddle.static.Program()) + paddle.base.framework.switch_startup_program(paddle.static.Program()) class Model(nn.Layer): @@ -90,7 +90,7 @@ def init(self, engine): paddle.seed(2021) np.random.seed(2021) random.seed(2021) - place = paddle.fluid.CUDAPlace(paddle.distributed.ParallelEnv().dev_id) + place = paddle.base.CUDAPlace(paddle.distributed.ParallelEnv().dev_id) engine._executor = paddle.static.Executor(place) def get_engine(self, use_bf16=False): diff --git a/test/auto_parallel/test_prim_dist_op.py b/test/auto_parallel/test_prim_dist_op.py index 3b7351b2dd4ab..a01e1fb90441f 100644 --- a/test/auto_parallel/test_prim_dist_op.py +++ b/test/auto_parallel/test_prim_dist_op.py @@ -23,7 +23,7 @@ from paddle.distributed.auto_parallel.static.partitioner import Partitioner from paddle.distributed.auto_parallel.static.utils import set_var_dist_attr from paddle.distributed.fleet import auto -from paddle.fluid.layer_helper import LayerHelper +from paddle.base.layer_helper import LayerHelper from paddle.incubate.autograd import enable_prim paddle.enable_static() diff --git a/test/auto_parallel/test_process_mesh.py b/test/auto_parallel/test_process_mesh.py index d4b91a5dcc345..51812a31ceebc 100644 --- a/test/auto_parallel/test_process_mesh.py +++ b/test/auto_parallel/test_process_mesh.py @@ -135,7 +135,7 @@ def test_context_manager(self): with ProcessMesh(mesh, ["d"]): out = mlp(input) - default_program = paddle.fluid.default_main_program() + default_program = paddle.base.default_main_program() default_dist_context = get_default_distributed_context() for block in default_program.blocks: @@ -196,11 +196,11 @@ def test_merge_process_meshes(self): self.assertEqual(merged_process_mesh, ProcessMesh([0, 1, 2, 3, 4, 5])) merged_process_mesh = merge_process_meshes( - [process_mesh1, paddle.fluid.core.ProcessMesh()] + [process_mesh1, paddle.base.core.ProcessMesh()] ) self.assertEqual(merged_process_mesh, ProcessMesh([0, 1, 2, 3, 4, 5])) merged_process_mesh = merge_process_meshes( - [paddle.fluid.core.ProcessMesh(), process_mesh1] + [paddle.base.core.ProcessMesh(), process_mesh1] ) self.assertEqual(merged_process_mesh, ProcessMesh([0, 1, 2, 3, 4, 5])) diff --git a/test/auto_parallel/test_selective_recompute.py b/test/auto_parallel/test_selective_recompute.py index 21b11abbe84b8..5099a6adefa4f 100644 --- a/test/auto_parallel/test_selective_recompute.py +++ b/test/auto_parallel/test_selective_recompute.py @@ -74,8 +74,8 @@ def apply_pass(use_recompute=False, no_recompute_segments=[]): def reset_prog(): - paddle.fluid.framework.switch_main_program(paddle.static.Program()) - 
paddle.fluid.framework.switch_startup_program(paddle.static.Program()) + paddle.base.framework.switch_main_program(paddle.static.Program()) + paddle.base.framework.switch_startup_program(paddle.static.Program()) class TestRecomputePassWithRecomputeAPI(unittest.TestCase): @@ -91,7 +91,7 @@ def init(self, engine): paddle.seed(2022) np.random.seed(2022) random.seed(2022) - place = paddle.fluid.CUDAPlace(paddle.distributed.ParallelEnv().dev_id) + place = paddle.base.CUDAPlace(paddle.distributed.ParallelEnv().dev_id) engine._executor = paddle.static.Executor(place) def get_engine( diff --git a/test/auto_parallel/test_serialization.py b/test/auto_parallel/test_serialization.py index adc3f3e337e3c..01c17b3733570 100644 --- a/test/auto_parallel/test_serialization.py +++ b/test/auto_parallel/test_serialization.py @@ -26,8 +26,8 @@ ) from paddle.distributed.auto_parallel.static.process_mesh_v2 import ProcessMesh from paddle.distributed.fleet import auto -from paddle.fluid.core import TensorDistAttr -from paddle.fluid.framework import Program +from paddle.base.core import TensorDistAttr +from paddle.base.framework import Program paddle.enable_static() @@ -123,7 +123,7 @@ def get_program(): ) data_holder = [input, label] # dataloader - dataloader = paddle.fluid.io.DataLoader.from_generator( + dataloader = paddle.base.io.DataLoader.from_generator( feed_list=data_holder, capacity=4 * batch_size, iterable=False ) dataloader.set_batch_generator( diff --git a/test/auto_parallel/test_while_op_completion.py b/test/auto_parallel/test_while_op_completion.py index 6cdf31daec505..16ca6a7ae4a60 100644 --- a/test/auto_parallel/test_while_op_completion.py +++ b/test/auto_parallel/test_while_op_completion.py @@ -149,7 +149,7 @@ def get_program(): ) data_holder = [input, label] # dataloader - dataloader = paddle.fluid.io.DataLoader.from_generator( + dataloader = paddle.base.io.DataLoader.from_generator( feed_list=data_holder, capacity=4 * batch_size, iterable=False ) dataloader.set_batch_generator( diff --git a/test/auto_parallel/test_while_op_partition.py b/test/auto_parallel/test_while_op_partition.py index fd8edc6eba7c1..362f0c0904320 100644 --- a/test/auto_parallel/test_while_op_partition.py +++ b/test/auto_parallel/test_while_op_partition.py @@ -18,7 +18,7 @@ import paddle import paddle.nn.functional as F -from paddle import fluid, nn, static +from paddle import base, nn, static from paddle.distributed import fleet from paddle.distributed.auto_parallel.static.completion import Completer from paddle.distributed.auto_parallel.static.dist_context import ( @@ -111,7 +111,7 @@ def get_program(): train_program = static.Program() start_program = static.Program() - with fluid.program_guard(train_program, start_program): + with base.program_guard(train_program, start_program): # 循环计数器 i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0) auto.shard_tensor(i, _g_process_mesh, [None]) @@ -134,7 +134,7 @@ def get_program(): data_holder = [input, label] # dataloader - dataloader = fluid.io.DataLoader.from_generator( + dataloader = base.io.DataLoader.from_generator( feed_list=data_holder, capacity=4 * batch_size, iterable=False ) dataloader.set_batch_generator( diff --git a/test/autograd/test_autograd_functional_dynamic.py b/test/autograd/test_autograd_functional_dynamic.py index 021e63728abbe..02c4e61748d0a 100644 --- a/test/autograd/test_autograd_functional_dynamic.py +++ b/test/autograd/test_autograd_functional_dynamic.py @@ -212,7 +212,7 @@ def test_all_cases(self): def test_input_single_tensor(self): 
self.assertIsInstance( paddle.incubate.autograd.vjp(paddle.tanh, paddle.rand((3, 4)))[1], - paddle.fluid.framework.Variable, + paddle.base.framework.Variable, ) diff --git a/test/autograd/test_autograd_functional_static.py b/test/autograd/test_autograd_functional_static.py index 51e694957a346..127cb93a4cbc6 100644 --- a/test/autograd/test_autograd_functional_static.py +++ b/test/autograd/test_autograd_functional_static.py @@ -23,7 +23,7 @@ import utils import paddle -from paddle import fluid +from paddle import base paddle.enable_static() @@ -252,10 +252,10 @@ class TestJacobianFloat32(unittest.TestCase): @classmethod def setUpClass(self): paddle.enable_static() - if fluid.core.is_compiled_with_cuda(): - self.place = fluid.CUDAPlace(0) + if base.core.is_compiled_with_cuda(): + self.place = base.CUDAPlace(0) else: - self.place = fluid.CPUPlace() + self.place = base.CPUPlace() self.dtype = 'float32' self.np_dtype = np.float32 prepare_data(self, all_data_shapes, self.dtype) @@ -269,9 +269,9 @@ def setUpClass(self): self.atol = 1e-2 def run_test_by_fullmatrix(self, pd_f, np_f, inps, batch=False): - main = fluid.Program() - startup = fluid.Program() - with fluid.program_guard(main, startup): + main = base.Program() + startup = base.Program() + with base.program_guard(main, startup): xs = make_tensors(inps) JJ = paddle.incubate.autograd.Jacobian(pd_f, xs, is_batched=batch) if batch: @@ -279,7 +279,7 @@ def run_test_by_fullmatrix(self, pd_f, np_f, inps, batch=False): else: nrow, ncol = JJ.shape full_jacobian = JJ[:] - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) exe.run(startup) if isinstance(inps, list): feeds = {f'x{i}': x for i, x in enumerate(inps)} @@ -299,9 +299,9 @@ def run_test_by_fullmatrix(self, pd_f, np_f, inps, batch=False): ) def run_test_by_rows(self, pd_f, np_f, inps, batch=False): - main = fluid.Program() - startup = fluid.Program() - with fluid.program_guard(main, startup): + main = base.Program() + startup = base.Program() + with base.program_guard(main, startup): xs = make_tensors(inps) JJ = paddle.incubate.autograd.Jacobian(pd_f, xs, is_batched=batch) if batch: @@ -311,7 +311,7 @@ def run_test_by_rows(self, pd_f, np_f, inps, batch=False): nrow, ncol = JJ.shape rows = [JJ[i, :] for i in range(nrow)] - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) exe.run(startup) if isinstance(inps, list): feeds = {f'x{i}': x for i, x in enumerate(inps)} @@ -325,9 +325,9 @@ def run_test_by_rows(self, pd_f, np_f, inps, batch=False): ) def run_test_by_entries(self, pd_f, np_f, inps, batch=False): - main = fluid.Program() - startup = fluid.Program() - with fluid.program_guard(main, startup): + main = base.Program() + startup = base.Program() + with base.program_guard(main, startup): xs = make_tensors(inps) JJ = paddle.incubate.autograd.Jacobian(pd_f, xs, is_batched=batch) if batch: @@ -338,7 +338,7 @@ def run_test_by_entries(self, pd_f, np_f, inps, batch=False): else: nrow, ncol = JJ.shape entries = [JJ[i, j] for i in range(nrow) for j in range(ncol)] - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) exe.run(startup) if isinstance(inps, list): feeds = {f'x{i}': x for i, x in enumerate(inps)} @@ -408,10 +408,10 @@ class TestJacobianFloat64(TestJacobianFloat32): @classmethod def setUpClass(self): paddle.enable_static() - if fluid.core.is_compiled_with_cuda(): - self.place = fluid.CUDAPlace(0) + if base.core.is_compiled_with_cuda(): + self.place = base.CUDAPlace(0) else: - self.place = fluid.CPUPlace() + self.place = base.CPUPlace() 
self.dtype = 'float64' prepare_data(self, all_data_shapes, self.dtype) self.eps = ( @@ -429,10 +429,10 @@ class TestHessianFloat32(unittest.TestCase): @classmethod def setUpClass(self): paddle.enable_static() - if fluid.core.is_compiled_with_cuda(): - self.place = fluid.CUDAPlace(0) + if base.core.is_compiled_with_cuda(): + self.place = base.CUDAPlace(0) else: - self.place = fluid.CPUPlace() + self.place = base.CPUPlace() self.dtype = 'float32' prepare_data(self, all_data_shapes, self.dtype) self.eps = ( @@ -450,14 +450,14 @@ def setUpClass(self): ) def run_test_by_fullmatrix(self, pd_f, inps, np_hess, batch=False): - main = fluid.Program() - startup = fluid.Program() - with fluid.program_guard(main, startup): + main = base.Program() + startup = base.Program() + with base.program_guard(main, startup): xs = make_tensors(inps) HH = paddle.incubate.autograd.Hessian(pd_f, xs, is_batched=batch) nrow, ncol = HH.shape full_hessian = HH[:] - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) exe.run(startup) if isinstance(inps, list): feeds = {f'x{i}': x for i, x in enumerate(inps)} @@ -484,10 +484,10 @@ class TestHessianFloat64(TestHessianFloat32): @classmethod def setUpClass(self): paddle.enable_static() - if fluid.core.is_compiled_with_cuda(): - self.place = fluid.CUDAPlace(0) + if base.core.is_compiled_with_cuda(): + self.place = base.CUDAPlace(0) else: - self.place = fluid.CPUPlace() + self.place = base.CPUPlace() self.dtype = 'float64' prepare_data(self, all_data_shapes, self.dtype) self.eps = ( diff --git a/test/autograd/test_jvp_and_transpose.py b/test/autograd/test_jvp_and_transpose.py index 0c22c2db4490d..b37fd4e201a4e 100644 --- a/test/autograd/test_jvp_and_transpose.py +++ b/test/autograd/test_jvp_and_transpose.py @@ -15,7 +15,7 @@ import unittest import paddle -from paddle.fluid.layer_helper import LayerHelper +from paddle.base.layer_helper import LayerHelper from paddle.incubate.autograd.primrules import _jvp, _transpose paddle.enable_static() diff --git a/test/autograd/test_orig2prim.py b/test/autograd/test_orig2prim.py index 8a42255d711ee..22310e0c8d4a4 100644 --- a/test/autograd/test_orig2prim.py +++ b/test/autograd/test_orig2prim.py @@ -15,7 +15,7 @@ import unittest import paddle -from paddle.fluid.layer_helper import LayerHelper +from paddle.base.layer_helper import LayerHelper from paddle.incubate.autograd.primrules import _orig2prim paddle.enable_static() diff --git a/test/autograd/test_prim2orig.py b/test/autograd/test_prim2orig.py index 2d9427b2e8b4f..d2c5431ee2f36 100644 --- a/test/autograd/test_prim2orig.py +++ b/test/autograd/test_prim2orig.py @@ -15,7 +15,7 @@ import unittest import paddle -from paddle.fluid.layer_helper import LayerHelper +from paddle.base.layer_helper import LayerHelper from paddle.incubate.autograd.primrules import _prim2orig paddle.enable_static() diff --git a/test/autograd/test_primapi.py b/test/autograd/test_primapi.py index 0095ab0233d55..7bbe4e4476046 100644 --- a/test/autograd/test_primapi.py +++ b/test/autograd/test_primapi.py @@ -24,7 +24,7 @@ import utils import paddle -from paddle.fluid import core +from paddle.base import core from paddle.incubate.autograd import primapi, primx @@ -260,8 +260,8 @@ def without_program_guard(): ys_grad = paddle.incubate.autograd.forward_grad( ys, static_xs, static_v ) - sp = paddle.fluid.framework.default_startup_program() - mp = paddle.fluid.framework.default_main_program() + sp = paddle.base.framework.default_startup_program() + mp = paddle.base.framework.default_main_program() exe = 
paddle.static.Executor() exe.run(sp) out = exe.run(mp, feed=feed, fetch_list=ys_grad) @@ -311,8 +311,8 @@ def without_program_guard(): else self.fun(static_xs) ) xs_grad = paddle.incubate.autograd.grad(ys, static_xs, static_v) - sp = paddle.fluid.framework.default_startup_program() - mp = paddle.fluid.framework.default_main_program() + sp = paddle.base.framework.default_startup_program() + mp = paddle.base.framework.default_main_program() exe = paddle.static.Executor() exe.run(sp) out = exe.run(mp, feed=feed, fetch_list=xs_grad) diff --git a/test/autograd/utils.py b/test/autograd/utils.py index 6b2ea8437228f..f077f520deff6 100644 --- a/test/autograd/utils.py +++ b/test/autograd/utils.py @@ -31,7 +31,7 @@ def _product(t): def _get_item(t, idx): assert isinstance( - t, paddle.fluid.framework.Variable + t, paddle.base.framework.Variable ), "The first argument t must be Tensor." assert isinstance( idx, int @@ -42,7 +42,7 @@ def _get_item(t, idx): def _set_item(t, idx, value): assert isinstance( - t, paddle.fluid.framework.Variable + t, paddle.base.framework.Variable ), "The first argument t must be Tensor." assert isinstance( idx, int diff --git a/test/book/notest_understand_sentiment.py b/test/book/notest_understand_sentiment.py index 606c0f2542d85..250b3f2dc679c 100644 --- a/test/book/notest_understand_sentiment.py +++ b/test/book/notest_understand_sentiment.py @@ -25,7 +25,7 @@ import nets import paddle -from paddle import fluid +from paddle import base def convolution_net( @@ -93,12 +93,12 @@ def train( ), batch_size=BATCH_SIZE, ) - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) - feeder = fluid.DataFeeder(feed_list=[data, label], place=place) + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() + exe = base.Executor(place) + feeder = base.DataFeeder(feed_list=[data, label], place=place) def train_loop(main_program): - exe.run(fluid.default_startup_program()) + exe.run(base.default_startup_program()) for pass_id in range(PASS_NUM): for data in train_data(): @@ -119,7 +119,7 @@ def train_loop(main_program): raise AssertionError(f"Cost is too large for {net_method.__name__}") if is_local: - train_loop(fluid.default_main_program()) + train_loop(base.default_main_program()) else: port = os.getenv("PADDLE_PSERVER_PORT", "6174") pserver_ips = os.getenv("PADDLE_PSERVER_IPS") # ip,ip... 
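For orientation (not part of the patch itself): the book tests above keep their logic unchanged and only swap the package prefix, so the post-rename static-graph setup looks roughly like the sketch below. It assumes a Paddle build in which the fluid-to-base rename has landed and that paddle.base re-exports the same symbols these tests import; the variable names are illustrative only.

import paddle
from paddle import base

paddle.enable_static()

# Pick a device and build an executor; base.* mirrors the old fluid.* surface.
place = base.CUDAPlace(0) if base.core.is_compiled_with_cuda() else base.CPUPlace()
exe = base.Executor(place)

# Feed variables are defined as before; only the feeder's prefix moves to base.
data = paddle.static.data(name='data', shape=[-1, 1], dtype='int64')
label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64')
feeder = base.DataFeeder(feed_list=[data, label], place=place)

exe.run(base.default_startup_program())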
@@ -148,11 +148,11 @@ def infer(word_dict, use_cuda, save_dirname=None): if save_dirname is None: return - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() + exe = base.Executor(place) - inference_scope = fluid.core.Scope() - with fluid.scope_guard(inference_scope): + inference_scope = base.core.Scope() + with base.scope_guard(inference_scope): # Use paddle.static.io.load_inference_model to obtain the inference program desc, # the feed_target_names (the names of variables that will be fed # data using feed operators), and the fetch_targets (variables that @@ -178,7 +178,7 @@ def infer(word_dict, use_cuda, save_dirname=None): recursive_seq_lens = [[3, 4, 2]] base_shape = [1] # The range of random integers is [low, high] - tensor_words = fluid.create_random_int_lodtensor( + tensor_words = base.create_random_int_lodtensor( recursive_seq_lens, base_shape, place, low=0, high=word_dict_len - 1 ) @@ -198,7 +198,7 @@ def infer(word_dict, use_cuda, save_dirname=None): def main(word_dict, net_method, use_cuda, parallel=False, save_dirname=None): - if use_cuda and not fluid.core.is_compiled_with_cuda(): + if use_cuda and not base.core.is_compiled_with_cuda(): return train( @@ -218,11 +218,11 @@ def setUpClass(cls): @contextlib.contextmanager def new_program_scope(self): - prog = fluid.Program() - startup_prog = fluid.Program() - scope = fluid.core.Scope() - with fluid.scope_guard(scope): - with fluid.program_guard(prog, startup_prog): + prog = base.Program() + startup_prog = base.Program() + scope = base.core.Scope() + with base.scope_guard(scope): + with base.program_guard(prog, startup_prog): yield def test_conv_cpu(self): diff --git a/test/book/test_fit_a_line.py b/test/book/test_fit_a_line.py index 0d1e77ce7fcb1..5e51999446725 100644 --- a/test/book/test_fit_a_line.py +++ b/test/book/test_fit_a_line.py @@ -23,7 +23,7 @@ import numpy import paddle -from paddle import fluid +from paddle import base from paddle.static import amp paddle.enable_static() @@ -85,7 +85,7 @@ def train(use_cuda, save_dirname, is_local, use_bf16, pure_bf16): use_pure_bf16=pure_bf16, ) sgd_optimizer.minimize( - avg_cost, startup_program=fluid.default_startup_program() + avg_cost, startup_program=base.default_startup_program() ) BATCH_SIZE = 20 @@ -95,12 +95,12 @@ def train(use_cuda, save_dirname, is_local, use_bf16, pure_bf16): batch_size=BATCH_SIZE, ) - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() + exe = base.Executor(place) def train_loop(main_program): - feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) - exe.run(fluid.default_startup_program()) + feeder = base.DataFeeder(place=place, feed_list=[x, y]) + exe.run(base.default_startup_program()) test_prog = main_program.clone(for_test=True) if pure_bf16: sgd_optimizer.amp_init( @@ -132,7 +132,7 @@ def train_loop(main_program): ) if is_local: - train_loop(fluid.default_main_program()) + train_loop(base.default_main_program()) else: port = os.getenv("PADDLE_PSERVER_PORT", "6174") pserver_ips = os.getenv("PADDLE_PSERVER_IPS") # ip,ip... 
@@ -161,11 +161,11 @@ def infer(use_cuda, save_dirname=None, use_bf16=False): if save_dirname is None: return - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() + exe = base.Executor(place) - inference_scope = fluid.core.Scope() - with fluid.scope_guard(inference_scope): + inference_scope = base.core.Scope() + with base.scope_guard(inference_scope): # Use paddle.static.load_inference_model to obtain the inference program desc, # the feed_target_names (the names of variables that will be fed # data using feed operators), and the fetch_targets (variables that @@ -210,10 +210,10 @@ def infer(use_cuda, save_dirname=None, use_bf16=False): def main(use_cuda, is_local=True, use_bf16=False, pure_bf16=False): - if use_cuda and not fluid.core.is_compiled_with_cuda(): + if use_cuda and not base.core.is_compiled_with_cuda(): return - if use_bf16 and not fluid.core.is_compiled_with_mkldnn(): + if use_bf16 and not base.core.is_compiled_with_mkldnn(): return temp_dir = tempfile.TemporaryDirectory() @@ -228,11 +228,11 @@ def main(use_cuda, is_local=True, use_bf16=False, pure_bf16=False): class TestFitALineBase(unittest.TestCase): @contextlib.contextmanager def program_scope_guard(self): - prog = fluid.Program() - startup_prog = fluid.Program() - scope = fluid.core.Scope() - with fluid.scope_guard(scope): - with fluid.program_guard(prog, startup_prog): + prog = base.Program() + startup_prog = base.Program() + scope = base.core.Scope() + with base.scope_guard(scope): + with base.program_guard(prog, startup_prog): yield @@ -247,7 +247,7 @@ def test_cuda(self): @unittest.skipIf( - not fluid.core.supports_bfloat16(), "place does not support BF16 evaluation" + not base.core.supports_bfloat16(), "place does not support BF16 evaluation" ) class TestFitALineBF16(TestFitALineBase): def test_bf16(self): diff --git a/test/book/test_image_classification.py b/test/book/test_image_classification.py index bb8c45ea9011c..3e8f771983cac 100644 --- a/test/book/test_image_classification.py +++ b/test/book/test_image_classification.py @@ -26,7 +26,7 @@ import nets import paddle -from paddle import fluid +from paddle import base paddle.enable_static() @@ -130,7 +130,7 @@ def train(net_type, use_cuda, save_dirname, is_local): acc = paddle.static.accuracy(input=predict, label=label) # Test program - test_program = fluid.default_main_program().clone(for_test=True) + test_program = base.default_main_program().clone(for_test=True) optimizer = paddle.optimizer.Adam(learning_rate=0.001) optimizer.minimize(avg_cost) @@ -149,12 +149,12 @@ def train(net_type, use_cuda, save_dirname, is_local): paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE ) - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) - feeder = fluid.DataFeeder(place=place, feed_list=[images, label]) + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() + exe = base.Executor(place) + feeder = base.DataFeeder(place=place, feed_list=[images, label]) def train_loop(main_program): - exe.run(fluid.default_startup_program()) + exe.run(base.default_startup_program()) loss = 0.0 for pass_id in range(PASS_NUM): for batch_id, data in enumerate(train_reader()): @@ -194,7 +194,7 @@ def train_loop(main_program): return if is_local: - train_loop(fluid.default_main_program()) + train_loop(base.default_main_program()) else: port = os.getenv("PADDLE_PSERVER_PORT", "6174") pserver_ips = os.getenv("PADDLE_PSERVER_IPS") # ip,ip... 
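Likewise, the hardware and capability guards in these hunks change only their import path. A minimal sketch of that guard pattern, assuming base.core exposes the same checks the old fluid.core did; the main() signature here is illustrative and not taken from the patch.

from paddle import base


def main(use_cuda, use_bf16):
    # Early-return guards, as in the tests, now spelled with base.core.
    if use_cuda and not base.core.is_compiled_with_cuda():
        return
    if use_bf16 and not base.core.is_compiled_with_mkldnn():
        return
    # ... training / inference would go here ...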
@@ -223,11 +223,11 @@ def infer(use_cuda, save_dirname=None): if save_dirname is None: return - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() + exe = base.Executor(place) - inference_scope = fluid.core.Scope() - with fluid.scope_guard(inference_scope): + inference_scope = base.core.Scope() + with base.scope_guard(inference_scope): # Use paddle.static.io.load_inference_model to obtain the inference program desc, # the feed_target_names (the names of variables that will be fed # data using feed operators), and the fetch_targets (variables that @@ -266,7 +266,7 @@ def infer(use_cuda, save_dirname=None): def main(net_type, use_cuda, is_local=True): - if use_cuda and not fluid.core.is_compiled_with_cuda(): + if use_cuda and not base.core.is_compiled_with_cuda(): return # Directory for saving the trained model @@ -299,11 +299,11 @@ def test_resnet_cpu(self): @contextlib.contextmanager def scope_prog_guard(self): - prog = fluid.Program() - startup_prog = fluid.Program() - scope = fluid.core.Scope() - with fluid.scope_guard(scope): - with fluid.program_guard(prog, startup_prog): + prog = base.Program() + startup_prog = base.Program() + scope = base.core.Scope() + with base.scope_guard(scope): + with base.program_guard(prog, startup_prog): yield diff --git a/test/book/test_recognize_digits.py b/test/book/test_recognize_digits.py index aff66af640031..643aaae6ce6d9 100644 --- a/test/book/test_recognize_digits.py +++ b/test/book/test_recognize_digits.py @@ -24,8 +24,8 @@ import nets import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -79,7 +79,7 @@ def train( params_filename=None, is_local=True, ): - if use_cuda and not fluid.core.is_compiled_with_cuda(): + if use_cuda and not base.core.is_compiled_with_cuda(): return img = paddle.static.data(name='img', shape=[-1, 1, 28, 28], dtype='float32') label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') @@ -94,14 +94,14 @@ def train( else: prediction, avg_loss, acc = net_conf(img, label) - test_program = fluid.default_main_program().clone(for_test=True) + test_program = base.default_main_program().clone(for_test=True) optimizer = paddle.optimizer.Adam(learning_rate=0.001) optimizer.minimize(avg_loss) - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() - exe = fluid.Executor(place) + exe = base.Executor(place) train_reader = paddle.batch( paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=500), @@ -110,10 +110,10 @@ def train( test_reader = paddle.batch( paddle.dataset.mnist.test(), batch_size=BATCH_SIZE ) - feeder = fluid.DataFeeder(feed_list=[img, label], place=place) + feeder = base.DataFeeder(feed_list=[img, label], place=place) def train_loop(main_program): - exe.run(fluid.default_startup_program()) + exe.run(base.default_startup_program()) PASS_NUM = 100 for pass_id in range(PASS_NUM): @@ -165,7 +165,7 @@ def train_loop(main_program): raise AssertionError("Loss of recognize digits is too large") if is_local: - train_loop(fluid.default_main_program()) + train_loop(base.default_main_program()) else: port = os.getenv("PADDLE_PSERVER_PORT", "6174") pserver_ips = os.getenv("PADDLE_PSERVER_IPS") # ip,ip... 
@@ -196,11 +196,11 @@ def infer( if save_dirname is None: return - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() + exe = base.Executor(place) - inference_scope = fluid.core.Scope() - with fluid.scope_guard(inference_scope): + inference_scope = base.core.Scope() + with base.scope_guard(inference_scope): # Use paddle.static.io.load_inference_model to obtain the inference program desc, # the feed_target_names (the names of variables that will be feeded # data using feed operators), and the fetch_targets (variables that @@ -269,11 +269,11 @@ class TestRecognizeDigits(unittest.TestCase): def inject_test_method(use_cuda, parallel, nn_type, combine): def __impl__(self): - prog = fluid.Program() - startup_prog = fluid.Program() - scope = fluid.core.Scope() - with fluid.scope_guard(scope): - with fluid.program_guard(prog, startup_prog): + prog = base.Program() + startup_prog = base.Program() + scope = base.core.Scope() + with base.scope_guard(scope): + with base.program_guard(prog, startup_prog): main(use_cuda, parallel, nn_type, combine) fn = 'test_{}_{}_{}_{}'.format( diff --git a/test/book/test_recommender_system.py b/test/book/test_recommender_system.py index 8912467413be1..7a4a70be105d5 100644 --- a/test/book/test_recommender_system.py +++ b/test/book/test_recommender_system.py @@ -24,9 +24,9 @@ import nets import paddle -from paddle import fluid -from paddle.fluid import framework -from paddle.fluid.executor import Executor +from paddle import base +from paddle.base import framework +from paddle.base.executor import Executor from paddle.optimizer import SGD paddle.enable_static() @@ -186,12 +186,12 @@ def train(use_cuda, save_dirname, is_local=True): scale_infer, avg_cost = model() # test program - test_program = fluid.default_main_program().clone(for_test=True) + test_program = base.default_main_program().clone(for_test=True) sgd_optimizer = SGD(learning_rate=0.2) sgd_optimizer.minimize(avg_cost) - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() exe = Executor(place) @@ -233,7 +233,7 @@ def train_loop(main_program): main_program.global_block().var(var_name) for var_name in feed_infer_order ] - feeder = fluid.DataFeeder(feed_list, place) + feeder = base.DataFeeder(feed_list, place) PASS_NUM = 100 for pass_id in range(PASS_NUM): @@ -273,7 +273,7 @@ def train_loop(main_program): sys.exit("got NaN loss, training failed.") if is_local: - train_loop(fluid.default_main_program()) + train_loop(base.default_main_program()) else: port = os.getenv("PADDLE_PSERVER_PORT", "6174") pserver_ips = os.getenv("PADDLE_PSERVER_IPS") # ip,ip... 
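The per-test program and scope isolation used by inject_test_method above follows the same mechanical substitution. A minimal sketch, assuming base.Program, base.core.Scope, base.scope_guard and base.program_guard behave exactly like their fluid counterparts; the data layer is a placeholder.

import paddle
from paddle import base

paddle.enable_static()

# One fresh program and scope per case, as inject_test_method does above.
prog = base.Program()
startup_prog = base.Program()
scope = base.core.Scope()
with base.scope_guard(scope):
    with base.program_guard(prog, startup_prog):
        x = paddle.static.data(name='x', shape=[-1, 784], dtype='float32')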
@@ -302,11 +302,11 @@ def infer(use_cuda, save_dirname=None): if save_dirname is None: return - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() + exe = base.Executor(place) - inference_scope = fluid.core.Scope() - with fluid.scope_guard(inference_scope): + inference_scope = base.core.Scope() + with base.scope_guard(inference_scope): # Use paddle.static.io.load_inference_model to obtain the inference program desc, # the feed_target_names (the names of variables that will be fed # data using feed operators), and the fetch_targets (variables that @@ -328,27 +328,27 @@ def infer(use_cuda, save_dirname=None): # Correspondingly, recursive_sequence_lengths = [[3, 2]] contains one # level of detail info, indicating that `data` consists of two sequences # of length 3 and 2, respectively. - user_id = fluid.create_lod_tensor([[np.int64(1)]], [[1]], place) + user_id = base.create_lod_tensor([[np.int64(1)]], [[1]], place) assert feed_target_names[1] == "gender_id" - gender_id = fluid.create_lod_tensor([[np.int64(1)]], [[1]], place) + gender_id = base.create_lod_tensor([[np.int64(1)]], [[1]], place) assert feed_target_names[2] == "age_id" - age_id = fluid.create_lod_tensor([[np.int64(0)]], [[1]], place) + age_id = base.create_lod_tensor([[np.int64(0)]], [[1]], place) assert feed_target_names[3] == "job_id" - job_id = fluid.create_lod_tensor([[np.int64(10)]], [[1]], place) + job_id = base.create_lod_tensor([[np.int64(10)]], [[1]], place) assert feed_target_names[4] == "movie_id" - movie_id = fluid.create_lod_tensor([[np.int64(783)]], [[1]], place) + movie_id = base.create_lod_tensor([[np.int64(783)]], [[1]], place) assert feed_target_names[5] == "category_id" - category_id = fluid.create_lod_tensor( + category_id = base.create_lod_tensor( [np.array([10, 8, 9], dtype='int64')], [[3]], place ) assert feed_target_names[6] == "movie_title" - movie_title = fluid.create_lod_tensor( + movie_title = base.create_lod_tensor( [np.array([1069, 4140, 2923, 710, 988], dtype='int64')], [[5]], place, @@ -374,7 +374,7 @@ def infer(use_cuda, save_dirname=None): def main(use_cuda): - if use_cuda and not fluid.core.is_compiled_with_cuda(): + if use_cuda and not base.core.is_compiled_with_cuda(): return # Directory for saving the inference model diff --git a/test/book/test_word2vec_book.py b/test/book/test_word2vec_book.py index f971db41dbbc3..bfa4a05b5e160 100644 --- a/test/book/test_word2vec_book.py +++ b/test/book/test_word2vec_book.py @@ -21,18 +21,18 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base paddle.enable_static() def get_place(target): if target == "cuda": - return fluid.CUDAPlace(0) + return base.CUDAPlace(0) elif target == "xpu": - return fluid.XPUPlace(0) + return base.XPUPlace(0) elif target == "cpu": - return fluid.CPUPlace() + return base.CPUPlace() else: raise ValueError( "Target `{}` is not on the support list: `cuda`, `xpu` and `cpu`.".format( @@ -134,21 +134,21 @@ def __network__(words): use_pure_bf16=pure_bf16, ) - sgd_optimizer.minimize(avg_cost, fluid.default_startup_program()) + sgd_optimizer.minimize(avg_cost, base.default_startup_program()) train_reader = paddle.batch( paddle.dataset.imikolov.train(word_dict, N), BATCH_SIZE ) place = get_place(target) - exe = fluid.Executor(place) - feeder = fluid.DataFeeder( + exe = base.Executor(place) + feeder = base.DataFeeder( feed_list=[first_word, second_word, third_word, forth_word, next_word], place=place, ) def 
train_loop(main_program): - exe.run(fluid.default_startup_program()) + exe.run(base.default_startup_program()) if pure_bf16: sgd_optimizer.amp_init(exe.place) @@ -172,7 +172,7 @@ def train_loop(main_program): raise AssertionError(f"Cost is too large {avg_cost_np[0]:2.2}") if is_local: - train_loop(fluid.default_main_program()) + train_loop(base.default_main_program()) else: port = os.getenv("PADDLE_PSERVER_PORT", "6174") pserver_ips = os.getenv("PADDLE_PSERVER_IPS") # ip,ip... @@ -202,9 +202,9 @@ def infer(target, save_dirname=None): return place = get_place(target) - exe = fluid.Executor(place) - inference_scope = fluid.core.Scope() - with fluid.scope_guard(inference_scope): + exe = base.Executor(place) + inference_scope = base.core.Scope() + with base.scope_guard(inference_scope): # Use paddle.static.io.load_inference_model to obtain the inference program desc, # the feed_target_names (the names of variables that will be fed # data using feed operators), and the fetch_targets (variables that @@ -229,16 +229,16 @@ def infer(target, save_dirname=None): recursive_seq_lens = [[1]] base_shape = [1] # The range of random integers is [low, high] - first_word = fluid.create_random_int_lodtensor( + first_word = base.create_random_int_lodtensor( recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1 ) - second_word = fluid.create_random_int_lodtensor( + second_word = base.create_random_int_lodtensor( recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1 ) - third_word = fluid.create_random_int_lodtensor( + third_word = base.create_random_int_lodtensor( recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1 ) - fourth_word = fluid.create_random_int_lodtensor( + fourth_word = base.create_random_int_lodtensor( recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1 ) @@ -262,17 +262,17 @@ def infer(target, save_dirname=None): ) def to_infer_tensor(lod_tensor): - infer_tensor = fluid.core.PaddleTensor() + infer_tensor = base.core.PaddleTensor() infer_tensor.lod = lod_tensor.lod() - infer_tensor.data = fluid.core.PaddleBuf(np.array(lod_tensor)) + infer_tensor.data = base.core.PaddleBuf(np.array(lod_tensor)) infer_tensor.shape = lod_tensor.shape() - infer_tensor.dtype = fluid.core.PaddleDType.INT64 + infer_tensor.dtype = base.core.PaddleDType.INT64 return infer_tensor infer_inputs = [first_word, second_word, third_word, fourth_word] infer_inputs = [to_infer_tensor(t) for t in infer_inputs] - infer_config = fluid.core.NativeConfig() + infer_config = base.core.NativeConfig() infer_config.prog_file = save_dirname + ".pdmodel" infer_config.param_file = save_dirname + ".pdiparams" if target == "cuda": @@ -281,7 +281,7 @@ def to_infer_tensor(lod_tensor): infer_config.fraction_of_gpu_memory = 0.15 elif target == "xpu": infer_config.use_xpu = True - compiled_program = fluid.compiler.CompiledProgram(inference_program) + compiled_program = base.compiler.CompiledProgram(inference_program) compiled_program._with_inference_optimize(infer_config) assert compiled_program._is_inference is True infer_outputs = exe.run(compiled_program, feed=infer_inputs) @@ -292,12 +292,12 @@ def to_infer_tensor(lod_tensor): def main(target, is_sparse, is_parallel, use_bf16, pure_bf16): - if target == "cuda" and not fluid.core.is_compiled_with_cuda(): + if target == "cuda" and not base.core.is_compiled_with_cuda(): return - if target == "xpu" and not fluid.core.is_compiled_with_xpu(): + if target == "xpu" and not base.core.is_compiled_with_xpu(): return - if use_bf16 and not 
fluid.core.is_compiled_with_mkldnn():
+    if use_bf16 and not base.core.is_compiled_with_mkldnn():
         return
 
     temp_dir = tempfile.TemporaryDirectory()
@@ -349,15 +349,15 @@ def inject_test_method(
     )
 
     def __impl__(*args, **kwargs):
-        prog = fluid.Program()
-        startup_prog = fluid.Program()
-        scope = fluid.core.Scope()
-        with fluid.scope_guard(scope):
-            with fluid.program_guard(prog, startup_prog):
+        prog = base.Program()
+        startup_prog = base.Program()
+        scope = base.core.Scope()
+        with base.scope_guard(scope):
+            with base.program_guard(prog, startup_prog):
                 main(target, is_sparse, is_parallel, use_bf16, pure_bf16)
 
     if (
-        not fluid.core.is_compiled_with_cuda() or target == "cuda"
+        not base.core.is_compiled_with_cuda() or target == "cuda"
     ) and is_sparse:
         fn = __impl__
     else:
diff --git a/test/cinn/op_mappers/op_mapper_test.py b/test/cinn/op_mappers/op_mapper_test.py
index 2e277bd879b78..584c26488484a 100644
--- a/test/cinn/op_mappers/op_mapper_test.py
+++ b/test/cinn/op_mappers/op_mapper_test.py
@@ -23,7 +23,7 @@ from cinn.frontend import PaddleModelConvertor
 
 import paddle
-from paddle.fluid.layer_helper import LayerHelper
+from paddle.base.layer_helper import LayerHelper
 from paddle.static import Variable as PaddleVariable
 
 sys.path.append("/work/dev_CINN/build/python/tests")
@@ -426,7 +426,7 @@ def paddleddtype2nptype(dtype):
         paddle.int64: "int64",
         paddle.uint8: "uint8",
         paddle.bool: "bool",
-        paddle.fluid.core.VarDesc.VarType.RAW: "unk",
+        paddle.base.core.VarDesc.VarType.RAW: "unk",
     }
     assert dtype in switch_map, str(dtype) + " not support in CINN"
     return switch_map[dtype]
@@ -444,7 +444,7 @@ def nptype2paddledtype(dtype):
         "uint8": paddle.uint8,
         "bool": paddle.bool,
         # The paddle's phi::DataType::UNDEFINED is mapped into ProtoDataType::RAW,
-        "unk": paddle.fluid.core.VarDesc.VarType.RAW,
+        "unk": paddle.base.core.VarDesc.VarType.RAW,
     }
     assert dtype in switch_map, dtype + " not support in CINN"
     return switch_map[dtype]
diff --git a/test/cinn/ops/test_conv2d_op.py b/test/cinn/ops/test_conv2d_op.py
index 79dc3d7aed8ca..4f7a5ed577863 100755
--- a/test/cinn/ops/test_conv2d_op.py
+++ b/test/cinn/ops/test_conv2d_op.py
@@ -23,7 +23,7 @@ import paddle
 
 set_cinn_cudnn_deterministic(True)
-paddle.fluid.set_flags({'FLAGS_cudnn_deterministic': 1})
+paddle.base.set_flags({'FLAGS_cudnn_deterministic': 1})
 
 
 @OpTestTool.skip_if(
diff --git a/test/cinn/test_computation.py b/test/cinn/test_computation.py
index 19deb70a3e6cf..ce4b9c5fcc592 100755
--- a/test/cinn/test_computation.py
+++ b/test/cinn/test_computation.py
@@ -22,7 +22,7 @@ from cinn.frontend import Computation, NetBuilder
 
 import paddle
-from paddle import fluid, static
+from paddle import base, static
 
 assert len(sys.argv) == 3
 enable_gpu = sys.argv.pop()
@@ -113,11 +113,11 @@ def test_compile_paddle_model(self):
         out = computation.get_tensor("fc_0.tmp_2")
         res_cinn = out.numpy(self.target)
 
-        config = fluid.core.AnalysisConfig(naive_model_dir)
+        config = base.core.AnalysisConfig(naive_model_dir)
         config.disable_gpu()
         config.switch_ir_optim(False)
-        paddle_predictor = fluid.core.create_paddle_predictor(config)
-        data = fluid.core.PaddleTensor(A_data)
+        paddle_predictor = base.core.create_paddle_predictor(config)
+        data = base.core.PaddleTensor(A_data)
         paddle_out = paddle_predictor.run([data])
         res_paddle = paddle_out[0].as_ndarray()
 
diff --git a/test/cinn/test_efficientnet.py b/test/cinn/test_efficientnet.py
index 4a627e090220d..f3a817d140fe1 100755
--- a/test/cinn/test_efficientnet.py
+++ b/test/cinn/test_efficientnet.py
@@ -22,7 +22,7 @@ from cinn.common import DefaultHostTarget, DefaultNVGPUTarget
 from cinn.frontend import Interpreter
 
-from paddle import fluid
+from paddle import base
 
 enable_gpu = sys.argv.pop()
 model_dir = sys.argv.pop()
@@ -40,13 +40,13 @@ def setUp(self):
         self.input_tensor = 'image'
 
     def get_paddle_inference_result(self, model_dir, data):
-        config = fluid.core.AnalysisConfig(
+        config = base.core.AnalysisConfig(
             model_dir + '/__model__', model_dir + '/params'
         )
         config.disable_gpu()
         config.switch_ir_optim(False)
-        self.paddle_predictor = fluid.core.create_paddle_predictor(config)
-        data = fluid.core.PaddleTensor(data)
+        self.paddle_predictor = base.core.create_paddle_predictor(config)
+        data = base.core.PaddleTensor(data)
         results = self.paddle_predictor.run([data])
         get_tensor = self.paddle_predictor.get_output_tensor(
             self.target_tensor
diff --git a/test/cinn/test_facedet.py b/test/cinn/test_facedet.py
index 20290846625c9..b2282cc5faa94 100755
--- a/test/cinn/test_facedet.py
+++ b/test/cinn/test_facedet.py
@@ -22,7 +22,7 @@ from cinn.common import DefaultHostTarget, DefaultNVGPUTarget
 from cinn.frontend import Interpreter
 
-from paddle import fluid
+from paddle import base
 
 enable_gpu = sys.argv.pop()
 model_dir = sys.argv.pop()
@@ -42,13 +42,13 @@ def setUp(self):
         self.input_tensor = 'x0'
 
     def get_paddle_inference_result(self, model_dir, data):
-        config = fluid.core.AnalysisConfig(
+        config = base.core.AnalysisConfig(
             model_dir + '/__model__', model_dir + '/params'
         )
         config.disable_gpu()
         config.switch_ir_optim(False)
-        self.paddle_predictor = fluid.core.create_paddle_predictor(config)
-        data = fluid.core.PaddleTensor(data)
+        self.paddle_predictor = base.core.create_paddle_predictor(config)
+        data = base.core.PaddleTensor(data)
         results = self.paddle_predictor.run([data])
         get_tensor = self.paddle_predictor.get_output_tensor(
             self.target_tensor
diff --git a/test/cinn/test_frontend.py b/test/cinn/test_frontend.py
index d7ecf05d6840f..634c2c227822f 100755
--- a/test/cinn/test_frontend.py
+++ b/test/cinn/test_frontend.py
@@ -21,7 +21,7 @@ from cinn.common import DefaultHostTarget, DefaultNVGPUTarget
 from cinn.frontend import Interpreter
 
-from paddle import fluid
+from paddle import base
 
 assert len(sys.argv) == 1 + 2 + 1 # model and enable_gpu count
 enable_gpu = sys.argv.pop()
@@ -113,11 +113,11 @@ def setUp(self):
         self.model_dir = naive_model_dir
 
     def get_paddle_inference_result(self, model_dir, data):
-        config = fluid.core.AnalysisConfig(model_dir)
+        config = base.core.AnalysisConfig(model_dir)
         config.disable_gpu()
         config.switch_ir_optim(False)
-        self.paddle_predictor = fluid.core.create_paddle_predictor(config)
-        data = fluid.core.PaddleTensor(data)
+        self.paddle_predictor = base.core.create_paddle_predictor(config)
+        data = base.core.PaddleTensor(data)
         results = self.paddle_predictor.run([data])
         return results[0].as_ndarray()
 
@@ -156,11 +156,11 @@ def setUp(self):
         self.model_dir = multi_fc_model_dir
 
     def get_paddle_inference_result(self, model_dir, data):
-        config = fluid.core.AnalysisConfig(model_dir)
+        config = base.core.AnalysisConfig(model_dir)
         config.disable_gpu()
         config.switch_ir_optim(False)
-        self.paddle_predictor = fluid.core.create_paddle_predictor(config)
-        data = fluid.core.PaddleTensor(data)
+        self.paddle_predictor = base.core.create_paddle_predictor(config)
+        data = base.core.PaddleTensor(data)
         results = self.paddle_predictor.run([data])
         return results[0].as_ndarray()
 
diff --git a/test/cinn/test_mobilenetv1.py b/test/cinn/test_mobilenetv1.py
index 44d6de40c198f..6c4cfaf011100 100644
--- a/test/cinn/test_mobilenetv1.py
+++ b/test/cinn/test_mobilenetv1.py
@@ -22,7 +22,7 @@ from cinn.common import DefaultHostTarget, DefaultNVGPUTarget
 from cinn.frontend import Interpreter
 
-from paddle import fluid
+from paddle import base
 
 enable_gpu = sys.argv.pop()
 model_dir = sys.argv.pop()
@@ -42,11 +42,11 @@ def setUp(self):
         self.input_tensor = 'image'
 
     def get_paddle_inference_result(self, model_dir, data):
-        config = fluid.core.AnalysisConfig(model_dir)
+        config = base.core.AnalysisConfig(model_dir)
         config.disable_gpu()
         config.switch_ir_optim(False)
-        self.paddle_predictor = fluid.core.create_paddle_predictor(config)
-        data = fluid.core.PaddleTensor(data)
+        self.paddle_predictor = base.core.create_paddle_predictor(config)
+        data = base.core.PaddleTensor(data)
         results = self.paddle_predictor.run([data])
         get_tensor = self.paddle_predictor.get_output_tensor(
             self.target_tensor
diff --git a/test/cinn/test_mobilenetv2.py b/test/cinn/test_mobilenetv2.py
index b81314d87f5b8..4332678788117 100755
--- a/test/cinn/test_mobilenetv2.py
+++ b/test/cinn/test_mobilenetv2.py
@@ -22,7 +22,7 @@ from cinn.common import DefaultHostTarget, DefaultNVGPUTarget
 from cinn.frontend import Interpreter
 
-from paddle import fluid
+from paddle import base
 
 enable_gpu = sys.argv.pop()
 model_dir = sys.argv.pop()
@@ -40,13 +40,13 @@ def setUp(self):
         self.input_tensor = 'image'
 
     def get_paddle_inference_result(self, model_dir, data):
-        config = fluid.core.AnalysisConfig(
+        config = base.core.AnalysisConfig(
             model_dir + '/__model__', model_dir + '/params'
         )
         config.disable_gpu()
         config.switch_ir_optim(False)
-        self.paddle_predictor = fluid.core.create_paddle_predictor(config)
-        data = fluid.core.PaddleTensor(data)
+        self.paddle_predictor = base.core.create_paddle_predictor(config)
+        data = base.core.PaddleTensor(data)
         results = self.paddle_predictor.run([data])
         get_tensor = self.paddle_predictor.get_output_tensor(
             self.target_tensor
diff --git a/test/cinn/test_resnet.py b/test/cinn/test_resnet.py
index a9c8cc3f40dd7..63ff06f528787 100755
--- a/test/cinn/test_resnet.py
+++ b/test/cinn/test_resnet.py
@@ -21,7 +21,7 @@ from cinn.common import DefaultHostTarget, DefaultNVGPUTarget
 from cinn.frontend import Interpreter
 
-from paddle import fluid
+from paddle import base
 
 enable_gpu = sys.argv.pop()
 model_dir = sys.argv.pop()
@@ -39,13 +39,13 @@ def setUp(self):
         self.x_shape = [1, 160, 7, 7]
 
     def get_paddle_inference_result(self, data):
-        config = fluid.core.AnalysisConfig(
+        config = base.core.AnalysisConfig(
             self.model_dir + ".pdmodel", self.model_dir + ".pdiparams"
         )
         config.disable_gpu()
         config.switch_ir_optim(False)
-        self.paddle_predictor = fluid.core.create_paddle_predictor(config)
-        data = fluid.core.PaddleTensor(data)
+        self.paddle_predictor = base.core.create_paddle_predictor(config)
+        data = base.core.PaddleTensor(data)
         results = self.paddle_predictor.run([data])
         return results[0].as_ndarray()
 
diff --git a/test/cinn/test_resnet18.py b/test/cinn/test_resnet18.py
index 7b66b3a7c3538..3d746113ea3a5 100755
--- a/test/cinn/test_resnet18.py
+++ b/test/cinn/test_resnet18.py
@@ -22,7 +22,7 @@ from cinn.common import DefaultHostTarget, DefaultNVGPUTarget
 from cinn.frontend import Interpreter
 
-from paddle import fluid
+from paddle import base
 
 enable_gpu = sys.argv.pop()
 model_dir = sys.argv.pop()
@@ -42,13 +42,13 @@ def setUp(self):
         self.input_tensor = 'image'
 
     def get_paddle_inference_result(self, model_dir, data):
-        config = fluid.core.AnalysisConfig(
+        config = base.core.AnalysisConfig(
             model_dir + '/__model__', model_dir + '/params'
         )
         config.disable_gpu()
config.switch_ir_optim(False) - self.paddle_predictor = fluid.core.create_paddle_predictor(config) - data = fluid.core.PaddleTensor(data) + self.paddle_predictor = base.core.create_paddle_predictor(config) + data = base.core.PaddleTensor(data) results = self.paddle_predictor.run([data]) get_tensor = self.paddle_predictor.get_output_tensor( self.target_tensor diff --git a/test/cinn/test_resnet50.py b/test/cinn/test_resnet50.py index e63415db9ed95..2987bd2ad6837 100755 --- a/test/cinn/test_resnet50.py +++ b/test/cinn/test_resnet50.py @@ -22,7 +22,7 @@ from cinn.common import DefaultHostTarget, DefaultNVGPUTarget from cinn.frontend import Interpreter -from paddle import fluid +from paddle import base enable_gpu = sys.argv.pop() model_dir = sys.argv.pop() @@ -42,13 +42,13 @@ def setUp(self): self.input_tensor = 'inputs' def get_paddle_inference_result(self, model_dir, data): - config = fluid.core.AnalysisConfig( + config = base.core.AnalysisConfig( model_dir + '/__model__', model_dir + '/params' ) config.disable_gpu() config.switch_ir_optim(False) - self.paddle_predictor = fluid.core.create_paddle_predictor(config) - data = fluid.core.PaddleTensor(data) + self.paddle_predictor = base.core.create_paddle_predictor(config) + data = base.core.PaddleTensor(data) results = self.paddle_predictor.run([data]) get_tensor = self.paddle_predictor.get_output_tensor( self.target_tensor diff --git a/test/cinn/test_squeezenet.py b/test/cinn/test_squeezenet.py index 78a28d33abf19..2228961e41f4f 100644 --- a/test/cinn/test_squeezenet.py +++ b/test/cinn/test_squeezenet.py @@ -22,7 +22,7 @@ from cinn.common import DefaultHostTarget, DefaultNVGPUTarget from cinn.frontend import Interpreter -from paddle import fluid +from paddle import base enable_gpu = sys.argv.pop() model_dir = sys.argv.pop() @@ -42,11 +42,11 @@ def setUp(self): self.input_tensor = 'data' def get_paddle_inference_result(self, model_dir, data): - config = fluid.core.AnalysisConfig(model_dir) + config = base.core.AnalysisConfig(model_dir) config.disable_gpu() config.switch_ir_optim(False) - self.paddle_predictor = fluid.core.create_paddle_predictor(config) - data = fluid.core.PaddleTensor(data) + self.paddle_predictor = base.core.create_paddle_predictor(config) + data = base.core.PaddleTensor(data) results = self.paddle_predictor.run([data]) get_tensor = self.paddle_predictor.get_output_tensor( self.target_tensor diff --git a/test/collective/collective_allgather_api.py b/test/collective/collective_allgather_api.py index 761703cb49497..800546664529b 100644 --- a/test/collective/collective_allgather_api.py +++ b/test/collective/collective_allgather_api.py @@ -18,8 +18,8 @@ import paddle import paddle.distributed as dist -from paddle import fluid, framework -from paddle.fluid import data_feeder +from paddle import base, framework +from paddle.base import data_feeder paddle.enable_static() @@ -83,7 +83,7 @@ def __init__(self): def get_model(self, main_prog, startup_program, rank, dtype=None): dtype = "float32" if dtype is None else dtype - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tensor_list = [] tindata = paddle.static.data( name="tindata", shape=[10, 1000], dtype=dtype @@ -94,7 +94,7 @@ def get_model(self, main_prog, startup_program, rank, dtype=None): def get_model_new( self, main_prog, startup_program, rank, dtype=None, reduce_type=None ): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tensor_list = [] tindata = 
paddle.static.data( name="tindata", shape=[10, 1000], dtype=dtype @@ -103,8 +103,8 @@ def get_model_new( return tensor_list def run_trainer(self, args): - train_prog = fluid.Program() - startup_prog = fluid.Program() + train_prog = base.Program() + startup_prog = base.Program() endpoints = args["endpoints"].split(",") rank = args["trainerid"] current_endpoint = args["currentendpoint"] @@ -115,14 +115,14 @@ def run_trainer(self, args): paddle.distributed.init_parallel_env() if args['backend'] == 'nccl': device_id = int(os.getenv("FLAGS_selected_gpus", "0")) - place = fluid.CUDAPlace( + place = base.CUDAPlace( device_id - ) # if args.use_gpu else fluid.CPUPlace() + ) # if args.use_gpu else base.CPUPlace() elif args['backend'] == 'bkcl': device_id = int(os.getenv("FLAGS_selected_xpus", "0")) - place = fluid.XPUPlace(device_id) + place = base.XPUPlace(device_id) else: - place = fluid.CPUPlace() + place = base.CPUPlace() indata = test_base.create_test_data( shape=(10, 1000), dtype=args["dtype"], seed=os.getpid() ) @@ -138,7 +138,7 @@ def run_trainer(self, args): train_prog, startup_prog, rank, dtype=args["dtype"] ) ) - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(startup_prog) fetch_list = [] for elem in result: diff --git a/test/collective/collective_allgather_api_dygraph.py b/test/collective/collective_allgather_api_dygraph.py index 15bf9c4629bad..ec33cf3419d88 100644 --- a/test/collective/collective_allgather_api_dygraph.py +++ b/test/collective/collective_allgather_api_dygraph.py @@ -16,7 +16,7 @@ import paddle import paddle.distributed as dist -from paddle import fluid +from paddle import base class TestCollectiveAllgatherAPI(test_base.TestCollectiveAPIRunnerBase): @@ -24,7 +24,7 @@ def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank, indata=None): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tensor_list = [] # NOTE: this is a hack relying on an undocumented behavior that `to_tensor` uses uint16 to replace bfloat16 if indata.dtype == "bfloat16": diff --git a/test/collective/collective_allgather_object_api_dygraph.py b/test/collective/collective_allgather_object_api_dygraph.py index 1f3289e74ea69..734e7541954aa 100644 --- a/test/collective/collective_allgather_object_api_dygraph.py +++ b/test/collective/collective_allgather_object_api_dygraph.py @@ -15,7 +15,7 @@ import legacy_test.test_collective_api_base as test_base import paddle -from paddle import fluid +from paddle import base class TestCollectiveAllgatherObjectAPI(test_base.TestCollectiveAPIRunnerBase): @@ -23,7 +23,7 @@ def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank, indata=None): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): object_list = [] paddle.distributed.all_gather_object(object_list, indata) return object_list diff --git a/test/collective/collective_allreduce_api.py b/test/collective/collective_allreduce_api.py index b1f2770205518..21f4db05a35a4 100644 --- a/test/collective/collective_allreduce_api.py +++ b/test/collective/collective_allreduce_api.py @@ -19,8 +19,8 @@ import paddle import paddle.distributed as dist -from paddle import fluid, framework -from paddle.fluid import data_feeder +from paddle import base, framework +from paddle.base import data_feeder paddle.enable_static() @@ -71,7 +71,7 @@ def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, 
startup_program, rank): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = paddle.static.data( name="tindata", shape=[10, 1000], dtype='float32' ) @@ -86,7 +86,7 @@ def get_model_new( dtype='float32', reduce_type=str(dist.ReduceOp.SUM), ): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = paddle.static.data( name="tindata", shape=[10, 1000], dtype=dtype ) diff --git a/test/collective/collective_allreduce_api_dygraph.py b/test/collective/collective_allreduce_api_dygraph.py index 58f95e78e5586..9244aad430017 100644 --- a/test/collective/collective_allreduce_api_dygraph.py +++ b/test/collective/collective_allreduce_api_dygraph.py @@ -16,7 +16,7 @@ import paddle import paddle.distributed as dist -from paddle import fluid +from paddle import base class TestCollectiveAllreduceAPI(test_base.TestCollectiveAPIRunnerBase): @@ -24,7 +24,7 @@ def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank, indata=None): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): # NOTE: this is a hack relying on an undocumented behavior that `to_tensor` uses uint16 to replace bfloat16 if indata.dtype == "bfloat16": tindata = paddle.to_tensor(indata, "float32").cast("uint16") diff --git a/test/collective/collective_allreduce_new_group_api.py b/test/collective/collective_allreduce_new_group_api.py index 36836b8fec0c3..6897bfd31c81e 100644 --- a/test/collective/collective_allreduce_new_group_api.py +++ b/test/collective/collective_allreduce_new_group_api.py @@ -15,7 +15,7 @@ from test_collective_api_base import TestCollectiveAPIRunnerBase, runtime_main import paddle -from paddle import fluid +from paddle import base paddle.enable_static() @@ -25,7 +25,7 @@ def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = paddle.static.data( name="tindata", shape=[1, 10, 1000], dtype='float32' ) diff --git a/test/collective/collective_allreduce_op.py b/test/collective/collective_allreduce_op.py index 4c3bbd13b4e46..5200db7c7ed9d 100644 --- a/test/collective/collective_allreduce_op.py +++ b/test/collective/collective_allreduce_op.py @@ -18,8 +18,8 @@ ) import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -30,7 +30,7 @@ def __init__(self): def get_model(self, main_prog, startup_program): ring_id = 0 - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = paddle.static.data( name="tindata", shape=[-1, 10, 1000], dtype='float32' ) diff --git a/test/collective/collective_allreduce_op_wait.py b/test/collective/collective_allreduce_op_wait.py index f57c20c787c8a..02232fa31ba76 100644 --- a/test/collective/collective_allreduce_op_wait.py +++ b/test/collective/collective_allreduce_op_wait.py @@ -18,8 +18,8 @@ ) import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -30,7 +30,7 @@ def __init__(self): def get_model(self, main_prog, startup_program): ring_id = 0 - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = 
paddle.static.data( name="tindata", shape=[-1, 10, 1000], dtype='float32' ) diff --git a/test/collective/collective_alltoall_api.py b/test/collective/collective_alltoall_api.py index ab0de9b7c49f2..999cfeb4308ea 100644 --- a/test/collective/collective_alltoall_api.py +++ b/test/collective/collective_alltoall_api.py @@ -19,8 +19,8 @@ import paddle import paddle.distributed as dist -from paddle import fluid, framework -from paddle.fluid import data_feeder +from paddle import base, framework +from paddle.base import data_feeder paddle.enable_static() @@ -103,7 +103,7 @@ def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = paddle.static.data( name="tindata", shape=[-1, 10, 1000], dtype='float32' ) @@ -116,7 +116,7 @@ def get_model(self, main_prog, startup_program, rank): def get_model_new( self, main_prog, startup_program, rank, dtype=None, reduce_type=None ): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = paddle.static.data( name="tindata", shape=[-1, 10, 1000], dtype=dtype ) diff --git a/test/collective/collective_alltoall_api_dygraph.py b/test/collective/collective_alltoall_api_dygraph.py index 83f96883331b7..ef138514e2585 100644 --- a/test/collective/collective_alltoall_api_dygraph.py +++ b/test/collective/collective_alltoall_api_dygraph.py @@ -16,7 +16,7 @@ import paddle import paddle.distributed as dist -from paddle import fluid +from paddle import base class TestCollectiveAllToAllAPI(test_base.TestCollectiveAPIRunnerBase): @@ -24,7 +24,7 @@ def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank, indata=None): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): toutdata = [] # NOTE: this is a hack relying on an undocumented behavior that `to_tensor` uses uint16 to replace bfloat16 if indata.dtype == "bfloat16": diff --git a/test/collective/collective_alltoall_single_api_dygraph.py b/test/collective/collective_alltoall_single_api_dygraph.py index 24ef25b97a225..b96bdc271ce36 100644 --- a/test/collective/collective_alltoall_single_api_dygraph.py +++ b/test/collective/collective_alltoall_single_api_dygraph.py @@ -16,7 +16,7 @@ import paddle import paddle.distributed as dist -from paddle import fluid +from paddle import base class TestCollectiveAllToAllSingleAPI(test_base.TestCollectiveAPIRunnerBase): @@ -24,7 +24,7 @@ def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank, indata=None): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): # NOTE: this is a hack relying on an undocumented behavior that `to_tensor` uses uint16 to replace bfloat16 if indata.dtype == "bfloat16": tindata = paddle.to_tensor(indata, "float32").cast("uint16") diff --git a/test/collective/collective_barrier_api.py b/test/collective/collective_barrier_api.py index 0e857e037ee89..05450da9d7907 100644 --- a/test/collective/collective_barrier_api.py +++ b/test/collective/collective_barrier_api.py @@ -19,7 +19,7 @@ from test_collective_api_base import TestCollectiveAPIRunnerBase, runtime_main import paddle -from paddle import fluid +from paddle import base paddle.enable_static() @@ -29,7 +29,7 @@ def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, 
rank): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): paddle.distributed.barrier() return [] diff --git a/test/collective/collective_broadcast_api.py b/test/collective/collective_broadcast_api.py index 4e5db14eeaea9..c3f5109c7f304 100644 --- a/test/collective/collective_broadcast_api.py +++ b/test/collective/collective_broadcast_api.py @@ -18,8 +18,8 @@ ) import paddle -from paddle import fluid, framework -from paddle.fluid import data_feeder +from paddle import base, framework +from paddle.base import data_feeder paddle.enable_static() @@ -62,7 +62,7 @@ def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank, dtype='float32'): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = paddle.static.data( name="tindata", shape=[-1, 10, 1000], dtype=dtype ) @@ -73,7 +73,7 @@ def get_model(self, main_prog, startup_program, rank, dtype='float32'): def get_model_new( self, main_prog, startup_program, rank, dtype=None, reduce_type=None ): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = paddle.static.data( name="tindata", shape=[-1, 10, 1000], dtype=dtype ) diff --git a/test/collective/collective_broadcast_api_dygraph.py b/test/collective/collective_broadcast_api_dygraph.py index c17447c6716eb..e6f59a506a9bd 100644 --- a/test/collective/collective_broadcast_api_dygraph.py +++ b/test/collective/collective_broadcast_api_dygraph.py @@ -16,7 +16,7 @@ import paddle import paddle.distributed as dist -from paddle import fluid +from paddle import base class TestCollectiveBroadcastAPI(test_base.TestCollectiveAPIRunnerBase): @@ -24,7 +24,7 @@ def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank, indata=None): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): # NOTE: this is a hack relying on an undocumented behavior that `to_tensor` uses uint16 to replace bfloat16 if indata.dtype == "bfloat16": tindata = paddle.to_tensor(indata, "float32").cast("uint16") diff --git a/test/collective/collective_broadcast_object_list_api_dygraph.py b/test/collective/collective_broadcast_object_list_api_dygraph.py index 50b3050159428..0fb247b01d290 100644 --- a/test/collective/collective_broadcast_object_list_api_dygraph.py +++ b/test/collective/collective_broadcast_object_list_api_dygraph.py @@ -15,7 +15,7 @@ import test_collective_api_base as test_base import paddle.distributed as dist -from paddle import fluid +from paddle import base class TestCollectiveBroadcastObjectListAPI( @@ -25,7 +25,7 @@ def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank, indata=None): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): object_list = [indata] dist.broadcast_object_list(object_list, src=1) return object_list diff --git a/test/collective/collective_broadcast_op.py b/test/collective/collective_broadcast_op.py index 72e469ebd6b39..44adc62676f8a 100644 --- a/test/collective/collective_broadcast_op.py +++ b/test/collective/collective_broadcast_op.py @@ -18,8 +18,8 @@ ) import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -31,7 +31,7 @@ def __init__(self): def get_model(self, main_prog, 
startup_program): ring_id = 0 rootid = 1 - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = paddle.static.data( name="tindata", shape=[-1, 10, 1000], dtype='float32' ) diff --git a/test/collective/collective_concat_api.py b/test/collective/collective_concat_api.py index fab9711a91867..c349ad14aa883 100644 --- a/test/collective/collective_concat_api.py +++ b/test/collective/collective_concat_api.py @@ -18,8 +18,8 @@ ) import paddle -from paddle import fluid, framework -from paddle.fluid import data_feeder +from paddle import base, framework +from paddle.base import data_feeder paddle.enable_static() @@ -70,7 +70,7 @@ def get_model(self, main_prog, startup_program): def get_model_new( self, main_prog, startup_program, rank, dtype=None, reduce_type=None ): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = paddle.static.data( name="tindata", shape=[10, 1000], dtype=dtype ) diff --git a/test/collective/collective_concat_op.py b/test/collective/collective_concat_op.py index 40a41c47632d8..d139ff0b95277 100644 --- a/test/collective/collective_concat_op.py +++ b/test/collective/collective_concat_op.py @@ -18,8 +18,8 @@ ) import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -31,7 +31,7 @@ def __init__(self): def get_model(self, main_prog, startup_program): ring_id = 0 nranks = 2 - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = paddle.static.data( name="tindata", shape=[-1, 10, 1000], dtype='float32' ) diff --git a/test/collective/collective_gather_api_dygraph.py b/test/collective/collective_gather_api_dygraph.py index ddb6b7dcd40aa..229861785e9a9 100644 --- a/test/collective/collective_gather_api_dygraph.py +++ b/test/collective/collective_gather_api_dygraph.py @@ -16,7 +16,7 @@ import paddle import paddle.distributed as dist -from paddle import fluid +from paddle import base class TestCollectiveGatherAPI(test_base.TestCollectiveAPIRunnerBase): @@ -24,7 +24,7 @@ def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank, indata=None): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): gather_list = [] # NOTE: this is a hack relying on an undocumented behavior that `to_tensor` uses uint16 to replace bfloat16 if indata.dtype == "bfloat16": diff --git a/test/collective/collective_global_gather.py b/test/collective/collective_global_gather.py index 2601830a8dae0..674e3c8193f40 100644 --- a/test/collective/collective_global_gather.py +++ b/test/collective/collective_global_gather.py @@ -22,7 +22,7 @@ ) import paddle -from paddle import fluid +from paddle import base from paddle.distributed.utils import moe_utils paddle.enable_static() @@ -33,7 +33,7 @@ def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank, indata=None): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): seed = os.getpid() np.random.seed(seed) in_feat = 2 @@ -57,8 +57,8 @@ def get_model(self, main_prog, startup_program, rank, indata=None): return [output] def run_trainer(self, args): - train_prog = fluid.Program() - startup_prog = fluid.Program() + train_prog = base.Program() + startup_prog = base.Program() endpoints = 
args["endpoints"].split(",") rank = args["trainerid"] current_endpoint = args["currentendpoint"] @@ -66,22 +66,22 @@ def run_trainer(self, args): nranks = 2 if args['backend'] == 'nccl': device_id = int(os.getenv("FLAGS_selected_gpus", "0")) - place = fluid.CUDAPlace( + place = base.CUDAPlace( device_id - ) # if args.use_gpu else fluid.CPUPlace() + ) # if args.use_gpu else base.CPUPlace() elif args['backend'] == 'bkcl': device_id = int(os.getenv("FLAGS_selected_xpus", "0")) - place = fluid.XPUPlace(device_id) + place = base.XPUPlace(device_id) else: - place = fluid.CPUPlace() + place = base.CPUPlace() in_feat = 2 n_expert = 2 world_size = 2 tot_expert = n_expert * world_size - tmp_main_prog = fluid.Program() - with fluid.program_guard(tmp_main_prog, fluid.Program()): + tmp_main_prog = base.Program() + with base.program_guard(tmp_main_prog, base.Program()): local_expert_count = paddle.static.data( name="local_expert_count", shape=[tot_expert], dtype="int64" ) @@ -90,7 +90,7 @@ def run_trainer(self, args): paddle.split(local_expert_count, 2, axis=0), global_expert_count ) global_expert_count = paddle.concat(global_expert_count, axis=0) - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(startup_prog) np.random.seed(os.getpid()) local_expert_count = np.random.randint(1, 4, size=tot_expert).astype( diff --git a/test/collective/collective_global_gather_dygraph.py b/test/collective/collective_global_gather_dygraph.py index c816132f9ef90..4dbb3ee1e0bf1 100644 --- a/test/collective/collective_global_gather_dygraph.py +++ b/test/collective/collective_global_gather_dygraph.py @@ -21,7 +21,7 @@ ) import paddle -from paddle import fluid +from paddle import base from paddle.distributed.utils import moe_utils @@ -30,7 +30,7 @@ def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank, indata=None): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): seed = os.getpid() np.random.seed(seed) in_feat = 2 diff --git a/test/collective/collective_global_scatter.py b/test/collective/collective_global_scatter.py index e4388dd1df3df..c92dcd1631390 100644 --- a/test/collective/collective_global_scatter.py +++ b/test/collective/collective_global_scatter.py @@ -22,7 +22,7 @@ ) import paddle -from paddle import fluid +from paddle import base from paddle.distributed.utils import moe_utils paddle.enable_static() @@ -33,7 +33,7 @@ def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank, indata=None): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): seed = os.getpid() np.random.seed(seed) in_feat = 2 @@ -57,8 +57,8 @@ def get_model(self, main_prog, startup_program, rank, indata=None): return [output] def run_trainer(self, args): - train_prog = fluid.Program() - startup_prog = fluid.Program() + train_prog = base.Program() + startup_prog = base.Program() endpoints = args["endpoints"].split(",") rank = args["trainerid"] current_endpoint = args["currentendpoint"] @@ -66,14 +66,14 @@ def run_trainer(self, args): paddle.distributed.init_parallel_env() if args['backend'] == 'nccl': device_id = int(os.getenv("FLAGS_selected_gpus", "0")) - place = fluid.CUDAPlace( + place = base.CUDAPlace( device_id - ) # if args.use_gpu else fluid.CPUPlace() + ) # if args.use_gpu else base.CPUPlace() elif args['backend'] == 'bkcl': device_id = int(os.getenv("FLAGS_selected_xpus", "0")) - place = fluid.XPUPlace(device_id) + place = 
base.XPUPlace(device_id) else: - place = fluid.CPUPlace() + place = base.CPUPlace() np.random.seed(os.getpid()) in_feat = 2 n_expert = 2 @@ -88,7 +88,7 @@ def run_trainer(self, args): ) if args['static_mode']: result = self.get_model(train_prog, startup_prog, rank) - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(startup_prog) fetch_list = [] for elem in result: diff --git a/test/collective/collective_global_scatter_dygraph.py b/test/collective/collective_global_scatter_dygraph.py index 2e5001371fd47..0fc468d73484f 100644 --- a/test/collective/collective_global_scatter_dygraph.py +++ b/test/collective/collective_global_scatter_dygraph.py @@ -21,7 +21,7 @@ ) import paddle -from paddle import fluid +from paddle import base from paddle.distributed.utils import moe_utils @@ -30,7 +30,7 @@ def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank, indata=None): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): seed = os.getpid() np.random.seed(seed) in_feat = 2 diff --git a/test/collective/collective_identity_op.py b/test/collective/collective_identity_op.py index 5bee820fb2e9e..c56111da62ab5 100644 --- a/test/collective/collective_identity_op.py +++ b/test/collective/collective_identity_op.py @@ -18,8 +18,8 @@ ) import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -31,7 +31,7 @@ def __init__(self): def get_model(self, main_prog, startup_program): ring_id = 0 nranks = 2 - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = paddle.static.data( name="tindata", shape=[-1, 10, 1000], dtype='float32' ) diff --git a/test/collective/collective_isend_irecv_api_dygraph.py b/test/collective/collective_isend_irecv_api_dygraph.py index f26160b3f0444..61b3453ffd425 100644 --- a/test/collective/collective_isend_irecv_api_dygraph.py +++ b/test/collective/collective_isend_irecv_api_dygraph.py @@ -16,7 +16,7 @@ import paddle import paddle.distributed as dist -from paddle import fluid +from paddle import base class TestCollectiveIsendIrecvAPI(test_base.TestCollectiveAPIRunnerBase): @@ -24,7 +24,7 @@ def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank, indata=None): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): # NOTE: this is a hack relying on an undocumented behavior that `to_tensor` uses uint16 to replace bfloat16 if indata.dtype == "bfloat16": tindata = paddle.to_tensor(indata, "float32").cast("uint16") diff --git a/test/collective/collective_reduce_api.py b/test/collective/collective_reduce_api.py index cd3b35091414d..799a8502eee01 100644 --- a/test/collective/collective_reduce_api.py +++ b/test/collective/collective_reduce_api.py @@ -20,8 +20,8 @@ import paddle import paddle.distributed as dist -from paddle import fluid, framework -from paddle.fluid import data_feeder +from paddle import base, framework +from paddle.base import data_feeder paddle.enable_static() @@ -68,7 +68,7 @@ def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = paddle.static.data( name="tindata", shape=[-1, 10, 1000], dtype='float32' ) @@ -84,7 +84,7 @@ def get_model_new( 
dtype='float32', reduce_type=str(dist.ReduceOp.SUM), ): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = paddle.static.data( name="tindata", shape=[10, 1000], dtype=dtype ) diff --git a/test/collective/collective_reduce_api_dygraph.py b/test/collective/collective_reduce_api_dygraph.py index a3b765b184c93..2dbdfc41a2fe1 100644 --- a/test/collective/collective_reduce_api_dygraph.py +++ b/test/collective/collective_reduce_api_dygraph.py @@ -19,7 +19,7 @@ import paddle import paddle.distributed as dist -from paddle import fluid +from paddle import base class TestCollectiveReduceAPI(test_base.TestCollectiveAPIRunnerBase): @@ -27,7 +27,7 @@ def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank, indata=None): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): # NOTE: this is a hack relying on an undocumented behavior that `to_tensor` uses uint16 to replace bfloat16 if indata.dtype == "bfloat16": tindata = paddle.to_tensor(indata, "float32").cast("uint16") diff --git a/test/collective/collective_reduce_op.py b/test/collective/collective_reduce_op.py index f250839d6030e..1ce1143122ddc 100644 --- a/test/collective/collective_reduce_op.py +++ b/test/collective/collective_reduce_op.py @@ -18,8 +18,8 @@ ) import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -31,7 +31,7 @@ def __init__(self): def get_model(self, main_prog, startup_program): ring_id = 0 rootid = 1 - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = paddle.static.data( name="tindata", shape=[-1, 10, 1000], dtype='float32' ) diff --git a/test/collective/collective_reduce_op_calc_stream.py b/test/collective/collective_reduce_op_calc_stream.py index b7efdbe54cd94..99e4abb759964 100644 --- a/test/collective/collective_reduce_op_calc_stream.py +++ b/test/collective/collective_reduce_op_calc_stream.py @@ -18,8 +18,8 @@ ) import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -31,7 +31,7 @@ def __init__(self): def get_model(self, main_prog, startup_program): ring_id = 0 rootid = 1 - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = paddle.static.data( name="tindata", shape=[-1, 10, 1000], dtype='float32' ) diff --git a/test/collective/collective_reduce_scatter_api.py b/test/collective/collective_reduce_scatter_api.py index b1cec07badd90..9246303549402 100644 --- a/test/collective/collective_reduce_scatter_api.py +++ b/test/collective/collective_reduce_scatter_api.py @@ -18,7 +18,7 @@ ) import paddle -from paddle import fluid +from paddle import base paddle.enable_static() @@ -38,7 +38,7 @@ def get_model_new( dtype='float32', reduce_type=None, ): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = paddle.static.data( name="tindata", shape=[10, 1000], dtype=dtype ) diff --git a/test/collective/collective_reduce_scatter_api_dygraph.py b/test/collective/collective_reduce_scatter_api_dygraph.py index 6e741b8695866..f68c5ca651488 100644 --- a/test/collective/collective_reduce_scatter_api_dygraph.py +++ b/test/collective/collective_reduce_scatter_api_dygraph.py @@ -16,7 +16,7 @@ import 
paddle import paddle.distributed as dist -from paddle import fluid +from paddle import base class TestCollectiveReduceScatterAPI(test_base.TestCollectiveAPIRunnerBase): @@ -24,7 +24,7 @@ def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank, indata=None): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): # NOTE: this is a hack relying on an undocumented behavior that `to_tensor` uses uint16 to replace bfloat16 if indata.dtype == "bfloat16": tindata = paddle.to_tensor(indata, "float32").cast("uint16") diff --git a/test/collective/collective_scatter_api.py b/test/collective/collective_scatter_api.py index 4579d00fa9cff..17846321deacb 100644 --- a/test/collective/collective_scatter_api.py +++ b/test/collective/collective_scatter_api.py @@ -19,7 +19,7 @@ from test_collective_api_base import TestCollectiveAPIRunnerBase, runtime_main import paddle -from paddle import fluid +from paddle import base paddle.enable_static() @@ -29,7 +29,7 @@ def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = paddle.static.data( name="tindata", shape=[10, 1000], diff --git a/test/collective/collective_scatter_api_dygraph.py b/test/collective/collective_scatter_api_dygraph.py index 41d35be8f8e46..2861ca82fb17d 100644 --- a/test/collective/collective_scatter_api_dygraph.py +++ b/test/collective/collective_scatter_api_dygraph.py @@ -20,7 +20,7 @@ import paddle import paddle.distributed as dist -from paddle import fluid +from paddle import base class TestCollectiveScatterAPI(test_base.TestCollectiveAPIRunnerBase): @@ -28,7 +28,7 @@ def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank, indata=None): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): # NOTE: this is a hack relying on an undocumented behavior that `to_tensor` uses uint16 to replace bfloat16 if indata.dtype == "bfloat16": tindata = paddle.to_tensor(indata, "float32").cast("uint16") diff --git a/test/collective/collective_scatter_object_list_api_dygraph.py b/test/collective/collective_scatter_object_list_api_dygraph.py index 9c7810910d5a2..c58d9b385afef 100644 --- a/test/collective/collective_scatter_object_list_api_dygraph.py +++ b/test/collective/collective_scatter_object_list_api_dygraph.py @@ -15,7 +15,7 @@ import legacy_test.test_collective_api_base as test_base import paddle.distributed as dist -from paddle import fluid +from paddle import base class TestCollectiveScatterObjectListAPI(test_base.TestCollectiveAPIRunnerBase): @@ -23,7 +23,7 @@ def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank, indata=None): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): data_len = len(indata) // 2 in_object_list = [indata[:data_len], indata[data_len:]] out_object_list = [] diff --git a/test/collective/collective_scatter_op.py b/test/collective/collective_scatter_op.py index aa010c86724c7..8e5a06dc62852 100644 --- a/test/collective/collective_scatter_op.py +++ b/test/collective/collective_scatter_op.py @@ -18,8 +18,8 @@ ) import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -31,7 +31,7 @@ def 
__init__(self): def get_model(self, main_prog, startup_program): ring_id = 0 rootid = 1 - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = paddle.static.data( name="tindata", shape=[-1, 10, 1000], dtype='float32' ) diff --git a/test/collective/collective_sendrecv_api.py b/test/collective/collective_sendrecv_api.py index 9dae4b34801cb..b5fc01be7af15 100644 --- a/test/collective/collective_sendrecv_api.py +++ b/test/collective/collective_sendrecv_api.py @@ -18,8 +18,8 @@ ) import paddle -from paddle import fluid, framework -from paddle.fluid import data_feeder +from paddle import base, framework +from paddle.base import data_feeder paddle.enable_static() @@ -85,7 +85,7 @@ def recv_new(tensor, src, group=None, sync_op=True, dtype='float32'): 'peer': src, 'dynamic_shape': True, 'out_shape': tensor.shape, - 'dtype': fluid.framework.convert_np_dtype_to_dtype_(dtype), + 'dtype': base.framework.convert_np_dtype_to_dtype_(dtype), }, ) return None @@ -96,7 +96,7 @@ def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = paddle.static.data( name="tindata", shape=[10, 1000], @@ -116,7 +116,7 @@ def get_model_new( dtype='float32', reduce_type=None, ): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = paddle.static.data( name="tindata", shape=[10, 1000], diff --git a/test/collective/collective_sendrecv_api_dygraph.py b/test/collective/collective_sendrecv_api_dygraph.py index fe26cc19ba505..a68112e25bb2e 100644 --- a/test/collective/collective_sendrecv_api_dygraph.py +++ b/test/collective/collective_sendrecv_api_dygraph.py @@ -16,7 +16,7 @@ import paddle import paddle.distributed as dist -from paddle import fluid +from paddle import base class TestCollectiveSendRecvAPI(test_base.TestCollectiveAPIRunnerBase): @@ -24,7 +24,7 @@ def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank, indata=None): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): # NOTE: this is a hack relying on an undocumented behavior that `to_tensor` uses uint16 to replace bfloat16 if indata.dtype == "bfloat16": tindata = paddle.to_tensor(indata, "float32").cast("uint16") diff --git a/test/collective/collective_sendrecv_op.py b/test/collective/collective_sendrecv_op.py index 30a83f02efd7a..1a3c38cde9e00 100644 --- a/test/collective/collective_sendrecv_op.py +++ b/test/collective/collective_sendrecv_op.py @@ -18,7 +18,7 @@ ) import paddle -from paddle import fluid +from paddle import base paddle.enable_static() @@ -29,7 +29,7 @@ def __init__(self): def get_model(self, main_prog, startup_program): ring_id = self.global_ring_id - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = paddle.static.data( name="tindata", shape=[10, 1000], diff --git a/test/collective/collective_sendrecv_op_array.py b/test/collective/collective_sendrecv_op_array.py index fb7f0848c103b..e2ac44f14f6b6 100644 --- a/test/collective/collective_sendrecv_op_array.py +++ b/test/collective/collective_sendrecv_op_array.py @@ -19,7 +19,7 @@ ) import paddle -from paddle import fluid +from paddle import base paddle.enable_static() @@ -30,7 +30,7 @@ def __init__(self): def get_model(self, main_prog, 
startup_program): ring_id = self.global_ring_id - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = paddle.static.data( name="tindata", shape=[10, 1000], diff --git a/test/collective/collective_sendrecv_op_dynamic_shape.py b/test/collective/collective_sendrecv_op_dynamic_shape.py index d01a0b421dab7..209ff932eee76 100644 --- a/test/collective/collective_sendrecv_op_dynamic_shape.py +++ b/test/collective/collective_sendrecv_op_dynamic_shape.py @@ -18,7 +18,7 @@ ) import paddle -from paddle import fluid +from paddle import base paddle.enable_static() @@ -29,7 +29,7 @@ def __init__(self): def get_model(self, main_prog, startup_program): ring_id = self.global_ring_id - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = paddle.static.data( name="tindata", shape=[-1, 10, 1000], diff --git a/test/collective/collective_split_op.py b/test/collective/collective_split_op.py index bb64bfadc8649..f006617b51f6d 100644 --- a/test/collective/collective_split_op.py +++ b/test/collective/collective_split_op.py @@ -18,8 +18,8 @@ ) import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -31,7 +31,7 @@ def __init__(self): def get_model(self, main_prog, startup_program): ring_id = 0 nranks = 2 - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = paddle.static.data( name="tindata", shape=[-1, 10, 1000], dtype='float32' ) diff --git a/test/collective/column_parallel_linear_api.py b/test/collective/column_parallel_linear_api.py index ac30aace6dfc9..b433cca4c528e 100644 --- a/test/collective/column_parallel_linear_api.py +++ b/test/collective/column_parallel_linear_api.py @@ -19,7 +19,7 @@ ) import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet paddle.enable_static() @@ -30,7 +30,7 @@ def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): fleet.init(is_collective=True) np.random.seed(2020) np_array = np.random.rand(1000, 16) @@ -40,11 +40,11 @@ def get_model(self, main_prog, startup_program, rank): ) paddle.distributed.broadcast(data, src=0) if rank == 0: - param_attr = paddle.fluid.ParamAttr( + param_attr = paddle.base.ParamAttr( initializer=paddle.nn.initializer.Assign(np_array[:, 0:8]), ) else: - param_attr = paddle.fluid.ParamAttr( + param_attr = paddle.base.ParamAttr( initializer=paddle.nn.initializer.Assign(np_array[:, 8:16]), ) diff --git a/test/collective/fleet/auto_parallel_parallelizer.py b/test/collective/fleet/auto_parallel_parallelizer.py index acf0900333011..a5bfe090d6904 100755 --- a/test/collective/fleet/auto_parallel_parallelizer.py +++ b/test/collective/fleet/auto_parallel_parallelizer.py @@ -19,7 +19,7 @@ from paddle import nn, static, utils from paddle.distributed import fleet from paddle.distributed.fleet import auto -from paddle.fluid import core +from paddle.base import core paddle.enable_static() _global_parallel_strategy = None diff --git a/test/collective/fleet/c_comm_init_op.py b/test/collective/fleet/c_comm_init_op.py index 359266453364f..988c0fcc27954 100644 --- a/test/collective/fleet/c_comm_init_op.py +++ b/test/collective/fleet/c_comm_init_op.py @@ -16,7 +16,7 @@ import unittest import 
paddle -from paddle import fluid +from paddle import base from paddle.distributed.fleet.base.private_helper_function import ( wait_server_ready, ) @@ -31,20 +31,20 @@ def setUp(self): self.nranks = len(self.endpoints) self.rank = self.endpoints.index(self.current_endpoint) self.gpu_id = int(os.getenv("FLAGS_selected_gpus")) - self.place = fluid.CUDAPlace(self.gpu_id) - self.exe = fluid.Executor(self.place) + self.place = base.CUDAPlace(self.gpu_id) + self.exe = base.Executor(self.place) self.endpoints.remove(self.current_endpoint) self.other_endpoints = self.endpoints if self.rank == 0: wait_server_ready(self.other_endpoints) def test_specifying_devices(self): - program = fluid.Program() + program = base.Program() block = program.global_block() nccl_id_var = block.create_var( - name=fluid.unique_name.generate('nccl_id'), + name=base.unique_name.generate('nccl_id'), persistable=True, - type=fluid.core.VarDesc.VarType.RAW, + type=base.core.VarDesc.VarType.RAW, ) block.append_op( type='c_gen_nccl_id', diff --git a/test/collective/fleet/dist_mnist_gradient_merge.py b/test/collective/fleet/dist_mnist_gradient_merge.py index deeb3b32de857..f37220c8e75e7 100644 --- a/test/collective/fleet/dist_mnist_gradient_merge.py +++ b/test/collective/fleet/dist_mnist_gradient_merge.py @@ -20,14 +20,14 @@ from test_dist_base import TestDistRunnerBase, _insert_comm_op, runtime_main import paddle -from paddle import fluid +from paddle import base DTYPE = "float32" paddle.dataset.mnist.fetch() # Fix seed for test -fluid.default_startup_program().random_seed = 1 -fluid.default_main_program().random_seed = 1 +base.default_startup_program().random_seed = 1 +base.default_main_program().random_seed = 1 class TestDistMnist2x2(TestDistRunnerBase): @@ -51,7 +51,7 @@ def get_model(self, batch_size=2, single_device=False): input=predict, label=label, total=batch_size_tensor ) - inference_program = fluid.default_main_program().clone() + inference_program = base.default_main_program().clone() # Optimization opt = paddle.optimizer.Momentum(learning_rate=0.001, momentum=0.9) opt = paddle.incubate.optimizer.GradientMergeOptimizer(opt, 2) diff --git a/test/collective/fleet/dist_mnist_gradient_merge_raw_optimizer.py b/test/collective/fleet/dist_mnist_gradient_merge_raw_optimizer.py index 369e163c3528c..d44538ee99edd 100644 --- a/test/collective/fleet/dist_mnist_gradient_merge_raw_optimizer.py +++ b/test/collective/fleet/dist_mnist_gradient_merge_raw_optimizer.py @@ -22,7 +22,7 @@ from test_dist_base import TestDistRunnerBase, runtime_main import paddle -from paddle import fluid, nn +from paddle import base, nn from paddle.distributed import fleet @@ -32,7 +32,7 @@ def get_model(self, batch_size=2, single_device=False): paddle.seed(1) np.random.seed(1) - assert fluid.core.globals()['FLAGS_apply_pass_to_program'] + assert base.core.globals()['FLAGS_apply_pass_to_program'] strategy = fleet.DistributedStrategy() build_strategy = paddle.static.BuildStrategy() settings = { diff --git a/test/collective/fleet/dygraph_group_sharded_stage3.py b/test/collective/fleet/dygraph_group_sharded_stage3.py index bbe0884d982e4..fb2c2fec6bd3b 100644 --- a/test/collective/fleet/dygraph_group_sharded_stage3.py +++ b/test/collective/fleet/dygraph_group_sharded_stage3.py @@ -33,7 +33,7 @@ from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_utils import ( GroupShardedScaler, ) -from paddle.fluid import core +from paddle.base import core from paddle.nn import Linear epoch = 10 diff --git 
a/test/collective/fleet/dygraph_group_sharded_stage3_offload.py b/test/collective/fleet/dygraph_group_sharded_stage3_offload.py index b34f178aa8363..b680f343e3f25 100644 --- a/test/collective/fleet/dygraph_group_sharded_stage3_offload.py +++ b/test/collective/fleet/dygraph_group_sharded_stage3_offload.py @@ -24,7 +24,7 @@ from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_utils import ( GroupShardedScaler, ) -from paddle.fluid import core +from paddle.base import core from paddle.nn import Linear epoch = 10 diff --git a/test/collective/fleet/fused_attention_pass_with_mp.py b/test/collective/fleet/fused_attention_pass_with_mp.py index 71ca3ef252711..cdbebc11eec79 100644 --- a/test/collective/fleet/fused_attention_pass_with_mp.py +++ b/test/collective/fleet/fused_attention_pass_with_mp.py @@ -19,7 +19,7 @@ import paddle import paddle.nn.functional as F -from paddle import fluid +from paddle import base from paddle.distributed import fleet from paddle.distributed.passes import PassManager, new_pass @@ -116,8 +116,8 @@ def setUp(self): self.nranks = len(self.endpoints) self.rank = self.endpoints.index(self.current_endpoint) self.gpu_id = int(os.getenv("FLAGS_selected_gpus")) - self.place = fluid.CUDAPlace(self.gpu_id) - self.exe = fluid.Executor(self.place) + self.place = base.CUDAPlace(self.gpu_id) + self.exe = base.Executor(self.place) self.endpoints.remove(self.current_endpoint) self.other_endpoints = self.endpoints self.add_residual = True @@ -180,9 +180,9 @@ def get_rst(self, use_pass=False): startup_block = startup_prog.global_block() nccl_id_var = startup_block.create_var( - name=fluid.unique_name.generate('nccl_id'), + name=base.unique_name.generate('nccl_id'), persistable=True, - type=fluid.core.VarDesc.VarType.RAW, + type=base.core.VarDesc.VarType.RAW, ) startup_block.append_op( type='c_gen_nccl_id', diff --git a/test/collective/fleet/hybrid_parallel_inference_helper.py b/test/collective/fleet/hybrid_parallel_inference_helper.py index 5e118f0f67fe9..1e365c1822f42 100644 --- a/test/collective/fleet/hybrid_parallel_inference_helper.py +++ b/test/collective/fleet/hybrid_parallel_inference_helper.py @@ -58,12 +58,12 @@ def test_hybrid_parallel_inference_helper_mp1pp2(self): device = "gpu" with paddle.static.program_guard(main_program, startup_program): - with paddle.fluid.device_guard(f'{device}:0'): + with paddle.base.device_guard(f'{device}:0'): X = paddle.static.data( name='X', shape=[None, 2], dtype='float32' ) - with paddle.fluid.device_guard(f'{device}:all'): + with paddle.base.device_guard(f'{device}:all'): max_len = paddle.tensor.fill_constant( shape=[1], dtype="int64", value=2, force_cpu=False, name="n" ) @@ -87,12 +87,12 @@ def test_hybrid_parallel_inference_helper_mp1pp2(self): ) with while_op.block(): - with paddle.fluid.device_guard(f'{device}:all'): + with paddle.base.device_guard(f'{device}:all'): input = paddle.tensor.array_read(array=data, i=step_idx) paddle.increment(x=step_idx, value=1.0) paddle.tensor.array_write(input, i=step_idx, array=data) - with paddle.fluid.device_guard(f'{device}:0'): + with paddle.base.device_guard(f'{device}:0'): param_attr = paddle.ParamAttr( initializer=paddle.nn.initializer.Constant(1.0) ) @@ -104,7 +104,7 @@ def test_hybrid_parallel_inference_helper_mp1pp2(self): ) hidden1 = paddle.matmul(input, weight1) - with paddle.fluid.device_guard(f'{device}:1'): + with paddle.base.device_guard(f'{device}:1'): param_attr = paddle.ParamAttr( initializer=paddle.nn.initializer.Constant(2.0) ) @@ -122,15 +122,15 @@ def 
test_hybrid_parallel_inference_helper_mp1pp2(self): paddle.assign(paddle.less_than(x=step_idx, y=max_len), cond) paddle.assign(paddle.cast(cond, dtype="int32"), cond_int) - with paddle.fluid.device_guard(f'{device}:all'): + with paddle.base.device_guard(f'{device}:all'): # the code below must at end of while block and exists in device:all paddle.assign(paddle.cast(cond_int, dtype='bool'), cond) - with paddle.fluid.device_guard(f'{device}:all'): + with paddle.base.device_guard(f'{device}:all'): out = paddle.tensor.create_array(data.dtype) paddle.assign(data, out) - with paddle.fluid.device_guard(f'{device}:all'): + with paddle.base.device_guard(f'{device}:all'): # use a empty lod_tensor_array to clear lod_tensor_array paddle.assign(paddle.tensor.create_array(data.dtype), data) diff --git a/test/collective/fleet/hybrid_parallel_qat.py b/test/collective/fleet/hybrid_parallel_qat.py index 00bc0f746e761..484cfd168b530 100644 --- a/test/collective/fleet/hybrid_parallel_qat.py +++ b/test/collective/fleet/hybrid_parallel_qat.py @@ -19,7 +19,7 @@ import paddle import paddle.distributed as dist -from paddle import fluid, nn +from paddle import base, nn from paddle.distributed import fleet from paddle.distributed.utils.launch_utils import find_free_ports, get_cluster from paddle.quantization import ImperativeQuantAware @@ -328,8 +328,8 @@ def train(self, model_a, optimizer_a, model_b, optimizer_b): def test_mp_model_1(self): if ( - not fluid.core.is_compiled_with_cuda() - or fluid.core.get_cuda_device_count() == 0 + not base.core.is_compiled_with_cuda() + or base.core.get_cuda_device_count() == 0 ): return selected_gpus = get_gpus('0,1') @@ -344,8 +344,8 @@ def test_mp_model_1(self): def test_mp_model_2(self): if ( - not fluid.core.is_compiled_with_cuda() - or fluid.core.get_cuda_device_count() == 0 + not base.core.is_compiled_with_cuda() + or base.core.get_cuda_device_count() == 0 ): return selected_gpus = get_gpus('0,1') diff --git a/test/collective/fleet/parallel_dygraph_control_flow_same.py b/test/collective/fleet/parallel_dygraph_control_flow_same.py index 3fbc5fc35b60a..6b53194b12650 100644 --- a/test/collective/fleet/parallel_dygraph_control_flow_same.py +++ b/test/collective/fleet/parallel_dygraph_control_flow_same.py @@ -19,7 +19,7 @@ ) import paddle -from paddle.fluid.dygraph.base import to_variable +from paddle.base.dygraph.base import to_variable from paddle.nn import Linear np.random.seed(2021) diff --git a/test/collective/fleet/parallel_dygraph_no_sync.py b/test/collective/fleet/parallel_dygraph_no_sync.py index 5f2b9751a6733..4c76e4b4e4b51 100644 --- a/test/collective/fleet/parallel_dygraph_no_sync.py +++ b/test/collective/fleet/parallel_dygraph_no_sync.py @@ -25,7 +25,7 @@ import paddle import paddle.distributed as dist -from paddle import fluid +from paddle import base from paddle.nn import Linear seed = 90 @@ -68,15 +68,15 @@ def run_one_loop(self, model, optimizer, batch): return loss def run_trainer_func(self, args): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): device_id = int(os.getenv("FLAGS_selected_gpus", "0")) - place = fluid.CUDAPlace(device_id) + place = base.CUDAPlace(device_id) else: assert "Only support CUDAPlace for now." 
- with fluid.dygraph.guard(place): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + with base.dygraph.guard(place): + base.default_startup_program().random_seed = seed + base.default_main_program().random_seed = seed np.random.seed(seed) random.seed(seed) model, train_reader, opt = self.get_model() diff --git a/test/collective/fleet/parallel_dygraph_se_resnext.py b/test/collective/fleet/parallel_dygraph_se_resnext.py index c24e4e7ebef3d..e9ee2407a3346 100644 --- a/test/collective/fleet/parallel_dygraph_se_resnext.py +++ b/test/collective/fleet/parallel_dygraph_se_resnext.py @@ -18,8 +18,8 @@ from test_dist_base import TestParallelDyGraphRunnerBase, runtime_main import paddle -from paddle import fluid -from paddle.fluid.dygraph.base import to_variable +from paddle import base +from paddle.base.dygraph.base import to_variable from paddle.nn import Linear batch_size = 64 @@ -56,9 +56,9 @@ def optimizer_setting(params, parameter_list=None): lr = params["lr"] num_epochs = params["num_epochs"] - if fluid.in_dygraph_mode(): + if base.in_dygraph_mode(): optimizer = paddle.optimizer.Momentum( - learning_rate=fluid.layers.cosine_decay( + learning_rate=base.layers.cosine_decay( learning_rate=lr, step_each_epoch=step, epochs=num_epochs ), momentum=momentum_rate, diff --git a/test/collective/fleet/parallel_dygraph_sync_batch_norm.py b/test/collective/fleet/parallel_dygraph_sync_batch_norm.py index df501c337aae1..6fe091ad498b3 100644 --- a/test/collective/fleet/parallel_dygraph_sync_batch_norm.py +++ b/test/collective/fleet/parallel_dygraph_sync_batch_norm.py @@ -19,7 +19,7 @@ ) import paddle -from paddle.fluid.dygraph.base import to_variable +from paddle.base.dygraph.base import to_variable from paddle.nn import Conv2D, SyncBatchNorm diff --git a/test/collective/fleet/parallel_dygraph_transformer.py b/test/collective/fleet/parallel_dygraph_transformer.py index b92c14a11d40f..e3214b86e79b0 100644 --- a/test/collective/fleet/parallel_dygraph_transformer.py +++ b/test/collective/fleet/parallel_dygraph_transformer.py @@ -20,8 +20,8 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid.dygraph import to_variable +from paddle import base +from paddle.base.dygraph import to_variable from paddle.nn import Layer from paddle.optimizer.lr import NoamDecay @@ -245,10 +245,10 @@ def __init__(self, d_model, process_cmd, shape_len=None): if cmd == "n": self._layer_norm = paddle.nn.LayerNorm( normalized_shape=d_model, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(1.0) ), - bias_attr=fluid.ParamAttr( + bias_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(0.0) ), ) @@ -513,7 +513,7 @@ def __init__( src_vocab_size, src_emb_dim, sparse=is_sparse, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( name=word_emb_param_name, initializer=paddle.nn.initializer.Normal( 0.0, src_emb_dim**-0.5 @@ -529,7 +529,7 @@ def __init__( self._src_max_len, src_emb_dim, sparse=is_sparse, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( name=pos_enc_param_name, initializer=paddle.nn.initializer.Assign(pos_inp), trainable=False, diff --git a/test/collective/fleet/pipeline_mnist.py b/test/collective/fleet/pipeline_mnist.py index 8e3ababc443a0..fa9288b04e0ae 100644 --- a/test/collective/fleet/pipeline_mnist.py +++ b/test/collective/fleet/pipeline_mnist.py @@ -18,7 +18,7 @@ from legacy_test.test_dist_base import TestDistRunnerBase, runtime_main import 
paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet paddle.enable_static() @@ -27,8 +27,8 @@ paddle.dataset.mnist.fetch() # Fix seed for test -fluid.default_startup_program().random_seed = 1 -fluid.default_main_program().random_seed = 1 +base.default_startup_program().random_seed = 1 +base.default_main_program().random_seed = 1 def cnn_model(data): @@ -39,7 +39,7 @@ def cnn_model(data): pool_size=2, pool_stride=2, act="relu", - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), ) @@ -50,7 +50,7 @@ def cnn_model(data): pool_size=2, pool_stride=2, act="relu", - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), ) @@ -60,12 +60,12 @@ def cnn_model(data): param_shape = [reduce(lambda a, b: a * b, input_shape[1:], 1)] + [SIZE] scale = (2.0 / (param_shape[0] ** 2 * SIZE)) ** 0.5 - with fluid.device_guard("gpu:1"): + with base.device_guard("gpu:1"): predict = paddle.static.nn.fc( x=conv_pool_2, size=SIZE, activation="softmax", - weight_attr=fluid.param_attr.ParamAttr( + weight_attr=base.param_attr.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), ) @@ -74,7 +74,7 @@ def cnn_model(data): x=conv_pool_1, size=SIZE, activation="softmax", - weight_attr=fluid.param_attr.ParamAttr( + weight_attr=base.param_attr.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), ) @@ -85,7 +85,7 @@ def cnn_model(data): class TestDistMnist2x2(TestDistRunnerBase): def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): # Input data - with fluid.device_guard("gpu:0"): + with base.device_guard("gpu:0"): images = paddle.static.data( name='pixel', shape=[-1, 1, 28, 28], dtype=DTYPE ) @@ -94,7 +94,7 @@ def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): ) if dist_strategy: - data_loader = fluid.io.DataLoader.from_generator( + data_loader = base.io.DataLoader.from_generator( feed_list=[images, label], capacity=64, use_double_buffer=False, @@ -102,20 +102,20 @@ def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): ) # Train program predict = cnn_model(images) - with fluid.device_guard("gpu:1"): + with base.device_guard("gpu:1"): cost = paddle.nn.functional.cross_entropy( input=predict, label=label, reduction='none', use_softmax=False ) avg_cost = paddle.mean(x=cost) # Evaluator - with fluid.device_guard("gpu:1"): + with base.device_guard("gpu:1"): batch_size_tensor = paddle.tensor.create_tensor(dtype='int64') batch_acc = paddle.static.accuracy( input=predict, label=label, total=batch_size_tensor ) - inference_program = fluid.default_main_program().clone() + inference_program = base.default_main_program().clone() base_lr = self.lr passes = [30, 60, 80, 90] steps_per_pass = 10 diff --git a/test/collective/fleet/pipeline_mnist_multi_device.py b/test/collective/fleet/pipeline_mnist_multi_device.py index 6baac996f8833..06e554f281473 100644 --- a/test/collective/fleet/pipeline_mnist_multi_device.py +++ b/test/collective/fleet/pipeline_mnist_multi_device.py @@ -18,7 +18,7 @@ from legacy_test.test_dist_base import TestDistRunnerBase, runtime_main import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet paddle.enable_static() @@ -27,8 +27,8 @@ paddle.dataset.mnist.fetch() # Fix seed for test -fluid.default_startup_program().random_seed = 1 -fluid.default_main_program().random_seed = 1 +base.default_startup_program().random_seed = 1 
+base.default_main_program().random_seed = 1 def cnn_model(data): @@ -39,7 +39,7 @@ def cnn_model(data): pool_size=2, pool_stride=2, act="relu", - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), ) @@ -50,7 +50,7 @@ def cnn_model(data): pool_size=2, pool_stride=2, act="relu", - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), ) @@ -60,12 +60,12 @@ def cnn_model(data): param_shape = [reduce(lambda a, b: a * b, input_shape[1:], 1)] + [SIZE] scale = (2.0 / (param_shape[0] ** 2 * SIZE)) ** 0.5 - with fluid.device_guard("gpu:1"): + with base.device_guard("gpu:1"): predict = paddle.static.nn.fc( x=conv_pool_2, size=SIZE, activation="softmax", - weight_attr=fluid.param_attr.ParamAttr( + weight_attr=base.param_attr.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), ) @@ -74,7 +74,7 @@ def cnn_model(data): x=conv_pool_1, size=SIZE, activation="softmax", - weight_attr=fluid.param_attr.ParamAttr( + weight_attr=base.param_attr.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), ) @@ -85,7 +85,7 @@ def cnn_model(data): class TestDistMnist2x2(TestDistRunnerBase): def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): # Input data - with fluid.device_guard("gpu:0"): + with base.device_guard("gpu:0"): images = paddle.static.data( name='pixel', shape=[-1, 1, 28, 28], dtype=DTYPE ) @@ -94,7 +94,7 @@ def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): ) if dist_strategy: - data_loader = fluid.io.DataLoader.from_generator( + data_loader = base.io.DataLoader.from_generator( feed_list=[images, label], capacity=64, use_double_buffer=False, @@ -102,20 +102,20 @@ def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): ) # Train program predict = cnn_model(images) - with fluid.device_guard("gpu:1"): + with base.device_guard("gpu:1"): cost = paddle.nn.functional.cross_entropy( input=predict, label=label, reduction='none', use_softmax=False ) avg_cost = paddle.mean(x=cost) # Evaluator - with fluid.device_guard("gpu:1"): + with base.device_guard("gpu:1"): batch_size_tensor = paddle.tensor.create_tensor(dtype='int64') batch_acc = paddle.static.accuracy( input=predict, label=label, total=batch_size_tensor ) - inference_program = fluid.default_main_program().clone() + inference_program = base.default_main_program().clone() base_lr = self.lr passes = [30, 60, 80, 90] steps_per_pass = 10 diff --git a/test/collective/fleet/pipeline_mnist_one_device.py b/test/collective/fleet/pipeline_mnist_one_device.py index 3d84414829580..a106c9b1098d5 100644 --- a/test/collective/fleet/pipeline_mnist_one_device.py +++ b/test/collective/fleet/pipeline_mnist_one_device.py @@ -18,7 +18,7 @@ from legacy_test.test_dist_base import TestDistRunnerBase, runtime_main import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet paddle.enable_static() @@ -27,8 +27,8 @@ paddle.dataset.mnist.fetch() # Fix seed for test -fluid.default_startup_program().random_seed = 1 -fluid.default_main_program().random_seed = 1 +base.default_startup_program().random_seed = 1 +base.default_main_program().random_seed = 1 def cnn_model(data): @@ -39,7 +39,7 @@ def cnn_model(data): pool_size=2, pool_stride=2, act="relu", - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), ) @@ -50,7 +50,7 @@ def cnn_model(data): pool_size=2, pool_stride=2, act="relu", - 
param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), ) @@ -64,7 +64,7 @@ def cnn_model(data): x=conv_pool_2, size=SIZE, activation="softmax", - weight_attr=fluid.param_attr.ParamAttr( + weight_attr=base.param_attr.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), ) @@ -77,7 +77,7 @@ def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): device_id = 0 if dist_strategy: fleet.init(is_collective=True) - with fluid.device_guard("gpu:0"): + with base.device_guard("gpu:0"): images = paddle.static.data( name='pixel', shape=[-1, 1, 28, 28], dtype=DTYPE ) @@ -86,7 +86,7 @@ def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): ) if dist_strategy: - data_loader = fluid.io.DataLoader.from_generator( + data_loader = base.io.DataLoader.from_generator( feed_list=[images, label], capacity=64, use_double_buffer=False, @@ -94,20 +94,20 @@ def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): ) # Train program predict = cnn_model(images) - with fluid.device_guard("gpu:0"): + with base.device_guard("gpu:0"): cost = paddle.nn.functional.cross_entropy( input=predict, label=label, reduction='none', use_softmax=False ) avg_cost = paddle.mean(x=cost) # Evaluator - with fluid.device_guard("gpu:0"): + with base.device_guard("gpu:0"): batch_size_tensor = paddle.tensor.create_tensor(dtype='int64') batch_acc = paddle.static.accuracy( input=predict, label=label, total=batch_size_tensor ) - inference_program = fluid.default_main_program().clone() + inference_program = base.default_main_program().clone() base_lr = self.lr passes = [30, 60, 80, 90] steps_per_pass = 10 diff --git a/test/collective/fleet/static_model_parallel_by_col.py b/test/collective/fleet/static_model_parallel_by_col.py index f481e72f0b8e9..3a161298fee33 100644 --- a/test/collective/fleet/static_model_parallel_by_col.py +++ b/test/collective/fleet/static_model_parallel_by_col.py @@ -16,7 +16,7 @@ from legacy_test.test_dist_base import TestDistRunnerBase, runtime_main import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet paddle.enable_static() @@ -27,8 +27,8 @@ OUT_SIZE = 2 * MODEL_PARALLEL_SIZE # Fix seed for test -# fluid.default_startup_program().random_seed = 1 -# fluid.default_main_program().random_seed = 1 +# base.default_startup_program().random_seed = 1 +# base.default_main_program().random_seed = 1 def get_param_attr(weight, bias): @@ -76,7 +76,7 @@ def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): ) if dist_strategy: - data_loader = fluid.io.DataLoader.from_generator( + data_loader = base.io.DataLoader.from_generator( feed_list=[data_in], capacity=64, use_double_buffer=False, diff --git a/test/collective/fleet/static_model_parallel_by_row.py b/test/collective/fleet/static_model_parallel_by_row.py index 93c76ea71afb4..94e1b56acc9fe 100644 --- a/test/collective/fleet/static_model_parallel_by_row.py +++ b/test/collective/fleet/static_model_parallel_by_row.py @@ -16,7 +16,7 @@ from legacy_test.test_dist_base import TestDistRunnerBase, runtime_main import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet paddle.enable_static() @@ -27,8 +27,8 @@ OUT_SIZE = 2 * MODEL_PARALLEL_SIZE # Fix seed for test -# fluid.default_startup_program().random_seed = 1 -# fluid.default_main_program().random_seed = 1 +# base.default_startup_program().random_seed = 1 +# base.default_main_program().random_seed = 1 def 
get_param_attr(weight, bias): @@ -80,7 +80,7 @@ def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): ) if dist_strategy: - data_loader = fluid.io.DataLoader.from_generator( + data_loader = base.io.DataLoader.from_generator( feed_list=[data_in], capacity=64, use_double_buffer=False, diff --git a/test/collective/fleet/static_model_parallel_embedding.py b/test/collective/fleet/static_model_parallel_embedding.py index c762b1c960740..797a8a4f690ee 100644 --- a/test/collective/fleet/static_model_parallel_embedding.py +++ b/test/collective/fleet/static_model_parallel_embedding.py @@ -16,7 +16,7 @@ from legacy_test.test_dist_base import TestDistRunnerBase, runtime_main import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet paddle.enable_static() @@ -27,8 +27,8 @@ OUT_SIZE = 2 * MODEL_PARALLEL_SIZE # Fix seed for test -# fluid.default_startup_program().random_seed = 1 -# fluid.default_main_program().random_seed = 1 +# base.default_startup_program().random_seed = 1 +# base.default_main_program().random_seed = 1 def create_model(data, rank): @@ -70,7 +70,7 @@ def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): ) if dist_strategy: - data_loader = fluid.io.DataLoader.from_generator( + data_loader = base.io.DataLoader.from_generator( feed_list=[data_in], capacity=64, use_double_buffer=False, diff --git a/test/collective/fleet/test_auto_checkpoint.py b/test/collective/fleet/test_auto_checkpoint.py index f036d039c9063..ccb2bfb41a90d 100644 --- a/test/collective/fleet/test_auto_checkpoint.py +++ b/test/collective/fleet/test_auto_checkpoint.py @@ -18,9 +18,9 @@ from auto_checkpoint_utils import AutoCheckpointBase, get_logger import paddle -import paddle.fluid.incubate.checkpoint.auto_checkpoint as acp +import paddle.base.incubate.checkpoint.auto_checkpoint as acp from paddle.distributed.fleet.utils.fs import HDFSClient, LocalFS -from paddle.fluid.incubate.checkpoint.checkpoint_saver import PaddleModel +from paddle.base.incubate.checkpoint.checkpoint_saver import PaddleModel paddle.enable_static() logger = get_logger() diff --git a/test/collective/fleet/test_auto_checkpoint_dist_basic.py b/test/collective/fleet/test_auto_checkpoint_dist_basic.py index f2f0035ab9cb2..f9625b05f052f 100644 --- a/test/collective/fleet/test_auto_checkpoint_dist_basic.py +++ b/test/collective/fleet/test_auto_checkpoint_dist_basic.py @@ -19,8 +19,8 @@ from test_auto_checkpoint import AutoCheckPointACLBase import paddle -import paddle.fluid.incubate.checkpoint.auto_checkpoint as acp -from paddle import fluid +import paddle.base.incubate.checkpoint.auto_checkpoint as acp +from paddle import base from paddle.distributed.fleet.utils.fs import HDFSClient, LocalFS from paddle.incubate.distributed.fleet import role_maker from paddle.incubate.distributed.fleet.collective import fleet @@ -76,7 +76,7 @@ def test_distributed_basic(self): role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) - with fluid.program_guard(main_prog, startup_prog): + with base.program_guard(main_prog, startup_prog): dist_optimizer = fleet.distributed_optimizer(optimizer) dist_optimizer.minimize(loss) diff --git a/test/collective/fleet/test_auto_checkpoint_multiple.py b/test/collective/fleet/test_auto_checkpoint_multiple.py index 36421a80e321c..a243ad0dc43ed 100644 --- a/test/collective/fleet/test_auto_checkpoint_multiple.py +++ b/test/collective/fleet/test_auto_checkpoint_multiple.py @@ -19,7 +19,7 @@ from test_auto_checkpoint import AutoCheckPointACLBase import 
paddle -import paddle.fluid.incubate.checkpoint.auto_checkpoint as acp +import paddle.base.incubate.checkpoint.auto_checkpoint as acp from paddle.distributed.fleet.utils.fs import HDFSClient, LocalFS paddle.enable_static() diff --git a/test/collective/fleet/test_communicator_half_async.py b/test/collective/fleet/test_communicator_half_async.py index ae6d02a24d589..25e5302fb444f 100644 --- a/test/collective/fleet/test_communicator_half_async.py +++ b/test/collective/fleet/test_communicator_half_async.py @@ -20,7 +20,7 @@ import numpy import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet from paddle.distributed.fleet.base import role_maker @@ -57,8 +57,8 @@ def run_pserver(self, role, strategy): fleet.run_server() def run_trainer(self, role, strategy): - place = fluid.core.CPUPlace() - exe = fluid.Executor(place) + place = base.core.CPUPlace() + exe = base.Executor(place) fleet.init(role) avg_cost, x, y = self.net() @@ -70,7 +70,7 @@ def run_trainer(self, role, strategy): fleet.init_worker() train_reader = paddle.batch(self.fake_reader(), batch_size=24) - feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) + feeder = base.DataFeeder(place=place, feed_list=[x, y]) for batch_id, data in enumerate(train_reader()): exe.run( @@ -116,7 +116,7 @@ def test_communicator(self): from test_communicator_half_async import TestCommunicatorHalfAsyncEnd2End import paddle -import paddle.fluid as fluid +import paddle.base as base import paddle.distributed.fleet as fleet import paddle.distributed.fleet.base.role_maker as role_maker diff --git a/test/collective/fleet/test_dgc_momentum_op.py b/test/collective/fleet/test_dgc_momentum_op.py index 62106d4c0f8d6..90152cff3918d 100644 --- a/test/collective/fleet/test_dgc_momentum_op.py +++ b/test/collective/fleet/test_dgc_momentum_op.py @@ -17,8 +17,8 @@ import numpy as np from op import Operator -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestDGCMomentumOp1(unittest.TestCase): @@ -28,7 +28,7 @@ def get_tensor(self, name, value, place=None): return name, tensor def setup(self, place, step=0.0): - self.scope = fluid.global_scope() + self.scope = base.global_scope() self.place = place print("place:", place) diff --git a/test/collective/fleet/test_dgc_op.py b/test/collective/fleet/test_dgc_op.py index 9905719e1b4d7..2e8488cc870b3 100644 --- a/test/collective/fleet/test_dgc_op.py +++ b/test/collective/fleet/test_dgc_op.py @@ -17,8 +17,8 @@ import numpy as np from op import Operator -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core g_array_size = 102400 @@ -28,7 +28,7 @@ def setup(self, place, array_size=g_array_size): size = array_size np.random.seed(5) # fix seed - self.scope = fluid.global_scope() + self.scope = base.global_scope() self.place = place print("place:", place) diff --git a/test/collective/fleet/test_dgc_optimizer.py b/test/collective/fleet/test_dgc_optimizer.py index e356f842c9ea6..d2970e66d5bfa 100644 --- a/test/collective/fleet/test_dgc_optimizer.py +++ b/test/collective/fleet/test_dgc_optimizer.py @@ -16,7 +16,7 @@ import paddle from paddle import regularizer -from paddle.fluid import framework +from paddle.base import framework from paddle.nn import clip paddle.enable_static() diff --git a/test/collective/fleet/test_dist_mnist_dgc_nccl.py b/test/collective/fleet/test_dist_mnist_dgc_nccl.py index 4ef20da401263..1bc335a96e6fd 100644 --- 
a/test/collective/fleet/test_dist_mnist_dgc_nccl.py +++ b/test/collective/fleet/test_dist_mnist_dgc_nccl.py @@ -50,9 +50,9 @@ def _setup_config(self): self._use_dgc = True def test_dist_train(self): - from paddle import fluid + from paddle import base - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( os.path.abspath("../../legacy_test/dist_mnist_dgc.py"), delta=1e-5, @@ -61,9 +61,9 @@ def test_dist_train(self): ) def tearDown(self): - from paddle import fluid + from paddle import base - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): log_file = os.path.join( self.temp_dir.name, 'test_dist_mnist_dgc_nccl_tr0_err.log' ) diff --git a/test/collective/fleet/test_dist_mnist_gradient_merge.py b/test/collective/fleet/test_dist_mnist_gradient_merge.py index fb24cc910f6de..9152ecb8b6ec5 100644 --- a/test/collective/fleet/test_dist_mnist_gradient_merge.py +++ b/test/collective/fleet/test_dist_mnist_gradient_merge.py @@ -17,7 +17,7 @@ from legacy_test.test_dist_base import TestDistBase -from paddle import fluid +from paddle import base flag_name = os.path.splitext(__file__)[0] @@ -30,7 +30,7 @@ def _setup_config(self): self._nccl2_reduce_layer = True def test_dist_train(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( "dist_mnist_gradient_merge.py", delta=1e-5, @@ -47,7 +47,7 @@ def _setup_config(self): self._fuse_all_reduce = False def test_dist_train(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( "dist_mnist_gradient_merge.py", delta=1e-5, @@ -67,7 +67,7 @@ def enable_avg(self): return False def test_dist_train(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): avg = str(self.enable_avg()) log_name = flag_name + "_raw_optimizer_gm_avg_" + avg self.check_with_place( diff --git a/test/collective/fleet/test_dist_se_resnext_dgc.py b/test/collective/fleet/test_dist_se_resnext_dgc.py index 8d39089d59763..e0ff1f607ef58 100644 --- a/test/collective/fleet/test_dist_se_resnext_dgc.py +++ b/test/collective/fleet/test_dist_se_resnext_dgc.py @@ -30,9 +30,9 @@ def _setup_config(self): @unittest.skip(reason="Skip unstable ci") def test_dist_train(self): - from paddle import fluid + from paddle import base - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( os.path.abspath("../../dist_se_resnext.py"), delta=30, diff --git a/test/collective/fleet/test_distributed_strategy.py b/test/collective/fleet/test_distributed_strategy.py index 4d76d7d3ad00e..337efe6d1e028 100644 --- a/test/collective/fleet/test_distributed_strategy.py +++ b/test/collective/fleet/test_distributed_strategy.py @@ -16,7 +16,7 @@ import unittest import paddle -from paddle import fluid +from paddle import base from paddle.distributed.transpiler.distribute_transpiler import ( DistributeTranspilerConfig, ServerRuntimeConfig, @@ -82,8 +82,8 @@ def test_geo_strategy(self): self.assertEqual(strategy._program_config.geo_sgd_need_push_nums, 5) self.assertEqual(strategy._build_strategy.async_mode, True) - # test set_build_strategy using fluid.BuildStrategy - build_strategy_class = fluid.BuildStrategy() + # test set_build_strategy using base.BuildStrategy + build_strategy_class = base.BuildStrategy() build_strategy_class.memory_optimize = False strategy.set_build_strategy(build_strategy_class) build_strategy = strategy.get_build_strategy() @@ -160,8 +160,8 @@ def 
test_async_strategy(self): trainer_runtime_config_illegal, ) - # test set_execute_strategy using fluid.ExecutionStrategy - exec_strategy_class = fluid.ExecutionStrategy() + # test set_execute_strategy using base.ExecutionStrategy + exec_strategy_class = base.ExecutionStrategy() exec_strategy_class.num_threads = 4 strategy.set_execute_strategy(exec_strategy_class) exec_strategy = strategy.get_execute_strategy() diff --git a/test/collective/fleet/test_fleet_amp_init.py b/test/collective/fleet/test_fleet_amp_init.py index e7263d1cf990b..e4815f5cd4d9f 100644 --- a/test/collective/fleet/test_fleet_amp_init.py +++ b/test/collective/fleet/test_fleet_amp_init.py @@ -18,7 +18,7 @@ import paddle import paddle.nn.functional as F -from paddle import fluid +from paddle import base from paddle.distributed import fleet from paddle.distributed.fleet.base import role_maker @@ -45,7 +45,7 @@ def mlp(input_x, input_y, hid_dim=128, label_dim=2): class TestFleetAMPInit(unittest.TestCase): def test_fleet_amp_init(self): - if not fluid.core.is_compiled_with_cuda(): + if not base.core.is_compiled_with_cuda(): return main_program = paddle.static.Program() @@ -89,7 +89,7 @@ def test_fleet_amp_init(self): ) def test_fleet_amp_meta_optimizer_init(self): - if not fluid.core.is_compiled_with_cuda(): + if not base.core.is_compiled_with_cuda(): return main_program = paddle.static.Program() diff --git a/test/collective/fleet/test_fleet_amp_meta_optimizer.py b/test/collective/fleet/test_fleet_amp_meta_optimizer.py index 75a7f445eea2b..9d9c56d7cadc1 100644 --- a/test/collective/fleet/test_fleet_amp_meta_optimizer.py +++ b/test/collective/fleet/test_fleet_amp_meta_optimizer.py @@ -17,7 +17,7 @@ from fleet_meta_optimizer_base import TestFleetMetaOptimizer import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet from paddle.distributed.fleet.base import role_maker from paddle.distributed.fleet.meta_optimizers import AMPOptimizer @@ -28,7 +28,7 @@ class TestFleetAMPOptimizer(TestFleetMetaOptimizer): def test_amp_optimizer_backward(self): """test amp optimizer backward""" - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) opt = paddle.optimizer.Momentum(learning_rate=0.001, momentum=0.9) @@ -45,7 +45,7 @@ def test_amp_optimizer_backward(self): def test_amp_optimizer_backward_gradients(self): """test amp optimizer backward + gradients""" - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) opt = paddle.optimizer.Momentum(learning_rate=0.001, momentum=0.9) @@ -55,7 +55,7 @@ def test_amp_optimizer_backward_gradients(self): role = role_maker.PaddleCloudRoleMaker(is_collective=True) opt._set_basic_info(avg_cost, role, opt, strategy) params_grads = opt.backward(avg_cost, startup_prog) - with fluid.program_guard(train_prog, startup_prog): + with base.program_guard(train_prog, startup_prog): opt.apply_gradients(params_grads) ops = [op.type for op in avg_cost.block.ops] @@ -64,7 +64,7 @@ def test_amp_optimizer_backward_gradients(self): def test_amp_optimizer_backward_optimize(self): """test amp optimizer backward + optimizer""" - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) opt = 
paddle.optimizer.Momentum(learning_rate=0.001, momentum=0.9) @@ -82,7 +82,7 @@ def test_amp_optimizer_backward_optimize(self): def test_amp_optimizer(self): """test amp""" - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'amp') self.optimizer(avg_cost, strategy, train_prog, startup_prog) @@ -93,14 +93,14 @@ def test_amp_optimizer(self): def test_pure_fp16_optimizer(self): """test pure fp16""" - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'pure_fp16') self.optimizer(avg_cost, strategy, train_prog, startup_prog) params = train_prog.all_parameters() for param in train_prog.all_parameters(): - self.assertEqual(param.dtype, fluid.core.VarDesc.VarType.FP16) + self.assertEqual(param.dtype, base.core.VarDesc.VarType.FP16) ops = [op.type for op in avg_cost.block.ops] self.assertIn('cast', ops) @@ -108,7 +108,7 @@ def test_pure_fp16_optimizer(self): def test_amp_distributed_optimizer(self): """test amp when distributed""" - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'amp') self.optimizer(avg_cost, strategy, train_prog, startup_prog) @@ -124,7 +124,7 @@ def test_amp_distributed_optimizer(self): def test_amp_recompute_optimizer(self): """test amp + recompute""" - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'amp') self.set_strategy(strategy, 'recompute') @@ -144,7 +144,7 @@ def test_amp_recompute_optimizer(self): def test_amp_recompute_lars_optimizer(self): """test amp + recompute""" - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'amp') self.set_strategy(strategy, 'recompute') @@ -167,7 +167,7 @@ def test_amp_recompute_lars_optimizer(self): self.assertIn('lars_momentum', ops) def test_amp_recompute_lamb_optimizer(self): - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'amp') self.set_strategy(strategy, 'recompute') diff --git a/test/collective/fleet/test_fleet_checkpoint.py b/test/collective/fleet/test_fleet_checkpoint.py index 4b86e6d57fd7a..79c221fd152ac 100644 --- a/test/collective/fleet/test_fleet_checkpoint.py +++ b/test/collective/fleet/test_fleet_checkpoint.py @@ -16,10 +16,10 @@ import unittest import paddle -from paddle import fluid +from paddle import base from paddle.distributed.fleet.utils.fs import HDFSClient, LocalFS -from paddle.fluid.incubate.checkpoint.auto_checkpoint import ExeTrainStatus -from paddle.fluid.incubate.checkpoint.checkpoint_saver import CheckpointSaver +from paddle.base.incubate.checkpoint.auto_checkpoint import ExeTrainStatus +from paddle.base.incubate.checkpoint.checkpoint_saver import CheckpointSaver from paddle.incubate.distributed.fleet import role_maker from paddle.incubate.distributed.fleet.collective import fleet @@ -39,8 +39,8 
@@ def _test_checkpoint(self, fs, dir_path): name='img', shape=[None, 28, 28], dtype='float32' ) label = paddle.static.data(name='label', shape=[None, 1], dtype='int64') - feeder = fluid.DataFeeder( - feed_list=[image, label], place=fluid.CPUPlace() + feeder = base.DataFeeder( + feed_list=[image, label], place=base.CPUPlace() ) predict = paddle.static.nn.fc(x=image, size=10, activation='softmax') loss = paddle.nn.functional.cross_entropy( @@ -52,8 +52,8 @@ def _test_checkpoint(self, fs, dir_path): dist_optimizer = fleet.distributed_optimizer(optimizer) dist_optimizer.minimize(avg_loss) - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) + exe = base.Executor(base.CPUPlace()) + exe.run(base.default_startup_program()) status = ExeTrainStatus() status.epoch_no = 2 diff --git a/test/collective/fleet/test_fleet_dgc_meta_optimizer.py b/test/collective/fleet/test_fleet_dgc_meta_optimizer.py index 057e0b0d6c509..feb4ff0a77e0e 100755 --- a/test/collective/fleet/test_fleet_dgc_meta_optimizer.py +++ b/test/collective/fleet/test_fleet_dgc_meta_optimizer.py @@ -18,7 +18,7 @@ from fleet_meta_optimizer_base import TestFleetMetaOptimizer import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet from paddle.distributed.fleet.base import role_maker from paddle.distributed.fleet.meta_optimizers import DGCOptimizer @@ -29,7 +29,7 @@ class TestFleetDGCOptimizer(TestFleetMetaOptimizer): def test_dgc_optimizer_backward(self): """test dgc optimizer backward""" - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'dgc') @@ -44,7 +44,7 @@ def test_dgc_optimizer_backward(self): def test_dgc_optimizer_gradients(self): """test dgc optimizer backward + gradients""" - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'dgc') @@ -53,7 +53,7 @@ def test_dgc_optimizer_gradients(self): role = role_maker.PaddleCloudRoleMaker(is_collective=True) dgc_opt._set_basic_info(avg_cost, role, opt, strategy) params_grads = dgc_opt.backward(avg_cost, startup_prog) - with fluid.program_guard(train_prog, startup_prog): + with base.program_guard(train_prog, startup_prog): dgc_opt.apply_gradients(params_grads) ops = [op.type for op in avg_cost.block.ops] @@ -62,7 +62,7 @@ def test_dgc_optimizer_gradients(self): def test_dgc_optimizer_optimize(self): """test dgc optimizer backward + optimize""" - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'dgc') @@ -78,7 +78,7 @@ def test_dgc_optimizer_optimize(self): self.assertIn('dgc_momentum', ops) def test_dgc_optimizer(self): - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'dgc') self.optimizer(avg_cost, strategy, train_prog, startup_prog) @@ -88,7 +88,7 @@ def test_dgc_optimizer(self): self.assertIn('dgc_momentum', ops) def test_dgc_not_apply_with_adam(self): - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() avg_cost, strategy = 
self.net(train_prog, startup_prog) self.set_strategy(strategy, 'dgc') self.optimizer(avg_cost, strategy, train_prog, startup_prog, 'adam') @@ -101,7 +101,7 @@ def test_dgc_not_apply_with_one_worker(self): os.environ["PADDLE_TRAINER_ID"] = "0" os.environ["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001" - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'dgc') self.optimizer(avg_cost, strategy, train_prog, startup_prog) @@ -111,7 +111,7 @@ def test_dgc_not_apply_with_one_worker(self): self.assertNotIn('dgc_momentum', ops) def test_dgc_recompute_optimizer(self): - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'dgc') self.set_strategy(strategy, 'recompute') @@ -131,7 +131,7 @@ def test_amp_recompute_lars_dgc_not_apply_optimizer(self): """test amp + recompute + lars + dgc, amp -/-> dgc, max_path is amp-->recompute-->lars """ - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'dgc') self.set_strategy(strategy, 'amp') diff --git a/test/collective/fleet/test_fleet_distributed_strategy.py b/test/collective/fleet/test_fleet_distributed_strategy.py index ba49cbf125a62..38ed379295f47 100644 --- a/test/collective/fleet/test_fleet_distributed_strategy.py +++ b/test/collective/fleet/test_fleet_distributed_strategy.py @@ -453,7 +453,7 @@ def test_strategy_prototxt(self): strategy.dgc = True localsgd_configs = {"k_steps": 5, "begin_step": 1} strategy.localsgd_configs = localsgd_configs - build_strategy = paddle.fluid.BuildStrategy() + build_strategy = paddle.base.BuildStrategy() build_strategy.enable_sequential_execution = True build_strategy.nccl_comm_num = 10 build_strategy.use_hierarchical_allreduce = True @@ -470,7 +470,7 @@ def test_strategy_prototxt(self): build_strategy.enable_backward_optimizer_op_deps = True build_strategy.trainers_endpoints = ["1", "2"] strategy.build_strategy = build_strategy - exe_strategy = paddle.fluid.ExecutionStrategy() + exe_strategy = paddle.base.ExecutionStrategy() exe_strategy.num_threads = 10 exe_strategy.num_iteration_per_drop_scope = 10 exe_strategy.num_iteration_per_run = 10 @@ -481,7 +481,7 @@ def test_strategy_prototxt(self): self.assertEqual(strategy.dgc, strategy2.dgc) def test_build_strategy(self): - build_strategy = paddle.fluid.BuildStrategy() + build_strategy = paddle.base.BuildStrategy() build_strategy.enable_sequential_execution = True build_strategy.nccl_comm_num = 10 build_strategy.use_hierarchical_allreduce = True @@ -502,7 +502,7 @@ def test_build_strategy(self): strategy.build_strategy = build_strategy def test_execution_strategy(self): - exe_strategy = paddle.fluid.ExecutionStrategy() + exe_strategy = paddle.base.ExecutionStrategy() exe_strategy.num_threads = 10 exe_strategy.num_iteration_per_drop_scope = 10 exe_strategy.num_iteration_per_run = 10 diff --git a/test/collective/fleet/test_fleet_fp16_allreduce_meta_optimizer.py b/test/collective/fleet/test_fleet_fp16_allreduce_meta_optimizer.py index f78f8319691f0..bb4c222725f60 100644 --- a/test/collective/fleet/test_fleet_fp16_allreduce_meta_optimizer.py +++ b/test/collective/fleet/test_fleet_fp16_allreduce_meta_optimizer.py @@ -16,7 +16,7 @@ 
import unittest import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet from paddle.distributed.fleet.base import role_maker @@ -29,7 +29,7 @@ def setUp(self): os.environ["PADDLE_TRAINER_ENDPOINTS"] = "127.0.0.1:36001" def net(self, main_prog, startup_prog, dtype='float32'): - with fluid.program_guard(main_prog, startup_prog): + with base.program_guard(main_prog, startup_prog): input_x = paddle.static.data(name="x", shape=[-1, 32], dtype=dtype) input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64') @@ -53,7 +53,7 @@ def net(self, main_prog, startup_prog, dtype='float32'): def test_fp16_allreduce_optimizer(self): role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) optimizer = paddle.optimizer.SGD(learning_rate=0.01) @@ -80,7 +80,7 @@ def test_fp16_allreduce_optimizer(self): def test_fp16_allreduce_not_apply_fp16_net(self): role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() avg_cost, strategy = self.net(train_prog, startup_prog, dtype='float16') optimizer = paddle.optimizer.SGD(learning_rate=0.01) diff --git a/test/collective/fleet/test_fleet_gradient_merge_meta_optimizer.py b/test/collective/fleet/test_fleet_gradient_merge_meta_optimizer.py index 1590fcaa3f1a9..16e30f974e947 100644 --- a/test/collective/fleet/test_fleet_gradient_merge_meta_optimizer.py +++ b/test/collective/fleet/test_fleet_gradient_merge_meta_optimizer.py @@ -24,8 +24,8 @@ class TestFleetGradientMergeMetaOptimizer(TestFleetMetaOptimizer): def test_gradient_merge_optimizer(self): train_prog, startup_prog = ( - paddle.fluid.Program(), - paddle.fluid.Program(), + paddle.base.Program(), + paddle.base.Program(), ) avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'gradient_merge') @@ -36,8 +36,8 @@ def test_gradient_merge_optimizer(self): def test_recom_gm_optimizer(self): train_prog, startup_prog = ( - paddle.fluid.Program(), - paddle.fluid.Program(), + paddle.base.Program(), + paddle.base.Program(), ) avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'gradient_merge') @@ -50,8 +50,8 @@ def test_recom_gm_optimizer(self): def test_gm_amp_optimizer(self): train_prog, startup_prog = ( - paddle.fluid.Program(), - paddle.fluid.Program(), + paddle.base.Program(), + paddle.base.Program(), ) avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'gradient_merge') @@ -64,8 +64,8 @@ def test_gm_amp_optimizer(self): def test_gm_pure_fp16_optimizer(self): train_prog, startup_prog = ( - paddle.fluid.Program(), - paddle.fluid.Program(), + paddle.base.Program(), + paddle.base.Program(), ) avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'gradient_merge') @@ -76,7 +76,7 @@ def test_gm_pure_fp16_optimizer(self): params = train_prog.all_parameters() for param in train_prog.all_parameters(): self.assertEqual( - param.dtype, paddle.fluid.core.VarDesc.VarType.FP16 + param.dtype, paddle.base.core.VarDesc.VarType.FP16 ) vars = [x.name for x in train_prog.list_vars()] diff --git a/test/collective/fleet/test_fleet_lamb_meta_optimizer.py b/test/collective/fleet/test_fleet_lamb_meta_optimizer.py index 
5708578c951dd..f348af7875eb6 100755 --- a/test/collective/fleet/test_fleet_lamb_meta_optimizer.py +++ b/test/collective/fleet/test_fleet_lamb_meta_optimizer.py @@ -16,7 +16,7 @@ import unittest import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet from paddle.distributed.fleet.base import role_maker @@ -31,8 +31,8 @@ def setUp(self): ] = "127.0.0.1:36001,127.0.0.1:36002" def net(self, main_prog, startup_prog): - with fluid.program_guard(main_prog, startup_prog): - with fluid.unique_name.guard(): + with base.program_guard(main_prog, startup_prog): + with base.unique_name.guard(): input_x = paddle.static.data( name="x", shape=[-1, 32], dtype='float32' ) @@ -67,8 +67,8 @@ def net(self, main_prog, startup_prog): def test_lamb_optimizer(self): role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) - startup_prog = fluid.Program() - train_prog = fluid.Program() + startup_prog = base.Program() + train_prog = base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) optimizer = paddle.optimizer.Adam(learning_rate=0.01) optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy) @@ -80,8 +80,8 @@ def test_lamb_optimizer(self): def test_lamb_not_apply_with_momentum(self): role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) - startup_prog = fluid.Program() - train_prog = fluid.Program() + startup_prog = base.Program() + train_prog = base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) optimizer = paddle.optimizer.Momentum(learning_rate=0.1, momentum=0.9) optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy) @@ -93,8 +93,8 @@ def test_lamb_not_apply_with_momentum(self): def test_lamb_exclude_fn(self): role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) - startup_prog = fluid.Program() - train_prog = fluid.Program() + startup_prog = base.Program() + train_prog = base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) optimizer = paddle.optimizer.Adam(learning_rate=0.01) strategy.lamb_configs = { diff --git a/test/collective/fleet/test_fleet_lars_meta_optimizer.py b/test/collective/fleet/test_fleet_lars_meta_optimizer.py index 8f6e330719175..4d60aa381a8cb 100755 --- a/test/collective/fleet/test_fleet_lars_meta_optimizer.py +++ b/test/collective/fleet/test_fleet_lars_meta_optimizer.py @@ -16,7 +16,7 @@ import unittest import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet from paddle.distributed.fleet.base import role_maker @@ -31,8 +31,8 @@ def setUp(self): ] = "127.0.0.1:36001,127.0.0.1:36002" def net(self, main_prog, startup_prog): - with fluid.program_guard(main_prog, startup_prog): - with fluid.unique_name.guard(): + with base.program_guard(main_prog, startup_prog): + with base.unique_name.guard(): input_x = paddle.static.data( name="x", shape=[-1, 32], dtype='float32' ) @@ -69,8 +69,8 @@ def net(self, main_prog, startup_prog): def test_lars_optimizer(self): role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) - startup_prog = fluid.Program() - train_prog = fluid.Program() + startup_prog = base.Program() + train_prog = base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) optimizer = paddle.optimizer.Momentum(learning_rate=0.01, momentum=0.9) optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy) @@ -82,8 +82,8 @@ def test_lars_optimizer(self): def test_lars_not_apply_with_adam(self): role = 
role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) - startup_prog = fluid.Program() - train_prog = fluid.Program() + startup_prog = base.Program() + train_prog = base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) optimizer = paddle.optimizer.Adam(learning_rate=0.01) optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy) @@ -95,8 +95,8 @@ def test_lars_not_apply_with_adam(self): def test_lars_exclude_fn(self): role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) - startup_prog = fluid.Program() - train_prog = fluid.Program() + startup_prog = base.Program() + train_prog = base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) optimizer = paddle.optimizer.Momentum(learning_rate=0.01, momentum=0.9) diff --git a/test/collective/fleet/test_fleet_localsgd_meta_optimizer.py b/test/collective/fleet/test_fleet_localsgd_meta_optimizer.py index 0a716b3ddb825..d331ef59410df 100644 --- a/test/collective/fleet/test_fleet_localsgd_meta_optimizer.py +++ b/test/collective/fleet/test_fleet_localsgd_meta_optimizer.py @@ -17,14 +17,14 @@ from fleet_meta_optimizer_base import TestFleetMetaOptimizer import paddle -from paddle import fluid +from paddle import base paddle.enable_static() class TestFleetLocalSGDMetaOptimizer(TestFleetMetaOptimizer): def test_localsgd_optimizer(self): - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'localsgd') self.optimizer(avg_cost, strategy, train_prog, startup_prog) @@ -40,7 +40,7 @@ def test_localsgd_optimizer(self): self.assertIn('@SNAPSHOT', ''.join(outs)) def test_localsgd_amp_optimizer(self): - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'localsgd') self.set_strategy(strategy, 'amp') @@ -63,7 +63,7 @@ def test_localsgd_amp_optimizer(self): class TestFleetAdaptiveLocalSGDMetaOptimizer(TestFleetMetaOptimizer): def test_adaptive_localsgd_optimizer(self): - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'adaptive_localsgd') self.optimizer(avg_cost, strategy, train_prog, startup_prog) @@ -79,7 +79,7 @@ def test_adaptive_localsgd_optimizer(self): self.assertIn('@SNAPSHOT', ''.join(outs)) def test_localsgd_amp_optimizer(self): - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'adaptive_localsgd') self.set_strategy(strategy, 'amp') diff --git a/test/collective/fleet/test_fleet_meta_optimizer_base.py b/test/collective/fleet/test_fleet_meta_optimizer_base.py index 7f8db79edf22b..326f1bc64d48c 100755 --- a/test/collective/fleet/test_fleet_meta_optimizer_base.py +++ b/test/collective/fleet/test_fleet_meta_optimizer_base.py @@ -15,7 +15,7 @@ import unittest import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet from paddle.distributed.fleet.base import role_maker from paddle.distributed.fleet.meta_optimizers.meta_optimizer_base import ( @@ -25,8 +25,8 @@ class TestFleetMetaOptimizerBase(unittest.TestCase): def net(main_prog, 
startup_prog): - with fluid.program_guard(main_prog, startup_prog): - with fluid.unique_name.guard(): + with base.program_guard(main_prog, startup_prog): + with base.unique_name.guard(): role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) input_x = paddle.static.data( @@ -61,7 +61,7 @@ def net(main_prog, startup_prog): ) return None - net(fluid.default_startup_program(), fluid.default_main_program()) + net(base.default_startup_program(), base.default_main_program()) if __name__ == "__main__": diff --git a/test/collective/fleet/test_fleet_pipeline_meta_optimizer.py b/test/collective/fleet/test_fleet_pipeline_meta_optimizer.py index 2780c51e26d6d..78ad33ce44f28 100644 --- a/test/collective/fleet/test_fleet_pipeline_meta_optimizer.py +++ b/test/collective/fleet/test_fleet_pipeline_meta_optimizer.py @@ -16,7 +16,7 @@ import unittest import paddle -from paddle import fluid, static +from paddle import base, static from paddle.distributed import fleet from paddle.distributed.fleet.base import role_maker @@ -75,7 +75,7 @@ def test_pipeline_optimizer(self): train_prog, startup_prog = static.Program(), static.Program() with static.program_guard(train_prog, startup_prog): - with fluid.unique_name.guard(): + with base.unique_name.guard(): avg_cost = self.net() optimizer = paddle.optimizer.Adam(0.01) @@ -99,7 +99,7 @@ def test_pipeline_amp_optimizer(self): train_prog, startup_prog = static.Program(), static.Program() with static.program_guard(train_prog, startup_prog): - with fluid.unique_name.guard(): + with base.unique_name.guard(): avg_cost = self.net() optimizer = paddle.optimizer.Adam(0.01) diff --git a/test/collective/fleet/test_fleet_pipeline_meta_optimizer_with_recompute.py b/test/collective/fleet/test_fleet_pipeline_meta_optimizer_with_recompute.py index a0f3b1d9bb35f..f1a0b7376c102 100644 --- a/test/collective/fleet/test_fleet_pipeline_meta_optimizer_with_recompute.py +++ b/test/collective/fleet/test_fleet_pipeline_meta_optimizer_with_recompute.py @@ -33,7 +33,7 @@ def test_pipeline_optimizer(self): role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) - with paddle.fluid.device_guard("gpu:0"): + with paddle.base.device_guard("gpu:0"): input_x = paddle.static.data( name="x", shape=[-1, 32], dtype='float32' ) @@ -45,7 +45,7 @@ def test_pipeline_optimizer(self): fc_5 = paddle.static.nn.fc(x=fc_4, size=64, activation='tanh') fc_6 = paddle.static.nn.fc(x=fc_5, size=64, activation='tanh') - with paddle.fluid.device_guard("gpu:1"): + with paddle.base.device_guard("gpu:1"): fc_7 = paddle.static.nn.fc(x=fc_6, size=64, activation='tanh') prediction = paddle.static.nn.fc( x=[fc_7], size=2, activation='softmax' diff --git a/test/collective/fleet/test_fleet_qat_meta_optimizer.py b/test/collective/fleet/test_fleet_qat_meta_optimizer.py index 7559bd01973d3..4d6be6bb6aa7f 100644 --- a/test/collective/fleet/test_fleet_qat_meta_optimizer.py +++ b/test/collective/fleet/test_fleet_qat_meta_optimizer.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid, nn +from paddle import base, nn from paddle.distributed import fleet paddle.enable_static() @@ -48,8 +48,8 @@ def setup_strategy(self, strategy): strategy.qat = True def generate_program(self, strategy): - train_prog, startup_prog = fluid.Program(), fluid.Program() - with fluid.program_guard(train_prog, startup_prog): + train_prog, startup_prog = base.Program(), base.Program() + with base.program_guard(train_prog, startup_prog): input_x = paddle.static.data( name='X', shape=[self.batch_size, 
self.input_size], @@ -73,12 +73,12 @@ def generate_program(self, strategy): def execute_program(self, train_prog, startup_prog, input_x, input_y): place = ( - fluid.CUDAPlace(0) - if paddle.fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if paddle.base.is_compiled_with_cuda() + else base.CPUPlace() ) - exe = fluid.Executor(place) - feeder = fluid.DataFeeder(feed_list=[input_x, input_y], place=place) + exe = base.Executor(place) + feeder = base.DataFeeder(feed_list=[input_x, input_y], place=place) exe.run(startup_prog) data = ( np.random.randn(self.batch_size, self.input_size), @@ -108,9 +108,9 @@ def test_fleet_with_qat(self): optimizer, ) = self.generate_program(dist_strategy) place = ( - fluid.CUDAPlace(0) - if paddle.fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if paddle.base.is_compiled_with_cuda() + else base.CPUPlace() ) eval_prog = train_prog.clone(for_test=True) optimizer.qat_init( diff --git a/test/collective/fleet/test_fleet_recompute_meta_optimizer.py b/test/collective/fleet/test_fleet_recompute_meta_optimizer.py index e0315fbf407ff..54708d69e26af 100644 --- a/test/collective/fleet/test_fleet_recompute_meta_optimizer.py +++ b/test/collective/fleet/test_fleet_recompute_meta_optimizer.py @@ -17,7 +17,7 @@ from fleet_meta_optimizer_base import TestFleetMetaOptimizer import paddle -from paddle import fluid +from paddle import base from paddle.distributed.fleet.meta_optimizers import RecomputeOptimizer paddle.enable_static() @@ -26,7 +26,7 @@ class TestFleetRecomputeMetaOptimizer(TestFleetMetaOptimizer): def test_recompute_optimizer_backward(self): """test recompute optimizer backward""" - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'recompute') @@ -42,7 +42,7 @@ def test_recompute_optimizer_backward(self): def test_recompute_optimizer_backward_gradients(self): """test recompute optimizer backward + gradients""" - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'recompute') @@ -50,7 +50,7 @@ def test_recompute_optimizer_backward_gradients(self): opt = RecomputeOptimizer(opt) opt.user_defined_strategy = strategy params_grads = opt.backward(avg_cost, startup_prog) - with fluid.program_guard(train_prog, startup_prog): + with base.program_guard(train_prog, startup_prog): opt.apply_gradients(params_grads) outs = [ @@ -60,7 +60,7 @@ def test_recompute_optimizer_backward_gradients(self): def test_recompute_optimizer_backward_optimize(self): """test recompute optimizer backward + optimize""" - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'recompute') @@ -76,7 +76,7 @@ def test_recompute_optimizer_backward_optimize(self): self.assertIn('subprog', ''.join(outs)) def test_recompute_optimizer(self): - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'recompute') self.optimizer(avg_cost, strategy, train_prog, startup_prog) @@ -88,7 +88,7 @@ def test_recompute_optimizer(self): self.assertIn('subprog', ''.join(outs)) def 
test_recompute_lars_optimizer(self): - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'recompute') self.set_strategy(strategy, 'lars') @@ -103,7 +103,7 @@ def test_recompute_lars_optimizer(self): self.assertIn('lars_momentum', ops) def test_recompute_lamb_optimizer(self): - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'recompute') self.set_strategy(strategy, 'lamb') @@ -118,7 +118,7 @@ def test_recompute_lamb_optimizer(self): self.assertIn('lamb', ops) def test_recompute_offload(self): - train_prog, startup_prog = fluid.Program(), fluid.Program() + train_prog, startup_prog = base.Program(), base.Program() avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'recompute-offload') self.optimizer(avg_cost, strategy, train_prog, startup_prog) diff --git a/test/collective/fleet/test_fleet_sharding_meta_optimizer.py b/test/collective/fleet/test_fleet_sharding_meta_optimizer.py index 9df8b670c7e99..8eaee6344cbff 100755 --- a/test/collective/fleet/test_fleet_sharding_meta_optimizer.py +++ b/test/collective/fleet/test_fleet_sharding_meta_optimizer.py @@ -28,8 +28,8 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer): def test_sharding_optimizer(self): train_prog, startup_prog = ( - paddle.fluid.Program(), - paddle.fluid.Program(), + paddle.base.Program(), + paddle.base.Program(), ) avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'sharding') @@ -106,8 +106,8 @@ def test_sharding_optimizer(self): def test_sharding_amp_optimizer(self): train_prog, startup_prog = ( - paddle.fluid.Program(), - paddle.fluid.Program(), + paddle.base.Program(), + paddle.base.Program(), ) avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'sharding') @@ -212,8 +212,8 @@ def test_sharding_amp_optimizer(self): def test_sharding_recompute_optimizer(self): train_prog, startup_prog = ( - paddle.fluid.Program(), - paddle.fluid.Program(), + paddle.base.Program(), + paddle.base.Program(), ) avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'sharding') @@ -296,8 +296,8 @@ def test_sharding_recompute_optimizer(self): def test_sharding_amp_recompute_optimizer(self): train_prog, startup_prog = ( - paddle.fluid.Program(), - paddle.fluid.Program(), + paddle.base.Program(), + paddle.base.Program(), ) avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'sharding') @@ -421,8 +421,8 @@ def test_sharding_amp_recompute_optimizer(self): def test_sharding_amp_asp_optimizer(self): train_prog, startup_prog = ( - paddle.fluid.Program(), - paddle.fluid.Program(), + paddle.base.Program(), + paddle.base.Program(), ) avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'sharding') @@ -534,8 +534,8 @@ def test_sharding_amp_asp_optimizer(self): def test_sharding_weight_decay(self): train_prog, startup_prog = ( - paddle.fluid.Program(), - paddle.fluid.Program(), + paddle.base.Program(), + paddle.base.Program(), ) avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'sharding') @@ -617,8 +617,8 @@ def test_sharding_weight_decay(self): def test_sharding_gradient_clip(self): train_prog, startup_prog = ( - 
paddle.fluid.Program(), - paddle.fluid.Program(), + paddle.base.Program(), + paddle.base.Program(), ) avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'sharding') @@ -708,8 +708,8 @@ def test_sharding_gradient_clip(self): def test_sharding_clone_for_test(self): train_prog, startup_prog = ( - paddle.fluid.Program(), - paddle.fluid.Program(), + paddle.base.Program(), + paddle.base.Program(), ) avg_cost, strategy = self.net(train_prog, startup_prog) self.set_strategy(strategy, 'sharding') @@ -766,8 +766,8 @@ def setUp(self): def test_sharding_with_mp(self): # NOTE(JZ-LIANG) MP parallelism need user to build model with MP API train_prog, startup_prog = ( - paddle.fluid.Program(), - paddle.fluid.Program(), + paddle.base.Program(), + paddle.base.Program(), ) avg_cost, _ = self.net(train_prog, startup_prog) strategy = paddle.distributed.fleet.DistributedStrategy() @@ -817,8 +817,8 @@ def test_sharding_with_mp(self): def test_sharding_hybrid_dp(self): train_prog, startup_prog = ( - paddle.fluid.Program(), - paddle.fluid.Program(), + paddle.base.Program(), + paddle.base.Program(), ) avg_cost, _ = self.net(train_prog, startup_prog) strategy = paddle.distributed.fleet.DistributedStrategy() @@ -939,8 +939,8 @@ def test_sharding_hybrid_dp(self): def test_sharding_hybrid_dp_gm(self): train_prog, startup_prog = ( - paddle.fluid.Program(), - paddle.fluid.Program(), + paddle.base.Program(), + paddle.base.Program(), ) avg_cost, _ = self.net(train_prog, startup_prog) strategy = paddle.distributed.fleet.DistributedStrategy() @@ -1072,8 +1072,8 @@ def test_sharding_hybrid_dp_gm(self): def test_sharding_with_pp(self): train_prog, startup_prog = ( - paddle.fluid.Program(), - paddle.fluid.Program(), + paddle.base.Program(), + paddle.base.Program(), ) avg_cost, strategy = self.pp_net(train_prog, startup_prog) strategy.sharding = True @@ -1239,8 +1239,8 @@ def test_sharding_with_pp(self): def test_sharding_dp_with_allreduce_fuse(self): train_prog, startup_prog = ( - paddle.fluid.Program(), - paddle.fluid.Program(), + paddle.base.Program(), + paddle.base.Program(), ) avg_cost, _ = self.net(train_prog, startup_prog) strategy = paddle.distributed.fleet.DistributedStrategy() @@ -1271,8 +1271,8 @@ def test_sharding_dp_with_allreduce_fuse(self): def test_hybrid_with_mp_pp_amp_gclip(self): train_prog, startup_prog = ( - paddle.fluid.Program(), - paddle.fluid.Program(), + paddle.base.Program(), + paddle.base.Program(), ) avg_cost, strategy = self.pp_net(train_prog, startup_prog) self.set_strategy(strategy, 'amp') @@ -1509,8 +1509,8 @@ def test_hybrid_with_mp_pp_amp_gclip(self): def test_hybrid_with_mp_pp_amp_gclip_for_optimizer(self): train_prog, startup_prog = ( - paddle.fluid.Program(), - paddle.fluid.Program(), + paddle.base.Program(), + paddle.base.Program(), ) avg_cost, strategy = self.pp_net(train_prog, startup_prog) self.set_strategy(strategy, 'amp') @@ -1777,8 +1777,8 @@ def test_hybrid_with_mp_pp_amp_gclip_for_optimizer(self): def test_hybrid_with_pp_dp_amp_fp16allreduce(self): train_prog, startup_prog = ( - paddle.fluid.Program(), - paddle.fluid.Program(), + paddle.base.Program(), + paddle.base.Program(), ) avg_cost, strategy = self.pp_net(train_prog, startup_prog) strategy.amp = True @@ -1977,8 +1977,8 @@ def test_hybrid_with_pp_dp_amp_fp16allreduce(self): def test_hybrid_with_sharding_pp_amp_fp16allreduce_in_optimize(self): train_prog, startup_prog = ( - paddle.fluid.Program(), - paddle.fluid.Program(), + paddle.base.Program(), + paddle.base.Program(), ) avg_cost, strategy = 
self.pp_net(train_prog, startup_prog) strategy.amp = True @@ -2078,8 +2078,8 @@ def test_hybrid_with_sharding_pp_amp_fp16allreduce_in_optimize(self): def test_hybrid_with_pp_dp_amp_fp16allreduce_optimize_cast(self): train_prog, startup_prog = ( - paddle.fluid.Program(), - paddle.fluid.Program(), + paddle.base.Program(), + paddle.base.Program(), ) avg_cost, strategy = self.pp_net(train_prog, startup_prog) strategy.amp = True @@ -2286,8 +2286,8 @@ def test_hybrid_with_pp_dp_amp_fp16allreduce_optimize_cast(self): def test_hybrid_with_pp_dp_amp_fp16allreduce_optimize_offload(self): train_prog, startup_prog = ( - paddle.fluid.Program(), - paddle.fluid.Program(), + paddle.base.Program(), + paddle.base.Program(), ) avg_cost, strategy = self.pp_net(train_prog, startup_prog) strategy.amp = True @@ -2517,8 +2517,8 @@ def test_hybrid_with_pp_dp_amp_fp16allreduce_optimize_cast_with_gradient_fuse( self, ): train_prog, startup_prog = ( - paddle.fluid.Program(), - paddle.fluid.Program(), + paddle.base.Program(), + paddle.base.Program(), ) avg_cost, strategy = self.pp_net(train_prog, startup_prog) strategy.amp = True @@ -2716,8 +2716,8 @@ def test_hybrid_with_pp_dp_amp_fp16allreduce_optimize_cast_with_gradient_fuse( def test_hybrid_with_pp_dp_amp_with_gradient_fuse(self): train_prog, startup_prog = ( - paddle.fluid.Program(), - paddle.fluid.Program(), + paddle.base.Program(), + paddle.base.Program(), ) avg_cost, strategy = self.pp_net(train_prog, startup_prog) strategy.amp = True @@ -2898,8 +2898,8 @@ def test_hybrid_with_pp_dp_amp_with_gradient_fuse(self): def test_hybrid_with_pp_dp_amp_with_gradient_fuse_and_avg_after_sum(self): train_prog, startup_prog = ( - paddle.fluid.Program(), - paddle.fluid.Program(), + paddle.base.Program(), + paddle.base.Program(), ) avg_cost, strategy = self.pp_net(train_prog, startup_prog) strategy.amp = True @@ -3052,8 +3052,8 @@ def test_hybrid_with_pp_dp_amp_with_gradient_fuse_and_avg_after_sum(self): def test_hybrid_with_pp_dp_with_gradient_fuse_and_avg_after_sum(self): train_prog, startup_prog = ( - paddle.fluid.Program(), - paddle.fluid.Program(), + paddle.base.Program(), + paddle.base.Program(), ) avg_cost, strategy = self.pp_net(train_prog, startup_prog) strategy.sharding = True @@ -3180,8 +3180,8 @@ def test_hybrid_with_pp_dp_with_amp_no_dynamic_gradient_fuse_and_avg_after_sum( self, ): train_prog, startup_prog = ( - paddle.fluid.Program(), - paddle.fluid.Program(), + paddle.base.Program(), + paddle.base.Program(), ) avg_cost, strategy = self.pp_net(train_prog, startup_prog) strategy.sharding = True diff --git a/test/collective/fleet/test_fleet_tensor_parallel_extra_sync.py b/test/collective/fleet/test_fleet_tensor_parallel_extra_sync.py index eba8f6926e0b4..70724709329fd 100644 --- a/test/collective/fleet/test_fleet_tensor_parallel_extra_sync.py +++ b/test/collective/fleet/test_fleet_tensor_parallel_extra_sync.py @@ -21,7 +21,7 @@ paddle.enable_static() -class TensorParallelNet(paddle.fluid.dygraph.Layer): +class TensorParallelNet(paddle.base.dygraph.Layer): def __init__(self, hidden_size): super().__init__() self.embedding = paddle.nn.Embedding(hidden_size, hidden_size) diff --git a/test/collective/fleet/test_hdfs1.py b/test/collective/fleet/test_hdfs1.py index 418a9ae3a68fa..a91241fed81cf 100644 --- a/test/collective/fleet/test_hdfs1.py +++ b/test/collective/fleet/test_hdfs1.py @@ -17,7 +17,7 @@ from hdfs_test_utils import FSTestBase -from paddle import fluid +from paddle import base from paddle.distributed.fleet.utils.fs import FSTimeOut, HDFSClient java_home = 
os.environ["JAVA_HOME"] @@ -42,7 +42,7 @@ def test_timeout(self): except FSTimeOut as e: print(f"execute mv {src} to {dst} timeout") - ret, output = fluid.core.shell_execute_cmd(cmd, 6 * 1000, 2 * 1000) + ret, output = base.core.shell_execute_cmd(cmd, 6 * 1000, 2 * 1000) self.assertNotEqual(ret, 0) print(f"second mv ret:{ret} output:{output}") diff --git a/test/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py b/test/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py index 05bd76127542c..3d85c6cd01edd 100644 --- a/test/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py +++ b/test/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py @@ -20,12 +20,12 @@ from test_imperative_resnet import ResNet, optimizer_setting, train_parameters import paddle -from paddle import fluid, nn +from paddle import base, nn from paddle.autograd import PyLayer from paddle.static import InputSpec -if fluid.core.is_compiled_with_cuda(): - fluid.set_flags({"FLAGS_cudnn_deterministic": True}) +if base.core.is_compiled_with_cuda(): + base.set_flags({"FLAGS_cudnn_deterministic": True}) class SimpleConv(paddle.nn.Layer): @@ -56,38 +56,38 @@ def forward(self, inputs): class TestAutoCast(unittest.TestCase): def amp_guard_white_op(self): data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32') - with fluid.dygraph.guard(): + with base.dygraph.guard(): conv2d = paddle.nn.Conv2D(3, 2, 3, bias_attr=False) - data = fluid.dygraph.to_variable(data) + data = base.dygraph.to_variable(data) with paddle.amp.amp_guard(True): out_fp16 = conv2d(data) with paddle.amp.amp_guard(False): out_fp32 = conv2d(data) - self.assertTrue(data.dtype == fluid.core.VarDesc.VarType.FP32) - self.assertTrue(out_fp16.dtype == fluid.core.VarDesc.VarType.FP16) - self.assertTrue(out_fp32.dtype == fluid.core.VarDesc.VarType.FP32) + self.assertTrue(data.dtype == base.core.VarDesc.VarType.FP32) + self.assertTrue(out_fp16.dtype == base.core.VarDesc.VarType.FP16) + self.assertTrue(out_fp32.dtype == base.core.VarDesc.VarType.FP32) def test_amp_guard_white_op(self): self.amp_guard_white_op() def amp_guard_black_op(self): data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32') - with fluid.dygraph.guard(): - data = fluid.dygraph.to_variable(data) + with base.dygraph.guard(): + data = base.dygraph.to_variable(data) with paddle.amp.amp_guard(True): out_fp32 = paddle.mean(data) - self.assertTrue(data.dtype == fluid.core.VarDesc.VarType.FP32) - self.assertTrue(out_fp32.dtype == fluid.core.VarDesc.VarType.FP32) + self.assertTrue(data.dtype == base.core.VarDesc.VarType.FP32) + self.assertTrue(out_fp32.dtype == base.core.VarDesc.VarType.FP32) def test_amp_guard_black_op(self): self.amp_guard_black_op() def custom_op_list(self): - with fluid.dygraph.guard(): - tracer = fluid.framework._dygraph_tracer() + with base.dygraph.guard(): + tracer = base.framework._dygraph_tracer() base_white_list = paddle.amp.white_list()["float16"]["O1"] base_black_list = paddle.amp.black_list()["float16"]["O1"] with paddle.amp.amp_guard( @@ -129,7 +129,7 @@ def custom_op_list_exception(self): inp_np = np.random.random(size=[1, 3, 128, 128]).astype(np.float32) def func(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): model = SimpleConv( num_channels=3, num_filters=64, @@ -140,7 +140,7 @@ def func(): with paddle.amp.amp_guard( custom_white_list=["conv2d"], custom_black_list=["conv2d"] ): - inp = fluid.dygraph.to_variable(inp_np) + inp = base.dygraph.to_variable(inp_np) out = model(inp) 
self.assertRaises(ValueError, func) @@ -150,9 +150,9 @@ def test_custom_op_list_exception(self): def amp_guard_upsupported_fp16_op(self): data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32') - with fluid.dygraph.guard(): + with base.dygraph.guard(): conv2d = paddle.nn.Conv2D(3, 2, 3, bias_attr=False) - data = fluid.dygraph.to_variable(data) + data = base.dygraph.to_variable(data) with paddle.amp.amp_guard(True): out_amp_fp16 = conv2d(data) out_amp_fp32 = paddle.expand_as( @@ -164,14 +164,14 @@ def amp_guard_upsupported_fp16_op(self): out_purefp16_fp32 = paddle.expand_as( out_purefp16_fp16, out_purefp16_fp16 ) # expand_as_v2 has no fp16 kernel - self.assertTrue(data.dtype == fluid.core.VarDesc.VarType.FP32) - self.assertTrue(out_amp_fp16.dtype == fluid.core.VarDesc.VarType.FP16) - self.assertTrue(out_amp_fp32.dtype == fluid.core.VarDesc.VarType.FP32) + self.assertTrue(data.dtype == base.core.VarDesc.VarType.FP32) + self.assertTrue(out_amp_fp16.dtype == base.core.VarDesc.VarType.FP16) + self.assertTrue(out_amp_fp32.dtype == base.core.VarDesc.VarType.FP32) self.assertTrue( - out_purefp16_fp16.dtype == fluid.core.VarDesc.VarType.FP16 + out_purefp16_fp16.dtype == base.core.VarDesc.VarType.FP16 ) self.assertTrue( - out_purefp16_fp32.dtype == fluid.core.VarDesc.VarType.FP32 + out_purefp16_fp32.dtype == base.core.VarDesc.VarType.FP32 ) def test_amp_guard_upsupported_fp16_op(self): @@ -180,9 +180,9 @@ def test_amp_guard_upsupported_fp16_op(self): def mode_exception(self): def func(): data = np.random.uniform(-1, 1, [10, 3, 32, 32]).astype('float32') - with fluid.dygraph.guard(): + with base.dygraph.guard(): conv2d = paddle.nn.Conv2D(3, 2, 3, bias_attr=False) - data = fluid.dygraph.to_variable(data) + data = base.dygraph.to_variable(data) with paddle.amp.amp_guard(level='O'): out = conv2d(data) @@ -196,7 +196,7 @@ class TestAmpScaler(unittest.TestCase): def scale(self): if not paddle.amp.is_float16_supported(): return - with fluid.dygraph.guard(): + with base.dygraph.guard(): with paddle.amp.auto_cast(dtype='float16'): data = paddle.rand([10, 1024]) scaler = paddle.amp.AmpScaler(init_loss_scaling=1024) @@ -214,7 +214,7 @@ def minimize(self): def run_simple_conv(inp_np, use_scaler=True): paddle.seed(10) paddle.framework.random._manual_program_seed(10) - with fluid.dygraph.guard(): + with base.dygraph.guard(): model = SimpleConv( num_channels=3, num_filters=64, @@ -226,7 +226,7 @@ def run_simple_conv(inp_np, use_scaler=True): learning_rate=0.01, parameters=model.parameters() ) scaler = paddle.amp.AmpScaler(init_loss_scaling=1024) - data = fluid.dygraph.to_variable(inp_np) + data = base.dygraph.to_variable(inp_np) out = model(data) loss = paddle.mean(out) @@ -275,7 +275,7 @@ def step(self): def run_simple_conv(inp_np, use_scaler=True): paddle.seed(10) paddle.framework.random._manual_program_seed(10) - with fluid.dygraph.guard(): + with base.dygraph.guard(): model = SimpleConv( num_channels=3, num_filters=64, @@ -287,7 +287,7 @@ def run_simple_conv(inp_np, use_scaler=True): learning_rate=0.01, parameters=model.parameters() ) scaler = paddle.amp.GradScaler(init_loss_scaling=1024) - data = fluid.dygraph.to_variable(inp_np) + data = base.dygraph.to_variable(inp_np) out = model(data) loss = paddle.mean(out) @@ -320,7 +320,7 @@ def test_step(self): def nan_inf(self): inp_np = np.random.random(size=[1, 3, 128, 128]).astype(np.float32) inp_np[0][1][2][3] = np.nan - with fluid.dygraph.guard(): + with base.dygraph.guard(): model = SimpleConv( num_channels=3, num_filters=64, @@ -335,7 +335,7 @@ def 
nan_inf(self): learning_rate=0.01, parameters=model.parameters() ) scaler = paddle.amp.AmpScaler(init_loss_scaling=1024) - data = fluid.dygraph.to_variable(inp_np) + data = base.dygraph.to_variable(inp_np) with paddle.amp.auto_cast(dtype='float16'): out = model(data) loss = paddle.mean(out) @@ -408,7 +408,7 @@ def test_step_update_exception(self): self.step_update_exception() def test_get_and_set(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): scaler = paddle.amp.GradScaler( enable=True, init_loss_scaling=1024, @@ -437,7 +437,7 @@ def test_get_and_set(self): self.assertEqual(scaler.get_init_loss_scaling() == 100, True) def test_state_dict_and_load_state_dict(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): scaler1 = paddle.amp.GradScaler( enable=True, init_loss_scaling=14, @@ -515,7 +515,7 @@ def train_resnet( batch_size=batch_size, drop_last=True, ) - train_loader = fluid.io.DataLoader.from_generator( + train_loader = base.io.DataLoader.from_generator( capacity=4, use_double_buffer=True, iterable=True, @@ -566,7 +566,7 @@ def train_resnet( if param.trainable: np_array = np.array(param._grad_ivar().value().get_tensor()) dy_grad_value[ - param.name + fluid.core.grad_var_suffix() + param.name + base.core.grad_var_suffix() ] = np_array resnet.clear_gradients() @@ -585,7 +585,7 @@ def train_resnet( def test_with_state_dict(self): def func_isinstance(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): out_use_state_dict = self.train_resnet( enable_amp=True, use_data_loader=True, use_save_load=True ) @@ -603,7 +603,7 @@ def func_isinstance(): class TestAmpDecorator(unittest.TestCase): def test_mode_exception(self): def func(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): model = paddle.nn.Conv2D(3, 2, 3, bias_attr=False) opt = paddle.optimizer.SGD(parameters=model.parameters()) model, opt = paddle.amp.decorate( @@ -619,7 +619,7 @@ def __init__(self): print("A fake Model") model = MyModel() - with fluid.dygraph.guard(): + with base.dygraph.guard(): paddle.amp.decorate(models=model, optimizers=None, level='O2') self.assertRaises(TypeError, test_error_model) @@ -627,7 +627,7 @@ def __init__(self): def test_error_distributed_model(): model = paddle.nn.Conv2D(3, 2, 3, bias_attr=False) model = paddle.DataParallel(model) - with fluid.dygraph.guard(): + with base.dygraph.guard(): model = paddle.amp.decorate(models=model, level='O2') self.assertRaises(RuntimeError, test_error_distributed_model) @@ -639,7 +639,7 @@ def __init__(self): model = paddle.nn.Conv2D(3, 2, 3, bias_attr=False) opt = MyOptimizer() - with fluid.dygraph.guard(): + with base.dygraph.guard(): paddle.amp.decorate(models=model, optimizers=opt, level='O2') self.assertRaises(TypeError, test_error_optimizer) @@ -809,7 +809,7 @@ def train_resnet( batch_size=batch_size, drop_last=True, ) - train_loader = fluid.io.DataLoader.from_generator( + train_loader = base.io.DataLoader.from_generator( capacity=4, use_double_buffer=True, iterable=True, @@ -869,7 +869,7 @@ def train_resnet( if param.trainable: np_array = np.array(param._grad_ivar().value().get_tensor()) dy_grad_value[ - param.name + fluid.core.grad_var_suffix() + param.name + base.core.grad_var_suffix() ] = np_array resnet.clear_gradients() @@ -909,7 +909,7 @@ def train_resnet( def test_with_save_load(self): def func_isinstance(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): out_use_save_load = self.train_resnet( enable_amp=True, use_data_loader=True, use_save_load=True ) @@ -1101,7 +1101,7 @@ def train_resnet( 
batch_size=batch_size, drop_last=True, ) - train_loader = fluid.io.DataLoader.from_generator( + train_loader = base.io.DataLoader.from_generator( capacity=4, use_double_buffer=True, iterable=True, @@ -1157,7 +1157,7 @@ def train_resnet( if param.trainable: np_array = np.array(param._grad_ivar().value().get_tensor()) dy_grad_value[ - param.name + fluid.core.grad_var_suffix() + param.name + base.core.grad_var_suffix() ] = np_array resnet.clear_gradients() @@ -1171,7 +1171,7 @@ def train_resnet( def test_resnet(self): def func_isinstance(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): out_fp32 = self.train_resnet(enable_amp=False) out_amp = self.train_resnet(enable_amp=True) out_pure_fp16 = self.train_resnet(enable_amp=True, level='O2') @@ -1187,7 +1187,7 @@ def func_isinstance(): def test_with_data_loader(self): def func_isinstance(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): out_fp32 = self.train_resnet( enable_amp=False, use_data_loader=True ) @@ -1209,7 +1209,7 @@ def func_isinstance(): def test_param_group(self): def func_isinstance(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): out_fp32 = self.train_resnet( enable_amp=False, use_data_loader=True, use_param_group=True ) @@ -1244,7 +1244,7 @@ def train_resnet(self, enable_amp=True, level='O1'): batch_size = train_parameters["batch_size"] batch_num = 1 - with fluid.dygraph.guard(): + with base.dygraph.guard(): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) @@ -1291,8 +1291,8 @@ def train_resnet(self, enable_amp=True, level='O1'): .astype('int64') .reshape(-1, 1) ) - img = fluid.dygraph.to_variable(dy_x_data) - label = fluid.dygraph.to_variable(y_data) + img = base.dygraph.to_variable(dy_x_data) + label = base.dygraph.to_variable(y_data) label.stop_gradient = True with paddle.amp.amp_guard(enable=enable_amp, level=level): out = resnet(img) @@ -1316,7 +1316,7 @@ def train_resnet(self, enable_amp=True, level='O1'): param._grad_ivar().value().get_tensor() ) dy_grad_value[ - param.name + fluid.core.grad_var_suffix() + param.name + base.core.grad_var_suffix() ] = np_array resnet.clear_gradients() @@ -1350,15 +1350,15 @@ class TestLayerNormFp16(unittest.TestCase): def test_layer_norm_fp16(self): def func_isinstance(): - if fluid.is_compiled_with_cuda(): - with fluid.dygraph.guard(fluid.CUDAPlace(0)): + if base.is_compiled_with_cuda(): + with base.dygraph.guard(base.CUDAPlace(0)): x = paddle.rand([2, 2, 2, 3]) layer_norm = paddle.nn.LayerNorm(x.shape[1:]) with paddle.amp.auto_cast(custom_white_list=['layer_norm']): out = layer_norm(x) self.assertTrue( - out.dtype == fluid.core.VarDesc.VarType.FP16 + out.dtype == base.core.VarDesc.VarType.FP16 ) func_isinstance() @@ -1387,8 +1387,8 @@ def train(self, enable_amp=True, amp_level='O1'): def test_bf16(self): def func_isinstance(): if ( - fluid.core.is_compiled_with_cuda() - and fluid.core.is_bfloat16_supported(paddle.CUDAPlace(0)) + base.core.is_compiled_with_cuda() + and base.core.is_bfloat16_supported(paddle.CUDAPlace(0)) ): out_fp32 = self.train(enable_amp=False) out_bf16_O1 = self.train(enable_amp=True, amp_level='O1') @@ -1432,7 +1432,7 @@ def backward(ctx, grad): class TestAmpWithHook(unittest.TestCase): def test_hook_change_dtype(self): def func_isinstance(): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): v = paddle.rand([3, 3]) v.stop_gradient = False @@ -1452,7 +1452,7 @@ def foo(grad): def test_hook_change_place(self): def func_isinstance(): - with paddle.fluid.dygraph.guard(): + with 
paddle.base.dygraph.guard(): v = paddle.rand([3, 3]) v.stop_gradient = False diff --git a/test/collective/fleet/test_parallel_dygraph_control_flow.py b/test/collective/fleet/test_parallel_dygraph_control_flow.py index 71ebfc8b109ee..ee5ec6a01edc7 100644 --- a/test/collective/fleet/test_parallel_dygraph_control_flow.py +++ b/test/collective/fleet/test_parallel_dygraph_control_flow.py @@ -17,7 +17,7 @@ from legacy_test.test_dist_base import TestDistBase -from paddle import fluid +from paddle import base flag_name = os.path.splitext(__file__)[0] @@ -30,7 +30,7 @@ def _setup_config(self): self._find_unused_parameters = True def test_net(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( "parallel_dygraph_control_flow_same.py", delta=1e-5, @@ -65,7 +65,7 @@ def _setup_config(self): self._find_unused_parameters = True def test_net(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( "parallel_dygraph_control_flow_different.py", delta=1e-5, diff --git a/test/collective/fleet/test_parallel_dygraph_mnist.py b/test/collective/fleet/test_parallel_dygraph_mnist.py index f3c8b8f5c6d56..352ef30ac8d9d 100644 --- a/test/collective/fleet/test_parallel_dygraph_mnist.py +++ b/test/collective/fleet/test_parallel_dygraph_mnist.py @@ -19,7 +19,7 @@ from legacy_test.spawn_runner_base import TestDistSpawnRunner from legacy_test.test_dist_base import TestDistBase -from paddle import fluid +from paddle import base flag_name = os.path.splitext(__file__)[0] @@ -32,7 +32,7 @@ def _setup_config(self): self._find_unused_parameters = True def test_mnist(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( os.path.abspath("../../legacy_test/parallel_dygraph_mnist.py"), delta=1e-5, @@ -52,7 +52,7 @@ def _setup_config(self): self._enforce_place = "XPU" def test_mnist_xpu(self): - if fluid.core.is_compiled_with_xpu(): + if base.core.is_compiled_with_xpu(): self.check_with_place( os.path.abspath("../../legacy_test/parallel_dygraph_mnist.py"), delta=1e-4, @@ -63,7 +63,7 @@ def test_mnist_xpu(self): class TestParallelDygraphMnistSpawn(TestDistSpawnRunner): def test_mnist_with_spawn(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_dist_result_with_spawn(test_class=TestMnist, delta=1e-5) @@ -77,7 +77,7 @@ def _setup_config(self): self._find_unused_parameters = False def test_mnist(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( os.path.abspath("../../legacy_test/parallel_dygraph_mnist.py"), delta=1e-5, @@ -95,7 +95,7 @@ def _setup_config(self): self._use_fleet_api = True def test_mnist(self): - if fluid.core.is_compiled_with_xpu(): + if base.core.is_compiled_with_xpu(): self.check_with_place( os.path.abspath("../../legacy_test/parallel_dygraph_mnist.py"), delta=1e-4, diff --git a/test/collective/fleet/test_parallel_dygraph_no_sync.py b/test/collective/fleet/test_parallel_dygraph_no_sync.py index 180eb65cffc42..05b575afe9900 100644 --- a/test/collective/fleet/test_parallel_dygraph_no_sync.py +++ b/test/collective/fleet/test_parallel_dygraph_no_sync.py @@ -21,7 +21,7 @@ from parallel_dygraph_no_sync_control_flow import TestNoSyncControlFlow from parallel_dygraph_no_sync_unused_params import TestNoSyncUnusedParam -from paddle import fluid +from paddle import base flag_name = os.path.splitext(__file__)[0] @@ -34,7 +34,7 @@ def _setup_config(self): 
self._find_unused_parameters = False def test_no_sync(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( "parallel_dygraph_no_sync.py", delta=1e-5, @@ -51,7 +51,7 @@ def _setup_config(self): self._find_unused_parameters = True def test_no_sync_ununsed_param(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( "parallel_dygraph_no_sync_unused_params.py", delta=1e-5, @@ -68,7 +68,7 @@ def _setup_config(self): self._find_unused_parameters = True def test_no_sync_control_flow(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( "parallel_dygraph_no_sync_control_flow.py", delta=1e-5, @@ -79,7 +79,7 @@ def test_no_sync_control_flow(self): class TestParallelDygraphNoSyncSpawn(TestDistSpawnRunner): def test_no_sync_with_spawn(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_dist_result_with_spawn(test_class=TestNoSync, delta=1e-5) @@ -88,7 +88,7 @@ def _args_config(self, args): args.find_unused_parameters = True def test_no_sync_with_spawn(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_dist_result_with_spawn( test_class=TestNoSyncUnusedParam, delta=1e-5 ) @@ -99,7 +99,7 @@ def _args_config(self, args): args.find_unused_parameters = True def test_no_sync_with_spawn(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_dist_result_with_spawn( test_class=TestNoSyncControlFlow, delta=1e-5 ) diff --git a/test/collective/fleet/test_parallel_dygraph_qat.py b/test/collective/fleet/test_parallel_dygraph_qat.py index 5457e97b27df9..a5b736ce5b291 100644 --- a/test/collective/fleet/test_parallel_dygraph_qat.py +++ b/test/collective/fleet/test_parallel_dygraph_qat.py @@ -18,7 +18,7 @@ import time import unittest -from paddle import fluid +from paddle import base from paddle.distributed.utils.launch_utils import ( TrainerProc, find_free_ports, @@ -107,8 +107,8 @@ def start_local_trainers( class TestMultipleGpus(unittest.TestCase): def run_2gpu(self, target_file_name, eager_mode=True): if ( - not fluid.core.is_compiled_with_cuda() - or fluid.core.get_cuda_device_count() == 0 + not base.core.is_compiled_with_cuda() + or base.core.get_cuda_device_count() == 0 ): return diff --git a/test/collective/fleet/test_parallel_dygraph_se_resnext.py b/test/collective/fleet/test_parallel_dygraph_se_resnext.py index 6916d72745314..124762096cef1 100644 --- a/test/collective/fleet/test_parallel_dygraph_se_resnext.py +++ b/test/collective/fleet/test_parallel_dygraph_se_resnext.py @@ -19,7 +19,7 @@ from spawn_runner_base import TestDistSpawnRunner from test_dist_base import TestDistBase -from paddle import fluid +from paddle import base flag_name = os.path.splitext(__file__)[0] @@ -31,7 +31,7 @@ def _setup_config(self): self._dygraph = True def test_se_resnext(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( "parallel_dygraph_se_resnext.py", delta=0.01, @@ -42,7 +42,7 @@ def test_se_resnext(self): class TestParallelDygraphSeResNeXtSpawn(TestDistSpawnRunner): def test_se_resnext_with_spawn(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_dist_result_with_spawn( test_class=TestSeResNeXt, delta=0.01 ) diff --git a/test/collective/fleet/test_parallel_dygraph_sparse_embedding.py 
b/test/collective/fleet/test_parallel_dygraph_sparse_embedding.py index ebcb01c3dbc2a..48be7158ea46c 100644 --- a/test/collective/fleet/test_parallel_dygraph_sparse_embedding.py +++ b/test/collective/fleet/test_parallel_dygraph_sparse_embedding.py @@ -22,7 +22,7 @@ from spawn_runner_base import TestDistSpawnRunner from test_dist_base import TestDistBase -from paddle import fluid +from paddle import base flag_name = os.path.splitext(__file__)[0] @@ -34,7 +34,7 @@ def _setup_config(self): self._dygraph = True def test_sparse_embedding(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( os.path.abspath( "../../legacy_test/parallel_dygraph_sparse_embedding.py" @@ -52,7 +52,7 @@ def _setup_config(self): self._dygraph = True def test_sparse_embedding_fp64(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( os.path.abspath( "../../legacy_test/parallel_dygraph_sparse_embedding_fp64.py" @@ -65,7 +65,7 @@ def test_sparse_embedding_fp64(self): class TestParallelDygraphSparseEmdeddingSpawn(TestDistSpawnRunner): def test_sparse_embedding_with_spawn(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_dist_result_with_spawn( test_class=TestSparseEmbedding, delta=1e-5 ) diff --git a/test/collective/fleet/test_parallel_dygraph_sparse_embedding_over_height.py b/test/collective/fleet/test_parallel_dygraph_sparse_embedding_over_height.py index ee95b428fdf26..b92707d407719 100644 --- a/test/collective/fleet/test_parallel_dygraph_sparse_embedding_over_height.py +++ b/test/collective/fleet/test_parallel_dygraph_sparse_embedding_over_height.py @@ -21,7 +21,7 @@ from legacy_test.spawn_runner_base import TestDistSpawnRunner from legacy_test.test_dist_base import TestDistBase -from paddle import fluid +from paddle import base flag_name = os.path.splitext(__file__)[0] @@ -33,7 +33,7 @@ def _setup_config(self): self._dygraph = True def test_sparse_embedding(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( os.path.abspath( "../../legacy_test/parallel_dygraph_sparse_embedding_over_height.py" @@ -46,7 +46,7 @@ def test_sparse_embedding(self): class TestParallelDygraphSparseEmdeddingOverHeightSpawn(TestDistSpawnRunner): def test_sparse_embedding_with_spawn(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_dist_result_with_spawn( test_class=TestSparseEmbeddingOverHeight, delta=1e-5 ) diff --git a/test/collective/fleet/test_parallel_dygraph_sync_batch_norm.py b/test/collective/fleet/test_parallel_dygraph_sync_batch_norm.py index 9aac85f7e494e..45e0e2e90e852 100644 --- a/test/collective/fleet/test_parallel_dygraph_sync_batch_norm.py +++ b/test/collective/fleet/test_parallel_dygraph_sync_batch_norm.py @@ -17,7 +17,7 @@ from legacy_test.test_dist_base import TestDistBase -from paddle import fluid +from paddle import base flag_name = os.path.splitext(__file__)[0] @@ -29,7 +29,7 @@ def _setup_config(self): self._dygraph = True def test_mnist(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( "parallel_dygraph_sync_batch_norm.py", delta=1e-5, diff --git a/test/collective/fleet/test_parallel_dygraph_transformer.py b/test/collective/fleet/test_parallel_dygraph_transformer.py index f856fe58aa4e6..5e03eec84ce39 100644 --- a/test/collective/fleet/test_parallel_dygraph_transformer.py +++ 
b/test/collective/fleet/test_parallel_dygraph_transformer.py @@ -17,7 +17,7 @@ from legacy_test.test_dist_base import TestDistBase -from paddle import fluid +from paddle import base flag_name = os.path.splitext(__file__)[0] @@ -29,7 +29,7 @@ def _setup_config(self): self._dygraph = True def test_transformer(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( "parallel_dygraph_transformer.py", delta=1e-5, @@ -47,7 +47,7 @@ def _setup_config(self): self._find_unused_parameters = False def test_transformer(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( "parallel_dygraph_transformer.py", delta=1e-5, diff --git a/test/collective/fleet/test_parallel_dygraph_unused_variables.py b/test/collective/fleet/test_parallel_dygraph_unused_variables.py index dfe1b997d9345..9a2e900b10a4d 100644 --- a/test/collective/fleet/test_parallel_dygraph_unused_variables.py +++ b/test/collective/fleet/test_parallel_dygraph_unused_variables.py @@ -22,7 +22,7 @@ from spawn_runner_base import TestDistSpawnRunner from test_dist_base import TestDistBase -from paddle import fluid +from paddle import base flag_name = os.path.splitext(__file__)[0] @@ -34,7 +34,7 @@ def _setup_config(self): self._dygraph = True def test_net(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( os.path.abspath( "../../legacy_test/parallel_dygraph_unused_variables.py" @@ -55,7 +55,7 @@ def _setup_config(self): class TestSparseEmbeddingUnusedVarsSpawn(TestDistSpawnRunner): def test_mnist_with_spawn(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_dist_result_with_spawn( test_class=TestSparseEmbeddingUnusedVars, delta=1e-5 ) @@ -68,7 +68,7 @@ def _setup_config(self): self._dygraph = True def test_net(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( os.path.abspath( "../../legacy_test/parallel_dygraph_none_var.py" @@ -86,7 +86,7 @@ def _setup_config(self): self._dygraph = True def test_mnist(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( os.path.abspath( "../../legacy_test/parallel_dygraph_shared_unused_var.py" diff --git a/test/collective/fleet/test_pipeline.py b/test/collective/fleet/test_pipeline.py index e86280c50073a..b8ce3d2c5e794 100644 --- a/test/collective/fleet/test_pipeline.py +++ b/test/collective/fleet/test_pipeline.py @@ -18,7 +18,7 @@ from legacy_test.test_dist_base import TestDistBase import paddle -from paddle import fluid +from paddle import base paddle.enable_static() flag_name = os.path.splitext(__file__)[0] @@ -36,7 +36,7 @@ def need_envs(self): return {} def test_dist_train(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): # TODO (sandyhouse) fix the delta value. 
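The distributed tests above share one guard pattern under the new import; a minimal sketch (illustration only, assuming the renamed helpers mirror their fluid counterparts):

import paddle
from paddle import base  # was: from paddle import fluid

# The tests above run their CUDA cases only when the build supports them,
# and fall back to CPU otherwise.
if base.core.is_compiled_with_cuda():
    place = base.CUDAPlace(0)
else:
    place = base.CPUPlace()
print("using", place, "| xpu build:", base.core.is_compiled_with_xpu())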
# Now pipeline only gets the loss value of the last # microbatch, so it is not consistable with the @@ -50,7 +50,7 @@ def test_dist_train(self): ) def test_dist_train_multi_device(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( "pipeline_mnist_multi_device.py", check_error_log=True, @@ -60,7 +60,7 @@ def test_dist_train_multi_device(self): ) def test_dist_train_one_device(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( "pipeline_mnist_one_device.py", check_error_log=True, diff --git a/test/collective/fleet/test_recv_save_op.py b/test/collective/fleet/test_recv_save_op.py index 442b15198b1ec..b032cae1f5dcf 100644 --- a/test/collective/fleet/test_recv_save_op.py +++ b/test/collective/fleet/test_recv_save_op.py @@ -23,9 +23,9 @@ from dist_test_utils import remove_ps_flag from op import Operator -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.framework import Program, program_guard +from paddle import base +from paddle.base import core +from paddle.base.framework import Program, program_guard from paddle.incubate.distributed.fleet.parameter_server.mode import ( DistributedMode, ) @@ -33,12 +33,12 @@ def run_pserver(pserver_id): remove_ps_flag(os.getpid()) - scope = fluid.core.Scope() + scope = base.core.Scope() program = Program() - with fluid.scope_guard(scope): + with base.scope_guard(scope): with program_guard(program, startup_program=Program()): # create table parameter in scope - place = fluid.CPUPlace() + place = base.CPUPlace() # create and initialize Param Variable param = scope.var('table').get_tensor() @@ -61,7 +61,7 @@ def run_pserver(pserver_id): }, ) - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(program) @@ -96,9 +96,9 @@ def _get_pserver_port(self, pid): return port def _run_nce_op_two_pserver(self, place, port0, port1, model_file): - scope = fluid.core.Scope() + scope = base.core.Scope() program = Program() - with fluid.scope_guard(scope): + with base.scope_guard(scope): with program_guard(program, startup_program=Program()): emaps = ['127.0.0.1:' + str(port0), '127.0.0.1:' + str(port1)] @@ -118,12 +118,12 @@ def _run_nce_op_two_pserver(self, place, port0, port1, model_file): remote_recv_op.run(scope, place) def _load_slice_var(self, model_file): - load_prog = fluid.Program() + load_prog = base.Program() load_block = load_prog.global_block() origin = load_block.create_var( name="var.origin", - type=fluid.core.VarDesc.VarType.LOD_TENSOR, + type=base.core.VarDesc.VarType.LOD_TENSOR, shape=[10, 8], dtype="float32", persistable=True, @@ -131,7 +131,7 @@ def _load_slice_var(self, model_file): slice0 = load_block.create_var( name="var.slice0", - type=fluid.core.VarDesc.VarType.LOD_TENSOR, + type=base.core.VarDesc.VarType.LOD_TENSOR, shape=[3, 8], dtype="float32", persistable=True, @@ -139,7 +139,7 @@ def _load_slice_var(self, model_file): slice1 = load_block.create_var( name="var.slice1", - type=fluid.core.VarDesc.VarType.LOD_TENSOR, + type=base.core.VarDesc.VarType.LOD_TENSOR, shape=[5, 8], dtype="float32", persistable=True, @@ -174,12 +174,12 @@ def _load_slice_var(self, model_file): }, ) - exe = fluid.Executor(place=fluid.CPUPlace()) + exe = base.Executor(place=base.CPUPlace()) exe.run(load_prog) - origin_var = fluid.global_scope().find_var("var.origin") - slice0_var = fluid.global_scope().find_var("var.slice0") - slice1_var = fluid.global_scope().find_var("var.slice1") + origin_var = 
base.global_scope().find_var("var.origin") + slice0_var = base.global_scope().find_var("var.slice0") + slice1_var = base.global_scope().find_var("var.slice1") origin = np.array(origin_var.get_tensor()) slice0 = np.array(slice0_var.get_tensor()) diff --git a/test/collective/fleet/test_static_model_parallel.py b/test/collective/fleet/test_static_model_parallel.py index 299fdd8370679..cc39e732e2f81 100644 --- a/test/collective/fleet/test_static_model_parallel.py +++ b/test/collective/fleet/test_static_model_parallel.py @@ -32,9 +32,9 @@ def _setup_config(self): self._pipeline_mode = True def test_dist_static_model_parallel(self): - from paddle import fluid + from paddle import base - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( "static_model_parallel_by_row.py", delta=1e-5, @@ -43,9 +43,9 @@ def test_dist_static_model_parallel(self): ) def test_dist_static_model_parallel2(self): - from paddle import fluid + from paddle import base - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( "static_model_parallel_by_col.py", delta=1e-5, @@ -54,9 +54,9 @@ def test_dist_static_model_parallel2(self): ) def test_dist_static_model_parallel3(self): - from paddle import fluid + from paddle import base - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( "static_model_parallel_embedding.py", delta=1e-5, diff --git a/test/collective/fleet/test_tcp_store.py b/test/collective/fleet/test_tcp_store.py index 29dab1f6be04d..7e054f4ebf5c7 100644 --- a/test/collective/fleet/test_tcp_store.py +++ b/test/collective/fleet/test_tcp_store.py @@ -22,7 +22,7 @@ class TestTCPStore(unittest.TestCase): def test_tcp_store(self): dist_port = int(os.getenv("PADDLE_DIST_UT_PORT", 6170)) print("get dist_port:", dist_port) - store = paddle.fluid.core.TCPStore("127.0.0.1", dist_port, True, 1, 1) + store = paddle.base.core.TCPStore("127.0.0.1", dist_port, True, 1, 1) store.add("my", 3) ret1 = store.get('my') store.add("my", 3) diff --git a/test/collective/parallel_embedding_api.py b/test/collective/parallel_embedding_api.py index 544078a855b8c..826c3dbe95fd6 100644 --- a/test/collective/parallel_embedding_api.py +++ b/test/collective/parallel_embedding_api.py @@ -19,7 +19,7 @@ ) import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet paddle.enable_static() @@ -30,7 +30,7 @@ def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): fleet.init(is_collective=True) np.random.seed(2020) # (num_embeddings, embedding_dim) = (12, 8) @@ -44,13 +44,13 @@ def get_model(self, main_prog, startup_program, rank): ) per_part_size = size[0] // 2 if rank == 0: - param_attr = paddle.fluid.ParamAttr( + param_attr = paddle.base.ParamAttr( initializer=paddle.nn.initializer.Assign( np_array[0:per_part_size, :] ), ) else: - param_attr = paddle.fluid.ParamAttr( + param_attr = paddle.base.ParamAttr( initializer=paddle.nn.initializer.Assign( np_array[per_part_size : size[0], :] ), diff --git a/test/collective/process_group_gloo.py b/test/collective/process_group_gloo.py index b6ae187cc5ffa..ae1cebbc359e2 100644 --- a/test/collective/process_group_gloo.py +++ b/test/collective/process_group_gloo.py @@ -19,7 +19,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core 
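A single-process sketch (illustration only) of the renamed TCPStore binding exercised by test_tcp_store.py and process_group_gloo.py; the port is read the same way the test does, and the trailing arguments are copied from the test above:

import os
import paddle

dist_port = int(os.getenv("PADDLE_DIST_UT_PORT", 6170))
store = paddle.base.core.TCPStore("127.0.0.1", dist_port, True, 1, 1)
store.add("my", 3)
print(store.get("my"))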
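The parallel-embedding and row-parallel tests nearby build their sliced weights with the renamed ParamAttr spelling; a sketch with illustrative shapes only:

import numpy as np
import paddle

np_array = np.random.rand(12, 8).astype('float32')  # (num_embeddings, embedding_dim)
per_part_size = np_array.shape[0] // 2
param_attr = paddle.base.ParamAttr(
    initializer=paddle.nn.initializer.Assign(np_array[0:per_part_size, :]),
)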
class TestProcessGroupFp32(unittest.TestCase): @@ -37,10 +37,10 @@ def test_create_process_group_gloo(self): nranks = paddle.distributed.ParallelEnv().nranks rank = paddle.distributed.ParallelEnv().local_rank is_master = True if rank == 0 else False - store = paddle.fluid.core.TCPStore( + store = paddle.base.core.TCPStore( "127.0.0.1", 6272, is_master, nranks, 30 ) - pg = paddle.fluid.core.ProcessGroupGloo.create(store, rank, nranks) + pg = paddle.base.core.ProcessGroupGloo.create(store, rank, nranks) # test allreduce sum # rank 0 diff --git a/test/collective/process_group_mpi.py b/test/collective/process_group_mpi.py index b5d9fd4b74377..5c0873c6234ce 100644 --- a/test/collective/process_group_mpi.py +++ b/test/collective/process_group_mpi.py @@ -27,8 +27,8 @@ _set_group_map_backend, _set_group_map_by_name, ) -from paddle.fluid import core -from paddle.fluid.framework import _set_expected_place +from paddle.base import core +from paddle.base.framework import _set_expected_place ctypes.CDLL("libmpi.so", mode=ctypes.RTLD_GLOBAL) diff --git a/test/collective/row_parallel_linear_api.py b/test/collective/row_parallel_linear_api.py index e220ad468f699..715c85181727e 100644 --- a/test/collective/row_parallel_linear_api.py +++ b/test/collective/row_parallel_linear_api.py @@ -19,7 +19,7 @@ ) import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet paddle.enable_static() @@ -30,7 +30,7 @@ def __init__(self): self.global_ring_id = 0 def get_model(self, main_prog, startup_program, rank): - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): fleet.init(is_collective=True) np.random.seed(2020) np_array = np.random.rand(1000, 16) @@ -41,13 +41,13 @@ def get_model(self, main_prog, startup_program, rank): paddle.distributed.broadcast(data, src=0) data = paddle.split(data, 2, axis=1)[rank] if rank == 0: - param_attr = paddle.fluid.ParamAttr( + param_attr = paddle.base.ParamAttr( initializer=paddle.nn.initializer.Assign( np_array[0:500, :] ), ) else: - param_attr = paddle.fluid.ParamAttr( + param_attr = paddle.base.ParamAttr( initializer=paddle.nn.initializer.Assign( np_array[500:1000, :] ), diff --git a/test/collective/test_collective_allreduce_api.py b/test/collective/test_collective_allreduce_api.py index 64d3885e8a6a9..1566449f08edc 100644 --- a/test/collective/test_collective_allreduce_api.py +++ b/test/collective/test_collective_allreduce_api.py @@ -27,7 +27,7 @@ def _setup_config(self): pass def test_allreduce_nccl(self): - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): self.check_with_place( "collective_allreduce_api.py", "allreduce", "nccl" ) @@ -60,7 +60,7 @@ def test_allreduce_nccl_with_comm_context(self): ) def test_allreduce_bkcl(self): - if paddle.fluid.core.is_compiled_with_xpu(): + if paddle.base.core.is_compiled_with_xpu(): self.check_with_place( "collective_allreduce_api.py", "allreduce", "bkcl" ) diff --git a/test/collective/test_collective_cpu_barrier_with_gloo.py b/test/collective/test_collective_cpu_barrier_with_gloo.py index cf491f173c08c..70f7d495b6412 100644 --- a/test/collective/test_collective_cpu_barrier_with_gloo.py +++ b/test/collective/test_collective_cpu_barrier_with_gloo.py @@ -19,7 +19,7 @@ from contextlib import closing import paddle -from paddle import fluid +from paddle import base port_set = set() paddle.enable_static() @@ -64,15 +64,15 @@ def barrier_func(self, id, rank_num, server_endpoint, out_dict, sleep_time): def 
barrier_op(self, id, rank_num, server_endpoint, out_dict, sleep_time): try: - main_prog = fluid.Program() - startup_prog = fluid.Program() + main_prog = base.Program() + startup_prog = base.Program() paddle.distributed.gloo_init_parallel_env( id, rank_num, server_endpoint ) - place = fluid.CPUPlace() - with fluid.program_guard(main_prog, startup_prog): + place = base.CPUPlace() + with base.program_guard(main_prog, startup_prog): paddle.distributed.barrier() - exe = fluid.Executor(place) + exe = base.Executor(place) # Run barrier to synchronize processes after starting exe.run(main_prog) # Let rank 0 sleep for one second and check that all processes diff --git a/test/collective/test_collective_reduce_api.py b/test/collective/test_collective_reduce_api.py index b5f4be7519369..d56488f124128 100644 --- a/test/collective/test_collective_reduce_api.py +++ b/test/collective/test_collective_reduce_api.py @@ -27,7 +27,7 @@ def _setup_config(self): pass def test_reduce_nccl(self): - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): self.check_with_place("collective_reduce_api.py", "reduce", "nccl") def test_reduce_nccl_with_comm_context(self): @@ -47,7 +47,7 @@ def test_reduce_nccl_with_comm_context(self): if self._nccl_version >= 21000: dtypes_to_test.append("bfloat16") for dtype in dtypes_to_test: - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): for red_type in red_types_to_test: self.check_with_place( "collective_reduce_api.py", @@ -59,7 +59,7 @@ def test_reduce_nccl_with_comm_context(self): ) def test_reduce_bkcl(self): - if paddle.fluid.core.is_compiled_with_xpu(): + if paddle.base.core.is_compiled_with_xpu(): self.check_with_place("collective_reduce_api.py", "reduce", "bkcl") def test_reduce_gloo(self): diff --git a/test/collective/test_collective_sendrecv_api.py b/test/collective/test_collective_sendrecv_api.py index 378476f36420a..7fe9e571c5dee 100644 --- a/test/collective/test_collective_sendrecv_api.py +++ b/test/collective/test_collective_sendrecv_api.py @@ -26,7 +26,7 @@ def _setup_config(self): pass # def test_sendrecv_nccl(self): - # if paddle.fluid.core.is_compiled_with_cuda(): + # if paddle.base.core.is_compiled_with_cuda(): # self.check_with_place("collective_sendrecv_api.py", "sendrecv", # "nccl") @@ -44,7 +44,7 @@ def test_sendrecv_nccl_with_comm_context(self): if self._nccl_version >= 21000: dtypes_to_test.append("bfloat16") for dtype in dtypes_to_test: - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): self.check_with_place( "collective_sendrecv_api.py", "sendrecv", diff --git a/test/collective/test_gen_nccl_id_op.py b/test/collective/test_gen_nccl_id_op.py index 846e1e672ca27..8af379307237d 100644 --- a/test/collective/test_gen_nccl_id_op.py +++ b/test/collective/test_gen_nccl_id_op.py @@ -21,7 +21,7 @@ os.environ['GLOG_vmodule'] = "gen_nccl_id_op*=10,gen_comm_id*=10" import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/contrib/test_bf16_utils.py b/test/contrib/test_bf16_utils.py index 75ce0045b39ab..049f8500c8cc6 100644 --- a/test/contrib/test_bf16_utils.py +++ b/test/contrib/test_bf16_utils.py @@ -15,8 +15,8 @@ import unittest import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.static import amp paddle.enable_static() @@ -109,13 +109,13 @@ def test_amp_lists_(self): ) def test_find_op_index(self): - 
block = fluid.default_main_program().global_block() + block = base.default_main_program().global_block() op_desc = core.OpDesc() idx = amp.fp16_utils.find_op_index(block.desc, op_desc) assert idx == -1 def test_is_in_fp32_varnames(self): - block = fluid.default_main_program().global_block() + block = base.default_main_program().global_block() var1 = block.create_var(name="X", shape=[3], dtype='float32') var2 = block.create_var(name="Y", shape=[3], dtype='float32') @@ -137,7 +137,7 @@ def test_is_in_fp32_varnames(self): assert amp.bf16.amp_utils._is_in_fp32_varnames(op1, amp_lists_2) def test_find_true_post_op(self): - block = fluid.default_main_program().global_block() + block = base.default_main_program().global_block() var1 = block.create_var(name="X", shape=[3], dtype='float32') var2 = block.create_var(name="Y", shape=[3], dtype='float32') @@ -152,9 +152,9 @@ def test_find_true_post_op(self): assert res == [op2] def test_find_true_post_op_with_search_all(self): - program = fluid.Program() + program = base.Program() block = program.current_block() - startup_block = fluid.default_startup_program().global_block() + startup_block = base.default_startup_program().global_block() var1 = block.create_var(name="X", shape=[3], dtype='float32') var2 = block.create_var(name="Y", shape=[3], dtype='float32') diff --git a/test/contrib/test_correlation.py b/test/contrib/test_correlation.py index 39f9c20da42ea..59259a278c5a2 100644 --- a/test/contrib/test_correlation.py +++ b/test/contrib/test_correlation.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid.dygraph.base import to_variable +from paddle import base +from paddle.base.dygraph.base import to_variable paddle.enable_static() @@ -81,7 +81,7 @@ def corr( class TestCorrelationOp(unittest.TestCase): def test_check_output(self): - if not fluid.core.is_compiled_with_cuda(): + if not base.core.is_compiled_with_cuda(): return np.random.seed(13) np.set_printoptions(threshold=np.inf) @@ -128,8 +128,8 @@ def test_check_output(self): optimizer = paddle.optimizer.Momentum(0.0001, 0.9) optimizer.minimize(loss) - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) + place = base.CUDAPlace(0) + exe = base.Executor(place) res = exe.run( feed={'x1': x1_np, 'x2': x2_np}, fetch_list=[out.name, loss.name] ) @@ -156,14 +156,14 @@ def forward(self, x1, x2): class TestCorrelationOpDyGraph(unittest.TestCase): def test_check_output(self): - if not fluid.core.is_compiled_with_cuda(): + if not base.core.is_compiled_with_cuda(): return np.random.seed(13) np.set_printoptions(threshold=np.inf) x_shape = (2, 10, 3, 3) x_type = 'float32' - place = fluid.CUDAPlace(0) - with fluid.dygraph.guard(place): + place = base.CUDAPlace(0) + with base.dygraph.guard(place): x1_np = np.random.randn(2, 3, 4, 5).astype(x_type) x2_np = np.random.randn(2, 3, 4, 5).astype(x_type) out_np = corr( diff --git a/test/contrib/test_fp16_utils.py b/test/contrib/test_fp16_utils.py index 1469c73375827..028cf1563cb06 100644 --- a/test/contrib/test_fp16_utils.py +++ b/test/contrib/test_fp16_utils.py @@ -15,8 +15,8 @@ import unittest import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.static.amp import fp16_utils paddle.enable_static() @@ -24,13 +24,13 @@ class AMPTest(unittest.TestCase): def test_find_op_index(self): - block = fluid.default_main_program().global_block() + block = base.default_main_program().global_block() op_desc = core.OpDesc() idx = 
fp16_utils.find_op_index(block.desc, op_desc) assert idx == -1 def test_find_true_post_op(self): - block = fluid.default_main_program().global_block() + block = base.default_main_program().global_block() var1 = block.create_var(name="X", shape=[3], dtype='float32') var2 = block.create_var(name="Y", shape=[3], dtype='float32') diff --git a/test/contrib/test_image_classification_fp16.py b/test/contrib/test_image_classification_fp16.py index 9aac48b5fe2dd..f04df38b03234 100644 --- a/test/contrib/test_image_classification_fp16.py +++ b/test/contrib/test_image_classification_fp16.py @@ -27,7 +27,7 @@ import nets import paddle -from paddle import fluid +from paddle import base from paddle.static.amp import decorate paddle.enable_static() @@ -110,11 +110,11 @@ def train(net_type, use_cuda, save_dirname, is_local): classdim = 10 data_shape = [3, 32, 32] - train_program = fluid.Program() - startup_prog = fluid.Program() + train_program = base.Program() + startup_prog = base.Program() train_program.random_seed = 123 startup_prog.random_seed = 456 - with fluid.program_guard(train_program, startup_prog): + with base.program_guard(train_program, startup_prog): images = paddle.static.data( name='pixel', shape=[-1] + data_shape, dtype='float32' ) @@ -167,9 +167,9 @@ def train(net_type, use_cuda, save_dirname, is_local): paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE ) - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) - feeder = fluid.DataFeeder(place=place, feed_list=[images, label]) + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() + exe = base.Executor(place) + feeder = base.DataFeeder(place=place, feed_list=[images, label]) def train_loop(main_program): exe.run(startup_prog) @@ -257,11 +257,11 @@ def infer(use_cuda, save_dirname=None): if save_dirname is None: return - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() + exe = base.Executor(place) - inference_scope = fluid.core.Scope() - with fluid.scope_guard(inference_scope): + inference_scope = base.core.Scope() + with base.scope_guard(inference_scope): # Use paddle.static.io.load_inference_model to obtain the inference program desc, # the feed_target_names (the names of variables that will be fed # data using feed operators), and the fetch_targets (variables that @@ -305,7 +305,7 @@ def tearDown(self): self.temp_dir.cleanup() def main(self, net_type, use_cuda, is_local=True): - if use_cuda and not fluid.core.is_compiled_with_cuda(): + if use_cuda and not base.core.is_compiled_with_cuda(): return # Directory for saving the trained model @@ -477,11 +477,11 @@ def test_resnet_cuda(self): @contextlib.contextmanager def scope_prog_guard(self): - prog = fluid.Program() - startup_prog = fluid.Program() - scope = fluid.core.Scope() - with fluid.scope_guard(scope): - with fluid.program_guard(prog, startup_prog): + prog = base.Program() + startup_prog = base.Program() + scope = base.core.Scope() + with base.scope_guard(scope): + with base.program_guard(prog, startup_prog): yield @@ -490,7 +490,7 @@ def decorate_with_data_loader(self): main_prog = paddle.static.Program() start_prog = paddle.static.Program() with paddle.static.program_guard(main_prog, start_prog): - with paddle.fluid.unique_name.guard(): + with paddle.base.unique_name.guard(): image = paddle.static.data( name='image', shape=[-1, 3, 224, 224], dtype='float32' ) diff --git a/test/contrib/test_multi_precision_fp16_train.py 
b/test/contrib/test_multi_precision_fp16_train.py index 0b5f58e550462..137f2269173f3 100644 --- a/test/contrib/test_multi_precision_fp16_train.py +++ b/test/contrib/test_multi_precision_fp16_train.py @@ -18,7 +18,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.io import Dataset from paddle.static.amp.fp16_utils import cast_model_to_fp16 @@ -99,11 +99,11 @@ def train(use_pure_fp16=True, use_nesterov=False, optimizer=""): data_shape = [3, 32, 32] PASS_NUM = 1 - train_program = fluid.Program() - startup_prog = fluid.Program() + train_program = base.Program() + startup_prog = base.Program() train_program.random_seed = 123 startup_prog.random_seed = 456 - with fluid.program_guard(train_program, startup_prog): + with base.program_guard(train_program, startup_prog): images = paddle.static.data( name='pixel', shape=[-1] + data_shape, dtype='float32' ) @@ -160,9 +160,9 @@ def train(use_pure_fp16=True, use_nesterov=False, optimizer=""): drop_last=True, ) - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) - feeder = fluid.DataFeeder(place=place, feed_list=[images, label]) + place = base.CUDAPlace(0) + exe = base.Executor(place) + feeder = base.DataFeeder(place=place, feed_list=[images, label]) def train_loop(): exe.run(startup_prog) @@ -206,7 +206,7 @@ def train_loop(): class TestImageMultiPrecision(unittest.TestCase): def test_resnet_pure_fp16(self): - if not fluid.core.is_compiled_with_cuda(): + if not base.core.is_compiled_with_cuda(): return def do_test(use_nesterov=False, optimizer=""): @@ -263,11 +263,11 @@ def do_test(use_nesterov=False, optimizer=""): @contextlib.contextmanager def scope_prog_guard(self): - prog = fluid.Program() - startup_prog = fluid.Program() - scope = fluid.core.Scope() - with fluid.scope_guard(scope): - with fluid.program_guard(prog, startup_prog): + prog = base.Program() + startup_prog = base.Program() + scope = base.core.Scope() + with base.scope_guard(scope): + with base.program_guard(prog, startup_prog): yield @@ -276,7 +276,7 @@ def decorate_with_data_loader(self): main_prog = paddle.static.Program() start_prog = paddle.static.Program() with paddle.static.program_guard(main_prog, start_prog): - with paddle.fluid.unique_name.guard(): + with paddle.base.unique_name.guard(): image = paddle.static.data( name='image', shape=[-1, 3, 224, 224], dtype='float32' ) @@ -301,14 +301,14 @@ def decorate_with_data_loader(self): block = main_prog.global_block() for op in block.ops: if op.type == "mul": - op._set_attr('in_dtype', fluid.core.VarDesc.VarType.FP32) - op._set_attr('out_dtype', fluid.core.VarDesc.VarType.FP32) - op._set_attr('dtype', fluid.core.VarDesc.VarType.FP32) + op._set_attr('in_dtype', base.core.VarDesc.VarType.FP32) + op._set_attr('out_dtype', base.core.VarDesc.VarType.FP32) + op._set_attr('dtype', base.core.VarDesc.VarType.FP32) cast_model_to_fp16(main_prog, use_fp16_guard=False) def test_non_iterable_dataloader(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.decorate_with_data_loader() diff --git a/test/custom_kernel/custom_kernel_dot_c_setup.py b/test/custom_kernel/custom_kernel_dot_c_setup.py index c8cb7f4de4002..da6bd72e3d766 100644 --- a/test/custom_kernel/custom_kernel_dot_c_setup.py +++ b/test/custom_kernel/custom_kernel_dot_c_setup.py @@ -55,7 +55,7 @@ def build_extensions(self): # libs path paddle_custom_kernel_library_dir = [ - os.path.join(site_packages_path, 'paddle', 'fluid'), + os.path.join(site_packages_path, 'paddle', 'base'), ] # libs diff 
--git a/test/custom_kernel/custom_kernel_dot_setup.py b/test/custom_kernel/custom_kernel_dot_setup.py index 7bf6f2fbe6f2a..cce752284ef05 100644 --- a/test/custom_kernel/custom_kernel_dot_setup.py +++ b/test/custom_kernel/custom_kernel_dot_setup.py @@ -56,7 +56,7 @@ def build_extensions(self): # libs path paddle_custom_kernel_library_dir = [ - os.path.join(path, 'paddle', 'fluid') for path in site_packages_path + os.path.join(path, 'paddle', 'base') for path in site_packages_path ] # libs diff --git a/test/custom_op/custom_raw_op_kernel_op_setup.py b/test/custom_op/custom_raw_op_kernel_op_setup.py index bd035a0e3efe6..41af03872ef31 100644 --- a/test/custom_op/custom_raw_op_kernel_op_setup.py +++ b/test/custom_op/custom_raw_op_kernel_op_setup.py @@ -17,7 +17,7 @@ from utils import extra_compile_args, paddle_includes import paddle -from paddle.fluid import core +from paddle.base import core from paddle.utils.cpp_extension import CppExtension, CUDAExtension, setup if paddle.is_compiled_with_cuda(): diff --git a/test/custom_op/test_custom_concat.py b/test/custom_op/test_custom_concat.py index a1d93de4f2d3f..4fa1bcb618db8 100644 --- a/test/custom_op/test_custom_concat.py +++ b/test/custom_op/test_custom_concat.py @@ -31,9 +31,9 @@ run_cmd(cmd, True) if os.name == 'nt': - test_include = "..\\python\\paddle\\fluid\\tests\\custom_op" + test_include = "..\\python\\paddle\\base\\tests\\custom_op" else: - test_include = "../python/paddle/fluid/tests/custom_op" + test_include = "../python/paddle/base/tests/custom_op" paddle_includes.append(test_include) custom_ops = load( diff --git a/test/custom_op/test_custom_conj.py b/test/custom_op/test_custom_conj.py index f51038ae1b34c..6b4677175cd83 100644 --- a/test/custom_op/test_custom_conj.py +++ b/test/custom_op/test_custom_conj.py @@ -42,8 +42,8 @@ def is_complex(dtype): return ( - dtype == paddle.fluid.core.VarDesc.VarType.COMPLEX64 - or dtype == paddle.fluid.core.VarDesc.VarType.COMPLEX128 + dtype == paddle.base.core.VarDesc.VarType.COMPLEX64 + or dtype == paddle.base.core.VarDesc.VarType.COMPLEX128 ) diff --git a/test/custom_runtime/process_group_xccl.py b/test/custom_runtime/process_group_xccl.py index 0e4181ba04ad3..b753d0c3ff485 100644 --- a/test/custom_runtime/process_group_xccl.py +++ b/test/custom_runtime/process_group_xccl.py @@ -18,14 +18,14 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core def init_process_group(strategy=None): nranks = paddle.distributed.ParallelEnv().nranks rank = paddle.distributed.ParallelEnv().local_rank is_master = True if rank == 0 else False - store = paddle.fluid.core.TCPStore("127.0.0.1", 6173, is_master, nranks) + store = paddle.base.core.TCPStore("127.0.0.1", 6173, is_master, nranks) pg_group = core.ProcessGroupCustom.create( store, paddle.distributed.ParallelEnv().device_type, @@ -100,13 +100,13 @@ def test_create_process_group_xccl(self): if pg.rank() == 0: task = pg.broadcast(tensor_x, 0, sync_op=True) task.wait() - # paddle.fluid.core._custom_device_synchronize("custom_cpu", -1) + # paddle.base.core._custom_device_synchronize("custom_cpu", -1) assert task.is_completed() # assert np.array_equal(broadcast_result, tensor_x) else: task = pg.broadcast(tensor_y, 0, sync_op=True) task.wait() - # paddle.fluid.core._custom_device_synchronize("custom_cpu", -1) + # paddle.base.core._custom_device_synchronize("custom_cpu", -1) assert task.is_completed() # assert np.array_equal(broadcast_result, tensor_y) @@ -138,12 +138,12 @@ def test_create_process_group_xccl(self): if 
pg.rank() == 0: task = pg.all_gather(tensor_out, tensor_x, sync_op=True) task.wait() - # paddle.fluid.core._custom_device_synchronize("custom_cpu", -1) + # paddle.base.core._custom_device_synchronize("custom_cpu", -1) # rank 1 else: task = pg.all_gather(tensor_out, tensor_y, sync_op=True) task.wait() - # paddle.fluid.core._custom_device_synchronize("custom_cpu", -1) + # paddle.base.core._custom_device_synchronize("custom_cpu", -1) out_1 = paddle.slice(tensor_out, [0], [0], [out_shape[0] // 2]) out_2 = paddle.slice( tensor_out, [0], [out_shape[0] // 2], [out_shape[0]] @@ -169,12 +169,12 @@ def test_create_process_group_xccl(self): if pg.rank() == 0: task = pg.alltoall(tensor_x, tensor_out1) task.wait() - # paddle.fluid.core._custom_device_synchronize("custom_cpu", -1) + # paddle.base.core._custom_device_synchronize("custom_cpu", -1) # rank 1 else: task = pg.alltoall(tensor_y, tensor_out2) task.wait() - # paddle.fluid.core._custom_device_synchronize("custom_cpu", -1) + # paddle.base.core._custom_device_synchronize("custom_cpu", -1) out1_2 = paddle.slice( tensor_out1, [0], [self.shape[0] // 2], [self.shape[0]] ) @@ -195,12 +195,12 @@ def test_create_process_group_xccl(self): if pg.rank() == 0: task = pg.reduce(tensor_x, 0) task.wait() - # paddle.fluid.core._custom_device_synchronize("custom_cpu", -1) + # paddle.base.core._custom_device_synchronize("custom_cpu", -1) # rank 1 else: task = pg.reduce(tensor_y, 0) task.wait() - # paddle.fluid.core._custom_device_synchronize("custom_cpu", -1) + # paddle.base.core._custom_device_synchronize("custom_cpu", -1) # if pg.rank() == 0: # assert np.array_equal(tensor_x, sum_result) print("test reduce sum api ok\n") @@ -216,12 +216,12 @@ def test_create_process_group_xccl(self): if pg.rank() == 0: task = pg.scatter(tensor_x, tensor_y, 0) task.wait() - # paddle.fluid.core._custom_device_synchronize("custom_cpu", -1) + # paddle.base.core._custom_device_synchronize("custom_cpu", -1) # rank 1 else: task = pg.scatter(tensor_x, tensor_y, 0) task.wait() - # paddle.fluid.core._custom_device_synchronize("custom_cpu", -1) + # paddle.base.core._custom_device_synchronize("custom_cpu", -1) out1 = paddle.slice(tensor_x, [0], [0], [self.shape[0]]) out2 = paddle.slice(tensor_x, [0], [self.shape[0]], [self.shape[0] * 2]) # if pg.rank() == 0: diff --git a/test/custom_runtime/test_custom_op_setup.py b/test/custom_runtime/test_custom_op_setup.py index 6ed6fd79e4ab3..5686b87afbbd8 100644 --- a/test/custom_runtime/test_custom_op_setup.py +++ b/test/custom_runtime/test_custom_op_setup.py @@ -133,7 +133,7 @@ def setUp(self): # [Why specific paddle_includes directory?] 
# Add paddle_includes to pass CI, for more details, - # please refer to the comments in `paddle/fluid/tests/custom_op/utils.py`` + # please refer to the comments in `paddle/base/tests/custom_op/utils.py` paddle_includes = [] for site_packages_path in getsitepackages(): paddle_includes.append( diff --git a/test/distributed_passes/auto_parallel_pass_test_base.py b/test/distributed_passes/auto_parallel_pass_test_base.py index 11dbd3f361d34..69c2d051c7db3 100644 --- a/test/distributed_passes/auto_parallel_pass_test_base.py +++ b/test/distributed_passes/auto_parallel_pass_test_base.py @@ -84,7 +84,7 @@ def _run_gpu_main(self, model, apply_pass, dump_file, **kwargs): paddle.static.Program(), paddle.static.Program() ): with paddle.static.scope_guard(scope): - with paddle.fluid.unique_name.guard(): + with paddle.base.unique_name.guard(): ( main_prog, startup_prog, @@ -109,7 +109,7 @@ def _run_gpu_main(self, model, apply_pass, dump_file, **kwargs): print(f'batch {batch_id}, outputs {output_dict}') all_fetch_values.append(fetch_values) batch_id += 1 - except paddle.fluid.core.EOFException: + except paddle.base.core.EOFException: data_loader.reset() break with open(dump_file, "wb") as f: @@ -178,7 +178,7 @@ def gen_data(): ) data_holder = [tokens, position_ids, attention_mask, labels, loss_mask] - data_loader = paddle.fluid.io.DataLoader.from_generator( + data_loader = paddle.base.io.DataLoader.from_generator( feed_list=data_holder, capacity=70, iterable=False ) data_loader.set_batch_generator(gen_data, paddle.static.cuda_places()) diff --git a/test/distributed_passes/dist_pass_test_base.py b/test/distributed_passes/dist_pass_test_base.py index 70b5356071d58..dc8da03bd6a4f 100644 --- a/test/distributed_passes/dist_pass_test_base.py +++ b/test/distributed_passes/dist_pass_test_base.py @@ -137,7 +137,7 @@ def _run_gpu_main(self, model, apply_pass, dump_file, **kwargs): paddle.static.Program(), paddle.static.Program() ): with paddle.static.scope_guard(scope): - with paddle.fluid.unique_name.guard(): + with paddle.base.unique_name.guard(): main_prog, startup_prog, inputs, outputs, reader = model( place, **kwargs ) diff --git a/test/distributed_passes/test_auto_parallel_gradient_merge_pass.py b/test/distributed_passes/test_auto_parallel_gradient_merge_pass.py index 9e90288912b2d..f1aef2b30ca7c 100644 --- a/test/distributed_passes/test_auto_parallel_gradient_merge_pass.py +++ b/test/distributed_passes/test_auto_parallel_gradient_merge_pass.py @@ -174,7 +174,7 @@ def gen_data(): ) input.stop_gradient = False data_holder = [input, label] - data_loader = paddle.fluid.io.DataLoader.from_generator( + data_loader = paddle.base.io.DataLoader.from_generator( feed_list=data_holder, capacity=70, iterable=False ) data_loader.set_batch_generator( diff --git a/test/distributed_passes/test_dist_inplace_addto_pass.py b/test/distributed_passes/test_dist_inplace_addto_pass.py index 3a07d24e8aed8..e383c8d98a270 100644 --- a/test/distributed_passes/test_dist_inplace_addto_pass.py +++ b/test/distributed_passes/test_dist_inplace_addto_pass.py @@ -47,7 +47,7 @@ class TestInplaceAddtoPass(DistPassTestBase): def init(self): self.atol = 0.0 self.rtol = 0.0 - paddle.fluid.set_flags({"FLAGS_max_inplace_grad_add": 8}) + paddle.base.set_flags({"FLAGS_max_inplace_grad_add": 8}) def get_model(self, place, batch_size=32, image_shape=[224, 224, 3]): image = paddle.static.data( diff --git a/test/distribution/test_dirichlet_op.py b/test/distribution/test_dirichlet_op.py index 0e51e4463f8d5..b2c1c15bbf91f 100644 ---
a/test/distribution/test_dirichlet_op.py +++ b/test/distribution/test_dirichlet_op.py @@ -24,7 +24,7 @@ ) import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/distribution/test_distribution_bernoulli.py b/test/distribution/test_distribution_bernoulli.py index 1a00503eed19f..0419268674cca 100644 --- a/test/distribution/test_distribution_bernoulli.py +++ b/test/distribution/test_distribution_bernoulli.py @@ -29,7 +29,7 @@ import paddle from paddle.distribution import Bernoulli from paddle.distribution.kl import kl_divergence -from paddle.fluid.data_feeder import convert_dtype +from paddle.base.data_feeder import convert_dtype np.random.seed(2023) paddle.seed(2023) @@ -132,7 +132,7 @@ def _probs_to_logits(self, probs, is_binary=False): class BernoulliTest(unittest.TestCase): def setUp(self): paddle.disable_static(self.place) - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): # just for convenience self.dtype = self.expected_dtype @@ -229,7 +229,7 @@ def init_dynamic_data(self, probs, default_dtype, dtype): ) class BernoulliTestFeature(BernoulliTest): def test_mean(self): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( self.rv_paddle.mean, self.rv_np.mean, @@ -238,7 +238,7 @@ def test_mean(self): ) def test_variance(self): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( self.rv_paddle.variance, self.rv_np.variance, @@ -265,7 +265,7 @@ def test_variance(self): ] ) def test_log_prob(self, value): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): if convert_dtype(value.dtype) == convert_dtype( self.rv_paddle.probs.dtype ): @@ -297,7 +297,7 @@ def test_log_prob(self, value): ] ) def test_prob(self, value): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): if convert_dtype(value.dtype) == convert_dtype( self.rv_paddle.probs.dtype ): @@ -331,7 +331,7 @@ def test_prob(self, value): ] ) def test_cdf(self, value): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): if convert_dtype(value.dtype) == convert_dtype( self.rv_paddle.probs.dtype ): @@ -349,7 +349,7 @@ def test_cdf(self, value): self.rv_paddle.cdf(value) def test_entropy(self): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( self.rv_paddle.entropy(), self.rv_np.entropy(), @@ -358,7 +358,7 @@ def test_entropy(self): ) def test_kl_divergence(self): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): other_probs = paddle.to_tensor([0.9], dtype=self.dtype) rv_paddle_other = Bernoulli(other_probs) @@ -489,7 +489,7 @@ def test_kl_divergence(self): ) class BernoulliTestSample(BernoulliTest): def test_sample(self): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): sample_np = self.rv_np.sample(self.shape) sample_paddle = self.rv_paddle.sample(self.shape) @@ -520,7 +520,7 @@ def test_sample(self): ) def test_rsample(self, temperature): """Compare two samples from `rsample` method, one from scipy `sample` and another from paddle `rsample`.""" - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): sample_np = self.rv_np.sample(self.shape) rsample_paddle = self.rv_paddle.rsample(self.shape, 
temperature) @@ -548,7 +548,7 @@ def test_rsample(self, temperature): ) def test_rsample_backpropagation(self): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): self.rv_paddle.probs.stop_gradient = False rsample_paddle = self.rv_paddle.rsample(self.shape) rsample_paddle = paddle.nn.functional.sigmoid(rsample_paddle) @@ -573,7 +573,7 @@ def setUp(self): ] ) def test_bad_init(self, probs, error): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): self.assertRaises(error, Bernoulli, probs) @parameterize_func( @@ -585,7 +585,7 @@ def test_bad_init(self, probs, error): ] ) def test_bad_broadcast(self, probs, value): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): rv = Bernoulli(probs) self.assertRaises(ValueError, rv.cdf, value) self.assertRaises(ValueError, rv.log_prob, value) diff --git a/test/distribution/test_distribution_beta.py b/test/distribution/test_distribution_beta.py index ecedfa2d86a70..92c581a92f5c9 100644 --- a/test/distribution/test_distribution_beta.py +++ b/test/distribution/test_distribution_beta.py @@ -45,7 +45,7 @@ def setUp(self): self._paddle_beta = paddle.distribution.Beta(alpha, beta) def test_mean(self): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( self._paddle_beta.mean, scipy.stats.beta.mean(self.alpha, self.beta), @@ -54,7 +54,7 @@ def test_mean(self): ) def test_variance(self): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( self._paddle_beta.variance, scipy.stats.beta.var(self.alpha, self.beta), @@ -66,7 +66,7 @@ def test_prob(self): value = [np.random.rand(*self._paddle_beta.alpha.shape)] for v in value: - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( self._paddle_beta.prob(paddle.to_tensor(v)), scipy.stats.beta.pdf(v, self.alpha, self.beta), @@ -78,7 +78,7 @@ def test_log_prob(self): value = [np.random.rand(*self._paddle_beta.alpha.shape)] for v in value: - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( self._paddle_beta.log_prob(paddle.to_tensor(v)), scipy.stats.beta.logpdf(v, self.alpha, self.beta), @@ -87,7 +87,7 @@ def test_log_prob(self): ) def test_entropy(self): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( self._paddle_beta.entropy(), scipy.stats.beta.entropy(self.alpha, self.beta), diff --git a/test/distribution/test_distribution_categorical.py b/test/distribution/test_distribution_categorical.py index 446321872684d..d87c72e73438c 100644 --- a/test/distribution/test_distribution_categorical.py +++ b/test/distribution/test_distribution_categorical.py @@ -18,7 +18,7 @@ from test_distribution import DistributionNumpy import paddle -from paddle import fluid +from paddle import base from paddle.distribution import Categorical, Distribution, Normal, Uniform np.random.seed(2022) @@ -56,10 +56,10 @@ class CategoricalTest(unittest.TestCase): def setUp(self, use_gpu=False, batch_size=3, dims=5): self.use_gpu = use_gpu if not use_gpu: - self.place = fluid.CPUPlace() + self.place = base.CPUPlace() self.gpu_id = -1 else: - self.place = fluid.CUDAPlace(0) + self.place = base.CUDAPlace(0) self.gpu_id = 0 self.batch_size = batch_size @@ -70,8 +70,8 @@ def setUp(self, 
use_gpu=False, batch_size=3, dims=5): self.init_dynamic_data(batch_size, dims) paddle.enable_static() - self.test_program = fluid.Program() - self.executor = fluid.Executor(self.place) + self.test_program = base.Program() + self.executor = base.Executor(self.place) self.init_static_data(batch_size, dims) def init_numpy_data(self, batch_size, dims): @@ -102,7 +102,7 @@ def init_dynamic_data(self, batch_size, dims): self.value = paddle.to_tensor(self.value_np) def init_static_data(self, batch_size, dims): - with fluid.program_guard(self.test_program): + with base.program_guard(self.test_program): self.logits_static = paddle.static.data( name='logits', shape=self.logits_shape, dtype='float32' ) @@ -168,7 +168,7 @@ def test_categorical_distribution_dygraph(self, tolerance=1e-6): def test_categorical_distribution_static(self, tolerance=1e-6): paddle.enable_static() - with fluid.program_guard(self.test_program): + with base.program_guard(self.test_program): categorical = Categorical(self.logits_static) other_categorical = Categorical(self.other_logits_static) @@ -186,7 +186,7 @@ def test_categorical_distribution_static(self, tolerance=1e-6): 'value': self.value_np, } - self.executor.run(fluid.default_startup_program()) + self.executor.run(base.default_startup_program()) fetch_list = self.executor.run( program=self.test_program, feed=feed_vars, fetch_list=fetch_list ) @@ -210,7 +210,7 @@ def init_numpy_data(self, batch_size, dims): self.value_shape = [3] def init_static_data(self, batch_size, dims): - with fluid.program_guard(self.test_program): + with base.program_guard(self.test_program): self.logits_static = paddle.static.data( name='logits', shape=self.logits_shape, dtype='float64' ) @@ -231,7 +231,7 @@ def init_dynamic_data(self, batch_size, dims): self.value = paddle.to_tensor(self.value_np) def init_static_data(self, batch_size, dims): - with fluid.program_guard(self.test_program): + with base.program_guard(self.test_program): self.logits_static = self.logits_np self.other_logits_static = self.other_logits_np self.value_static = paddle.static.data( @@ -260,7 +260,7 @@ def init_dynamic_data(self, batch_size, dims): self.value = paddle.to_tensor(self.value_np) def init_static_data(self, batch_size, dims): - with fluid.program_guard(self.test_program): + with base.program_guard(self.test_program): self.logits_static = self.logits_np self.other_logits_static = self.other_logits_np self.value_static = paddle.static.data( @@ -341,7 +341,7 @@ def init_dynamic_data(self, batch_size, dims): self.value = paddle.to_tensor(self.value_np) def init_static_data(self, batch_size, dims): - with fluid.program_guard(self.test_program): + with base.program_guard(self.test_program): self.logits_static = self.logits_np.tolist() self.other_logits_static = self.other_logits_np.tolist() self.value_static = paddle.static.data( @@ -358,7 +358,7 @@ def init_dynamic_data(self, batch_size, dims): self.value = paddle.to_tensor(self.value_np) def init_static_data(self, batch_size, dims): - with fluid.program_guard(self.test_program): + with base.program_guard(self.test_program): self.logits_static = tuple(self.logits_np.tolist()) self.other_logits_static = tuple(self.other_logits_np.tolist()) self.value_static = paddle.static.data( diff --git a/test/distribution/test_distribution_cauchy.py b/test/distribution/test_distribution_cauchy.py index d6ea9b882370a..a2ab7ca3d9b2d 100644 --- a/test/distribution/test_distribution_cauchy.py +++ b/test/distribution/test_distribution_cauchy.py @@ -29,7 +29,7 @@ import paddle from 
paddle.distribution import Cauchy from paddle.distribution.kl import kl_divergence -from paddle.fluid.data_feeder import convert_dtype +from paddle.base.data_feeder import convert_dtype np.random.seed(2023) paddle.seed(2023) @@ -91,7 +91,7 @@ def kl_divergence(self, other): class CauchyTest(unittest.TestCase): def setUp(self): paddle.disable_static(self.place) - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): # just for convenience self.dtype = self.expected_dtype @@ -275,7 +275,7 @@ class CauchyTestFeature(CauchyTest): ] ) def test_log_prob(self, value): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): if convert_dtype(value.dtype) == convert_dtype( self.rv_paddle.loc.dtype ): @@ -302,7 +302,7 @@ def test_log_prob(self, value): ] ) def test_prob(self, value): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): if convert_dtype(value.dtype) == convert_dtype( self.rv_paddle.loc.dtype ): @@ -329,7 +329,7 @@ def test_prob(self, value): ] ) def test_cdf(self, value): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): if convert_dtype(value.dtype) == convert_dtype( self.rv_paddle.loc.dtype ): @@ -347,7 +347,7 @@ def test_cdf(self, value): self.rv_paddle.cdf(value) def test_entropy(self): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( self.rv_paddle.entropy(), self.rv_np.entropy(), @@ -362,7 +362,7 @@ def test_entropy(self): ] ) def test_kl_divergence(self, loc, scale): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): # convert loc/scale to paddle's dtype(float32/float64) rv_paddle_other = Cauchy( loc=paddle.full((), loc, dtype=self.rv_paddle.loc.dtype), @@ -557,7 +557,7 @@ def test_kl_divergence(self, loc, scale): ) class CauchyTestSample(CauchyTest): def test_sample(self): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): sample_np = self.rv_np.sample(self.shape) sample_paddle = self.rv_paddle.sample(self.shape) @@ -581,7 +581,7 @@ def test_sample(self): ) def test_rsample(self): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): sample_np = self.rv_np.sample(self.shape) rsample_paddle = self.rv_paddle.rsample(self.shape) @@ -605,7 +605,7 @@ def test_rsample(self): ) def test_rsample_backpropagation(self): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): self.rv_paddle.loc.stop_gradient = False self.rv_paddle.scale.stop_gradient = False rsample_paddle = self.rv_paddle.rsample(self.shape) @@ -638,14 +638,14 @@ def setUp(self): ] ) def test_bad_init(self, loc, scale, error): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): self.assertRaises(error, Cauchy, loc, scale) def test_bad_property(self): """For property like mean/variance/stddev which is undefined in math, we should raise `ValueError` instead of `NotImplementedError`. 
""" - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): rv = Cauchy(loc=0.0, scale=1.0) with self.assertRaises(ValueError): _ = rv.mean @@ -661,7 +661,7 @@ def test_bad_property(self): ] ) def test_bad_sample_shape_type(self, shape): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): rv = Cauchy(loc=0.0, scale=1.0) with self.assertRaises(TypeError): @@ -680,7 +680,7 @@ def test_bad_sample_shape_type(self, shape): ] ) def test_bad_value_type(self, value): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): rv = Cauchy(loc=0.0, scale=1.0) with self.assertRaises(TypeError): @@ -698,7 +698,7 @@ def test_bad_value_type(self, value): ] ) def test_bad_kl_other_type(self, other): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): rv = Cauchy(loc=0.0, scale=1.0) with self.assertRaises(TypeError): @@ -714,7 +714,7 @@ def test_bad_kl_other_type(self, other): ] ) def test_bad_broadcast(self, loc, scale, value): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): rv = Cauchy(loc=loc, scale=scale) self.assertRaises(ValueError, rv.cdf, value) self.assertRaises(ValueError, rv.log_prob, value) diff --git a/test/distribution/test_distribution_dirichlet.py b/test/distribution/test_distribution_dirichlet.py index 6db297beb1106..308c703d3b568 100644 --- a/test/distribution/test_distribution_dirichlet.py +++ b/test/distribution/test_distribution_dirichlet.py @@ -39,7 +39,7 @@ def setUp(self): ) def test_mean(self): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( self._paddle_diric.mean, scipy.stats.dirichlet.mean(self.concentration), @@ -48,7 +48,7 @@ def test_mean(self): ) def test_variance(self): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( self._paddle_diric.variance, scipy.stats.dirichlet.var(self.concentration), @@ -61,7 +61,7 @@ def test_prob(self): value = [v / v.sum() for v in value] for v in value: - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( self._paddle_diric.prob(paddle.to_tensor(v)), scipy.stats.dirichlet.pdf(v, self.concentration), @@ -74,7 +74,7 @@ def test_log_prob(self): value = [v / v.sum() for v in value] for v in value: - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( self._paddle_diric.log_prob(paddle.to_tensor(v)), scipy.stats.dirichlet.logpdf(v, self.concentration), @@ -83,7 +83,7 @@ def test_log_prob(self): ) def test_entropy(self): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( self._paddle_diric.entropy(), scipy.stats.dirichlet.entropy(self.concentration), diff --git a/test/distribution/test_distribution_geometric.py b/test/distribution/test_distribution_geometric.py index 10dcf330b3b02..dd2ef33242c06 100644 --- a/test/distribution/test_distribution_geometric.py +++ b/test/distribution/test_distribution_geometric.py @@ -59,7 +59,7 @@ def setUp(self): self._paddle_geom = geometric.Geometric(probs) def test_mean(self): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( self._paddle_geom.mean, scipy.stats.geom.mean(self.probs), @@ -68,7 +68,7 @@ def 
test_mean(self): ) def test_variance(self): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( self._paddle_geom.variance, scipy.stats.geom.var(self.probs), @@ -77,7 +77,7 @@ def test_variance(self): ) def test_stddev(self): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( self._paddle_geom.stddev, scipy.stats.geom.std(self.probs), @@ -86,7 +86,7 @@ def test_stddev(self): ) def test_entropy(self): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( self._paddle_geom.entropy(), scipy.stats.geom.entropy(self.probs), @@ -181,7 +181,7 @@ def test_rsample(self): def test_back_rsample(self): sample_shape = (100000,) - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): self._paddle_geom.probs.stop_gradient = False rs_value = self._paddle_geom.rsample(sample_shape) @@ -236,7 +236,7 @@ def setUp(self): ) def test_pmf(self): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( self._paddle_geom.pmf(self.value), scipy.stats.geom.pmf(self.value, self.probs), @@ -245,7 +245,7 @@ def test_pmf(self): ) def test_log_pmf(self): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( self._paddle_geom.log_pmf(self.value), scipy.stats.geom.logpmf(self.value, self.probs), @@ -254,7 +254,7 @@ def test_log_pmf(self): ) def test_cdf(self): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( self._paddle_geom.cdf(self.value), scipy.stats.geom.cdf(self.value, self.probs), diff --git a/test/distribution/test_distribution_lognormal.py b/test/distribution/test_distribution_lognormal.py index e7069f7b2b11f..53e0ba55f4436 100644 --- a/test/distribution/test_distribution_lognormal.py +++ b/test/distribution/test_distribution_lognormal.py @@ -130,7 +130,7 @@ def test_entropy(self): ) def test_probs(self): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): probs = self.paddle_lognormal.probs(paddle.to_tensor(self.value)) np_probs = self.np_lognormal.probs(self.value) np.testing.assert_allclose( @@ -141,7 +141,7 @@ def test_probs(self): ) def test_log_prob(self): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): log_prob = self.paddle_lognormal.log_prob( paddle.to_tensor(self.value) ) diff --git a/test/distribution/test_distribution_normal.py b/test/distribution/test_distribution_normal.py index 4edc07e1a3b6a..6f13fc42ae016 100644 --- a/test/distribution/test_distribution_normal.py +++ b/test/distribution/test_distribution_normal.py @@ -22,7 +22,7 @@ from test_distribution import DistributionNumpy import paddle -from paddle import fluid +from paddle import base from paddle.distribution import Normal np.random.seed(2022) @@ -74,10 +74,10 @@ class NormalTest(unittest.TestCase): def setUp(self, use_gpu=False, batch_size=2, dims=3): self.use_gpu = use_gpu if not use_gpu: - self.place = fluid.CPUPlace() + self.place = base.CPUPlace() self.gpu_id = -1 else: - self.place = fluid.CUDAPlace(0) + self.place = base.CUDAPlace(0) self.gpu_id = 0 self.init_numpy_data(batch_size, dims) @@ -86,8 +86,8 @@ def setUp(self, use_gpu=False, batch_size=2, dims=3): self.init_dynamic_data(batch_size, dims) 
paddle.enable_static() - self.test_program = fluid.Program() - self.executor = fluid.Executor(self.place) + self.test_program = base.Program() + self.executor = base.Executor(self.place) self.init_static_data(batch_size, dims) def init_numpy_data(self, batch_size, dims): @@ -115,7 +115,7 @@ def init_static_data(self, batch_size, dims): self.static_scale = self.scale_np self.static_other_loc = self.other_loc_np self.static_other_scale = self.other_scale_np - with fluid.program_guard(self.test_program): + with base.program_guard(self.test_program): self.static_values = paddle.static.data( name='values', shape=[-1], dtype='float32' ) @@ -167,7 +167,7 @@ def test_normal_distribution_dygraph(self, sample_shape=7, tolerance=1e-6): def test_normal_distribution_static(self, sample_shape=7, tolerance=1e-6): paddle.enable_static() - with fluid.program_guard(self.test_program): + with base.program_guard(self.test_program): normal = Normal(self.static_loc, self.static_scale) sample = normal.sample([sample_shape]) @@ -189,7 +189,7 @@ def test_normal_distribution_static(self, sample_shape=7, tolerance=1e-6): 'other_scale': self.other_scale_np, } - self.executor.run(fluid.default_startup_program()) + self.executor.run(base.default_startup_program()) fetch_list = self.executor.run( program=self.test_program, feed=feed_vars, fetch_list=fetch_list ) @@ -235,7 +235,7 @@ def init_static_data(self, batch_size, dims): self.static_scale = self.scale_np self.static_other_loc = self.other_loc_np self.static_other_scale = self.other_scale_np - with fluid.program_guard(self.test_program): + with base.program_guard(self.test_program): self.static_values = paddle.static.data( name='values', shape=[-1, dims], dtype='float32' ) @@ -264,7 +264,7 @@ def init_static_data(self, batch_size, dims): self.static_scale = self.scale_np self.static_other_loc = self.other_loc_np self.static_other_scale = self.other_scale_np - with fluid.program_guard(self.test_program): + with base.program_guard(self.test_program): self.static_values = paddle.static.data( name='values', shape=[-1, dims], dtype='float32' ) @@ -300,7 +300,7 @@ def init_static_data(self, batch_size, dims): self.static_scale = self.scale_np self.static_other_loc = self.other_loc_np self.static_other_scale = self.other_scale_np - with fluid.program_guard(self.test_program): + with base.program_guard(self.test_program): self.static_values = paddle.static.data( name='values', shape=[-1, dims], dtype='float64' ) @@ -332,7 +332,7 @@ def init_dynamic_data(self, batch_size, dims): self.dynamic_other_scale = paddle.to_tensor(self.other_scale_np) def init_static_data(self, batch_size, dims): - with fluid.program_guard(self.test_program): + with base.program_guard(self.test_program): self.static_loc = paddle.static.data( name='loc', shape=[-1, dims], dtype='float32' ) @@ -380,7 +380,7 @@ def init_dynamic_data(self, batch_size, dims): ) def init_static_data(self, batch_size, dims): - with fluid.program_guard(self.test_program): + with base.program_guard(self.test_program): self.static_loc = paddle.static.data( name='loc', shape=[-1, dims], dtype='float64' ) @@ -428,7 +428,7 @@ def init_dynamic_data(self, batch_size, dims): ) def init_static_data(self, batch_size, dims): - with fluid.program_guard(self.test_program): + with base.program_guard(self.test_program): self.static_loc = paddle.static.data( name='loc', shape=[-1, dims], dtype='float64' ) @@ -475,7 +475,7 @@ def init_static_data(self, batch_size, dims): self.static_scale = self.scale_np self.static_other_loc = 
self.other_loc_np self.static_other_scale = self.other_scale_np - with fluid.program_guard(self.test_program): + with base.program_guard(self.test_program): self.static_values = paddle.static.data( name='values', shape=[-1, dims], dtype='float32' ) @@ -510,7 +510,7 @@ def init_static_data(self, batch_size, dims): self.static_scale = self.scale_np self.static_other_loc = self.other_loc_np self.static_other_scale = self.other_scale_np - with fluid.program_guard(self.test_program): + with base.program_guard(self.test_program): self.static_values = paddle.static.data( name='values', shape=[-1, dims], dtype='float32' ) diff --git a/test/distribution/test_distribution_uniform.py b/test/distribution/test_distribution_uniform.py index ca59b5118e677..8283017ab7349 100644 --- a/test/distribution/test_distribution_uniform.py +++ b/test/distribution/test_distribution_uniform.py @@ -18,7 +18,7 @@ from test_distribution import DistributionNumpy import paddle -from paddle import fluid +from paddle import base from paddle.distribution import Uniform np.random.seed(2022) @@ -56,10 +56,10 @@ class UniformTest(unittest.TestCase): def setUp(self, use_gpu=False, batch_size=5, dims=6): self.use_gpu = use_gpu if not use_gpu: - self.place = fluid.CPUPlace() + self.place = base.CPUPlace() self.gpu_id = -1 else: - self.place = fluid.CUDAPlace(0) + self.place = base.CUDAPlace(0) self.gpu_id = 0 self.init_numpy_data(batch_size, dims) @@ -68,8 +68,8 @@ def setUp(self, use_gpu=False, batch_size=5, dims=6): self.init_dynamic_data(batch_size, dims) paddle.enable_static() - self.test_program = fluid.Program() - self.executor = fluid.Executor(self.place) + self.test_program = base.Program() + self.executor = base.Executor(self.place) self.init_static_data(batch_size, dims) def init_numpy_data(self, batch_size, dims): @@ -86,7 +86,7 @@ def init_dynamic_data(self, batch_size, dims): def init_static_data(self, batch_size, dims): self.static_low = self.low_np self.static_high = self.high_np - with fluid.program_guard(self.test_program): + with base.program_guard(self.test_program): self.static_values = paddle.static.data( name='values', shape=[-1], dtype='float32' ) @@ -111,7 +111,7 @@ def compare_with_numpy(self, fetch_list, sample_shape=7, tolerance=1e-6): def test_uniform_distribution_static(self, sample_shape=7, tolerance=1e-6): paddle.enable_static() - with fluid.program_guard(self.test_program): + with base.program_guard(self.test_program): uniform = Uniform(self.static_low, self.static_high) sample = uniform.sample([sample_shape]) entropy = uniform.entropy() @@ -125,7 +125,7 @@ def test_uniform_distribution_static(self, sample_shape=7, tolerance=1e-6): 'values': self.values_np, } - self.executor.run(fluid.default_startup_program()) + self.executor.run(base.default_startup_program()) fetch_list = self.executor.run( program=self.test_program, feed=feed_vars, fetch_list=fetch_list ) @@ -168,7 +168,7 @@ def init_numpy_data(self, batch_size, dims): def init_static_data(self, batch_size, dims): self.static_low = self.low_np self.static_high = self.high_np - with fluid.program_guard(self.test_program): + with base.program_guard(self.test_program): self.static_values = paddle.static.data( name='values', shape=[-1, dims], dtype='float32' ) @@ -186,7 +186,7 @@ def init_numpy_data(self, batch_size, dims): def init_static_data(self, batch_size, dims): self.static_low = self.low_np self.static_high = self.high_np - with fluid.program_guard(self.test_program): + with base.program_guard(self.test_program): self.static_values = 
paddle.static.data( name='values', shape=[-1, dims], dtype='float32' ) @@ -209,7 +209,7 @@ def init_dynamic_data(self, batch_size, dims): def init_static_data(self, batch_size, dims): self.static_low = self.low_np self.static_high = self.high_np - with fluid.program_guard(self.test_program): + with base.program_guard(self.test_program): self.static_values = paddle.static.data( name='values', shape=[-1, dims], dtype='float64' ) @@ -230,7 +230,7 @@ def init_dynamic_data(self, batch_size, dims): self.dynamic_values = paddle.to_tensor(self.values_np) def init_static_data(self, batch_size, dims): - with fluid.program_guard(self.test_program): + with base.program_guard(self.test_program): self.static_low = paddle.static.data( name='low', shape=[-1, dims], dtype='float32' ) @@ -257,7 +257,7 @@ def init_dynamic_data(self, batch_size, dims): self.dynamic_values = paddle.to_tensor(self.values_np, dtype='float64') def init_static_data(self, batch_size, dims): - with fluid.program_guard(self.test_program): + with base.program_guard(self.test_program): self.static_low = paddle.static.data( name='low', shape=[-1, dims], dtype='float64' ) @@ -284,7 +284,7 @@ def init_dynamic_data(self, batch_size, dims): self.dynamic_values = paddle.to_tensor(self.values_np, dtype='float32') def init_static_data(self, batch_size, dims): - with fluid.program_guard(self.test_program): + with base.program_guard(self.test_program): self.static_low = paddle.static.data( name='low', shape=[-1, dims], dtype='float64' ) @@ -309,7 +309,7 @@ def init_numpy_data(self, batch_size, dims): def init_static_data(self, batch_size, dims): self.static_low = self.low_np self.static_high = self.high_np - with fluid.program_guard(self.test_program): + with base.program_guard(self.test_program): self.static_values = paddle.static.data( name='values', shape=[-1, dims], dtype='float32' ) @@ -331,7 +331,7 @@ def init_numpy_data(self, batch_size, dims): def init_static_data(self, batch_size, dims): self.static_low = self.low_np self.static_high = self.high_np - with fluid.program_guard(self.test_program): + with base.program_guard(self.test_program): self.static_values = paddle.static.data( name='values', shape=[-1, dims], dtype='float32' ) @@ -353,7 +353,7 @@ def init_numpy_data(self, batch_size, dims): def init_static_data(self, batch_size, dims): self.static_low = self.low_np self.static_high = self.high_np - with fluid.program_guard(self.test_program): + with base.program_guard(self.test_program): self.static_values = paddle.static.data( name='values', shape=[-1, dims], dtype='float32' ) diff --git a/test/distribution/test_kl.py b/test/distribution/test_kl.py index 444f469e0a630..b1f0b16b3437a 100644 --- a/test/distribution/test_kl.py +++ b/test/distribution/test_kl.py @@ -52,7 +52,7 @@ def setUp(self): ) def test_kl_divergence(self): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( paddle.distribution.kl_divergence(self.p, self.q), self.scipy_kl_beta_beta(self.a1, self.b1, self.a2, self.b2), @@ -87,7 +87,7 @@ def setUp(self): self.q = paddle.distribution.Dirichlet(paddle.to_tensor(self.conc2)) def test_kl_divergence(self): - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( paddle.distribution.kl_divergence(self.p, self.q), self.scipy_kl_diric_diric(self.conc1, self.conc2), diff --git a/test/dygraph_to_static/bert_dygraph_model.py b/test/dygraph_to_static/bert_dygraph_model.py index 
7406f2bfaa5a5..d10fa881270c0 100644 --- a/test/dygraph_to_static/bert_dygraph_model.py +++ b/test/dygraph_to_static/bert_dygraph_model.py @@ -15,7 +15,7 @@ from transformer_dygraph_model import MultiHeadAttention, PrePostProcessLayer import paddle -from paddle import fluid +from paddle import base from paddle.jit.api import to_static from paddle.nn import Layer, Linear @@ -35,7 +35,7 @@ def __init__( self._i2h = Linear( in_features=d_model, out_features=d_inner_hid, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( name=name + '_fc_0.w_0', initializer=param_initializer ), bias_attr=name + '_fc_0.b_0', @@ -44,7 +44,7 @@ def __init__( self._h2o = Linear( in_features=d_inner_hid, out_features=d_model, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( name=name + '_fc_1.w_0', initializer=param_initializer ), bias_attr=name + '_fc_1.b_0', @@ -207,7 +207,7 @@ def __init__(self, config, return_pooled_out=True, use_fp16=False): self._src_emb = paddle.nn.Embedding( self._voc_size, self._emb_size, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( name=self._word_emb_name, initializer=self._param_initializer ), ) @@ -215,7 +215,7 @@ def __init__(self, config, return_pooled_out=True, use_fp16=False): self._pos_emb = paddle.nn.Embedding( self._max_position_seq_len, self._emb_size, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( name=self._pos_emb_name, initializer=self._param_initializer ), ) @@ -223,7 +223,7 @@ def __init__(self, config, return_pooled_out=True, use_fp16=False): self._sent_emb = paddle.nn.Embedding( self._sent_types, self._emb_size, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( name=self._sent_emb_name, initializer=self._param_initializer ), ) @@ -231,7 +231,7 @@ def __init__(self, config, return_pooled_out=True, use_fp16=False): self.pooled_fc = Linear( in_features=self._emb_size, out_features=self._emb_size, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( name="pooled_fc.w_0", initializer=self._param_initializer ), bias_attr="pooled_fc.b_0", @@ -332,13 +332,13 @@ def __init__( self.pooled_fc = Linear( in_features=self._emb_size, out_features=self._emb_size, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( name="mask_lm_trans_fc.w_0", initializer=self._param_initializer ), bias_attr="mask_lm_trans_fc.b_0", ) - self.mask_lm_out_bias_attr = fluid.ParamAttr( + self.mask_lm_out_bias_attr = base.ParamAttr( name="mask_lm_out_fc.b_0", initializer=paddle.nn.initializer.Constant(value=0.0), ) @@ -347,7 +347,7 @@ def __init__( self.out_fc = Linear( in_features=self._emb_size, out_features=self._voc_size, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( name="mask_lm_out_fc.w_0", initializer=self._param_initializer, ), @@ -364,7 +364,7 @@ def __init__( self.next_sent_fc = Linear( in_features=self._emb_size, out_features=2, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( name="next_sent_fc.w_0", initializer=self._param_initializer ), bias_attr="next_sent_fc.b_0", diff --git a/test/dygraph_to_static/darknet.py b/test/dygraph_to_static/darknet.py index 396dfab950b0d..7606407a18615 100644 --- a/test/dygraph_to_static/darknet.py +++ b/test/dygraph_to_static/darknet.py @@ -13,7 +13,7 @@ # limitations under the License. 
import paddle -from paddle.fluid.param_attr import ParamAttr +from paddle.base.param_attr import ParamAttr from paddle.nn import BatchNorm from paddle.regularizer import L2Decay diff --git a/test/dygraph_to_static/dygraph_to_static_util.py b/test/dygraph_to_static/dygraph_to_static_util.py index 69a7c3ba93fc9..c95cab80f1112 100644 --- a/test/dygraph_to_static/dygraph_to_static_util.py +++ b/test/dygraph_to_static/dygraph_to_static_util.py @@ -19,7 +19,7 @@ import numpy as np from paddle import set_flags, static -from paddle.fluid import core +from paddle.base import core @contextlib.contextmanager diff --git a/test/dygraph_to_static/ifelse_simple_func.py b/test/dygraph_to_static/ifelse_simple_func.py index 14540375cfb05..c72ccca1ac7ae 100644 --- a/test/dygraph_to_static/ifelse_simple_func.py +++ b/test/dygraph_to_static/ifelse_simple_func.py @@ -13,7 +13,7 @@ # limitations under the License. import paddle -from paddle import fluid +from paddle import base def add_fn(x): @@ -378,7 +378,7 @@ def __init__(self): def if_tensor_case(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) mean = paddle.mean(x) # It is equivalent to `if mean != 0` diff --git a/test/dygraph_to_static/predictor_utils.py b/test/dygraph_to_static/predictor_utils.py index 8f0420f50d3bf..7ecb516f2c77d 100644 --- a/test/dygraph_to_static/predictor_utils.py +++ b/test/dygraph_to_static/predictor_utils.py @@ -16,8 +16,8 @@ import numpy as np -from paddle import fluid -from paddle.fluid.core import AnalysisConfig, create_paddle_predictor +from paddle import base +from paddle.base.core import AnalysisConfig, create_paddle_predictor class PredictorTools: @@ -47,7 +47,7 @@ def _load_model_and_set_config(self): else: config = AnalysisConfig(os.path.join(self.model_path)) - if fluid.is_compiled_with_cuda(): + if base.is_compiled_with_cuda(): config.enable_use_gpu(100, 0) else: config.disable_gpu() @@ -78,7 +78,7 @@ def _get_analysis_outputs(self, config): tensor = predictor.get_input_tensor(name) feed_data = self.feeds_var[i] tensor.copy_from_cpu(np.array(feed_data)) - if type(feed_data) == fluid.LoDTensor: + if type(feed_data) == base.LoDTensor: tensor.set_lod(feed_data.lod()) # ensure no diff in multiple repeat times diff --git a/test/dygraph_to_static/seq2seq_dygraph_model.py b/test/dygraph_to_static/seq2seq_dygraph_model.py index 77eda7ab401bf..48c2ced2c2d8f 100644 --- a/test/dygraph_to_static/seq2seq_dygraph_model.py +++ b/test/dygraph_to_static/seq2seq_dygraph_model.py @@ -16,9 +16,9 @@ from seq2seq_utils import Seq2SeqModelHyperParams as args import paddle -from paddle import fluid -from paddle.fluid import ParamAttr -from paddle.fluid.dygraph.base import to_variable +from paddle import base +from paddle.base import ParamAttr +from paddle.base.dygraph.base import to_variable from paddle.jit.api import to_static from paddle.nn import Embedding, Layer @@ -120,7 +120,7 @@ def __init__( self.src_embeder = Embedding( self.src_vocab_size, self.hidden_size, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=uniform_initializer(init_scale) ), ) @@ -129,7 +129,7 @@ def __init__( self.tar_vocab_size, self.hidden_size, sparse=False, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=uniform_initializer(init_scale) ), ) @@ -547,7 +547,7 @@ def __init__( self.src_embeder = Embedding( self.src_vocab_size, self.hidden_size, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( name='source_embedding', initializer=uniform_initializer(init_scale), ), @@ -557,7 +557,7 @@ 
def __init__( self.tar_vocab_size, self.hidden_size, sparse=False, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( name='target_embedding', initializer=uniform_initializer(init_scale), ), diff --git a/test/dygraph_to_static/simnet_dygraph_model.py b/test/dygraph_to_static/simnet_dygraph_model.py index 341785a9d6a5a..9bf53450e6fa0 100644 --- a/test/dygraph_to_static/simnet_dygraph_model.py +++ b/test/dygraph_to_static/simnet_dygraph_model.py @@ -15,7 +15,7 @@ from functools import reduce import paddle -import paddle.fluid.param_attr as attr +import paddle.base.param_attr as attr from paddle.jit.api import to_static from paddle.nn import Layer @@ -301,12 +301,12 @@ class FC(Layer): Examples: .. code-block:: python - from paddle.fluid.dygraph.base import to_variable - import paddle.fluid as fluid - from paddle.fluid.dygraph import FC + from paddle.base.dygraph.base import to_variable + import paddle.base as base + from paddle.base.dygraph import FC import numpy as np data = np.random.uniform(-1, 1, [30, 10, 32]).astype('float32') - with fluid.dygraph.guard(): + with base.dygraph.guard(): fc = FC("fc", 64, num_flatten_dims=2) data = to_variable(data) conv = fc(data) diff --git a/test/dygraph_to_static/test_assert.py b/test/dygraph_to_static/test_assert.py index d57084aa06f62..dc01413d0c8be 100644 --- a/test/dygraph_to_static/test_assert.py +++ b/test/dygraph_to_static/test_assert.py @@ -18,13 +18,13 @@ from dygraph_to_static_util import test_and_compare_with_new_ir import paddle -from paddle import fluid +from paddle import base from paddle.jit.api import to_static @paddle.jit.to_static def dyfunc_assert_variable(x): - x_v = fluid.dygraph.to_variable(x) + x_v = base.dygraph.to_variable(x) assert x_v @@ -38,10 +38,10 @@ def _run(self, func, x, with_exception, to_static): paddle.jit.enable_to_static(to_static) if with_exception: with self.assertRaises(BaseException): - with fluid.dygraph.guard(): + with base.dygraph.guard(): func(x) else: - with fluid.dygraph.guard(): + with base.dygraph.guard(): func(x) def _run_dy_static(self, func, x, with_exception): diff --git a/test/dygraph_to_static/test_ast_util.py b/test/dygraph_to_static/test_ast_util.py index d17780029bd81..52920d81433c6 100644 --- a/test/dygraph_to_static/test_ast_util.py +++ b/test/dygraph_to_static/test_ast_util.py @@ -26,7 +26,7 @@ import paddle import paddle.nn.functional as F -from paddle import fluid +from paddle import base from paddle.jit.dy2static.utils import ast_to_func from paddle.utils import gast @@ -55,8 +55,8 @@ def test_ast2func_dygraph(self): funcs = [dyfunc_with_if_else, dyfunc_with_if_else2, nested_if_else] x_data = np.random.random([10, 16]).astype('float32') for func in funcs: - with fluid.dygraph.guard(): - x_v = fluid.dygraph.to_variable(x_data) + with base.dygraph.guard(): + x_v = base.dygraph.to_variable(x_data) true_ret = func(x_v).numpy() test_ret = self._ast2func(func)(x_v).numpy() self.assertTrue((true_ret == test_ret).all()) @@ -71,12 +71,12 @@ def func(x): return loss x_data = np.random.random([10, 16]).astype('float32') - main_program = fluid.Program() - with fluid.program_guard(main_program): + main_program = base.Program() + with base.program_guard(main_program): x_v = paddle.assign(x_data) true_ret = func(x_v) test_ret = self._ast2func(func)(x_v) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) ret = exe.run(main_program, fetch_list=[true_ret, test_ret]) self.assertTrue((ret[0] == ret[1]).all()) diff --git 
a/test/dygraph_to_static/test_basic_api_transformation.py b/test/dygraph_to_static/test_basic_api_transformation.py index 17e72b793f144..efa9caa17dd51 100644 --- a/test/dygraph_to_static/test_basic_api_transformation.py +++ b/test/dygraph_to_static/test_basic_api_transformation.py @@ -19,9 +19,9 @@ from dygraph_to_static_util import test_and_compare_with_new_ir import paddle -from paddle import fluid, to_tensor -from paddle.fluid import dygraph -from paddle.fluid.dygraph import to_variable +from paddle import base, to_tensor +from paddle.base import dygraph +from paddle.base.dygraph import to_variable from paddle.jit.api import dygraph_to_static_func from paddle.jit.dy2static.utils import is_dygraph_api from paddle.utils import gast @@ -35,7 +35,7 @@ def dyfunc_to_variable(x): - res = fluid.dygraph.to_variable(x, name=None, zero_copy=None) + res = base.dygraph.to_variable(x, name=None, zero_copy=None) return res @@ -82,24 +82,24 @@ def setUp(self): dyfunc_to_variable_3, ] self.place = ( - fluid.CUDAPlace(0) - if fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.is_compiled_with_cuda() + else base.CPUPlace() ) def get_dygraph_output(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): res = self.dygraph_func(self.input).numpy() return res @test_and_compare_with_new_ir(True) def get_static_output(self): - main_program = fluid.Program() + main_program = base.Program() main_program.random_seed = SEED - with fluid.program_guard(main_program): + with base.program_guard(main_program): static_out = dygraph_to_static_func(self.dygraph_func)(self.input) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) static_res = exe.run(main_program, fetch_list=static_out) return static_res[0] @@ -118,17 +118,17 @@ def dyfunc_BilinearTensorProduct(layer1, layer2): 5, 4, 1000, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.99) ), - bias_attr=fluid.ParamAttr( + bias_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.5) ), ) res = bilinearTensorProduct( - fluid.dygraph.base.to_variable(layer1), - fluid.dygraph.base.to_variable(layer2), + base.dygraph.base.to_variable(layer1), + base.dygraph.base.to_variable(layer2), ) return res @@ -157,7 +157,7 @@ def dyfunc_Conv3D(input): weight_attr=paddle.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.99) ), - bias_attr=fluid.ParamAttr( + bias_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.5) ), ) @@ -170,10 +170,10 @@ def dyfunc_Conv2DTranspose(input): 3, 12, 12, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.99) ), - bias_attr=fluid.ParamAttr( + bias_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.5) ), ) @@ -222,7 +222,7 @@ def dyfunc_Pool2D(input): def dyfunc_Prelu(input): prelu0 = paddle.nn.PReLU( - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(1.0) ), ) @@ -239,25 +239,25 @@ def setUp(self): self.dygraph_func = dyfunc_Pool2D def get_dygraph_output(self): - with fluid.dygraph.guard(): - fluid.default_startup_program.random_seed = SEED - fluid.default_main_program.random_seed = SEED - data = fluid.dygraph.to_variable(self.input) + with base.dygraph.guard(): + base.default_startup_program.random_seed = SEED + base.default_main_program.random_seed = SEED + data = base.dygraph.to_variable(self.input) res = self.dygraph_func(data).numpy() return 
res @test_and_compare_with_new_ir(True) def get_static_output(self): - startup_program = fluid.Program() + startup_program = base.Program() startup_program.random_seed = SEED - main_program = fluid.Program() + main_program = base.Program() main_program.random_seed = SEED - with fluid.program_guard(main_program, startup_program): + with base.program_guard(main_program, startup_program): data = paddle.assign(self.input) static_out = dygraph_to_static_func(self.dygraph_func)(data) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) exe.run(startup_program) static_res = exe.run(main_program, fetch_list=static_out) return static_res[0] @@ -275,24 +275,24 @@ def setUp(self): self.dygraph_func = dyfunc_BilinearTensorProduct def get_dygraph_output(self): - with fluid.dygraph.guard(): - fluid.default_startup_program.random_seed = SEED - fluid.default_main_program.random_seed = SEED + with base.dygraph.guard(): + base.default_startup_program.random_seed = SEED + base.default_main_program.random_seed = SEED res = self.dygraph_func(self.input1, self.input2).numpy() return res @test_and_compare_with_new_ir(True) def get_static_output(self): - startup_program = fluid.Program() + startup_program = base.Program() startup_program.random_seed = SEED - main_program = fluid.Program() + main_program = base.Program() main_program.random_seed = SEED - with fluid.program_guard(main_program, startup_program): + with base.program_guard(main_program, startup_program): static_out = dygraph_to_static_func(self.dygraph_func)( self.input1, self.input2 ) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) exe.run(startup_program) static_res = exe.run(main_program, fetch_list=static_out) return static_res[0] @@ -401,22 +401,22 @@ def setUp(self): self.dygraph_func = dyfunc_CosineDecay def get_dygraph_output(self): - with fluid.dygraph.guard(): - fluid.default_startup_program.random_seed = SEED - fluid.default_main_program.random_seed = SEED + with base.dygraph.guard(): + base.default_startup_program.random_seed = SEED + base.default_main_program.random_seed = SEED res = self.dygraph_func().numpy() return res @test_and_compare_with_new_ir(True) def get_static_output(self): - startup_program = fluid.Program() + startup_program = base.Program() startup_program.random_seed = SEED - main_program = fluid.Program() + main_program = base.Program() main_program.random_seed = SEED - with fluid.program_guard(main_program, startup_program): + with base.program_guard(main_program, startup_program): static_out = dygraph_to_static_func(self.dygraph_func)() - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) exe.run(startup_program) static_res = exe.run(main_program, fetch_list=static_out) return static_res[0] @@ -432,23 +432,23 @@ def setUp(self): self.dygraph_func = dyfunc_ExponentialDecay def get_dygraph_output(self): - with fluid.dygraph.guard(): - fluid.default_startup_program.random_seed = SEED - fluid.default_main_program.random_seed = SEED + with base.dygraph.guard(): + base.default_startup_program.random_seed = SEED + base.default_main_program.random_seed = SEED res = self.dygraph_func() return res @test_and_compare_with_new_ir(True) def get_static_output(self): - startup_program = fluid.Program() + startup_program = base.Program() startup_program.random_seed = SEED - main_program = fluid.Program() + main_program = base.Program() main_program.random_seed = SEED - with fluid.program_guard(main_program, startup_program): + with 
base.program_guard(main_program, startup_program): static_out = dygraph_to_static_func(self.dygraph_func)() static_out = paddle.to_tensor(static_out) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) exe.run(startup_program) static_res = exe.run(main_program, fetch_list=static_out) return static_res[0] @@ -459,23 +459,23 @@ def setUp(self): self.dygraph_func = dyfunc_InverseTimeDecay def get_dygraph_output(self): - with fluid.dygraph.guard(): - fluid.default_startup_program.random_seed = SEED - fluid.default_main_program.random_seed = SEED + with base.dygraph.guard(): + base.default_startup_program.random_seed = SEED + base.default_main_program.random_seed = SEED res = self.dygraph_func() return res @test_and_compare_with_new_ir(True) def get_static_output(self): - startup_program = fluid.Program() + startup_program = base.Program() startup_program.random_seed = SEED - main_program = fluid.Program() + main_program = base.Program() main_program.random_seed = SEED - with fluid.program_guard(main_program, startup_program): + with base.program_guard(main_program, startup_program): static_out = dygraph_to_static_func(self.dygraph_func)() static_out = paddle.to_tensor(static_out) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) exe.run(startup_program) static_res = exe.run(main_program, fetch_list=static_out) return static_res[0] @@ -486,23 +486,23 @@ def setUp(self): self.dygraph_func = dyfunc_NaturalExpDecay def get_dygraph_output(self): - with fluid.dygraph.guard(): - fluid.default_startup_program.random_seed = SEED - fluid.default_main_program.random_seed = SEED + with base.dygraph.guard(): + base.default_startup_program.random_seed = SEED + base.default_main_program.random_seed = SEED res = self.dygraph_func() return res @test_and_compare_with_new_ir(True) def get_static_output(self): - startup_program = fluid.Program() + startup_program = base.Program() startup_program.random_seed = SEED - main_program = fluid.Program() + main_program = base.Program() main_program.random_seed = SEED - with fluid.program_guard(main_program, startup_program): + with base.program_guard(main_program, startup_program): static_out = dygraph_to_static_func(self.dygraph_func)() static_out = paddle.to_tensor(static_out) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) exe.run(startup_program) static_res = exe.run(main_program, fetch_list=static_out) return static_res[0] @@ -523,19 +523,19 @@ def setUp(self): self.dygraph_func = dyfunc_PolynomialDecay def get_dygraph_output(self): - with fluid.dygraph.guard(): - fluid.default_startup_program.random_seed = SEED - fluid.default_main_program.random_seed = SEED + with base.dygraph.guard(): + base.default_startup_program.random_seed = SEED + base.default_main_program.random_seed = SEED res = self.dygraph_func() return res def _dygraph_fn(): - from paddle import fluid + from paddle import base x = np.random.random((1, 3)).astype('float32') - with fluid.dygraph.guard(): - fluid.dygraph.to_variable(x) + with base.dygraph.guard(): + base.dygraph.to_variable(x) np.random.random(1) diff --git a/test/dygraph_to_static/test_bert.py b/test/dygraph_to_static/test_bert.py index 431c9425c8f60..2e0feffa24b58 100644 --- a/test/dygraph_to_static/test_bert.py +++ b/test/dygraph_to_static/test_bert.py @@ -24,12 +24,12 @@ from predictor_utils import PredictorTools import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from 
paddle.jit.translated_layer import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX place = ( - fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + base.CUDAPlace(0) if base.is_compiled_with_cuda() else base.CPUPlace() ) SEED = 2020 STEP_NUM = 10 @@ -93,9 +93,9 @@ def tearDown(self): self.temp_dir.cleanup() def train(self, bert_config, data_reader, to_static): - with fluid.dygraph.guard(place): - fluid.default_main_program().random_seed = SEED - fluid.default_startup_program().random_seed = SEED + with base.dygraph.guard(place): + base.default_main_program().random_seed = SEED + base.default_startup_program().random_seed = SEED fake_dataset = FakeBertDataset(data_reader, STEP_NUM) data_loader = paddle.io.DataLoader( @@ -175,7 +175,7 @@ def train_static(self, bert_config, data_reader): def predict_static(self, data): paddle.enable_static() - exe = fluid.Executor(place) + exe = base.Executor(place) # load inference model [ inference_program, @@ -197,7 +197,7 @@ def predict_static(self, data): def predict_dygraph(self, bert_config, data): paddle.jit.enable_to_static(False) - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): bert = PretrainModelLayer( config=bert_config, weight_sharing=False, use_fp16=False ) @@ -206,7 +206,7 @@ def predict_dygraph(self, bert_config, data): bert.set_dict(model_dict) bert.eval() - input_vars = [fluid.dygraph.to_variable(x) for x in data] + input_vars = [base.dygraph.to_variable(x) for x in data] ( src_ids, pos_ids, @@ -230,7 +230,7 @@ def predict_dygraph(self, bert_config, data): return pred_res def predict_dygraph_jit(self, data): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): bert = paddle.jit.load(self.model_save_prefix) bert.eval() diff --git a/test/dygraph_to_static/test_bmn.py b/test/dygraph_to_static/test_bmn.py index 4c0987941f9cc..f5f8d35759869 100644 --- a/test/dygraph_to_static/test_bmn.py +++ b/test/dygraph_to_static/test_bmn.py @@ -22,9 +22,9 @@ from predictor_utils import PredictorTools import paddle -from paddle import fluid -from paddle.fluid import ParamAttr -from paddle.fluid.dygraph import to_variable +from paddle import base +from paddle.base import ParamAttr +from paddle.base.dygraph import to_variable from paddle.jit import to_static from paddle.jit.translated_layer import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX @@ -34,8 +34,8 @@ # Note: Set True to eliminate randomness. # 1. For one operation, cuDNN has several algorithms, # some algorithm results are non-deterministic, like convolution algorithms. 
-if fluid.is_compiled_with_cuda(): - fluid.set_flags({'FLAGS_cudnn_deterministic': True}) +if base.is_compiled_with_cuda(): + base.set_flags({'FLAGS_cudnn_deterministic': True}) def get_interp1d_mask( @@ -215,7 +215,7 @@ def __init__(self, cfg): self.num_sample, self.num_sample_perbin, ) - self.sample_mask = fluid.dygraph.base.to_variable(sample_mask) + self.sample_mask = base.dygraph.base.to_variable(sample_mask) self.sample_mask.stop_gradient = True self.p_conv3d1 = paddle.nn.Conv3D( @@ -642,9 +642,9 @@ class TestTrain(unittest.TestCase): def setUp(self): self.args = Args() self.place = ( - fluid.CPUPlace() - if not fluid.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + base.CPUPlace() + if not base.is_compiled_with_cuda() + else base.CUDAPlace(0) ) self.temp_dir = tempfile.TemporaryDirectory() @@ -661,7 +661,7 @@ def train_bmn(self, args, place, to_static): paddle.jit.enable_to_static(to_static) loss_data = [] - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): paddle.seed(SEED) paddle.framework.random._manual_program_seed(SEED) global local_random @@ -834,7 +834,7 @@ def verify_predict(self): def predict_dygraph(self, data): paddle.jit.enable_to_static(False) - with fluid.dygraph.guard(self.place): + with base.dygraph.guard(self.place): bmn = BMN(self.args) # load dygraph trained parameters model_dict = paddle.load(self.dy_param_path + ".pdparams") @@ -849,7 +849,7 @@ def predict_dygraph(self, data): def predict_static(self, data): paddle.enable_static() - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) # load inference model [ inference_program, @@ -870,7 +870,7 @@ def predict_static(self, data): return pred_res def predict_dygraph_jit(self, data): - with fluid.dygraph.guard(self.place): + with base.dygraph.guard(self.place): bmn = paddle.jit.load(self.model_save_prefix) bmn.eval() diff --git a/test/dygraph_to_static/test_break_continue.py b/test/dygraph_to_static/test_break_continue.py index 946dfb9850c59..dc5e74ff8d193 100644 --- a/test/dygraph_to_static/test_break_continue.py +++ b/test/dygraph_to_static/test_break_continue.py @@ -18,7 +18,7 @@ from dygraph_to_static_util import ast_only_test, dy2static_unittest import paddle -from paddle import fluid +from paddle import base from paddle.jit.api import to_static from paddle.jit.dy2static.utils import Dygraph2StaticException @@ -39,12 +39,12 @@ def test_error(self): with self.assertRaisesRegex(Dygraph2StaticException, self.error): paddle.jit.enable_to_static(True) self.assertTrue(to_static(self.dyfunc)(self.x)) - paddle.fluid.dygraph.base.global_var._in_declarative_mode_ = False + paddle.base.dygraph.base.global_var._in_declarative_mode_ = False paddle.jit.enable_to_static(False) def test_continue_in_for(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) for i in range(10): x += 1 if i > 5: @@ -55,7 +55,7 @@ def test_continue_in_for(x): def test_continue_in_for_at_end(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) for i in range(10): x += 1 if i > 5: @@ -64,7 +64,7 @@ def test_continue_in_for_at_end(x): def test_continue_in_while(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) i = paddle.tensor.fill_constant(shape=[1], dtype='int32', value=0) while i < 10: i += 1 @@ -76,7 +76,7 @@ def test_continue_in_while(x): def test_break_in_for(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) for i in range(10): x += 1 if i > 5: @@ -87,7 +87,7 @@ def test_break_in_for(x): def test_break_in_for_at_end(x): - x = 
fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) for i in range(10): x += 1 if i > 5: @@ -96,7 +96,7 @@ def test_break_in_for_at_end(x): def test_break_in_while(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) i = paddle.tensor.fill_constant(shape=[1], dtype='int32', value=0) while i < 10: i += 1 @@ -108,7 +108,7 @@ def test_break_in_while(x): def test_break_continue_in_for(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) for i in range(1, 10, 1): if i <= 4: @@ -138,7 +138,7 @@ def test_break_continue_in_for(x): def test_for_in_else(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) # Case 1: if False: @@ -169,7 +169,7 @@ def __init__(self): self.c = 5 foo = Foo() - i = fluid.dygraph.to_variable(x) + i = base.dygraph.to_variable(x) while i < 10: foo.b = paddle.zeros(shape=[1], dtype='float32') foo.c = foo.b + foo.a @@ -209,9 +209,9 @@ class TestContinueInFor(unittest.TestCase): def setUp(self): self.input = np.zeros(1).astype('int64') self.place = ( - fluid.CUDAPlace(0) - if fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.is_compiled_with_cuda() + else base.CPUPlace() ) self.init_dygraph_func() @@ -219,12 +219,12 @@ def init_dygraph_func(self): self.dygraph_func = test_continue_in_for def run_dygraph_mode(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): res = self.dygraph_func(self.input) return res.numpy() def run_static_mode(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): res = to_static(self.dygraph_func)(self.input) return res.numpy() diff --git a/test/dygraph_to_static/test_build_strategy.py b/test/dygraph_to_static/test_build_strategy.py index 14e29b9ef4508..92968fabf28f5 100644 --- a/test/dygraph_to_static/test_build_strategy.py +++ b/test/dygraph_to_static/test_build_strategy.py @@ -31,7 +31,7 @@ def setUp(self): self.build_strategy.enable_addto = True self.resnet_helper = ResNetHelper() # NOTE: for enable_addto - paddle.fluid.set_flags({"FLAGS_max_inplace_grad_add": 8}) + paddle.base.set_flags({"FLAGS_max_inplace_grad_add": 8}) def train(self, to_static): paddle.jit.enable_to_static(to_static) @@ -82,12 +82,12 @@ def test_resnet(self): @ast_only_test def test_in_static_mode_mkldnn(self): - paddle.fluid.set_flags({'FLAGS_use_mkldnn': True}) + paddle.base.set_flags({'FLAGS_use_mkldnn': True}) try: - if paddle.fluid.core.is_compiled_with_mkldnn(): + if paddle.base.core.is_compiled_with_mkldnn(): self.resnet_helper.train(True, self.build_strategy) finally: - paddle.fluid.set_flags({'FLAGS_use_mkldnn': False}) + paddle.base.set_flags({'FLAGS_use_mkldnn': False}) class TestError(unittest.TestCase): diff --git a/test/dygraph_to_static/test_cache_program.py b/test/dygraph_to_static/test_cache_program.py index eaf45b49a61e2..0958cb0ff5716 100644 --- a/test/dygraph_to_static/test_cache_program.py +++ b/test/dygraph_to_static/test_cache_program.py @@ -20,7 +20,7 @@ from test_fetch_feed import Linear, Pool2D import paddle -from paddle import fluid +from paddle import base from paddle.jit.api import to_static from paddle.jit.dy2static import convert_to_static @@ -35,7 +35,7 @@ def setUp(self): def test_cache(self): prev_ops, cur_ops = Counter(), Counter() prev_out, cur_out = None, None - with fluid.dygraph.guard(fluid.CPUPlace()): + with base.dygraph.guard(base.CPUPlace()): static_net = self.dygraph_class() for batch_id in range(self.batch_num): out = static_net(paddle.to_tensor(self.data)) @@ -47,7 +47,7 @@ def test_cache(self): cur_ops = 
Counter( [ op.type - for op in fluid.default_main_program().block(0).ops + for op in base.default_main_program().block(0).ops ] ) if batch_id > 0: @@ -94,14 +94,14 @@ def train_dygraph(self): def train(self, to_static=False): paddle.jit.enable_to_static(to_static) - with fluid.dygraph.guard(fluid.CPUPlace()): + with base.dygraph.guard(base.CPUPlace()): dygraph_net = self.dygraph_class() adam = paddle.optimizer.Adam( learning_rate=0.001, parameters=dygraph_net.parameters() ) loss_data = [] for batch_id in range(self.batch_num): - input = fluid.dygraph.to_variable(self.data) + input = base.dygraph.to_variable(self.data) pred, avg_loss = dygraph_net(input) loss_data.append(avg_loss.numpy()) @@ -125,7 +125,7 @@ def test_with_optimizer(self): def simple_func(x): - inputs = fluid.dygraph.to_variable(x) + inputs = base.dygraph.to_variable(x) mean = paddle.mean(inputs) return mean @@ -140,7 +140,7 @@ def test_cache(self): @to_static def sum_even_until_limit(max_len, limit): - ret_sum = fluid.dygraph.to_variable(np.zeros(1).astype('int32')) + ret_sum = base.dygraph.to_variable(np.zeros(1).astype('int32')) for i in range(max_len): if i % 2 > 0: continue @@ -152,8 +152,8 @@ def sum_even_until_limit(max_len, limit): def sum_under_while(limit): - i = fluid.dygraph.to_variable(np.zeros(1).astype('int32')) - ret_sum = fluid.dygraph.to_variable(np.zeros(1).astype('int32')) + i = base.dygraph.to_variable(np.zeros(1).astype('int32')) + ret_sum = base.dygraph.to_variable(np.zeros(1).astype('int32')) while i <= limit: ret_sum += i i += 1 @@ -162,7 +162,7 @@ def sum_under_while(limit): class TestToOutputWithCache(unittest.TestCase): def test_output(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): ret = sum_even_until_limit(80, 10) self.assertEqual(ret.numpy(), 30) diff --git a/test/dygraph_to_static/test_cast.py b/test/dygraph_to_static/test_cast.py index c524cae6cf83b..23e31f2b4cc99 100644 --- a/test/dygraph_to_static/test_cast.py +++ b/test/dygraph_to_static/test_cast.py @@ -17,7 +17,7 @@ import numpy as np from dygraph_to_static_util import ast_only_test, test_and_compare_with_new_ir -from paddle import fluid +from paddle import base from paddle.jit.api import to_static SEED = 2020 @@ -26,20 +26,20 @@ @to_static def test_bool_cast(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) x = bool(x) return x @to_static def test_int_cast(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) x = int(x) return x def test_float_cast(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) x = float(x) return x @@ -52,7 +52,7 @@ def test_not_var_cast(x): @to_static def test_mix_cast(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) x = int(x) x = float(x) x = bool(x) @@ -63,9 +63,9 @@ def test_mix_cast(x): class TestCastBase(unittest.TestCase): def setUp(self): self.place = ( - fluid.CUDAPlace(0) - if fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.is_compiled_with_cuda() + else base.CPUPlace() ) self.prepare() self.set_func() @@ -84,7 +84,7 @@ def set_func(self): self.func = test_bool_cast def do_test(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): res = self.func(self.input) return res diff --git a/test/dygraph_to_static/test_cinn_prim.py b/test/dygraph_to_static/test_cinn_prim.py index 960e9bcfc6c0e..0bf905ec846f9 100644 --- a/test/dygraph_to_static/test_cinn_prim.py +++ b/test/dygraph_to_static/test_cinn_prim.py @@ -23,7 +23,7 @@ import paddle import 
paddle.nn.functional as F -from paddle.fluid import core +from paddle.base import core def apply_to_static(net, use_cinn): diff --git a/test/dygraph_to_static/test_cinn_prim_gelu.py b/test/dygraph_to_static/test_cinn_prim_gelu.py index 88fa501f7696b..be2e8f67c1e98 100644 --- a/test/dygraph_to_static/test_cinn_prim_gelu.py +++ b/test/dygraph_to_static/test_cinn_prim_gelu.py @@ -19,7 +19,7 @@ import paddle import paddle.nn.functional as F -from paddle.fluid import core +from paddle.base import core TOLERANCE = { "float16": {"rtol": 1e-3, "atol": 1e-3}, diff --git a/test/dygraph_to_static/test_cinn_prim_layer_norm.py b/test/dygraph_to_static/test_cinn_prim_layer_norm.py index 9e15c98ed8a38..18c48883d75a6 100644 --- a/test/dygraph_to_static/test_cinn_prim_layer_norm.py +++ b/test/dygraph_to_static/test_cinn_prim_layer_norm.py @@ -19,7 +19,7 @@ import paddle import paddle.nn.functional as F -from paddle.fluid import core +from paddle.base import core TOLERANCE = { "float16": {"rtol": 1e-2, "atol": 1e-2}, diff --git a/test/dygraph_to_static/test_cinn_prim_mean.py b/test/dygraph_to_static/test_cinn_prim_mean.py index 65451ffad5911..cb32f5b466035 100644 --- a/test/dygraph_to_static/test_cinn_prim_mean.py +++ b/test/dygraph_to_static/test_cinn_prim_mean.py @@ -19,7 +19,7 @@ import paddle from paddle import tensor -from paddle.fluid import core +from paddle.base import core TOLERANCE = { "float16": {"rtol": 1e-3, "atol": 1e-3}, diff --git a/test/dygraph_to_static/test_convert_call.py b/test/dygraph_to_static/test_convert_call.py index 7b54ea5956134..79f23351cb6dd 100644 --- a/test/dygraph_to_static/test_convert_call.py +++ b/test/dygraph_to_static/test_convert_call.py @@ -20,7 +20,7 @@ import paddle import paddle.jit.dy2static as _jst -from paddle import fluid +from paddle import base from paddle.jit.dy2static.convert_call_func import CONVERSION_OPTIONS from paddle.jit.dy2static.utils import func_to_source_code @@ -42,7 +42,7 @@ def dyfunc_with_if(x_v): @paddle.jit.to_static def nested_func(x_v): - x_v = fluid.dygraph.to_variable(x_v) + x_v = base.dygraph.to_variable(x_v) def fn1(): return x_v @@ -81,9 +81,9 @@ class TestRecursiveCall1(unittest.TestCase): def setUp(self): self.input = np.random.random([10, 16]).astype('float32') self.place = ( - fluid.CUDAPlace(0) - if fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.is_compiled_with_cuda() + else base.CPUPlace() ) self.init_test_func() @@ -92,13 +92,13 @@ def init_test_func(self): def get_dygraph_output(self): paddle.jit.enable_to_static(False) - with fluid.dygraph.guard(): + with base.dygraph.guard(): res = self.dyfunc(self.input).numpy() return res def get_static_output(self): paddle.jit.enable_to_static(True) - with fluid.dygraph.guard(): + with base.dygraph.guard(): res = self.dyfunc(self.input).numpy() return res @@ -174,9 +174,9 @@ class TestRecursiveCall2(unittest.TestCase): def setUp(self): self.input = np.random.random((1, 3, 3, 5)).astype('float32') self.place = ( - fluid.CUDAPlace(0) - if fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.is_compiled_with_cuda() + else base.CPUPlace() ) self.set_func() @@ -184,8 +184,8 @@ def set_func(self): self.dygraph_func = MyLayer() def _run(self): - with fluid.dygraph.guard(): - data = fluid.dygraph.to_variable(self.input) + with base.dygraph.guard(): + data = base.dygraph.to_variable(self.input) res = self.dygraph_func(data) return res.numpy() diff --git a/test/dygraph_to_static/test_cycle_gan.py 
b/test/dygraph_to_static/test_cycle_gan.py index dbe3d07dfd264..9b8001d9f68b4 100644 --- a/test/dygraph_to_static/test_cycle_gan.py +++ b/test/dygraph_to_static/test_cycle_gan.py @@ -31,7 +31,7 @@ import numpy as np from PIL import Image, ImageOps -from paddle import fluid +from paddle import base # Use GPU:0 to elimate the influence of other tasks. os.environ["CUDA_VISIBLE_DEVICES"] = "1" @@ -39,7 +39,7 @@ from dygraph_to_static_util import test_and_compare_with_new_ir import paddle -from paddle.fluid.dygraph import to_variable +from paddle.base.dygraph import to_variable from paddle.jit.api import to_static from paddle.nn import BatchNorm @@ -48,8 +48,8 @@ # some algorithm results are non-deterministic, like convolution algorithms. # 2. If include BatchNorm, please set `use_global_stats=True` to avoid using # cudnnBatchNormalizationBackward which is non-deterministic. -if fluid.is_compiled_with_cuda(): - fluid.set_flags({'FLAGS_cudnn_deterministic': True}) +if base.is_compiled_with_cuda(): + base.set_flags({'FLAGS_cudnn_deterministic': True}) # set False to speed up training. use_cudnn = False @@ -353,7 +353,7 @@ def __init__( if not use_bias: con_bias_attr = False else: - con_bias_attr = fluid.ParamAttr( + con_bias_attr = base.ParamAttr( initializer=paddle.nn.initializer.Constant(0.0) ) @@ -371,16 +371,16 @@ def __init__( # Note(Aurelius84): The calculation of GPU kernel in BN is non-deterministic, # failure rate is 1/100 in Dev but seems incremental in CE platform. # If on GPU, we disable BN temporarily. - if fluid.is_compiled_with_cuda(): + if base.is_compiled_with_cuda(): norm = False if norm: self.bn = BatchNorm( use_global_stats=True, # set True to use deterministic algorithm num_channels=num_filters, - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Normal(1.0, 0.02) ), - bias_attr=fluid.ParamAttr( + bias_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(0.0) ), trainable_statistics=True, @@ -420,7 +420,7 @@ def __init__( if not use_bias: de_bias_attr = False else: - de_bias_attr = fluid.ParamAttr( + de_bias_attr = base.ParamAttr( initializer=paddle.nn.initializer.Constant(0.0) ) @@ -430,21 +430,21 @@ def __init__( filter_size, stride=stride, padding=padding, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Normal(mean=0.0, std=stddev) ), bias_attr=de_bias_attr, ) - if fluid.is_compiled_with_cuda(): + if base.is_compiled_with_cuda(): norm = False if norm: self.bn = BatchNorm( use_global_stats=True, # set True to use deterministic algorithm num_channels=num_filters, - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Normal(1.0, 0.02) ), - bias_attr=fluid.ParamAttr( + bias_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(0.0) ), trainable_statistics=True, @@ -550,21 +550,21 @@ def optimizer_setting(parameters): def train(args, to_static): place = ( - fluid.CUDAPlace(0) - if fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.is_compiled_with_cuda() + else base.CPUPlace() ) paddle.jit.enable_to_static(to_static) - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): max_images_num = args.max_images_num data_shape = [-1] + args.image_shape random.seed(SEED) np.random.seed(SEED) - fluid.default_startup_program().random_seed = SEED - fluid.default_main_program().random_seed = SEED + base.default_startup_program().random_seed = SEED + base.default_main_program().random_seed = SEED 
A_pool = ImagePool() B_pool = ImagePool() @@ -704,7 +704,7 @@ def test_train(self): # Note(Aurelius84): Because we disable BN on GPU, # but here we enhance the check on CPU by `np.array_equal` # which means the dy_out and st_out shall be exactly same. - if not fluid.is_compiled_with_cuda(): + if not base.is_compiled_with_cuda(): np.testing.assert_array_equal(dy_out, st_out) else: np.testing.assert_allclose(dy_out, st_out, rtol=1e-5, atol=1e-8) diff --git a/test/dygraph_to_static/test_declarative.py b/test/dygraph_to_static/test_declarative.py index 9ea71498ee317..807babfced90b 100644 --- a/test/dygraph_to_static/test_declarative.py +++ b/test/dygraph_to_static/test_declarative.py @@ -20,8 +20,8 @@ from test_basic_api_transformation import dyfunc_to_variable import paddle -from paddle import fluid -from paddle.fluid.dygraph import to_variable +from paddle import base +from paddle.base.dygraph import to_variable from paddle.jit.api import to_static from paddle.jit.dy2static.program_translator import ( ConcreteProgram, @@ -90,7 +90,7 @@ def func_with_list_dict(self, dl): class TestStaticFunctionInstance(unittest.TestCase): def test_instance_same_class(self): - with fluid.dygraph.guard(fluid.CPUPlace()): + with base.dygraph.guard(base.CPUPlace()): net_1 = SimpleNet() net_2 = SimpleNet() @@ -114,7 +114,7 @@ def tearDown(self): self.temp_dir.cleanup() def test_with_input_spec(self): - with fluid.dygraph.guard(fluid.CPUPlace()): + with base.dygraph.guard(base.CPUPlace()): x = to_variable(np.ones([4, 10]).astype('float32')) y = to_variable(np.ones([4, 10]).astype('float32') * 2) int_val = 4.0 @@ -150,7 +150,7 @@ def test_with_input_spec(self): out = net.func_with_list_dict([int_np, {'x': x, 'y': y}]) def test_with_error(self): - with fluid.dygraph.guard(fluid.CPUPlace()): + with base.dygraph.guard(base.CPUPlace()): x = to_variable(np.ones([4, 10]).astype('float32')) y = to_variable(np.ones([4, 10]).astype('float32') * 2) int_val = 4.0 @@ -174,7 +174,7 @@ def test_with_error(self): net.add_func(x, y) def test_concrete_program(self): - with fluid.dygraph.guard(fluid.CPUPlace()): + with base.dygraph.guard(base.CPUPlace()): x = to_variable(np.ones([4, 10]).astype('float32')) y = to_variable(np.ones([4, 10]).astype('float32') * 2) int_val = 4.0 @@ -213,7 +213,7 @@ def setUp(self): paddle.jit.enable_to_static(True) def test_with_different_input(self): - with fluid.dygraph.guard(fluid.CPUPlace()): + with base.dygraph.guard(base.CPUPlace()): x_data = np.ones([16, 10]).astype('float32') y_data = np.ones([10]).astype('float32') * 2 z_data = np.ones([10]).astype('float32') * 2.2 @@ -298,7 +298,7 @@ def test_get_concrete_program(self): ) def test_concrete_program(self): - with fluid.dygraph.guard(fluid.CPUPlace()): + with base.dygraph.guard(base.CPUPlace()): # usage 1 foo_1 = paddle.jit.to_static( foo_func, @@ -358,7 +358,7 @@ def test_error(self): paddle.jit.enable_to_static(False) with self.assertRaises(AssertionError): # AssertionError: We Only support to_variable in imperative mode, - # please use fluid.dygraph.guard() as context to run it in imperative Mode + # please use base.dygraph.guard() as context to run it in imperative Mode func(np.ones(5).astype("int32")) diff --git a/test/dygraph_to_static/test_dict.py b/test/dygraph_to_static/test_dict.py index 263f4c069bdb9..576bbffda4c05 100644 --- a/test/dygraph_to_static/test_dict.py +++ b/test/dygraph_to_static/test_dict.py @@ -17,11 +17,11 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.jit import 
to_static PLACE = ( - fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + base.CUDAPlace(0) if base.is_compiled_with_cuda() else base.CPUPlace() ) @@ -53,7 +53,7 @@ def __init__(self, hidden_size=16, output_size=16): ) def forward(self, input, cache=None): - input = fluid.dygraph.to_variable(input) + input = base.dygraph.to_variable(input) q = self.q_fc(input) k = self.k_fc(input) @@ -82,7 +82,7 @@ def __init__(self, batch_size=64, hidden_size=16, output_size=16): @to_static def forward(self, input, max_len=4): - input = fluid.dygraph.to_variable(input) + input = base.dygraph.to_variable(input) cache = { "k": paddle.tensor.fill_constant( shape=[self.batch_size, self.output_size], @@ -120,7 +120,7 @@ def update_cache(cache): class TestNetWithDict(unittest.TestCase): """ TestCase for the transformation from control flow `if/else` - dependent on tensor in Dygraph into Static `fluid.layers.cond`. + dependent on tensor in Dygraph into Static `base.layers.cond`. """ def setUp(self): @@ -135,7 +135,7 @@ def _run_dygraph(self): def train(self, to_static=False): paddle.jit.enable_to_static(to_static) - with fluid.dygraph.guard(PLACE): + with base.dygraph.guard(PLACE): net = MainNetWithDict(batch_size=self.batch_size) ret = net(self.x) return ret.numpy() @@ -235,7 +235,7 @@ def setUp(self): def train(self, to_static=False): paddle.jit.enable_to_static(to_static) - with fluid.dygraph.guard(PLACE): + with base.dygraph.guard(PLACE): net = NetWithDictPop() ret = net(z=0, x=self.x, y=True) return ret.numpy() diff --git a/test/dygraph_to_static/test_duplicate_output.py b/test/dygraph_to_static/test_duplicate_output.py index 08066874a0630..2358bc945d6f7 100644 --- a/test/dygraph_to_static/test_duplicate_output.py +++ b/test/dygraph_to_static/test_duplicate_output.py @@ -20,10 +20,10 @@ np.random.seed(1) -if paddle.fluid.is_compiled_with_cuda(): - place = paddle.fluid.CUDAPlace(0) +if paddle.base.is_compiled_with_cuda(): + place = paddle.base.CUDAPlace(0) else: - place = paddle.fluid.CPUPlace() + place = paddle.base.CPUPlace() class SimpleNet(paddle.nn.Layer): @@ -40,7 +40,7 @@ def forward(self, x): class TestDuplicateOutput(unittest.TestCase): """ TestCase for the transformation from control flow `if/else` - dependent on tensor in Dygraph into Static `fluid.layers.cond`. + dependent on tensor in Dygraph into Static `base.layers.cond`. 
""" def setUp(self): diff --git a/test/dygraph_to_static/test_error.py b/test/dygraph_to_static/test_error.py index 9602594fb54d4..22d2d9676fbd4 100644 --- a/test/dygraph_to_static/test_error.py +++ b/test/dygraph_to_static/test_error.py @@ -19,7 +19,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.jit.dy2static import error from paddle.jit.dy2static.origin_info import unwrap @@ -33,7 +33,7 @@ def inner_func(): @paddle.jit.to_static def func_error_in_compile_time(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) inner_func() if paddle.mean(x) < 0: x_v = x - 1 @@ -44,14 +44,14 @@ def func_error_in_compile_time(x): @paddle.jit.to_static def func_error_in_compile_time_2(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) x = paddle.reshape(x, shape=[1, 2]) return x @paddle.jit.to_static def func_error_in_runtime(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) two = paddle.tensor.fill_constant(shape=[1], value=2, dtype="int32") x = paddle.reshape(x, shape=[1, two]) return x @@ -102,7 +102,7 @@ def test_func(self): @paddle.jit.to_static def func_error_in_runtime_with_empty_line(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) two = paddle.tensor.fill_constant(shape=[1], value=2, dtype="int32") x = paddle.reshape(x, shape=[1, two]) @@ -292,7 +292,7 @@ def set_message(self): self.filepath ), 'def func_error_in_compile_time_2(x):', - 'x = fluid.dygraph.to_variable(x)', + 'x = base.dygraph.to_variable(x)', 'x = paddle.reshape(x, shape=[1, 2])', '<--- HERE', 'return x', @@ -341,7 +341,7 @@ def set_message(self): 'File "{}", line 56, in func_error_in_runtime'.format( self.filepath ), - 'x = fluid.dygraph.to_variable(x)', + 'x = base.dygraph.to_variable(x)', 'two = paddle.tensor.fill_constant(shape=[1], value=2, dtype="int32")', 'x = paddle.reshape(x, shape=[1, two])', '<--- HERE', diff --git a/test/dygraph_to_static/test_eval_frame.py b/test/dygraph_to_static/test_eval_frame.py index dfa5e04b44ffb..e653e67393e7a 100644 --- a/test/dygraph_to_static/test_eval_frame.py +++ b/test/dygraph_to_static/test_eval_frame.py @@ -57,9 +57,9 @@ def add(a, b): x = 1 y = 2 - paddle.fluid.core.set_eval_frame(callback) + paddle.base.core.set_eval_frame(callback) assert add(x, y) == 2, "should be 2" - paddle.fluid.core.set_eval_frame(None) + paddle.base.core.set_eval_frame(None) assert add(x, y) == 3, "should be 3" diff --git a/test/dygraph_to_static/test_fetch_feed.py b/test/dygraph_to_static/test_fetch_feed.py index d830896e5b065..0d754a595ac96 100644 --- a/test/dygraph_to_static/test_fetch_feed.py +++ b/test/dygraph_to_static/test_fetch_feed.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.jit.api import to_static SEED = 2020 @@ -69,9 +69,9 @@ def setUp(self): def train(self, to_static=False): paddle.jit.enable_to_static(to_static) - with fluid.dygraph.guard(): + with base.dygraph.guard(): dy_layer = self.dygraph_class() - x = fluid.dygraph.to_variable(self.data) + x = base.dygraph.to_variable(self.data) prediction = dy_layer(x) if isinstance(prediction, (list, tuple)): prediction = prediction[0] diff --git a/test/dygraph_to_static/test_for_enumerate.py b/test/dygraph_to_static/test_for_enumerate.py index 1aca0549213c2..bbb64e8756ea3 100644 --- a/test/dygraph_to_static/test_for_enumerate.py +++ b/test/dygraph_to_static/test_for_enumerate.py @@ -19,7 +19,7 @@ import numpy as np import paddle -from paddle 
import fluid +from paddle import base from paddle.static import InputSpec @@ -27,7 +27,7 @@ @paddle.jit.to_static def for_in_range(x): z = paddle.tensor.fill_constant([1], 'int32', 0) - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) for i in range(x.numpy().item()): z = z + i return z @@ -55,7 +55,7 @@ def for_enumerate_list(x_array): @paddle.jit.to_static def for_iter_var_numpy(x_array): z = paddle.tensor.fill_constant([1], 'int32', 0) - x_array = fluid.dygraph.to_variable(x_array) + x_array = base.dygraph.to_variable(x_array) for x in x_array.numpy(): z = z + x return z @@ -66,7 +66,7 @@ def for_iter_var_numpy(x_array): def for_enumerate_var_numpy(x_array): y = paddle.tensor.fill_constant([1], 'int32', 0) z = paddle.tensor.fill_constant([1], 'int32', 0) - x_array = fluid.dygraph.to_variable(x_array) + x_array = base.dygraph.to_variable(x_array) for i, x in enumerate(x_array.numpy()): y = y + i z = z + x @@ -78,7 +78,7 @@ def for_enumerate_var_numpy(x_array): def for_enumerate_var_numpy_with_start(x_array): y = paddle.tensor.fill_constant([1], 'int32', 0) z = paddle.tensor.fill_constant([1], 'int32', 0) - x_array = fluid.dygraph.to_variable(x_array) + x_array = base.dygraph.to_variable(x_array) for i, x in enumerate(x_array.numpy(), 1): y = y + i z = z + x @@ -89,7 +89,7 @@ def for_enumerate_var_numpy_with_start(x_array): @paddle.jit.to_static def for_in_range_with_break(x): z = paddle.tensor.fill_constant([1], 'int32', 0) - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) for i in range(x.numpy()[0]): z = z + i if i > 2: @@ -102,7 +102,7 @@ def for_in_range_with_break(x): def for_enumerate_var_numpy_with_break(x_array): y = paddle.tensor.fill_constant([1], 'int32', 0) z = paddle.tensor.fill_constant([1], 'int32', 0) - x_array = fluid.dygraph.to_variable(x_array) + x_array = base.dygraph.to_variable(x_array) for i, x in enumerate(x_array.numpy()): y = y + i z = z + x @@ -116,7 +116,7 @@ def for_enumerate_var_numpy_with_break(x_array): def for_enumerate_var_numpy_with_continue(x_array): y = paddle.tensor.fill_constant([1], 'int32', 0) z = paddle.tensor.fill_constant([1], 'int32', 0) - x_array = fluid.dygraph.to_variable(x_array) + x_array = base.dygraph.to_variable(x_array) for i, x in enumerate(x_array.numpy()): y = y + i if i > 2: @@ -130,7 +130,7 @@ def for_enumerate_var_numpy_with_continue(x_array): def for_enumerate_var_numpy_with_start_break(x_array): y = paddle.tensor.fill_constant([1], 'int32', 0) z = paddle.tensor.fill_constant([1], 'int32', 0) - x_array = fluid.dygraph.to_variable(x_array) + x_array = base.dygraph.to_variable(x_array) for i, x in enumerate(x_array.numpy(), 1): y = y + i z = z + x @@ -144,7 +144,7 @@ def for_enumerate_var_numpy_with_start_break(x_array): def for_enumerate_var_numpy_with_start_continue(x_array): y = paddle.tensor.fill_constant([1], 'int32', 0) z = paddle.tensor.fill_constant([1], 'int32', 0) - x_array = fluid.dygraph.to_variable(x_array) + x_array = base.dygraph.to_variable(x_array) for i, x in enumerate(x_array.numpy(), 1): y = y + i if i > 2: @@ -157,7 +157,7 @@ def for_enumerate_var_numpy_with_start_continue(x_array): @paddle.jit.to_static def for_iter_var(x_array): z = paddle.tensor.fill_constant([1], 'int32', 0) - x_array = fluid.dygraph.to_variable(x_array) + x_array = base.dygraph.to_variable(x_array) for x in x_array: z = z + x @@ -169,7 +169,7 @@ def for_iter_var(x_array): def for_enumerate_var(x_array): y = paddle.tensor.fill_constant([1], 'int32', 0) z = paddle.tensor.fill_constant([1], 'int32', 0) - 
x_array = fluid.dygraph.to_variable(x_array) + x_array = base.dygraph.to_variable(x_array) for i, x in enumerate(x_array): y = y + i z = z + x @@ -180,7 +180,7 @@ def for_enumerate_var(x_array): @paddle.jit.to_static def for_iter_var_list(x): # 1. prepare data, ref test_list.py - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) iter_num = paddle.tensor.fill_constant(shape=[1], value=5, dtype="int32") a = [] for i in range(iter_num): @@ -196,7 +196,7 @@ def for_iter_var_list(x): @paddle.jit.to_static def for_enumerate_var_list(x): # 1. prepare data, ref test_list.py - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) iter_num = paddle.tensor.fill_constant(shape=[1], value=5, dtype="int32") a = [] for i in range(iter_num): @@ -214,7 +214,7 @@ def for_enumerate_var_list(x): @paddle.jit.to_static def for_enumerate_var_with_nested_range(x_array): x = paddle.tensor.fill_constant([1], 'int32', 0) - x_array = fluid.dygraph.to_variable(x_array) + x_array = base.dygraph.to_variable(x_array) for i, num in enumerate(x_array): for idx in range(num): x = x + num @@ -225,7 +225,7 @@ def for_enumerate_var_with_nested_range(x_array): @paddle.jit.to_static def for_iter_var_idx(x_array): z = paddle.tensor.fill_constant([1], 'int32', 0) - x_array = fluid.dygraph.to_variable(x_array) + x_array = base.dygraph.to_variable(x_array) for x in x_array[0:]: z = z + x @@ -356,9 +356,9 @@ def tensor_array_slice_in_enumerate(): class TestTransformBase(unittest.TestCase): def setUp(self): self.place = ( - fluid.CUDAPlace(0) - if fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.is_compiled_with_cuda() + else base.CPUPlace() ) self.set_input() self.set_test_func() @@ -373,7 +373,7 @@ def set_test_func(self): def _run(self, to_static): paddle.jit.enable_to_static(to_static) - with fluid.dygraph.guard(): + with base.dygraph.guard(): return self.dygraph_func(self.input) def get_dygraph_output(self): @@ -401,7 +401,7 @@ def transformed_result_compare(self): class TestTransformForOriginalList(TestTransform): def _run(self, to_static): paddle.jit.enable_to_static(to_static) - with fluid.dygraph.guard(): + with base.dygraph.guard(): return self.dygraph_func() diff --git a/test/dygraph_to_static/test_full_name_usage.py b/test/dygraph_to_static/test_full_name_usage.py index f9cbbeb639a51..0332480891e16 100644 --- a/test/dygraph_to_static/test_full_name_usage.py +++ b/test/dygraph_to_static/test_full_name_usage.py @@ -18,12 +18,12 @@ from dygraph_to_static_util import ast_only_test import paddle -from paddle import fluid +from paddle import base @paddle.jit.to_static def dygraph_decorated_func(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) if paddle.mean(x) > 0: x_v = x - 1 else: @@ -33,7 +33,7 @@ def dygraph_decorated_func(x): @paddle.jit.to_static def jit_decorated_func(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) if paddle.mean(x) > 0: x_v = x - 1 else: @@ -63,7 +63,7 @@ class TestFullNameDecorator(unittest.TestCase): def test_run_success(self): x = np.ones([1, 2]).astype("float32") answer = np.zeros([1, 2]).astype("float32") - with fluid.dygraph.guard(): + with base.dygraph.guard(): np.testing.assert_allclose( dygraph_decorated_func(x).numpy(), answer, rtol=1e-05 ) diff --git a/test/dygraph_to_static/test_ifelse.py b/test/dygraph_to_static/test_ifelse.py index 7c3fe0315aa0a..253cab46f6fcc 100644 --- a/test/dygraph_to_static/test_ifelse.py +++ b/test/dygraph_to_static/test_ifelse.py @@ -28,7 +28,7 @@ 
dyfunc_with_if_else2, dyfunc_with_if_else3, dyfunc_with_if_else_with_list_generator, - fluid, + base, if_tensor_case, if_with_and_or, if_with_and_or_1, @@ -44,15 +44,15 @@ import paddle import paddle.nn.functional as F -from paddle.fluid import core +from paddle.base import core from paddle.jit.dy2static.utils import Dygraph2StaticException np.random.seed(1) -if fluid.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) +if base.is_compiled_with_cuda(): + place = base.CUDAPlace(0) else: - place = fluid.CPUPlace() + place = base.CPUPlace() @dy2static_unittest @@ -68,14 +68,14 @@ def test_error(self): with self.assertRaisesRegex(Dygraph2StaticException, self.error): paddle.jit.enable_to_static(True) self.assertTrue(paddle.jit.to_static(self.dyfunc)(self.x)) - paddle.fluid.dygraph.base.global_var._in_declarative_mode_ = False + paddle.base.dygraph.base.global_var._in_declarative_mode_ = False paddle.jit.enable_to_static(False) class TestDygraphIfElse(unittest.TestCase): """ TestCase for the transformation from control flow `if/else` - dependent on tensor in Dygraph into Static `fluid.layers.cond`. + dependent on tensor in Dygraph into Static `base.layers.cond`. """ def setUp(self): @@ -86,8 +86,8 @@ def _run_static(self): return self._run_dygraph(to_static=True) def _run_dygraph(self, to_static=False): - with fluid.dygraph.guard(place): - x_v = fluid.dygraph.to_variable(self.x) + with base.dygraph.guard(place): + x_v = base.dygraph.to_variable(self.x) if to_static: ret = paddle.jit.to_static(self.dyfunc)(x_v) else: @@ -241,7 +241,7 @@ def setUp(self): class TestDygraphIfElseNet(unittest.TestCase): """ TestCase for the transformation from control flow `if/else` - dependent on tensor in Dygraph into Static `fluid.layers.cond`. + dependent on tensor in Dygraph into Static `base.layers.cond`. """ def setUp(self): @@ -257,9 +257,9 @@ def _run_dygraph(self): def _run(self, to_static=False): paddle.jit.enable_to_static(to_static) - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): net = self.Net() - x_v = fluid.dygraph.to_variable(self.x) + x_v = base.dygraph.to_variable(self.x) ret = net(x_v) return ret.numpy() @@ -473,7 +473,7 @@ def test_ast_to_func(self): # that the code block is under @to_static, but in this UT # an exception is thrown during Dy2St, making the `_in_declarative_mode_` # a wrong value. So We need set `_in_declarative_mode_` to False manually. 
- paddle.fluid.dygraph.base.global_var._in_declarative_mode_ = False + paddle.base.dygraph.base.global_var._in_declarative_mode_ = False paddle.jit.enable_to_static(False) diff --git a/test/dygraph_to_static/test_lac.py b/test/dygraph_to_static/test_lac.py index e3382d26964e4..a650a25c25b07 100644 --- a/test/dygraph_to_static/test_lac.py +++ b/test/dygraph_to_static/test_lac.py @@ -23,8 +23,8 @@ os.environ["CUDA_VISIBLE_DEVICES"] = "2" import paddle -from paddle import _legacy_C_ops, fluid -from paddle.fluid.dygraph import to_variable +from paddle import _legacy_C_ops, base +from paddle.base.dygraph import to_variable from paddle.framework import in_dynamic_mode from paddle.jit.api import to_static from paddle.jit.translated_layer import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX @@ -96,7 +96,7 @@ def __init__(self, input_dim, grnn_hidden_dim, init_bound, h_0=None): self.pre_gru = paddle.nn.Linear( in_features=input_dim, out_features=grnn_hidden_dim * 3, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Uniform( low=-init_bound, high=init_bound ), @@ -107,7 +107,7 @@ def __init__(self, input_dim, grnn_hidden_dim, init_bound, h_0=None): self.gru = DynamicGRU( size=grnn_hidden_dim, h_0=h_0, - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Uniform( low=-init_bound, high=init_bound ), @@ -118,7 +118,7 @@ def __init__(self, input_dim, grnn_hidden_dim, init_bound, h_0=None): self.pre_gru_r = paddle.nn.Linear( in_features=input_dim, out_features=grnn_hidden_dim * 3, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Uniform( low=-init_bound, high=init_bound ), @@ -130,7 +130,7 @@ def __init__(self, input_dim, grnn_hidden_dim, init_bound, h_0=None): size=grnn_hidden_dim, is_reverse=True, h_0=h_0, - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Uniform( low=-init_bound, high=init_bound ), @@ -363,7 +363,7 @@ def __init__(self, args, length=None): self.word_embedding = paddle.nn.Embedding( self.vocab_size, self.word_emb_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( learning_rate=self.emb_lr, name="word_emb", initializer=paddle.nn.initializer.Uniform( @@ -405,7 +405,7 @@ def __init__(self, args, length=None): self.fc = paddle.nn.Linear( in_features=self.grnn_hidden_dim * 2, out_features=self.num_labels, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Uniform( low=-self.init_bound, high=self.init_bound ), @@ -414,14 +414,14 @@ def __init__(self, args, length=None): ) self.linear_chain_crf = LinearChainCRF( - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( name='linear_chain_crfw', learning_rate=self.crf_lr ), size=self.num_labels, ) self.crf_decoding = CRFDecoding( - param_attr=fluid.ParamAttr(name='crfw', learning_rate=self.crf_lr), + param_attr=base.ParamAttr(name='crfw', learning_rate=self.crf_lr), size=self.num_labels, ) # share weight @@ -504,7 +504,7 @@ def __reader__(): def create_dataloader(reader, place): - data_loader = fluid.io.DataLoader.from_generator( + data_loader = base.io.DataLoader.from_generator( capacity=16, use_double_buffer=True, iterable=True ) @@ -517,9 +517,9 @@ class TestLACModel(unittest.TestCase): def setUp(self): self.args = Args() self.place = ( - fluid.CUDAPlace(0) - if fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.is_compiled_with_cuda() + else base.CPUPlace() ) self.temp_dir = 
tempfile.TemporaryDirectory() self.model_save_dir = os.path.join(self.temp_dir.name, 'inference') @@ -531,11 +531,11 @@ def setUp(self): def train(self, args, to_static): paddle.jit.enable_to_static(to_static) place = ( - fluid.CUDAPlace(0) - if fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): paddle.seed(SEED) paddle.framework.random._manual_program_seed(SEED) @@ -646,7 +646,7 @@ def verify_predict(self): def predict_dygraph(self, batch): words, targets, length = batch paddle.jit.enable_to_static(False) - with fluid.dygraph.guard(self.place): + with base.dygraph.guard(self.place): model = LexNet(self.args) # load dygraph trained parameters model_dict = paddle.load(self.dy_param_path + ".pdparams") @@ -665,7 +665,7 @@ def predict_static(self, batch): Load inference model to test it's ok for prediction. """ paddle.enable_static() - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) # load inference model [ inference_program, @@ -688,7 +688,7 @@ def predict_static(self, batch): def predict_dygraph_jit(self, batch): words, targets, length = batch - with fluid.dygraph.guard(self.place): + with base.dygraph.guard(self.place): model = paddle.jit.load(self.model_save_prefix) model.eval() diff --git a/test/dygraph_to_static/test_lambda.py b/test/dygraph_to_static/test_lambda.py index bab465afe7821..c1ff57147564c 100644 --- a/test/dygraph_to_static/test_lambda.py +++ b/test/dygraph_to_static/test_lambda.py @@ -18,11 +18,11 @@ import paddle import paddle.nn.functional as F -from paddle import fluid +from paddle import base def call_lambda_as_func(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) add_func = lambda x, y: x + y mean_func = lambda x: paddle.mean(x) @@ -35,7 +35,7 @@ def call_lambda_as_func(x): def call_lambda_directly(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) y = (lambda x, y: x + y)(x, x) out = (lambda x: paddle.mean(x))(y) @@ -44,7 +44,7 @@ def call_lambda_directly(x): def call_lambda_in_func(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) add_func = lambda x: x + 1 @@ -55,7 +55,7 @@ def call_lambda_in_func(x): def call_lambda_with_ifExpr(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) add_func = lambda x: x + 1 @@ -66,7 +66,7 @@ def call_lambda_with_ifExpr(x): def call_lambda_with_ifExpr2(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) add_func = lambda x: x + 1 @@ -84,9 +84,9 @@ def setUp(self): self.x = np.random.random([10, 16]).astype('float32') self.x = np.array([1, 3]).astype('float32') self.place = ( - fluid.CUDAPlace(0) - if fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.is_compiled_with_cuda() + else base.CPUPlace() ) self.init_func() @@ -103,8 +103,8 @@ def run_static(self, func): return self.run_dygraph(func, to_static=True) def run_dygraph(self, func, to_static=False): - with fluid.dygraph.guard(self.place): - x_v = fluid.dygraph.to_variable(self.x) + with base.dygraph.guard(self.place): + x_v = base.dygraph.to_variable(self.x) if to_static: ret = paddle.jit.to_static(func)(x_v) else: diff --git a/test/dygraph_to_static/test_len.py b/test/dygraph_to_static/test_len.py index eb72977f16bba..e2cee7c4dc8b4 100644 --- a/test/dygraph_to_static/test_len.py +++ b/test/dygraph_to_static/test_len.py @@ -17,7 +17,7 @@ import numpy as np import paddle 
-from paddle import fluid +from paddle import base from paddle.jit.dy2static import Call from paddle.nn import clip @@ -27,13 +27,13 @@ def len_with_tensor(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) x_len = len(x) return x_len def len_with_lod_tensor_array(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=0) arr = paddle.tensor.array_write(x, i=i) @@ -45,9 +45,9 @@ def len_with_lod_tensor_array(x): class TestLen(unittest.TestCase): def setUp(self): self.place = ( - fluid.CUDAPlace(0) - if fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.is_compiled_with_cuda() + else base.CPUPlace() ) self.x_data = np.random.random([10, 16]).astype('float32') self.init_func() @@ -56,13 +56,13 @@ def init_func(self): self.func = len_with_tensor def _run(self, to_static): - with fluid.dygraph.guard(self.place): + with base.dygraph.guard(self.place): if to_static: out = paddle.jit.to_static(self.func)(self.x_data) else: out = self.func(self.x_data) - if isinstance(out, fluid.core.eager.Tensor): + if isinstance(out, base.core.eager.Tensor): out = out.numpy() return out @@ -80,14 +80,14 @@ def init_func(self): # Note: Variable(SelectedRows) is not exposed directly in dygraph. # The unittest is used to test coverage by fake transformed code. def len_with_selected_rows(place): - block = fluid.default_main_program().global_block() + block = base.default_main_program().global_block() # create selected_rows variable var = block.create_var( name="X", dtype="float32", shape=[-1], persistable=True, - type=fluid.core.VarDesc.VarType.SELECTED_ROWS, + type=base.core.VarDesc.VarType.SELECTED_ROWS, ) # y is Variable(SelectedRows) y = clip.merge_selected_rows(var) @@ -102,23 +102,23 @@ def len_with_selected_rows(place): row_numel = 2 np_array = np.ones((len(x_rows), row_numel)).astype("float32") - x_var = fluid.global_scope().var("X").get_selected_rows() + x_var = base.global_scope().var("X").get_selected_rows() x_var.set_rows(x_rows) x_var.set_height(20) x_tensor = x_var.get_tensor() x_tensor.set(np_array, place) - exe = fluid.Executor(place=place) - result = exe.run(fluid.default_main_program(), fetch_list=[y_len, z_len]) + exe = base.Executor(place=place) + result = exe.run(base.default_main_program(), fetch_list=[y_len, z_len]) return result class TestLenWithSelectedRows(unittest.TestCase): def setUp(self): self.place = ( - fluid.CUDAPlace(0) - if fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.is_compiled_with_cuda() + else base.CPUPlace() ) def test_len(self): diff --git a/test/dygraph_to_static/test_list.py b/test/dygraph_to_static/test_list.py index 4a68cab9b63fe..b0febb2b0c9ee 100644 --- a/test/dygraph_to_static/test_list.py +++ b/test/dygraph_to_static/test_list.py @@ -18,7 +18,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base SEED = 2020 np.random.seed(SEED) @@ -27,7 +27,7 @@ # Situation 1: Test list append def test_list_append_without_control_flow(x): # Python list will not be transformed. 
- x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) a = [] # It's a plain python control flow which won't be transformed if 2 > 1: @@ -36,7 +36,7 @@ def test_list_append_without_control_flow(x): def test_list_append_in_if(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) a = [] if x.numpy()[0] > 0: a.append(x) @@ -49,7 +49,7 @@ def test_list_append_in_if(x): def test_list_append_in_for_loop(x, iter_num): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) # Use `fill_constant` so that static analysis can analyze the type of iter_num is Tensor iter_num = paddle.tensor.fill_constant( shape=[1], value=iter_num, dtype="int32" @@ -61,7 +61,7 @@ def test_list_append_in_for_loop(x, iter_num): def test_list_append_in_for_subscript(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) iter_num = paddle.shape(x)[0] a = [] for i in range(iter_num): @@ -72,7 +72,7 @@ def test_list_append_in_for_subscript(x): def test_list_append_in_while_loop_subscript(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) iter_num = paddle.shape(x)[0] a = [] i = 0 @@ -85,7 +85,7 @@ def test_list_append_in_while_loop_subscript(x): def test_list_append_in_for_loop_with_concat(x, iter_num): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) a = [] # Use `fill_constant` so that static analysis can analyze the type of iter_num is Tensor iter_num = paddle.tensor.fill_constant( @@ -98,7 +98,7 @@ def test_list_append_in_for_loop_with_concat(x, iter_num): def test_list_append_in_while_loop(x, iter_num): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) iter_num = paddle.tensor.fill_constant( shape=[1], value=iter_num, dtype="int32" ) @@ -111,7 +111,7 @@ def test_list_append_in_while_loop(x, iter_num): def test_list_append_in_while_loop_with_stack(x, iter_num): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) iter_num = paddle.tensor.fill_constant( shape=[1], value=iter_num, dtype="int32" ) @@ -134,7 +134,7 @@ def test_tensor_array_slice(x, iter_num): # Situation 2: Test list pop def test_list_pop_without_control_flow_1(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) a = [] if 2 > 1: a.append(x) @@ -143,7 +143,7 @@ def test_list_pop_without_control_flow_1(x): def test_list_pop_without_control_flow_2(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) a = [] if 2 > 1: a.append(x) @@ -153,7 +153,7 @@ def test_list_pop_without_control_flow_2(x): def test_list_pop_in_if(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) a = [] b = [x * 2 + (x + 1)] if x.numpy()[0] > 0: @@ -169,7 +169,7 @@ def test_list_pop_in_if(x): def test_list_pop_in_for_loop(x, iter_num): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) # Use `fill_constant` so that static analysis can analyze the type of iter_num is Tensor iter_num = paddle.tensor.fill_constant( shape=[1], value=iter_num, dtype="int32" @@ -188,7 +188,7 @@ def test_list_pop_in_for_loop(x, iter_num): def test_list_pop_in_while_loop(x, iter_num): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) iter_num = paddle.tensor.fill_constant( shape=[1], value=iter_num, dtype="int32" ) @@ -210,9 +210,9 @@ def test_list_pop_in_while_loop(x, iter_num): class TestListWithoutControlFlow(unittest.TestCase): def setUp(self): self.place = ( - fluid.CUDAPlace(0) - if fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if 
base.is_compiled_with_cuda() + else base.CPUPlace() ) self.init_data() @@ -242,7 +242,7 @@ def run_dygraph_mode(self): return self.train(to_static=False) def train(self, to_static=False): - with fluid.dygraph.guard(): + with base.dygraph.guard(): if to_static: res = paddle.jit.to_static(self.dygraph_func)(self.input) else: @@ -284,7 +284,7 @@ def init_dygraph_func(self): ] def train(self, to_static=False): - with fluid.dygraph.guard(): + with base.dygraph.guard(): if to_static: # print(paddle.jit.to_static(self.dygraph_func).code) res = paddle.jit.to_static(self.dygraph_func)( diff --git a/test/dygraph_to_static/test_logical.py b/test/dygraph_to_static/test_logical.py index f23a8beb07278..3b00903bc478c 100644 --- a/test/dygraph_to_static/test_logical.py +++ b/test/dygraph_to_static/test_logical.py @@ -20,7 +20,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.jit.dy2static.logical_transformer import cmpop_node_to_str from paddle.utils import gast @@ -172,7 +172,7 @@ def setUp(self): self.input = np.array([3]).astype('int32') self.place = ( paddle.CUDAPlace(0) - if fluid.is_compiled_with_cuda() + if base.is_compiled_with_cuda() else paddle.CPUPlace() ) self._set_test_func() @@ -184,7 +184,7 @@ def _set_test_func(self): def _run(self, to_static): paddle.jit.enable_to_static(to_static) - with fluid.dygraph.guard(self.place): + with base.dygraph.guard(self.place): result = self.dygraph_func(self.input) return result.numpy() diff --git a/test/dygraph_to_static/test_loop.py b/test/dygraph_to_static/test_loop.py index e89f9def593b6..77f568e2c5eec 100644 --- a/test/dygraph_to_static/test_loop.py +++ b/test/dygraph_to_static/test_loop.py @@ -19,7 +19,7 @@ import paddle import paddle.nn.functional as F -from paddle import fluid +from paddle import base from paddle.jit.dy2static.loop_transformer import NameVisitor from paddle.utils import gast @@ -28,7 +28,7 @@ def while_loop_dyfunc(x): - i = fluid.dygraph.to_variable(x) + i = base.dygraph.to_variable(x) while x < 10: i = i + x x = x + 1 @@ -47,7 +47,7 @@ def while_loop_dyfunc_without_tensor(x): def while_loop_dyfun_with_conflict_var(x): - i = fluid.dygraph.to_variable(x) + i = base.dygraph.to_variable(x) def relu(y): # 'y' is not visible outside the scope. @@ -65,12 +65,12 @@ def relu(y): def while_loop_dyfunc_with_none(x): i = ( - fluid.dygraph.to_variable(x) + base.dygraph.to_variable(x) if x is not None - else fluid.dygraph.to_variable(x + 1) + else base.dygraph.to_variable(x + 1) ) # Use `to_variable` so that static analysis can analyze the type of X is Tensor - x = fluid.dygraph.to_variable( + x = base.dygraph.to_variable( x ) # TODO(liym27): Delete it if the type of parameter x can be resolved flag = 1 @@ -133,7 +133,7 @@ def for_break_single_return(max_len): def while_loop_bool_op(x): - i = fluid.dygraph.to_variable(x) + i = base.dygraph.to_variable(x) while x <= -1 or x < -3 or (x < -7 or x < -5) or (x >= 0 and x < 10): i = i + x @@ -142,7 +142,7 @@ def while_loop_bool_op(x): def while_loop_bool_op2(x): - i = fluid.dygraph.to_variable(x) + i = base.dygraph.to_variable(x) a = 1 # In the while condition, there are both Paddle Variable and non-Variable. 
@@ -161,7 +161,7 @@ def __init__(self): self.c = 5 foo = Foo() - i = fluid.dygraph.to_variable(x) + i = base.dygraph.to_variable(x) while i < 10: foo.b = paddle.zeros(shape=[1], dtype='float32') foo.c = foo.b + foo.a @@ -302,9 +302,9 @@ def test_nested_loop_vars(self): class TestTransformWhileLoop(unittest.TestCase): def setUp(self): self.place = ( - fluid.CUDAPlace(0) - if fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.is_compiled_with_cuda() + else base.CPUPlace() ) self.x = np.zeros(shape=(1), dtype=np.int32) self._init_dyfunc() @@ -319,9 +319,9 @@ def _run_dygraph(self): return self._run(to_static=False) def _run(self, to_static): - with fluid.dygraph.guard(self.place): + with base.dygraph.guard(self.place): # Set the input of dyfunc to Tensor - tensor_x = fluid.dygraph.to_variable(self.x, zero_copy=False) + tensor_x = base.dygraph.to_variable(self.x, zero_copy=False) if to_static: ret = paddle.jit.to_static(self.dyfunc)(tensor_x) else: @@ -381,9 +381,9 @@ def _init_dyfunc(self): class TestTransformForLoop(unittest.TestCase): def setUp(self): self.place = ( - fluid.CUDAPlace(0) - if fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.is_compiled_with_cuda() + else base.CPUPlace() ) self.len = 100 self._init_dyfunc() @@ -398,7 +398,7 @@ def _run_dygraph(self): return self._run(to_static=False) def _run(self, to_static): - with fluid.dygraph.guard(self.place): + with base.dygraph.guard(self.place): if to_static: ret = paddle.jit.to_static(self.dyfunc)(self.len) else: diff --git a/test/dygraph_to_static/test_mnist.py b/test/dygraph_to_static/test_mnist.py index f6c178aab1845..5aeb16e87564d 100644 --- a/test/dygraph_to_static/test_mnist.py +++ b/test/dygraph_to_static/test_mnist.py @@ -22,17 +22,17 @@ from predictor_utils import PredictorTools import paddle -from paddle import fluid -from paddle.fluid.dygraph import to_variable -from paddle.fluid.dygraph.base import switch_to_static_graph +from paddle import base +from paddle.base.dygraph import to_variable +from paddle.base.dygraph.base import switch_to_static_graph from paddle.jit.translated_layer import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX from paddle.nn import Linear from paddle.optimizer import Adam SEED = 2020 -if paddle.fluid.is_compiled_with_cuda(): - paddle.fluid.set_flags({'FLAGS_cudnn_deterministic': True}) +if paddle.base.is_compiled_with_cuda(): + paddle.base.set_flags({'FLAGS_cudnn_deterministic': True}) class SimpleImgConvPool(paddle.nn.Layer): @@ -131,9 +131,9 @@ def setUp(self): self.epoch_num = 1 self.batch_size = 64 self.place = ( - fluid.CUDAPlace(0) - if fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.is_compiled_with_cuda() + else base.CPUPlace() ) self.train_reader = paddle.batch( paddle.dataset.mnist.train(), @@ -174,11 +174,11 @@ def test_mnist_to_static(self): def test_mnist_declarative_cpu_vs_mkldnn(self): dygraph_loss_cpu = self.train_dygraph() - fluid.set_flags({'FLAGS_use_mkldnn': True}) + base.set_flags({'FLAGS_use_mkldnn': True}) try: dygraph_loss_mkldnn = self.train_dygraph() finally: - fluid.set_flags({'FLAGS_use_mkldnn': False}) + base.set_flags({'FLAGS_use_mkldnn': False}) np.testing.assert_allclose( dygraph_loss_cpu, dygraph_loss_mkldnn, @@ -190,9 +190,9 @@ def test_mnist_declarative_cpu_vs_mkldnn(self): def train(self, to_static=False): loss_data = [] - with fluid.dygraph.guard(self.place): - fluid.default_main_program().random_seed = SEED - fluid.default_startup_program().random_seed = SEED + 
with base.dygraph.guard(self.place): + base.default_main_program().random_seed = SEED + base.default_startup_program().random_seed = SEED mnist = MNIST() if to_static: mnist = paddle.jit.to_static(mnist) @@ -301,7 +301,7 @@ def jit_load_and_run_inference_static( self, model_path, model_filename, params_filename, inputs ): paddle.enable_static() - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) [ inference_program, feed_target_names, diff --git a/test/dygraph_to_static/test_mnist_amp.py b/test/dygraph_to_static/test_mnist_amp.py index 667181adbbe86..e5e11062aad7b 100644 --- a/test/dygraph_to_static/test_mnist_amp.py +++ b/test/dygraph_to_static/test_mnist_amp.py @@ -22,8 +22,8 @@ import paddle from paddle.optimizer import Adam -if paddle.fluid.is_compiled_with_cuda(): - paddle.fluid.set_flags({'FLAGS_cudnn_deterministic': True}) +if paddle.base.is_compiled_with_cuda(): + paddle.base.set_flags({'FLAGS_cudnn_deterministic': True}) class TestAMP(TestMNIST): diff --git a/test/dygraph_to_static/test_mnist_pure_fp16.py b/test/dygraph_to_static/test_mnist_pure_fp16.py index 2815f47ffa7fe..9d5ae58edbbd7 100644 --- a/test/dygraph_to_static/test_mnist_pure_fp16.py +++ b/test/dygraph_to_static/test_mnist_pure_fp16.py @@ -21,8 +21,8 @@ import paddle -if paddle.fluid.is_compiled_with_cuda(): - paddle.fluid.set_flags({'FLAGS_cudnn_deterministic': True}) +if paddle.base.is_compiled_with_cuda(): + paddle.base.set_flags({'FLAGS_cudnn_deterministic': True}) class TestPureFP16(TestMNIST): @@ -34,7 +34,7 @@ def train_dygraph(self): @test_and_compare_with_new_ir(False) def test_mnist_to_static(self): - if paddle.fluid.is_compiled_with_cuda(): + if paddle.base.is_compiled_with_cuda(): dygraph_loss = self.train_dygraph() static_loss = self.train_static() # NOTE: In pure fp16 training, loss is not stable, so we enlarge atol here. diff --git a/test/dygraph_to_static/test_mobile_net.py b/test/dygraph_to_static/test_mobile_net.py index 649f00c3eec0a..0478be61f6428 100644 --- a/test/dygraph_to_static/test_mobile_net.py +++ b/test/dygraph_to_static/test_mobile_net.py @@ -23,8 +23,8 @@ from predictor_utils import PredictorTools import paddle -from paddle import fluid -from paddle.fluid.param_attr import ParamAttr +from paddle import base +from paddle.base.param_attr import ParamAttr from paddle.jit.api import to_static from paddle.jit.translated_layer import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX from paddle.nn import BatchNorm, Linear @@ -32,8 +32,8 @@ # Note: Set True to eliminate randomness. # 1. For one operation, cuDNN has several algorithms, # some algorithm results are non-deterministic, like convolution algorithms. 
-if fluid.is_compiled_with_cuda(): - fluid.set_flags({'FLAGS_cudnn_deterministic': True}) +if base.is_compiled_with_cuda(): + base.set_flags({'FLAGS_cudnn_deterministic': True}) SEED = 2020 @@ -496,9 +496,9 @@ class Args: print_step = 1 train_step = 10 place = ( - fluid.CUDAPlace(0) - if fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.is_compiled_with_cuda() + else base.CPUPlace() ) model_save_dir = None model_save_prefix = None @@ -509,7 +509,7 @@ class Args: def train_mobilenet(args, to_static): paddle.jit.enable_to_static(to_static) - with fluid.dygraph.guard(args.place): + with base.dygraph.guard(args.place): np.random.seed(SEED) paddle.seed(SEED) paddle.framework.random._manual_program_seed(SEED) @@ -601,7 +601,7 @@ def train_mobilenet(args, to_static): def predict_static(args, data): paddle.enable_static() - exe = fluid.Executor(args.place) + exe = base.Executor(args.place) # load inference model [ @@ -625,7 +625,7 @@ def predict_static(args, data): def predict_dygraph(args, data): paddle.jit.enable_to_static(False) - with fluid.dygraph.guard(args.place): + with base.dygraph.guard(args.place): if args.model == "MobileNetV1": model = MobileNetV1(class_dim=args.class_dim, scale=1.0) elif args.model == "MobileNetV2": @@ -635,13 +635,13 @@ def predict_dygraph(args, data): model.set_dict(model_dict) model.eval() - pred_res = model(fluid.dygraph.to_variable(data)) + pred_res = model(base.dygraph.to_variable(data)) return pred_res.numpy() def predict_dygraph_jit(args, data): - with fluid.dygraph.guard(args.place): + with base.dygraph.guard(args.place): model = paddle.jit.load(args.model_save_prefix) model.eval() diff --git a/test/dygraph_to_static/test_partial_program.py b/test/dygraph_to_static/test_partial_program.py index 8b646acab64fc..ddb77a04a2628 100644 --- a/test/dygraph_to_static/test_partial_program.py +++ b/test/dygraph_to_static/test_partial_program.py @@ -19,7 +19,7 @@ from test_fetch_feed import Linear import paddle -from paddle import fluid +from paddle import base from paddle.jit.api import to_static SEED = 2020 @@ -50,7 +50,7 @@ def nested_output(x, y): def fake_data(shape): x_data = np.random.random(shape).astype('float32') - return fluid.dygraph.to_variable(x_data) + return base.dygraph.to_variable(x_data) @dy2static_unittest @@ -73,7 +73,7 @@ def fake_input(self): ] def _run(self, to_static): - with fluid.dygraph.guard(): + with base.dygraph.guard(): if self.x is None or self.y is None: self.fake_input() @@ -97,7 +97,7 @@ def setUp(self): self.y = None def _run(self, to_static): - with fluid.dygraph.guard(): + with base.dygraph.guard(): if self.x is None or self.y is None: self.x = fake_data([10, 16]) self.y = fake_data([10, 16]) @@ -119,7 +119,7 @@ def test_nest(self): self.assertTrue(len(dygraph_res) == len(static_res)) for dy_var, st_var in zip(dygraph_res, static_res): - if isinstance(dy_var, fluid.core.eager.Tensor): + if isinstance(dy_var, base.core.eager.Tensor): np.testing.assert_allclose( dy_var.numpy(), st_var.numpy(), rtol=1e-05 ) @@ -131,11 +131,11 @@ def test_nest(self): class TestWithTrainAndEval(unittest.TestCase): @ast_only_test def test_switch_eval_and_train(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): linear_net = Linear() linear_net = paddle.jit.to_static(linear_net) x_data = np.random.random((4, 10)).astype('float32') - x = fluid.dygraph.to_variable(x_data) + x = base.dygraph.to_variable(x_data) linear_net(x) _, train_partial_layer = linear_net.forward.program_cache.last()[-1] @@ -164,11 +164,11 
@@ def test_switch_eval_and_train(self): class TestWithNoGrad(unittest.TestCase): @ast_only_test def test_with_no_grad(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): linear_net = Linear() linear_net = paddle.jit.to_static(linear_net) x_data = np.random.random((5, 10)).astype('float32') - x = fluid.dygraph.to_variable(x_data) + x = base.dygraph.to_variable(x_data) with paddle.no_grad(): linear_net.train() @@ -201,8 +201,8 @@ class TestPruneUnusedParamInProgram(unittest.TestCase): def test_prune(self): input_ids = np.array([[15, 11, 6, 3, 18, 13]]).astype("float32") - place = fluid.CPUPlace() - with fluid.dygraph.guard(place): + place = base.CPUPlace() + with base.dygraph.guard(place): model = GPT2LMHeadModel() model.eval() input_ids = paddle.to_tensor(input_ids) diff --git a/test/dygraph_to_static/test_partial_program_hook.py b/test/dygraph_to_static/test_partial_program_hook.py index 8910b9b42214a..cb177862692d3 100644 --- a/test/dygraph_to_static/test_partial_program_hook.py +++ b/test/dygraph_to_static/test_partial_program_hook.py @@ -16,7 +16,7 @@ import unittest import paddle -from paddle.fluid import core +from paddle.base import core from paddle.jit.dy2static import partial_program, program_translator diff --git a/test/dygraph_to_static/test_print.py b/test/dygraph_to_static/test_print.py index 1f8ab755f8939..fceed1c0e1cad 100644 --- a/test/dygraph_to_static/test_print.py +++ b/test/dygraph_to_static/test_print.py @@ -17,7 +17,7 @@ import numpy import paddle -from paddle import fluid +from paddle import base from paddle.jit import to_static @@ -99,7 +99,7 @@ def set_test_func(self): def _run(self, to_static): paddle.jit.enable_to_static(to_static) - with fluid.dygraph.guard(): + with base.dygraph.guard(): self.dygraph_func(self.input) def get_dygraph_output(self): diff --git a/test/dygraph_to_static/test_program_translator.py b/test/dygraph_to_static/test_program_translator.py index 609272619ef17..ac23492bc33b4 100644 --- a/test/dygraph_to_static/test_program_translator.py +++ b/test/dygraph_to_static/test_program_translator.py @@ -26,7 +26,7 @@ import paddle import paddle.jit.dy2static as _jst -from paddle import fluid +from paddle import base from paddle.jit.api import to_static from paddle.jit.dy2static.utils import func_to_source_code from paddle.utils import gast @@ -39,8 +39,8 @@ # Because initialized ops will be added into program and be executed many times. # The parameters are assumed to initialized outside of the function. 
def simple_func(x, weight_numpy): - x = fluid.dygraph.to_variable(x) - w = fluid.dygraph.to_variable(weight_numpy) + x = base.dygraph.to_variable(x) + w = base.dygraph.to_variable(weight_numpy) y = paddle.matmul(x, w) z = paddle.mean(y) return z @@ -48,8 +48,8 @@ def simple_func(x, weight_numpy): @to_static def decorated_simple_func(x, weight_numpy): - x = fluid.dygraph.to_variable(x) - w = fluid.dygraph.to_variable(weight_numpy) + x = base.dygraph.to_variable(x) + w = base.dygraph.to_variable(weight_numpy) y = paddle.matmul(x, w) z = paddle.mean(y) return z @@ -219,19 +219,19 @@ def setUp(self): @ast_only_test def test_raise_error(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): paddle.jit.enable_to_static(True) net = NetWithError() with self.assertRaises(ValueError): - net(fluid.dygraph.to_variable(self.x)) + net(base.dygraph.to_variable(self.x)) def test_enable_disable_declarative(self): paddle.jit.enable_to_static(True) - with fluid.dygraph.guard(): + with base.dygraph.guard(): static_output = decorated_simple_func(self.x, self.weight) paddle.jit.enable_to_static(False) - with fluid.dygraph.guard(): + with base.dygraph.guard(): dygraph_output = decorated_simple_func(self.x, self.weight) np.testing.assert_allclose( static_output.numpy(), diff --git a/test/dygraph_to_static/test_ptb_lm.py b/test/dygraph_to_static/test_ptb_lm.py index 540586c4ee972..cf00a1475c896 100644 --- a/test/dygraph_to_static/test_ptb_lm.py +++ b/test/dygraph_to_static/test_ptb_lm.py @@ -19,8 +19,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid.dygraph.base import to_variable +from paddle import base +from paddle.base.dygraph.base import to_variable from paddle.jit.api import to_static from paddle.optimizer import SGD @@ -48,7 +48,7 @@ def __init__( for i in range(self._num_layers): weight_1 = self.create_parameter( - attr=fluid.ParamAttr( + attr=base.ParamAttr( initializer=paddle.nn.initializer.Uniform( low=-self._init_scale, high=self._init_scale ) @@ -61,7 +61,7 @@ def __init__( ) self.weight_1_arr.append(self.add_parameter('w_%d' % i, weight_1)) bias_1 = self.create_parameter( - attr=fluid.ParamAttr( + attr=base.ParamAttr( initializer=paddle.nn.initializer.Uniform( low=-self._init_scale, high=self._init_scale ) @@ -156,7 +156,7 @@ def __init__( vocab_size, hidden_size, sparse=False, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( name='embedding_para', initializer=paddle.nn.initializer.Uniform( low=-init_scale, high=init_scale @@ -164,7 +164,7 @@ def __init__( ), ) self.softmax_weight = self.create_parameter( - attr=fluid.ParamAttr(), + attr=base.ParamAttr(), shape=[self.hidden_size, self.vocab_size], dtype="float32", default_initializer=paddle.nn.initializer.Uniform( @@ -172,7 +172,7 @@ def __init__( ), ) self.softmax_bias = self.create_parameter( - attr=fluid.ParamAttr(), + attr=base.ParamAttr(), shape=[self.vocab_size], dtype="float32", default_initializer=paddle.nn.initializer.Uniform( @@ -235,7 +235,7 @@ def train(place): vocab_size = 1000 batch_num = 200 - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): paddle.seed(SEED) paddle.framework.random._manual_program_seed(SEED) ptb_model = PtbModel( @@ -322,9 +322,9 @@ def train_static(place): class TestPtb(unittest.TestCase): def setUp(self): self.place = ( - fluid.CUDAPlace(0) - if fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.is_compiled_with_cuda() + else base.CPUPlace() ) def test_check_result(self): diff --git 
a/test/dygraph_to_static/test_reinforcement_learning.py b/test/dygraph_to_static/test_reinforcement_learning.py index e8aa1dbf9bfef..82e75bec891c9 100644 --- a/test/dygraph_to_static/test_reinforcement_learning.py +++ b/test/dygraph_to_static/test_reinforcement_learning.py @@ -21,8 +21,8 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid.dygraph import to_variable +from paddle import base +from paddle.base.dygraph import to_variable from paddle.jit.api import to_static from paddle.nn import Layer @@ -65,7 +65,7 @@ def train(args, place, to_static): env = gym.make('CartPole-v0') env.seed(SEED) - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): paddle.seed(SEED) paddle.framework.random._manual_program_seed(SEED) local_random = np.random.RandomState(SEED) @@ -204,9 +204,9 @@ def finish_episode(): class TestDeclarative(unittest.TestCase): def setUp(self): self.place = ( - fluid.CUDAPlace(0) - if fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.is_compiled_with_cuda() + else base.CPUPlace() ) self.args = Args() diff --git a/test/dygraph_to_static/test_resnet.py b/test/dygraph_to_static/test_resnet.py index 72d0e19518f10..4dc8170da9de9 100644 --- a/test/dygraph_to_static/test_resnet.py +++ b/test/dygraph_to_static/test_resnet.py @@ -23,8 +23,8 @@ from predictor_utils import PredictorTools import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.jit.translated_layer import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX from paddle.nn import BatchNorm @@ -37,12 +37,12 @@ batch_size = 2 epoch_num = 1 place = ( - fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + base.CUDAPlace(0) if base.is_compiled_with_cuda() else base.CPUPlace() ) -if fluid.is_compiled_with_cuda(): - fluid.set_flags({'FLAGS_cudnn_deterministic': True}) +if base.is_compiled_with_cuda(): + base.set_flags({'FLAGS_cudnn_deterministic': True}) def optimizer_setting(parameter_list=None): @@ -189,7 +189,7 @@ def __init__(self, layers=50, class_dim=102): self.out = paddle.nn.Linear( self.pool2d_avg_output, class_dim, - weight_attr=fluid.param_attr.ParamAttr( + weight_attr=base.param_attr.ParamAttr( initializer=paddle.nn.initializer.Uniform(-stdv, stdv) ), ) @@ -256,7 +256,7 @@ def train(self, to_static, build_strategy=None): """ Tests model decorated by `dygraph_to_static_output` in static graph mode. For users, the model is defined in dygraph mode and trained in static graph mode. 
""" - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): np.random.seed(SEED) paddle.seed(SEED) paddle.framework.random._manual_program_seed(SEED) @@ -337,20 +337,20 @@ def train(self, to_static, build_strategy=None): def predict_dygraph(self, data): paddle.jit.enable_to_static(False) - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): resnet = ResNet() model_dict = paddle.load(self.dy_state_dict_save_path + '.pdparams') resnet.set_dict(model_dict) resnet.eval() - pred_res = resnet(fluid.dygraph.to_variable(data)) + pred_res = resnet(base.dygraph.to_variable(data)) return pred_res.numpy() def predict_static(self, data): paddle.enable_static() - exe = fluid.Executor(place) + exe = base.Executor(place) [ inference_program, feed_target_names, @@ -371,7 +371,7 @@ def predict_static(self, data): return pred_res[0] def predict_dygraph_jit(self, data): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): resnet = paddle.jit.load(self.model_save_prefix) resnet.eval() @@ -468,12 +468,12 @@ def test_resnet_composite_forward_backward(self): ) def test_in_static_mode_mkldnn(self): - fluid.set_flags({'FLAGS_use_mkldnn': True}) + base.set_flags({'FLAGS_use_mkldnn': True}) try: - if paddle.fluid.core.is_compiled_with_mkldnn(): + if paddle.base.core.is_compiled_with_mkldnn(): self.resnet_helper.train(to_static=True) finally: - fluid.set_flags({'FLAGS_use_mkldnn': False}) + base.set_flags({'FLAGS_use_mkldnn': False}) if __name__ == '__main__': diff --git a/test/dygraph_to_static/test_resnet_amp.py b/test/dygraph_to_static/test_resnet_amp.py index 96f3b48d342e1..afb85d18b5921 100644 --- a/test/dygraph_to_static/test_resnet_amp.py +++ b/test/dygraph_to_static/test_resnet_amp.py @@ -20,26 +20,26 @@ from test_resnet import SEED, ResNet, optimizer_setting import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core # NOTE: Reduce batch_size from 8 to 2 to avoid unittest timeout. batch_size = 2 epoch_num = 1 place = ( - fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + base.CUDAPlace(0) if base.is_compiled_with_cuda() else base.CPUPlace() ) -if fluid.is_compiled_with_cuda(): - fluid.set_flags({'FLAGS_cudnn_deterministic': True}) +if base.is_compiled_with_cuda(): + base.set_flags({'FLAGS_cudnn_deterministic': True}) def train(to_static, build_strategy=None): """ Tests model decorated by `dygraph_to_static_output` in static graph mode. For users, the model is defined in dygraph mode and trained in static graph mode. """ - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): np.random.seed(SEED) paddle.seed(SEED) paddle.framework.random._manual_program_seed(SEED) diff --git a/test/dygraph_to_static/test_resnet_pure_fp16.py b/test/dygraph_to_static/test_resnet_pure_fp16.py index a52aa3a41c79f..c878372fbd406 100644 --- a/test/dygraph_to_static/test_resnet_pure_fp16.py +++ b/test/dygraph_to_static/test_resnet_pure_fp16.py @@ -20,16 +20,16 @@ from test_resnet import SEED, ResNet, optimizer_setting import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core # NOTE: Reduce batch_size from 8 to 2 to avoid unittest timeout. 
batch_size = 2 epoch_num = 1 -if fluid.is_compiled_with_cuda(): - fluid.set_flags({'FLAGS_cudnn_deterministic': True}) +if base.is_compiled_with_cuda(): + base.set_flags({'FLAGS_cudnn_deterministic': True}) def train(to_static, build_strategy=None): @@ -123,7 +123,7 @@ def train(self, to_static): @test_and_compare_with_new_ir(False) def test_resnet(self): - if fluid.is_compiled_with_cuda(): + if base.is_compiled_with_cuda(): static_loss = self.train(to_static=True) dygraph_loss = self.train(to_static=False) # NOTE: In pure fp16 training, loss is not stable, so we enlarge atol here. @@ -138,7 +138,7 @@ def test_resnet(self): ) def test_resnet_composite(self): - if fluid.is_compiled_with_cuda(): + if base.is_compiled_with_cuda(): core._set_prim_backward_enabled(True) static_loss = self.train(to_static=True) core._set_prim_backward_enabled(False) diff --git a/test/dygraph_to_static/test_resnet_v2.py b/test/dygraph_to_static/test_resnet_v2.py index 29e3552b89816..2b4e9676c5f36 100644 --- a/test/dygraph_to_static/test_resnet_v2.py +++ b/test/dygraph_to_static/test_resnet_v2.py @@ -23,7 +23,7 @@ from predictor_utils import PredictorTools import paddle -from paddle.fluid import core +from paddle.base import core SEED = 2020 IMAGENET1000 = 1281167 @@ -39,7 +39,7 @@ if paddle.is_compiled_with_cuda(): - paddle.fluid.set_flags({'FLAGS_cudnn_deterministic': True}) + paddle.base.set_flags({'FLAGS_cudnn_deterministic': True}) def optimizer_setting(parameter_list=None): @@ -133,7 +133,7 @@ def forward(self, inputs): y = paddle.add(x=short, y=conv2) # TODO: uncomment this lines to reproduce the oneDNN segment fault error. - # layer_helper = paddle.fluid.layer_helper.LayerHelper( + # layer_helper = paddle.base.layer_helper.LayerHelper( # self.full_name(), act='relu' # ) # return layer_helper.append_activation(y) @@ -474,12 +474,12 @@ def test_resnet_composite(self): ) def test_in_static_mode_mkldnn(self): - paddle.fluid.set_flags({'FLAGS_use_mkldnn': True}) + paddle.base.set_flags({'FLAGS_use_mkldnn': True}) try: - if paddle.fluid.core.is_compiled_with_mkldnn(): + if paddle.base.core.is_compiled_with_mkldnn(): self.train(to_static=True) finally: - paddle.fluid.set_flags({'FLAGS_use_mkldnn': False}) + paddle.base.set_flags({'FLAGS_use_mkldnn': False}) if __name__ == '__main__': diff --git a/test/dygraph_to_static/test_return.py b/test/dygraph_to_static/test_return.py index d8bf86e208277..41c622e9ed03a 100644 --- a/test/dygraph_to_static/test_return.py +++ b/test/dygraph_to_static/test_return.py @@ -19,8 +19,8 @@ from ifelse_simple_func import dyfunc_with_if_else import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.jit import to_static from paddle.jit.dy2static.utils import Dygraph2StaticException @@ -30,13 +30,13 @@ @to_static def test_return_base(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) return x @to_static def test_inside_func_base(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) def inner_func(x): return x @@ -46,7 +46,7 @@ def inner_func(x): @to_static def test_return_if(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) if x < 0: x -= 1 return -x @@ -56,7 +56,7 @@ def test_return_if(x): @to_static def test_return_if_else(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) if x > 0: x += 10086 return x @@ -69,7 +69,7 @@ def test_return_if_else(x): @to_static def test_return_in_while(x): - x = fluid.dygraph.to_variable(x) + 
x = base.dygraph.to_variable(x) i = paddle.tensor.fill_constant(shape=[1], dtype='int32', value=0) while i < 10: i += 1 @@ -82,7 +82,7 @@ def test_return_in_while(x): @to_static def test_return_in_for(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) for i in range(10): if i <= 4: x += 1 @@ -94,13 +94,13 @@ def test_return_in_for(x): @to_static def test_recursive_return(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) return dyfunc_with_if_else(x) @to_static def test_return_different_length_if_body(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) y = x + 1 if x > 0: # x = to_variable(np.ones(1)) so it will return here @@ -111,7 +111,7 @@ def test_return_different_length_if_body(x): @to_static def test_return_different_length_else(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) y = x + 1 if x < 0: return x, y @@ -122,13 +122,13 @@ def test_return_different_length_else(x): @to_static def test_no_return(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) y = x + 1 @to_static def test_return_none(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) y = x + 1 if x > 0: # x = to_variable(np.ones(1)) so it will return here @@ -139,7 +139,7 @@ def test_return_none(x): @to_static def test_return_no_variable(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) y = x + 1 if x < 0: return x, y @@ -150,14 +150,14 @@ def test_return_no_variable(x): @to_static def test_return_list_one_value(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) x += 1 return [x] @to_static def test_return_list_many_values(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) x += 1 y = x * 2 z = x * x @@ -166,14 +166,14 @@ def test_return_list_many_values(x): @to_static def test_return_tuple_one_value(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) x += 1 return (x,) @to_static def test_return_tuple_many_values(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) x += 1 y = x * 2 z = x * x @@ -268,9 +268,9 @@ class TestReturnBase(unittest.TestCase): def setUp(self): self.input = np.ones(1).astype('int32') self.place = ( - fluid.CUDAPlace(0) - if fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.is_compiled_with_cuda() + else base.CPUPlace() ) self.init_dygraph_func() @@ -279,7 +279,7 @@ def init_dygraph_func(self): def _run(self, to_static=False): paddle.jit.enable_to_static(to_static) - with fluid.dygraph.guard(): + with base.dygraph.guard(): res = self.dygraph_func(self.input) if isinstance(res, (tuple, list)): return tuple(r.numpy() for r in res) diff --git a/test/dygraph_to_static/test_save_inference_model.py b/test/dygraph_to_static/test_save_inference_model.py index b050c201c13ca..23f8633a4e681 100644 --- a/test/dygraph_to_static/test_save_inference_model.py +++ b/test/dygraph_to_static/test_save_inference_model.py @@ -20,7 +20,7 @@ from dygraph_to_static_util import ast_only_test import paddle -from paddle import fluid +from paddle import base from paddle.jit.api import to_static from paddle.jit.dy2static.partial_program import partial_program_from from paddle.jit.translated_layer import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX @@ -30,7 +30,7 @@ np.random.seed(SEED) place = ( - fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + base.CUDAPlace(0) if base.is_compiled_with_cuda() else base.CPUPlace() ) @@ -58,11 +58,11 @@ def 
tearDown(self): def test_save_inference_model(self): fc_size = 20 x_data = np.random.random((fc_size, fc_size)).astype('float32') - with fluid.dygraph.guard(place): - fluid.default_startup_program().random_seed = SEED - fluid.default_main_program().random_seed = SEED + with base.dygraph.guard(place): + base.default_startup_program().random_seed = SEED + base.default_main_program().random_seed = SEED - x = fluid.dygraph.to_variable(x_data) + x = base.dygraph.to_variable(x_data) layer = SimpleFcLayer(fc_size) adam = paddle.optimizer.SGD( learning_rate=0.1, parameters=layer.parameters() ) @@ -125,7 +125,7 @@ def load_and_run_inference( self, model_path, model_filename, params_filename, inputs ): paddle.enable_static() - exe = fluid.Executor(place) + exe = base.Executor(place) [ inference_program, feed_target_names, @@ -151,9 +151,9 @@ def test_param_type(self): paddle.jit.enable_to_static(True) x_data = np.random.random((20, 20)).astype('float32') - with fluid.dygraph.guard(fluid.CPUPlace()): + with base.dygraph.guard(base.CPUPlace()): net = SimpleFcLayer(20) - x = fluid.dygraph.to_variable(x_data) + x = base.dygraph.to_variable(x_data) out = net(x) program_cache = net.forward.program_cache @@ -163,7 +163,7 @@ def test_param_type(self): concrete_program.parameters = params[0] # TypeError: Type of self._params should be list or tuple, - # but received <class 'paddle.fluid.framework.EagerParamBase'>. + # but received <class 'paddle.base.framework.EagerParamBase'>. with self.assertRaises(TypeError): partial_program_from(concrete_program) diff --git a/test/dygraph_to_static/test_save_load.py b/test/dygraph_to_static/test_save_load.py index d817f77913fd8..7e1eae4858e83 100644 --- a/test/dygraph_to_static/test_save_load.py +++ b/test/dygraph_to_static/test_save_load.py @@ -22,15 +22,15 @@ import paddle import paddle.nn.functional as F -from paddle import fluid, nn -from paddle.fluid import core +from paddle import base, nn +from paddle.base import core from paddle.nn import BatchNorm from paddle.optimizer import Adam np.random.seed(2020) place = ( - fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + base.CUDAPlace(0) if base.is_compiled_with_cuda() else base.CPUPlace() ) @@ -71,9 +71,9 @@ def test_save_load_same_result(self): x_data = np.random.randn(30, 10, 32).astype('float32') batch_num = 3 - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): paddle.jit.enable_to_static(True) - x = fluid.dygraph.to_variable(x_data) + x = base.dygraph.to_variable(x_data) net = Linear(32, 64) adam = Adam(learning_rate=0.1, parameters=net.parameters()) @@ -92,7 +92,7 @@ def test_save_load_same_result(self): static_out, static_loss = net(x) # load parameters into dygraph - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): dygraph_net = Linear(32, 64) # Load parameters @@ -101,7 +101,7 @@ def test_save_load_same_result(self): # Switch into eval mode.
dygraph_net.eval() - x = fluid.dygraph.to_variable(x_data) + x = base.dygraph.to_variable(x_data) # predict output paddle.jit.enable_to_static(False) dygraph_out, dygraph_loss = dygraph_net(x) @@ -115,7 +115,7 @@ def test_save_load_same_result(self): @ast_only_test def test_save_load_prim(self): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): self.x = paddle.randn([4, 2, 6, 6], dtype="float32") self.x.stop_gradient = False net = PrimeNet(data_layout="NCHW") @@ -156,7 +156,7 @@ def test_save_load_prim(self): @ast_only_test def test_save_load_prim_with_hook(self): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): self.x = paddle.randn([4, 2, 6, 6], dtype="float32") self.x.stop_gradient = False net = PrimeNet(data_layout="NCHW") diff --git a/test/dygraph_to_static/test_se_resnet.py b/test/dygraph_to_static/test_se_resnet.py index 34f16e7ae6f53..80e7257962e62 100644 --- a/test/dygraph_to_static/test_se_resnet.py +++ b/test/dygraph_to_static/test_se_resnet.py @@ -24,8 +24,8 @@ from predictor_utils import PredictorTools import paddle -from paddle import fluid -from paddle.fluid.dygraph.base import to_variable +from paddle import base +from paddle.base.dygraph.base import to_variable from paddle.jit.api import to_static from paddle.jit.translated_layer import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX from paddle.nn import BatchNorm, Linear @@ -39,14 +39,14 @@ STEP_NUM = 10 place = ( - fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + base.CUDAPlace(0) if base.is_compiled_with_cuda() else base.CPUPlace() ) # Note: Set True to eliminate randomness. # 1. For one operation, cuDNN has several algorithms, # some algorithm results are non-deterministic, like convolution algorithms. -if fluid.is_compiled_with_cuda(): - fluid.set_flags({'FLAGS_cudnn_deterministic': True}) +if base.is_compiled_with_cuda(): + base.set_flags({'FLAGS_cudnn_deterministic': True}) train_parameters = { "learning_strategy": { @@ -130,7 +130,7 @@ def __init__(self, num_channels, reduction_ratio): self._fc = Linear( num_channels, num_channels // reduction_ratio, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Uniform(-stdv, stdv) ), ) @@ -138,7 +138,7 @@ def __init__(self, num_channels, reduction_ratio): self._excitation = Linear( num_channels // reduction_ratio, num_channels, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Uniform(-stdv, stdv) ), ) @@ -315,7 +315,7 @@ def __init__(self, layers=50, class_dim=102): self.out = Linear( self.pool2d_avg_output, class_dim, - weight_attr=fluid.param_attr.ParamAttr( + weight_attr=base.param_attr.ParamAttr( initializer=paddle.nn.initializer.Uniform(-stdv, stdv) ), ) @@ -377,7 +377,7 @@ def train(self, train_reader, to_static): np.random.seed(SEED) - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): paddle.seed(SEED) paddle.framework.random._manual_program_seed(SEED) se_resnext = SeResNeXt() @@ -473,7 +473,7 @@ def train(self, train_reader, to_static): def predict_dygraph(self, data): paddle.jit.enable_to_static(False) - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): se_resnext = SeResNeXt() model_dict = paddle.load(self.dy_state_dict_save_path + '.pdparams') @@ -481,15 +481,15 @@ def predict_dygraph(self, data): se_resnext.eval() label = np.random.random([1, 1]).astype("int64") - img = fluid.dygraph.to_variable(data) - label = fluid.dygraph.to_variable(label) + img = base.dygraph.to_variable(data) + 
label = base.dygraph.to_variable(label) pred_res, _, _, _ = se_resnext(img, label) return pred_res.numpy() def predict_static(self, data): paddle.enable_static() - exe = fluid.Executor(place) + exe = base.Executor(place) [ inference_program, feed_target_names, @@ -510,7 +510,7 @@ def predict_static(self, data): return pred_res[0] def predict_dygraph_jit(self, data): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): se_resnext = paddle.jit.load(self.model_save_prefix) se_resnext.eval() diff --git a/test/dygraph_to_static/test_sentiment.py b/test/dygraph_to_static/test_sentiment.py index 3da23c0849216..8e701e48b5b81 100644 --- a/test/dygraph_to_static/test_sentiment.py +++ b/test/dygraph_to_static/test_sentiment.py @@ -18,8 +18,8 @@ from test_lac import DynamicGRU import paddle -from paddle import fluid -from paddle.fluid.dygraph import to_variable +from paddle import base +from paddle.base.dygraph import to_variable from paddle.jit.api import to_static from paddle.nn import Embedding, Linear @@ -28,8 +28,8 @@ # Note: Set True to eliminate randomness. # 1. For one operation, cuDNN has several algorithms, # some algorithm results are non-deterministic, like convolution algorithms. -if fluid.is_compiled_with_cuda(): - fluid.set_flags({'FLAGS_cudnn_deterministic': True}) +if base.is_compiled_with_cuda(): + base.set_flags({'FLAGS_cudnn_deterministic': True}) class SimpleConvPool(paddle.nn.Layer): @@ -170,7 +170,7 @@ def __init__(self, dict_dim, batch_size, seq_len): self.embedding = Embedding( self.dict_dim + 1, self.emb_dim, - weight_attr=fluid.ParamAttr(learning_rate=30), + weight_attr=base.ParamAttr(learning_rate=30), sparse=False, ) h_0 = np.zeros((self.batch_size, self.hid_dim), dtype="float32") @@ -218,7 +218,7 @@ def __init__(self, dict_dim, batch_size, seq_len): self.embedding = Embedding( self.dict_dim + 1, self.emb_dim, - weight_attr=fluid.ParamAttr(learning_rate=30), + weight_attr=base.ParamAttr(learning_rate=30), sparse=False, ) h_0 = np.zeros((self.batch_size, self.hid_dim), dtype="float32") @@ -304,12 +304,12 @@ class Args: def train(args, to_static): paddle.jit.enable_to_static(to_static) place = ( - fluid.CUDAPlace(0) - if fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): np.random.seed(SEED) paddle.seed(SEED) paddle.framework.random._manual_program_seed(SEED) @@ -317,7 +317,7 @@ def train(args, to_static): train_reader = fake_data_reader( args.class_num, args.vocab_size, args.batch_size, args.padding_size ) - train_loader = fluid.io.DataLoader.from_generator(capacity=24) + train_loader = base.io.DataLoader.from_generator(capacity=24) train_loader.set_sample_list_generator(train_reader) if args.model_type == 'cnn_net': diff --git a/test/dygraph_to_static/test_seq2seq.py b/test/dygraph_to_static/test_seq2seq.py index 3478bf47efed3..e39a4da16ba13 100644 --- a/test/dygraph_to_static/test_seq2seq.py +++ b/test/dygraph_to_static/test_seq2seq.py @@ -22,11 +22,11 @@ from seq2seq_utils import Seq2SeqModelHyperParams, get_data_iter import paddle -from paddle import fluid +from paddle import base from paddle.nn import ClipGradByGlobalNorm place = ( - fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + base.CUDAPlace(0) if base.is_compiled_with_cuda() else base.CPUPlace() ) STEP_NUM = 10 PRINT_STEP = 2 @@ -45,9 +45,9 @@ def prepare_input(batch): def train(args, attn_model=False): - with 
fluid.dygraph.guard(place): - fluid.default_startup_program().random_seed = 2020 - fluid.default_main_program().random_seed = 2020 + with base.dygraph.guard(place): + base.default_startup_program().random_seed = 2020 + base.default_main_program().random_seed = 2020 if attn_model: model = AttentionModel( @@ -87,7 +87,7 @@ def train(args, attn_model=False): batch_start_time = time.time() input_data_feed, word_num = prepare_input(batch) input_data_feed = [ - fluid.dygraph.to_variable(np_inp) for np_inp in input_data_feed + base.dygraph.to_variable(np_inp) for np_inp in input_data_feed ] word_count += word_num loss = model(input_data_feed) @@ -132,7 +132,7 @@ def train(args, attn_model=False): def infer(args, attn_model=False): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): if attn_model: model = AttentionModel( args.hidden_size, @@ -168,7 +168,7 @@ def infer(args, attn_model=False): for batch_id, batch in enumerate(train_data_iter): input_data_feed, word_num = prepare_input(batch) input_data_feed = [ - fluid.dygraph.to_variable(np_inp) for np_inp in input_data_feed + base.dygraph.to_variable(np_inp) for np_inp in input_data_feed ] outputs = model.beam_search(input_data_feed) break diff --git a/test/dygraph_to_static/test_simnet.py b/test/dygraph_to_static/test_simnet.py index 09ea063f9ad8e..7d6cad6d03381 100644 --- a/test/dygraph_to_static/test_simnet.py +++ b/test/dygraph_to_static/test_simnet.py @@ -21,7 +21,7 @@ from simnet_dygraph_model import BOW, HingeLoss import paddle -from paddle import fluid +from paddle import base SEED = 102 random.seed(SEED) @@ -128,12 +128,12 @@ def train(conf_dict, to_static): paddle.jit.enable_to_static(to_static) # Get device - if fluid.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) + if base.is_compiled_with_cuda(): + place = base.CUDAPlace(0) else: - place = fluid.CPUPlace() + place = base.CPUPlace() - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): paddle.seed(SEED) paddle.framework.random._manual_program_seed(SEED) @@ -179,8 +179,8 @@ def train(conf_dict, to_static): class TestSimnet(unittest.TestCase): @test_and_compare_with_new_ir(True) def test_dygraph_static_same_loss(self): - if fluid.is_compiled_with_cuda(): - fluid.set_flags({"FLAGS_cudnn_deterministic": True}) + if base.is_compiled_with_cuda(): + base.set_flags({"FLAGS_cudnn_deterministic": True}) conf_dict = create_conf_dict() dygraph_loss = train(conf_dict, to_static=False) static_loss = train(conf_dict, to_static=True) diff --git a/test/dygraph_to_static/test_simnet_v2.py b/test/dygraph_to_static/test_simnet_v2.py index 316464ab79132..a54cfe14dcbf8 100644 --- a/test/dygraph_to_static/test_simnet_v2.py +++ b/test/dygraph_to_static/test_simnet_v2.py @@ -180,7 +180,7 @@ class TestSimnet(unittest.TestCase): @test_and_compare_with_new_ir(True) def test_dygraph_static_same_loss(self): if paddle.is_compiled_with_cuda(): - paddle.fluid.set_flags({"FLAGS_cudnn_deterministic": True}) + paddle.base.set_flags({"FLAGS_cudnn_deterministic": True}) conf_dict = create_conf_dict() dygraph_loss = train(conf_dict, to_static=False) static_loss = train(conf_dict, to_static=True) diff --git a/test/dygraph_to_static/test_static_analysis.py b/test/dygraph_to_static/test_static_analysis.py index b849bbe47261a..e4e5afb574417 100644 --- a/test/dygraph_to_static/test_static_analysis.py +++ b/test/dygraph_to_static/test_static_analysis.py @@ -18,7 +18,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.jit.dy2static import 
NodeVarType, StaticAnalysisVisitor from paddle.utils import gast @@ -77,10 +77,10 @@ def func_to_test3(): def func_to_test4(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): a = np.random.uniform(0.1, 1, [1, 2]) b = 1 + a - c = fluid.dygraph.to_variable(b) + c = base.dygraph.to_variable(b) d = (c + 1) * 0.3 @@ -123,7 +123,7 @@ def inner_unknown_func(x): def func_to_test6(x, y=1): - i = fluid.dygraph.to_variable(x) + i = base.dygraph.to_variable(x) def add(x, y): return x + y diff --git a/test/dygraph_to_static/test_tensor_memcpy_on_cpu.py b/test/dygraph_to_static/test_tensor_memcpy_on_cpu.py index 45f3a0a555018..a8e955be9e863 100644 --- a/test/dygraph_to_static/test_tensor_memcpy_on_cpu.py +++ b/test/dygraph_to_static/test_tensor_memcpy_on_cpu.py @@ -48,7 +48,7 @@ def _run(self, to_static): return x1.place, x2.place, x2.numpy() def test_tensor_cpu_on_default_cpu(self): - paddle.fluid.framework._set_expected_place(paddle.CPUPlace()) + paddle.base.framework._set_expected_place(paddle.CPUPlace()) dygraph_x1_place, dygraph_place, dygraph_res = self._run( to_static=False ) @@ -68,7 +68,7 @@ def _run(self, to_static): return x1.place, x2.place, x2.numpy() def test_tensor_cuda_on_default_cpu(self): - if not paddle.fluid.is_compiled_with_cuda(): + if not paddle.base.is_compiled_with_cuda(): return """ @@ -77,7 +77,7 @@ def test_tensor_cuda_on_default_cpu(self): whether is still taking effect or not. See ConstructDeviceContext() in interpreter_util.cc. """ - paddle.fluid.framework._set_expected_place(paddle.CPUPlace()) + paddle.base.framework._set_expected_place(paddle.CPUPlace()) dygraph_x1_place, dygraph_place, dygraph_res = self._run( to_static=False ) @@ -97,10 +97,10 @@ def _run(self, to_static): return x1.place, x2.place, x2.numpy() def test_with_warning_on_cpu(self): - if not paddle.fluid.is_compiled_with_cuda(): + if not paddle.base.is_compiled_with_cuda(): return - paddle.fluid.framework._set_expected_place(paddle.CPUPlace()) + paddle.base.framework._set_expected_place(paddle.CPUPlace()) x1 = paddle.ones([1, 2, 3]) with self.assertWarns(UserWarning, msg="ignored") as cm: diff --git a/test/dygraph_to_static/test_tensor_memcpy_on_gpu.py b/test/dygraph_to_static/test_tensor_memcpy_on_gpu.py index de642dd4087e8..30e8e55611959 100644 --- a/test/dygraph_to_static/test_tensor_memcpy_on_gpu.py +++ b/test/dygraph_to_static/test_tensor_memcpy_on_gpu.py @@ -49,13 +49,13 @@ def _run(self, to_static): return x1.place, x2.place, x2.numpy() def test_tensor_cpu_on_default_gpu(self): - if paddle.fluid.is_compiled_with_cuda(): + if paddle.base.is_compiled_with_cuda(): place = paddle.CUDAPlace( int(os.environ.get('FLAGS_selected_gpus', 0)) ) else: return - paddle.fluid.framework._set_expected_place(place) + paddle.base.framework._set_expected_place(place) dygraph_x1_place, dygraph_place, dygraph_res = self._run( to_static=False ) @@ -75,13 +75,13 @@ def _run(self, to_static): return x1.place, x2.place, x2.numpy() def test_tensor_cuda_on_default_gpu(self): - if paddle.fluid.is_compiled_with_cuda(): + if paddle.base.is_compiled_with_cuda(): place = paddle.CUDAPlace( int(os.environ.get('FLAGS_selected_gpus', 0)) ) else: return - paddle.fluid.framework._set_expected_place(place) + paddle.base.framework._set_expected_place(place) dygraph_x1_place, dygraph_place, dygraph_res = self._run( to_static=False ) @@ -101,13 +101,13 @@ def _run(self, to_static): return x1.place, x2.place, x2.numpy() def test_with_warning_on_gpu(self): - if paddle.fluid.is_compiled_with_cuda(): + if 
paddle.base.is_compiled_with_cuda(): place = paddle.CUDAPlace( int(os.environ.get('FLAGS_selected_gpus', 0)) ) else: return - paddle.fluid.framework._set_expected_place(place) + paddle.base.framework._set_expected_place(place) x1 = paddle.ones([1, 2, 3]) with self.assertWarns(UserWarning, msg="ignored") as cm: diff --git a/test/dygraph_to_static/test_tensor_shape.py b/test/dygraph_to_static/test_tensor_shape.py index 6bf0a0f4016cc..ac4a943296782 100644 --- a/test/dygraph_to_static/test_tensor_shape.py +++ b/test/dygraph_to_static/test_tensor_shape.py @@ -18,11 +18,11 @@ from dygraph_to_static_util import ast_only_test, dy2static_unittest import paddle -from paddle import fluid +from paddle import base def dyfunc_tensor_shape_1(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) res = paddle.reshape(x, shape=x.shape) return res @@ -37,33 +37,33 @@ def dyfunc_tensor_shape_2(x): def dyfunc_tensor_shape_3(x): # Transform y.shape but run y.shape actually because y is not Tensor - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) y = paddle.ones([1, 5]) res = paddle.reshape(x, shape=y.shape) return res def dyfunc_tensor_shape_4(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) res = paddle.reshape(x, shape=(-1, x.shape[0], len(x.shape))) return res def dyfunc_tensor_shape_5(x): - # `res = fluid.layers.reshape(x, shape=(-1, s))` to - # `res = fluid.layers.reshape(x, shape=(-1, + # `res = base.layers.reshape(x, shape=(-1, s))` to + # `res = base.layers.reshape(x, shape=(-1, # paddle.jit.dy2static.convert_var_shape(x)[0]))` - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) s = x.shape[0] res = paddle.reshape(x, shape=(-1, s)) return res def dyfunc_tensor_shape_6(x): - # `res = fluid.layers.reshape(x, shape=(-1, s))` to - # `res = fluid.layers.reshape(x, shape=(-1, + # `res = base.layers.reshape(x, shape=(-1, s))` to + # `res = base.layers.reshape(x, shape=(-1, # paddle.jit.dy2static.convert_var_shape(x)[0:]))` - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) s = x.shape[0:] res = paddle.reshape(x, shape=s) return res @@ -96,14 +96,14 @@ def dyfunc_paddle_shape_api(x): # paddle.shape will not be converted. a = paddle.shape(x)[0] # alias api will also not be converted. - alias_old_api = paddle.fluid.layers + alias_old_api = paddle.base.layers b = paddle.shape(x)[1] res = paddle.reshape(x, shape=(b, a)) return res def dyfunc_with_if_1(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) res = paddle.reshape(x, [-1, 1]) x_shape_0 = x.shape[0] if x_shape_0 < 1: @@ -121,7 +121,7 @@ def dyfunc_with_if_1(x): def dyfunc_with_if_2(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) # `len(x.shape)` will not be transformed because x.shape is not used by Paddle api. 
if len(x.shape) < 1: res = x @@ -132,7 +132,7 @@ def dyfunc_with_if_2(x): def dyfunc_with_for_1(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) res = paddle.tensor.fill_constant(value=0, shape=[1], dtype="int32") # `x.shape[0]` is transformed into `paddle.jit.dy2static.convert_var_shape(x)[0]` for i in range(x.shape[0]): @@ -141,7 +141,7 @@ def dyfunc_with_for_1(x): def dyfunc_with_for_2(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) x_shape_0 = x.shape[0] res = paddle.tensor.fill_constant(value=0, shape=[1], dtype="int32") @@ -152,7 +152,7 @@ def dyfunc_with_for_2(x): def dyfunc_with_for_3(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) res = paddle.tensor.fill_constant(value=0, shape=[1], dtype="int32") # `len(x.shape)` is not transformed. for i in range(len(x.shape)): @@ -162,7 +162,7 @@ def dyfunc_with_for_3(x): def dyfunc_with_while_1(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) res = paddle.tensor.fill_constant(value=0, shape=[1], dtype="int32") # `x.shape[0]` is transformed into `paddle.jit.dy2static.convert_var_shape(x)[0]` i = 1 @@ -173,7 +173,7 @@ def dyfunc_with_while_1(x): def dyfunc_with_while_2(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) x_shape_0 = x.shape[0] res = paddle.tensor.fill_constant(value=0, shape=[1], dtype="int32") i = 1 @@ -185,7 +185,7 @@ def dyfunc_with_while_2(x): def dyfunc_with_while_3(x): - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) x_shape = x.shape res = paddle.tensor.fill_constant(value=0, shape=[1], dtype="int32") i = 1 @@ -236,9 +236,9 @@ class TestTensorShapeBasic(unittest.TestCase): def setUp(self): self.input = np.ones(5).astype("int32") self.place = ( - fluid.CUDAPlace(0) - if fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.is_compiled_with_cuda() + else base.CPUPlace() ) self._set_input_spec() self._set_expected_op_num() @@ -251,7 +251,7 @@ def _set_input_spec(self): self.input_spec = [paddle.static.InputSpec(shape=[5], dtype="int32")] def _run(self, to_static): - with fluid.dygraph.guard(): + with base.dygraph.guard(): if to_static: res = paddle.jit.to_static(self.dygraph_func)( self.input diff --git a/test/dygraph_to_static/test_to_tensor.py b/test/dygraph_to_static/test_to_tensor.py index e13cc9cc700a1..bab0ad018b76f 100644 --- a/test/dygraph_to_static/test_to_tensor.py +++ b/test/dygraph_to_static/test_to_tensor.py @@ -22,8 +22,8 @@ ) import paddle -from paddle.fluid import core -from paddle.fluid.framework import Program, program_guard +from paddle.base import core +from paddle.base.framework import Program, program_guard def case0(x): diff --git a/test/dygraph_to_static/test_transformer.py b/test/dygraph_to_static/test_transformer.py index 0942937bb68b7..da5a3a7edacc6 100644 --- a/test/dygraph_to_static/test_transformer.py +++ b/test/dygraph_to_static/test_transformer.py @@ -27,11 +27,11 @@ ) import paddle -from paddle import fluid +from paddle import base trainer_count = 1 place = ( - fluid.CUDAPlace(0) if fluid.is_compiled_with_cuda() else fluid.CPUPlace() + base.CUDAPlace(0) if base.is_compiled_with_cuda() else base.CPUPlace() ) SEED = 10 STEP_NUM = 10 @@ -41,11 +41,11 @@ def train_static(args, batch_generator): paddle.enable_static() paddle.seed(SEED) paddle.framework.random._manual_program_seed(SEED) - train_prog = fluid.Program() - startup_prog = fluid.Program() + train_prog = base.Program() + startup_prog = base.Program() - with 
fluid.program_guard(train_prog, startup_prog): - with fluid.unique_name.guard(): + with base.program_guard(train_prog, startup_prog): + with base.unique_name.guard(): # define input and reader input_field_names = ( util.encoder_data_input_fields @@ -121,7 +121,7 @@ def train_static(args, batch_generator): step_idx = 0 total_batch_num = 0 avg_loss = [] - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(startup_prog) for pass_id in range(args.epoch): batch_id = 0 @@ -182,7 +182,7 @@ def train_static(args, batch_generator): def train_dygraph(args, batch_generator): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): if SEED is not None: paddle.seed(SEED) paddle.framework.random._manual_program_seed(SEED) @@ -324,7 +324,7 @@ def train_dygraph(args, batch_generator): def predict_dygraph(args, batch_generator): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): paddle.seed(SEED) paddle.framework.random._manual_program_seed(SEED) @@ -420,8 +420,8 @@ def predict_dygraph(args, batch_generator): def predict_static(args, batch_generator): - test_prog = fluid.Program() - with fluid.program_guard(test_prog): + test_prog = base.Program() + with base.program_guard(test_prog): paddle.seed(SEED) paddle.framework.random._manual_program_seed(SEED) @@ -483,7 +483,7 @@ def predict_static(args, batch_generator): test_prog = test_prog.clone(for_test=True) # define the executor and program for training - exe = fluid.Executor(place) + exe = base.Executor(place) util.load( test_prog, os.path.join(args.save_static_model_path, "transformer"), exe diff --git a/test/dygraph_to_static/test_tsm.py b/test/dygraph_to_static/test_tsm.py index 3febac5d22799..e68406bd4c9ab 100644 --- a/test/dygraph_to_static/test_tsm.py +++ b/test/dygraph_to_static/test_tsm.py @@ -23,8 +23,8 @@ from tsm_config_utils import merge_configs, parse_config, print_configs import paddle -from paddle import fluid -from paddle.fluid.dygraph import to_variable +from paddle import base +from paddle.base.dygraph import to_variable from paddle.jit.api import to_static from paddle.nn import BatchNorm, Linear @@ -43,7 +43,7 @@ def parse_args(): parser.add_argument( '--use_gpu', type=bool, - default=fluid.is_compiled_with_cuda(), + default=base.is_compiled_with_cuda(), help='default use gpu.', ) args = parser.parse_args( @@ -71,15 +71,15 @@ def __init__( stride=stride, padding=(filter_size - 1) // 2, groups=1, - weight_attr=fluid.param_attr.ParamAttr(), + weight_attr=base.param_attr.ParamAttr(), bias_attr=False, ) self._batch_norm = BatchNorm( num_filters, act=act, - param_attr=fluid.param_attr.ParamAttr(), - bias_attr=fluid.param_attr.ParamAttr(), + param_attr=base.param_attr.ParamAttr(), + bias_attr=base.param_attr.ParamAttr(), ) def forward(self, inputs): @@ -301,11 +301,11 @@ def train(args, fake_data_reader, to_static): valid_config = merge_configs(config, 'valid', vars(args)) print_configs(train_config, 'Train') - place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace() + place = base.CUDAPlace(0) if args.use_gpu else base.CPUPlace() random.seed(0) np.random.seed(0) - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): paddle.seed(1000) paddle.framework.random._manual_program_seed(1000) @@ -387,8 +387,8 @@ def train(args, fake_data_reader, to_static): class TestTsm(unittest.TestCase): @test_and_compare_with_new_ir(False) def test_dygraph_static_same_loss(self): - if fluid.is_compiled_with_cuda(): - fluid.set_flags({"FLAGS_cudnn_deterministic": True}) + if 
base.is_compiled_with_cuda(): + base.set_flags({"FLAGS_cudnn_deterministic": True}) args = parse_args() fake_data_reader = FakeDataReader("train", parse_config(args.config)) dygraph_loss = train(args, fake_data_reader, to_static=False) diff --git a/test/dygraph_to_static/test_typehint.py b/test/dygraph_to_static/test_typehint.py index e349bbce669f1..630c22c981756 100644 --- a/test/dygraph_to_static/test_typehint.py +++ b/test/dygraph_to_static/test_typehint.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base SEED = 2020 np.random.seed(SEED) @@ -35,9 +35,9 @@ def function(x: A) -> A: class TestTransformWhileLoop(unittest.TestCase): def setUp(self): self.place = ( - fluid.CUDAPlace(0) - if fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.is_compiled_with_cuda() + else base.CPUPlace() ) self.x = np.zeros(shape=(1), dtype=np.int32) self._init_dyfunc() @@ -52,9 +52,9 @@ def _run_dygraph(self): return self._run(to_static=False) def _run(self, to_static): - with fluid.dygraph.guard(self.place): + with base.dygraph.guard(self.place): # Set the input of dyfunc to Tensor - tensor_x = fluid.dygraph.to_variable(self.x, zero_copy=False) + tensor_x = base.dygraph.to_variable(self.x, zero_copy=False) if to_static: ret = paddle.jit.to_static(self.dyfunc)(tensor_x) else: diff --git a/test/dygraph_to_static/test_unuseful_inputs.py b/test/dygraph_to_static/test_unuseful_inputs.py index 821afa9be876e..74f3a4acbdd21 100644 --- a/test/dygraph_to_static/test_unuseful_inputs.py +++ b/test/dygraph_to_static/test_unuseful_inputs.py @@ -64,7 +64,7 @@ def forward(self, x): class TestDuplicateOutput(unittest.TestCase): """ TestCase for the transformation from control flow `if/else` - dependent on tensor in Dygraph into Static `fluid.layers.cond`. + dependent on tensor in Dygraph into Static `base.layers.cond`. 
""" def test_case(self): diff --git a/test/dygraph_to_static/test_word2vec.py b/test/dygraph_to_static/test_word2vec.py index 424d1f3a7ef83..82fe2e1c0d516 100644 --- a/test/dygraph_to_static/test_word2vec.py +++ b/test/dygraph_to_static/test_word2vec.py @@ -20,7 +20,7 @@ from dygraph_to_static_util import test_and_compare_with_new_ir import paddle -from paddle import fluid +from paddle import base from paddle.jit.api import to_static from paddle.nn import Embedding @@ -226,7 +226,7 @@ def __init__(self, name_scope, vocab_size, embedding_size, init_scale=0.1): self.embedding = Embedding( self.vocab_size, self.embedding_size, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( name='embedding_para', initializer=paddle.nn.initializer.Uniform( low=-0.5 / self.embedding_size, @@ -238,7 +238,7 @@ def __init__(self, name_scope, vocab_size, embedding_size, init_scale=0.1): self.embedding_out = Embedding( self.vocab_size, self.embedding_size, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( name='embedding_out_para', initializer=paddle.nn.initializer.Uniform( low=-0.5 / self.embedding_size, @@ -281,13 +281,13 @@ def train(to_static): np.random.seed(0) place = ( - fluid.CUDAPlace(0) - if fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.dygraph.guard(place): - fluid.default_startup_program().random_seed = 1000 - fluid.default_main_program().random_seed = 1000 + with base.dygraph.guard(place): + base.default_startup_program().random_seed = 1000 + base.default_main_program().random_seed = 1000 skip_gram_model = SkipGram( "skip_gram_model", vocab_size, embedding_size @@ -302,9 +302,9 @@ def train(to_static): for center_words, target_words, label, eval_words in build_batch( dataset, batch_size, epoch_num ): - center_words_var = fluid.dygraph.to_variable(center_words) - target_words_var = fluid.dygraph.to_variable(target_words) - label_var = fluid.dygraph.to_variable(label) + center_words_var = base.dygraph.to_variable(center_words) + target_words_var = base.dygraph.to_variable(target_words) + label_var = base.dygraph.to_variable(label) pred, loss = skip_gram_model( center_words_var, target_words_var, label_var ) diff --git a/test/dygraph_to_static/test_yolov3.py b/test/dygraph_to_static/test_yolov3.py index 4b649107bc303..3f31b666c7f31 100644 --- a/test/dygraph_to_static/test_yolov3.py +++ b/test/dygraph_to_static/test_yolov3.py @@ -21,8 +21,8 @@ from yolov3 import YOLOv3, cfg import paddle -from paddle import fluid -from paddle.fluid.dygraph import to_variable +from paddle import base +from paddle.base.dygraph import to_variable paddle.enable_static() random.seed(0) @@ -83,10 +83,10 @@ def train(to_static): random.seed(0) np.random.seed(0) - place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace() - with fluid.dygraph.guard(place): - fluid.default_startup_program().random_seed = 1000 - fluid.default_main_program().random_seed = 1000 + place = base.CUDAPlace(0) if cfg.use_gpu else base.CPUPlace() + with base.dygraph.guard(place): + base.default_startup_program().random_seed = 1000 + base.default_main_program().random_seed = 1000 model = YOLOv3(3, is_train=True) boundaries = cfg.lr_steps diff --git a/test/dygraph_to_static/transformer_dygraph_model.py b/test/dygraph_to_static/transformer_dygraph_model.py index d2d89cea6d227..dd31fbd7d6a0f 100644 --- a/test/dygraph_to_static/transformer_dygraph_model.py +++ b/test/dygraph_to_static/transformer_dygraph_model.py @@ -16,8 +16,8 @@ 
import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid.dygraph import to_variable +from paddle import base +from paddle.base.dygraph import to_variable from paddle.jit.api import dygraph_to_static_func from paddle.nn import Layer, Linear @@ -58,10 +58,10 @@ def __init__(self, process_cmd, d_model, dropout_rate): "layer_norm_%d" % len(list(self.children())), paddle.nn.LayerNorm( normalized_shape=d_model, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(1.0) ), - bias_attr=fluid.ParamAttr( + bias_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(0.0) ), ), @@ -104,25 +104,25 @@ def __init__( in_features=d_model, out_features=d_key * n_head, bias_attr=False, - weight_attr=fluid.ParamAttr(initializer=param_initializer), + weight_attr=base.ParamAttr(initializer=param_initializer), ) self.k_fc = Linear( in_features=d_model, out_features=d_key * n_head, bias_attr=False, - weight_attr=fluid.ParamAttr(initializer=param_initializer), + weight_attr=base.ParamAttr(initializer=param_initializer), ) self.v_fc = Linear( in_features=d_model, out_features=d_value * n_head, bias_attr=False, - weight_attr=fluid.ParamAttr(initializer=param_initializer), + weight_attr=base.ParamAttr(initializer=param_initializer), ) self.proj_fc = Linear( in_features=d_value * n_head, out_features=d_model, bias_attr=False, - weight_attr=fluid.ParamAttr(initializer=param_initializer), + weight_attr=base.ParamAttr(initializer=param_initializer), ) def forward(self, queries, keys, values, attn_bias, cache=None): @@ -289,7 +289,7 @@ def __init__(self, vocab_size, emb_dim, bos_idx=0): self.word_embedder = paddle.nn.Embedding( vocab_size, emb_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Normal(0.0, emb_dim**-0.5) ), ) @@ -324,7 +324,7 @@ def __init__( self.pos_encoder = paddle.nn.Embedding( max_length, self.emb_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Assign( position_encoding_init(max_length, self.emb_dim) ), @@ -516,7 +516,7 @@ def __init__( self.pos_encoder = paddle.nn.Embedding( max_length, self.emb_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Assign( position_encoding_init(max_length, self.emb_dim) ), diff --git a/test/dygraph_to_static/yolov3.py b/test/dygraph_to_static/yolov3.py index b13a7eb8e16ea..f72ee8bb8f8ff 100644 --- a/test/dygraph_to_static/yolov3.py +++ b/test/dygraph_to_static/yolov3.py @@ -18,8 +18,8 @@ from darknet import ConvBNLayer, DarkNet53_conv_body import paddle -from paddle import _legacy_C_ops, fluid -from paddle.fluid.param_attr import ParamAttr +from paddle import _legacy_C_ops, base +from paddle.base.param_attr import ParamAttr from paddle.jit.api import to_static from paddle.regularizer import L2Decay @@ -107,11 +107,11 @@ def __setattr__(self, name, value): # derived learning rate the to get the final learning rate. 
cfg.learning_rate = 0.001 # maximum number of iterations -cfg.max_iter = 20 if fluid.is_compiled_with_cuda() else 1 +cfg.max_iter = 20 if base.is_compiled_with_cuda() else 1 # Disable mixup in last N iter -cfg.no_mixup_iter = 10 if fluid.is_compiled_with_cuda() else 1 +cfg.no_mixup_iter = 10 if base.is_compiled_with_cuda() else 1 # warm up to learning rate -cfg.warm_up_iter = 10 if fluid.is_compiled_with_cuda() else 1 +cfg.warm_up_iter = 10 if base.is_compiled_with_cuda() else 1 cfg.warm_up_factor = 0.0 # lr steps_with_decay cfg.lr_steps = [400000, 450000] @@ -124,7 +124,7 @@ def __setattr__(self, name, value): # ENV options # # support both CPU and GPU -cfg.use_gpu = fluid.is_compiled_with_cuda() +cfg.use_gpu = base.is_compiled_with_cuda() # Class number cfg.class_num = 80 diff --git a/test/fft/test_fft.py b/test/fft/test_fft.py index bcb4199777c1a..a49e030fa6353 100644 --- a/test/fft/test_fft.py +++ b/test/fft/test_fft.py @@ -117,7 +117,7 @@ def decorate(cls): class TestFft(unittest.TestCase): def test_fft(self): """Test fft with norm condition""" - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( scipy.fft.fft(self.x, self.n, self.axis, self.norm), paddle.fft.fft( @@ -156,7 +156,7 @@ def test_fft(self): class TestIfft(unittest.TestCase): def test_fft(self): """Test ifft with norm condition""" - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( scipy.fft.ifft(self.x, self.n, self.axis, self.norm), paddle.fft.ifft( @@ -242,7 +242,7 @@ def test_fft(self): class TestFft2(unittest.TestCase): def test_fft2(self): """Test fft2 with norm condition""" - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( scipy.fft.fft2(self.x, self.n, self.axis, self.norm), paddle.fft.fft2( @@ -314,7 +314,7 @@ def test_fft2(self): - axis type error - norm out of range """ - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): with self.assertRaises(self.expect_exception): paddle.fft.fft2( paddle.to_tensor(self.x), self.n, self.axis, self.norm @@ -349,7 +349,7 @@ def test_fft2(self): class TestFftn(unittest.TestCase): def test_fftn(self): """Test fftn with norm condition""" - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( scipy.fft.fftn(self.x, self.n, self.axis, self.norm), paddle.fft.fftn( @@ -388,7 +388,7 @@ def test_fftn(self): class TestIFftn(unittest.TestCase): def test_ifftn(self): """Test ifftn with norm condition""" - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( scipy.fft.ifftn(self.x, self.n, self.axis, self.norm), paddle.fft.ifftn( @@ -452,7 +452,7 @@ def test_ifftn(self): class TestHfft(unittest.TestCase): def test_hfft(self): """Test hfft with norm condition""" - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( scipy.fft.hfft(self.x, self.n, self.axis, self.norm), paddle.fft.hfft( @@ -516,7 +516,7 @@ def test_hfft(self): class TestIrfft(unittest.TestCase): def test_irfft(self): """Test irfft with norm condition""" - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( scipy.fft.irfft(self.x, self.n, self.axis, self.norm), paddle.fft.irfft( @@ -580,7 +580,7 @@ def test_irfft(self): 
class TestIrfftn(unittest.TestCase): def test_irfftn(self): """Test irfftn with norm condition""" - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( scipy.fft.irfftn(self.x, self.n, self.axis, self.norm), paddle.fft.irfftn( @@ -644,7 +644,7 @@ def test_irfftn(self): class TestHfftn(unittest.TestCase): def test_hfftn(self): """Test hfftn with norm condition""" - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( scipy.fft.hfftn(self.x, self.n, self.axis, self.norm), paddle.fft.hfftn( @@ -702,7 +702,7 @@ def test_hfftn(self): class TestHfft2(unittest.TestCase): def test_hfft2(self): """Test hfft2 with norm condition""" - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( scipy.fft.hfft2(self.x, self.s, self.axis, self.norm), paddle.fft.hfft2( @@ -759,7 +759,7 @@ def test_hfft2(self): class TestIrfft2(unittest.TestCase): def test_irfft2(self): """Test irfft2 with norm condition""" - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( scipy.fft.irfft2(self.x, self.s, self.axis, self.norm), paddle.fft.irfft2( @@ -845,7 +845,7 @@ def test_hfft(self): - axis type error - norm out of range """ - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): with self.assertRaises(self.expect_exception): paddle.fft.hfft( paddle.to_tensor(self.x), self.n, self.axis, self.norm @@ -917,7 +917,7 @@ def test_irfft(self): - axis out of range - norm out of range """ - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): with self.assertRaises(self.expect_exception): paddle.fft.irfft( paddle.to_tensor(self.x), self.n, self.axis, self.norm @@ -1008,7 +1008,7 @@ def test_hfft2(self): - the dimensions of n and axis are different - norm out of range """ - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): with self.assertRaises(self.expect_exception): paddle.fft.hfft2( paddle.to_tensor(self.x), self.n, self.axis, self.norm @@ -1097,7 +1097,7 @@ def test_irfft2(self): - the dimensions of n and axis are different - norm out of range """ - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): with self.assertRaises(self.expect_exception): paddle.fft.irfft2( paddle.to_tensor(self.x), self.n, self.axis, self.norm @@ -1187,7 +1187,7 @@ def test_hfftn(self): - the dimensions of n and axis are different - norm out of range """ - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): with self.assertRaises(self.expect_exception): paddle.fft.hfftn( paddle.to_tensor(self.x), self.n, self.axis, self.norm @@ -1266,7 +1266,7 @@ def test_irfftn(self): - norm out of range - the dimensions of n and axis are different """ - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): with self.assertRaises(self.expect_exception): paddle.fft.irfftn( paddle.to_tensor(self.x), self.n, self.axis, self.norm @@ -1300,7 +1300,7 @@ def test_irfftn(self): class TestRfft(unittest.TestCase): def test_rfft(self): """Test rfft with norm condition""" - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( scipy.fft.rfft(self.x, self.n, self.axis, self.norm), paddle.fft.rfft( @@ -1380,7 +1380,7 @@ def 
test_rfft(self): class TestRfft2(unittest.TestCase): def test_rfft2(self): """Test rfft2 with norm condition""" - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( scipy.fft.rfft2(self.x, self.n, self.axis, self.norm), paddle.fft.rfft2( @@ -1444,7 +1444,7 @@ def test_rfft2(self): - norm out of range - the dimensions of n and axis are different """ - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): with self.assertRaises(self.expect_exception): paddle.fft.rfft2( paddle.to_tensor(self.x), self.n, self.axis, self.norm @@ -1478,7 +1478,7 @@ def test_rfft2(self): class TestRfftn(unittest.TestCase): def test_rfftn(self): """Test rfftn with norm condition""" - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( scipy.fft.rfftn(self.x, self.n, self.axis, self.norm), paddle.fft.rfftn( @@ -1531,7 +1531,7 @@ def test_rfftn(self): - norm out of range - the dimensions of n and axis are different """ - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): with self.assertRaises(self.expect_exception): paddle.fft.rfftn( paddle.to_tensor(self.x), self.n, self.axis, self.norm @@ -1565,7 +1565,7 @@ def test_rfftn(self): class TestIhfft(unittest.TestCase): def test_ihfft(self): """Test ihfft with norm condition""" - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( scipy.fft.ihfft(self.x, self.n, self.axis, self.norm), paddle.fft.ihfft( @@ -1609,7 +1609,7 @@ def test_ihfft(self): - axis out of range - norm out of range """ - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): with self.assertRaises(self.expect_exception): paddle.fft.ihfft( paddle.to_tensor(self.x), self.n, self.axis, self.norm @@ -1644,7 +1644,7 @@ def test_ihfft(self): class TestIhfft2(unittest.TestCase): def test_ihfft2(self): """Test ihfft2 with norm condition""" - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( scipy.fft.ihfft2(self.x, self.n, self.axis, self.norm), paddle.fft.ihfft2( @@ -1716,7 +1716,7 @@ def test_ihfft2(self): - axis out of range - norm out of range """ - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): with self.assertRaises(self.expect_exception): paddle.fft.ihfft2( paddle.to_tensor(self.x), self.n, self.axis, self.norm @@ -1750,7 +1750,7 @@ def test_ihfft2(self): class TestIhfftn(unittest.TestCase): def test_ihfftn(self): """Test ihfftn with norm condition""" - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( scipy.fft.ihfftn(self.x, self.n, self.axis, self.norm), paddle.fft.ihfftn( @@ -1795,7 +1795,7 @@ def test_ihfftn(self): - axis out of range - norm out of range """ - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): with self.assertRaises(self.expect_exception): paddle.fft.ihfftn( paddle.to_tensor(self.x), self.n, self.axis, self.norm @@ -1813,7 +1813,7 @@ def test_ihfftn(self): class TestFftFreq(unittest.TestCase): def test_fftfreq(self): """Test fftfreq with norm condition""" - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( scipy.fft.fftfreq(self.n, self.d).astype(self.dtype), 
paddle.fft.fftfreq(self.n, self.d, self.dtype).numpy(), @@ -1834,7 +1834,7 @@ def test_fftfreq(self): class TestFftFreqException(unittest.TestCase): def test_fftfreq2(self): """Test fftfreq with d = 0""" - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): with self.assertRaises(self.expect_exception): paddle.fft.fftfreq(self.n, self.d, self.dtype) @@ -1850,7 +1850,7 @@ def test_fftfreq2(self): class TestRfftFreq(unittest.TestCase): def test_rfftfreq(self): """Test rfftfreq with norm condition""" - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( scipy.fft.rfftfreq(self.n, self.d).astype(self.dtype), paddle.fft.rfftfreq(self.n, self.d, self.dtype).numpy(), @@ -1871,7 +1871,7 @@ def test_rfftfreq(self): class TestRfftFreqException(unittest.TestCase): def test_rfftfreq2(self): """Test fftfreq with d = 0""" - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): with self.assertRaises(self.expect_exception): paddle.fft.rfftfreq(self.n, self.d, self.dtype) @@ -1894,7 +1894,7 @@ def test_rfftfreq2(self): class TestFftShift(unittest.TestCase): def test_fftshift(self): """Test fftshift with norm condition""" - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( scipy.fft.fftshift(self.x, self.axes), paddle.fft.fftshift( @@ -1923,7 +1923,7 @@ def test_fftshift(self): class TestIfftShift(unittest.TestCase): def test_ifftshift(self): """Test ifftshift with norm condition""" - with paddle.fluid.dygraph.guard(self.place): + with paddle.base.dygraph.guard(self.place): np.testing.assert_allclose( scipy.fft.ifftshift(self.x, self.axes), paddle.fft.ifftshift( diff --git a/test/indexing/test_getitem.py b/test/indexing/test_getitem.py index 6ecd7750ec795..df0801ad4b61d 100644 --- a/test/indexing/test_getitem.py +++ b/test/indexing/test_getitem.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid.variable_index import _getitem_static +from paddle.base.variable_index import _getitem_static class TestGetitemInDygraph(unittest.TestCase): diff --git a/test/indexing/test_setitem.py b/test/indexing/test_setitem.py index b5e23ed309de1..b188d84ca2a66 100644 --- a/test/indexing/test_setitem.py +++ b/test/indexing/test_setitem.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid.variable_index import _setitem_static +from paddle.base.variable_index import _setitem_static class TestSetitemInDygraph(unittest.TestCase): diff --git a/test/ipu/custom_ops/test_custom_leaky_relu_ipu.py b/test/ipu/custom_ops/test_custom_leaky_relu_ipu.py index edb6b819c5b34..f8fbfc0af4052 100644 --- a/test/ipu/custom_ops/test_custom_leaky_relu_ipu.py +++ b/test/ipu/custom_ops/test_custom_leaky_relu_ipu.py @@ -24,7 +24,7 @@ from paddle.utils.cpp_extension import load sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) -from op_test_ipu import IPUOpTest, np_dtype_to_fluid_str +from op_test_ipu import IPUOpTest, np_dtype_to_base_str def load_custom_ops(): @@ -60,7 +60,7 @@ def set_feed_attr(self): self.feed_shape = [x.shape for x in self.feed.values()] self.feed_list = list(self.feed.keys()) self.feed_dtype = [ - np_dtype_to_fluid_str(x.dtype) for x in self.feed.values() + np_dtype_to_base_str(x.dtype) for x in self.feed.values() ] def set_attrs(self): diff --git a/test/ipu/distributed/test_dist_pod128_sample.py b/test/ipu/distributed/test_dist_pod128_sample.py index 
9b0a33dfd87fb..94e87bab49afe 100644 --- a/test/ipu/distributed/test_dist_pod128_sample.py +++ b/test/ipu/distributed/test_dist_pod128_sample.py @@ -46,7 +46,7 @@ def TestDistTraining(): attrs = {"size": [128, 16], "padding_idx": -1, "dtype": 'float32'} - scope = paddle.fluid.core.Scope() + scope = paddle.base.core.Scope() main_prog = paddle.static.Program() startup_prog = paddle.static.Program() main_prog.random_seed = 42 @@ -55,7 +55,7 @@ def TestDistTraining(): np.random.seed(42) input_data = np.random.uniform(0, 127, size=[128, 3, 2, 1]).astype(np.int32) - with paddle.fluid.scope_guard(scope): + with paddle.base.scope_guard(scope): with paddle.static.program_guard(main_prog, startup_prog): x = paddle.static.data(name="x", shape=[3, 2, 1], dtype='int64') with paddle.static.ipu_shard_guard(index=0, stage=0): diff --git a/test/ipu/distributed/test_dist_sample.py b/test/ipu/distributed/test_dist_sample.py index a5506db7e349f..45c5d0d366b9f 100644 --- a/test/ipu/distributed/test_dist_sample.py +++ b/test/ipu/distributed/test_dist_sample.py @@ -67,13 +67,13 @@ def Test(use_dist, file_name): attrs = {"size": [128, 16], "padding_idx": -1, "dtype": 'float32'} - scope = paddle.fluid.core.Scope() + scope = paddle.base.core.Scope() main_prog = paddle.static.Program() startup_prog = paddle.static.Program() main_prog.random_seed = 42 startup_prog.random_seed = 42 - with paddle.fluid.scope_guard(scope): + with paddle.base.scope_guard(scope): with paddle.static.program_guard(main_prog, startup_prog): x = paddle.static.data(name="x", shape=[3, 2, 1], dtype='int64') diff --git a/test/ipu/op_test_ipu.py b/test/ipu/op_test_ipu.py index 2564f76599c3d..aab7e64b2fc9f 100644 --- a/test/ipu/op_test_ipu.py +++ b/test/ipu/op_test_ipu.py @@ -23,7 +23,7 @@ import paddle import paddle.static -map_np_dtype_to_fluid_dtype = { +map_np_dtype_to_base_dtype = { 'bool': "bool", 'int8': "int8", 'uint8': "uint8", @@ -35,8 +35,8 @@ } -def np_dtype_to_fluid_str(dtype: np.dtype) -> str: - return map_np_dtype_to_fluid_dtype[dtype.name] +def np_dtype_to_base_str(dtype: np.dtype) -> str: + return map_np_dtype_to_base_dtype[dtype.name] class ExecutionModeFull(IntEnum): diff --git a/test/ipu/test_dy2static_fp16_ipu.py b/test/ipu/test_dy2static_fp16_ipu.py index 90e140e08664a..3b37073bb0a73 100644 --- a/test/ipu/test_dy2static_fp16_ipu.py +++ b/test/ipu/test_dy2static_fp16_ipu.py @@ -102,7 +102,7 @@ def _test(self, use_ipu=False): result.append(loss) if use_ipu: - paddle.fluid.core.IpuBackend.get_instance().weights_to_host() + paddle.base.core.IpuBackend.get_instance().weights_to_host() paddle.save(model.state_dict(), model_path) paddle.save(optim.state_dict(), optim_path) diff --git a/test/ipu/test_dy2static_ipu.py b/test/ipu/test_dy2static_ipu.py index 22f755dea5271..b98bdc0351400 100644 --- a/test/ipu/test_dy2static_ipu.py +++ b/test/ipu/test_dy2static_ipu.py @@ -167,7 +167,7 @@ def _test(self, use_ipu=False): result.append(loss) if use_ipu: - paddle.fluid.core.IpuBackend.get_instance().weights_to_host() + paddle.base.core.IpuBackend.get_instance().weights_to_host() paddle.save(model.state_dict(), model_path) paddle.save(optim.state_dict(), optim_path) diff --git a/test/ipu/test_greater_op_ipu.py b/test/ipu/test_greater_op_ipu.py index 68ee9902256c0..d40368d70262e 100644 --- a/test/ipu/test_greater_op_ipu.py +++ b/test/ipu/test_greater_op_ipu.py @@ -28,7 +28,7 @@ def setUp(self): self.set_test_op() def set_test_op(self): - self.op = paddle.fluid.layers.greater_than + self.op = paddle.base.layers.greater_than def 
set_op_attrs(self): self.attrs = {} @@ -115,22 +115,22 @@ def test_case3(self): class TestLessThan(TestGreaterThan): def set_test_op(self): - self.op = paddle.fluid.layers.less_than + self.op = paddle.base.layers.less_than class TestEqual(TestGreaterThan): def set_test_op(self): - self.op = paddle.fluid.layers.equal + self.op = paddle.base.layers.equal class TestGreaterEqual(TestGreaterThan): def set_test_op(self): - self.op = paddle.fluid.layers.greater_equal + self.op = paddle.base.layers.greater_equal class TestLessEqual(TestGreaterThan): def set_test_op(self): - self.op = paddle.fluid.layers.less_equal + self.op = paddle.base.layers.less_equal if __name__ == "__main__": diff --git a/test/ipu/test_identity_loss_ipu.py b/test/ipu/test_identity_loss_ipu.py index 14f0b4484f765..819e97b64e665 100644 --- a/test/ipu/test_identity_loss_ipu.py +++ b/test/ipu/test_identity_loss_ipu.py @@ -15,13 +15,13 @@ import unittest import numpy as np -from op_test_ipu import IPUOpTest, np_dtype_to_fluid_str +from op_test_ipu import IPUOpTest, np_dtype_to_base_str import paddle import paddle.optimizer import paddle.static -from paddle import fluid -from paddle.fluid import compiler +from paddle import base +from paddle.base import compiler paddle.enable_static() @@ -49,18 +49,18 @@ def set_feed_attr(self): self.feed_shape = [x.shape for x in self.feed.values()] self.feed_list = list(self.feed.keys()) self.feed_dtype = [ - np_dtype_to_fluid_str(x.dtype) for x in self.feed.values() + np_dtype_to_base_str(x.dtype) for x in self.feed.values() ] def _test_base(self, reduction): - scope = fluid.core.Scope() + scope = base.core.Scope() main_prog = paddle.static.Program() startup_prog = paddle.static.Program() SEED = 0 main_prog.random_seed = SEED startup_prog.random_seed = SEED - with fluid.scope_guard(scope): + with base.scope_guard(scope): with paddle.static.program_guard(main_prog, startup_prog): x = paddle.static.data( name=self.feed_list[0], diff --git a/test/ipu/test_inference_model_io_ipu.py b/test/ipu/test_inference_model_io_ipu.py index 418c3da94a1b3..6151928fea282 100644 --- a/test/ipu/test_inference_model_io_ipu.py +++ b/test/ipu/test_inference_model_io_ipu.py @@ -58,12 +58,12 @@ def _test_save(self): startup_prog = paddle.static.Program() main_prog.random_seed = self.SEED startup_prog.random_seed = self.SEED - generator = paddle.fluid.unique_name.UniqueNameGenerator() + generator = paddle.base.unique_name.UniqueNameGenerator() self.full_name = '/'.join( [self.attrs['path'].name, self.attrs['model_name']] ) - with paddle.fluid.unique_name.guard(generator): + with paddle.base.unique_name.guard(generator): with paddle.static.scope_guard(scope): with paddle.static.program_guard(main_prog, startup_prog): x = paddle.static.data( diff --git a/test/ipu/test_one_hot_v2_op_ipu.py b/test/ipu/test_one_hot_v2_op_ipu.py index 9118f756fd4e7..9e9fafb044d96 100644 --- a/test/ipu/test_one_hot_v2_op_ipu.py +++ b/test/ipu/test_one_hot_v2_op_ipu.py @@ -46,7 +46,7 @@ def build_model(self): x = paddle.static.data( name=self.feed_list[0], shape=self.feed_shape[0], dtype='int32' ) - out = paddle.fluid.input.one_hot(x, **self.attrs) + out = paddle.base.input.one_hot(x, **self.attrs) self.fetch_list = [out.name] def run_model(self, exec_mode): diff --git a/test/ipu/test_save_load_ipu.py b/test/ipu/test_save_load_ipu.py index c11b9fdd27b8e..7012f1cacf33d 100644 --- a/test/ipu/test_save_load_ipu.py +++ b/test/ipu/test_save_load_ipu.py @@ -52,8 +52,8 @@ def set_optimizer(self): @IPUOpTest.static_graph def build_model(self): - 
generator = paddle.fluid.unique_name.UniqueNameGenerator() - with paddle.fluid.unique_name.guard(generator): + generator = paddle.base.unique_name.UniqueNameGenerator() + with paddle.base.unique_name.guard(generator): x = paddle.static.data( name=self.feed_list[0], shape=self.feed_shape[0], diff --git a/test/ipu/test_weight_sharing_ipu.py b/test/ipu/test_weight_sharing_ipu.py index 9f114fec99ab6..abe7435a3d938 100644 --- a/test/ipu/test_weight_sharing_ipu.py +++ b/test/ipu/test_weight_sharing_ipu.py @@ -59,12 +59,12 @@ def build_model(self): input=x, size=[768, 768], dtype='float32', - param_attr=paddle.fluid.ParamAttr(name='word_embedding'), + param_attr=paddle.base.ParamAttr(name='word_embedding'), is_sparse=False, ) with paddle.static.ipu_shard_guard(index=1, stage=1): z = paddle.static.nn.fc( - x=y, size=768, weight_attr=paddle.fluid.ParamAttr(name="fc") + x=y, size=768, weight_attr=paddle.base.ParamAttr(name="fc") ) with paddle.static.ipu_shard_guard(index=0, stage=2): out = paddle.matmul( diff --git a/test/ir/inference/auto_scan_test.py b/test/ir/inference/auto_scan_test.py index 4140449cf1ba9..105ea3e0925b7 100755 --- a/test/ir/inference/auto_scan_test.py +++ b/test/ir/inference/auto_scan_test.py @@ -34,7 +34,7 @@ import paddle import paddle.inference as paddle_infer -from paddle.fluid.core import PassVersionChecker +from paddle.base.core import PassVersionChecker from paddle.static.log_helper import get_logger LOGLEVEL = os.environ.get("PADDLE_TEST_LOGLEVEL", "INFO").upper() diff --git a/test/ir/inference/inference_pass_test.py b/test/ir/inference/inference_pass_test.py index 5cbf69b3527be..8d8a484f77cf1 100644 --- a/test/ir/inference/inference_pass_test.py +++ b/test/ir/inference/inference_pass_test.py @@ -21,9 +21,9 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.core import AnalysisConfig, create_paddle_predictor +from paddle import base +from paddle.base import core +from paddle.base.core import AnalysisConfig, create_paddle_predictor class InferencePassTest(unittest.TestCase): @@ -31,8 +31,8 @@ def __init__(self, methodName='runTest'): paddle.enable_static() super().__init__(methodName) paddle.enable_static() - self.main_program = fluid.Program() - self.startup_program = fluid.Program() + self.main_program = base.Program() + self.startup_program = base.Program() self.feeds = None self.fetch_list = None @@ -57,7 +57,7 @@ def _get_place(self): def _save_models( self, dirname, feeded_var_names, target_vars, executor, program, scope ): - with fluid.scope_guard(scope): + with base.scope_guard(scope): # save models as combined but sometimes params is null # To adapt to this situation, the path needs to be adjusted to the old version format. feeded_vars = [] @@ -92,7 +92,7 @@ def _get_paddle_outs(self, executor, program, scope): ''' Return PaddlePaddle outputs. ''' - with fluid.scope_guard(scope): + with base.scope_guard(scope): outs = executor.run( program=program, feed=self.feeds, @@ -114,7 +114,7 @@ def _get_inference_outs(self, config): tensor = predictor.get_input_tensor(name) feed_data = list(self.feeds.values())[i] tensor.copy_from_cpu(np.array(feed_data)) - if type(feed_data) == fluid.LoDTensor: + if type(feed_data) == base.LoDTensor: tensor.set_lod(feed_data.lod()) predictor.zero_copy_run() @@ -201,11 +201,11 @@ def check_output_with_option( or disable TensorRT, enable MKLDNN or disable MKLDNN are all the same. 
''' - place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace() - executor = fluid.Executor(place) - scope = fluid.Scope() + place = base.CUDAPlace(0) if use_gpu else base.CPUPlace() + executor = base.Executor(place) + scope = base.Scope() device = "GPU" if use_gpu else "CPU" - with fluid.scope_guard(scope): + with base.scope_guard(scope): executor.run(self.startup_program) self._save_models( self.path, diff --git a/test/ir/inference/program_config.py b/test/ir/inference/program_config.py index 3c4d82126b59a..4516c2cb4ad0c 100644 --- a/test/ir/inference/program_config.py +++ b/test/ir/inference/program_config.py @@ -18,10 +18,10 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core, framework -from paddle.fluid.executor import global_scope -from paddle.fluid.framework import ( +from paddle import base +from paddle.base import core, framework +from paddle.base.executor import global_scope +from paddle.base.framework import ( IrGraph, IrNode, Operator, @@ -291,7 +291,7 @@ def create_fake_model(program_config): '''Create a Paddle model(in memory) according to the given config.''' paddle.enable_static() main_program_desc = core.ProgramDesc() - util_program = fluid.Program() + util_program = base.Program() main_block_desc = main_program_desc.block(0) var_desc = main_block_desc.var(b"feed") @@ -409,10 +409,10 @@ def create_fake_model(program_config): model = main_program_desc.serialize_to_string() util_program._sync_with_cpp() - place = fluid.CPUPlace() - executor = fluid.Executor(place) - scope = fluid.Scope() - with fluid.scope_guard(scope): + place = base.CPUPlace() + executor = base.Executor(place) + scope = base.Scope() + with base.scope_guard(scope): executor.run(util_program) params = scope.find_var("out_var_0").get_bytes() diff --git a/test/ir/inference/quant_dequant_test.py b/test/ir/inference/quant_dequant_test.py index 32155b8d22d38..2dfef926933ff 100644 --- a/test/ir/inference/quant_dequant_test.py +++ b/test/ir/inference/quant_dequant_test.py @@ -21,10 +21,10 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import Program, Variable, core -from paddle.fluid.core import AnalysisConfig, create_paddle_predictor -from paddle.fluid.framework import IrGraph +from paddle import base +from paddle.base import Program, Variable, core +from paddle.base.core import AnalysisConfig, create_paddle_predictor +from paddle.base.framework import IrGraph from paddle.static.io import append_fetch_ops, prepend_feed_ops from paddle.static.quantization import ( AddQuantDequantPass, @@ -39,10 +39,10 @@ class QuantDequantTest(unittest.TestCase): def __init__(self, methodName='runTest'): super().__init__(methodName) paddle.enable_static() - self.main_program = fluid.Program() - self.startup_program = fluid.Program() - self.test_main_program = fluid.Program() - self.test_startup_program = fluid.Program() + self.main_program = base.Program() + self.startup_program = base.Program() + self.test_main_program = base.Program() + self.test_startup_program = base.Program() self.feeds = None self.fetch_list = None self.enable_mkldnn = False @@ -64,7 +64,7 @@ def __init__(self, methodName='runTest'): def _normalize_program(self, program, feed_vars, fetch_vars): if not isinstance(program, Program): raise TypeError( - "program type must be `fluid.Program`, but received `%s`" + "program type must be `base.Program`, but received `%s`" % type(program) ) if not isinstance(feed_vars, list): @@ -127,7 +127,7 @@ def _save_models( if var.name in 
feeded_var_names: feeded_vars.append(var) - with fluid.scope_guard(scope): + with base.scope_guard(scope): paddle.static.io.save_inference_model( dirname, feeded_vars, @@ -155,7 +155,7 @@ def _get_paddle_outs(self, feed, fetch_list, executor, program, scope): ''' Return PaddlePaddle outputs. ''' - with fluid.scope_guard(scope): + with base.scope_guard(scope): outs = executor.run( program=program, feed=feed, @@ -177,7 +177,7 @@ def _get_inference_outs(self, config): tensor = predictor.get_input_tensor(name) feed_data = list(self.feeds.values())[i] tensor.copy_from_cpu(np.array(feed_data)) - if type(feed_data) == fluid.LoDTensor: + if type(feed_data) == base.LoDTensor: tensor.set_lod(feed_data.lod()) predictor.zero_copy_run() @@ -244,12 +244,12 @@ def check_output_with_option( or disable TensorRT, enable MKLDNN or disable MKLDNN are all the same. ''' - place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace() - executor = fluid.Executor(place) - scope = fluid.Scope() + place = base.CUDAPlace(0) if use_gpu else base.CPUPlace() + executor = base.Executor(place) + scope = base.Scope() device = "GPU" if use_gpu else "CPU" - with fluid.scope_guard(scope): + with base.scope_guard(scope): executor.run(self.startup_program) executor.run(self.test_startup_program) main_graph = IrGraph(core.Graph(self.main_program.desc), for_test=False) @@ -273,11 +273,11 @@ def check_output_with_option( scale_training_pass = OutScaleForTrainingPass(scope=scope, place=place) scale_training_pass.apply(main_graph) - build_strategy = fluid.BuildStrategy() + build_strategy = base.BuildStrategy() build_strategy.memory_optimize = False build_strategy.enable_inplace = False build_strategy.fuse_all_reduce_ops = False - binary = fluid.CompiledProgram(main_graph.graph) + binary = base.CompiledProgram(main_graph.graph) iters = 10 batch_size = 1 @@ -285,10 +285,10 @@ def check_output_with_option( paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=500), batch_size=batch_size, ) - feeder = fluid.DataFeeder( + feeder = base.DataFeeder( feed_list=[self.data, self.label], place=place ) - with fluid.scope_guard(scope): + with base.scope_guard(scope): for _ in range(iters): data = next(train_reader()) loss_v = executor.run( @@ -308,7 +308,7 @@ def check_output_with_option( self.main_program = test_graph.to_program() - with fluid.scope_guard(scope): + with base.scope_guard(scope): self.main_program = self._normalize_program( self.main_program, self.data, self.fetch_list ) @@ -455,6 +455,6 @@ def __init__( self.disable_trt_plugin_fp16 = disable_trt_plugin_fp16 def quant_dequant(self): - place = fluid.CPUPlace() - exe = fluid.Executor(place) - scope = fluid.Scope() + place = base.CPUPlace() + exe = base.Executor(place) + scope = base.Scope() diff --git a/test/ir/inference/test_inplace_op_pass.py b/test/ir/inference/test_inplace_op_pass.py index 9885c8a32924d..c001b44e2f513 100644 --- a/test/ir/inference/test_inplace_op_pass.py +++ b/test/ir/inference/test_inplace_op_pass.py @@ -20,7 +20,7 @@ from auto_scan_test import PassAutoScanTest from program_config import OpConfig, ProgramConfig, TensorConfig -from paddle.fluid import core +from paddle.base import core @unittest.skipIf( diff --git a/test/ir/inference/test_mkldnn_cpu_bfloat16_pass.py b/test/ir/inference/test_mkldnn_cpu_bfloat16_pass.py index df39289b94986..ec6f71cd62436 100644 --- a/test/ir/inference/test_mkldnn_cpu_bfloat16_pass.py +++ b/test/ir/inference/test_mkldnn_cpu_bfloat16_pass.py @@ -18,14 +18,14 @@ from inference_pass_test import InferencePassTest import paddle 
-from paddle import fluid -from paddle.fluid.core import PassVersionChecker +from paddle import base +from paddle.base.core import PassVersionChecker class TestMKLDNNCpuBfloat16Pass(InferencePassTest): def setUp(self): self.init_data() - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): x = paddle.static.data( name='x', shape=[-1] + self.shape_x, dtype=self.d_type ) diff --git a/test/ir/inference/test_mkldnn_elt_act_fuse_pass.py b/test/ir/inference/test_mkldnn_elt_act_fuse_pass.py index 582d3efec9d3f..e49aff343cd23 100644 --- a/test/ir/inference/test_mkldnn_elt_act_fuse_pass.py +++ b/test/ir/inference/test_mkldnn_elt_act_fuse_pass.py @@ -19,8 +19,8 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid.core import PassVersionChecker +from paddle import base +from paddle.base.core import PassVersionChecker class ElementwiseActivationOneDNNFusePassTest(InferencePassTest): @@ -30,7 +30,7 @@ class ElementwiseActivationOneDNNFusePassTest(InferencePassTest): def setUp(self): self.set_params() - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data_A = paddle.static.data( name="data_A", shape=[-1, 3, 100, 100], dtype="float32" ) diff --git a/test/ir/inference/test_mkldnn_matmul_op_output_fuse_pass.py b/test/ir/inference/test_mkldnn_matmul_op_output_fuse_pass.py index 2d13b668f319d..8ebc5d0564333 100644 --- a/test/ir/inference/test_mkldnn_matmul_op_output_fuse_pass.py +++ b/test/ir/inference/test_mkldnn_matmul_op_output_fuse_pass.py @@ -19,7 +19,7 @@ import paddle import paddle.nn.functional as F -from paddle import fluid +from paddle import base class TestMKLDNNMatmulFuseOp(InferencePassTest): @@ -31,7 +31,7 @@ def init_data(self): self.enable_mkldnn = True def make_network(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): x = paddle.static.data( name='x', shape=[-1] + self.shape_x, dtype=self.d_type ) @@ -73,7 +73,7 @@ def init_data(self): class TestMKLDNNMatmulOpNotFusedWrongTransposeAxis(TestMKLDNNMatmulFuseOp): def make_network(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): x = paddle.static.data( name='x', shape=[-1] + self.shape_x, dtype=self.d_type ) @@ -96,7 +96,7 @@ def init_data(self): self.enable_mkldnn = True def make_network(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): x = paddle.static.data( name='x', shape=[-1] + self.shape_x, dtype=self.d_type ) diff --git a/test/ir/inference/test_mkldnn_reshape_transpose_matmul_v2_fuse_pass.py b/test/ir/inference/test_mkldnn_reshape_transpose_matmul_v2_fuse_pass.py index f05e91f04188c..cdfe819ce69af 100644 --- a/test/ir/inference/test_mkldnn_reshape_transpose_matmul_v2_fuse_pass.py +++ b/test/ir/inference/test_mkldnn_reshape_transpose_matmul_v2_fuse_pass.py @@ -18,8 +18,8 @@ from inference_pass_test import InferencePassTest import paddle -from paddle import fluid -from paddle.fluid.core import PassVersionChecker +from paddle import base +from paddle.base.core import PassVersionChecker class TestReshapeTransposeMatmulV2OneDNNFusePass(InferencePassTest): @@ -28,7 +28,7 @@ def setUp(self): self.tranpose_perm = [0, 2, 1, 3] self.pass_name = 
'reshape_transpose_matmul_mkldnn_fuse_pass' - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=self.data_shape, dtype="float32" ) diff --git a/test/ir/inference/test_trt_c_allreduce_infer_script.py b/test/ir/inference/test_trt_c_allreduce_infer_script.py index 9985a3fc2ec76..b82300ac9fb6a 100644 --- a/test/ir/inference/test_trt_c_allreduce_infer_script.py +++ b/test/ir/inference/test_trt_c_allreduce_infer_script.py @@ -20,7 +20,7 @@ import paddle from paddle.distributed import fleet -from paddle.fluid import core +from paddle.base import core from paddle.inference import Config, PrecisionType, create_predictor diff --git a/test/ir/inference/test_trt_conv3d_op.py b/test/ir/inference/test_trt_conv3d_op.py index 8251f31d31525..605d13963314f 100644 --- a/test/ir/inference/test_trt_conv3d_op.py +++ b/test/ir/inference/test_trt_conv3d_op.py @@ -18,16 +18,16 @@ from inference_pass_test import InferencePassTest import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.core import AnalysisConfig, PassVersionChecker +from paddle import base +from paddle.base import core +from paddle.base.core import AnalysisConfig, PassVersionChecker class TensorRTSubgraphPassConv3dTest(InferencePassTest): def setUp(self): self.init_params() self.set_params() - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[-1, 3, 6, 32, 32], dtype="float32" ) @@ -111,7 +111,7 @@ def set_params(self): class DynamicShapeTensorRTSubgraphPassConv3dTest(InferencePassTest): def setUp(self): self.set_params() - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[-1, 6, -1, -1, -1], dtype="float32" ) diff --git a/test/ir/inference/test_trt_conv3d_transpose_op.py b/test/ir/inference/test_trt_conv3d_transpose_op.py index 4ce5e50e91710..238fcbbbe2b4d 100644 --- a/test/ir/inference/test_trt_conv3d_transpose_op.py +++ b/test/ir/inference/test_trt_conv3d_transpose_op.py @@ -18,15 +18,15 @@ from inference_pass_test import InferencePassTest import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.core import AnalysisConfig, PassVersionChecker +from paddle import base +from paddle.base import core +from paddle.base.core import AnalysisConfig, PassVersionChecker class TensorRTSubgraphPassConv3dTransposeTest(InferencePassTest): def setUp(self): self.set_params() - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[-1, 4, 4, 32, 32], dtype="float32" ) @@ -93,7 +93,7 @@ def set_params(self): class DynamicShapeTensorRTSubgraphPassConv3dTransposeTest(InferencePassTest): def setUp(self): self.set_params() - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[-1, 6, -1, -1, -1], dtype="float32" ) diff --git a/test/ir/inference/test_trt_conv_pass.py b/test/ir/inference/test_trt_conv_pass.py index 1ba73cbc6576b..2408c59c1b50a 100644 --- a/test/ir/inference/test_trt_conv_pass.py +++ b/test/ir/inference/test_trt_conv_pass.py @@ -19,9 +19,9 @@ from 
inference_pass_test import InferencePassTest import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.core import AnalysisConfig, PassVersionChecker +from paddle import base +from paddle.base import core +from paddle.base.core import AnalysisConfig, PassVersionChecker os.environ['NVIDIA_TF32_OVERRIDE'] = '0' @@ -29,7 +29,7 @@ class TensorRTSubgraphPassConvTest(InferencePassTest): def setUp(self): self.set_params() - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[-1, 6, 64, 64], dtype="float32" ) @@ -107,7 +107,7 @@ def set_params(self): class TensorRTSubgraphPassConvTransposeTest(InferencePassTest): def setUp(self): self.set_params() - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[-1, 6, 64, 64], dtype="float32" ) @@ -206,7 +206,7 @@ def set_params(self): class DynamicShapeTensorRTSubgraphPassConvTest(InferencePassTest): def setUp(self): self.set_params() - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[-1, 6, -1, -1], dtype="float32" ) diff --git a/test/ir/inference/test_trt_conv_quant_dequant_pass.py b/test/ir/inference/test_trt_conv_quant_dequant_pass.py index c7f7d03cf3ca6..e3c8e95b24dfe 100644 --- a/test/ir/inference/test_trt_conv_quant_dequant_pass.py +++ b/test/ir/inference/test_trt_conv_quant_dequant_pass.py @@ -19,9 +19,9 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.core import AnalysisConfig, PassVersionChecker +from paddle import base +from paddle.base import core +from paddle.base.core import AnalysisConfig, PassVersionChecker class QuantDequantTensorRTSubgraphPassConvTest(QuantDequantTest): @@ -69,13 +69,13 @@ def network(): self.startup_program.random_seed = 2 self.test_main_program.random_seed = 2 # self.test_startup_program.random_seed = 2 - with fluid.unique_name.guard(): - with fluid.program_guard(self.main_program, self.startup_program): + with base.unique_name.guard(): + with base.program_guard(self.main_program, self.startup_program): self.loss, result = network() opt = paddle.optimizer.Adam(learning_rate=0.0001) opt.minimize(self.loss) - with fluid.unique_name.guard(): - with fluid.program_guard( + with base.unique_name.guard(): + with base.program_guard( self.test_main_program, self.startup_program ): network() @@ -179,13 +179,13 @@ def network(): self.startup_program.random_seed = 2 self.test_main_program.random_seed = 2 # self.test_startup_program.random_seed = 2 - with fluid.unique_name.guard(): - with fluid.program_guard(self.main_program, self.startup_program): + with base.unique_name.guard(): + with base.program_guard(self.main_program, self.startup_program): self.loss, result = network() opt = paddle.optimizer.Adam(learning_rate=0.0001) opt.minimize(self.loss) - with fluid.unique_name.guard(): - with fluid.program_guard( + with base.unique_name.guard(): + with base.program_guard( self.test_main_program, self.startup_program ): network() @@ -287,13 +287,13 @@ def network(): self.startup_program.random_seed = 2 self.test_main_program.random_seed = 2 # self.test_startup_program.random_seed = 2 - with fluid.unique_name.guard(): - with 
fluid.program_guard(self.main_program, self.startup_program): + with base.unique_name.guard(): + with base.program_guard(self.main_program, self.startup_program): self.loss, result = network() opt = paddle.optimizer.Adam(learning_rate=0.0001) opt.minimize(self.loss) - with fluid.unique_name.guard(): - with fluid.program_guard( + with base.unique_name.guard(): + with base.program_guard( self.test_main_program, self.startup_program ): network() diff --git a/test/ir/inference/test_trt_convert_preln_residual_bias.py b/test/ir/inference/test_trt_convert_preln_residual_bias.py index f9431e07511c3..6f9763b58e344 100644 --- a/test/ir/inference/test_trt_convert_preln_residual_bias.py +++ b/test/ir/inference/test_trt_convert_preln_residual_bias.py @@ -166,7 +166,7 @@ def generate_trt_nodes_num(attrs, dynamic_shape): attrs = [ program_config.ops[i].attrs for i in range(len(program_config.ops)) ] - # for static_shape, fall back to fluid fused op + # for static_shape, fall back to base fused op clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 program_config.set_input_type(np.float32) diff --git a/test/ir/inference/test_trt_convert_preln_residual_no_bias.py b/test/ir/inference/test_trt_convert_preln_residual_no_bias.py index 7be194a953a2e..c9e4162847684 100644 --- a/test/ir/inference/test_trt_convert_preln_residual_no_bias.py +++ b/test/ir/inference/test_trt_convert_preln_residual_no_bias.py @@ -155,7 +155,7 @@ def generate_trt_nodes_num(attrs, dynamic_shape): program_config.ops[i].attrs for i in range(len(program_config.ops)) ] - # for static_shape, fall back to fluid fused op + # for static_shape, fall back to base fused op clear_dynamic_shape() self.trt_param.precision = paddle_infer.PrecisionType.Float32 program_config.set_input_type(np.float32) diff --git a/test/ir/inference/test_trt_deformable_conv.py b/test/ir/inference/test_trt_deformable_conv.py index c5bc20449e379..73088b3ee959d 100644 --- a/test/ir/inference/test_trt_deformable_conv.py +++ b/test/ir/inference/test_trt_deformable_conv.py @@ -19,9 +19,9 @@ from inference_pass_test import InferencePassTest import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.core import AnalysisConfig, PassVersionChecker +from paddle import base +from paddle.base import core +from paddle.base.core import AnalysisConfig, PassVersionChecker os.environ['NVIDIA_TF32_OVERRIDE'] = '0' @@ -29,7 +29,7 @@ class TRTDeformableConvTest(InferencePassTest): def setUp(self): self.set_params() - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): input = paddle.static.data( name='input', shape=self.input_size, dtype=self.dtype ) diff --git a/test/ir/inference/test_trt_dynamic_shape.py b/test/ir/inference/test_trt_dynamic_shape.py index 0a5ef451b93b8..dfab8a666e55f 100644 --- a/test/ir/inference/test_trt_dynamic_shape.py +++ b/test/ir/inference/test_trt_dynamic_shape.py @@ -18,14 +18,14 @@ from inference_pass_test import InferencePassTest import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.core import AnalysisConfig +from paddle import base +from paddle.base import core +from paddle.base.core import AnalysisConfig class TRTDynamicShapeTest(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[-1, 3, 16, 16], dtype="float32" ) 
diff --git a/test/ir/inference/test_trt_elementwise_op.py b/test/ir/inference/test_trt_elementwise_op.py index 7f4a34db52fbb..fd351fb87c4ec 100644 --- a/test/ir/inference/test_trt_elementwise_op.py +++ b/test/ir/inference/test_trt_elementwise_op.py @@ -20,15 +20,15 @@ from inference_pass_test import InferencePassTest import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.core import AnalysisConfig, PassVersionChecker +from paddle import base +from paddle.base import core +from paddle.base.core import AnalysisConfig, PassVersionChecker from paddle.static import nn class TensorRTSubgraphPassElementwiseBroadcastTest(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data1 = paddle.static.data( name="data1", shape=[-1, 3, 64, 64], dtype="float32" ) diff --git a/test/ir/inference/test_trt_fc_fuse_pass.py b/test/ir/inference/test_trt_fc_fuse_pass.py index b89ffae51bbe5..a3b06a2156030 100644 --- a/test/ir/inference/test_trt_fc_fuse_pass.py +++ b/test/ir/inference/test_trt_fc_fuse_pass.py @@ -18,14 +18,14 @@ from inference_pass_test import InferencePassTest import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.core import AnalysisConfig +from paddle import base +from paddle.base import core +from paddle.base.core import AnalysisConfig class FCFusePassTRTTest(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[32, 128, 2, 2], dtype="float32" ) @@ -55,7 +55,7 @@ def test_check_output(self): class FCFusePassTRTStaticDims4Cols1Test(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[32, 128, 32, 8], dtype="float32" ) @@ -83,7 +83,7 @@ def test_check_output(self): class FCFusePassTRTStaticDims4Cols2Test(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[3, 24, 16, 16], dtype="float32" ) @@ -111,7 +111,7 @@ def test_check_output(self): class FCFusePassTRTDynamicDims2Test(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[32, 128], dtype="float32" ) @@ -145,7 +145,7 @@ def test_check_output(self): class FCFusePassTRTDynamicDims3Cols1Test(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[32, 128, 32], dtype="float32" ) @@ -179,7 +179,7 @@ def test_check_output(self): class FCFusePassTRTDynamicDims3Cols2Test(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[32, 128, 32], dtype="float32" ) @@ -213,7 +213,7 @@ def test_check_output(self): class FCFusePassTRTDynamicDims4Cols1Test(InferencePassTest): def setUp(self): - 
with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[32, 12, 4, 6], dtype="float32" ) @@ -249,7 +249,7 @@ def test_check_output(self): class FCFusePassTRTDynamicDims4Cols2Test(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[32, 128, 32, 32], dtype="float32" ) @@ -285,7 +285,7 @@ def test_check_output(self): class FCFusePassTRTDynamicDims4Cols3Test(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[32, 128, 32, 32], dtype="float32" ) diff --git a/test/ir/inference/test_trt_fc_fuse_quant_dequant_pass.py b/test/ir/inference/test_trt_fc_fuse_quant_dequant_pass.py index 5e93b651f24fc..078d14fb5fbe7 100644 --- a/test/ir/inference/test_trt_fc_fuse_quant_dequant_pass.py +++ b/test/ir/inference/test_trt_fc_fuse_quant_dequant_pass.py @@ -19,9 +19,9 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.core import AnalysisConfig, PassVersionChecker +from paddle import base +from paddle.base import core +from paddle.base.core import AnalysisConfig, PassVersionChecker class FCQuantDequantFusePassTRTDims3Cols1Test(QuantDequantTest): @@ -54,13 +54,13 @@ def network(): self.startup_program.random_seed = 2 self.test_main_program.random_seed = 2 # self.test_startup_program.random_seed = 2 - with fluid.unique_name.guard(): - with fluid.program_guard(self.main_program, self.startup_program): + with base.unique_name.guard(): + with base.program_guard(self.main_program, self.startup_program): self.loss, result = network() opt = paddle.optimizer.Adam(learning_rate=0.0001) opt.minimize(self.loss) - with fluid.unique_name.guard(): - with fluid.program_guard( + with base.unique_name.guard(): + with base.program_guard( self.test_main_program, self.startup_program ): network() @@ -128,13 +128,13 @@ def network(): self.startup_program.random_seed = 2 self.test_main_program.random_seed = 2 # self.test_startup_program.random_seed = 2 - with fluid.unique_name.guard(): - with fluid.program_guard(self.main_program, self.startup_program): + with base.unique_name.guard(): + with base.program_guard(self.main_program, self.startup_program): self.loss, result = network() opt = paddle.optimizer.Adam(learning_rate=0.0001) opt.minimize(self.loss) - with fluid.unique_name.guard(): - with fluid.program_guard( + with base.unique_name.guard(): + with base.program_guard( self.test_main_program, self.startup_program ): network() @@ -204,13 +204,13 @@ def network(): self.startup_program.random_seed = 2 self.test_main_program.random_seed = 2 # self.test_startup_program.random_seed = 2 - with fluid.unique_name.guard(): - with fluid.program_guard(self.main_program, self.startup_program): + with base.unique_name.guard(): + with base.program_guard(self.main_program, self.startup_program): self.loss, result = network() opt = paddle.optimizer.Adam(learning_rate=0.0001) opt.minimize(self.loss) - with fluid.unique_name.guard(): - with fluid.program_guard( + with base.unique_name.guard(): + with base.program_guard( self.test_main_program, self.startup_program ): network() diff --git 
a/test/ir/inference/test_trt_flatten_op.py b/test/ir/inference/test_trt_flatten_op.py index ad9f37f224dd9..e1489ab969ca5 100644 --- a/test/ir/inference/test_trt_flatten_op.py +++ b/test/ir/inference/test_trt_flatten_op.py @@ -18,15 +18,15 @@ from inference_pass_test import InferencePassTest import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.core import AnalysisConfig, PassVersionChecker +from paddle import base +from paddle.base import core +from paddle.base.core import AnalysisConfig, PassVersionChecker from paddle.static import nn class TRTFlattenTest(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[-1, 6, 64, 64], dtype="float32" ) @@ -55,7 +55,7 @@ def test_check_output(self): class TRTFlattenDynamicTest(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[-1, 6, 64, 64], dtype="float32" ) diff --git a/test/ir/inference/test_trt_gather_nd_op.py b/test/ir/inference/test_trt_gather_nd_op.py index a6389756df915..c78d544b92391 100644 --- a/test/ir/inference/test_trt_gather_nd_op.py +++ b/test/ir/inference/test_trt_gather_nd_op.py @@ -18,15 +18,15 @@ from inference_pass_test import InferencePassTest import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.core import AnalysisConfig, PassVersionChecker +from paddle import base +from paddle.base import core +from paddle.base.core import AnalysisConfig, PassVersionChecker from paddle.static import nn class TRTGatherNdTest(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[-1, 3, 4], dtype="float32" ) @@ -65,7 +65,7 @@ def test_check_output(self): class TRTGatherNdFp16Test(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[-1, 1280, 192], dtype="float32" ) diff --git a/test/ir/inference/test_trt_gather_op.py b/test/ir/inference/test_trt_gather_op.py index b432438ebdbd1..96092ff85e358 100644 --- a/test/ir/inference/test_trt_gather_op.py +++ b/test/ir/inference/test_trt_gather_op.py @@ -18,15 +18,15 @@ from inference_pass_test import InferencePassTest import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.core import AnalysisConfig, PassVersionChecker +from paddle import base +from paddle.base import core +from paddle.base.core import AnalysisConfig, PassVersionChecker class TRTGatherTest1(InferencePassTest): def setUp(self): self.set_params() - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name='data', shape=[-1, 128], dtype='float32' ) @@ -69,7 +69,7 @@ def test_check_output(self): class TRTGatherTest2(InferencePassTest): def setUp(self): self.set_params() - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name='data', shape=[16, 64], 
dtype='float32' ) diff --git a/test/ir/inference/test_trt_inspector.py b/test/ir/inference/test_trt_inspector.py index a1bb1579891ed..52d02fd1213cb 100644 --- a/test/ir/inference/test_trt_inspector.py +++ b/test/ir/inference/test_trt_inspector.py @@ -20,15 +20,15 @@ from inference_pass_test import InferencePassTest import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.core import AnalysisConfig +from paddle import base +from paddle.base import core +from paddle.base.core import AnalysisConfig class TensorRTInspectorTest(InferencePassTest): def setUp(self): self.set_params() - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[1, 16, 16], dtype="float32" ) diff --git a/test/ir/inference/test_trt_instance_norm_op.py b/test/ir/inference/test_trt_instance_norm_op.py index fdf3523e880ce..5002579438f8d 100644 --- a/test/ir/inference/test_trt_instance_norm_op.py +++ b/test/ir/inference/test_trt_instance_norm_op.py @@ -21,9 +21,9 @@ from inference_pass_test import InferencePassTest import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.core import AnalysisConfig, PassVersionChecker +from paddle import base +from paddle.base import core +from paddle.base.core import AnalysisConfig, PassVersionChecker from paddle.static import nn @@ -42,7 +42,7 @@ def build(self): 1 << 30, self.bs, 2, self.precision, self.serialize, False ) - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): shape = [-1, self.channel, self.height, self.width] data = paddle.static.data(name='in', shape=shape, dtype='float32') instance_norm_out = nn.instance_norm(data) diff --git a/test/ir/inference/test_trt_matmul.py b/test/ir/inference/test_trt_matmul.py index 87e24a545416c..77baebabb5385 100644 --- a/test/ir/inference/test_trt_matmul.py +++ b/test/ir/inference/test_trt_matmul.py @@ -18,16 +18,16 @@ from inference_pass_test import InferencePassTest import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.core import AnalysisConfig, PassVersionChecker +from paddle import base +from paddle.base import core +from paddle.base.core import AnalysisConfig, PassVersionChecker from paddle.static import nn class TensorRTMatMulDims2Test(InferencePassTest): def setUp(self): self.set_params() - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[24, 24], dtype="float32" ) @@ -66,7 +66,7 @@ def test_check_output(self): class TensorRTMatMulTest(InferencePassTest): def setUp(self): self.set_params() - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[-1, 6, 24, 24], dtype="float32" ) @@ -126,8 +126,8 @@ def set_params(self): class TensorRTMatMulBroadcastTest(InferencePassTest): def setUp(self): self.set_params() - place = fluid.CPUPlace() - with fluid.program_guard(self.main_program, self.startup_program): + place = base.CPUPlace() + with base.program_guard(self.main_program, self.startup_program): data_x = paddle.static.data( name="data_x", shape=[-1, 6, 24], dtype="float32" ) diff --git a/test/ir/inference/test_trt_matmul_quant_dequant.py 
b/test/ir/inference/test_trt_matmul_quant_dequant.py index 808df546b6249..03f0e303c4668 100644 --- a/test/ir/inference/test_trt_matmul_quant_dequant.py +++ b/test/ir/inference/test_trt_matmul_quant_dequant.py @@ -19,9 +19,9 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.core import AnalysisConfig, PassVersionChecker +from paddle import base +from paddle.base import core +from paddle.base.core import AnalysisConfig, PassVersionChecker class TensorRTMatMulQuantDequantDims3Test(QuantDequantTest): @@ -63,13 +63,13 @@ def network(): self.startup_program.random_seed = 2 self.test_main_program.random_seed = 2 # self.test_startup_program.random_seed = 2 - with fluid.unique_name.guard(): - with fluid.program_guard(self.main_program, self.startup_program): + with base.unique_name.guard(): + with base.program_guard(self.main_program, self.startup_program): self.loss, result = network() opt = paddle.optimizer.Adam(learning_rate=0.0001) opt.minimize(self.loss) - with fluid.unique_name.guard(): - with fluid.program_guard( + with base.unique_name.guard(): + with base.program_guard( self.test_main_program, self.startup_program ): network() @@ -175,13 +175,13 @@ def network(): self.startup_program.random_seed = 2 self.test_main_program.random_seed = 2 # self.test_startup_program.random_seed = 2 - with fluid.unique_name.guard(): - with fluid.program_guard(self.main_program, self.startup_program): + with base.unique_name.guard(): + with base.program_guard(self.main_program, self.startup_program): self.loss, result = network() opt = paddle.optimizer.Adam(learning_rate=0.0001) opt.minimize(self.loss) - with fluid.unique_name.guard(): - with fluid.program_guard( + with base.unique_name.guard(): + with base.program_guard( self.test_main_program, self.startup_program ): network() @@ -286,13 +286,13 @@ def network(): self.startup_program.random_seed = 2 self.test_main_program.random_seed = 2 # self.test_startup_program.random_seed = 2 - with fluid.unique_name.guard(): - with fluid.program_guard(self.main_program, self.startup_program): + with base.unique_name.guard(): + with base.program_guard(self.main_program, self.startup_program): self.loss, result = network() opt = paddle.optimizer.Adam(learning_rate=0.0001) opt.minimize(self.loss) - with fluid.unique_name.guard(): - with fluid.program_guard( + with base.unique_name.guard(): + with base.program_guard( self.test_main_program, self.startup_program ): network() diff --git a/test/ir/inference/test_trt_multiclass_nms3_op.py b/test/ir/inference/test_trt_multiclass_nms3_op.py index 9b83863a87ea9..a38a597cf856f 100644 --- a/test/ir/inference/test_trt_multiclass_nms3_op.py +++ b/test/ir/inference/test_trt_multiclass_nms3_op.py @@ -19,10 +19,10 @@ from inference_pass_test import InferencePassTest import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.core import AnalysisConfig, PassVersionChecker -from paddle.fluid.layer_helper import LayerHelper +from paddle import base +from paddle.base import core +from paddle.base.core import AnalysisConfig, PassVersionChecker +from paddle.base.layer_helper import LayerHelper from paddle.framework import in_dynamic_mode from paddle.static import nn @@ -217,7 +217,7 @@ def setUp(self): ) def build(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): boxes = paddle.static.data( name='bboxes', shape=[-1, self.num_boxes, 4], 
dtype='float32' ) diff --git a/test/ir/inference/test_trt_nearest_interp_op.py b/test/ir/inference/test_trt_nearest_interp_op.py index 31d4edf85b80e..254bcc818e5ea 100644 --- a/test/ir/inference/test_trt_nearest_interp_op.py +++ b/test/ir/inference/test_trt_nearest_interp_op.py @@ -18,9 +18,9 @@ from inference_pass_test import InferencePassTest import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.core import AnalysisConfig, PassVersionChecker +from paddle import base +from paddle.base import core +from paddle.base.core import AnalysisConfig, PassVersionChecker from paddle.static import nn @@ -28,7 +28,7 @@ class TRTNearestInterpTest(InferencePassTest): def setUp(self): self.set_params() - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): if self.data_layout == 'NCHW': shape = [ -1, diff --git a/test/ir/inference/test_trt_nearest_interp_v2_op.py b/test/ir/inference/test_trt_nearest_interp_v2_op.py index 6749246ebbb15..88353fbe1a58b 100644 --- a/test/ir/inference/test_trt_nearest_interp_v2_op.py +++ b/test/ir/inference/test_trt_nearest_interp_v2_op.py @@ -19,9 +19,9 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.core import AnalysisConfig, PassVersionChecker +from paddle import base +from paddle.base import core +from paddle.base.core import AnalysisConfig, PassVersionChecker from paddle.static import nn @@ -29,7 +29,7 @@ class TRTNearestInterpTest(InferencePassTest): def setUp(self): self.set_params() - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): if self.data_layout == 'NCHW': shape = [ -1, diff --git a/test/ir/inference/test_trt_pad_op.py b/test/ir/inference/test_trt_pad_op.py index 71139a039e5c9..aa79ff13b5a40 100644 --- a/test/ir/inference/test_trt_pad_op.py +++ b/test/ir/inference/test_trt_pad_op.py @@ -18,15 +18,15 @@ from inference_pass_test import InferencePassTest import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.core import AnalysisConfig +from paddle import base +from paddle.base import core +from paddle.base.core import AnalysisConfig from paddle.static import nn class PadOpTRTTest(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[1, 3, 128, 128], dtype="float32" ) diff --git a/test/ir/inference/test_trt_pool3d_op.py b/test/ir/inference/test_trt_pool3d_op.py index f64ff97e4e8db..66a05775b071d 100644 --- a/test/ir/inference/test_trt_pool3d_op.py +++ b/test/ir/inference/test_trt_pool3d_op.py @@ -21,9 +21,9 @@ from inference_pass_test import InferencePassTest import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.core import AnalysisConfig, PassVersionChecker +from paddle import base +from paddle.base import core +from paddle.base.core import AnalysisConfig, PassVersionChecker class TensorRTPool3dTest(InferencePassTest): @@ -57,7 +57,7 @@ def build_network(self): 1 << 30, self.bs, 0, self.precision, self.serialize, False ) - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name='data', shape=[-1, self.channel, self.depth, self.height, 
self.width], @@ -188,7 +188,7 @@ def build_network(self): 1 << 30, self.bs, 0, self.precision, self.serialize, False ) - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name='data', shape=[-1, self.channel, self.depth, self.height, self.width], @@ -289,7 +289,7 @@ def build_network(self): 1 << 30, self.bs, 0, self.precision, self.serialize, False ) - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name='data', shape=[-1, self.channel, self.depth, self.height, self.width], diff --git a/test/ir/inference/test_trt_pool_op.py b/test/ir/inference/test_trt_pool_op.py index 8826a3f06cd01..37ffe6452e0f5 100644 --- a/test/ir/inference/test_trt_pool_op.py +++ b/test/ir/inference/test_trt_pool_op.py @@ -21,9 +21,9 @@ from inference_pass_test import InferencePassTest import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.core import AnalysisConfig, PassVersionChecker +from paddle import base +from paddle.base import core +from paddle.base.core import AnalysisConfig, PassVersionChecker from paddle.static import nn @@ -58,7 +58,7 @@ def build_network(self): 1 << 30, self.bs, 0, self.precision, self.serialize, False ) - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name='data', shape=[-1, self.channel, self.height, self.width], diff --git a/test/ir/inference/test_trt_reduce_sum_op.py b/test/ir/inference/test_trt_reduce_sum_op.py index f4c456e35fdb0..9380867c38478 100644 --- a/test/ir/inference/test_trt_reduce_sum_op.py +++ b/test/ir/inference/test_trt_reduce_sum_op.py @@ -18,15 +18,15 @@ from inference_pass_test import InferencePassTest import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.core import AnalysisConfig, PassVersionChecker +from paddle import base +from paddle.base import core +from paddle.base.core import AnalysisConfig, PassVersionChecker from paddle.static import nn class TRTReduceSumTest(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[-1, 3, 10, 192], dtype="float32" ) @@ -59,7 +59,7 @@ def test_check_output(self): class TRTReduceSumAllTest(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[-1, 3, 10, 192], dtype="float32" ) diff --git a/test/ir/inference/test_trt_reshape_op.py b/test/ir/inference/test_trt_reshape_op.py index 96e8d2189ea91..4e9261ae3d795 100644 --- a/test/ir/inference/test_trt_reshape_op.py +++ b/test/ir/inference/test_trt_reshape_op.py @@ -18,9 +18,9 @@ from inference_pass_test import InferencePassTest import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.core import AnalysisConfig, PassVersionChecker +from paddle import base +from paddle.base import core +from paddle.base.core import AnalysisConfig, PassVersionChecker from paddle.static import nn @@ -35,7 +35,7 @@ def setUp(self): self.input_shape[1], self.input_shape[2], ] - with fluid.program_guard(self.main_program, 
self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name='data', shape=self.data_shape, dtype='float32' ) @@ -73,7 +73,7 @@ def setUp(self): self.input_shape[1], self.input_shape[2], ] - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name='data', shape=self.data_shape, dtype='float32' ) @@ -100,7 +100,7 @@ def setUp(self): self.input_shape[1], self.input_shape[2], ] - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name='data', shape=self.data_shape, dtype='float32' ) @@ -127,7 +127,7 @@ def setUp(self): self.input_shape[1], self.input_shape[2], ] - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name='data', shape=self.data_shape, dtype='float32' ) diff --git a/test/ir/inference/test_trt_scale_op.py b/test/ir/inference/test_trt_scale_op.py index c1e7084c300f1..c1d693696b954 100644 --- a/test/ir/inference/test_trt_scale_op.py +++ b/test/ir/inference/test_trt_scale_op.py @@ -18,15 +18,15 @@ from inference_pass_test import InferencePassTest import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.core import AnalysisConfig, PassVersionChecker +from paddle import base +from paddle.base import core +from paddle.base.core import AnalysisConfig, PassVersionChecker from paddle.static import nn class TRTScaleTest(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[-1, 512], dtype="float32" ) @@ -58,7 +58,7 @@ def test_check_output(self): class TRTScaleShape2Test(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[-1, 512, 512], dtype="float32" ) diff --git a/test/ir/inference/test_trt_shuffle_channel_detect_pass.py b/test/ir/inference/test_trt_shuffle_channel_detect_pass.py index 8a1388719d8d0..fa91f600e5564 100644 --- a/test/ir/inference/test_trt_shuffle_channel_detect_pass.py +++ b/test/ir/inference/test_trt_shuffle_channel_detect_pass.py @@ -18,14 +18,14 @@ from inference_pass_test import InferencePassTest import paddle -from paddle import fluid -from paddle.fluid.core import AnalysisConfig, PassVersionChecker +from paddle import base +from paddle.base.core import AnalysisConfig, PassVersionChecker from paddle.static import nn class ShuffleChannelFuseTRTPassTest(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[-1, 6, 64, 64], dtype="float32" ) diff --git a/test/ir/inference/test_trt_skip_layernorm_fuse_pass.py b/test/ir/inference/test_trt_skip_layernorm_fuse_pass.py index 1bf140a365aae..4d671c33cdfb8 100644 --- a/test/ir/inference/test_trt_skip_layernorm_fuse_pass.py +++ b/test/ir/inference/test_trt_skip_layernorm_fuse_pass.py @@ -20,14 +20,14 @@ from inference_pass_test import InferencePassTest import paddle -from paddle import fluid -from paddle.fluid import 
core -from paddle.fluid.core import AnalysisConfig, PassVersionChecker +from paddle import base +from paddle.base import core +from paddle.base.core import AnalysisConfig, PassVersionChecker class SkipLayernormFusePassTest0(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data1 = paddle.static.data( name="data1", shape=[-1, 3, 128, 128], dtype="float32" ) @@ -73,7 +73,7 @@ def test_check_output(self): class SkipLayernormFusePassTest1(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data1 = paddle.static.data( name="data1", shape=[-1, 256, 1536], dtype="float32" ) @@ -121,7 +121,7 @@ def test_check_output(self): class SkipLayernormFusePassTest2(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data1 = paddle.static.data( name="data1", shape=[-1, 128, 64, 768], dtype="float32" ) @@ -169,7 +169,7 @@ def test_check_output(self): class SkipLayernormFusePassTest3(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data1 = paddle.static.data( name="data1", shape=[-1, 128, 128], dtype="float32" ) diff --git a/test/ir/inference/test_trt_slice_dynamic_plugin.py b/test/ir/inference/test_trt_slice_dynamic_plugin.py index 5a155194ef45c..7712d00041a8a 100644 --- a/test/ir/inference/test_trt_slice_dynamic_plugin.py +++ b/test/ir/inference/test_trt_slice_dynamic_plugin.py @@ -18,9 +18,9 @@ from inference_pass_test import InferencePassTest import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.core import AnalysisConfig +from paddle import base +from paddle.base import core +from paddle.base.core import AnalysisConfig # normal starts && ends @@ -45,7 +45,7 @@ def setUpTensorRTParams(self): def setUp(self): self.setUpSliceParams() self.setUpTensorRTParams() - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[3, 3, 3, 3], dtype="float32" ) diff --git a/test/ir/inference/test_trt_slice_plugin.py b/test/ir/inference/test_trt_slice_plugin.py index 5596729a93aee..ab8e9cab62bf6 100644 --- a/test/ir/inference/test_trt_slice_plugin.py +++ b/test/ir/inference/test_trt_slice_plugin.py @@ -18,9 +18,9 @@ from inference_pass_test import InferencePassTest import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.core import AnalysisConfig +from paddle import base +from paddle.base import core +from paddle.base.core import AnalysisConfig from paddle.static import nn @@ -40,7 +40,7 @@ def setUpTensorRTParams(self): def setUp(self): self.setUpSliceParams() self.setUpTensorRTParams() - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[3, 3, 3, 3], dtype="float32" ) @@ -111,7 +111,7 @@ class SlicePluginTRTTestInt32(SlicePluginTRTTest): def setUp(self): self.setUpSliceParams() self.setUpTensorRTParams() - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, 
self.startup_program): data = paddle.static.data( name="data", shape=[3, 3, 3, 3], dtype="int32" ) @@ -138,7 +138,7 @@ def setUpTensorRTParams(self): def setUp(self): self.setUpSliceParams() self.setUpTensorRTParams() - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[3, 3, 3, 3], dtype="int32" ) diff --git a/test/ir/inference/test_trt_subgraph_pass.py b/test/ir/inference/test_trt_subgraph_pass.py index 4031a882758b9..b343eaf89322d 100644 --- a/test/ir/inference/test_trt_subgraph_pass.py +++ b/test/ir/inference/test_trt_subgraph_pass.py @@ -20,14 +20,14 @@ from inference_pass_test import InferencePassTest import paddle -from paddle import fluid -from paddle.fluid.core import AnalysisConfig, PassVersionChecker +from paddle import base +from paddle.base.core import AnalysisConfig, PassVersionChecker from paddle.static import nn class TensorRTSubgraphPassFcTest(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[-1, 6, 64, 64], dtype="float32" ) @@ -54,7 +54,7 @@ def test_check_output(self): class TensorRTSubgraphPassConcatTest(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data1 = paddle.static.data( name="data1", shape=[-1, 3, 64, 64], dtype="float32" ) @@ -84,7 +84,7 @@ def test_check_output(self): class TensorRTSubgraphPassSplitTest(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[-1, 3, 64, 64], dtype="float32" ) @@ -110,7 +110,7 @@ def test_check_output(self): class TensorRTSubgraphPassSplitSerializeTest(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[-1, 3, 64, 64], dtype="float32" ) @@ -139,7 +139,7 @@ def test_check_output(self): class TensorRTSubgraphPassDynamicSplitFp16SerializeTest(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[-1, 3, 64, 64], dtype="float32" ) @@ -176,15 +176,15 @@ def test_check_output(self): class TensorRTSubgraphPassInstanceNormTest(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[-1, 3, 64, 64], dtype="float32" ) - param_attr = fluid.ParamAttr( + param_attr = base.ParamAttr( name='instance_norm_w', initializer=paddle.nn.initializer.Constant(value=1.0), ) - bias_attr = fluid.ParamAttr( + bias_attr = base.ParamAttr( name='instance_norm_b', initializer=paddle.nn.initializer.Constant(value=0.0), ) @@ -213,7 +213,7 @@ def test_check_output(self): class TensorRTSubgraphPassTransposeTest(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = 
paddle.static.data( name="data", shape=[-1, 6, 64, 64], dtype="float32" ) @@ -243,7 +243,7 @@ def test_check_output(self): class TensorRTSubgraphPassLayerNormTest(InferencePassTest): def setUp(self): self.set_params() - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[-1, 3, 64, 64], dtype="float32" ) @@ -274,7 +274,7 @@ def test_check_output(self): class TensorRTSubgraphPassLayerNormDynamicTest(InferencePassTest): def setUp(self): self.set_params() - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[-1, 3, 64, 64], dtype="float32" ) @@ -362,7 +362,7 @@ def set_params(self): class TensorRTSubgraphPassElementwiseTest(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data1 = paddle.static.data( name="data1", shape=[-1, 3, 64, 64], dtype="float32" ) @@ -418,7 +418,7 @@ def test_check_output(self): class TensorRTSubgraphPassElementwiseBroadcastDynamicTest(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data1 = paddle.static.data( name="data1", shape=[-1, 3, 64, 64], dtype="float32" ) diff --git a/test/ir/inference/test_trt_tile_op.py b/test/ir/inference/test_trt_tile_op.py index 014e541a4f126..94e3d67265e1a 100644 --- a/test/ir/inference/test_trt_tile_op.py +++ b/test/ir/inference/test_trt_tile_op.py @@ -18,14 +18,14 @@ from inference_pass_test import InferencePassTest import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.core import AnalysisConfig, PassVersionChecker +from paddle import base +from paddle.base import core +from paddle.base.core import AnalysisConfig, PassVersionChecker class TRTTileTest(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[4, 3, 224, 256], dtype="float32" ) @@ -52,7 +52,7 @@ def test_check_output(self): class TRTTileExpandTest(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[1, 1, 1, 1], dtype="float32" ) @@ -79,7 +79,7 @@ def test_check_output(self): class TRTTileExpandStaticTest(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[1, 1, 1, 1], dtype="float32" ) @@ -106,7 +106,7 @@ def test_check_output(self): class TRTTileExpandHalfTest(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[1, 1, 1, 1], dtype="float32" ) diff --git a/test/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py b/test/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py index ce29fe4762d81..b0886a99c7d12 100644 --- 
a/test/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py +++ b/test/ir/inference/test_trt_transpose_flatten_concat_fuse_pass.py @@ -18,14 +18,14 @@ from inference_pass_test import InferencePassTest import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.core import AnalysisConfig +from paddle import base +from paddle.base import core +from paddle.base.core import AnalysisConfig class TransposeFlattenConcatFusePassTRTTest(InferencePassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data1 = paddle.static.data( name="data1", shape=[8, 32, 128], dtype="float32" ) diff --git a/test/ir/inference/test_trt_tuned_dynamic_shape.py b/test/ir/inference/test_trt_tuned_dynamic_shape.py index 12e3e76dc3fd6..1c69f3f1db735 100644 --- a/test/ir/inference/test_trt_tuned_dynamic_shape.py +++ b/test/ir/inference/test_trt_tuned_dynamic_shape.py @@ -19,18 +19,18 @@ import paddle paddle.enable_static() -from paddle import fluid +from paddle import base from paddle.inference import Config, create_predictor class TRTTunedDynamicShapeTest(unittest.TestCase): def get_model(self): - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) + place = base.CUDAPlace(0) + exe = base.Executor(place) - main_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(main_program, startup_program): + main_program = base.Program() + startup_program = base.Program() + with base.program_guard(main_program, startup_program): data = paddle.static.data( name="data", shape=[-1, 6, 64, 64], dtype="float32" ) diff --git a/test/ir/inference/test_trt_yolo_box_op.py b/test/ir/inference/test_trt_yolo_box_op.py index d625f65b4ece1..5856a4a6055cc 100644 --- a/test/ir/inference/test_trt_yolo_box_op.py +++ b/test/ir/inference/test_trt_yolo_box_op.py @@ -18,14 +18,14 @@ from inference_pass_test import InferencePassTest import paddle -from paddle import fluid -from paddle.fluid.core import AnalysisConfig, PassVersionChecker +from paddle import base +from paddle.base.core import AnalysisConfig, PassVersionChecker class TRTYoloBoxTest(InferencePassTest): def setUp(self): self.set_params() - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): image_shape = [self.bs, self.channel, self.height, self.width] image = paddle.static.data( name='image', shape=image_shape, dtype='float32' @@ -79,7 +79,7 @@ def test_check_output(self): class TRTYoloBoxFP16Test(InferencePassTest): def setUp(self): self.set_params() - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): image_shape = [self.bs, self.channel, self.height, self.width] image = paddle.static.data( name='image', shape=image_shape, dtype='float32' @@ -131,7 +131,7 @@ def test_check_output(self): class TRTYoloBoxIoUAwareTest(InferencePassTest): def setUp(self): self.set_params() - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): image_shape = [self.bs, self.channel, self.height, self.width] image = paddle.static.data( name='image', shape=image_shape, dtype='float32' diff --git a/test/ir/inference/test_yolo_box_post.py b/test/ir/inference/test_yolo_box_post.py index 28fe1043cdefb..8985f2d48d3ba 100644 --- a/test/ir/inference/test_yolo_box_post.py +++ 
b/test/ir/inference/test_yolo_box_post.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid.layer_helper import LayerHelper +from paddle.base.layer_helper import LayerHelper paddle.enable_static() diff --git a/test/ir/new_ir/test_data_op.py b/test/ir/new_ir/test_data_op.py index 90e40e3635d7c..a7659e32486c4 100644 --- a/test/ir/new_ir/test_data_op.py +++ b/test/ir/new_ir/test_data_op.py @@ -15,7 +15,7 @@ import unittest import paddle -from paddle.fluid.layer_helper import LayerHelper +from paddle.base.layer_helper import LayerHelper def data(): diff --git a/test/ir/new_ir/test_ir_pybind.py b/test/ir/new_ir/test_ir_pybind.py index 26844acc958f8..63e9703cedf9f 100644 --- a/test/ir/new_ir/test_ir_pybind.py +++ b/test/ir/new_ir/test_ir_pybind.py @@ -77,7 +77,7 @@ def test_value(self): tanh_op = newir_program.block().ops[3] self.assertEqual( - matmul_op.result(0).dtype, paddle.fluid.core.DataType.FLOAT32 + matmul_op.result(0).dtype, paddle.base.core.DataType.FLOAT32 ) self.assertEqual(matmul_op.result(0).shape, [4, 4]) self.assertEqual( @@ -161,8 +161,8 @@ def test_attr(self): self.assertEqual(conv_attr["paddings"], [0, 0]) self.assertEqual(conv_attr["padding_algorithm"], "EXPLICIT") self.assertEqual(conv_attr["groups"], 1) - self.assertEqual(full_attr["dtype"], paddle.fluid.core.DataType.FLOAT32) - self.assertTrue(isinstance(full_attr["place"], paddle.fluid.core.Place)) + self.assertEqual(full_attr["dtype"], paddle.base.core.DataType.FLOAT32) + self.assertTrue(isinstance(full_attr["place"], paddle.base.core.Place)) def test_operands(self): newir_program = get_ir_program() diff --git a/test/ir/new_ir/test_ir_vjp.py b/test/ir/new_ir/test_ir_vjp.py index 595715aa5f22f..c770153738d2b 100644 --- a/test/ir/new_ir/test_ir_vjp.py +++ b/test/ir/new_ir/test_ir_vjp.py @@ -16,7 +16,7 @@ import paddle from paddle import ir -from paddle.fluid.core import call_vjp, has_vjp +from paddle.base.core import call_vjp, has_vjp paddle.enable_static() diff --git a/test/ir/new_ir/test_special_op_translator.py b/test/ir/new_ir/test_special_op_translator.py index 5763ad03e5948..24eadccf034bd 100644 --- a/test/ir/new_ir/test_special_op_translator.py +++ b/test/ir/new_ir/test_special_op_translator.py @@ -18,7 +18,7 @@ import paddle from paddle import ir -from paddle.fluid import core +from paddle.base import core from paddle.framework import LayerHelper paddle.enable_static() diff --git a/test/ir/pass_test.py b/test/ir/pass_test.py index 176bd46616d09..2d809e2f5e9bb 100644 --- a/test/ir/pass_test.py +++ b/test/ir/pass_test.py @@ -19,16 +19,16 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.framework import Block +from paddle import base +from paddle.base import core +from paddle.base.framework import Block class PassTest(unittest.TestCase): @classmethod def setUpClass(self): - self.main_program = fluid.Program() - self.startup_program = fluid.Program() + self.main_program = base.Program() + self.startup_program = base.Program() self.feeds = None self.fetch_list = None self.pass_names = None @@ -41,9 +41,9 @@ def setUpClass(self): random.seed(124) def _get_places(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) return places def grad(self, var): @@ -51,9 +51,9 @@ def grad(self, var): return self.main_program.global_block().var(grad_name) def append_gradients(self, outs): - with fluid.program_guard(self.main_program, 
self.startup_program): + with base.program_guard(self.main_program, self.startup_program): loss = paddle.mean(outs) - fluid.backward.append_backward(loss) + base.backward.append_backward(loss) def check_output(self, startup_on_cpu=False, atol=1e-5): ''' @@ -84,7 +84,7 @@ def _run_program(self, executor, program): def _apply_ir_passes(self): graph = core.Graph(self.main_program.desc) - graph.set_not_owned("__param_scope__", fluid.global_scope()) + graph.set_not_owned("__param_scope__", base.global_scope()) for attr_name, attr_value in self.graph_attrs.items(): graph.set(attr_name, attr_value) @@ -101,7 +101,7 @@ def _apply_ir_passes(self): ir_pass.set(key, attrs[key]) trans_pass = pass_builder.append_pass("graph_to_program_pass") - opt_program = fluid.Program() + opt_program = base.Program() trans_pass.set_not_owned("program", opt_program.desc) for p in pass_builder.all_passes(): p.apply(graph) @@ -120,10 +120,10 @@ def check_output_with_place(self, place, startup_on_cpu=False, atol=1e-5): after apply all specified passes, then copy the parameters to GPUPlace. We can set startup_on_cpu to True to test inference pass. ''' - executor = fluid.Executor(place) + executor = base.Executor(place) if startup_on_cpu: # Initialize parameters on CPU - cpu_executor = fluid.Executor(fluid.CPUPlace()) + cpu_executor = base.Executor(base.CPUPlace()) cpu_executor.run(self.startup_program) outs, lods = self._run_program(cpu_executor, self.main_program) else: @@ -140,7 +140,7 @@ def check_output_with_place(self, place, startup_on_cpu=False, atol=1e-5): opt_program = self._apply_ir_passes() self.check_program(opt_program) - if startup_on_cpu and not isinstance(place, fluid.CPUPlace): + if startup_on_cpu and not isinstance(place, base.CPUPlace): warnings.warn( "Parameters are on CPU, and will be transferred to GPU " "automatically by data transform." 
diff --git a/test/ir/test_fuse_resnet_unit.py b/test/ir/test_fuse_resnet_unit.py index d76a806c0c8f5..f9599386f8bfc 100644 --- a/test/ir/test_fuse_resnet_unit.py +++ b/test/ir/test_fuse_resnet_unit.py @@ -18,7 +18,7 @@ import paddle import paddle.incubate -from paddle.fluid import core +from paddle.base import core paddle.enable_static() np.random.seed(0) @@ -49,7 +49,7 @@ def test_fuse_resenet_unit(self): out = batch_norm(conv2d(x)) graph = core.Graph(program.desc) core.get_pass("fuse_resnet_unit").apply(graph) - after_program = paddle.fluid.framework.IrGraph(graph).to_program() + after_program = paddle.base.framework.IrGraph(graph).to_program() params = paddle.static.amp.cast_model_to_fp16(program) after_params = paddle.static.amp.cast_model_to_fp16(after_program) exe = paddle.static.Executor(place) diff --git a/test/ir/test_ir_embedding_eltwise_layernorm_fuse_pass.py b/test/ir/test_ir_embedding_eltwise_layernorm_fuse_pass.py index dbdcdffdf5be1..4c0b5d5689885 100644 --- a/test/ir/test_ir_embedding_eltwise_layernorm_fuse_pass.py +++ b/test/ir/test_ir_embedding_eltwise_layernorm_fuse_pass.py @@ -18,13 +18,13 @@ from pass_test import PassTest import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class EmbEltwiseLayerNormFusePassTest(PassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): word_id = paddle.static.data( name="word_id", shape=[1, 128], @@ -127,7 +127,7 @@ def test_check_output(self): self.pass_attrs = { "embedding_eltwise_layernorm_fuse_pass": {"use_gpu": True} } - place = fluid.CUDAPlace(0) + place = base.CUDAPlace(0) self.check_output_with_place(place, startup_on_cpu=True) diff --git a/test/ir/test_ir_fc_fuse_pass.py b/test/ir/test_ir_fc_fuse_pass.py index 5ad3692db9a03..78fa2c29f33bf 100644 --- a/test/ir/test_ir_fc_fuse_pass.py +++ b/test/ir/test_ir_fc_fuse_pass.py @@ -18,13 +18,13 @@ from pass_test import PassTest import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class FCFusePassTest(PassTest): def setUp(self): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): data = paddle.static.data( name="data", shape=[32, 128], dtype="float32", lod_level=0 ) @@ -46,7 +46,7 @@ def test_check_output(self): use_gpu_set.append(True) for use_gpu in use_gpu_set: self.pass_attrs = {"fc_fuse_pass": {"use_gpu": use_gpu}} - place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace() + place = base.CUDAPlace(0) if use_gpu else base.CPUPlace() self.check_output_with_place(place, startup_on_cpu=True) diff --git a/test/ir/test_ir_fusion_group_pass.py b/test/ir/test_ir_fusion_group_pass.py index 67f7e797db5b6..91a9dafa77744 100644 --- a/test/ir/test_ir_fusion_group_pass.py +++ b/test/ir/test_ir_fusion_group_pass.py @@ -18,13 +18,13 @@ from pass_test import PassTest import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class FusionGroupPassTest(PassTest): def build_program(self, dtype): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): self.feed_vars = self._prepare_feed_vars([32, 128], dtype, 2) self.feed_vars.append( paddle.static.data(name="data2", shape=[128, 128], dtype=dtype) @@ -61,15 +61,15 @@ def 
_prepare_feed_vars(self, shape, dtype, num_data, stop_gradient=True): def _feed_random_data(self, feed_vars): feeds = {} for var in feed_vars: - if var.type != fluid.core.VarDesc.VarType.LOD_TENSOR: + if var.type != base.core.VarDesc.VarType.LOD_TENSOR: raise TypeError("Feed data of non LoDTensor is not supported.") shape = var.shape - if var.dtype == fluid.core.VarDesc.VarType.FP32: + if var.dtype == base.core.VarDesc.VarType.FP32: dtype = "float32" - elif var.dtype == fluid.core.VarDesc.VarType.FP64: + elif var.dtype == base.core.VarDesc.VarType.FP64: dtype = "float64" - elif var.dtype == fluid.core.VarDesc.VarType.FP16: + elif var.dtype == base.core.VarDesc.VarType.FP16: dtype = "float16" else: raise ValueError("Unsupported dtype %s" % var.dtype) @@ -79,12 +79,12 @@ def _feed_random_data(self, feed_vars): def test_check_output(self): if core.is_compiled_with_cuda(): self.pass_attrs = {"fusion_group_pass": {"use_gpu": True}} - self.check_output_with_place(fluid.CUDAPlace(0)) + self.check_output_with_place(base.CUDAPlace(0)) class FusionGroupPassComplicatedTest(FusionGroupPassTest): def build_program(self, dtype): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): self.feed_vars = self._prepare_feed_vars([32, 64], dtype, 5, False) one = paddle.tensor.fill_constant(shape=[1], dtype=dtype, value=1.0) @@ -107,7 +107,7 @@ def build_program(self, dtype): class FusionGroupPassInplaceTest(FusionGroupPassTest): def build_program(self, dtype): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): self.feed_vars = self._prepare_feed_vars([32, 128], dtype, 3) self.feed_vars.append( paddle.static.data(name="data3", shape=[128, 32], dtype=dtype) @@ -133,7 +133,7 @@ def setUp(self): class FusionGroupPassTestCastAndFP16(FusionGroupPassTest): def build_program(self, dtype): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): self.feed_vars = self._prepare_feed_vars([32, 128], dtype, 2) self.feed_vars.append( paddle.static.data(name="data2", shape=[128, 128], dtype=dtype) @@ -164,7 +164,7 @@ def build_program(self, dtype): class FusionGroupPassSumTest(FusionGroupPassTest): def build_program(self, dtype): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): self.feed_vars = self._prepare_feed_vars([32, 128], dtype, 3) self.feed_vars.append( paddle.static.data(name="data3", shape=[128, 128], dtype=dtype) @@ -188,7 +188,7 @@ def build_program(self, dtype): class FusionGroupPassCastTest(FusionGroupPassTest): def build_program(self, dtype): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): self.feed_vars = self._prepare_feed_vars([2, 2], dtype, 2) tmp_0 = paddle.add(self.feed_vars[0], self.feed_vars[1]) @@ -210,7 +210,7 @@ def setUp(self): class FusionGroupPassFillConstantTest(FusionGroupPassTest): def build_program(self, dtype): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): self.feed_vars = self._prepare_feed_vars([2, 2], dtype, 2) tmp_0 = paddle.add(self.feed_vars[0], self.feed_vars[1]) diff --git a/test/ir/test_ir_generate_pass.py b/test/ir/test_ir_generate_pass.py index a4b57f9bba063..062569c5211aa 
100644 --- a/test/ir/test_ir_generate_pass.py +++ b/test/ir/test_ir_generate_pass.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core from paddle.incubate.passes import ir from paddle.static import InputSpec @@ -274,7 +274,7 @@ def check_multi_add_to_sum(self, pass_type): core.get_pass(pass_type).apply(graph) after_node_nums = len(graph.nodes()) self.assertEqual(after_node_nums, before_node_nums - 2) - after_program = paddle.fluid.framework.IrGraph(graph).to_program() + after_program = paddle.base.framework.IrGraph(graph).to_program() executor = paddle.static.Executor(paddle.CPUPlace()) executor.run(startup_program) feed = { @@ -309,7 +309,7 @@ def test_generate_combine_mul_v1(self): core.get_pass("generate_combine_mul_v1").apply(graph) after_node_nums = len(graph.nodes()) self.assertEqual(after_node_nums, before_node_nums + 4) - after_program = paddle.fluid.framework.IrGraph(graph).to_program() + after_program = paddle.base.framework.IrGraph(graph).to_program() executor = paddle.static.Executor(paddle.CPUPlace()) executor.run(startup_program) feed = { @@ -357,7 +357,7 @@ def check_generate_simplify_inference(self, pass_type): core.get_pass(pass_type).apply(graph) after_node_nums = len(graph.nodes()) self.assertEqual(after_node_nums, before_node_nums - 6) - after_program = paddle.fluid.framework.IrGraph(graph).to_program() + after_program = paddle.base.framework.IrGraph(graph).to_program() executor = paddle.static.Executor(paddle.CPUPlace()) executor.run(startup_program) feed = {"x": np.random.random([10, 16, 16]).astype("float32")} @@ -393,7 +393,7 @@ def test_generate_layer_norm_fuse_pass(self): core.get_pass("generate_layer_norm_fuse_pass").apply(graph) after_node_nums = len(graph.nodes()) self.assertEqual(after_node_nums, before_node_nums - 14) - after_program = paddle.fluid.framework.IrGraph(graph).to_program() + after_program = paddle.base.framework.IrGraph(graph).to_program() executor = paddle.static.Executor(paddle.CPUPlace()) executor.run(startup_program) feed = {"x": np.random.random([3, 64, 120]).astype("float32")} diff --git a/test/ir/test_ir_graph_to_program_pass.py b/test/ir/test_ir_graph_to_program_pass.py index d98768e901c17..22af43f7f9a01 100644 --- a/test/ir/test_ir_graph_to_program_pass.py +++ b/test/ir/test_ir_graph_to_program_pass.py @@ -15,14 +15,14 @@ import unittest import paddle -from paddle import fluid, static +from paddle import base, static paddle.enable_static() def program_to_IRGraph(program): - graph = fluid.core.Graph(program.desc) - ir_graph = fluid.framework.IrGraph(graph, for_test=False) + graph = base.core.Graph(program.desc) + ir_graph = base.framework.IrGraph(graph, for_test=False) return ir_graph @@ -152,7 +152,7 @@ def multiblock_model(): b = static.data(name='b', shape=[10, 1], dtype='int64') cond = paddle.greater_than(a, b) - ie = fluid.layers.IfElse(cond) + ie = base.layers.IfElse(cond) with ie.true_block(): hidden = paddle.nn.functional.relu(data) ie.output(hidden) diff --git a/test/ir/test_ir_skip_layernorm_pass.py b/test/ir/test_ir_skip_layernorm_pass.py index 04b7aaad2fb27..015538bcd9b45 100644 --- a/test/ir/test_ir_skip_layernorm_pass.py +++ b/test/ir/test_ir_skip_layernorm_pass.py @@ -17,14 +17,14 @@ from pass_test import PassTest import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class SkipLayerNormFusePassTest(PassTest): def setUp(self): paddle.enable_static() - with 
fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): x = paddle.static.data( name="x", shape=[128, 768], dtype="float32", lod_level=0 ) @@ -48,7 +48,7 @@ def test_check_program(self): if core.is_compiled_with_cuda(): use_gpu_set.append(True) for use_gpu in use_gpu_set: - place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace() + place = base.CUDAPlace(0) if use_gpu else base.CPUPlace() opt_program = self._apply_ir_passes() self.check_program(opt_program) diff --git a/test/ir/test_ir_subgraph_python_interface.py b/test/ir/test_ir_subgraph_python_interface.py index 08f614fa062a6..6738dc6dccef6 100644 --- a/test/ir/test_ir_subgraph_python_interface.py +++ b/test/ir/test_ir_subgraph_python_interface.py @@ -17,9 +17,9 @@ from eager_op_test import OpTestTool import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.framework import IrGraph, Program, program_guard +from paddle import base +from paddle.base import core +from paddle.base.framework import IrGraph, Program, program_guard from paddle.static.quantization import QuantizationTransformPass paddle.enable_static() @@ -78,9 +78,9 @@ def false_func(): def test_quant_sub_graphs(self, use_cuda=False): graph, sub_graphs = self.build_graph_with_sub_graph() - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() transform_pass = QuantizationTransformPass( - scope=fluid.global_scope(), + scope=base.global_scope(), place=place, activation_quantize_type='abs_max', weight_quantize_type='range_abs_max', diff --git a/test/ir/test_ir_yolo_box_pass.py b/test/ir/test_ir_yolo_box_pass.py index 5947a71b93c7b..5ee434acef1f8 100644 --- a/test/ir/test_ir_yolo_box_pass.py +++ b/test/ir/test_ir_yolo_box_pass.py @@ -15,8 +15,8 @@ import unittest import paddle -from paddle.fluid import core -from paddle.fluid.layer_helper import LayerHelper +from paddle.base import core +from paddle.base.layer_helper import LayerHelper paddle.enable_static() @@ -89,7 +89,7 @@ def test_yolo_box_pass(self): ) graph = core.Graph(program.desc) core.get_pass("yolo_box_fuse_pass").apply(graph) - graph = paddle.fluid.framework.IrGraph(graph) + graph = paddle.base.framework.IrGraph(graph) op_nodes = graph.all_op_nodes() for op_node in op_nodes: op_type = op_node.op().type() diff --git a/test/legacy_test/auto_checkpoint_utils.py b/test/legacy_test/auto_checkpoint_utils.py index 6f8a58a267655..eff2829ec4221 100644 --- a/test/legacy_test/auto_checkpoint_utils.py +++ b/test/legacy_test/auto_checkpoint_utils.py @@ -18,10 +18,10 @@ import numpy as np import paddle -import paddle.fluid.incubate.checkpoint.auto_checkpoint as acp -from paddle import fluid -from paddle.fluid import unique_name -from paddle.fluid.framework import program_guard +import paddle.base.incubate.checkpoint.auto_checkpoint as acp +from paddle import base +from paddle.base import unique_name +from paddle.base.framework import program_guard BATCH_NUM = 4 BATCH_SIZE = 1 @@ -30,7 +30,7 @@ CLASS_NUM = 2 USE_GPU = False # whether use GPU to run model -places = fluid.cuda_places() if USE_GPU else fluid.cpu_places() +places = base.cuda_places() if USE_GPU else base.cpu_places() logger = None @@ -86,10 +86,10 @@ def simple_net(): sgd, loss, image, label = simple_net() if minimize: - compiled = fluid.CompiledProgram(main_prog) + compiled = base.CompiledProgram(main_prog) else: compiled = None - loader = fluid.io.DataLoader.from_generator( + loader = 
base.io.DataLoader.from_generator( feed_list=[image, label], capacity=64, use_double_buffer=True, @@ -106,15 +106,15 @@ def simple_net(): return compiled, loader, sgd, loss, image, label def _generate(self): - main_prog = fluid.Program() - startup_prog = fluid.Program() - exe = fluid.Executor(places[0]) + main_prog = base.Program() + startup_prog = base.Program() + exe = base.Executor(places[0]) return exe, main_prog, startup_prog def _reset_generator(self): - unique_name.generator = fluid.unique_name.UniqueNameGenerator() - acp.generator = fluid.unique_name.UniqueNameGenerator() + unique_name.generator = base.unique_name.UniqueNameGenerator() + acp.generator = base.unique_name.UniqueNameGenerator() acp.g_acp_type = None acp.g_checker = acp.AutoCheckpointChecker() acp.g_program_attr = {} diff --git a/test/legacy_test/auto_parallel_gpt_model.py b/test/legacy_test/auto_parallel_gpt_model.py index 4d5e1955e23f1..3f25aeb19b64c 100644 --- a/test/legacy_test/auto_parallel_gpt_model.py +++ b/test/legacy_test/auto_parallel_gpt_model.py @@ -809,7 +809,7 @@ def forward( x_dims_mapping = ["x"] + [None for i in range(len(x.shape) - 1)] w_dims_mapping = ["y"] + [None for i in range(len(w.shape) - 1)] - with paddle.fluid.name_scope('skip_quant'): + with paddle.base.name_scope('skip_quant'): if mesh: matmul = auto.shard_op( paddle.matmul, mesh, [x_dims_mapping, w_dims_mapping, None] diff --git a/test/legacy_test/check_nan_inf_base.py b/test/legacy_test/check_nan_inf_base.py index fad2ebaa752e5..31dbb1bed98a9 100644 --- a/test/legacy_test/check_nan_inf_base.py +++ b/test/legacy_test/check_nan_inf_base.py @@ -19,8 +19,8 @@ os.environ["FLAGS_check_nan_inf"] = "1" import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -73,16 +73,16 @@ def net(): def check(use_cuda): - main = fluid.Program() - startup = fluid.Program() - scope = fluid.core.Scope() + main = base.Program() + startup = base.Program() + scope = base.core.Scope() - with fluid.scope_guard(scope): - with fluid.program_guard(main, startup): + with base.scope_guard(scope): + with base.program_guard(main, startup): y_predict, avg_cost, acc_top1 = net() - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() + exe = base.Executor(place) exe.run(startup) step = 0.0 diff --git a/test/legacy_test/collective_allgather_op.py b/test/legacy_test/collective_allgather_op.py index fb2841468bfb1..516603f71affc 100644 --- a/test/legacy_test/collective_allgather_op.py +++ b/test/legacy_test/collective_allgather_op.py @@ -15,8 +15,8 @@ from test_collective_base import TestCollectiveRunnerBase, runtime_main import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -28,7 +28,7 @@ def __init__(self): def get_model(self, main_prog, startup_program): ring_id = 0 nranks = 2 - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = paddle.static.data( name="tindata", shape=[-1, 10, 1000], dtype='float32' ) diff --git a/test/legacy_test/collective_reducescatter_op.py b/test/legacy_test/collective_reducescatter_op.py index 2d10b66fb88dc..bffbafd8a341c 100644 --- a/test/legacy_test/collective_reducescatter_op.py +++ b/test/legacy_test/collective_reducescatter_op.py @@ -15,8 +15,8 @@ from test_collective_base import 
TestCollectiveRunnerBase, runtime_main import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -28,7 +28,7 @@ def __init__(self): def get_model(self, main_prog, startup_program): ring_id = 0 nranks = 2 - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = paddle.static.data( name="tindata", shape=[-1, 10, 1000], dtype='float32' ) diff --git a/test/legacy_test/decorator_helper.py b/test/legacy_test/decorator_helper.py index d0164f7beac64..d713878c3fb2d 100644 --- a/test/legacy_test/decorator_helper.py +++ b/test/legacy_test/decorator_helper.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from paddle import fluid +from paddle import base __all__ = ['many_times', 'prog_scope'] @@ -31,11 +31,11 @@ def __fn__(*args, **kwargs): def prog_scope(): def __impl__(fn): def __fn__(*args, **kwargs): - prog = fluid.Program() - startup_prog = fluid.Program() - scope = fluid.core.Scope() - with fluid.scope_guard(scope): - with fluid.program_guard(prog, startup_prog): + prog = base.Program() + startup_prog = base.Program() + scope = base.core.Scope() + with base.scope_guard(scope): + with base.program_guard(prog, startup_prog): fn(*args, **kwargs) return __fn__ diff --git a/test/legacy_test/detected_gpu.py b/test/legacy_test/detected_gpu.py index 55249b8c8cb9e..a33916714ee45 100644 --- a/test/legacy_test/detected_gpu.py +++ b/test/legacy_test/detected_gpu.py @@ -14,14 +14,14 @@ import sys -from paddle import fluid +from paddle import base -print("compile with cuda:", fluid.core.is_compiled_with_cuda()) -print("get_cuda_device_count:", fluid.core.get_cuda_device_count()) +print("compile with cuda:", base.core.is_compiled_with_cuda()) +print("get_cuda_device_count:", base.core.get_cuda_device_count()) if ( - fluid.core.is_compiled_with_cuda() - and fluid.core.get_cuda_device_count() > 0 + base.core.is_compiled_with_cuda() + and base.core.get_cuda_device_count() > 0 ): sys.exit(0) else: diff --git a/test/legacy_test/detected_xpu.py b/test/legacy_test/detected_xpu.py index df22fea92462f..3d7e944c3b40d 100644 --- a/test/legacy_test/detected_xpu.py +++ b/test/legacy_test/detected_xpu.py @@ -14,12 +14,12 @@ import sys -from paddle import fluid +from paddle import base -print("compile with xpu:", fluid.core.is_compiled_with_xpu()) -print("get_xpu_device_count:", fluid.core.get_xpu_device_count()) +print("compile with xpu:", base.core.is_compiled_with_xpu()) +print("get_xpu_device_count:", base.core.get_xpu_device_count()) -if fluid.core.is_compiled_with_xpu() and fluid.core.get_xpu_device_count() > 0: +if base.core.is_compiled_with_xpu() and base.core.get_xpu_device_count() > 0: sys.exit(0) else: sys.exit(1) diff --git a/test/legacy_test/dist_allreduce_op.py b/test/legacy_test/dist_allreduce_op.py index 2a35dbdf18143..ba2ec0180a299 100644 --- a/test/legacy_test/dist_allreduce_op.py +++ b/test/legacy_test/dist_allreduce_op.py @@ -18,7 +18,7 @@ from test_dist_base import TestDistRunnerBase, runtime_main import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet paddle.enable_static() @@ -27,8 +27,8 @@ paddle.dataset.mnist.fetch() # Fix seed for test -fluid.default_startup_program().random_seed = 1 -fluid.default_main_program().random_seed = 1 +base.default_startup_program().random_seed = 1 +base.default_main_program().random_seed = 1 def 
cnn_model(data): @@ -39,7 +39,7 @@ def cnn_model(data): pool_size=2, pool_stride=2, act="relu", - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), ) @@ -50,7 +50,7 @@ def cnn_model(data): pool_size=2, pool_stride=2, act="relu", - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), ) @@ -64,7 +64,7 @@ def cnn_model(data): x=conv_pool_2, size=SIZE, activation="softmax", - weight_attr=fluid.param_attr.ParamAttr( + weight_attr=base.param_attr.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), ) @@ -92,7 +92,7 @@ def get_model(self, batch_size=2, single_device=False): input=predict, label=label, total=batch_size_tensor ) - inference_program = fluid.default_main_program().clone() + inference_program = base.default_main_program().clone() # Reader train_reader = paddle.batch( diff --git a/test/legacy_test/dist_ctr.py b/test/legacy_test/dist_ctr.py index 3edee71832412..7b1010067cea0 100644 --- a/test/legacy_test/dist_ctr.py +++ b/test/legacy_test/dist_ctr.py @@ -18,14 +18,14 @@ from test_dist_base import TestDistRunnerBase, runtime_main import paddle -from paddle import fluid +from paddle import base IS_SPARSE = True os.environ['PADDLE_ENABLE_REMOTE_PREFETCH'] = "1" # Fix seed for test -fluid.default_startup_program().random_seed = 1 -fluid.default_main_program().random_seed = 1 +base.default_startup_program().random_seed = 1 +base.default_main_program().random_seed = 1 class TestDistCTR2x2(TestDistRunnerBase): @@ -57,7 +57,7 @@ def get_model(self, batch_size=2): is_distributed=False, input=dnn_data, size=[dnn_input_dim, dnn_layer_dims[0]], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( name="deep_embedding", initializer=paddle.nn.initializer.Constant(value=0.01), ), @@ -72,7 +72,7 @@ def get_model(self, batch_size=2): x=dnn_out, size=dim, activation="relu", - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), name='dnn-fc-%d' % i, @@ -84,7 +84,7 @@ def get_model(self, batch_size=2): is_distributed=False, input=lr_data, size=[lr_input_dim, 1], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( name="wide_embedding", initializer=paddle.nn.initializer.Constant(value=0.01), ), @@ -108,7 +108,7 @@ def get_model(self, batch_size=2): ) avg_cost = paddle.mean(x=cost) - inference_program = paddle.fluid.default_main_program().clone() + inference_program = paddle.base.default_main_program().clone() regularization = None use_l2_decay = bool(os.getenv('USE_L2_DECAY', 0)) diff --git a/test/legacy_test/dist_ctr_reader.py b/test/legacy_test/dist_ctr_reader.py index 657904fad0793..23f4daf2a5d8f 100644 --- a/test/legacy_test/dist_ctr_reader.py +++ b/test/legacy_test/dist_ctr_reader.py @@ -16,7 +16,7 @@ import tarfile import paddle -from paddle.fluid.log_helper import get_logger +from paddle.base.log_helper import get_logger logger = get_logger("paddle", logging.INFO) diff --git a/test/legacy_test/dist_fleet_ctr.py b/test/legacy_test/dist_fleet_ctr.py index 419344edfae5d..64c4f69a55654 100644 --- a/test/legacy_test/dist_fleet_ctr.py +++ b/test/legacy_test/dist_fleet_ctr.py @@ -25,13 +25,13 @@ from test_dist_fleet_base import FleetDistRunnerBase, runtime_main import paddle -from paddle import fluid +from paddle import base paddle.enable_static() # Fix seed for test -fluid.default_startup_program().random_seed = 1 -fluid.default_main_program().random_seed = 1 
+base.default_startup_program().random_seed = 1 +base.default_main_program().random_seed = 1 def fake_ctr_reader(): @@ -85,14 +85,14 @@ def net(self, args, is_train=True, batch_size=4, lr=0.01): if args.reader == "pyreader": if is_train: - self.reader = fluid.io.PyReader( + self.reader = base.io.PyReader( feed_list=datas, capacity=64, iterable=False, use_double_buffer=False, ) else: - self.test_reader = fluid.io.PyReader( + self.test_reader = base.io.PyReader( feed_list=datas, capacity=64, iterable=False, @@ -105,7 +105,7 @@ def net(self, args, is_train=True, batch_size=4, lr=0.01): is_distributed=False, input=dnn_data, size=[dnn_input_dim, dnn_layer_dims[0]], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( name="deep_embedding", initializer=paddle.nn.initializer.Constant(value=0.01), ), @@ -121,7 +121,7 @@ def net(self, args, is_train=True, batch_size=4, lr=0.01): x=dnn_out, size=dim, activation="relu", - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), name='dnn-fc-%d' % i, @@ -133,7 +133,7 @@ def net(self, args, is_train=True, batch_size=4, lr=0.01): is_distributed=False, input=lr_data, size=[lr_input_dim, 1], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( name="wide_embedding", initializer=paddle.nn.initializer.Constant(value=0.01), ), @@ -174,7 +174,7 @@ def check_model_right(self, dirname): with open(model_filename, "rb") as f: program_desc_str = f.read() - program = fluid.Program.parse_from_string(program_desc_str) + program = base.Program.parse_from_string(program_desc_str) with open(os.path.join(dirname, "__model__.proto"), "w") as wn: wn.write(str(program)) @@ -204,7 +204,7 @@ def do_distributed_testing(self, fleet): batch_idx, loss_val ) fleet.util.print_on_rank(message, 0) - except fluid.core.EOFException: + except base.core.EOFException: self.test_reader.reset() pass_time = time.time() - pass_start @@ -218,7 +218,7 @@ def do_pyreader_training(self, fleet): fleet(Fleet api): the fleet object of Parameter Server, define distribute training role """ exe = self.get_executor() - exe.run(fluid.default_startup_program()) + exe.run(base.default_startup_program()) fleet.init_worker() batch_size = 4 @@ -231,7 +231,7 @@ def do_pyreader_training(self, fleet): pass_start = time.time() while True: loss_val = exe.run( - program=fluid.default_main_program(), + program=base.default_main_program(), fetch_list=[self.avg_cost.name], ) loss_val = np.mean(loss_val) @@ -246,7 +246,7 @@ def do_pyreader_training(self, fleet): fleet.util.print_on_rank(message, 0) pass_time = time.time() - pass_start - except fluid.core.EOFException: + except base.core.EOFException: self.reader.reset() dirname = os.getenv("SAVE_DIRNAME", None) @@ -265,7 +265,7 @@ def do_dataset_training_queuedataset(self, fleet): train_file_list = ctr_dataset_reader.prepare_fake_data() exe = self.get_executor() - exe.run(fluid.default_startup_program()) + exe.run(base.default_startup_program()) fleet.init_worker() thread_num = 2 @@ -289,7 +289,7 @@ def do_dataset_training_queuedataset(self, fleet): pass_start = time.time() dataset.set_filelist(filelist) exe.train_from_dataset( - program=fluid.default_main_program(), + program=base.default_main_program(), dataset=dataset, fetch_list=[self.avg_cost], fetch_info=["cost"], @@ -318,7 +318,7 @@ def do_dataset_training(self, fleet): train_file_list = ctr_dataset_reader.prepare_fake_data() exe = self.get_executor() - exe.run(fluid.default_startup_program()) + exe.run(base.default_startup_program()) 
fleet.init_worker() thread_num = 2 @@ -326,7 +326,7 @@ def do_dataset_training(self, fleet): filelist = train_file_list # config dataset - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + dataset = base.DatasetFactory().create_dataset("InMemoryDataset") dataset.set_use_var(self.feeds) dataset.set_batch_size(128) dataset.set_thread(2) @@ -344,7 +344,7 @@ def do_dataset_training(self, fleet): for epoch_id in range(1): pass_start = time.time() exe.train_from_dataset( - program=fluid.default_main_program(), + program=base.default_main_program(), dataset=dataset, fetch_list=[self.avg_cost], fetch_info=["cost"], @@ -381,8 +381,8 @@ def do_dataset_training(self, fleet): fleet.save_dense_params( exe, dense_param_dirname, - fluid.global_scope(), - fluid.default_main_program(), + base.global_scope(), + base.default_main_program(), ) save_one_table_dirname = os.getenv("SAVE_ONE_TABLE_DIRNAME", None) diff --git a/test/legacy_test/dist_fleet_ctr_ps_gpu.py b/test/legacy_test/dist_fleet_ctr_ps_gpu.py index 43253c4674a7c..bf109e6a61306 100644 --- a/test/legacy_test/dist_fleet_ctr_ps_gpu.py +++ b/test/legacy_test/dist_fleet_ctr_ps_gpu.py @@ -26,11 +26,11 @@ from test_dist_fleet_base import runtime_main import paddle -from paddle import fluid +from paddle import base # Fix seed for test -fluid.default_startup_program().random_seed = 1 -fluid.default_main_program().random_seed = 1 +base.default_startup_program().random_seed = 1 +base.default_main_program().random_seed = 1 class TestDistGpuPsCTR2x2(TestDistCTR2x2): @@ -44,7 +44,7 @@ def check_model_right(self, dirname): with open(model_filename, "rb") as f: program_desc_str = f.read() - program = fluid.Program.parse_from_string(program_desc_str) + program = base.Program.parse_from_string(program_desc_str) with open(os.path.join(dirname, "__model__.proto"), "w") as wn: wn.write(str(program)) @@ -55,8 +55,8 @@ def do_pyreader_training(self, fleet): fleet(Fleet api): the fleet object of Parameter Server, define distribute training role """ device_id = int(os.getenv("FLAGS_selected_gpus", "0")) - place = fluid.CUDAPlace(device_id) - exe = fluid.Executor(place) + place = base.CUDAPlace(device_id) + exe = base.Executor(place) exe.run(fleet.startup_program) fleet.init_worker() @@ -86,7 +86,7 @@ def do_pyreader_training(self, fleet): fleet.util.print_on_rank(message, 0) pass_time = time.time() - pass_start - except fluid.core.EOFException: + except base.core.EOFException: self.reader.reset() model_dir = tempfile.mkdtemp() @@ -107,8 +107,8 @@ def do_dataset_training(self, fleet): ) = ctr_dataset_reader.prepare_data() device_id = int(os.getenv("FLAGS_selected_gpus", "0")) - place = fluid.CUDAPlace(device_id) - exe = fluid.Executor(place) + place = base.CUDAPlace(device_id) + exe = base.Executor(place) exe.run(fleet.startup_program) fleet.init_worker() diff --git a/test/legacy_test/dist_fleet_debug_gloo.py b/test/legacy_test/dist_fleet_debug_gloo.py index e4d86d57a873a..20ff924798d79 100644 --- a/test/legacy_test/dist_fleet_debug_gloo.py +++ b/test/legacy_test/dist_fleet_debug_gloo.py @@ -21,7 +21,7 @@ ) logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s") -logger = logging.getLogger("fluid") +logger = logging.getLogger("base") logger.setLevel(logging.INFO) # role = role_maker.GeneralRoleMaker( # init_timeout_seconds=100, diff --git a/test/legacy_test/dist_fleet_heter_pipeline_ctr.py b/test/legacy_test/dist_fleet_heter_pipeline_ctr.py index db7bcf8fac1a4..cb33e527d4155 100644 --- a/test/legacy_test/dist_fleet_heter_pipeline_ctr.py 
+++ b/test/legacy_test/dist_fleet_heter_pipeline_ctr.py @@ -22,13 +22,13 @@ from test_dist_fleet_heter_base import FleetDistHeterRunnerBase, runtime_main import paddle -from paddle import fluid +from paddle import base paddle.enable_static() # Fix seed for test -fluid.default_startup_program().random_seed = 1 -fluid.default_main_program().random_seed = 1 +base.default_startup_program().random_seed = 1 +base.default_main_program().random_seed = 1 class TestHeterPipelinePsCTR2x2(FleetDistHeterRunnerBase): @@ -48,7 +48,7 @@ def net(self, args, batch_size=4, lr=0.01): """ dnn_input_dim, lr_input_dim = int(1e5), int(1e5) - with fluid.device_guard("cpu"): + with base.device_guard("cpu"): dnn_data = paddle.static.data( name="dnn_data", shape=[-1, 1], @@ -76,7 +76,7 @@ def net(self, args, batch_size=4, lr=0.01): is_distributed=False, input=dnn_data, size=[dnn_input_dim, dnn_layer_dims[0]], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( name="deep_embedding", initializer=paddle.nn.initializer.Constant(value=0.01), ), @@ -92,7 +92,7 @@ def net(self, args, batch_size=4, lr=0.01): is_distributed=False, input=lr_data, size=[lr_input_dim, 1], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( name="wide_embedding", initializer=paddle.nn.initializer.Constant(value=0.01), ), @@ -102,20 +102,20 @@ def net(self, args, batch_size=4, lr=0.01): input=lr_embedding, pool_type="sum" ) - with fluid.device_guard("gpu"): + with base.device_guard("gpu"): for i, dim in enumerate(dnn_layer_dims[1:]): fc = paddle.static.nn.fc( x=dnn_out, size=dim, activation="relu", - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), name='dnn-fc-%d' % i, ) dnn_out = fc - with fluid.device_guard("cpu"): + with base.device_guard("cpu"): merge_layer = paddle.concat([dnn_out, lr_pool], axis=1) label = paddle.cast(label, dtype="int64") predict = paddle.static.nn.fc( @@ -141,20 +141,20 @@ def check_model_right(self, dirname): with open(model_filename, "rb") as f: program_desc_str = f.read() - program = fluid.Program.parse_from_string(program_desc_str) + program = base.Program.parse_from_string(program_desc_str) with open(os.path.join(dirname, "__model__.proto"), "w") as wn: wn.write(str(program)) def do_dataset_training(self, fleet): train_file_list = ctr_dataset_reader.prepare_fake_data() - exe = fluid.Executor(fluid.CPUPlace()) - real_program = fluid.default_main_program()._heter_pipeline_opt[ + exe = base.Executor(base.CPUPlace()) + real_program = base.default_main_program()._heter_pipeline_opt[ "section_program" ] print(real_program) - exe.run(fluid.default_startup_program()) + exe.run(base.default_startup_program()) fleet.init_worker() thread_num = int(os.getenv("CPU_NUM", 2)) @@ -164,7 +164,7 @@ def do_dataset_training(self, fleet): print(f"filelist: {filelist}") # config dataset - dataset = fluid.DatasetFactory().create_dataset() + dataset = base.DatasetFactory().create_dataset() dataset.set_batch_size(batch_size) dataset.set_use_var(self.feeds) pipe_command = 'python3 ctr_dataset_reader.py' @@ -177,7 +177,7 @@ def do_dataset_training(self, fleet): pass_start = time.time() dataset.set_filelist(filelist) exe.train_from_dataset( - program=fluid.default_main_program(), + program=base.default_main_program(), dataset=dataset, fetch_list=[self.avg_cost], fetch_info=["cost"], @@ -189,10 +189,10 @@ def do_dataset_training(self, fleet): exe.close() def do_dataset_heter_training(self, fleet): - exe = fluid.Executor() - exe.run(fluid.default_startup_program()) 
+ exe = base.Executor() + exe.run(base.default_startup_program()) fleet.init_worker() - real_program = fluid.default_main_program()._heter_pipeline_opt[ + real_program = base.default_main_program()._heter_pipeline_opt[ "section_program" ] print(real_program) @@ -202,7 +202,7 @@ def do_dataset_heter_training(self, fleet): pass_start = time.time() exe.train_from_dataset( - program=fluid.default_main_program(), + program=base.default_main_program(), fetch_list=[self.avg_cost], fetch_info=["cost"], print_period=2, @@ -216,7 +216,7 @@ def do_dataset_heter_training(self, fleet): # pass_start = time.time() # dataset.set_filelist(filelist) # exe.train_from_dataset( - # program=fluid.default_main_program(), + # program=base.default_main_program(), # dataset=dataset, # fetch_list=[self.avg_cost], # fetch_info=["cost"], diff --git a/test/legacy_test/dist_fleet_raw_program_optimizer.py b/test/legacy_test/dist_fleet_raw_program_optimizer.py index 69b5bf88702f4..e753ad3cc9da2 100644 --- a/test/legacy_test/dist_fleet_raw_program_optimizer.py +++ b/test/legacy_test/dist_fleet_raw_program_optimizer.py @@ -18,7 +18,7 @@ from test_dist_base import TestDistRunnerBase, runtime_main import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet from paddle.distributed.fleet.base import role_maker @@ -28,8 +28,8 @@ paddle.dataset.mnist.fetch() # Fix seed for test -fluid.default_startup_program().random_seed = 1 -fluid.default_main_program().random_seed = 1 +base.default_startup_program().random_seed = 1 +base.default_main_program().random_seed = 1 def cnn_model(data): @@ -40,7 +40,7 @@ def cnn_model(data): pool_size=2, pool_stride=2, act="relu", - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), ) @@ -51,7 +51,7 @@ def cnn_model(data): pool_size=2, pool_stride=2, act="relu", - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), ) @@ -65,7 +65,7 @@ def cnn_model(data): x=conv_pool_2, size=SIZE, activation="softmax", - weight_attr=fluid.param_attr.ParamAttr( + weight_attr=base.param_attr.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), ) @@ -93,7 +93,7 @@ def get_model(self, batch_size=2, single_device=False): input=predict, label=label, total=batch_size_tensor ) - test_program = fluid.default_main_program().clone(for_test=True) + test_program = base.default_main_program().clone(for_test=True) # Reader train_reader = paddle.batch( diff --git a/test/legacy_test/dist_fleet_raw_program_optimizer_fuse_allreduce.py b/test/legacy_test/dist_fleet_raw_program_optimizer_fuse_allreduce.py index f19bb7424c6ab..7f118af943d12 100644 --- a/test/legacy_test/dist_fleet_raw_program_optimizer_fuse_allreduce.py +++ b/test/legacy_test/dist_fleet_raw_program_optimizer_fuse_allreduce.py @@ -18,7 +18,7 @@ from test_dist_base import TestDistRunnerBase, runtime_main import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet from paddle.distributed.fleet.base import role_maker @@ -28,8 +28,8 @@ paddle.dataset.mnist.fetch() # Fix seed for test -fluid.default_startup_program().random_seed = 1 -fluid.default_main_program().random_seed = 1 +base.default_startup_program().random_seed = 1 +base.default_main_program().random_seed = 1 def cnn_model(data): @@ -40,7 +40,7 @@ def cnn_model(data): pool_size=2, pool_stride=2, act="relu", - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( 
initializer=paddle.nn.initializer.Constant(value=0.01) ), ) @@ -51,7 +51,7 @@ def cnn_model(data): pool_size=2, pool_stride=2, act="relu", - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), ) @@ -65,7 +65,7 @@ def cnn_model(data): x=conv_pool_2, size=SIZE, activation="softmax", - weight_attr=fluid.param_attr.ParamAttr( + weight_attr=base.param_attr.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), ) @@ -93,7 +93,7 @@ def get_model(self, batch_size=2, single_device=False): input=predict, label=label, total=batch_size_tensor ) - test_program = fluid.default_main_program().clone(for_test=True) + test_program = base.default_main_program().clone(for_test=True) # Reader train_reader = paddle.batch( diff --git a/test/legacy_test/dist_fleet_simnet_bow.py b/test/legacy_test/dist_fleet_simnet_bow.py index 5885f39569450..ffbe371cc228a 100644 --- a/test/legacy_test/dist_fleet_simnet_bow.py +++ b/test/legacy_test/dist_fleet_simnet_bow.py @@ -19,7 +19,7 @@ from test_dist_fleet_base import FleetDistRunnerBase, runtime_main import paddle -from paddle import fluid +from paddle import base paddle.enable_static() @@ -37,8 +37,8 @@ sample_rate = 1 # Fix seed for test -fluid.default_startup_program().random_seed = 1 -fluid.default_main_program().random_seed = 1 +base.default_startup_program().random_seed = 1 +base.default_main_program().random_seed = 1 def fake_simnet_reader(): @@ -111,7 +111,7 @@ def train_network( reader = None if is_pyreader: - reader = fluid.io.PyReader( + reader = base.io.PyReader( feed_list=datas, capacity=64, iterable=False, @@ -123,7 +123,7 @@ def train_network( input=q, is_distributed=is_distributed, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", ), @@ -139,7 +139,7 @@ def train_network( q_fc = paddle.static.nn.fc( x=q_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__q_fc__", learning_rate=base_lr, @@ -151,7 +151,7 @@ def train_network( input=pt, is_distributed=is_distributed, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", learning_rate=emb_lr, @@ -168,11 +168,11 @@ def train_network( pt_fc = paddle.static.nn.fc( x=pt_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__fc__", ), - bias_attr=fluid.ParamAttr(name="__fc_b__"), + bias_attr=base.ParamAttr(name="__fc_b__"), ) # embedding @@ -180,7 +180,7 @@ def train_network( input=nt, is_distributed=is_distributed, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", ), @@ -196,11 +196,11 @@ def train_network( nt_fc = paddle.static.nn.fc( x=nt_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__fc__", ), - bias_attr=fluid.ParamAttr(name="__fc_b__"), + bias_attr=base.ParamAttr(name="__fc_b__"), ) cos_q_pt = paddle.nn.functional.cosine_similarity(q_fc, pt_fc) cos_q_nt = paddle.nn.functional.cosine_similarity(q_fc, nt_fc) @@ -235,7 +235,7 @@ def check_model_right(self, dirname): with open(model_filename, "rb") as f: program_desc_str = f.read() - program = 
fluid.Program.parse_from_string(program_desc_str) + program = base.Program.parse_from_string(program_desc_str) with open(os.path.join(dirname, "__model__.proto"), "w") as wn: wn.write(str(program)) @@ -246,8 +246,8 @@ def do_pyreader_training(self, fleet): fleet(Fleet api): the fleet object of Parameter Server, define distribute training role """ - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) + exe = base.Executor(base.CPUPlace()) + exe.run(base.default_startup_program()) fleet.init_worker() batch_size = 4 # reader @@ -259,7 +259,7 @@ def do_pyreader_training(self, fleet): pass_start = time.time() while True: loss_val = exe.run( - program=fluid.default_main_program(), + program=base.default_main_program(), fetch_list=[self.avg_cost.name], ) loss_val = np.mean(loss_val) @@ -269,7 +269,7 @@ def do_pyreader_training(self, fleet): fleet.util.print_on_rank(message, 0) pass_time = time.time() - pass_start - except fluid.core.EOFException: + except base.core.EOFException: self.reader.reset() def do_dataset_training(self, fleet): diff --git a/test/legacy_test/dist_fleet_sparse_embedding_ctr.py b/test/legacy_test/dist_fleet_sparse_embedding_ctr.py index 922a458b5d9a3..120b7e51305d7 100644 --- a/test/legacy_test/dist_fleet_sparse_embedding_ctr.py +++ b/test/legacy_test/dist_fleet_sparse_embedding_ctr.py @@ -21,7 +21,7 @@ from test_dist_fleet_base import FleetDistRunnerBase, runtime_main import paddle -from paddle import fluid +from paddle import base def fake_ctr_reader(): @@ -74,7 +74,7 @@ def net(self, args, batch_size=4, lr=0.01): datas = [dnn_data, lr_data, label] if args.reader == "pyreader": - self.reader = fluid.io.PyReader( + self.reader = base.io.PyReader( feed_list=datas, capacity=64, iterable=False, @@ -101,7 +101,7 @@ def net(self, args, batch_size=4, lr=0.01): size=[dnn_input_dim, dnn_layer_dims[0]], is_test=inference, entry=entry, - param_attr=fluid.ParamAttr(name="deep_embedding", initializer=init), + param_attr=base.ParamAttr(name="deep_embedding", initializer=init), ) dnn_pool = paddle.static.nn.sequence_lod.sequence_pool( input=dnn_embedding, pool_type="sum" @@ -112,7 +112,7 @@ def net(self, args, batch_size=4, lr=0.01): x=dnn_out, size=dim, activation="relu", - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), name='dnn-fc-%d' % i, @@ -125,7 +125,7 @@ def net(self, args, batch_size=4, lr=0.01): size=[lr_input_dim, 1], is_test=inference, entry=entry, - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( name="wide_embedding", initializer=paddle.nn.initializer.Constant(value=0.01), ), @@ -161,9 +161,9 @@ def do_pyreader_training(self, fleet): fleet(Fleet api): the fleet object of Parameter Server, define distribute training role """ - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) - exe.run(fluid.default_startup_program()) + exe.run(base.default_startup_program()) fleet.init_worker() batch_size = 4 @@ -176,7 +176,7 @@ def do_pyreader_training(self, fleet): try: while True: loss_val = exe.run( - program=fluid.default_main_program(), + program=base.default_main_program(), fetch_list=[self.avg_cost.name], ) loss_val = np.mean(loss_val) @@ -185,7 +185,7 @@ def do_pyreader_training(self, fleet): epoch_id, loss_val ) ) - except fluid.core.EOFException: + except base.core.EOFException: self.reader.reset() model_dir = os.getenv("MODEL_DIR", None) diff --git a/test/legacy_test/dist_fleet_sync_batch_norm.py b/test/legacy_test/dist_fleet_sync_batch_norm.py 
index 09e11bda2d671..56a001178b4cf 100644 --- a/test/legacy_test/dist_fleet_sync_batch_norm.py +++ b/test/legacy_test/dist_fleet_sync_batch_norm.py @@ -20,9 +20,9 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet -from paddle.fluid import core +from paddle.base import core from paddle.static import Executor, Program, program_guard @@ -30,7 +30,7 @@ def get_program(args): main, startup = Program(), Program() main.random_seed = 10 startup.random_seed = 10 - with fluid.unique_name.guard(): + with base.unique_name.guard(): with program_guard(main, startup): data = paddle.static.data( name='input', @@ -42,14 +42,14 @@ def get_program(args): input=data, num_filters=32, filter_size=1, - param_attr=fluid.ParamAttr(name='conv2d_weight'), + param_attr=base.ParamAttr(name='conv2d_weight'), bias_attr=False, use_cudnn=args.use_cudnn, ) bn = paddle.static.nn.batch_norm( conv, - param_attr=fluid.ParamAttr(name='bn_scale'), - bias_attr=fluid.ParamAttr(name='bn_bias'), + param_attr=base.ParamAttr(name='bn_scale'), + bias_attr=base.ParamAttr(name='bn_bias'), moving_mean_name='bn_moving_mean', moving_variance_name='bn_moving_variance', data_layout=args.layout, @@ -69,7 +69,7 @@ def get_program(args): def train(args): - build_strategy = fluid.BuildStrategy() + build_strategy = base.BuildStrategy() build_strategy.sync_batch_norm = True build_strategy.enable_inplace = False build_strategy.memory_optimize = False @@ -86,7 +86,7 @@ def train(args): exe.run(startup) for nm in args.fetch_list: - fv = fluid.framework._get_var(str(nm), program=main) + fv = base.framework._get_var(str(nm), program=main) fv.persistable = True fetch_list = [v.name for v in outs] + args.fetch_list @@ -100,7 +100,7 @@ def train(args): ) data = np.load(filepath) - comp_prog = fluid.compiler.CompiledProgram( + comp_prog = base.compiler.CompiledProgram( main, build_strategy=build_strategy ) sync_bn_fetches = exe.run( diff --git a/test/legacy_test/dist_hapi_mnist_dynamic.py b/test/legacy_test/dist_hapi_mnist_dynamic.py index 64983a5054853..6c3a6aabd3705 100644 --- a/test/legacy_test/dist_hapi_mnist_dynamic.py +++ b/test/legacy_test/dist_hapi_mnist_dynamic.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import Model, fluid, set_device +from paddle import Model, base, set_device from paddle.metric import Accuracy from paddle.nn.layer.loss import CrossEntropyLoss from paddle.static import InputSpec as Input @@ -50,7 +50,7 @@ def compute_accuracy(pred, gt): @unittest.skipIf( - not fluid.is_compiled_with_cuda(), 'CPU testing is not supported' + not base.is_compiled_with_cuda(), 'CPU testing is not supported' ) class TestDistTraining(unittest.TestCase): def test_dynamic_multiple_gpus(self): diff --git a/test/legacy_test/dist_hapi_mnist_static.py b/test/legacy_test/dist_hapi_mnist_static.py index 9229e34529ba5..dd5dbf914278e 100644 --- a/test/legacy_test/dist_hapi_mnist_static.py +++ b/test/legacy_test/dist_hapi_mnist_static.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import Model, fluid, set_device +from paddle import Model, base, set_device from paddle.metric import Accuracy from paddle.nn.layer.loss import CrossEntropyLoss from paddle.static import InputSpec as Input @@ -50,7 +50,7 @@ def compute_accuracy(pred, gt): @unittest.skipIf( - not fluid.is_compiled_with_cuda(), 'CPU testing is not supported' + not base.is_compiled_with_cuda(), 'CPU testing is not supported' ) class TestDistTraining(unittest.TestCase): def 
test_static_multiple_gpus(self): diff --git a/test/legacy_test/dist_hapi_pure_fp16_static.py b/test/legacy_test/dist_hapi_pure_fp16_static.py index 3506b22d31f30..9abdd545f1325 100644 --- a/test/legacy_test/dist_hapi_pure_fp16_static.py +++ b/test/legacy_test/dist_hapi_pure_fp16_static.py @@ -17,18 +17,18 @@ import numpy as np import paddle -from paddle import Model, fluid +from paddle import Model, base from paddle.nn.layer.loss import CrossEntropyLoss from paddle.static import InputSpec from paddle.vision.models import LeNet @unittest.skipIf( - not fluid.is_compiled_with_cuda(), 'CPU testing is not supported' + not base.is_compiled_with_cuda(), 'CPU testing is not supported' ) class TestDistTrainingWithPureFP16(unittest.TestCase): def test_amp_training_purefp16(self): - if not fluid.is_compiled_with_cuda(): + if not base.is_compiled_with_cuda(): self.skipTest('module not tested when ONLY_CPU compiling') data = np.random.random(size=(4, 1, 28, 28)).astype(np.float32) label = np.random.randint(0, 10, size=(4, 1)).astype(np.int64) diff --git a/test/legacy_test/dist_mnist.py b/test/legacy_test/dist_mnist.py index 925a6f4f6739e..9bd3747524242 100644 --- a/test/legacy_test/dist_mnist.py +++ b/test/legacy_test/dist_mnist.py @@ -18,7 +18,7 @@ from test_dist_base import TestDistRunnerBase, runtime_main import paddle -from paddle import fluid +from paddle import base from paddle.incubate.distributed.fleet.collective import fleet paddle.enable_static() @@ -27,8 +27,8 @@ paddle.dataset.mnist.fetch() # Fix seed for test -fluid.default_startup_program().random_seed = 1 -fluid.default_main_program().random_seed = 1 +base.default_startup_program().random_seed = 1 +base.default_main_program().random_seed = 1 def cnn_model(data): @@ -39,7 +39,7 @@ def cnn_model(data): pool_size=2, pool_stride=2, act="relu", - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), ) @@ -50,7 +50,7 @@ def cnn_model(data): pool_size=2, pool_stride=2, act="relu", - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), ) @@ -64,7 +64,7 @@ def cnn_model(data): x=conv_pool_2, size=SIZE, activation="softmax", - weight_attr=fluid.param_attr.ParamAttr( + weight_attr=base.param_attr.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), ) @@ -92,7 +92,7 @@ def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): input=predict, label=label, total=batch_size_tensor ) - inference_program = fluid.default_main_program().clone() + inference_program = base.default_main_program().clone() # Optimization # TODO(typhoonzero): fix distributed adam optimizer # opt = paddle.optimizer.Adam( diff --git a/test/legacy_test/dist_mnist_batch_merge.py b/test/legacy_test/dist_mnist_batch_merge.py index e6a5070a53c99..5a1aa871932a3 100644 --- a/test/legacy_test/dist_mnist_batch_merge.py +++ b/test/legacy_test/dist_mnist_batch_merge.py @@ -16,7 +16,7 @@ from test_dist_base import TestDistRunnerBase, runtime_main import paddle -from paddle import fluid +from paddle import base DTYPE = "float32" @@ -56,7 +56,7 @@ def get_model(self, batch_size=2): input=predict, label=label, total=batch_size_tensor ) - inference_program = fluid.default_main_program().clone() + inference_program = base.default_main_program().clone() # Optimization opt = paddle.optimizer.Momentum(learning_rate=0.001, momentum=0.9) diff --git a/test/legacy_test/dist_mnist_dgc.py b/test/legacy_test/dist_mnist_dgc.py index 6f9d892cbd570..29d848c373915 
100644 --- a/test/legacy_test/dist_mnist_dgc.py +++ b/test/legacy_test/dist_mnist_dgc.py @@ -22,7 +22,7 @@ ) import paddle -from paddle import fluid +from paddle import base paddle.enable_static() @@ -30,8 +30,8 @@ paddle.dataset.mnist.fetch() # Fix seed for test -fluid.default_startup_program().random_seed = 1 -fluid.default_main_program().random_seed = 1 +base.default_startup_program().random_seed = 1 +base.default_main_program().random_seed = 1 def cnn_model(data): @@ -42,7 +42,7 @@ def cnn_model(data): pool_size=2, pool_stride=2, act="relu", - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), ) @@ -53,7 +53,7 @@ def cnn_model(data): pool_size=2, pool_stride=2, act="relu", - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), ) @@ -67,7 +67,7 @@ def cnn_model(data): x=conv_pool_2, size=SIZE, activation="softmax", - weight_attr=fluid.param_attr.ParamAttr( + weight_attr=base.param_attr.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), ) @@ -95,7 +95,7 @@ def get_model(self, batch_size=2, use_dgc=False, build_strategy=None): input=predict, label=label, total=batch_size_tensor ) - inference_program = fluid.default_main_program().clone() + inference_program = base.default_main_program().clone() if not use_dgc: opt = paddle.optimizer.Momentum(learning_rate=self.lr, momentum=0.9) else: diff --git a/test/legacy_test/dist_mnist_fp16_allreduce.py b/test/legacy_test/dist_mnist_fp16_allreduce.py index 44626be4f01da..669ce95021e28 100644 --- a/test/legacy_test/dist_mnist_fp16_allreduce.py +++ b/test/legacy_test/dist_mnist_fp16_allreduce.py @@ -16,7 +16,7 @@ from test_dist_base import TestDistRunnerBase, _insert_comm_op, runtime_main import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet from paddle.distributed.fleet.meta_optimizers import ( FP16AllReduceOptimizer as FP16AllReduce, @@ -26,8 +26,8 @@ paddle.dataset.mnist.fetch() # Fix seed for test -fluid.default_startup_program().random_seed = 1 -fluid.default_main_program().random_seed = 1 +base.default_startup_program().random_seed = 1 +base.default_main_program().random_seed = 1 class TestDistMnist2x2(TestDistRunnerBase): @@ -51,7 +51,7 @@ def get_model(self, batch_size=2, single_device=False): input=predict, label=label, total=batch_size_tensor ) - inference_program = fluid.default_main_program().clone() + inference_program = base.default_main_program().clone() # Optimization opt = paddle.optimizer.Momentum(learning_rate=0.001, momentum=0.9) diff --git a/test/legacy_test/dist_mnist_lars.py b/test/legacy_test/dist_mnist_lars.py index 9c963bc4cbb39..02ed25c76ccac 100644 --- a/test/legacy_test/dist_mnist_lars.py +++ b/test/legacy_test/dist_mnist_lars.py @@ -16,14 +16,14 @@ from test_dist_base import TestDistRunnerBase, runtime_main import paddle -from paddle import fluid +from paddle import base DTYPE = "float32" paddle.dataset.mnist.fetch() # Fix seed for test -fluid.default_startup_program().random_seed = 1 -fluid.default_main_program().random_seed = 1 +base.default_startup_program().random_seed = 1 +base.default_main_program().random_seed = 1 class TestDistMnist2x2(TestDistRunnerBase): @@ -47,7 +47,7 @@ def get_model(self, batch_size=2): input=predict, label=label, total=batch_size_tensor ) - inference_program = fluid.default_main_program().clone() + inference_program = base.default_main_program().clone() # Optimization opt = 
paddle.incubate.optimizer.LarsMomentumOptimizer( learning_rate=0.001, momentum=0.9 diff --git a/test/legacy_test/dist_se_resnext.py b/test/legacy_test/dist_se_resnext.py index f7b31d315722f..98b6af3af08ee 100644 --- a/test/legacy_test/dist_se_resnext.py +++ b/test/legacy_test/dist_se_resnext.py @@ -17,13 +17,13 @@ from test_dist_base import TestDistRunnerBase, runtime_main import paddle -from paddle import fluid +from paddle import base paddle.enable_static() # Fix seed for test -fluid.default_startup_program().random_seed = 1 -fluid.default_main_program().random_seed = 1 +base.default_startup_program().random_seed = 1 +base.default_main_program().random_seed = 1 train_parameters = { "input_size": [3, 224, 224], @@ -120,7 +120,7 @@ def net(self, input, class_dim=1000): x=drop, size=class_dim, activation='softmax', - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.05) ), ) @@ -173,7 +173,7 @@ def conv_bn_layer( groups=groups, act=None, # avoid pserver CPU init differs from GPU - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.05) ), bias_attr=False, @@ -186,7 +186,7 @@ def squeeze_excitation(self, input, num_channels, reduction_ratio): squeeze = paddle.static.nn.fc( x=pool, size=num_channels // reduction_ratio, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.05) ), activation='relu', @@ -195,7 +195,7 @@ def squeeze_excitation(self, input, num_channels, reduction_ratio): excitation = paddle.static.nn.fc( x=squeeze, size=num_channels, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.05) ), activation='sigmoid', @@ -226,7 +226,7 @@ def get_model(self, batch_size=2, use_dgc=False): acc_top5 = paddle.static.accuracy(input=out, label=label, k=5) # Evaluator - test_program = fluid.default_main_program().clone(for_test=True) + test_program = base.default_main_program().clone(for_test=True) # Optimization total_images = 6149 # flowers diff --git a/test/legacy_test/dist_sharding_save.py b/test/legacy_test/dist_sharding_save.py index 7483e02b48c5d..8eb61d220971f 100755 --- a/test/legacy_test/dist_sharding_save.py +++ b/test/legacy_test/dist_sharding_save.py @@ -17,13 +17,13 @@ from dist_mnist import cnn_model # noqa: F401 import paddle -from paddle import fluid +from paddle import base from paddle.distributed.fleet.base import role_maker from paddle.distributed.fleet.meta_optimizers import sharding # Fix seed for test -fluid.default_startup_program().random_seed = 1 -fluid.default_main_program().random_seed = 1 +base.default_startup_program().random_seed = 1 +base.default_main_program().random_seed = 1 def runtime_main(): @@ -34,12 +34,12 @@ def runtime_main(): paddle.enable_static() # model definition - train_prog = paddle.fluid.Program() - startup_prog = paddle.fluid.Program() + train_prog = paddle.base.Program() + startup_prog = paddle.base.Program() role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) - with fluid.program_guard(train_prog, startup_prog): - with fluid.unique_name.guard(): + with base.program_guard(train_prog, startup_prog): + with base.unique_name.guard(): input_x = paddle.static.data( name="x", shape=[-1, 32], dtype='float32' ) @@ -76,8 +76,8 @@ def runtime_main(): # execution device_id = int(os.getenv("FLAGS_selected_gpus", "0")) - place = fluid.CUDAPlace(device_id) - exe = fluid.Executor(place) + place = 
base.CUDAPlace(device_id) + exe = base.Executor(place) exe.run(startup_prog) dirname = "./ut_sharding_save_model" sharding.utils.save_persistables( diff --git a/test/legacy_test/dist_text_classification.py b/test/legacy_test/dist_text_classification.py index 3a6bb6c152c96..0e3c79d758c80 100644 --- a/test/legacy_test/dist_text_classification.py +++ b/test/legacy_test/dist_text_classification.py @@ -21,7 +21,7 @@ from test_dist_base import TestDistRunnerBase, runtime_main import paddle -from paddle import fluid +from paddle import base DTYPE = "float32" VOCAB_URL = 'http://paddle-dist-ce-data.bj.bcebos.com/imdb.vocab' @@ -59,7 +59,7 @@ def conv_net( input=input, size=[dict_dim, emb_dim], is_sparse=False, - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), ) @@ -70,7 +70,7 @@ def conv_net( filter_size=window_size, act="tanh", pool_type="max", - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), ) @@ -78,7 +78,7 @@ def conv_net( fc_0 = paddle.static.nn.fc( x=[conv_3], size=fc0_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), ) @@ -87,7 +87,7 @@ def conv_net( x=[fc_0], size=class_dim, activation="softmax", - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), ) @@ -138,7 +138,7 @@ def get_model(self, batch_size=2): ) avg_cost = paddle.mean(x=cost) acc = paddle.static.accuracy(input=predict, label=label) - inference_program = fluid.default_main_program().clone() + inference_program = base.default_main_program().clone() # Optimization opt = get_optimizer(learning_rate=0.001) diff --git a/test/legacy_test/dist_word2vec.py b/test/legacy_test/dist_word2vec.py index d6fcf02a43bdd..d32f836cdd15b 100644 --- a/test/legacy_test/dist_word2vec.py +++ b/test/legacy_test/dist_word2vec.py @@ -17,7 +17,7 @@ from test_dist_base import TestDistRunnerBase, runtime_main import paddle -from paddle import fluid +from paddle import base IS_SPARSE = True EMBED_SIZE = 32 @@ -25,8 +25,8 @@ N = 5 # Fix seed for test -fluid.default_startup_program().random_seed = 1 -fluid.default_main_program().random_seed = 1 +base.default_startup_program().random_seed = 1 +base.default_main_program().random_seed = 1 class TestDistWord2vec2x2(TestDistRunnerBase): @@ -39,7 +39,7 @@ def __network__(words): size=[dict_size, EMBED_SIZE], dtype='float32', is_sparse=IS_SPARSE, - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( name='shared_w', initializer=paddle.nn.initializer.Constant(value=0.1), ), @@ -49,7 +49,7 @@ def __network__(words): size=[dict_size, EMBED_SIZE], dtype='float32', is_sparse=IS_SPARSE, - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( name='shared_w', initializer=paddle.nn.initializer.Constant(value=0.1), ), @@ -59,7 +59,7 @@ def __network__(words): size=[dict_size, EMBED_SIZE], dtype='float32', is_sparse=IS_SPARSE, - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( name='shared_w', initializer=paddle.nn.initializer.Constant(value=0.1), ), @@ -69,7 +69,7 @@ def __network__(words): size=[dict_size, EMBED_SIZE], dtype='float32', is_sparse=IS_SPARSE, - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( name='shared_w', initializer=paddle.nn.initializer.Constant(value=0.1), ), @@ -83,7 +83,7 @@ def __network__(words): x=concat_embed, size=HIDDEN_SIZE, activation='sigmoid', - weight_attr=fluid.ParamAttr( + 
weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.1) ), ) @@ -91,7 +91,7 @@ def __network__(words): x=hidden1, size=dict_size, activation='softmax', - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.1) ), ) @@ -126,7 +126,7 @@ def __network__(words): [first_word, second_word, third_word, forth_word, next_word] ) - inference_program = paddle.fluid.default_main_program().clone() + inference_program = paddle.base.default_main_program().clone() sgd_optimizer = paddle.optimizer.SGD(learning_rate=0.001) sgd_optimizer.minimize(avg_cost) diff --git a/test/legacy_test/distributed_fused_lamb_test_base.py b/test/legacy_test/distributed_fused_lamb_test_base.py index b0926c93c2082..ec96fee570cf9 100644 --- a/test/legacy_test/distributed_fused_lamb_test_base.py +++ b/test/legacy_test/distributed_fused_lamb_test_base.py @@ -21,7 +21,7 @@ import paddle from paddle.distributed import fleet from paddle.distributed.fleet.meta_optimizers.common import CollectiveHelper -from paddle.fluid import core +from paddle.base import core from paddle.incubate import DistributedFusedLamb from paddle.nn.clip import ClipGradBase, _clip_by_global_norm_using_mp_type from paddle.vision.models import resnet18 as resnet @@ -126,7 +126,7 @@ def run_model(use_distributed_lamb, use_fp16, use_master_param_norm, **kwargs): main = paddle.static.Program() startup = paddle.static.Program() with paddle.static.program_guard(main, startup): - with paddle.fluid.unique_name.guard(): + with paddle.base.unique_name.guard(): with paddle.static.amp.fp16_guard(): image = paddle.static.data( name='image', diff --git a/test/legacy_test/eager_op_test.py b/test/legacy_test/eager_op_test.py index 817d39b7d879d..67ec4af14a97e 100644 --- a/test/legacy_test/eager_op_test.py +++ b/test/legacy_test/eager_op_test.py @@ -39,12 +39,12 @@ ) import paddle -from paddle import fluid +from paddle import base from paddle.autograd.ir_backward import grad as ir_grad -from paddle.fluid import core, unique_name -from paddle.fluid.backward import append_backward -from paddle.fluid.executor import Executor -from paddle.fluid.framework import ( +from paddle.base import core, unique_name +from paddle.base.backward import append_backward +from paddle.base.executor import Executor +from paddle.base.framework import ( OpProtoHolder, Program, _current_expected_place, @@ -52,7 +52,7 @@ get_flags, set_flags, ) -from paddle.fluid.wrapped_decorator import signature_safe_contextmanager +from paddle.base.wrapped_decorator import signature_safe_contextmanager sys.path.append(os.path.abspath(os.path.dirname(__file__))) @@ -78,7 +78,7 @@ def check_out_dtype(api_fn, in_specs, expect_dtypes, target_index=0, **configs): config(dict): other arguments of paddle api function Example: - check_out_dtype(fluid.layers.pad_constant_like, [([2,3,2,3], 'float64'), ([1, 3, 1,3], )], ['float32', 'float64', 'int64'], target_index=1, pad_value=0.) + check_out_dtype(base.layers.pad_constant_like, [([2,3,2,3], 'float64'), ([1, 3, 1,3], )], ['float32', 'float64', 'int64'], target_index=1, pad_value=0.) 
""" with paddle_static_guard(): @@ -106,7 +106,7 @@ def check_out_dtype(api_fn, in_specs, expect_dtypes, target_index=0, **configs): ) out = api_fn(*input_t, **configs) - out_dtype = fluid.data_feeder.convert_dtype(out.dtype) + out_dtype = base.data_feeder.convert_dtype(out.dtype) if out_dtype != expect_dtype: raise ValueError( @@ -578,7 +578,7 @@ def _enable_check_cinn_test(self, place, inputs, outputs): if ( not core.is_compiled_with_cinn() or not core.is_compiled_with_cuda() - or not isinstance(place, fluid.CUDAPlace) + or not isinstance(place, base.CUDAPlace) ): return False # CINN not support bfloat16 now, skip cinn test @@ -851,11 +851,11 @@ def _create_var_from_numpy(self, value): if isinstance(value, tuple): data = value[0] lod = value[1] - v = fluid.dygraph.base.to_variable(value=data) + v = base.dygraph.base.to_variable(value=data) v.value().get_tensor().set_recursive_sequence_lengths(lod) return v else: - return fluid.dygraph.base.to_variable(value) + return base.dygraph.base.to_variable(value) def get_sequence_batch_size_1_input(self, lod=None, shape=None): """Get LoD input data whose batch size is 1. @@ -1100,8 +1100,8 @@ def cal_python_api(python_api, args, kernel_sig): ][i] return result - with fluid.dygraph.base.guard(place=place): - block = fluid.default_main_program().global_block() + with base.dygraph.base.guard(place=place): + block = base.default_main_program().global_block() op_proto = OpProtoHolder.instance().get_op_proto(self.op_type) # prepare input variable dygraph_tensor_inputs = ( @@ -1166,8 +1166,8 @@ def _calc_dygraph_output( self.__class__.op_type = ( self.op_type ) # for ci check, please not delete it for now - with fluid.dygraph.base.guard(place=place): - block = fluid.default_main_program().global_block() + with base.dygraph.base.guard(place=place): + block = base.default_main_program().global_block() op_proto = OpProtoHolder.instance().get_op_proto(self.op_type) @@ -1204,8 +1204,8 @@ def _calc_dygraph_output( return outputs def get_kernel_signature(self, place, egr_inps=None, egr_oups=None): - with fluid.dygraph.base.guard(place=place): - block = fluid.default_main_program().global_block() + with base.dygraph.base.guard(place=place): + block = base.default_main_program().global_block() op_proto = OpProtoHolder.instance().get_op_proto(self.op_type) # prepare input variable dygraph_tensor_inputs = ( @@ -1387,7 +1387,7 @@ def _check_ir_output(self, place, program, feed_map, fetch_list, outs): executor = Executor(place) new_program = None if isinstance(program, paddle.static.CompiledProgram): - new_program = fluid.CompiledProgram( + new_program = base.CompiledProgram( program._program, build_strategy=program._build_strategy ) else: @@ -1438,7 +1438,7 @@ def _calc_output( for_inplace_test=None, check_cinn=False, ): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): program = Program() block = program.global_block() op = self._append_ops(block) @@ -1459,9 +1459,9 @@ def _calc_output( original_program = program if parallel: use_cuda = False - if isinstance(place, fluid.CUDAPlace): + if isinstance(place, base.CUDAPlace): use_cuda = True - compiled_prog = fluid.CompiledProgram(program) + compiled_prog = base.CompiledProgram(program) program = compiled_prog fetch_list = getattr(self, "fetch_list", []) # if the fetch_list is customized by user, we use it directly. 
@@ -1490,14 +1490,14 @@ def _calc_output( self.rtol = self.cinn_rtol if (enable_inplace is not None) or enable_cinn_test: - build_strategy = fluid.BuildStrategy() + build_strategy = base.BuildStrategy() if enable_inplace is not None: build_strategy.enable_inplace = enable_inplace if enable_cinn_test: build_strategy.build_cinn_pass = check_cinn self._check_cinn = enable_cinn_test - compiled_prog = fluid.CompiledProgram( + compiled_prog = base.CompiledProgram( program, build_strategy=build_strategy ) program = compiled_prog @@ -1700,8 +1700,8 @@ def _get_need_run_ops(self, op_desc, fwd_op_desc=None): def _dfs_grad_op(op_desc, fwd_op_desc=None): visited_ops.append(op_desc.type()) - has_infer_inplace = fluid.core.has_infer_inplace(op_desc.type()) - has_grad_op_maker = fluid.core.has_grad_op_maker(op_desc.type()) + has_infer_inplace = base.core.has_infer_inplace(op_desc.type()) + has_grad_op_maker = base.core.has_grad_op_maker(op_desc.type()) has_infer_inplace_in_grad_descendants = False if not has_grad_op_maker: has_infer_inplace_in_descendants = False @@ -1784,7 +1784,7 @@ def _calc_grad_output( Returns: res (tuple(outs, fetch_list, feed_map, program, op_desc)): The results of given grad_op_desc. """ - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): ( fwd_outs, fwd_fetch_list, @@ -1805,9 +1805,9 @@ def _calc_grad_output( exe = Executor(place) program = grad_program if enable_inplace is not None: - build_strategy = fluid.BuildStrategy() + build_strategy = base.BuildStrategy() build_strategy.enable_inplace = enable_inplace - compiled_program = fluid.CompiledProgram( + compiled_program = base.CompiledProgram( grad_program, build_strategy=build_strategy ) program = compiled_program @@ -1878,8 +1878,8 @@ def check_inplace_output_with_place( if os.getenv("FLAGS_enable_new_ir_in_executor"): return - has_infer_inplace = fluid.core.has_infer_inplace(self.op_type) - has_grad_op_maker = fluid.core.has_grad_op_maker(self.op_type) + has_infer_inplace = base.core.has_infer_inplace(self.op_type) + has_grad_op_maker = base.core.has_grad_op_maker(self.op_type) fwd_res = self._calc_output( place, no_check_set=no_check_set, for_inplace_test=True ) @@ -1891,7 +1891,7 @@ def check_inplace_output_with_place( return for op_desc, father_op_desc in reversed(need_run_ops): # The first one is the forward op - has_infer_inplace = fluid.core.has_infer_inplace(op_desc.type()) + has_infer_inplace = base.core.has_infer_inplace(op_desc.type()) if op_desc.type() == self.op_type: if has_infer_inplace: res[op_desc] = self._check_forward_inplace( @@ -1906,7 +1906,7 @@ def check_inplace_output_with_place( else: # TODO(zhiqiu): enhance inplace_grad test for ops (sum and activation) using mkldnn # skip op that use_mkldnn currently - flags_use_mkldnn = fluid.core.globals()["FLAGS_use_mkldnn"] + flags_use_mkldnn = base.core.globals()["FLAGS_use_mkldnn"] attrs_use_mkldnn = hasattr(self, 'attrs') and bool( self.attrs.get('use_mkldnn', False) ) @@ -2248,7 +2248,7 @@ def convert_uint16_to_float_ifneed(self, actual_np, expect_np): return actual_np, expect_np def find_actual_value(self, name): - with fluid.dygraph.base.guard(place=place): + with base.dygraph.base.guard(place=place): imperative_actual = find_imperative_actual( name, self.outputs, place ) @@ -2258,7 +2258,7 @@ def find_actual_value(self, name): return imperative_actual, imperative_actual_t def find_expect_value(self, name): - with fluid.dygraph.base.guard(place=place): + with base.dygraph.base.guard(place=place): imperative_expect = 
find_imperative_expect( name, self.ref_outputs, place ) @@ -2269,7 +2269,7 @@ def find_expect_value(self, name): def _compare_list(self, name, actual, expect): """if expect is a tuple, we need to compare list.""" - with fluid.dygraph.base.guard(place=place): + with base.dygraph.base.guard(place=place): self.op_test.assertListEqual( actual.value() .get_tensor() @@ -2469,8 +2469,8 @@ def _is_skip_name(self, name): and check_new_ir ): if ( - type(place) is paddle.fluid.libpaddle.CPUPlace - or type(place) is paddle.fluid.libpaddle.CUDAPlace + type(place) is paddle.base.libpaddle.CPUPlace + or type(place) is paddle.base.libpaddle.CUDAPlace ): print("New IR checker begins...........") with paddle.new_ir_utils._newir_guard(): @@ -2567,7 +2567,7 @@ def _get_places(self): return [] else: return [] - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] cpu_only = self._cpu_only if hasattr(self, '_cpu_only') else False if ( core.is_compiled_with_cuda() @@ -2992,7 +2992,7 @@ def check_grad_with_place( ) if check_dygraph: - with fluid.dygraph.base.guard(place): + with base.dygraph.base.guard(place): dygraph_dygraph_grad = self._get_dygraph_grad( inputs_to_check, place, @@ -3027,8 +3027,8 @@ def check_grad_with_place( and check_new_ir ): if ( - type(place) is paddle.fluid.libpaddle.CPUPlace - or type(place) is paddle.fluid.libpaddle.CUDAPlace + type(place) is paddle.base.libpaddle.CPUPlace + or type(place) is paddle.base.libpaddle.CUDAPlace ): print("New IR gradient begins...........") with paddle.new_ir_utils._newir_guard(): @@ -3076,8 +3076,8 @@ def _get_dygraph_grad( if hasattr(self, "use_custom_device") and self.use_custom_device: check_dygraph = False - with fluid.dygraph.base.guard(place=place): - block = fluid.default_main_program().global_block() + with base.dygraph.base.guard(place=place): + block = base.default_main_program().global_block() op_proto = OpProtoHolder.instance().get_op_proto(self.op_type) @@ -3182,15 +3182,15 @@ def _numpy_to_lod_tensor(np_value, lod, place): return tensor @staticmethod - def np_dtype_to_fluid_dtype(input): + def np_dtype_to_base_dtype(input): return input @staticmethod - def fluid_dtype_to_np_dtype(self, dtype): + def base_dtype_to_np_dtype(self, dtype): return dtype @staticmethod - def np_value_to_fluid_value(input): + def np_value_to_base_value(input): return input def cast_bf16_output(self, block, cast_inputs): @@ -3287,7 +3287,7 @@ def _get_gradient( parallel=False, check_cinn=False, ): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): prog = Program() scope = core.Scope() ir_scope = core.Scope() @@ -3372,18 +3372,18 @@ def _get_gradient( if parallel or enable_cinn_test: use_cuda = False - if isinstance(place, fluid.CUDAPlace): + if isinstance(place, base.CUDAPlace): use_cuda = True build_strategy = None if enable_cinn_test: - build_strategy = fluid.BuildStrategy() + build_strategy = base.BuildStrategy() build_strategy.build_cinn_pass = check_cinn self._check_cinn = True - compiled_prog = fluid.CompiledProgram(prog, build_strategy) + compiled_prog = base.CompiledProgram(prog, build_strategy) prog = compiled_prog - executor = fluid.Executor(place) + executor = base.Executor(place) res = list( map( np.array, diff --git a/test/legacy_test/feed_data_reader.py b/test/legacy_test/feed_data_reader.py index 68c2f22cda148..f91641d578c17 100644 --- a/test/legacy_test/feed_data_reader.py +++ b/test/legacy_test/feed_data_reader.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # 
limitations under the License. -from paddle import fluid -from paddle.fluid.framework import Variable +from paddle import base +from paddle.base.framework import Variable def cyclic_reader(reader): @@ -45,5 +45,5 @@ def _feed_executor(self): return feed_data def get_next(self, exe, program): - assert isinstance(exe, fluid.Executor), "exe must be Executor" + assert isinstance(exe, base.Executor), "exe must be Executor" return self._feed_executor() diff --git a/test/legacy_test/fleet_heter_ps_training.py b/test/legacy_test/fleet_heter_ps_training.py index e2555e0c58a69..f54101b9c8f3c 100644 --- a/test/legacy_test/fleet_heter_ps_training.py +++ b/test/legacy_test/fleet_heter_ps_training.py @@ -13,14 +13,14 @@ # limitations under the License. import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet -fluid.disable_dygraph() +base.disable_dygraph() def get_dataset(inputs): - dataset = fluid.DatasetFactory().create_dataset() + dataset = base.DatasetFactory().create_dataset() dataset.set_use_var(inputs) dataset.set_batch_size(1) dataset.set_filelist([]) @@ -40,7 +40,7 @@ def net(batch_size=4, lr=0.01): """ dnn_input_dim, lr_input_dim = int(2), int(2) - with fluid.device_guard("cpu"): + with base.device_guard("cpu"): dnn_data = paddle.static.data( name="dnn_data", shape=[-1, 1], @@ -68,7 +68,7 @@ def net(batch_size=4, lr=0.01): is_distributed=False, input=dnn_data, size=[dnn_input_dim, dnn_layer_dims[0]], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( name="deep_embedding", initializer=paddle.nn.initializer.Constant(value=0.01), ), @@ -84,7 +84,7 @@ def net(batch_size=4, lr=0.01): is_distributed=False, input=lr_data, size=[lr_input_dim, 1], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( name="wide_embedding", initializer=paddle.nn.initializer.Constant(value=0.01), ), @@ -94,13 +94,13 @@ def net(batch_size=4, lr=0.01): input=lr_embedding, pool_type="sum" ) - with fluid.device_guard("gpu"): + with base.device_guard("gpu"): for i, dim in enumerate(dnn_layer_dims[1:]): fc = paddle.static.nn.fc( x=dnn_out, size=dim, activation="relu", - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), name='dnn-fc-%d' % i, @@ -149,13 +149,13 @@ def net(batch_size=4, lr=0.01): fleet.stop_worker() elif fleet.is_worker(): pass - # place = fluid.CPUPlace() - # exe = fluid.Executor(place) - # exe.run(fluid.default_startup_program()) + # place = base.CPUPlace() + # exe = base.Executor(place) + # exe.run(base.default_startup_program()) # fleet.init_worker() # step = 1 # for i in range(step): # exe.train_from_dataset( - # program=fluid.default_main_program(), dataset=dataset, debug=False) + # program=base.default_main_program(), dataset=dataset, debug=False) # exe.close() # fleet.stop_worker() diff --git a/test/legacy_test/fleet_meta_optimizer_base.py b/test/legacy_test/fleet_meta_optimizer_base.py index acd7bb0a2f7d8..c0f2e2fc2a32f 100755 --- a/test/legacy_test/fleet_meta_optimizer_base.py +++ b/test/legacy_test/fleet_meta_optimizer_base.py @@ -17,7 +17,7 @@ import unittest import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet from paddle.distributed.fleet.base import role_maker @@ -51,8 +51,8 @@ def debug_program(self, main_prog, startup_prog): print(startup_prog_op_types) def net(self, main_prog, startup_prog): - with fluid.program_guard(main_prog, startup_prog): - with fluid.unique_name.guard(): + with base.program_guard(main_prog, 
startup_prog): + with base.unique_name.guard(): role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) input_x = paddle.static.data( @@ -87,11 +87,11 @@ def fc_block(input_x): fc_3 = paddle.static.nn.fc(x=fc_2, size=64, activation='tanh') return fc_3 - with fluid.program_guard(main_prog, startup_prog): - with fluid.unique_name.guard(): + with base.program_guard(main_prog, startup_prog): + with base.unique_name.guard(): role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) - with fluid.device_guard("gpu:0"): + with base.device_guard("gpu:0"): input_x = paddle.static.data( name="x", shape=[-1, 32], dtype='float32' ) @@ -100,10 +100,10 @@ def fc_block(input_x): ) for stage_idx in range(pp_degree): - with fluid.device_guard("gpu:" + str(stage_idx)): + with base.device_guard("gpu:" + str(stage_idx)): input_x = fc_block(input_x) - with fluid.device_guard("gpu:" + str(pp_degree - 1)): + with base.device_guard("gpu:" + str(pp_degree - 1)): prediction = paddle.static.nn.fc( x=[input_x], size=2, activation='softmax' ) @@ -119,7 +119,7 @@ def fc_block(input_x): return avg_cost, strategy def boundary_net(self, main_prog, startup_prog): - with fluid.program_guard(main_prog, startup_prog): + with base.program_guard(main_prog, startup_prog): fleet.init(is_collective=True) x = paddle.static.data(name='x', shape=[-1, 4], dtype='float32') with paddle.static.device_guard('gpu:0'): @@ -142,8 +142,8 @@ def optimizer( regularization=None, grad_clip=None, ): - with fluid.program_guard(train_prog, startup_prog): - with fluid.unique_name.guard(): + with base.program_guard(train_prog, startup_prog): + with base.unique_name.guard(): if name == 'momentum': optimizer = paddle.optimizer.Momentum( learning_rate=0.01, diff --git a/test/legacy_test/fleet_ps_training.py b/test/legacy_test/fleet_ps_training.py index 773448b417c71..2afb25664c800 100644 --- a/test/legacy_test/fleet_ps_training.py +++ b/test/legacy_test/fleet_ps_training.py @@ -16,7 +16,7 @@ from utils import gen_data import paddle -from paddle import fluid +from paddle import base from paddle.incubate.distributed.fleet import role_maker from paddle.incubate.distributed.fleet.parameter_server.distribute_transpiler import ( fleet, @@ -26,7 +26,7 @@ input_y = paddle.static.data(name="y", shape=[-1, 1], dtype='int64') input_y = paddle.cast(input_y, dtype="float32") -with fluid.device_guard("gpu"): +with base.device_guard("gpu"): input_y = paddle.cast(input_y, dtype="int64") cost = mlp(input_x, input_y) @@ -42,8 +42,8 @@ fleet.init_server() fleet.run_server() elif fleet.is_worker(): - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) exe.run(fleet.startup_program) step = 1001 for i in range(step): diff --git a/test/legacy_test/gradient_checker.py b/test/legacy_test/gradient_checker.py index c24691eda1503..085100540d11f 100644 --- a/test/legacy_test/gradient_checker.py +++ b/test/legacy_test/gradient_checker.py @@ -19,9 +19,9 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.backward import _append_grad_suffix_, _as_list +from paddle import base +from paddle.base import core +from paddle.base.backward import _append_grad_suffix_, _as_list def _product(t): @@ -81,7 +81,7 @@ def var_to_np_array_in_scope(scope, place, name): def make_jacobian(x, y_size, np_dtype): - if isinstance(x, fluid.framework.Variable): + if isinstance(x, base.framework.Variable): return np.zeros((_product(x.shape), y_size), 
dtype=np_dtype) elif isinstance(x, Sequence): jacobians = list( @@ -105,7 +105,7 @@ def _compute_numerical_jacobian(program, x, y, place, scope, delta): program (Program): the network program. x (Variable): the input variables. y (list[Variable]): the output variables. - place (fluid.CPUPlace or fluid.CUDAPlace): the device. + place (base.CPUPlace or base.CUDAPlace): the device. scope (Scope): the scope used to run program. delta: the amount of perturbation we give to the input @@ -116,12 +116,12 @@ def _compute_numerical_jacobian(program, x, y, place, scope, delta): where "x_size" is the number of elements in x and "y_size" is the number of elements in each y_i. """ - if not isinstance(x, fluid.framework.Variable): + if not isinstance(x, base.framework.Variable): raise TypeError('x is not Variable') # To compute the jacobian, treat x and y as one-dimensional vectors. y = _as_list(y) - exe = fluid.Executor(place) + exe = base.Executor(place) def run(): y_res = exe.run(program, scope=scope, fetch_list=y) @@ -160,7 +160,7 @@ def _compute_analytical_jacobian(program, x, y, place, scope): program (Program): a Program with forward pass. x (Variable|list[Variable]): a variable or list of variable y (Variable): the target variable. - place (fluid.CPUPlace or fluid.CUDAPlace): the device. + place (base.CPUPlace or base.CUDAPlace): the device. scope (Scope): the scope used to run program. Returns: @@ -170,7 +170,7 @@ def _compute_analytical_jacobian(program, x, y, place, scope): where "x_size" is the number of elements in x_i and "dy_size" is the number of elements in y. """ - if not isinstance(y, fluid.framework.Variable): + if not isinstance(y, base.framework.Variable): raise TypeError('y is not Variable') dy_name = _append_grad_suffix_(y.name) @@ -181,13 +181,13 @@ def _compute_analytical_jacobian(program, x, y, place, scope): name=dy_name, shape=y.shape, dtype=np_type, persistable=True ) # append backward - dx = fluid.gradients(y, x, dy) + dx = base.gradients(y, x, dy) # init dy tensor in scope value = np.zeros(y.shape, dtype=np_type) dy_t = set_var_in_scope(scope, place, dy_name, value) - exe = fluid.Executor(place) + exe = base.Executor(place) y_size = _product(y.shape) @@ -237,9 +237,9 @@ def grad_check( x (Variable|list[Variable]): input variables to the program. y (Variable|list[Variable]): output variables to the program. x_init (numpy.array|list[numpy.array]|None): the init value for input x. - place (fluid.CPUPlace or fluid.CUDAPlace): the device. + place (base.CPUPlace or base.CUDAPlace): the device. program (Program|None): a Program with forward pass. - If None, use fluid.default_main_program(). + If None, use base.default_main_program(). eps (float): perturbation for finite differences. atol (float): absolute tolerance. rtol (float): relative tolerance. @@ -265,14 +265,14 @@ def fail_test(msg): u.stop_gradient = False u.persistable = True if place is None: - place = fluid.CPUPlace() + place = base.CPUPlace() if program is None: - program = fluid.default_main_program() + program = base.default_main_program() # init variable in startup program - scope = fluid.executor.global_scope() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + scope = base.executor.global_scope() + exe = base.Executor(place) + exe.run(base.default_startup_program()) x_init = _as_list(x_init) # init inputs if x_init is not None @@ -352,9 +352,9 @@ def double_grad_check( y (Variable|list[Variable]): output variables to the program. 
x_init (numpy.array|list[numpy.array]|None): the init value for input x. y_grads (numpy.array|list[numpy.array]|None): the gradients with respect to y. - place (fluid.CPUPlace or fluid.CUDAPlace): the device. + place (base.CPUPlace or base.CUDAPlace): the device. program (Program|None): a Program with forward pass. - If None, use fluid.default_main_program(). + If None, use base.default_main_program(). eps (float): perturbation for finite differences. atol (float): absolute tolerance. rtol (float): relative tolerance. @@ -374,10 +374,10 @@ def double_grad_check( u.persistable = True if program is None: - program = fluid.default_main_program() + program = base.default_main_program() if y_grads is None: - scope = fluid.executor.global_scope() + scope = base.executor.global_scope() y_grads = [] y_grads_init = [] for yi in y: @@ -398,7 +398,7 @@ def double_grad_check( ] # append first order grads - target_grads = fluid.gradients(y, x, y_grads) + target_grads = base.gradients(y, x, y_grads) # y_grads are the input of first-order backward, # so, they are also the input of second-order backward. @@ -437,9 +437,9 @@ def triple_grad_check( x_init (numpy.array|list[numpy.array]|None): the init value for input x. y_grads (numpy.array|list[numpy.array]|None): the gradients with respect to y. x_grads_grads (numpy.array|list[numpy.array]|None): the gradients with respect to your input. - place (fluid.CPUPlace or fluid.CUDAPlace): the device. + place (base.CPUPlace or base.CUDAPlace): the device. program (Program|None): a Program with forward pass. - If None, use fluid.default_main_program(). + If None, use base.default_main_program(). eps (float): perturbation for finite differences. atol (float): absolute tolerance. rtol (float): relative tolerance. @@ -459,10 +459,10 @@ def triple_grad_check( u.persistable = True if program is None: - program = fluid.default_main_program() + program = base.default_main_program() if y_grads is None: - scope = fluid.executor.global_scope() + scope = base.executor.global_scope() y_grads = [] y_grads_init = [] for yi in y: @@ -483,10 +483,10 @@ def triple_grad_check( ] # append first order grads - target_grads = fluid.gradients(y, x, y_grads) + target_grads = base.gradients(y, x, y_grads) if x_grads_grads is None: - scope = fluid.executor.global_scope() + scope = base.executor.global_scope() x_grads_grads = [] x_grads_grads_init = [] for dxi in target_grads: @@ -511,7 +511,7 @@ def triple_grad_check( x_init += y_grads_init # append second order grads - target_grads_grads = fluid.gradients(target_grads, x, x_grads_grads) + target_grads_grads = base.gradients(target_grads, x, x_grads_grads) # filter None in target_grads_grads for Dy/Dx may be None in kernel filted = [ @@ -546,16 +546,16 @@ def get_static_double_grad( y (Variable|list[Variable]): output variables to the program. x_init (numpy.array|list[numpy.array]|None): the init value for input x. dy_init (numpy.array|list[numpy.array]|None): the init value for output y. - place (fluid.CPUPlace or fluid.CUDAPlace): the device. + place (base.CPUPlace or base.CUDAPlace): the device. program (Program|None): a Program with forward pass. - If None, use fluid.default_main_program(). + If None, use base.default_main_program(). Returns: A list of numpy array that stores second derivative result calculated by static graph. 
""" if program is None: - program = fluid.default_main_program() - scope = fluid.executor.global_scope() + program = base.default_main_program() + scope = base.executor.global_scope() y_grads = [] for i in range(len(y)): yi = y[i] @@ -569,7 +569,7 @@ def get_static_double_grad( y_grads.append(dy) # append first order grads - dx = fluid.gradients(y, x, y_grads) + dx = base.gradients(y, x, y_grads) # y_grads are the input of first-order backward, # so, they are also the input of second-order backward. @@ -591,14 +591,14 @@ def get_static_double_grad( u.stop_gradient = False u.persistable = True if place is None: - place = fluid.CPUPlace() + place = base.CPUPlace() if program is None: - program = fluid.default_main_program() + program = base.default_main_program() # init variable in startup program - scope = fluid.executor.global_scope() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + scope = base.executor.global_scope() + exe = base.Executor(place) + exe.run(base.default_startup_program()) x_init = _as_list(x_init) # init inputs if x_init is not None @@ -628,8 +628,8 @@ def get_static_double_grad( dys.append(dy) # append second order backward - ddx = fluid.gradients(y, x, dys) - exe = fluid.Executor(place) + ddx = base.gradients(y, x, dys) + exe = base.Executor(place) # filter None in dx for DX/DY may be None in kernel # only fetch not None dx in exe.run @@ -650,7 +650,7 @@ def get_eager_double_grad( func: A wrapped dygraph function that its logic is equal to static program x_init (numpy.array|list[numpy.array]|None): the init value for input x. dy_init (numpy.array|list[numpy.array]|None): the init value for gradient of output. - place (fluid.CPUPlace or fluid.CUDAPlace): the device. + place (base.CPUPlace or base.CUDAPlace): the device. return_mid_result (bool): A flag that controls the return content. Returns: If 'return_mid_result' set True. @@ -659,9 +659,9 @@ def get_eager_double_grad( If 'return_mid_result' set False. A list of numpy array that stores second derivative result calculated by dygraph. """ - if isinstance(place, fluid.CPUPlace): + if isinstance(place, base.CPUPlace): paddle.set_device("cpu") - if isinstance(place, fluid.CUDAPlace): + if isinstance(place, base.CUDAPlace): paddle.set_device("gpu") inputs = [] dys = [] @@ -736,7 +736,7 @@ def double_grad_check_for_dygraph( x (Variable|list[Variable]): input variables to the program. y (Variable|list[Variable]): output variables to the program. x_init (numpy.array|list[numpy.array]|None): the init value for input x. - place (fluid.CPUPlace or fluid.CUDAPlace): the device. + place (base.CPUPlace or base.CUDAPlace): the device. atol (float): absolute tolerance. rtol (float): relative tolerance. raise_exception (bool): whether to raise an exception if @@ -804,15 +804,15 @@ def get_static_triple_grad( y (Variable|list[Variable]): output variables to the program. x_init (numpy.array|list[numpy.array]|None): the init value for input x. dy_init (numpy.array|list[numpy.array]|None): the init value for output y. - place (fluid.CPUPlace or fluid.CUDAPlace): the device. + place (base.CPUPlace or base.CUDAPlace): the device. program (Program|None): a Program with forward pass. - If None, use fluid.default_main_program(). + If None, use base.default_main_program(). Returns: A list of numpy array that stores third derivative result calculated by static graph. 
""" if program is None: - program = fluid.default_main_program() - scope = fluid.executor.global_scope() + program = base.default_main_program() + scope = base.executor.global_scope() y_grads = [] for i in range(len(y)): yi = y[i] @@ -826,7 +826,7 @@ def get_static_triple_grad( y_grads.append(dy) # append first order grads - dx = fluid.gradients(y, x, y_grads) + dx = base.gradients(y, x, y_grads) # y_grads are the input of first-order backward, # so, they are also the input of second-order backward. @@ -855,7 +855,7 @@ def get_eager_triple_grad( func: A wrapped dygraph function that its logic is equal to static program x_init (numpy.array|list[numpy.array]|None): the init value for input x. dy_init (numpy.array|list[numpy.array]|None): the init value for gradient of output. - place (fluid.CPUPlace or fluid.CUDAPlace): the device. + place (base.CPUPlace or base.CUDAPlace): the device. return_mid_result (list[Tensor], list[Tensor]): If set True, the Returns: A list of numpy array that stores second derivative result calculated by dygraph @@ -899,7 +899,7 @@ def triple_grad_check_for_dygraph( x (Variable|list[Variable]): input variables to the program. y (Variable|list[Variable]): output variables to the program. x_init (numpy.array|list[numpy.array]|None): the init value for input x. - place (fluid.CPUPlace or fluid.CUDAPlace): the device. + place (base.CPUPlace or base.CUDAPlace): the device. atol (float): absolute tolerance. rtol (float): relative tolerance. raise_exception (bool): whether to raise an exception if diff --git a/test/legacy_test/ir_memory_optimize_net_base.py b/test/legacy_test/ir_memory_optimize_net_base.py index 795852e7b0341..5404e28ef839b 100644 --- a/test/legacy_test/ir_memory_optimize_net_base.py +++ b/test/legacy_test/ir_memory_optimize_net_base.py @@ -21,8 +21,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import compiler, core +from paddle import base +from paddle.base import compiler, core # open eager delete mode os.environ['FLAGS_eager_delete_tensor_gb'] = '0.0' @@ -56,8 +56,8 @@ def check_network_convergence( 'Skip use_parallel_executor=True because Paddle comes without parallel support on windows' ) return - fluid.default_startup_program().random_seed = 100 - fluid.default_main_program().random_seed = 100 + base.default_startup_program().random_seed = 100 + base.default_main_program().random_seed = 100 data = paddle.static.data( name="words", shape=[-1, 1], dtype="int64", lod_level=1 @@ -68,19 +68,19 @@ def check_network_convergence( cost = network(data, label, len(self.word_dict)) optimizer = paddle.optimizer.Adam(learning_rate=0.001) optimizer.minimize(cost) - build_strategy = fluid.BuildStrategy() + build_strategy = base.BuildStrategy() build_strategy.enable_inplace = enable_inplace build_strategy.memory_optimize = use_ir_memory_optimize # execution - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - feeder = fluid.DataFeeder(feed_list=[data, label], place=place) + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() + feeder = base.DataFeeder(feed_list=[data, label], place=place) reader = feeder.feed(self.train_reader()) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + exe.run(base.default_startup_program()) train_cp = compiler.CompiledProgram( - fluid.default_main_program(), build_strategy=build_strategy + base.default_main_program(), build_strategy=build_strategy ) fetch_list = [cost.name] @@ -127,8 +127,8 @@ def test_network(self): 
self.setup_reader() - with fluid.program_guard(fluid.Program(), fluid.Program()): - with fluid.scope_guard(core.Scope()): + with base.program_guard(base.Program(), base.Program()): + with base.scope_guard(core.Scope()): ( baseline_first_loss, baseline_last_loss, diff --git a/test/legacy_test/jit_load_rename_var.py b/test/legacy_test/jit_load_rename_var.py index 645e3faf37760..c7d74522de496 100644 --- a/test/legacy_test/jit_load_rename_var.py +++ b/test/legacy_test/jit_load_rename_var.py @@ -12,8 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. -from paddle.fluid import unique_name -from paddle.fluid.dygraph.base import switch_to_static_graph +from paddle.base import unique_name +from paddle.base.dygraph.base import switch_to_static_graph @switch_to_static_graph diff --git a/test/legacy_test/nets.py b/test/legacy_test/nets.py index 16a947b221e8b..621ad4b462cca 100644 --- a/test/legacy_test/nets.py +++ b/test/legacy_test/nets.py @@ -13,7 +13,7 @@ # limitations under the License. import paddle -from paddle.fluid.data_feeder import check_variable_and_dtype, convert_dtype +from paddle.base.data_feeder import check_variable_and_dtype, convert_dtype from paddle.utils import deprecated __all__ = [ @@ -46,7 +46,7 @@ def simple_img_conv_pool( r""" :api_attr: Static Graph - The simple_img_conv_pool api is composed of :ref:`api_fluid_layers_conv2d` and :ref:`api_fluid_layers_pool2d` . + The simple_img_conv_pool api is composed of :ref:`api_base_layers_conv2d` and :ref:`api_base_layers_pool2d` . Args: input (Variable): 4-D Tensor, shape is [N, C, H, W], data type can be float32 or float64. @@ -106,11 +106,11 @@ def simple_img_conv_pool( Examples: .. code-block:: python - import paddle.fluid as fluid + import paddle.base as base import paddle paddle.enable_static() img = paddle.static.data(name='img', shape=[100, 1, 28, 28], dtype='float32') - conv_pool = fluid.nets.simple_img_conv_pool(input=img, + conv_pool = base.nets.simple_img_conv_pool(input=img, filter_size=5, num_filters=20, pool_size=2, @@ -208,12 +208,12 @@ def img_conv_group( Examples: .. code-block:: python - import paddle.fluid as fluid + import paddle.base as base import paddle paddle.enable_static() img = paddle.static.data(name='img', shape=[None, 1, 28, 28], dtype='float32') - conv_pool = fluid.nets.img_conv_group(input=img, + conv_pool = base.nets.img_conv_group(input=img, conv_padding=1, conv_num_filter=[3, 3], conv_filter_size=3, @@ -288,10 +288,10 @@ def sequence_conv_pool( :api_attr: Static Graph **This api takes input as an LoDTensor. If input is a Tensor, please use** - :ref:`api_fluid_nets_simple_img_conv_pool` **instead** + :ref:`api_base_nets_simple_img_conv_pool` **instead** - The sequence_conv_pool is composed of :ref:`api_fluid_layers_sequence_conv` - and :ref:`api_fluid_layers_sequence_pool` . + The sequence_conv_pool is composed of :ref:`api_base_layers_sequence_conv` + and :ref:`api_base_layers_sequence_pool` . Args: input (Tensor): 2-D LoDTensor, the input of sequence_conv, @@ -323,7 +323,7 @@ def sequence_conv_pool( Examples: .. 
code-block:: python - import paddle.fluid as fluid + import paddle.base as base import paddle paddle.enable_static() input_dim = 100 #len(word_dict) @@ -331,7 +331,7 @@ def sequence_conv_pool( hid_dim = 512 data = paddle.static.data(name="words", shape=[None, 1], dtype="int64", lod_level=1) emb = paddle.static.nn.embedding(input=data, size=[input_dim, emb_dim], is_sparse=True) - seq_conv = fluid.nets.sequence_conv_pool(input=emb, + seq_conv = base.nets.sequence_conv_pool(input=emb, num_filters=hid_dim, filter_size=3, act="tanh", @@ -359,8 +359,8 @@ def glu(input, dim=-1): r""" :api_attr: Static Graph - The Gated Linear Units(GLU) composed by :ref:`api_fluid_layers_split` , - :ref:`api_fluid_layers_sigmoid` and :ref:`api_fluid_layers_elementwise_mul` . + The Gated Linear Units(GLU) composed by :ref:`api_base_layers_split` , + :ref:`api_base_layers_sigmoid` and :ref:`api_base_layers_elementwise_mul` . Specifically, GLU will plit the input into two equal-sized parts, :math:`a` and :math:`b`, along the given dimension and then compute as following: @@ -385,14 +385,14 @@ def glu(input, dim=-1): Examples: .. code-block:: python - import paddle.fluid as fluid + import paddle.base as base import paddle paddle.enable_static() data = paddle.static.data( name="words", shape=[-1, 6, 3, 9], dtype="float32") # shape of output: [-1, 3, 3, 9] - output = fluid.nets.glu(input=data, dim=1) + output = base.nets.glu(input=data, dim=1) """ check_variable_and_dtype( input, 'input', ['float16', 'float32', 'float64'], "glu" @@ -428,7 +428,7 @@ def scaled_dot_product_attention( Note that the implementation is adapted to batch, and all matrix multiplication in :math:`Attention(Q, K, V)` is batched matrix multiplication. Refer to - :ref:`api_fluid_layers_matmul` . + :ref:`api_base_layers_matmul` . Args: queries (Variable): A 3-D Tensor with shape :math:`[N, L_q, d_k \\times h]` , @@ -466,14 +466,14 @@ def scaled_dot_product_attention( Examples: .. 
code-block:: python - import paddle.fluid as fluid + import paddle.base as base import paddle paddle.enable_static() queries = paddle.static.data(name="queries", shape=[3, 5, 9], dtype="float32") keys = paddle.static.data(name="keys", shape=[3, 6, 9], dtype="float32") values = paddle.static.data(name="values", shape=[3, 6, 10], dtype="float32") - contexts = fluid.nets.scaled_dot_product_attention(queries, keys, values) + contexts = base.nets.scaled_dot_product_attention(queries, keys, values) contexts.shape # [3, 5, 10] """ check_variable_and_dtype( diff --git a/test/legacy_test/nproc_process.py b/test/legacy_test/nproc_process.py index 0c98c4a45dfd6..bee588de40bd4 100644 --- a/test/legacy_test/nproc_process.py +++ b/test/legacy_test/nproc_process.py @@ -15,11 +15,11 @@ import os import sys -from paddle import fluid +from paddle import base def train(prefix): - if fluid.core.is_compiled_with_xpu(): + if base.core.is_compiled_with_xpu(): selected_devices = os.getenv("FLAGS_selected_xpus") else: selected_devices = os.getenv("FLAGS_selected_gpus") diff --git a/test/legacy_test/op.py b/test/legacy_test/op.py index 6adc9daa52a09..0dec2f001188e 100644 --- a/test/legacy_test/op.py +++ b/test/legacy_test/op.py @@ -14,8 +14,8 @@ import numpy as np -from paddle.fluid import core -from paddle.fluid.proto import framework_pb2 +from paddle.base import core +from paddle.base.proto import framework_pb2 # NOTE: this is added to support creating a Scalar message diff --git a/test/legacy_test/parallel_dygraph_mnist.py b/test/legacy_test/parallel_dygraph_mnist.py index f3c20bc5c653b..f8908a0338508 100644 --- a/test/legacy_test/parallel_dygraph_mnist.py +++ b/test/legacy_test/parallel_dygraph_mnist.py @@ -19,7 +19,7 @@ ) import paddle -from paddle.fluid.dygraph.base import to_variable +from paddle.base.dygraph.base import to_variable class SimpleImgConvPool(paddle.nn.Layer): diff --git a/test/legacy_test/parallel_dygraph_shared_unused_var.py b/test/legacy_test/parallel_dygraph_shared_unused_var.py index 46f98854547f0..f51a4f765859e 100644 --- a/test/legacy_test/parallel_dygraph_shared_unused_var.py +++ b/test/legacy_test/parallel_dygraph_shared_unused_var.py @@ -24,7 +24,7 @@ ) import paddle -from paddle.fluid.dygraph.base import to_variable +from paddle.base.dygraph.base import to_variable from paddle.nn import Linear np.random.seed(2021) diff --git a/test/legacy_test/parallel_dygraph_sparse_embedding.py b/test/legacy_test/parallel_dygraph_sparse_embedding.py index 7236b05ffd32e..eaeb9bed5b78f 100644 --- a/test/legacy_test/parallel_dygraph_sparse_embedding.py +++ b/test/legacy_test/parallel_dygraph_sparse_embedding.py @@ -24,8 +24,8 @@ ) import paddle -from paddle import fluid -from paddle.fluid.dygraph.base import to_variable +from paddle import base +from paddle.base.dygraph.base import to_variable from paddle.nn import Embedding @@ -48,14 +48,14 @@ def __init__( self.vocab_size, self.hidden_size, sparse=is_sparse, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Uniform( low=-init_scale, high=init_scale ) ), ) self.softmax_weight = self.create_parameter( - attr=fluid.ParamAttr(), + attr=base.ParamAttr(), shape=[self.hidden_size, self.vocab_size], dtype=dtype, default_initializer=paddle.nn.initializer.Uniform( @@ -63,7 +63,7 @@ def __init__( ), ) self.softmax_bias = self.create_parameter( - attr=fluid.ParamAttr(), + attr=base.ParamAttr(), shape=[self.vocab_size], dtype=dtype, default_initializer=paddle.nn.initializer.Uniform( diff --git 
a/test/legacy_test/parallel_executor_test_base.py b/test/legacy_test/parallel_executor_test_base.py index 65034bdb1dd69..9cf5b00324f3a 100644 --- a/test/legacy_test/parallel_executor_test_base.py +++ b/test/legacy_test/parallel_executor_test_base.py @@ -23,8 +23,8 @@ from feed_data_reader import FeedDataReader import paddle -from paddle import fluid -from paddle.fluid import compiler, core +from paddle import base +from paddle.base import compiler, core __all__ = ['TestParallelExecutorBase'] DeviceType = core.DeviceType @@ -71,22 +71,22 @@ def run_executor(exe, binary, feed, fetch_list): paddle.seed(0) paddle.framework.random._manual_program_seed(0) - main = fluid.Program() - startup = fluid.Program() + main = base.Program() + startup = base.Program() - with fluid.program_guard(main, startup): + with base.program_guard(main, startup): feed_dict, loss = cls.build_model( feed_dict, get_data_from_feeder, main, method, optimizer ) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if use_device == DeviceType.CUDA - else fluid.XPUPlace(0) + else base.XPUPlace(0) if use_device == DeviceType.XPU - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(startup) build_strategy, exec_strategy = cls.set_strategy( @@ -112,9 +112,9 @@ def run_executor(exe, binary, feed, fetch_list): if batch_size is not None: batch_size *= ( - fluid.core.get_cuda_device_count() + base.core.get_cuda_device_count() if use_device == DeviceType.CUDA - else fluid.core.get_xpu_device_count() + else base.core.get_xpu_device_count() if use_device == DeviceType.XPU else int(os.environ.get('CPU_NUM', multiprocessing.cpu_count())) ) @@ -171,21 +171,21 @@ def check_pass_conflict( use_fast_executor=True, enable_sequential_execution=False, ): - main = fluid.Program() - startup = fluid.Program() - with fluid.program_guard(main, startup): + main = base.Program() + startup = base.Program() + with base.program_guard(main, startup): feed_dict, loss = cls.build_model( feed_dict, get_data_from_feeder, main, method, optimizer ) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if use_device == DeviceType.CUDA - else fluid.XPUPlace(0) + else base.XPUPlace(0) if use_device == DeviceType.XPU - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(startup) build_strategy, exec_strategy = cls.set_strategy( @@ -222,14 +222,14 @@ def set_strategy( use_reduce, use_device, ): - exec_strategy = fluid.ExecutionStrategy() + exec_strategy = base.ExecutionStrategy() if use_fast_executor: exec_strategy.use_experimental_executor = True - build_strategy = fluid.BuildStrategy() + build_strategy = base.BuildStrategy() build_strategy.reduce_strategy = ( - fluid.BuildStrategy.ReduceStrategy.Reduce + base.BuildStrategy.ReduceStrategy.Reduce if use_reduce - else fluid.BuildStrategy.ReduceStrategy.AllReduce + else base.BuildStrategy.ReduceStrategy.AllReduce ) build_strategy.fuse_elewise_add_act_ops = fuse_elewise_add_act_ops build_strategy.fuse_relu_depthwise_conv = fuse_relu_depthwise_conv diff --git a/test/legacy_test/prim_op_test.py b/test/legacy_test/prim_op_test.py index cca23904a922d..c41cfc05d79ca 100644 --- a/test/legacy_test/prim_op_test.py +++ b/test/legacy_test/prim_op_test.py @@ -20,8 +20,8 @@ import numpy as np import paddle -from paddle.fluid import core -from paddle.fluid.framework import ( +from paddle.base import core +from paddle.base.framework import ( OpProtoHolder, _dygraph_tracer, canonicalize_attrs, @@ -401,7 +401,7 @@ def 
init_checker_threshold(self): def check(self): if ( - type(self.place) is paddle.fluid.libpaddle.CUDAPlace + type(self.place) is paddle.base.libpaddle.CUDAPlace and not paddle.is_compiled_with_cuda() ): return @@ -417,9 +417,9 @@ def check(self): def get_kernel_sig(self): paddle.disable_static() - if type(self.place) is paddle.fluid.libpaddle.CPUPlace: + if type(self.place) is paddle.base.libpaddle.CPUPlace: paddle.device.set_device("cpu") - if type(self.place) is paddle.fluid.libpaddle.CUDAPlace: + if type(self.place) is paddle.base.libpaddle.CUDAPlace: paddle.device.set_device("gpu:0") ( eager_tensor_inputs, @@ -437,9 +437,9 @@ def get_kernel_sig(self): def get_eager_desire(self): paddle.disable_static() - if type(self.place) is paddle.fluid.libpaddle.CPUPlace: + if type(self.place) is paddle.base.libpaddle.CPUPlace: paddle.device.set_device("cpu") - if type(self.place) is paddle.fluid.libpaddle.CUDAPlace: + if type(self.place) is paddle.base.libpaddle.CUDAPlace: paddle.device.set_device("gpu:0") ( eager_tensor_inputs, @@ -578,7 +578,7 @@ def check_static_comp(self): # forward comp only for comp op if self.prim_op_type == "prim": return - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): core._set_prim_forward_enabled(self.enable_fw_comp) startup_program, main_program = ( paddle.static.Program(), @@ -655,9 +655,9 @@ def check_jit_comp(self): if self.prim_op_type == "prim": return paddle.disable_static() - if type(self.place) == paddle.fluid.libpaddle.CPUPlace: + if type(self.place) == paddle.base.libpaddle.CPUPlace: paddle.device.set_device("cpu") - if type(self.place) == paddle.fluid.libpaddle.CUDAPlace: + if type(self.place) == paddle.base.libpaddle.CUDAPlace: paddle.device.set_device("gpu:0") atol = self.fw_comp_atol if self.enable_fw_comp else self.jit_comp_atol rtol = self.fw_comp_rtol if self.enable_fw_comp else self.jit_comp_rtol @@ -734,7 +734,7 @@ def check_jit_comp_with_cinn(self): return # cinn doesn't support cpu place if ( - type(self.place) == paddle.fluid.libpaddle.CPUPlace + type(self.place) == paddle.base.libpaddle.CPUPlace and self.enable_cinn and core.is_compiled_with_cinn() ): @@ -751,9 +751,9 @@ def check_jit_comp_with_cinn(self): else self.fw_comp_rtol ) core._set_prim_forward_enabled(self.enable_fw_comp) - if type(self.place) is paddle.fluid.libpaddle.CPUPlace: + if type(self.place) is paddle.base.libpaddle.CPUPlace: paddle.device.set_device("cpu") - if type(self.place) is paddle.fluid.libpaddle.CUDAPlace: + if type(self.place) is paddle.base.libpaddle.CUDAPlace: paddle.device.set_device("gpu:0") ( eager_tensor_inputs, @@ -848,7 +848,7 @@ def init(self): def check(self): if ( - type(self.place) is paddle.fluid.libpaddle.CUDAPlace + type(self.place) is paddle.base.libpaddle.CUDAPlace and not paddle.is_compiled_with_cuda() ): return @@ -925,9 +925,9 @@ def gen_no_grad_set(self, var_dict): def get_eager_desire(self): paddle.disable_static() - if type(self.place) is paddle.fluid.libpaddle.CPUPlace: + if type(self.place) is paddle.base.libpaddle.CPUPlace: paddle.device.set_device("cpu") - if type(self.place) is paddle.fluid.libpaddle.CUDAPlace: + if type(self.place) is paddle.base.libpaddle.CUDAPlace: paddle.device.set_device("gpu:0") ( eager_tensor_inputs, @@ -978,9 +978,9 @@ def check_eager_comp(self): if self.prim_op_type == "comp": return paddle.disable_static() - if type(self.place) is paddle.fluid.libpaddle.CPUPlace: + if type(self.place) is paddle.base.libpaddle.CPUPlace: paddle.device.set_device("cpu") - if 
type(self.place) is paddle.fluid.libpaddle.CUDAPlace: + if type(self.place) is paddle.base.libpaddle.CUDAPlace: paddle.device.set_device("gpu:0") atol = self.rev_comp_atol rtol = self.rev_comp_rtol @@ -1027,7 +1027,7 @@ def check_static_comp(self): core._set_prim_backward_enabled(self.enable_rev_comp) atol = self.rev_comp_atol if self.enable_rev_comp else self.fw_comp_atol rtol = self.rev_comp_rtol if self.enable_rev_comp else self.fw_comp_rtol - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): startup_program, main_program = ( paddle.static.Program(), paddle.static.Program(), @@ -1130,9 +1130,9 @@ def check_static_comp(self): def check_jit_comp(self): paddle.disable_static() - if type(self.place) is paddle.fluid.libpaddle.CPUPlace: + if type(self.place) is paddle.base.libpaddle.CPUPlace: paddle.device.set_device("cpu") - if type(self.place) is paddle.fluid.libpaddle.CUDAPlace: + if type(self.place) is paddle.base.libpaddle.CUDAPlace: paddle.device.set_device("gpu:0") if self.prim_op_type == "prim": core._set_prim_backward_enabled(self.enable_rev_comp) @@ -1247,15 +1247,15 @@ def check_jit_comp(self): def check_jit_comp_with_cinn(self): # cinn doesn't support cpu place if ( - type(self.place) is paddle.fluid.libpaddle.CPUPlace + type(self.place) is paddle.base.libpaddle.CPUPlace and self.enable_cinn and core.is_compiled_with_cinn() ): return paddle.disable_static() - if type(self.place) is paddle.fluid.libpaddle.CPUPlace: + if type(self.place) is paddle.base.libpaddle.CPUPlace: paddle.device.set_device("cpu") - if type(self.place) is paddle.fluid.libpaddle.CUDAPlace: + if type(self.place) is paddle.base.libpaddle.CUDAPlace: paddle.device.set_device("gpu:0") if self.prim_op_type == "prim": core._set_prim_backward_enabled(self.enable_rev_comp) diff --git a/test/legacy_test/seresnext_net.py b/test/legacy_test/seresnext_net.py index 1cfda9d1d50a5..357b5b7e226b1 100644 --- a/test/legacy_test/seresnext_net.py +++ b/test/legacy_test/seresnext_net.py @@ -12,9 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from paddle import fluid +from paddle import base -fluid.core._set_eager_deletion_mode(-1, -1, False) +base.core._set_eager_deletion_mode(-1, -1, False) import os @@ -45,7 +45,7 @@ def squeeze_excitation(input, num_channels, reduction_ratio): - # pool = fluid.layers.pool2d( + # pool = base.layers.pool2d( # input=input, pool_size=0, pool_type='avg', global_pooling=True) conv = input shape = conv.shape diff --git a/test/legacy_test/seresnext_test_base.py b/test/legacy_test/seresnext_test_base.py index bed6180caea74..73ad9c27c0196 100644 --- a/test/legacy_test/seresnext_test_base.py +++ b/test/legacy_test/seresnext_test_base.py @@ -16,7 +16,7 @@ import seresnext_net from parallel_executor_test_base import DeviceType, TestParallelExecutorBase -from paddle.fluid import core +from paddle.base import core class TestResnetBase(TestParallelExecutorBase): diff --git a/test/legacy_test/simple_nets.py b/test/legacy_test/simple_nets.py index 8ff57cdce22db..0a0ab0f298353 100644 --- a/test/legacy_test/simple_nets.py +++ b/test/legacy_test/simple_nets.py @@ -15,7 +15,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base def simple_fc_net_with_inputs(img, label, class_num=10): @@ -25,7 +25,7 @@ def simple_fc_net_with_inputs(img, label, class_num=10): hidden, size=100, activation='relu', - bias_attr=fluid.ParamAttr( + bias_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=1.0) ), ) @@ -52,7 +52,7 @@ def batchnorm_fc_with_inputs(img, label, class_num=10): hidden, size=200, activation='relu', - bias_attr=fluid.ParamAttr( + bias_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=1.0) ), ) @@ -87,7 +87,7 @@ def bow_net( """ BOW net This model is from https://github.com/PaddlePaddle/models: - fluid/PaddleNLP/text_classification/nets.py + base/PaddleNLP/text_classification/nets.py """ data = paddle.static.data( name="words", shape=[-1, 1], dtype="int64", lod_level=1 diff --git a/test/legacy_test/static_model_parallel_fused_attention.py b/test/legacy_test/static_model_parallel_fused_attention.py index 5110b0aa255c9..981dda453f87f 100644 --- a/test/legacy_test/static_model_parallel_fused_attention.py +++ b/test/legacy_test/static_model_parallel_fused_attention.py @@ -16,7 +16,7 @@ from test_dist_base import TestDistRunnerBase, runtime_main import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet from paddle.incubate.nn import FusedMultiHeadAttention @@ -116,7 +116,7 @@ def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): ) if dist_strategy: - data_loader = fluid.io.DataLoader.from_generator( + data_loader = base.io.DataLoader.from_generator( feed_list=[data_in], capacity=64, use_double_buffer=False, diff --git a/test/legacy_test/static_model_parallel_fused_feedforward.py b/test/legacy_test/static_model_parallel_fused_feedforward.py index dcd502e58f89d..8f1406322ab8e 100644 --- a/test/legacy_test/static_model_parallel_fused_feedforward.py +++ b/test/legacy_test/static_model_parallel_fused_feedforward.py @@ -16,7 +16,7 @@ from test_dist_base import TestDistRunnerBase, runtime_main import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet from paddle.incubate.nn import FusedFeedForward @@ -107,7 +107,7 @@ def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): ) if dist_strategy: - data_loader = fluid.io.DataLoader.from_generator( + data_loader = base.io.DataLoader.from_generator( feed_list=[data_in], capacity=64, 
use_double_buffer=False, diff --git a/test/legacy_test/static_model_parallel_fused_multi_transformer.py b/test/legacy_test/static_model_parallel_fused_multi_transformer.py index 4979fee8f9b99..1da41d0e0dd21 100644 --- a/test/legacy_test/static_model_parallel_fused_multi_transformer.py +++ b/test/legacy_test/static_model_parallel_fused_multi_transformer.py @@ -16,7 +16,7 @@ from test_dist_base import TestDistRunnerBase, runtime_main import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet from paddle.incubate.nn import FusedMultiTransformer @@ -149,7 +149,7 @@ def get_model(self, batch_size=2, use_dgc=False, dist_strategy=None): ) if dist_strategy: - data_loader = fluid.io.DataLoader.from_generator( + data_loader = base.io.DataLoader.from_generator( feed_list=[data_in], capacity=64, use_double_buffer=False, diff --git a/test/legacy_test/test_accuracy_op.py b/test/legacy_test/test_accuracy_op.py index ae60df680e2b7..e722b6912c24d 100755 --- a/test/legacy_test/test_accuracy_op.py +++ b/test/legacy_test/test_accuracy_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, convert_float_to_uint16, paddle_static_guard import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard def accuracy_wrapper(infer, indices, label): @@ -111,8 +111,8 @@ def test_type_errors(self): with paddle_static_guard(): with program_guard(Program(), Program()): # The input type of accuracy_op must be Variable. - x1 = fluid.create_lod_tensor( - np.array([[-1]]), [[1]], fluid.CPUPlace() + x1 = base.create_lod_tensor( + np.array([[-1]]), [[1]], base.CPUPlace() ) label = paddle.static.data( name='label', shape=[-1, 1], dtype="int32" @@ -175,7 +175,7 @@ def test_api(self): class TestAccuracyAPI2(unittest.TestCase): def test_api(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): predictions = paddle.to_tensor( [[0.2, 0.1, 0.4, 0.1, 0.1], [0.2, 0.3, 0.1, 0.15, 0.25]], dtype='float32', @@ -188,7 +188,7 @@ def test_api(self): class TestAccuracyAPI(unittest.TestCase): def test_api(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): predictions = paddle.to_tensor( [[0.2, 0.1, 0.4, 0.1, 0.1], [0.2, 0.3, 0.1, 0.15, 0.25]], dtype='float32', diff --git a/test/legacy_test/test_activation_nn_grad.py b/test/legacy_test/test_activation_nn_grad.py index 58a2085a3a880..0c7742fa9c57d 100644 --- a/test/legacy_test/test_activation_nn_grad.py +++ b/test/legacy_test/test_activation_nn_grad.py @@ -20,8 +20,8 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestSigmoidTripleGradCheck(unittest.TestCase): @@ -41,9 +41,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -71,9 +71,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -92,7 +92,7 @@ def func(self, place): y = paddle.tanh(x) x_arr = np.random.random(shape).astype(dtype) x_arr[np.abs(x_arr) < 0.005] = 0.002 - from paddle.fluid import core + from paddle.base 
import core core._set_prim_backward_enabled(True) gradient_checker.triple_grad_check( @@ -105,9 +105,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -126,7 +126,7 @@ def func(self, place): y = paddle.tanh(x) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) x_arr[np.abs(x_arr) < 0.005] = 0.002 - from paddle.fluid import core + from paddle.base import core core._set_prim_backward_enabled(True) gradient_checker.double_grad_check( @@ -139,9 +139,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -169,9 +169,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -195,9 +195,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -229,9 +229,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places = [fluid.CUDAPlace(0)] + places = [base.CUDAPlace(0)] for p in places: self.func(p) @@ -263,9 +263,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -297,9 +297,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -332,9 +332,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -364,9 +364,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places = [fluid.CUDAPlace(0)] + places = [base.CUDAPlace(0)] for p in places: self.func(p) @@ -396,9 +396,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places = [fluid.CUDAPlace(0)] + places = [base.CUDAPlace(0)] for p in places: self.func(p) @@ -428,9 +428,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -460,9 +460,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + 
places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -490,9 +490,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -520,9 +520,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -549,9 +549,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -578,9 +578,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -608,9 +608,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -637,9 +637,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -666,9 +666,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -695,9 +695,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -725,9 +725,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) diff --git a/test/legacy_test/test_activation_op.py b/test/legacy_test/test_activation_op.py index 144b7fdcaa4e5..22230ad0000dc 100644 --- a/test/legacy_test/test_activation_op.py +++ b/test/legacy_test/test_activation_op.py @@ -23,9 +23,9 @@ import paddle import paddle.nn.functional as F -from paddle import fluid, static -from paddle.fluid import Program, core, program_guard -from paddle.fluid.layer_helper import LayerHelper +from paddle import base, static +from paddle.base import Program, core, program_guard +from paddle.base.layer_helper import LayerHelper @contextmanager @@ -39,7 +39,7 @@ def dynamic_guad(): class TestSqrtOpError(unittest.TestCase): def test_errors(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with program_guard(Program(), Program()): # The input type of sqrt op must be Variable or 
numpy.ndarray. in1 = 1 @@ -70,7 +70,7 @@ def setUp(self): x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) out = np.exp(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -120,7 +120,7 @@ def setUp(self): x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) out = np.exp(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.if_enable_cinn() self.convert_input_output() @@ -156,7 +156,7 @@ def init_shape(self): class Test_Exp_Op_Fp16(unittest.TestCase): def test_api_fp16(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with static.program_guard( paddle.static.Program(), paddle.static.Program() ): @@ -194,7 +194,7 @@ def setUp(self): x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) out = np.expm1(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -226,7 +226,7 @@ def setUp(self): def test_static_api(self): def run(place): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): X = paddle.static.data('X', self.shape, dtype=self.dtype) out = paddle.expm1(X) @@ -266,23 +266,23 @@ def test_api_int(self): class TestParameter: def test_out_name(self): - with paddle.fluid.framework._static_guard(): - with fluid.program_guard(fluid.Program()): + with paddle.base.framework._static_guard(): + with base.program_guard(base.Program()): np_x = np.array([0.1]).astype('float32').reshape((-1, 1)) data = paddle.static.data( name="X", shape=[-1, 1], dtype="float32" ) out = eval("paddle.%s(data, name='Y')" % self.op_type) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) (result,) = exe.run(feed={"X": np_x}, fetch_list=[out]) expected = eval("np.%s(np_x)" % self.op_type) np.testing.assert_allclose(result, expected, rtol=1e-05) def test_dygraph(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): np_x = np.array([0.1]) - x = fluid.dygraph.to_variable(np_x) + x = base.dygraph.to_variable(np_x) z = eval("paddle.%s(x).numpy()" % self.op_type) z_expected = eval("np.%s(np_x)" % self.op_type) np.testing.assert_allclose(z, z_expected, rtol=1e-05) @@ -301,7 +301,7 @@ def setUp(self): x = np.random.uniform(-1, 1, self.shape).astype(self.dtype) out = 1 / (1 + np.exp(-x)) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -341,7 +341,7 @@ def setUp(self): out = 1 / (1 + np.exp(-x)) self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x)) + 'X': OpTest.np_dtype_to_base_dtype(convert_float_to_uint16(x)) } self.outputs = {'Out': convert_float_to_uint16(out)} @@ -385,7 +385,7 @@ def setUp(self): np.random.seed(1024) x = np.random.uniform(-1, 1, self.shape).astype(self.dtype) out = x / (np.exp(-x) + 1) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -416,7 +416,7 @@ def setUp(self): ) def test_static_api(self): - with paddle.fluid.framework._static_guard(): + with 
paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): x = paddle.static.data('X', [11, 17]) out1 = F.silu(x) @@ -440,7 +440,7 @@ def test_dygraph_api(self): paddle.enable_static() def test_errors(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): # The input type must be Variable. self.assertRaises(TypeError, F.silu, 1) @@ -466,7 +466,7 @@ def setUp(self): np.random.seed(2048) x = np.random.uniform(-1, 1, self.shape).astype(self.dtype) out = np.log(1 / (1 + np.exp(-x))) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -494,7 +494,7 @@ def setUp(self): ) def test_static_api(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): x = paddle.static.data('X', [11, 17]) out1 = F.log_sigmoid(x) @@ -518,7 +518,7 @@ def test_dygraph_api(self): paddle.enable_static() def test_errors(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): # The input type must be Variable. self.assertRaises(TypeError, F.log_sigmoid, 1) @@ -552,7 +552,7 @@ def setUp(self): + 1j * np.random.uniform(-1, 1, self.shape) ).astype(self.dtype) out = np.tanh(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -607,7 +607,7 @@ def executed_api(self): self.tanh = F.tanh def test_static_api(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): x = paddle.static.data('X', [10, 12], self.dtype) out1 = self.tanh(x) @@ -631,7 +631,7 @@ def test_dygraph_api(self): np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) def test_errors(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): # The input type must be Variable. 
self.assertRaises(TypeError, self.tanh, 1) @@ -664,7 +664,7 @@ def setUp(self): x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) out = np.arctan(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -674,23 +674,23 @@ def test_check_grad(self): self.check_grad(['X'], 'Out') def test_out_name(self): - with paddle.fluid.framework._static_guard(): - with fluid.program_guard(fluid.Program()): + with paddle.base.framework._static_guard(): + with base.program_guard(base.Program()): np_x = np.array([0.1]).astype('float32').reshape((-1, 1)) data = paddle.static.data( name="X", shape=[-1, 1], dtype="float32" ) out = paddle.atan(data, name='Y') - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) (result,) = exe.run(feed={"X": np_x}, fetch_list=[out]) expected = np.arctan(np_x) self.assertEqual(result, expected) def test_dygraph(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): np_x = np.array([0.1]) - x = fluid.dygraph.to_variable(np_x) + x = base.dygraph.to_variable(np_x) z = paddle.atan(x).numpy() z_expected = np.arctan(np_x) self.assertEqual(z, z_expected) @@ -711,7 +711,7 @@ def setUp(self): np.random.seed(1024) x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) out = np.sinh(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -729,17 +729,17 @@ def init_shape(self): class TestSinhAPI(unittest.TestCase): def test_dygraph(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): np_x = np.array([0.1]) - x = fluid.dygraph.to_variable(np_x) + x = base.dygraph.to_variable(np_x) z = paddle.sinh(x).numpy() z_expected = np.sinh(np_x) np.testing.assert_allclose(z, z_expected, rtol=1e-05) def test_api(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): test_data_shape = [11, 17] - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input_x = np.random.uniform(0.1, 1, test_data_shape).astype( "float32" ) @@ -750,10 +750,10 @@ def test_api(self): ) pd_sinh_out = paddle.sinh(data_x) - exe = fluid.Executor(place=fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place=base.CPUPlace()) + exe.run(base.default_startup_program()) (np_sinh_res,) = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"data_x": input_x}, fetch_list=[pd_sinh_out], ) @@ -763,11 +763,11 @@ def test_api(self): def test_backward(self): test_data_shape = [11, 17] - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_x = np.random.uniform(0.1, 1, test_data_shape).astype( "float32" ) - var = fluid.dygraph.to_variable(input_x) + var = base.dygraph.to_variable(input_x) var.stop_gradient = False loss = paddle.sinh(var) loss.backward() @@ -777,7 +777,7 @@ def test_backward(self): class TestSinhOpError(unittest.TestCase): def test_errors(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with program_guard(Program()): # The input type must be Variable. 
self.assertRaises(TypeError, paddle.sinh, 1) @@ -803,7 +803,7 @@ def setUp(self): np.random.seed(1024) x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) out = np.cosh(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -821,17 +821,17 @@ def init_shape(self): class TestCoshAPI(unittest.TestCase): def test_dygraph(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): np_x = np.array([0.1]) - x = fluid.dygraph.to_variable(np_x) + x = base.dygraph.to_variable(np_x) z = paddle.cosh(x).numpy() z_expected = np.cosh(np_x) np.testing.assert_allclose(z, z_expected, rtol=1e-05) def test_api(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): test_data_shape = [11, 17] - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input_x = np.random.uniform(0.1, 1, test_data_shape).astype( "float32" ) @@ -842,10 +842,10 @@ def test_api(self): ) pd_cosh_out = paddle.cosh(data_x) - exe = fluid.Executor(place=fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place=base.CPUPlace()) + exe.run(base.default_startup_program()) (np_cosh_res,) = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"data_x": input_x}, fetch_list=[pd_cosh_out], ) @@ -855,11 +855,11 @@ def test_api(self): def test_backward(self): test_data_shape = [11, 17] - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_x = np.random.uniform(0.1, 1, test_data_shape).astype( "float32" ) - var = fluid.dygraph.to_variable(input_x) + var = base.dygraph.to_variable(input_x) var.stop_gradient = False loss = paddle.cosh(var) loss.backward() @@ -869,7 +869,7 @@ def test_backward(self): class TestCoshOpError(unittest.TestCase): def test_errors(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with program_guard(Program()): # The input type must be Variable. self.assertRaises(TypeError, paddle.cosh, 1) @@ -900,7 +900,7 @@ def setUp(self): np.random.seed(1024) x = np.random.uniform(10, 20, self.shape).astype(self.dtype) out = ref_tanhshrink(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -928,7 +928,7 @@ def setUp(self): ) def test_static_api(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.tanhshrink(x) @@ -951,7 +951,7 @@ def test_dygraph_api(self): np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) def test_errors(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): # The input type must be Variable. 
self.assertRaises(TypeError, F.tanhshrink, 1) @@ -985,7 +985,7 @@ def setUp(self): np.random.seed(1024) x = np.random.uniform(-1, 1, self.shape).astype(self.dtype) * 10 out = ref_hardshrink(x, self.threshold) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.attrs = {'threshold': self.threshold} @@ -1029,7 +1029,7 @@ def setUp(self): ) def test_static_api(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): x = paddle.static.data('X', [10, 12], dtype="float32") out1 = F.hardshrink(x) @@ -1059,7 +1059,7 @@ def test_dygraph_api(self): np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) def test_errors(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): # The input type must be Variable. self.assertRaises(TypeError, F.hardshrink, 1) @@ -1095,7 +1095,7 @@ def setUp(self): ) def test_static_api(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): x = paddle.static.data('X', [10, 12], dtype="float32") out1 = F.hardtanh(x) @@ -1125,7 +1125,7 @@ def test_dygraph_api(self): np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) def test_errors(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): # The input type must be Variable. self.assertRaises(TypeError, F.hardtanh, 1) @@ -1162,7 +1162,7 @@ def setUp(self): x = np.random.uniform(0.25, 10, self.shape).astype(self.dtype) out = ref_softshrink(x, threshold) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.attrs = {"lambda": threshold} @@ -1191,7 +1191,7 @@ def setUp(self): ) def test_static_api(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.softshrink(x, self.threshold) @@ -1214,7 +1214,7 @@ def test_dygraph_api(self): np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) def test_errors(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): # The input type must be Variable. 
self.assertRaises(TypeError, F.softshrink, 1) @@ -1250,7 +1250,7 @@ def setUp(self): x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) out = np.sqrt(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -1279,7 +1279,7 @@ def setUp(self): x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) out = np.sqrt(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} def test_check_grad(self): @@ -1321,7 +1321,7 @@ def setUp(self): out = np.sqrt(x) self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x)) + 'X': OpTest.np_dtype_to_base_dtype(convert_float_to_uint16(x)) } self.outputs = {'Out': convert_float_to_uint16(out)} @@ -1357,7 +1357,7 @@ def setUp(self): x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) out = np.sqrt(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -1386,7 +1386,7 @@ def setUp(self): x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) out = np.sqrt(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} def if_enable_cinn(self): @@ -1423,7 +1423,7 @@ def setUp(self): x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) out = 1.0 / np.sqrt(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -1474,7 +1474,7 @@ def setUp(self): x[np.abs(x) < 0.005] = 0.02 out = np.abs(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -1506,7 +1506,7 @@ def setUp(self): x = np.random.uniform(-1, 1, self.shape).astype(self.dtype) out = np.ceil(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -1537,7 +1537,7 @@ def setUp(self): x = np.random.uniform(-1, 1, self.shape).astype(self.dtype) out = np.floor(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -1591,7 +1591,7 @@ def setUp(self): + 1j * np.random.uniform(-1, 1, self.shape) ).astype(self.dtype) out = np.cos(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -1651,7 +1651,7 @@ def setUp(self): out = np.tan(self.x_np) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(self.x_np)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(self.x_np)} self.outputs = {'Out': out} self.convert_input_output() @@ -1703,7 +1703,7 @@ def test_dygraph_api(self): np.testing.assert_allclose(out_ref, out_test.numpy(), rtol=1e-05) def test_static_api(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): x = paddle.static.data('X', [11, 17], self.dtype) out = paddle.tan(x) @@ -1714,7 +1714,7 @@ def test_static_api(self): def test_backward(self): test_data_shape = 
[11, 17] - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_x = np.random.uniform(0.1, 1, test_data_shape).astype( "float32" ) @@ -1737,7 +1737,7 @@ def setUp(self): x = np.random.uniform(-0.95, 0.95, self.shape).astype(self.dtype) out = np.arccos(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -1773,7 +1773,7 @@ def setUp(self): + 1j * np.random.uniform(-1, 1, self.shape) ).astype(self.dtype) out = np.sin(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -1819,7 +1819,7 @@ def setUp(self): x = np.random.uniform(-0.95, 0.95, self.shape).astype(self.dtype) out = np.arcsin(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -1848,7 +1848,7 @@ def setUp(self): x = np.random.uniform(2, 3, self.shape).astype(self.dtype) out = np.arccosh(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -1877,7 +1877,7 @@ def setUp(self): x = np.random.uniform(1, 2, self.shape).astype(self.dtype) out = np.arcsinh(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -1906,7 +1906,7 @@ def setUp(self): x = np.random.uniform(-0.9, 0.9, self.shape).astype(self.dtype) out = np.arctanh(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -1935,7 +1935,7 @@ def setUp(self): x = np.random.uniform(-1, 1, self.shape).astype(self.dtype) out = np.round(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -2004,7 +2004,7 @@ def executed_api(self): self.relu = F.relu def test_static_api(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): x = paddle.static.data('X', [10, 12], dtype="float32") out1 = self.relu(x) @@ -2027,8 +2027,8 @@ def test_dygraph_api(self): np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) def test_errors(self): - with paddle.fluid.framework._static_guard(): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): # The input type must be Variable. 
self.assertRaises(TypeError, self.relu, 1) @@ -2128,7 +2128,7 @@ def setUp(self): ) def test_static_api(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): x = paddle.static.data('X', [10, 12], dtype="float32") out1 = F.leaky_relu(x) @@ -2158,7 +2158,7 @@ def test_dygraph_api(self): np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) def test_errors(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): # The input type must be Variable. self.assertRaises(TypeError, F.leaky_relu, 1) @@ -2237,7 +2237,7 @@ def setUp(self): out = gelu(x, approximate) self.if_enable_cinn() - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() self.attrs = {"approximate": approximate} @@ -2284,7 +2284,7 @@ def setUp(self): self.rev_comp_atol = 1e-8 def test_static_api(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): x = paddle.static.data('X', [11, 17], dtype="float32") out1 = F.gelu(x) @@ -2314,7 +2314,7 @@ def test_dygraph_api(self): np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) def test_errors(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): # The input type must be Variable. self.assertRaises(TypeError, F.gelu, 1) @@ -2347,7 +2347,7 @@ def setUp(self): t[t < t_min] = t_min t[t > t_max] = t_max - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': t} self.convert_input_output() self.attrs = {'t_min': t_min, 't_max': t_max} @@ -2379,7 +2379,7 @@ def setUp(self): self.attrs = {'threshold': 6.0} - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -2410,7 +2410,7 @@ def setUp(self): ) def test_static_api(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.relu6(x) @@ -2432,18 +2432,18 @@ def test_dygraph_api(self): for r in [out1, out2]: np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) - def test_fluid_api(self): - with paddle.fluid.framework._static_guard(): - with fluid.program_guard(fluid.Program()): + def test_base_api(self): + with paddle.base.framework._static_guard(): + with base.program_guard(base.Program()): x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out = paddle.nn.functional.relu6(x) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) res = exe.run(feed={'X': self.x_np}, fetch_list=[out]) out_ref = ref_relu6(self.x_np) np.testing.assert_allclose(out_ref, res[0], rtol=1e-05) def test_errors(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): # The input type must be Variable. 
self.assertRaises(TypeError, F.relu6, 1) @@ -2461,7 +2461,7 @@ def test_errors(self): class TestRelu6APIWarnings(unittest.TestCase): def test_warnings(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with warnings.catch_warnings(record=True) as context: warnings.simplefilter("always") @@ -2515,7 +2515,7 @@ def setUp(self): x[np.abs(x - threshold + offset) < 0.005] = threshold - offset + 0.02 out = ref_hardswish(x, threshold, scale, offset) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() self.attrs = {'threshold': threshold, 'scale': scale, 'offset': offset} @@ -2554,7 +2554,7 @@ def setUp(self): ) def test_static_api(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.hardswish(x) @@ -2576,12 +2576,12 @@ def test_dygraph_api(self): for r in [out1, out2]: np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) - def test_fluid_api(self): - with paddle.fluid.framework._static_guard(): - with fluid.program_guard(fluid.Program()): + def test_base_api(self): + with paddle.base.framework._static_guard(): + with base.program_guard(base.Program()): x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out = paddle.nn.functional.hardswish(x) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) res = exe.run(feed={'X': self.x_np}, fetch_list=[out]) out_ref = ref_hardswish(self.x_np) np.testing.assert_allclose(out_ref, res[0], rtol=1e-05) @@ -2592,7 +2592,7 @@ def test_fluid_api(self): np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05) def test_errors(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): # The input type must be Variable. self.assertRaises(TypeError, F.hardswish, 1) @@ -2624,7 +2624,7 @@ def setUp(self): t[t > threshold] = threshold out = np.log(np.exp(t) + 1) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() self.attrs = {'threshold': threshold} @@ -2659,7 +2659,7 @@ def setUp(self): # Note: unlike other Relu extensions, point 0 on standard ELU function (i.e. alpha = 1) # is differentiable, so we can skip modifications like x[np.abs(x) < 0.005] = 0.02 here - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() self.attrs = {'alpha': alpha} @@ -2702,7 +2702,7 @@ def executed_api(self): self.elu = F.elu def test_static_api(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): x = paddle.static.data('X', [10, 12], dtype="float32") out1 = self.elu(x) @@ -2734,7 +2734,7 @@ def test_dygraph_api(self): np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) def test_errors(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): # The input type must be Variable. 
self.assertRaises(TypeError, self.elu, 1) @@ -2778,7 +2778,7 @@ def setUp(self): alpha = 1.5 out = celu(x, alpha) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() self.attrs = {'alpha': alpha} @@ -2813,7 +2813,7 @@ def executed_api(self): self.celu = F.celu def test_static_api(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): x = paddle.static.data('X', [10, 12], dtype="float32") out1 = self.celu(x, 1.5) @@ -2845,7 +2845,7 @@ def test_dygraph_api(self): np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) def test_errors(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): # The input type must be Variable. self.assertRaises(TypeError, self.celu, 1) @@ -2877,7 +2877,7 @@ def setUp(self): x = np.random.uniform(1, 2, self.shape).astype(self.dtype) out = np.reciprocal(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -2909,7 +2909,7 @@ def setUp(self): x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) out = np.log(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -2924,7 +2924,7 @@ def test_check_grad(self): class Test_Log_Op_Fp16(unittest.TestCase): def test_api_fp16(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with static.program_guard( paddle.static.Program(), paddle.static.Program() ): @@ -2937,7 +2937,7 @@ def test_api_fp16(self): (res,) = exe.run(fetch_list=[out]) def test_api_bf16(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with static.program_guard( paddle.static.Program(), paddle.static.Program() ): @@ -2977,7 +2977,7 @@ def setUp(self): x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) out = np.log2(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -2987,7 +2987,7 @@ def test_check_grad(self): self.check_grad(['X'], 'Out') def test_api(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard( paddle.static.Program(), paddle.static.Program() ): @@ -2997,7 +2997,7 @@ def test_api(self): ) out1 = paddle.log2(data_x) - exe = paddle.static.Executor(place=fluid.CPUPlace()) + exe = paddle.static.Executor(place=base.CPUPlace()) exe.run(paddle.static.default_startup_program()) (res1,) = exe.run( paddle.static.default_main_program(), @@ -3008,7 +3008,7 @@ def test_api(self): np.testing.assert_allclose(res1, expected_res, rtol=1e-05) # dygraph - with fluid.dygraph.guard(): + with base.dygraph.guard(): np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64") data_x = paddle.to_tensor(np_x) z = paddle.log2(data_x) @@ -3034,7 +3034,7 @@ def test_api_int(self): paddle.enable_static() def test_api_bf16(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with static.program_guard( paddle.static.Program(), paddle.static.Program() ): @@ 
-3057,7 +3057,7 @@ def setUp(self): x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) out = np.log10(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -3084,7 +3084,7 @@ def test_api_int(self): paddle.enable_static() def test_api_bf16(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with static.program_guard( paddle.static.Program(), paddle.static.Program() ): @@ -3099,7 +3099,7 @@ def test_api_bf16(self): class TestLog10API(unittest.TestCase): def test_api(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard( paddle.static.Program(), paddle.static.Program() ): @@ -3120,7 +3120,7 @@ def test_api(self): np.testing.assert_allclose(res1, expected_res, rtol=1e-05) # dygraph - with fluid.dygraph.guard(): + with base.dygraph.guard(): np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64") data_x = paddle.to_tensor(np_x) z = paddle.log10(data_x) @@ -3140,7 +3140,7 @@ def setUp(self): x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) out = np.log1p(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -3152,7 +3152,7 @@ def test_check_grad(self): class Test_Log1p_Op_Fp16(unittest.TestCase): def test_api_fp16(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with static.program_guard( paddle.static.Program(), paddle.static.Program() ): @@ -3177,7 +3177,7 @@ def test_api_int(self): paddle.enable_static() def test_api_bf16(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with static.program_guard( paddle.static.Program(), paddle.static.Program() ): @@ -3197,8 +3197,8 @@ def init_shape(self): class TestLog1pAPI(unittest.TestCase): def test_api(self): - with paddle.fluid.framework._static_guard(): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with paddle.base.framework._static_guard(): + with base.program_guard(base.Program(), base.Program()): input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64") data_x = paddle.static.data( name="data_x", @@ -3207,10 +3207,10 @@ def test_api(self): ) out1 = paddle.log1p(data_x) - exe = fluid.Executor(place=fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place=base.CPUPlace()) + exe.run(base.default_startup_program()) (res1,) = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"data_x": input_x}, fetch_list=[out1], ) @@ -3218,9 +3218,9 @@ def test_api(self): np.testing.assert_allclose(res1, expected_res, rtol=1e-05) # dygraph - with fluid.dygraph.guard(): + with base.dygraph.guard(): np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64") - data_x = fluid.dygraph.to_variable(np_x) + data_x = base.dygraph.to_variable(np_x) z = paddle.log1p(data_x) np_z = z.numpy() z_expected = np.array(np.log1p(np_x)) @@ -3238,7 +3238,7 @@ def setUp(self): x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) out = np.square(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -3271,7 +3271,7 @@ def setUp(self): out = np.square(x) self.inputs = { - 'X': 
OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x)) + 'X': OpTest.np_dtype_to_base_dtype(convert_float_to_uint16(x)) } self.outputs = {'Out': convert_float_to_uint16(out)} @@ -3301,7 +3301,7 @@ def setUp(self): x = np.random.uniform(1, 2, self.shape).astype(self.dtype) out = np.power(x, 3) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.attrs = {'factor': 3.0} self.convert_input_output() @@ -3335,7 +3335,7 @@ def setUp(self): out = np.power(x, 3) self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(x), + 'X': OpTest.np_dtype_to_base_dtype(x), 'FactorTensor': np.array([3.0]).astype(self.dtype), } @@ -3351,7 +3351,7 @@ def test_check_grad(self): self.check_grad(['X'], 'Out') def test_api(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): input = np.random.uniform(1, 2, [11, 17]).astype("float32") x = paddle.static.data(name="x", shape=[11, 17], dtype="float32") res = paddle.static.data( @@ -3366,9 +3366,9 @@ def test_api(self): out_6 = paddle.pow(x, factor_2) self.assertEqual(('pow_res' in out_4.name), True) - exe = fluid.Executor(place=fluid.CPUPlace()) + exe = base.Executor(place=base.CPUPlace()) res_1, res_2, res, res_6 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x": input}, fetch_list=[out_1, out_2, res, out_6], ) @@ -3410,7 +3410,7 @@ def setUp(self): # The same reason with TestAbs out = ref_stanh(x, scale_a, scale_b) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.attrs = {'scale_a': scale_a, 'scale_b': scale_b} self.convert_input_output() @@ -3456,7 +3456,7 @@ def setUp(self): ) def test_static_api(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): x = paddle.static.data('X', [10, 12]) out = paddle.stanh(x, self.scale_a, self.scale_b) @@ -3474,18 +3474,18 @@ def test_dygraph_api(self): for r in [out]: np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) - def test_fluid_api(self): - with paddle.fluid.framework._static_guard(): - with fluid.program_guard(fluid.Program()): + def test_base_api(self): + with paddle.base.framework._static_guard(): + with base.program_guard(base.Program()): x = paddle.static.data('X', [10, 12], dtype="float32") out = paddle.stanh(x, self.scale_a, self.scale_b) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) res = exe.run(feed={'X': self.x_np}, fetch_list=[out]) out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b) np.testing.assert_allclose(out_ref, res[0], rtol=1e-05) def test_errors(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): # The input type must be Variable. 
self.assertRaises(TypeError, paddle.stanh, 1) @@ -3597,7 +3597,7 @@ def setUp(self): ) def test_static_api(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.softplus(x, self.beta, self.threshold) @@ -3620,7 +3620,7 @@ def test_dygraph_api(self): np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) def test_errors(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): # The input type must be Variable. self.assertRaises(TypeError, F.softplus, 1) @@ -3653,7 +3653,7 @@ def setUp(self): x = np.random.uniform(-1, 1, self.shape).astype(self.dtype) out = ref_softsign(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -3683,7 +3683,7 @@ def setUp(self): ) def test_static_api(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.softsign(x) @@ -3706,7 +3706,7 @@ def test_dygraph_api(self): np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) def test_errors(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): # The input type must be Variable. self.assertRaises(TypeError, F.softsign, 1) @@ -3741,7 +3741,7 @@ def setUp(self): x[np.abs(x) < 0.005] = 0.02 out = ref_thresholded_relu(x, threshold) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.attrs = {"threshold": threshold} self.convert_input_output() @@ -3774,7 +3774,7 @@ def setUp(self): ) def test_static_api(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.thresholded_relu(x, self.threshold) @@ -3797,7 +3797,7 @@ def test_dygraph_api(self): np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) def test_errors(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): # The input type must be Variable. 
self.assertRaises(TypeError, F.thresholded_relu, 1) @@ -3840,7 +3840,7 @@ def setUp(self): self.attrs = {'slope': self.slope, 'offset': self.offset} - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -3878,7 +3878,7 @@ def setUp(self): ) def test_static_api(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.hardsigmoid(x) @@ -3900,12 +3900,12 @@ def test_dygraph_api(self): for r in [out1, out2]: np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) - def test_fluid_api(self): - with paddle.fluid.framework._static_guard(): - with fluid.program_guard(fluid.Program()): + def test_base_api(self): + with paddle.base.framework._static_guard(): + with base.program_guard(base.Program()): x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out = paddle.nn.functional.hardsigmoid(x, slope=0.2) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) res = exe.run(feed={'X': self.x_np}, fetch_list=[out]) out_ref = ref_hardsigmoid(self.x_np, 0.2, 0.5) np.testing.assert_allclose(out_ref, res[0], rtol=1e-05) @@ -3916,7 +3916,7 @@ def test_fluid_api(self): np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05) def test_errors(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): # The input type must be Variable. self.assertRaises(TypeError, F.hardsigmoid, 1) @@ -3948,7 +3948,7 @@ def setUp(self): x = np.random.uniform(-1, 1, self.shape).astype(self.dtype) out = ref_swish(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.attrs = {'beta': 1.0} self.convert_input_output() @@ -3982,7 +3982,7 @@ def setUp(self): ) def test_static_api(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.swish(x) @@ -4004,18 +4004,18 @@ def test_dygraph_api(self): for r in [out1, out2]: np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) - def test_fluid_api(self): - with paddle.fluid.framework._static_guard(): - with fluid.program_guard(fluid.Program()): + def test_base_api(self): + with paddle.base.framework._static_guard(): + with base.program_guard(base.Program()): x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out = paddle.nn.functional.swish(x) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) res = exe.run(feed={'X': self.x_np}, fetch_list=[out]) out_ref = ref_swish(self.x_np) np.testing.assert_allclose(out_ref, res[0], rtol=1e-05) def test_errors(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): # The input type must be Variable. 
self.assertRaises(TypeError, F.swish, 1) @@ -4049,7 +4049,7 @@ def setUp(self): x = np.random.uniform(-1, 1, self.shape).astype(self.dtype) out = ref_mish(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.convert_input_output() @@ -4082,7 +4082,7 @@ def setUp(self): ) def test_static_api(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out1 = F.mish(x) @@ -4104,18 +4104,18 @@ def test_dygraph_api(self): for r in [out1, out2]: np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) - def test_fluid_api(self): - with paddle.fluid.framework._static_guard(): - with fluid.program_guard(fluid.Program()): + def test_base_api(self): + with paddle.base.framework._static_guard(): + with base.program_guard(base.Program()): x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out = paddle.nn.functional.mish(x) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) res = exe.run(feed={'X': self.x_np}, fetch_list=[out]) out_ref = ref_mish(self.x_np) np.testing.assert_allclose(out_ref, res[0], rtol=1e-05) def test_errors(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): # The input type must be Variable. self.assertRaises(TypeError, F.mish, 1) diff --git a/test/legacy_test/test_activation_sparse_op.py b/test/legacy_test/test_activation_sparse_op.py index 48bf62a5947ef..32da9b9483e7b 100644 --- a/test/legacy_test/test_activation_sparse_op.py +++ b/test/legacy_test/test_activation_sparse_op.py @@ -18,7 +18,7 @@ from op import Operator import paddle -from paddle.fluid import core +from paddle.base import core class TestSparseSquareOp(unittest.TestCase): diff --git a/test/legacy_test/test_adadelta_op.py b/test/legacy_test/test_adadelta_op.py index 42080a4280b55..a1733fa45a150 100644 --- a/test/legacy_test/test_adadelta_op.py +++ b/test/legacy_test/test_adadelta_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest import paddle -from paddle import fluid +from paddle import base def adadelta_wrapper( @@ -173,9 +173,9 @@ def test_adadelta_dygraph(self): def test_adadelta(self): paddle.enable_static() - place = fluid.CPUPlace() - main = fluid.Program() - with fluid.program_guard(main): + place = base.CPUPlace() + main = base.Program() + with base.program_guard(main): x = paddle.static.data(name='x', shape=[-1, 13], dtype='float32') y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') y_predict = paddle.static.nn.fc(x, size=1) @@ -191,9 +191,9 @@ def test_adadelta(self): train_reader = paddle.batch( paddle.dataset.uci_housing.train(), batch_size=1 ) - feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + feeder = base.DataFeeder(place=place, feed_list=[x, y]) + exe = base.Executor(place) + exe.run(base.default_startup_program()) for data in train_reader(): exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list) diff --git a/test/legacy_test/test_adagrad_op.py b/test/legacy_test/test_adagrad_op.py index 66f91de028dd5..552f19f6c0aff 100644 --- a/test/legacy_test/test_adagrad_op.py +++ b/test/legacy_test/test_adagrad_op.py @@ -20,7 +20,7 @@ from op import Operator import paddle -from paddle.fluid import core +from 
paddle.base import core def adamgrad_wrapper( diff --git a/test/legacy_test/test_adam_op.py b/test/legacy_test/test_adam_op.py index 0c5707c8e59fa..f0326fc54d744 100644 --- a/test/legacy_test/test_adam_op.py +++ b/test/legacy_test/test_adam_op.py @@ -19,8 +19,8 @@ from op import Operator import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def adam_wrapper( @@ -649,13 +649,13 @@ def test_check_output(self): class TestAdamOpV2(unittest.TestCase): def test_adam_op(self): - place = fluid.CPUPlace() + place = base.CPUPlace() shape = [2, 3, 8, 8] - exe = fluid.Executor(place) - train_prog = fluid.Program() - startup = fluid.Program() - with fluid.program_guard(train_prog, startup): - with fluid.unique_name.guard(): + exe = base.Executor(place) + train_prog = base.Program() + startup = base.Program() + with base.program_guard(train_prog, startup): + with base.unique_name.guard(): data = paddle.static.data(name="data", shape=shape) conv = paddle.static.nn.conv2d(data, 8, 3) loss = paddle.mean(conv) @@ -684,7 +684,7 @@ def test_adam_op(self): def test_adam_op_dygraph(self): paddle.disable_static() value = np.arange(26).reshape(2, 13).astype("float32") - a = fluid.dygraph.to_variable(value) + a = base.dygraph.to_variable(value) linear = paddle.nn.Linear(13, 5) adam = paddle.optimizer.Adam( @@ -732,7 +732,7 @@ def test_adam_op_with_state_dict(self): def test_adam_with_grad_clip(self): paddle.disable_static() value = np.arange(26).reshape(2, 13).astype("float32") - a = fluid.dygraph.to_variable(value) + a = base.dygraph.to_variable(value) linear = paddle.nn.Linear(13, 5) clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0) adam = paddle.optimizer.Adam( diff --git a/test/legacy_test/test_adam_optimizer_fp32_fp64.py b/test/legacy_test/test_adam_optimizer_fp32_fp64.py index b5a65b6938906..da78cd4403e7c 100644 --- a/test/legacy_test/test_adam_optimizer_fp32_fp64.py +++ b/test/legacy_test/test_adam_optimizer_fp32_fp64.py @@ -15,21 +15,21 @@ import unittest import paddle -from paddle import fluid +from paddle import base def get_places(): - places = [fluid.CPUPlace()] - if fluid.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places = [base.CPUPlace()] + if base.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) return places def main_test_func(place, dtype): - main = fluid.Program() - startup = fluid.Program() - with fluid.program_guard(main, startup): - with fluid.scope_guard(fluid.Scope()): + main = base.Program() + startup = base.Program() + with base.program_guard(main, startup): + with base.scope_guard(base.Scope()): x = paddle.static.data(name='x', shape=[None, 13], dtype=dtype) y = paddle.static.data(name='y', shape=[None, 1], dtype=dtype) y_predict = paddle.static.nn.fc(x, size=1) @@ -45,9 +45,9 @@ def main_test_func(place, dtype): train_reader = paddle.batch( paddle.dataset.uci_housing.train(), batch_size=1 ) - feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + feeder = base.DataFeeder(place=place, feed_list=[x, y]) + exe = base.Executor(place) + exe.run(base.default_startup_program()) for data in train_reader(): exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list) diff --git a/test/legacy_test/test_adamax_api.py b/test/legacy_test/test_adamax_api.py index 91f28a79d38fb..1fc1878d81995 100644 --- a/test/legacy_test/test_adamax_api.py +++ b/test/legacy_test/test_adamax_api.py @@ -17,7 +17,7 @@ import numpy as np 
import paddle -from paddle import fluid +from paddle import base class TestAdamaxAPI(unittest.TestCase): @@ -38,13 +38,13 @@ def test_adamax_api_dygraph(self): def test_adamax_api(self): paddle.enable_static() - place = fluid.CPUPlace() + place = base.CPUPlace() shape = [2, 3, 8, 8] - exe = fluid.Executor(place) - train_prog = fluid.Program() - startup = fluid.Program() - with fluid.program_guard(train_prog, startup): - with fluid.unique_name.guard(): + exe = base.Executor(place) + train_prog = base.Program() + startup = base.Program() + with base.program_guard(train_prog, startup): + with base.unique_name.guard(): data = paddle.static.data(name="data", shape=shape) conv = paddle.static.nn.conv2d(data, 8, 3) loss = paddle.mean(conv) diff --git a/test/legacy_test/test_adamw_op.py b/test/legacy_test/test_adamw_op.py index e94a0de7e41ff..a6a30d3af101a 100644 --- a/test/legacy_test/test_adamw_op.py +++ b/test/legacy_test/test_adamw_op.py @@ -20,7 +20,7 @@ from eager_op_test import OpTest import paddle -from paddle import fluid +from paddle import base from paddle.framework import core @@ -245,13 +245,13 @@ def test_adamw_op_coverage(self): def test_adamw_op(self): paddle.enable_static() - place = fluid.CPUPlace() + place = base.CPUPlace() shape = [2, 3, 8, 8] - exe = fluid.Executor(place) - train_prog = fluid.Program() - startup = fluid.Program() - with fluid.program_guard(train_prog, startup): - with fluid.unique_name.guard(): + exe = base.Executor(place) + train_prog = base.Program() + startup = base.Program() + with base.program_guard(train_prog, startup): + with base.unique_name.guard(): data = paddle.static.data(name="data", shape=shape) conv = paddle.static.nn.conv2d(data, 8, 3) loss = paddle.mean(conv) @@ -755,7 +755,7 @@ def get_numpy_output(param, grad, moment1, moment2, lr_ratio, t): def test_adamw_op(self): paddle.enable_static() - place = fluid.CUDAPlace(0) + place = base.CUDAPlace(0) learning_rate = 0.0001 beta1 = 0.85 @@ -763,10 +763,10 @@ def test_adamw_op(self): weight_decay = 0.01 epsilon = 1e-8 - train_prog = fluid.Program() - startup = fluid.Program() - with fluid.program_guard(train_prog, startup): - with fluid.unique_name.guard(): + train_prog = base.Program() + startup = base.Program() + with base.program_guard(train_prog, startup): + with base.unique_name.guard(): x = paddle.static.data( name='x', shape=[None, 10], dtype='float32' ) @@ -863,7 +863,7 @@ def get_numpy_output(param, grad, moment1, moment2, lr_ratio, t): "linear_1.b_0@GRAD", ] - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(startup) test_prog = train_prog.clone(for_test=True) diff --git a/test/legacy_test/test_adaptive_avg_pool1d.py b/test/legacy_test/test_adaptive_avg_pool1d.py index 3b77091f6e83e..bca37ba88794f 100644 --- a/test/legacy_test/test_adaptive_avg_pool1d.py +++ b/test/legacy_test/test_adaptive_avg_pool1d.py @@ -18,8 +18,8 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def adaptive_start_index(index, input_size, output_size): @@ -80,14 +80,14 @@ def avg_pool1D_forward_naive( class TestPool1D_API(unittest.TestCase): def setUp(self): np.random.seed(123) - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) def check_adaptive_avg_dygraph_results(self, place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = 
np.random.random([2, 3, 32]).astype("float32") - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) result = F.adaptive_avg_pool1d(input, output_size=16) result_np = avg_pool1D_forward_naive( input_np, ksize=[16], strides=[0], paddings=[0], adaptive=True @@ -107,7 +107,7 @@ def check_adaptive_avg_dygraph_results(self, place): np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_adaptive_avg_static_results(self, place): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input = paddle.static.data( name="input", shape=[2, 3, 32], dtype="float32" ) @@ -118,9 +118,9 @@ def check_adaptive_avg_static_results(self, place): input_np, ksize=[16], strides=[2], paddings=[0], adaptive=True ) - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": input_np}, fetch_list=[result], ) diff --git a/test/legacy_test/test_adaptive_avg_pool2d.py b/test/legacy_test/test_adaptive_avg_pool2d.py index 663ac74781597..9c6c0c96287a4 100644 --- a/test/legacy_test/test_adaptive_avg_pool2d.py +++ b/test/legacy_test/test_adaptive_avg_pool2d.py @@ -19,8 +19,8 @@ from test_attribute_var import UnittestBase import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard def adaptive_start_index(index, input_size, output_size): @@ -143,7 +143,7 @@ def test_static_graph(self): exe = paddle.static.Executor(place=place) [res_1, res_2, res_3, res_4, res_5] = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x": self.x_np}, fetch_list=[out_1, out_2, out_3, out_4, out_5], ) @@ -263,7 +263,7 @@ def test_static_graph(self): exe = paddle.static.Executor(place=place) [res_1, res_2, res_3, res_4, res_5] = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x": self.x_np}, fetch_list=[out_1, out_2, out_3, out_4, out_5], ) diff --git a/test/legacy_test/test_adaptive_avg_pool3d.py b/test/legacy_test/test_adaptive_avg_pool3d.py index d5054ba2107af..f3aa4cdf81efa 100755 --- a/test/legacy_test/test_adaptive_avg_pool3d.py +++ b/test/legacy_test/test_adaptive_avg_pool3d.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def adaptive_start_index(index, input_size, output_size): @@ -164,7 +164,7 @@ def test_static_graph(self): exe = paddle.static.Executor(place=place) [res_1, res_2, res_3, res_4, res_5] = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x": self.x_np}, fetch_list=[out_1, out_2, out_3, out_4, out_5], ) @@ -296,7 +296,7 @@ def test_static_graph(self): exe = paddle.static.Executor(place=place) [res_1, res_2, res_3, res_4, res_5] = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x": self.x_np}, fetch_list=[out_1, out_2, out_3, out_4, out_5], ) diff --git a/test/legacy_test/test_adaptive_max_pool1d.py b/test/legacy_test/test_adaptive_max_pool1d.py index f303a761134bf..b4f44790af5d3 100644 --- a/test/legacy_test/test_adaptive_max_pool1d.py +++ b/test/legacy_test/test_adaptive_max_pool1d.py @@ -19,8 +19,8 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def 
adaptive_start_index(index, input_size, output_size): @@ -71,14 +71,14 @@ def max_pool1D_forward_naive( class TestPool1D_API(unittest.TestCase): def setUp(self): np.random.seed(123) - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) def check_adaptive_max_dygraph_results(self, place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([2, 3, 32]).astype("float32") - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) result = F.adaptive_max_pool1d(input, output_size=16) result_np = max_pool1D_forward_naive( @@ -94,7 +94,7 @@ def check_adaptive_max_dygraph_results(self, place): def check_adaptive_max_static_results(self, place): with paddle_static_guard(): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input = paddle.static.data( name="input", shape=[2, 3, 32], dtype="float32" ) @@ -109,9 +109,9 @@ def check_adaptive_max_static_results(self, place): adaptive=True, ) - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": input_np}, fetch_list=[result], ) diff --git a/test/legacy_test/test_adaptive_max_pool2d.py b/test/legacy_test/test_adaptive_max_pool2d.py index 104271b955257..e4625fbd8eb33 100644 --- a/test/legacy_test/test_adaptive_max_pool2d.py +++ b/test/legacy_test/test_adaptive_max_pool2d.py @@ -19,8 +19,8 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def adaptive_start_index(index, input_size, output_size): @@ -144,7 +144,7 @@ def test_static_graph(self): exe = paddle.static.Executor(place=place) [res_1, res_2, res_3, res_5] = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x": self.x_np}, fetch_list=[out_1, out_2, out_3, out_5], ) @@ -250,7 +250,7 @@ def test_static_graph(self): exe = paddle.static.Executor(place=place) [res_1, res_2, res_3, res_5] = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x": self.x_np}, fetch_list=[out_1, out_2, out_3, out_5], ) diff --git a/test/legacy_test/test_adaptive_max_pool3d.py b/test/legacy_test/test_adaptive_max_pool3d.py index 13eed4823d88f..b09b5779fc444 100755 --- a/test/legacy_test/test_adaptive_max_pool3d.py +++ b/test/legacy_test/test_adaptive_max_pool3d.py @@ -19,8 +19,8 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def adaptive_start_index(index, input_size, output_size): @@ -165,7 +165,7 @@ def test_static_graph(self): exe = paddle.static.Executor(place=place) [res_1, res_2, res_3, res_5] = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x": self.x_np}, fetch_list=[out_1, out_2, out_3, out_5], ) @@ -275,7 +275,7 @@ def test_static_graph(self): exe = paddle.static.Executor(place=place) [res_1, res_2, res_3, res_5] = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x": self.x_np}, fetch_list=[out_1, out_2, out_3, out_5], ) diff --git a/test/legacy_test/test_add_position_encoding_op.py b/test/legacy_test/test_add_position_encoding_op.py index 1f4906045b01b..48f718ae4f98f 100644 --- 
a/test/legacy_test/test_add_position_encoding_op.py +++ b/test/legacy_test/test_add_position_encoding_op.py @@ -55,7 +55,7 @@ def setUp(self): self.init_input_output() self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), + 'X': OpTest.np_dtype_to_base_dtype(self.x), } self.outputs = {'Out': self.out} self.attrs = {'alpha': self.alpha, 'beta': self.beta} diff --git a/test/legacy_test/test_add_reader_dependency.py b/test/legacy_test/test_add_reader_dependency.py index abcde1833f4e6..9dfd4a500d1f7 100644 --- a/test/legacy_test/test_add_reader_dependency.py +++ b/test/legacy_test/test_add_reader_dependency.py @@ -18,8 +18,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid.layer_helper import LayerHelper +from paddle import base +from paddle.base.layer_helper import LayerHelper def inplace_add(x, bias): @@ -40,18 +40,18 @@ def setUp(self): self.use_double_buffer = True def test_main(self): - self.run_main(fluid.CPUPlace()) + self.run_main(base.CPUPlace()) - if fluid.is_compiled_with_cuda(): - self.run_main(fluid.CUDAPlace(0)) + if base.is_compiled_with_cuda(): + self.run_main(base.CUDAPlace(0)) def run_main(self, place): - with fluid.program_guard(fluid.Program(), fluid.Program()): - with fluid.scope_guard(fluid.Scope()): + with base.program_guard(base.Program(), base.Program()): + with base.scope_guard(base.Scope()): tmp_in = paddle.static.data( name='tmp_in', dtype='float32', shape=[1] ) - loader = fluid.io.DataLoader.from_generator( + loader = base.io.DataLoader.from_generator( feed_list=[tmp_in], capacity=16, iterable=False, @@ -71,9 +71,9 @@ def data_source(): persistable_in.persistable = True persistable_in = inplace_add(persistable_in, bias=1) - prog = fluid.CompiledProgram(fluid.default_main_program()) + prog = base.CompiledProgram(base.default_main_program()) - exe = fluid.Executor(place) + exe = base.Executor(place) loader.set_batch_generator(data_source) loader.start() @@ -95,12 +95,12 @@ def data_source(): self.assertEqual(ret.shape, (1,)) self.assertEqual(ret[0], batch_id) batch_id += 1 - except fluid.core.EOFException: + except base.core.EOFException: loader.reset() self.assertEqual(batch_id, self.batch_num) t = ( - fluid.global_scope() + base.global_scope() .find_var(persistable_in.name) .get_tensor() ) diff --git a/test/legacy_test/test_addmm_op.py b/test/legacy_test/test_addmm_op.py index 1f92270cbeeac..201d86f7d17c4 100644 --- a/test/legacy_test/test_addmm_op.py +++ b/test/legacy_test/test_addmm_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard class TestAddMMOp(OpTest): @@ -119,14 +119,14 @@ def test_errors(self): with program_guard(Program(), Program()): # The input type of addmm_op must be Variable. 
- input = fluid.create_lod_tensor( - np.array([[-1, -1], [-1, -1]]), [[2]], fluid.CPUPlace() + input = base.create_lod_tensor( + np.array([[-1, -1], [-1, -1]]), [[2]], base.CPUPlace() ) - x1 = fluid.create_lod_tensor( - np.array([[-1, -1], [-1, -1]]), [[2]], fluid.CPUPlace() + x1 = base.create_lod_tensor( + np.array([[-1, -1], [-1, -1]]), [[2]], base.CPUPlace() ) - x2 = fluid.create_lod_tensor( - np.array([[-1, -1], [-1, -1]]), [[2]], fluid.CPUPlace() + x2 = base.create_lod_tensor( + np.array([[-1, -1], [-1, -1]]), [[2]], base.CPUPlace() ) self.assertRaises(TypeError, paddle.addmm, input, x1, x2) @@ -323,10 +323,10 @@ def test_api_with_dygraph(self): np_x = np.random.random((20, 6)).astype(np.float32) np_y = np.random.random((6, 30)).astype(np.float32) - with fluid.dygraph.guard(): - input = fluid.dygraph.to_variable(np_input) - x = fluid.dygraph.to_variable(np_x) - y = fluid.dygraph.to_variable(np_y) + with base.dygraph.guard(): + input = base.dygraph.to_variable(np_input) + x = base.dygraph.to_variable(np_x) + y = base.dygraph.to_variable(np_y) out = paddle.tensor.addmm(input, x, y) np.testing.assert_allclose( np_input + np.dot(np_x, np_y), out.numpy(), rtol=1e-5, atol=1e-8 diff --git a/test/legacy_test/test_affine_grid_function.py b/test/legacy_test/test_affine_grid_function.py index 20114a5304d5d..63f20d663971c 100644 --- a/test/legacy_test/test_affine_grid_function.py +++ b/test/legacy_test/test_affine_grid_function.py @@ -17,9 +17,9 @@ import numpy as np import paddle -import paddle.fluid.dygraph as dg +import paddle.base.dygraph as dg import paddle.nn.functional as F -from paddle import fluid +from paddle import base class AffineGridTestCase(unittest.TestCase): @@ -45,12 +45,12 @@ def __init__( def setUp(self): self.theta = np.random.randn(*(self.theta_shape)).astype(self.dtype) - def fluid_layer(self, place): + def base_layer(self, place): paddle.enable_static() - main = fluid.Program() - start = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, start): + main = base.Program() + start = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, start): theta_var = paddle.static.data( "input", self.theta_shape, dtype=self.dtype ) @@ -58,17 +58,17 @@ def fluid_layer(self, place): theta_var, self.output_shape ) feed_dict = {"input": self.theta} - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(start) (y_np,) = exe.run(main, feed=feed_dict, fetch_list=[y_var]) return y_np def functional(self, place): paddle.enable_static() - main = fluid.Program() - start = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, start): + main = base.Program() + start = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, start): theta_var = paddle.static.data( "input", self.theta_shape, dtype=self.dtype ) @@ -78,7 +78,7 @@ def functional(self, place): align_corners=self.align_corners, ) feed_dict = {"input": self.theta} - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(start) (y_np,) = exe.run(main, feed=feed_dict, fetch_list=[y_var]) return y_np @@ -100,8 +100,8 @@ def paddle_dygraph_layer(self): return y_np def _test_equivalence(self, place): - place = fluid.CPUPlace() - result1 = self.fluid_layer(place) + place = base.CPUPlace() + result1 = self.base_layer(place) result2 = self.functional(place) result3 = self.paddle_dygraph_layer() if self.align_corners: @@ -109,17 +109,17 @@ def _test_equivalence(self, place): np.testing.assert_array_almost_equal(result2, 
result3) def runTest(self): - place = fluid.CPUPlace() + place = base.CPUPlace() self._test_equivalence(place) - if fluid.core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) self._test_equivalence(place) class AffineGridErrorTestCase(AffineGridTestCase): def runTest(self): - place = fluid.CPUPlace() + place = base.CPUPlace() with dg.guard(place): with self.assertRaises(ValueError): self.paddle_dygraph_layer() diff --git a/test/legacy_test/test_affine_grid_op.py b/test/legacy_test/test_affine_grid_op.py index c623f68554ce1..0bb9f2a19b435 100644 --- a/test/legacy_test/test_affine_grid_op.py +++ b/test/legacy_test/test_affine_grid_op.py @@ -152,7 +152,7 @@ def initTestCase(self): self.output_shape = np.array([20, 2, 5, 7]).astype("int32") self.dynamic_shape = True self.use_cudnn = True - if paddle.fluid.core.is_compiled_with_rocm(): + if paddle.base.core.is_compiled_with_rocm(): self.use_cudnn = ( False # ROCM platform do not have MIOPEN kernel for affine_grid ) diff --git a/test/legacy_test/test_allclose_layer.py b/test/legacy_test/test_allclose_layer.py index 3bf2d34fc24f5..7357199b55ced 100644 --- a/test/legacy_test/test_allclose_layer.py +++ b/test/legacy_test/test_allclose_layer.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base paddle.enable_static() @@ -37,9 +37,9 @@ def allclose_check(self, use_cuda, dtype='float32'): a, b, rtol=0.01, atol=0.0, name="corner_case" ) - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() + exe = base.Executor(place) + exe.run(base.default_startup_program()) x = np.array([10000.0, 1e-07]).astype(dtype) y = np.array([10000.1, 1e-08]).astype(dtype) @@ -73,33 +73,33 @@ def allclose_check(self, use_cuda, dtype='float32'): self.assertEqual(result_c, corner_res) def test_allclose_cpu_fp32(self): - main = fluid.Program() - startup = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, startup): + main = base.Program() + startup = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, startup): self.allclose_check(use_cuda=False, dtype='float32') def test_allclose_cpu_fp64(self): - main = fluid.Program() - startup = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, startup): + main = base.Program() + startup = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, startup): self.allclose_check(use_cuda=False, dtype='float64') def test_allclose_gpu_fp32(self): - if fluid.core.is_compiled_with_cuda(): - main = fluid.Program() - startup = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, startup): + if base.core.is_compiled_with_cuda(): + main = base.Program() + startup = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, startup): self.allclose_check(use_cuda=True, dtype='float32') def test_allclose_gpu_fp64(self): - if fluid.core.is_compiled_with_cuda(): - main = fluid.Program() - startup = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, startup): + if base.core.is_compiled_with_cuda(): + main = base.Program() + startup = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, startup): self.allclose_check(use_cuda=True, dtype='float64') def test_dygraph_mode(self): 
@@ -114,7 +114,7 @@ def test_dygraph_mode(self): x_5 = np.array([10.1]).astype("float64") y_5 = np.array([10]).astype("float64") - with fluid.dygraph.guard(): + with base.dygraph.guard(): x_v_1 = paddle.to_tensor(x_1) y_v_1 = paddle.to_tensor(y_1) ret_1 = paddle.allclose( diff --git a/test/legacy_test/test_allclose_op.py b/test/legacy_test/test_allclose_op.py index d79d3cd69af49..e272e98403903 100644 --- a/test/legacy_test/test_allclose_op.py +++ b/test/legacy_test/test_allclose_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest import paddle -from paddle.fluid import core +from paddle.base import core class TestAllcloseOp(OpTest): diff --git a/test/legacy_test/test_angle_op.py b/test/legacy_test/test_angle_op.py index 6321a06606de9..1433bcd117b09 100644 --- a/test/legacy_test/test_angle_op.py +++ b/test/legacy_test/test_angle_op.py @@ -19,7 +19,7 @@ import paddle from paddle import static -from paddle.fluid import core, dygraph +from paddle.base import core, dygraph paddle.enable_static() diff --git a/test/legacy_test/test_apply_pass_to_program.py b/test/legacy_test/test_apply_pass_to_program.py index b5a81df5c27ca..bb76bae2b3b59 100644 --- a/test/legacy_test/test_apply_pass_to_program.py +++ b/test/legacy_test/test_apply_pass_to_program.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid.framework import _apply_pass +from paddle import base +from paddle.base.framework import _apply_pass from paddle.framework.ir import apply_build_strategy from paddle.nn import CrossEntropyLoss from paddle.vision.models import resnet50 @@ -79,7 +79,7 @@ class TestIRPassBase(unittest.TestCase): def setUp(self): paddle.enable_static() if paddle.is_compiled_with_cuda(): - fluid.set_flags( + base.set_flags( { 'FLAGS_cudnn_deterministic': 1, 'FLAGS_max_inplace_grad_add': 6, diff --git a/test/legacy_test/test_arange.py b/test/legacy_test/test_arange.py index 9412cb992aa3a..19390754048f4 100644 --- a/test/legacy_test/test_arange.py +++ b/test/legacy_test/test_arange.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core from paddle.static import Program, program_guard diff --git a/test/legacy_test/test_arg_min_max_op.py b/test/legacy_test/test_arg_min_max_op.py index fba469a0e333e..000e5d88b080b 100644 --- a/test/legacy_test/test_arg_min_max_op.py +++ b/test/legacy_test/test_arg_min_max_op.py @@ -20,7 +20,7 @@ from test_attribute_var import UnittestBase import paddle -from paddle.fluid import Program, core, program_guard +from paddle.base import Program, core, program_guard class BaseTestCase(OpTest): diff --git a/test/legacy_test/test_arg_min_max_v2_op.py b/test/legacy_test/test_arg_min_max_v2_op.py index bca85ebd4887e..f7e79b56e9f0b 100644 --- a/test/legacy_test/test_arg_min_max_v2_op.py +++ b/test/legacy_test/test_arg_min_max_v2_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard def create_kernel_case(op_type, numpy_op_type): @@ -166,7 +166,7 @@ def setUp(self): np.random.seed(123) self.input_data = np.random.rand(10, 10).astype("float32") self.places = [] - self.places.append(fluid.CPUPlace()) + self.places.append(base.CPUPlace()) if core.is_compiled_with_cuda(): self.places.append(paddle.CUDAPlace(0)) self.op = eval("paddle.%s" % (op_type)) diff --git 
a/test/legacy_test/test_argsort_op.py b/test/legacy_test/test_argsort_op.py index fb96f0f9bee54..4ecbe2a8386b1 100644 --- a/test/legacy_test/test_argsort_op.py +++ b/test/legacy_test/test_argsort_op.py @@ -18,11 +18,11 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.backward import append_backward -from paddle.fluid.executor import Executor -from paddle.fluid.framework import Program, grad_var_name +from paddle import base +from paddle.base import core +from paddle.base.backward import append_backward +from paddle.base.executor import Executor +from paddle.base.framework import Program, grad_var_name np.random.seed(123) paddle.enable_static() @@ -85,7 +85,7 @@ def setUp(self): self.input_shape, self.axis, self.descending, self.dtype ) - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): x = paddle.static.data( name="x", shape=[-1] + list(self.input_shape), dtype=self.dtype ) @@ -141,7 +141,7 @@ def backward(self): def test_backward(self, numeric_grad_delta=1e-5, max_relative_error=1e-7): self.check_forward() - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): append_backward(self.loss) ana_grad = [np.array(x) for x in self.backward()] @@ -356,14 +356,14 @@ def setUp(self): self.place = core.CPUPlace() def test_error(self): - def test_fluid_var_type(): - with fluid.program_guard(fluid.Program()): + def test_base_var_type(): + with base.program_guard(base.Program()): x = [1] output = paddle.argsort(x=x) - self.assertRaises(TypeError, test_fluid_var_type) + self.assertRaises(TypeError, test_base_var_type) def test_paddle_var_type(): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): x = [1] output = paddle.argsort(x=x) self.assertRaises(TypeError, test_paddle_var_type) @@ -393,7 +393,7 @@ def setUp(self): self.data = np.random.rand(*self.input_shape) def test_api(self): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): input = paddle.static.data( name="input", shape=self.input_shape, dtype="float64" ) @@ -401,7 +401,7 @@ def test_api(self): output = paddle.argsort(input, axis=self.axis) output2 = paddle.argsort(input, axis=self.axis, descending=True) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) result, result2 = exe.run( feed={'input': self.data}, fetch_list=[output, output2] ) diff --git a/test/legacy_test/test_array_read_write_op.py b/test/legacy_test/test_array_read_write_op.py index 208bf7483eb6c..3e76d7e3d309f 100644 --- a/test/legacy_test/test_array_read_write_op.py +++ b/test/legacy_test/test_array_read_write_op.py @@ -17,11 +17,11 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard -from paddle.fluid.backward import append_backward -from paddle.fluid.executor import Executor -from paddle.fluid.framework import default_main_program +from paddle import base +from paddle.base import Program, core, program_guard +from paddle.base.backward import append_backward +from paddle.base.executor import Executor +from paddle.base.framework import default_main_program def _test_read_write(x): @@ -104,10 +104,10 @@ def test_read_write(self): # the input gradient should also be 1 self.assertAlmostEqual(1.0, g_out_sum, delta=0.1) - with fluid.dygraph.guard(place): - tensor1 
= fluid.dygraph.to_variable(tensor) - tensor2 = fluid.dygraph.to_variable(tensor) - tensor3 = fluid.dygraph.to_variable(tensor) + with base.dygraph.guard(place): + tensor1 = base.dygraph.to_variable(tensor) + tensor2 = base.dygraph.to_variable(tensor) + tensor3 = base.dygraph.to_variable(tensor) x_dygraph = [tensor1, tensor2, tensor3] for each_x in x_dygraph: each_x.stop_gradient = False diff --git a/test/legacy_test/test_assert_op.py b/test/legacy_test/test_assert_op.py index f94d60f67f85b..3f153de651272 100644 --- a/test/legacy_test/test_assert_op.py +++ b/test/legacy_test/test_assert_op.py @@ -15,17 +15,17 @@ import unittest import paddle -from paddle import fluid +from paddle import base from paddle.static.nn.control_flow import Assert class TestAssertOp(unittest.TestCase): def run_network(self, net_func): - main_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(main_program, startup_program): + main_program = base.Program() + startup_program = base.Program() + with base.program_guard(main_program, startup_program): net_func() - exe = fluid.Executor() + exe = base.Executor() exe.run(main_program) def test_assert_true(self): diff --git a/test/legacy_test/test_assign_op.py b/test/legacy_test/test_assign_op.py index e42d29cb0b1c6..e9dc9889d5fc3 100644 --- a/test/legacy_test/test_assign_op.py +++ b/test/legacy_test/test_assign_op.py @@ -21,9 +21,9 @@ from eager_op_test import convert_float_to_uint16, convert_uint16_to_float import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard -from paddle.fluid.backward import append_backward +from paddle import base +from paddle.base import Program, core, program_guard +from paddle.base.backward import append_backward class TestAssignOp(eager_op_test.OpTest): @@ -126,11 +126,11 @@ def test_assign_LoDTensorArray(self): append_backward(mean) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) feed_x = np.random.random(size=(100, 10)).astype('float32') ones = np.ones((100, 10)).astype('float32') feed_add = feed_x + ones @@ -149,8 +149,8 @@ def test_errors(self): paddle.enable_static() with program_guard(Program(), Program()): # The type of input must be Variable or numpy.ndarray. - x1 = fluid.create_lod_tensor( - np.array([[-1]]), [[1]], fluid.CPUPlace() + x1 = base.create_lod_tensor( + np.array([[-1]]), [[1]], base.CPUPlace() ) self.assertRaises(TypeError, paddle.assign, x1) # When the type of input is numpy.ndarray, the dtype of input must be float32, int32. 
@@ -179,11 +179,11 @@ def test_assign_LoDTensorArray(self): append_backward(mean) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) feed_x = np.random.random(size=(100, 10)).astype('float32') ones = np.ones((100, 10)).astype('float32') feed_add = feed_x + ones @@ -198,7 +198,7 @@ def test_assign_LoDTensorArray(self): def test_assign_NumpyArray(self): for dtype in [np.bool_, np.float32, np.int32, np.int64]: - with fluid.dygraph.guard(): + with base.dygraph.guard(): array = np.random.random(size=(100, 10)).astype(dtype) result1 = paddle.zeros(shape=[3, 3], dtype='float32') paddle.assign(array, result1) @@ -279,8 +279,8 @@ def test_errors(self): paddle.enable_static() with program_guard(Program(), Program()): # The type of input must be Variable or numpy.ndarray. - x1 = fluid.create_lod_tensor( - np.array([[-1]]), [[1]], fluid.CPUPlace() + x1 = base.create_lod_tensor( + np.array([[-1]]), [[1]], base.CPUPlace() ) self.assertRaises(TypeError, paddle.assign, x1) # When the type of input is numpy.ndarray, the dtype of input must be float32, int32. @@ -321,9 +321,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) paddle.disable_static() @@ -353,9 +353,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) paddle.disable_static() diff --git a/test/legacy_test/test_assign_pos_op.py b/test/legacy_test/test_assign_pos_op.py index a98cb87379121..57a4a75fb1bf3 100644 --- a/test/legacy_test/test_assign_pos_op.py +++ b/test/legacy_test/test_assign_pos_op.py @@ -19,7 +19,7 @@ import paddle from paddle.distributed.models.moe import utils -from paddle.fluid import core +from paddle.base import core def assign_pos(x, _cum_count): diff --git a/test/legacy_test/test_assign_value_op.py b/test/legacy_test/test_assign_value_op.py index 7cb5dece346c8..28f9b0fba0597 100644 --- a/test/legacy_test/test_assign_value_op.py +++ b/test/legacy_test/test_assign_value_op.py @@ -18,12 +18,12 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import framework +from paddle import base +from paddle.base import framework def assign_value_wrapper( - shape=[], dtype=fluid.core.VarDesc.VarType.FP32, values=0.0 + shape=[], dtype=base.core.VarDesc.VarType.FP32, values=0.0 ): tensor = paddle.Tensor() return paddle._C_ops.assign_value_( @@ -80,9 +80,9 @@ def setUp(self): self.dtype ) self.place = ( - fluid.CUDAPlace(0) - if fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.is_compiled_with_cuda() + else base.CPUPlace() ) def init_dtype(self): @@ -90,12 +90,12 @@ def init_dtype(self): def test_assign(self): with eager_op_test.paddle_static_guard(): - main_program = fluid.Program() - with fluid.program_guard(main_program): + main_program = base.Program() + with base.program_guard(main_program): x = paddle.tensor.create_tensor(dtype=self.dtype) paddle.assign(self.value, output=x) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) [fetched_x] = exe.run(main_program, feed={}, fetch_list=[x]) 
np.testing.assert_array_equal(fetched_x, self.value) self.assertEqual(fetched_x.dtype, self.value.dtype) @@ -119,9 +119,9 @@ def setUp(self): np.bool_ ) self.place = ( - fluid.CUDAPlace(0) - if fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.is_compiled_with_cuda() + else base.CPUPlace() ) def init_dtype(self): diff --git a/test/legacy_test/test_async_read_write.py b/test/legacy_test/test_async_read_write.py index 98aee8d905669..39374fe1cdcf0 100644 --- a/test/legacy_test/test_async_read_write.py +++ b/test/legacy_test/test_async_read_write.py @@ -18,7 +18,7 @@ import paddle from paddle.device import cuda -from paddle.fluid import core +from paddle.base import core class TestAsyncRead(unittest.TestCase): diff --git a/test/legacy_test/test_atan2_op.py b/test/legacy_test/test_atan2_op.py index ddcaa40dee980..74d5bb7d8f290 100644 --- a/test/legacy_test/test_atan2_op.py +++ b/test/legacy_test/test_atan2_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() np.random.seed(0) diff --git a/test/legacy_test/test_attribute_var.py b/test/legacy_test/test_attribute_var.py index b74141a10ebd5..50593501eb92b 100644 --- a/test/legacy_test/test_attribute_var.py +++ b/test/legacy_test/test_attribute_var.py @@ -20,7 +20,7 @@ import paddle import paddle.inference as paddle_infer -from paddle.fluid.framework import OpProtoHolder, Program, program_guard +from paddle.base.framework import OpProtoHolder, Program, program_guard paddle.enable_static() diff --git a/test/legacy_test/test_auc_op.py b/test/legacy_test/test_auc_op.py index 81261ab5b0e45..64b617c1eee22 100644 --- a/test/legacy_test/test_auc_op.py +++ b/test/legacy_test/test_auc_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest import paddle -from paddle import fluid +from paddle import base class TestAucOp(OpTest): @@ -139,7 +139,7 @@ def test_static(self): class TestAucOpError(unittest.TestCase): def test_errors(self): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): def test_type1(): data1 = paddle.static.data( diff --git a/test/legacy_test/test_auto_growth_allocator_gpu.py b/test/legacy_test/test_auto_growth_allocator_gpu.py index f2edff5eac4a8..3ac11c1baf86f 100644 --- a/test/legacy_test/test_auto_growth_allocator_gpu.py +++ b/test/legacy_test/test_auto_growth_allocator_gpu.py @@ -17,10 +17,10 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base # it should be set at the beginning -if fluid.is_compiled_with_cuda(): +if base.is_compiled_with_cuda(): paddle.set_flags( { 'FLAGS_allocator_strategy': 'auto_growth', @@ -32,24 +32,24 @@ class TestMemoryLimit(unittest.TestCase): def setUp(self): self._limit = 10 - if fluid.is_compiled_with_cuda(): + if base.is_compiled_with_cuda(): paddle.set_flags({'FLAGS_gpu_memory_limit_mb': 10}) def test_allocate(self): - if not fluid.is_compiled_with_cuda(): + if not base.is_compiled_with_cuda(): return other_dim = int(1024 * 1024 / 4) - place = fluid.CUDAPlace(0) - t = fluid.LoDTensor() + place = base.CUDAPlace(0) + t = base.LoDTensor() t.set( np.ndarray([int(self._limit / 2), other_dim], dtype='float32'), place, ) del t - t = fluid.LoDTensor() + t = base.LoDTensor() large_np = np.ndarray([2 * self._limit, other_dim], dtype='float32') try: @@ -61,7 +61,7 @@ def test_allocate(self): class TestChunkSize(unittest.TestCase): def test_allocate(self): - 
if not fluid.is_compiled_with_cuda(): + if not base.is_compiled_with_cuda(): return paddle.rand([1024]) diff --git a/test/legacy_test/test_auto_parallel_cost_model.py b/test/legacy_test/test_auto_parallel_cost_model.py index 9c32caf214e44..d3b298309656b 100644 --- a/test/legacy_test/test_auto_parallel_cost_model.py +++ b/test/legacy_test/test_auto_parallel_cost_model.py @@ -29,7 +29,7 @@ from paddle.distributed.auto_parallel.static.partitioner import Partitioner from paddle.distributed.auto_parallel.static.reshard import Resharder from paddle.distributed.fleet import auto -from paddle.fluid import core +from paddle.base import core paddle.enable_static() _global_parallel_strategy = "dp_mp_pp" diff --git a/test/legacy_test/test_auto_parallel_mapper.py b/test/legacy_test/test_auto_parallel_mapper.py index bcf791dd9711f..f80b637cc520c 100644 --- a/test/legacy_test/test_auto_parallel_mapper.py +++ b/test/legacy_test/test_auto_parallel_mapper.py @@ -21,7 +21,7 @@ import paddle import paddle.nn.functional as F -from paddle import fluid, nn, static, utils +from paddle import base, nn, static, utils from paddle.distributed import fleet from paddle.distributed.auto_parallel.static.cluster import Cluster from paddle.distributed.auto_parallel.static.completion import Completer @@ -39,7 +39,7 @@ from paddle.distributed.auto_parallel.static.partitioner import Partitioner from paddle.distributed.auto_parallel.static.reshard import Resharder from paddle.distributed.fleet import auto -from paddle.fluid import core +from paddle.base import core if os.getenv("CUDA_VISIBLE_DEVICES") is not None: os.environ["CUDA_VISIBLE_DEVICES"] = "" @@ -595,7 +595,7 @@ def test_mapper_misc(self): ring_id = 0 root_id = 0 nranks = 2 - with fluid.program_guard(train_program, startup_program): + with base.program_guard(train_program, startup_program): input = paddle.static.data( name="input", shape=[-1, 10, 10], dtype='float32' ) @@ -635,8 +635,8 @@ def test_mapper_misc(self): inputs={"X": input}, outputs={"Out": output}, attrs={ - "in_dtype": fluid.core.VarDesc.VarType.FP32, - "out_dtype": fluid.core.VarDesc.VarType.FP32, + "in_dtype": base.core.VarDesc.VarType.FP32, + "out_dtype": base.core.VarDesc.VarType.FP32, }, ) self.assertRaises(ValueError, get_comm_volume, cast_op, 0, 1) diff --git a/test/legacy_test/test_auto_search_dist_op.py b/test/legacy_test/test_auto_search_dist_op.py index 369fdec36e55a..4567aafab5949 100644 --- a/test/legacy_test/test_auto_search_dist_op.py +++ b/test/legacy_test/test_auto_search_dist_op.py @@ -23,7 +23,7 @@ from paddle.distributed.auto_parallel.static.operators.common import ( get_distributed_operator_impl_container, ) -from paddle.fluid import core +from paddle.base import core paddle.enable_static() device = "gpu" if core.is_compiled_with_cuda() else "cpu" diff --git a/test/legacy_test/test_avoid_twice_initialization.py b/test/legacy_test/test_avoid_twice_initialization.py index 0f29188399a83..5afb4cd20ccc3 100644 --- a/test/legacy_test/test_avoid_twice_initialization.py +++ b/test/legacy_test/test_avoid_twice_initialization.py @@ -15,12 +15,12 @@ import unittest import paddle -from paddle import fluid +from paddle import base class TestAvoidTwiceInitialization(unittest.TestCase): def test_avoid_twice_initialization(self): - cur_program = fluid.Program() + cur_program = base.Program() cur_block = cur_program.current_block() var = cur_block.create_parameter( initializer=paddle.nn.initializer.Constant(value=0.01), diff --git a/test/legacy_test/test_backward.py 
b/test/legacy_test/test_backward.py index 55fd9b85227bc..2ae9ede04987a 100644 --- a/test/legacy_test/test_backward.py +++ b/test/legacy_test/test_backward.py @@ -18,8 +18,8 @@ import paddle import paddle.nn.functional as F -from paddle import fluid, static -from paddle.fluid import backward +from paddle import base, static +from paddle.base import backward class BackwardNet: @@ -59,16 +59,16 @@ class TestBackward(unittest.TestCase): def _check_all(self, net): place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) - main = fluid.Program() - startup = fluid.Program() + main = base.Program() + startup = base.Program() - with fluid.program_guard(main, startup): + with base.program_guard(main, startup): loss = net.build_model() self._check_backward(loss, main) @@ -96,11 +96,11 @@ def _check_backward(self, loss, main_program): # update no_grad_dict block_no_grad_set.update(no_grad_vars) no_grad_dict[global_block_idx].update( - list(map(fluid.backward._append_grad_suffix_, block_no_grad_set)) + list(map(base.backward._append_grad_suffix_, block_no_grad_set)) ) def _check_params_grad(self, loss, parameter_list=None, no_grad_set=None): - params_grads = fluid.backward.append_backward( + params_grads = base.backward.append_backward( loss, parameter_list, no_grad_set ) params_names = { @@ -111,7 +111,7 @@ def _check_params_grad(self, loss, parameter_list=None, no_grad_set=None): return params_grads def _check_stop_gradient(self, program): - no_grad_dict = fluid.backward._get_stop_gradients_(program) + no_grad_dict = base.backward._get_stop_gradients_(program) if no_grad_dict is not None and isinstance(no_grad_dict, dict): self.assertSetEqual( no_grad_dict[self.global_block_idx], @@ -126,11 +126,11 @@ def _check_op_path(self, root_block, outputs, inputs=[], no_grad_dict=None): else: block_no_grad_set = set( map( - fluid.backward._strip_grad_suffix_, + base.backward._strip_grad_suffix_, no_grad_dict[self.global_block_idx], ) ) - op_path = fluid.backward._find_op_path_( + op_path = base.backward._find_op_path_( root_block, outputs, inputs, block_no_grad_set ) op_types = [op.type for op in op_path] @@ -141,7 +141,7 @@ def _check_op_path(self, root_block, outputs, inputs=[], no_grad_dict=None): def _check_find_no_grad_vars( self, root_block, op_path, targets, block_no_grad_set ): - no_grad_vars = fluid.backward._find_no_grad_vars( + no_grad_vars = base.backward._find_no_grad_vars( root_block, op_path, targets, block_no_grad_set ) self.assertSetEqual(no_grad_vars, self.net.no_grad_vars) @@ -150,16 +150,16 @@ def _check_find_no_grad_vars( def _check_error_param_list(self, net, parameter_list): place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) - main = fluid.Program() - startup = fluid.Program() + main = base.Program() + startup = base.Program() - with fluid.program_guard(main, startup): + with base.program_guard(main, startup): loss = net.build_model() optimizer = paddle.optimizer.SGD(learning_rate=0.1) optimizer.minimize(loss, parameter_list=parameter_list) @@ -168,16 +168,16 @@ def _check_error_param_list(self, net, parameter_list): def _check_error_no_grad_set(self, net, no_grad_set): place = ( - fluid.CUDAPlace(0) - if 
fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) - main = fluid.Program() - startup = fluid.Program() + main = base.Program() + startup = base.Program() - with fluid.program_guard(main, startup): + with base.program_guard(main, startup): loss = net.build_model() optimizer = paddle.optimizer.SGD(learning_rate=0.1) optimizer.minimize(loss, no_grad_set=no_grad_set) @@ -239,13 +239,13 @@ def build_model(self): # shared layer, the grad of 'w2v' will be summed and renamed. # To test _addup_repetitive_outputs_ x_emb = paddle.static.nn.embedding( - x, size=[100, 64], param_attr=fluid.ParamAttr(name='w2v') + x, size=[100, 64], param_attr=base.ParamAttr(name='w2v') ) x2_emb = paddle.static.nn.embedding( - x2, size=[100, 64], param_attr=fluid.ParamAttr(name='w2v') + x2, size=[100, 64], param_attr=base.ParamAttr(name='w2v') ) x3_emb = paddle.static.nn.embedding( - x3, size=[100, 64], param_attr=fluid.ParamAttr(name='w2v') + x3, size=[100, 64], param_attr=base.ParamAttr(name='w2v') ) # merge layers x_merge = paddle.add(x_emb, x2_emb, name='x_add_x2') @@ -255,7 +255,7 @@ def build_model(self): x=x_merge, size=1, activation='softmax', - weight_attr=fluid.ParamAttr(name='fc_w'), + weight_attr=base.ParamAttr(name='fc_w'), name='fc_predict', ) # useless layer for calculating loss @@ -263,7 +263,7 @@ def build_model(self): x=x2_merge, size=1, activation='sigmoid', - weight_attr=fluid.ParamAttr(name='fc_w'), + weight_attr=base.ParamAttr(name='fc_w'), name='fc_no_use', ) # loss @@ -293,16 +293,16 @@ def test_error(self): y = F.relu(conv) with self.assertRaises(TypeError): - x_grad = fluid.gradients(y.name, x) + x_grad = base.gradients(y.name, x) with self.assertRaises(TypeError): - x_grad = fluid.gradients(y, x.name) + x_grad = base.gradients(y, x.name) with self.assertRaises(TypeError): - x_grad = fluid.gradients([y], [x], target_gradients=x.name) + x_grad = base.gradients([y], [x], target_gradients=x.name) with self.assertRaises(TypeError): - x_grad = fluid.gradients([y], x, no_grad_set=conv) + x_grad = base.gradients([y], x, no_grad_set=conv) class TestSimpleNetWithErrorParamList(TestBackward): @@ -345,24 +345,24 @@ def build_net(self): avg_loss = paddle.mean(loss) param_names = [ param.name - for param in fluid.default_main_program().block(0).all_parameters() + for param in base.default_main_program().block(0).all_parameters() ] return avg_loss, param_names def setUp(self): - main_program = fluid.Program() - with fluid.program_guard(main_program): + main_program = base.Program() + with base.program_guard(main_program): self.avg_loss, self.param_names = self.build_net() def test_loss_type_error(self): with self.assertRaises(TypeError): - fluid.backward.append_backward(loss=self.avg_loss.name) + base.backward.append_backward(loss=self.avg_loss.name) def test_parameter_list_type_error(self): with self.assertRaises(TypeError): self.param_names[0] = np.random.random([10]) - fluid.backward.append_backward( + base.backward.append_backward( loss=self.avg_loss, parameter_list=self.param_names ) @@ -372,7 +372,7 @@ def test_callback_type_error(self): def callback(block, context): return - fluid.backward.append_backward( + base.backward.append_backward( loss=self.avg_loss, callbacks=callback ) @@ -387,10 +387,10 @@ def _check_grad_op_name(self, forward_list, optimiezed_list): ) def test_gradient_with_optimizer(self): - main = fluid.Program() - startup = 
fluid.Program() + main = base.Program() + startup = base.Program() - with fluid.program_guard(main, startup): + with base.program_guard(main, startup): img = static.data(name='image', shape=[None, 784]) pred = static.nn.fc(x=img, size=10, activation='relu') loss = paddle.mean(pred) @@ -436,7 +436,7 @@ def test(self): + (gt[2:4] * x).sum() ) exe = paddle.static.Executor() - paddle.fluid.backward.gradients(loss, []) + paddle.base.backward.gradients(loss, []) exe.run(startup_prg) # Optimizer out = exe.run( diff --git a/test/legacy_test/test_backward_infer_var_data_type_shape.py b/test/legacy_test/test_backward_infer_var_data_type_shape.py index 2fb83a96bc675..c68ef82d6284b 100644 --- a/test/legacy_test/test_backward_infer_var_data_type_shape.py +++ b/test/legacy_test/test_backward_infer_var_data_type_shape.py @@ -18,18 +18,18 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base class TestBackwardInferVarDataTypeShape(unittest.TestCase): def test_backward_infer_var_data_type_shape(self): paddle.enable_static() - program = fluid.default_main_program() + program = base.default_main_program() dy = program.global_block().create_var( name="Tmp@GRAD", shape=[1, 1], dtype=np.float32, persistable=True ) # invoke warning - fluid.backward._infer_var_data_type_shape_( + base.backward._infer_var_data_type_shape_( "Tmp@GRAD", program.global_block() ) res = False diff --git a/test/legacy_test/test_base_layer.py b/test/legacy_test/test_base_layer.py index 9aa3327efe080..0ad517e00cdb1 100644 --- a/test/legacy_test/test_base_layer.py +++ b/test/legacy_test/test_base_layer.py @@ -17,15 +17,15 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid.dygraph import to_variable -from paddle.fluid.framework import EagerParamBase +from paddle import base +from paddle.base.dygraph import to_variable +from paddle.base.framework import EagerParamBase class L1(paddle.nn.Layer): def __init__(self): super().__init__() - self._param_attr = fluid.ParamAttr( + self._param_attr = base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.1) ) self.w1 = self.create_parameter( @@ -61,7 +61,7 @@ def forward(self): class TestBaseLayer(unittest.TestCase): def test_one_level(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): l = L1() ret = l() expected_names = ['l1.w1', 'l1.w2'] @@ -74,7 +74,7 @@ def test_one_level(self): ) def test_three_level(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): l = L3() expected_names = [ 'l3.layer1.layer1.w1', @@ -96,7 +96,7 @@ def test_three_level(self): ) def test_add_parameter_with_error(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): net = paddle.nn.Layer() param = net.create_parameter(shape=[1]) @@ -152,7 +152,7 @@ def test_buffers_and_named_buffers(self): def names(named_buffers): return [name for name, _ in named_buffers] - with fluid.dygraph.guard(): + with base.dygraph.guard(): layer = BufferLayer() net = BufferNet() @@ -172,7 +172,7 @@ def names(named_buffers): ) def test_register_buffer_with_error(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): net = paddle.nn.Layer() var = to_variable(np.zeros([1])) @@ -208,7 +208,7 @@ def test_register_buffer_with_error(self): net.register_buffer("attr_name", var) def test_register_buffer_same_name(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): net = paddle.nn.Layer() var1 = to_variable(np.zeros([1])) var2 = to_variable(np.zeros([2])) @@ -222,7 +222,7 @@ def test_register_buffer_same_name(self): 
self.assert_var_base_equal(net.buffer_name, var3) def test_buffer_not_persistable(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): net = paddle.nn.Layer() var1 = to_variable(np.zeros([1])) @@ -231,7 +231,7 @@ def test_buffer_not_persistable(self): self.assertEqual(len(net.state_dict()), 0) def test_buffer_not_persistable_del(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): net = paddle.nn.Layer() var1 = to_variable(np.zeros([1])) net.register_buffer("buffer_name", var1, persistable=False) @@ -239,7 +239,7 @@ def test_buffer_not_persistable_del(self): self.assertEqual(len(net.buffers()), 0) def test_buffer_not_persistable_overwrite(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): net = paddle.nn.Layer() var1 = to_variable(np.zeros([1])) var2 = to_variable(np.zeros([2])) @@ -255,7 +255,7 @@ def test_buffer_not_persistable_overwrite(self): self.assertEqual(len(net.state_dict()), 0) def test_buffer_not_persistable_assign(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): net = paddle.nn.Layer() var1 = to_variable(np.zeros([1])) net.register_buffer("buffer_name", var1, persistable=False) @@ -276,14 +276,14 @@ def test_buffer_not_persistable_assign(self): self.assertEqual(len(net.state_dict()), 1) def test_buffer_not_persistable_load(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): net = paddle.nn.Layer() var1 = to_variable(np.zeros([1])) net.register_buffer("buffer_name", var1, persistable=False) net.load_dict({}) def test_buffer_state_dict(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): net = paddle.nn.Layer() var1 = to_variable(np.zeros([2, 3])) var2 = to_variable(np.zeros([3, 2])) @@ -364,39 +364,39 @@ def funcsetUp(self): def func_test_to_api(self): self.linear.to(dtype='double') self.assertEqual( - self.linear.weight.dtype, paddle.fluid.core.VarDesc.VarType.FP64 + self.linear.weight.dtype, paddle.base.core.VarDesc.VarType.FP64 ) self.assertEqual( - self.linear.buf_name.dtype, paddle.fluid.core.VarDesc.VarType.FP64 + self.linear.buf_name.dtype, paddle.base.core.VarDesc.VarType.FP64 ) np.testing.assert_allclose( self.linear.weight.grad.numpy(), self.new_grad, rtol=1e-05 ) self.assertEqual( self.linear.weight._grad_ivar().dtype, - paddle.fluid.core.VarDesc.VarType.FP64, + paddle.base.core.VarDesc.VarType.FP64, ) self.linear.to() self.assertEqual( - self.linear.weight.dtype, paddle.fluid.core.VarDesc.VarType.FP64 + self.linear.weight.dtype, paddle.base.core.VarDesc.VarType.FP64 ) self.assertEqual( - self.linear.buf_name.dtype, paddle.fluid.core.VarDesc.VarType.FP64 + self.linear.buf_name.dtype, paddle.base.core.VarDesc.VarType.FP64 ) np.testing.assert_allclose( self.linear.weight.grad.numpy(), self.new_grad, rtol=1e-05 ) self.assertEqual( self.linear.weight._grad_ivar().dtype, - paddle.fluid.core.VarDesc.VarType.FP64, + paddle.base.core.VarDesc.VarType.FP64, ) for p in self.linear.parameters(): self.assertTrue( - isinstance(p, paddle.fluid.framework.EagerParamBase) + isinstance(p, paddle.base.framework.EagerParamBase) ) - if paddle.fluid.is_compiled_with_cuda(): + if paddle.base.is_compiled_with_cuda(): self.linear.to(device=paddle.CUDAPlace(0)) self.assertTrue(self.linear.weight.place.is_gpu_place()) self.assertEqual(self.linear.weight.place.gpu_device_id(), 0) @@ -422,7 +422,7 @@ def func_test_to_api(self): ) for p in self.linear.parameters(): self.assertTrue( - isinstance(p, paddle.fluid.framework.EagerParamBase) + isinstance(p, paddle.base.framework.EagerParamBase) ) 
self.linear.to(device=paddle.CPUPlace()) @@ -442,71 +442,71 @@ def func_test_to_api(self): def func_test_to_api_paddle_dtype(self): self.linear.to(dtype=paddle.float64) self.assertEqual( - self.linear.weight.dtype, paddle.fluid.core.VarDesc.VarType.FP64 + self.linear.weight.dtype, paddle.base.core.VarDesc.VarType.FP64 ) self.assertEqual( - self.linear.buf_name.dtype, paddle.fluid.core.VarDesc.VarType.FP64 + self.linear.buf_name.dtype, paddle.base.core.VarDesc.VarType.FP64 ) np.testing.assert_allclose( self.linear.weight.grad.numpy(), self.new_grad, rtol=1e-05 ) self.assertEqual( self.linear.weight._grad_ivar().dtype, - paddle.fluid.core.VarDesc.VarType.FP64, + paddle.base.core.VarDesc.VarType.FP64, ) self.linear.to() self.assertEqual( - self.linear.weight.dtype, paddle.fluid.core.VarDesc.VarType.FP64 + self.linear.weight.dtype, paddle.base.core.VarDesc.VarType.FP64 ) self.assertEqual( - self.linear.buf_name.dtype, paddle.fluid.core.VarDesc.VarType.FP64 + self.linear.buf_name.dtype, paddle.base.core.VarDesc.VarType.FP64 ) np.testing.assert_allclose( self.linear.weight.grad.numpy(), self.new_grad, rtol=1e-05 ) self.assertEqual( self.linear.weight._grad_ivar().dtype, - paddle.fluid.core.VarDesc.VarType.FP64, + paddle.base.core.VarDesc.VarType.FP64, ) for p in self.linear.parameters(): self.assertTrue( - isinstance(p, paddle.fluid.framework.EagerParamBase) + isinstance(p, paddle.base.framework.EagerParamBase) ) def func_test_to_api_numpy_dtype(self): self.linear.to(dtype=np.float64) self.assertEqual( - self.linear.weight.dtype, paddle.fluid.core.VarDesc.VarType.FP64 + self.linear.weight.dtype, paddle.base.core.VarDesc.VarType.FP64 ) self.assertEqual( - self.linear.buf_name.dtype, paddle.fluid.core.VarDesc.VarType.FP64 + self.linear.buf_name.dtype, paddle.base.core.VarDesc.VarType.FP64 ) np.testing.assert_allclose( self.linear.weight.grad.numpy(), self.new_grad, rtol=1e-05 ) self.assertEqual( self.linear.weight._grad_ivar().dtype, - paddle.fluid.core.VarDesc.VarType.FP64, + paddle.base.core.VarDesc.VarType.FP64, ) self.linear.to() self.assertEqual( - self.linear.weight.dtype, paddle.fluid.core.VarDesc.VarType.FP64 + self.linear.weight.dtype, paddle.base.core.VarDesc.VarType.FP64 ) self.assertEqual( - self.linear.buf_name.dtype, paddle.fluid.core.VarDesc.VarType.FP64 + self.linear.buf_name.dtype, paddle.base.core.VarDesc.VarType.FP64 ) np.testing.assert_allclose( self.linear.weight.grad.numpy(), self.new_grad, rtol=1e-05 ) self.assertEqual( self.linear.weight._grad_ivar().dtype, - paddle.fluid.core.VarDesc.VarType.FP64, + paddle.base.core.VarDesc.VarType.FP64, ) for p in self.linear.parameters(): self.assertTrue( - isinstance(p, paddle.fluid.framework.EagerParamBase) + isinstance(p, paddle.base.framework.EagerParamBase) ) def func_test_to_api_none_buffer(self): diff --git a/test/legacy_test/test_batch_fc_op.py b/test/legacy_test/test_batch_fc_op.py index 1ca4b640af075..e5f045a49d604 100644 --- a/test/legacy_test/test_batch_fc_op.py +++ b/test/legacy_test/test_batch_fc_op.py @@ -17,7 +17,7 @@ import numpy as np from eager_op_test import OpTest -from paddle.fluid import core +from paddle.base import core def np_cal_batchfc(input, w, bias): diff --git a/test/legacy_test/test_batch_norm_op.py b/test/legacy_test/test_batch_norm_op.py index bbe322ae0175b..4f3f06f692cba 100644 --- a/test/legacy_test/test_batch_norm_op.py +++ b/test/legacy_test/test_batch_norm_op.py @@ -25,9 +25,9 @@ from op import Operator import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard 
-from paddle.fluid.framework import grad_var_name +from paddle import base +from paddle.base import Program, core, program_guard +from paddle.base.framework import grad_var_name _set_use_system_allocator(True) @@ -274,19 +274,19 @@ def check_with_place(self, place, data_layout, dtype, shape): # create input x_tensor = create_or_get_tensor( - scope, "x_val", OpTest.np_dtype_to_fluid_dtype(x_val), place + scope, "x_val", OpTest.np_dtype_to_base_dtype(x_val), place ) scale_tensor = create_or_get_tensor( - scope, "scale_val", OpTest.np_dtype_to_fluid_dtype(scale_val), place + scope, "scale_val", OpTest.np_dtype_to_base_dtype(scale_val), place ) bias_tensor = create_or_get_tensor( - scope, "bias_val", OpTest.np_dtype_to_fluid_dtype(bias_val), place + scope, "bias_val", OpTest.np_dtype_to_base_dtype(bias_val), place ) mean_tensor = create_or_get_tensor( - scope, "mean", OpTest.np_dtype_to_fluid_dtype(mean), place + scope, "mean", OpTest.np_dtype_to_base_dtype(mean), place ) variance_tensor = create_or_get_tensor( - scope, "variance", OpTest.np_dtype_to_fluid_dtype(variance), place + scope, "variance", OpTest.np_dtype_to_base_dtype(variance), place ) # create output @@ -333,7 +333,7 @@ def check_with_place(self, place, data_layout, dtype, shape): # Create executor to have MKL-DNN cache # cleared after NHWC unit test place = core.CPUPlace() - exe = fluid.Executor(place) + exe = base.Executor(place) dims = y_tensor.shape() c = dims.pop(1) dims.append(c) @@ -555,8 +555,8 @@ def test_with_place(place, data_layout, shape): ] ground_truth = {name: var_dict[name] for name in var_names} - program = fluid.Program() - with fluid.program_guard(program): + program = base.Program() + with base.program_guard(program): block = program.global_block() for name in ground_truth: block.create_var( @@ -618,7 +618,7 @@ def test_with_place(place, data_layout, shape): program._sync_with_cpp() - exe = fluid.Executor(place) + exe = base.Executor(place) out = exe.run( program, feed={ @@ -799,8 +799,8 @@ class TestBatchNormOpError(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): # the input of batch_norm must be Variable. - x1 = fluid.create_lod_tensor( - np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + x1 = base.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], base.CPUPlace() ) self.assertRaises(TypeError, paddle.static.nn.batch_norm, x1) @@ -821,8 +821,8 @@ def test_errors(self): with program_guard(Program(), Program()): batch_norm = paddle.nn.BatchNorm(10) # the input of BatchNorm must be Variable. 
- x1 = fluid.create_lod_tensor( - np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + x1 = base.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], base.CPUPlace() ) self.assertRaises(TypeError, batch_norm, x1) @@ -836,20 +836,20 @@ def test_errors(self): class TestDygraphBatchNormTrainableStats(unittest.TestCase): def test_dygraph(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: shape = [4, 10, 4, 4] def compute(x, is_test, trainable_statistics): - with fluid.dygraph.guard(p): + with base.dygraph.guard(p): bn = paddle.nn.BatchNorm( shape[1], is_test=is_test, trainable_statistics=trainable_statistics, ) - y = bn(fluid.dygraph.to_variable(x)) + y = bn(base.dygraph.to_variable(x)) return y.numpy() x = np.random.randn(*shape).astype("float32") @@ -858,11 +858,11 @@ def compute(x, is_test, trainable_statistics): np.testing.assert_allclose(y1, y2, rtol=1e-05) def test_static(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: - exe = fluid.Executor(p) + exe = base.Executor(p) shape = [4, 10, 16, 16] def compute(x_np, is_test, trainable_statistics): @@ -876,7 +876,7 @@ def compute(x_np, is_test, trainable_statistics): name='x', shape=x_np.shape, dtype=x_np.dtype ) y = bn(x) - exe.run(fluid.default_startup_program()) + exe.run(base.default_startup_program()) r = exe.run(feed={'x': x_np}, fetch_list=[y])[0] return r diff --git a/test/legacy_test/test_batch_norm_op_prim_nchw.py b/test/legacy_test/test_batch_norm_op_prim_nchw.py index 9d11d264908f0..a83fc9fbc2c88 100644 --- a/test/legacy_test/test_batch_norm_op_prim_nchw.py +++ b/test/legacy_test/test_batch_norm_op_prim_nchw.py @@ -23,7 +23,7 @@ import paddle import paddle.nn.functional as F -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/legacy_test/test_batch_norm_op_prim_nhwc.py b/test/legacy_test/test_batch_norm_op_prim_nhwc.py index 4f1e3e2d22522..11d8c4a0db459 100644 --- a/test/legacy_test/test_batch_norm_op_prim_nhwc.py +++ b/test/legacy_test/test_batch_norm_op_prim_nhwc.py @@ -19,7 +19,7 @@ from test_batch_norm_op_prim_nchw import TestBatchNormOp import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/legacy_test/test_batch_norm_op_v2.py b/test/legacy_test/test_batch_norm_op_v2.py index 618513a0d044b..b53bfb9e73373 100644 --- a/test/legacy_test/test_batch_norm_op_v2.py +++ b/test/legacy_test/test_batch_norm_op_v2.py @@ -17,23 +17,23 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard class TestBatchNorm(unittest.TestCase): def test_name(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: - with fluid.dygraph.guard(p): + with base.dygraph.guard(p): batch_norm1d = paddle.nn.BatchNorm1D(1, name="test") def test_error(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: # paddle.disable_static() x_data_4 = np.random.random(size=(2, 1, 3, 
3)).astype('float32') @@ -69,7 +69,7 @@ def error3d(): batch_norm3d = paddle.nn.BatchNorm3D(1) batch_norm3d(paddle.to_tensor(x_data_4)) - with fluid.dygraph.guard(p): + with base.dygraph.guard(p): self.assertRaises(ValueError, error1d) self.assertRaises(ValueError, error2d) self.assertRaises(ValueError, error3d) @@ -79,7 +79,7 @@ def error3d(): def test_large_batch(self): def compute_baseline(x): - with fluid.dygraph.guard(p): + with base.dygraph.guard(p): bn = paddle.nn.BatchNorm(shape[1]) x1 = paddle.to_tensor(x) x1.stop_gradient = False @@ -88,7 +88,7 @@ def compute_baseline(x): return y.numpy(), x1.gradient() def compute_1d(x): - with fluid.dygraph.guard(p): + with base.dygraph.guard(p): bn = paddle.nn.BatchNorm1D(shape[1]) x1 = paddle.to_tensor(x) x1.stop_gradient = False @@ -96,9 +96,9 @@ def compute_1d(x): y.backward() return y.numpy(), x1.gradient() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: # [N, C] shape = [200000, 4] @@ -117,14 +117,14 @@ def compute_1d(x): np.testing.assert_allclose(y1, y2, rtol=1e-05) def test_eager_api(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: shape = [4, 10, 4, 4] def compute_v1(x): - with fluid.dygraph.guard(p): + with base.dygraph.guard(p): bn = paddle.nn.BatchNorm(shape[1]) # bn = paddle.nn.BatchNorm2D(shape[1]) x1 = paddle.to_tensor(x) @@ -134,7 +134,7 @@ def compute_v1(x): return y.numpy(), x1.gradient() def compute_v2(x): - with fluid.dygraph.guard(p): + with base.dygraph.guard(p): print("v2") bn = paddle.nn.BatchNorm2D(shape[1]) x1 = paddle.to_tensor(x) @@ -150,14 +150,14 @@ def compute_v2(x): np.testing.assert_allclose(y1, y2, rtol=1e-05) def test_dygraph(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: shape = [4, 10, 4, 4] def compute_v1(x, is_test, trainable_statistics): - with fluid.dygraph.guard(p): + with base.dygraph.guard(p): bn = paddle.nn.BatchNorm( shape[1], is_test=is_test, @@ -167,7 +167,7 @@ def compute_v1(x, is_test, trainable_statistics): return y.numpy() def compute_v2(x): - with fluid.dygraph.guard(p): + with base.dygraph.guard(p): bn = paddle.nn.BatchNorm2D(shape[1]) y = bn(paddle.to_tensor(x)) @@ -177,15 +177,15 @@ def compute_v2(x): return y.numpy() def compute_v3(x, is_test, trainable_statistics): - with fluid.dygraph.guard(p): + with base.dygraph.guard(p): bn = paddle.nn.BatchNorm( shape[1], is_test=is_test, - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(1.0), trainable=False, ), - bias_attr=fluid.ParamAttr( + bias_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(0.0), trainable=False, ), @@ -195,7 +195,7 @@ def compute_v3(x, is_test, trainable_statistics): return y.numpy() def compute_v4(x): - with fluid.dygraph.guard(p): + with base.dygraph.guard(p): bn = paddle.nn.BatchNorm2D( shape[1], weight_attr=False, bias_attr=False ) @@ -211,11 +211,11 @@ def compute_v4(x): np.testing.assert_allclose(y3, y4, rtol=1e-05) def test_static(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: - exe = fluid.Executor(p) + exe = 
base.Executor(p) shape = [4, 10, 16, 16] def compute_v1(x_np, is_test, trainable_statistics): @@ -229,7 +229,7 @@ def compute_v1(x_np, is_test, trainable_statistics): name='x', shape=x_np.shape, dtype=x_np.dtype ) y = bn(x) - exe.run(fluid.default_startup_program()) + exe.run(base.default_startup_program()) r = exe.run(feed={'x': x_np}, fetch_list=[y])[0] return r @@ -240,7 +240,7 @@ def compute_v2(x_np): name='x', shape=x_np.shape, dtype=x_np.dtype ) y = bn(x) - exe.run(fluid.default_startup_program()) + exe.run(base.default_startup_program()) r = exe.run(feed={'x': x_np}, fetch_list=[y])[0] return r @@ -258,16 +258,16 @@ def setUp(self): paddle.set_default_dtype("float32") else: paddle.set_default_dtype("float64") - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) def tearDown(self): paddle.set_default_dtype(self.original_dtyep) def test_1d(self): for p in self.places: - with fluid.dygraph.guard(p): + with base.dygraph.guard(p): x = paddle.randn([2, 6, 4]) net1 = paddle.nn.BatchNorm1D(4, data_format="NLC") net2 = paddle.nn.BatchNorm1D(4) @@ -289,7 +289,7 @@ def test_1d(self): def test_2d(self): for p in self.places: - with fluid.dygraph.guard(p): + with base.dygraph.guard(p): x = paddle.randn([2, 6, 6, 4]) net1 = paddle.nn.BatchNorm2D(4, data_format="NHWC") net2 = paddle.nn.BatchNorm2D(4) @@ -311,7 +311,7 @@ def test_2d(self): def test_3d(self): for p in self.places: - with fluid.dygraph.guard(p): + with base.dygraph.guard(p): x = paddle.randn([2, 6, 6, 6, 4]) net1 = paddle.nn.BatchNorm3D(4, data_format="NDHWC") net2 = paddle.nn.BatchNorm3D(4) @@ -332,7 +332,7 @@ def test_3d(self): ) def test_1d_opt(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): batch_size = 13700 channels = 16 shape = (batch_size, channels) @@ -364,9 +364,9 @@ def test_1d_opt(self): class TestBatchNormUseGlobalStats(unittest.TestCase): def setUp(self): - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) self.init_test() # train mode @@ -376,11 +376,11 @@ def init_test(self): def test_global_stats(self): for p in self.places: - with fluid.dygraph.guard(p): + with base.dygraph.guard(p): x = paddle.randn([2, 6, 6, 4]) net1 = paddle.nn.BatchNorm( 6, - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(1.0) ), use_global_stats=self.use_global_stats, diff --git a/test/legacy_test/test_bce_loss.py b/test/legacy_test/test_bce_loss.py index 38310e5a62025..dc95248ae9bde 100644 --- a/test/legacy_test/test_bce_loss.py +++ b/test/legacy_test/test_bce_loss.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def test_static_layer( @@ -155,9 +155,9 @@ class TestBCELoss(unittest.TestCase): def test_BCELoss(self): input_np = np.random.uniform(0.1, 0.8, size=(20, 30)).astype(np.float64) label_np = np.random.randint(0, 2, size=(20, 30)).astype(np.float64) - places = [fluid.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places = [base.CPUPlace()] + if base.core.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) reductions = ['sum', 'mean', 'none'] for place in places: for reduction in reductions: @@ -194,9 +194,9 @@ def 
test_BCELoss_weight(self): ) weight_np = np.random.random(size=(3, 4, 10)).astype(np.float64) place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) for reduction in ['sum', 'mean', 'none']: static_result = test_static_layer( diff --git a/test/legacy_test/test_bce_with_logits_loss.py b/test/legacy_test/test_bce_with_logits_loss.py index d9905fe463232..32444e34b6102 100644 --- a/test/legacy_test/test_bce_with_logits_loss.py +++ b/test/legacy_test/test_bce_with_logits_loss.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base def call_bce_layer( @@ -92,7 +92,7 @@ def test_dygraph( pos_weight_np=None, functional=False, ): - with paddle.fluid.dygraph.base.guard(): + with paddle.base.dygraph.base.guard(): logit = paddle.to_tensor(logit_np) label = paddle.to_tensor(label_np) weight = None @@ -141,9 +141,9 @@ class TestBCEWithLogitsLoss(unittest.TestCase): def test_BCEWithLogitsLoss(self): logit_np = np.random.uniform(0.1, 0.8, size=(20, 30)).astype(np.float64) label_np = np.random.randint(0, 2, size=(20, 30)).astype(np.float64) - places = [fluid.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places = [base.CPUPlace()] + if base.core.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) reductions = ['sum', 'mean', 'none'] for place in places: for reduction in reductions: @@ -191,9 +191,9 @@ def test_BCEWithLogitsLoss_weight(self): ) weight_np = np.random.random(size=(2, 3, 4, 10)).astype(np.float64) place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) for reduction in ['sum', 'mean', 'none']: static_result = test_static( @@ -248,9 +248,9 @@ def test_BCEWithLogitsLoss_pos_weight(self): pos_weight_np = np.random.random(size=(3, 4, 10)).astype(np.float64) weight_np = np.random.random(size=(2, 3, 4, 10)).astype(np.float64) place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) reduction = "mean" static_result = test_static( diff --git a/test/legacy_test/test_beam_search_decode_op.py b/test/legacy_test/test_beam_search_decode_op.py index eee6832f2d254..38f4a200283ff 100644 --- a/test/legacy_test/test_beam_search_decode_op.py +++ b/test/legacy_test/test_beam_search_decode_op.py @@ -17,7 +17,7 @@ import numpy as np from op import Operator -from paddle.fluid import core +from paddle.base import core class TestBeamSearchDecodeOp(unittest.TestCase): diff --git a/test/legacy_test/test_beam_search_op.py b/test/legacy_test/test_beam_search_op.py index 940f365bdee77..012eef68bf5d3 100644 --- a/test/legacy_test/test_beam_search_op.py +++ b/test/legacy_test/test_beam_search_op.py @@ -17,7 +17,7 @@ import numpy as np from op import Operator -from paddle.fluid import core +from paddle.base import core def create_tensor(scope, name, np_data): diff --git a/test/legacy_test/test_bernoulli_op.py b/test/legacy_test/test_bernoulli_op.py index cce3c09400e02..51cd296436ef3 100644 --- a/test/legacy_test/test_bernoulli_op.py +++ b/test/legacy_test/test_bernoulli_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core def output_hist(out): diff --git 
a/test/legacy_test/test_bicubic_interp_op.py b/test/legacy_test/test_bicubic_interp_op.py index 07c2f1a3e8292..8cec2f266cf61 100644 --- a/test/legacy_test/test_bicubic_interp_op.py +++ b/test/legacy_test/test_bicubic_interp_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest import paddle -from paddle import fluid -from paddle.fluid import Program, program_guard +from paddle import base +from paddle.base import Program, program_guard from paddle.nn.functional import interpolate @@ -286,15 +286,15 @@ def test_case(self): actual_size_data = np.array([12, 12]).astype("int32") scale_data = np.array([2.0]).astype("float32") - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): x = paddle.static.data( name="x", shape=[2, 3, 6, 6], dtype="float32" ) @@ -329,10 +329,10 @@ def test_case(self): align_corners=False, ) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + exe.run(base.default_startup_program()) results = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={ "x": x_data, "dim": dim_data, @@ -350,8 +350,8 @@ def test_case(self): for res in results: np.testing.assert_allclose(res, expect_res, rtol=1e-05) - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(x_data) + with base.dygraph.guard(): + x = base.dygraph.to_variable(x_data) interp = interpolate( x, size=[12, 12], mode='bicubic', align_corners=False ) @@ -366,8 +366,8 @@ class TestBicubicOpError(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): # the input of interpoalte must be Variable. - x1 = fluid.create_lod_tensor( - np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + x1 = base.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], base.CPUPlace() ) self.assertRaises(TypeError, interpolate, x1) @@ -423,8 +423,8 @@ def test_attr_data_format(): def test_actual_shape(): # the actual_shape must be Variable. 
- x = fluid.create_lod_tensor( - np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + x = base.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], base.CPUPlace() ) out = interpolate( x, size=[12, 12], mode='BICUBIC', align_corners=False @@ -460,8 +460,8 @@ def test_scale_type(): x = paddle.static.data( name="x", shape=[2, 3, 6, 6], dtype="float32" ) - scale = fluid.create_lod_tensor( - np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + scale = base.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], base.CPUPlace() ) out = interpolate( x, diff --git a/test/legacy_test/test_bicubic_interp_v2_op.py b/test/legacy_test/test_bicubic_interp_v2_op.py index 4975b1bf0684f..e034015f9a8f0 100644 --- a/test/legacy_test/test_bicubic_interp_v2_op.py +++ b/test/legacy_test/test_bicubic_interp_v2_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard from paddle.nn.functional import interpolate @@ -591,15 +591,15 @@ def test_case(self): actual_size_data = np.array([12, 12]).astype("int32") scale_data = np.array([2.0]).astype("float32") - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): x = paddle.static.data( name="x", shape=[2, 3, 6, 6], dtype="float32" ) @@ -640,10 +640,10 @@ def test_case(self): x, scale_factor=[2.0, 2.0], mode='bicubic', align_corners=False ) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + exe.run(base.default_startup_program()) results = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={ "x": x_data, "dim": dim_data, @@ -661,8 +661,8 @@ def test_case(self): for res in results: np.testing.assert_allclose(res, expect_res, rtol=1e-05) - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(x_data) + with base.dygraph.guard(): + x = base.dygraph.to_variable(x_data) interp = interpolate( x, size=[12, 12], mode='bicubic', align_corners=False ) @@ -676,8 +676,8 @@ def test_case(self): class TestBicubicOpError(unittest.TestCase): def test_imperative_errors(self): # the input of interpoalte must be Variable. - x1 = fluid.create_lod_tensor( - np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + x1 = base.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], base.CPUPlace() ) self.assertRaises(TypeError, interpolate, x1) @@ -720,8 +720,8 @@ def test_attr_data_format(): def test_actual_shape(): # the actual_shape must be Variable. 
- x = fluid.create_lod_tensor( - np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + x = base.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], base.CPUPlace() ) out = interpolate( x, size=[12, 12], mode='BICUBIC', align_corners=False @@ -754,8 +754,8 @@ def test_scale_type(): x = paddle.static.data( name="x", shape=[2, 3, 6, 6], dtype="float32" ) - scale = fluid.create_lod_tensor( - np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + scale = base.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], base.CPUPlace() ) out = interpolate( x, @@ -914,7 +914,7 @@ def test_errors(self): @unittest.skipIf( - not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA" + not base.core.is_compiled_with_cuda(), "core is not compiled with CUDA" ) class TestBicubicInterpOpForFloat16(unittest.TestCase): def init_test_case(self): diff --git a/test/legacy_test/test_bilateral_slice_op.py b/test/legacy_test/test_bilateral_slice_op.py index f62b19a813e87..1cf5f292240d5 100644 --- a/test/legacy_test/test_bilateral_slice_op.py +++ b/test/legacy_test/test_bilateral_slice_op.py @@ -143,7 +143,7 @@ def naive_bilateral_slice(x, guide, grid, has_offset): @unittest.skipIf( - not paddle.fluid.is_compiled_with_cuda(), 'CPU testing is not supported' + not paddle.base.is_compiled_with_cuda(), 'CPU testing is not supported' ) class TestBilateralSliceOp(OpTest): def setUp(self): @@ -169,11 +169,11 @@ def setUp(self): self.outputs = {'Out': output_np} def test_check_output(self): - place = paddle.fluid.CUDAPlace(0) + place = paddle.base.CUDAPlace(0) self.check_output_with_place(place, atol=1e-5) def test_check_grad(self): - place = paddle.fluid.CUDAPlace(0) + place = paddle.base.CUDAPlace(0) self.check_grad_with_place(place, ['X'], 'Out') def initTestCase(self): @@ -182,7 +182,7 @@ def initTestCase(self): @unittest.skipIf( - not paddle.fluid.is_compiled_with_cuda(), 'CPU testing is not supported' + not paddle.base.is_compiled_with_cuda(), 'CPU testing is not supported' ) class TestBilateralSliceOp1(TestBilateralSliceOp): def initTestCase(self): @@ -204,10 +204,10 @@ def test_api(self): ) bilateral_slice(x, guide, grid, False) - if not paddle.fluid.is_compiled_with_cuda(): + if not paddle.base.is_compiled_with_cuda(): return - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x1 = paddle.rand([3, 1, 50, 30]) guide1 = paddle.rand([3, 50, 30]) grid1 = paddle.rand([3, 2, 2, 5, 3]) diff --git a/test/legacy_test/test_bilinear_api.py b/test/legacy_test/test_bilinear_api.py index f7f22c0725aab..5331252af2de0 100644 --- a/test/legacy_test/test_bilinear_api.py +++ b/test/legacy_test/test_bilinear_api.py @@ -17,20 +17,20 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestBilinearAPI(unittest.TestCase): def test_api(self): - with fluid.program_guard( - fluid.default_startup_program(), fluid.default_main_program() + with base.program_guard( + base.default_startup_program(), base.default_main_program() ): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) else: place = core.CPUPlace() - exe = fluid.Executor(place) + exe = base.Executor(place) data1 = paddle.static.data(name='X1', shape=[5, 5], dtype='float32') data2 = paddle.static.data(name='X2', shape=[5, 4], dtype='float32') @@ -43,7 +43,7 @@ def test_api(self): ) ret = bilinear(data1, data2) - exe.run(fluid.default_startup_program()) + exe.run(base.default_startup_program()) ret_fetch = exe.run( 
feed={'X1': layer1, 'X2': layer2}, fetch_list=[ret.name] ) diff --git a/test/legacy_test/test_bilinear_interp_op.py b/test/legacy_test/test_bilinear_interp_op.py index 33104d3cf2d02..fca64f8b802df 100755 --- a/test/legacy_test/test_bilinear_interp_op.py +++ b/test/legacy_test/test_bilinear_interp_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/legacy_test/test_bilinear_interp_v2_op.py b/test/legacy_test/test_bilinear_interp_v2_op.py index 06249697f0c6b..93805968673a8 100755 --- a/test/legacy_test/test_bilinear_interp_v2_op.py +++ b/test/legacy_test/test_bilinear_interp_v2_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.nn.functional import interpolate @@ -885,7 +885,7 @@ def test_case(self): place = core.CUDAPlace(0) else: place = core.CPUPlace() - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_data = np.random.random((2, 3, 6, 6)).astype("float32") input_x = paddle.to_tensor(input_data) expect_res = bilinear_interp_np( @@ -905,7 +905,7 @@ def test_case(self): place = core.CUDAPlace(0) else: place = core.CPUPlace() - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_data = np.random.random((2, 3, 6, 6)).astype("float32") size_np = np.array([12, 12]).astype("int64") input_x = paddle.to_tensor(input_data) @@ -927,7 +927,7 @@ def test_case(self): place = core.CUDAPlace(0) else: place = core.CPUPlace() - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_data = np.random.random((2, 3, 6, 6)).astype("float32") size_1 = np.array([12]).astype("int64") input_x = paddle.to_tensor(input_data) @@ -952,7 +952,7 @@ def test_case(self): place = core.CUDAPlace(0) else: place = core.CPUPlace() - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_data = np.random.random((2, 3, 6, 6)).astype("float32") scale_np = np.array([2, 2]).astype("int64") input_x = paddle.to_tensor(input_data) @@ -970,7 +970,7 @@ def test_case(self): @unittest.skipIf( - not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA" + not base.core.is_compiled_with_cuda(), "core is not compiled with CUDA" ) class TestBilinearInterpOpZoomOutForFloat16(unittest.TestCase): def init_test_case(self): @@ -1013,7 +1013,7 @@ def test_main(self): @unittest.skipIf( - not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA" + not base.core.is_compiled_with_cuda(), "core is not compiled with CUDA" ) class TestBilinearInterpOpZoomInForFloat16(unittest.TestCase): def init_test_case(self): @@ -1063,7 +1063,7 @@ def test_case(self): place = core.CUDAPlace(0) else: place = core.CPUPlace() - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_data = np.random.random((2, 3, 6, 6)).astype("float32") input_x = paddle.to_tensor(input_data) expect_res = bilinear_interp_np( @@ -1087,7 +1087,7 @@ def test_case(self): place = core.CUDAPlace(0) else: place = core.CPUPlace() - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_data = np.random.random((2, 3, 6, 6)).astype("float32") input_x = paddle.to_tensor(input_data) expect_res = bilinear_interp_np( @@ -1111,7 +1111,7 @@ def test_case(self): place = core.CUDAPlace(0) else: place = core.CPUPlace() - with fluid.dygraph.guard(place): + with 
base.dygraph.guard(place): input_data = np.random.random((2, 3, 6, 6)).astype("float32") input_x = paddle.to_tensor(input_data) expect_res = bilinear_interp_np( diff --git a/test/legacy_test/test_bilinear_tensor_product_op.py b/test/legacy_test/test_bilinear_tensor_product_op.py index a121f0df21b4c..3e8a81393ff05 100644 --- a/test/legacy_test/test_bilinear_tensor_product_op.py +++ b/test/legacy_test/test_bilinear_tensor_product_op.py @@ -18,17 +18,17 @@ from eager_op_test import OpTest, paddle_static_guard import paddle -from paddle import fluid +from paddle import base class TestDygraphBilinearTensorProductAPIError(unittest.TestCase): def test_errors(self): with paddle_static_guard(): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): layer = paddle.nn.Bilinear(5, 4, 1000) # the input must be Variable. - x0 = fluid.create_lod_tensor( - np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + x0 = base.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], base.CPUPlace() ) self.assertRaises(TypeError, layer, x0) # the input dtype must be float32 or float64 @@ -60,7 +60,7 @@ def setUp(self): size0 = 5 size1 = 4 size2 = 5 - dtype = "float32" if fluid.core.is_compiled_with_rocm() else "float64" + dtype = "float32" if base.core.is_compiled_with_rocm() else "float64" a = np.random.random((batch_size, size0)).astype(dtype) b = np.random.random((batch_size, size1)).astype(dtype) w = np.random.random((size2, size0, size1)).astype(dtype) diff --git a/test/legacy_test/test_bincount_op.py b/test/legacy_test/test_bincount_op.py index 504b4b51c77d5..f788dcf2798c7 100644 --- a/test/legacy_test/test_bincount_op.py +++ b/test/legacy_test/test_bincount_op.py @@ -21,8 +21,8 @@ import paddle import paddle.inference as paddle_infer -from paddle import fluid -from paddle.fluid.framework import in_dygraph_mode +from paddle import base +from paddle.base.framework import in_dygraph_mode paddle.enable_static() @@ -31,18 +31,18 @@ class TestBincountOpAPI(unittest.TestCase): """Test bincount api.""" def test_static_graph(self): - startup_program = fluid.Program() - train_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): + startup_program = base.Program() + train_program = base.Program() + with base.program_guard(train_program, startup_program): inputs = paddle.static.data(name='input', dtype='int64', shape=[7]) weights = paddle.static.data( name='weights', dtype='int64', shape=[7] ) output = paddle.bincount(inputs, weights=weights) - place = fluid.CPUPlace() - if fluid.core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) + place = base.CPUPlace() + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) + exe = base.Executor(place) exe.run(startup_program) img = np.array([0, 1, 1, 3, 2, 1, 7]).astype(np.int64) w = np.array([0, 1, 1, 2, 2, 1, 0]).astype(np.int64) @@ -59,9 +59,9 @@ def test_static_graph(self): ) def test_dygraph(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): inputs_np = np.array([0, 1, 1, 3, 2, 1, 7]).astype(np.int64) - inputs = fluid.dygraph.to_variable(inputs_np) + inputs = base.dygraph.to_variable(inputs_np) actual = paddle.bincount(inputs) expected = np.bincount(inputs) self.assertTrue( @@ -74,7 +74,7 @@ class TestBincountOpError(unittest.TestCase): """Test bincount op error.""" def run_network(self, net_func): - with fluid.dygraph.guard(): + with base.dygraph.guard(): net_func() def test_input_value_error(self): @@ 
-104,7 +104,7 @@ def net_func(): input_value = paddle.to_tensor([1, 2, 3, 4, 5]) paddle.bincount(input_value, minlength=-1) - with fluid.dygraph.guard(): + with base.dygraph.guard(): if in_dygraph_mode(): # InvalidArgument for phi BincountKernel with self.assertRaises(ValueError): diff --git a/test/legacy_test/test_bmm_op.py b/test/legacy_test/test_bmm_op.py index 62d1c1e44c9b9..91a0f806dbe16 100644 --- a/test/legacy_test/test_bmm_op.py +++ b/test/legacy_test/test_bmm_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, convert_float_to_uint16, paddle_static_guard import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestBmmOp(OpTest): @@ -88,7 +88,7 @@ def test_checkout_grad(self): class API_TestBmm(unittest.TestCase): def test_out(self): with paddle_static_guard(): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): data1 = paddle.static.data( 'data1', shape=[-1, 3, 4], dtype='float64' ) @@ -96,8 +96,8 @@ def test_out(self): 'data2', shape=[-1, 4, 5], dtype='float64' ) result_bmm = paddle.bmm(data1, data2) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) input1 = np.random.random([10, 3, 4]).astype('float64') input2 = np.random.random([10, 4, 5]).astype('float64') (result,) = exe.run( @@ -122,9 +122,9 @@ def test_out(self): [[4.0, 4.0], [5.0, 5.0], [6.0, 6.0]], ] ) - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(input1) - y = fluid.dygraph.to_variable(input2) + with base.dygraph.guard(): + x = base.dygraph.to_variable(input1) + y = base.dygraph.to_variable(input2) out = paddle.bmm(x, y) out_np = out.numpy() expected_result = np.matmul(input1, input2) diff --git a/test/legacy_test/test_boxps.py b/test/legacy_test/test_boxps.py index 545c485f25c32..66582739c5dfa 100644 --- a/test/legacy_test/test_boxps.py +++ b/test/legacy_test/test_boxps.py @@ -15,9 +15,9 @@ import unittest import paddle -from paddle import fluid +from paddle import base from paddle.distributed.transpiler import collective -from paddle.fluid import core +from paddle.base import core from paddle.incubate.layers.nn import _pull_box_sparse @@ -32,8 +32,8 @@ def get_transpile(self, mode, trainers="127.0.0.1:6174"): return t def test_transpile(self): - main_program = fluid.Program() - startup_program = fluid.Program() + main_program = base.Program() + startup_program = base.Program() t = self.get_transpile("single_process_multi_thread") t.transpile( trainer_id=0, @@ -56,8 +56,8 @@ def test_single_trainers(self): transpiler = collective.GradAllReduce(0) try: transpiler.transpile( - startup_program=fluid.Program(), - main_program=fluid.Program(), + startup_program=base.Program(), + main_program=base.Program(), rank=1, endpoints="127.0.0.1:6174", current_endpoint="127.0.0.1:6174", @@ -68,8 +68,8 @@ def test_single_trainers(self): transpiler = collective.LocalSGD(0) try: transpiler.transpile( - startup_program=fluid.Program(), - main_program=fluid.Program(), + startup_program=base.Program(), + main_program=base.Program(), rank=1, endpoints="127.0.0.1:6174", current_endpoint="127.0.0.1:6174", @@ -94,8 +94,8 @@ class TestPullBoxSparseOP(unittest.TestCase): def test_pull_box_sparse_op(self): paddle.enable_static() - program = fluid.Program() - with fluid.program_guard(program): + program = base.Program() + with base.program_guard(program): x = paddle.static.data( name='x', shape=[-1, 1], dtype='int64', 
lod_level=0 ) diff --git a/test/legacy_test/test_broadcast_error.py b/test/legacy_test/test_broadcast_error.py index 8235cef218a34..3e8f924c85f2f 100644 --- a/test/legacy_test/test_broadcast_error.py +++ b/test/legacy_test/test_broadcast_error.py @@ -17,7 +17,7 @@ import numpy as np from eager_op_test import OpTest -from paddle.fluid import core +from paddle.base import core class TestBroadcastOpCpu(OpTest): diff --git a/test/legacy_test/test_broadcast_tensors_op.py b/test/legacy_test/test_broadcast_tensors_op.py index be37ff0157898..8e691f02fefff 100644 --- a/test/legacy_test/test_broadcast_tensors_op.py +++ b/test/legacy_test/test_broadcast_tensors_op.py @@ -19,7 +19,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core random.seed(2021) diff --git a/test/legacy_test/test_broadcast_to_op.py b/test/legacy_test/test_broadcast_to_op.py index e2da6a1117296..331addd30909b 100644 --- a/test/legacy_test/test_broadcast_to_op.py +++ b/test/legacy_test/test_broadcast_to_op.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import Program, program_guard +from paddle import base +from paddle.base import Program, program_guard paddle.enable_static() @@ -26,8 +26,8 @@ class TestBroadcastToError(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): - x1 = fluid.create_lod_tensor( - np.array([[-1]]), [[1]], fluid.CPUPlace() + x1 = base.create_lod_tensor( + np.array([[-1]]), [[1]], base.CPUPlace() ) shape = [2, 2] self.assertRaises(TypeError, paddle.tensor.broadcast_to, x1, shape) @@ -55,11 +55,11 @@ def test_api(self): out_2 = paddle.broadcast_to(x, shape=[positive_2, 14]) out_3 = paddle.broadcast_to(x, shape=expand_shape) - g0 = fluid.backward.calc_gradient(out_2, x) + g0 = base.backward.calc_gradient(out_2, x) - exe = fluid.Executor(place=fluid.CPUPlace()) + exe = base.Executor(place=base.CPUPlace()) res_1, res_2, res_3 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={ "x": input, "expand_shape": np.array([12, 14]).astype("int32"), @@ -71,7 +71,7 @@ def test_api(self): np.testing.assert_array_equal(res_3, np.tile(input, (1, 1))) def test_api_fp16_gpu(self): - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): place = paddle.CUDAPlace(0) with paddle.static.program_guard( paddle.static.Program(), paddle.static.Program() diff --git a/test/legacy_test/test_bucketize_api.py b/test/legacy_test/test_bucketize_api.py index 544b7c2a1ffe7..b8b9ca714a8a3 100644 --- a/test/legacy_test/test_bucketize_api.py +++ b/test/legacy_test/test_bucketize_api.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core np.random.seed(10) diff --git a/test/legacy_test/test_buffer_shared_memory_reuse_pass.py b/test/legacy_test/test_buffer_shared_memory_reuse_pass.py index 930b85a8201da..ab99363f751a0 100644 --- a/test/legacy_test/test_buffer_shared_memory_reuse_pass.py +++ b/test/legacy_test/test_buffer_shared_memory_reuse_pass.py @@ -19,7 +19,7 @@ from simple_nets import simple_fc_net import paddle -from paddle import fluid +from paddle import base batch_size = 32 @@ -39,38 +39,38 @@ def initParameter(self): def setUp(self): paddle.enable_static() self.initParameter() - if self.use_cuda and fluid.core.is_compiled_with_cuda(): - self.device_count = fluid.core.get_cuda_device_count() + if self.use_cuda and base.core.is_compiled_with_cuda(): + 
self.device_count = base.core.get_cuda_device_count() else: self.device_count = 4 assert batch_size % self.device_count == 0 def build_program_and_scope(self): - self.place = fluid.CUDAPlace(0) if self.use_cuda else fluid.CPUPlace() + self.place = base.CUDAPlace(0) if self.use_cuda else base.CPUPlace() paddle.seed(1) paddle.framework.random._manual_program_seed(1) - startup_program = fluid.Program() - main_program = fluid.Program() + startup_program = base.Program() + main_program = base.Program() - scope = fluid.Scope() - with fluid.program_guard(main_program, startup_program): - with fluid.unique_name.guard(): + scope = base.Scope() + with base.program_guard(main_program, startup_program): + with base.unique_name.guard(): loss = simple_fc_net() adam = paddle.optimizer.Adam(learning_rate=1e-3) adam.minimize(loss) - with fluid.scope_guard(scope): - exe = fluid.Executor( - fluid.CUDAPlace(0) + with base.scope_guard(scope): + exe = base.Executor( + base.CUDAPlace(0) if self.use_cuda - else fluid.CPUPlace() + else base.CPUPlace() ) exe.run(startup_program) return main_program, scope, exe, loss def is_invalid_test(self): - return self.use_cuda and not fluid.core.is_compiled_with_cuda() + return self.use_cuda and not base.core.is_compiled_with_cuda() def get_all_vars(self, program): all_vars = program.global_block().vars @@ -92,13 +92,13 @@ def check_single_card_fetch_var(self): for enable_inplace in [False, True]: prog, scope, _, loss = self.build_program_and_scope() scopes.append(scope) - build_strategy = fluid.BuildStrategy() + build_strategy = base.BuildStrategy() build_strategy.memory_optimize = memory_optimize build_strategy.enable_inplace = enable_inplace build_strategy.fuse_all_optimizer_ops = ( self.fuse_all_optimizer_ops ) - compiled_prog = fluid.CompiledProgram( + compiled_prog = base.CompiledProgram( prog, build_strategy=build_strategy ) compiled_programs.append(compiled_prog) @@ -109,13 +109,13 @@ def check_single_card_fetch_var(self): for fetch_var in repeated_var_names[:4]: for _ in range(2): - with fluid.scope_guard(scope1): + with base.scope_guard(scope1): (fetch_val1,) = exe.run( prog1, feed=feed_dict, fetch_list=[fetch_var] ) for scope, compiled_prog in zip(scopes, compiled_programs): - with fluid.scope_guard(scope): + with base.scope_guard(scope): (fetch_val2,) = exe.run( compiled_prog, feed=feed_dict, diff --git a/test/legacy_test/test_build_strategy_fusion_group_pass.py b/test/legacy_test/test_build_strategy_fusion_group_pass.py index e635479e9ea45..14400a0c2f16b 100644 --- a/test/legacy_test/test_build_strategy_fusion_group_pass.py +++ b/test/legacy_test/test_build_strategy_fusion_group_pass.py @@ -17,8 +17,8 @@ from test_eager_deletion_padding_rnn import PaddingRNNTestBase, RNNConfig import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class FusionGroupPaddingRNNTest(PaddingRNNTestBase): @@ -27,12 +27,12 @@ def set_customed_config(self): # Use CUDA executor if core.is_compiled_with_cuda(): - self.exe = fluid.Executor(fluid.CUDAPlace(0)) + self.exe = base.Executor(base.CUDAPlace(0)) def test_train_enable_fusion_group(self): rnn_model = "static" config = RNNConfig("test", rnn_model) - with fluid.scope_guard(fluid.Scope()): + with base.scope_guard(base.Scope()): self.train(config, use_program_cache=False) diff --git a/test/legacy_test/test_c_comm_init_all_op.py b/test/legacy_test/test_c_comm_init_all_op.py index 61d86fae2dd28..c87018d4aceea 100644 --- a/test/legacy_test/test_c_comm_init_all_op.py +++ 
b/test/legacy_test/test_c_comm_init_all_op.py @@ -15,29 +15,29 @@ import unittest import paddle -from paddle import fluid +from paddle import base class TestCCommInitAllOp(unittest.TestCase): def setUp(self): - self.place = fluid.CUDAPlace(0) - self.exe = fluid.Executor(self.place) + self.place = base.CUDAPlace(0) + self.exe = base.Executor(self.place) def test_default_attrs(self): - program = fluid.Program() + program = base.Program() block = program.global_block() block.append_op(type='c_comm_init_all', attrs={'ring_id': 0}) self.exe.run(program) def test_init_with_same_ring_id(self): - program = fluid.Program() + program = base.Program() block = program.global_block() block.append_op(type='c_comm_init_all', attrs={'ring_id': 0}) with self.assertRaises(ValueError): self.exe.run(program) def test_specifying_devices(self): - program = fluid.Program() + program = base.Program() block = program.global_block() block.append_op( type='c_comm_init_all', attrs={'devices': [0], 'ring_id': 1} diff --git a/test/legacy_test/test_calc_gradient.py b/test/legacy_test/test_calc_gradient.py index 4cecbd5273650..945acf18bb932 100644 --- a/test/legacy_test/test_calc_gradient.py +++ b/test/legacy_test/test_calc_gradient.py @@ -17,34 +17,34 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid.backward import calc_gradient +from paddle import base +from paddle.base.backward import calc_gradient paddle.enable_static() class TestCalcGradient(unittest.TestCase): def test_calc_gradient(self): - main = fluid.Program() - startup = fluid.Program() - with fluid.program_guard(main, startup): + main = base.Program() + startup = base.Program() + with base.program_guard(main, startup): x = paddle.create_parameter(dtype="float32", shape=[5, 10]) y = paddle.create_parameter(dtype="float32", shape=[10, 8]) mul_out = paddle.matmul(x=x, y=y) mean_out = paddle.mean(mul_out) a = calc_gradient(mean_out, mul_out) b = calc_gradient(mean_out, x) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) exe.run(startup) exe.run(main, feed={}, fetch_list=[a, b]) class TestDoubleGrad(unittest.TestCase): def test1(self): - main = fluid.Program() - startup = fluid.Program() - with fluid.program_guard(main, startup): + main = base.Program() + startup = base.Program() + with base.program_guard(main, startup): net = lambda x: x * x x = paddle.create_parameter( name='x', @@ -52,21 +52,21 @@ def test1(self): dtype='float32', default_initializer=paddle.nn.initializer.Constant(3), ) - (grad1,) = fluid.gradients(net(x), x) # 2x = 6 + (grad1,) = base.gradients(net(x), x) # 2x = 6 z = net(x - grad1) - (grad2,) = fluid.gradients(z, x) # gradients( (x - 2x)^2) = 2x = 6 + (grad2,) = base.gradients(z, x) # gradients( (x - 2x)^2) = 2x = 6 - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) exe.run(startup) out = exe.run(main, fetch_list=[grad1.name, grad2.name]) self.assertEqual(6, out[0][0]) self.assertEqual(6, out[1][0]) def test2(self): - main = fluid.Program() - startup = fluid.Program() - with fluid.program_guard(main, startup): + main = base.Program() + startup = base.Program() + with base.program_guard(main, startup): x = paddle.create_parameter( name='x', shape=[1], @@ -74,12 +74,12 @@ def test2(self): default_initializer=paddle.nn.initializer.Constant(1), ) y = x * x - (dx1,) = fluid.gradients(y, x) + (dx1,) = base.gradients(y, x) z = dx1 * dx1 + y * y - (dx2,) = fluid.gradients(z, x) + (dx2,) = 
base.gradients(z, x) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) exe.run(startup) (out,) = exe.run(main, fetch_list=[dx2]) self.assertEqual(12, out[0]) @@ -87,16 +87,16 @@ def test2(self): class TestGradientWithPrune(unittest.TestCase): def test_prune(self): - with paddle.fluid.scope_guard(paddle.static.Scope()): + with paddle.base.scope_guard(paddle.static.Scope()): x = paddle.static.data(name='x', shape=[3], dtype='float32') x.stop_gradient = False x1, x2, x3 = paddle.split(x, axis=0, num_or_sections=3) y = x1 * 2 - x1_grad = fluid.gradients(y, x) + x1_grad = base.gradients(y, x) - exe = fluid.Executor(fluid.CPUPlace()) - main = fluid.default_main_program() - exe.run(fluid.default_startup_program()) + exe = base.Executor(base.CPUPlace()) + main = base.default_main_program() + exe.run(base.default_startup_program()) out = exe.run( main, feed={'x': np.ones([3]).astype('float32')}, @@ -127,7 +127,7 @@ def build_program(self): return start_prog, main_prog, [grad_x, jvp] def test_calc_gradient(self): - with paddle.fluid.scope_guard(paddle.static.Scope()): + with paddle.base.scope_guard(paddle.static.Scope()): start_prog, main_prog, fetch_list = self.build_program() exe = paddle.static.Executor() exe.run(start_prog) @@ -167,7 +167,7 @@ def build_program(self): return start_prog, main_prog, [grad_x, jvp] def test_calc_gradient(self): - with paddle.fluid.scope_guard(paddle.static.Scope()): + with paddle.base.scope_guard(paddle.static.Scope()): start_prog, main_prog, fetch_list = self.build_program() exe = paddle.static.Executor() exe.run(start_prog) diff --git a/test/legacy_test/test_case.py b/test/legacy_test/test_case.py index 48cca3b63ec6d..294f43542bfe6 100644 --- a/test/legacy_test/test_case.py +++ b/test/legacy_test/test_case.py @@ -18,10 +18,10 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.backward import append_backward -from paddle.fluid.framework import Program, program_guard +from paddle import base +from paddle.base import core +from paddle.base.backward import append_backward +from paddle.base.framework import Program, program_guard paddle.enable_static() @@ -84,11 +84,11 @@ def fn_3(): ) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) res = exe.run( main_program, fetch_list=[out_0, out_1, out_2, out_3, out_4] @@ -145,11 +145,11 @@ def fn_3(): ) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) res = exe.run( main_program, fetch_list=[out_0, out_1, out_2, out_3, out_4] @@ -180,11 +180,11 @@ def test_0d_tensor_backward(self): append_backward(out) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) res = exe.run(main_program, fetch_list=[out.name, x.grad_name]) np.testing.assert_allclose( @@ -289,11 +289,11 @@ def fn_3(): ) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) ret = exe.run(main_program, fetch_list=out) np.testing.assert_allclose( @@ -411,11 +411,11 @@ def fn_3(): ) place = ( - 
fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) res = exe.run(main_program, fetch_list=[out_1, out_2, out_3]) @@ -511,11 +511,11 @@ def fn_3(): ) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) res = exe.run(main_program, fetch_list=[out_1, out_2, out_3]) @@ -630,15 +630,15 @@ def fn_2(): pred_fn_pairs=[(switch_id == one, fn_1)], default=fn_2 ) - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) + exe = base.Executor(base.CPUPlace()) + exe.run(base.default_startup_program()) for epoch in range(EPOCH_NUM): np.random.seed(epoch) feed_image = np.random.random(size=[BATCH_SIZE, INPUT_SIZE]).astype( 'float32' ) - main_program = fluid.default_main_program() + main_program = base.default_main_program() out = exe.run( main_program, feed={ diff --git a/test/legacy_test/test_cast_op.py b/test/legacy_test/test_cast_op.py index d1353b24f49f5..e3bae330f0910 100644 --- a/test/legacy_test/test_cast_op.py +++ b/test/legacy_test/test_cast_op.py @@ -24,8 +24,8 @@ ) import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard def cast_wrapper(x, out_dtype=None): @@ -166,15 +166,15 @@ class TestCastOpError(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): # The input type of cast_op must be Variable. - x1 = fluid.create_lod_tensor( - np.array([[-1]]), [[1]], fluid.CPUPlace() + x1 = base.create_lod_tensor( + np.array([[-1]]), [[1]], base.CPUPlace() ) self.assertRaises(TypeError, paddle.cast, x1, 'int32') class TestCastOpEager(unittest.TestCase): def test_eager(self): - with paddle.fluid.dygraph.base.guard(): + with paddle.base.dygraph.base.guard(): x = paddle.ones([2, 2], dtype="float16") x.stop_gradient = False out = paddle.cast(x, "float32") @@ -210,9 +210,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -241,9 +241,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) diff --git a/test/legacy_test/test_channel_shuffle.py b/test/legacy_test/test_channel_shuffle.py index f8b6ef1df9514..2ade43213190d 100644 --- a/test/legacy_test/test_channel_shuffle.py +++ b/test/legacy_test/test_channel_shuffle.py @@ -19,8 +19,8 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def channel_shuffle_np(x, groups, data_format="NCHW"): @@ -107,14 +107,14 @@ def test_static_graph_functional(self): exe = paddle.static.Executor(place=place) res_1 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x": self.x_1_np}, fetch_list=out_1, use_prune=True, ) res_2 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x2": self.x_2_np}, fetch_list=out_2, use_prune=True, @@ -147,14 +147,14 @@ def 
test_static_graph_layer(self): exe = paddle.static.Executor(place=place) res_1 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x": self.x_1_np}, fetch_list=out_1, use_prune=True, ) res_2 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x2": self.x_2_np}, fetch_list=out_2, use_prune=True, @@ -211,28 +211,28 @@ def test_dygraph2(self): class TestChannelShuffleError(unittest.TestCase): def test_error_functional(self): def error_input(): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x = np.random.random([9, 4, 4]).astype("float64") channel_shuffle = F.channel_shuffle(paddle.to_tensor(x), 3) self.assertRaises(ValueError, error_input) def error_groups_1(): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x = np.random.random([2, 9, 4, 4]).astype("float64") channel_shuffle = F.channel_shuffle(paddle.to_tensor(x), 3.33) self.assertRaises(TypeError, error_groups_1) def error_groups_2(): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x = np.random.random([2, 9, 4, 4]).astype("float64") channel_shuffle = F.channel_shuffle(paddle.to_tensor(x), -1) self.assertRaises(ValueError, error_groups_2) def error_data_format(): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x = np.random.random([2, 9, 4, 4]).astype("float64") channel_shuffle = F.channel_shuffle( paddle.to_tensor(x), 3, "WOW" @@ -242,7 +242,7 @@ def error_data_format(): def test_error_layer(self): def error_input_layer(): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x = np.random.random([9, 4, 4]).astype("float64") cs = paddle.nn.ChannelShuffle(3) cs(paddle.to_tensor(x)) @@ -250,21 +250,21 @@ def error_input_layer(): self.assertRaises(ValueError, error_input_layer) def error_groups_layer_1(): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x = np.random.random([2, 9, 4, 4]).astype("float64") cs = paddle.nn.ChannelShuffle(3.33) self.assertRaises(TypeError, error_groups_layer_1) def error_groups_layer_2(): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x = np.random.random([2, 9, 4, 4]).astype("float64") cs = paddle.nn.ChannelShuffle(-1) self.assertRaises(ValueError, error_groups_layer_2) def error_data_format_layer(): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x = np.random.random([2, 9, 4, 4]).astype("float64") cs = paddle.nn.ChannelShuffle(3, "MEOW") diff --git a/test/legacy_test/test_checkpoint_saver.py b/test/legacy_test/test_checkpoint_saver.py index b6ee6e28c7b06..2fe34a3385822 100644 --- a/test/legacy_test/test_checkpoint_saver.py +++ b/test/legacy_test/test_checkpoint_saver.py @@ -15,7 +15,7 @@ import unittest from paddle.distributed.fleet.utils.fs import HDFSClient -from paddle.fluid.incubate.checkpoint.checkpoint_saver import CheckpointSaver +from paddle.base.incubate.checkpoint.checkpoint_saver import CheckpointSaver class CheckpointerSaverTest(unittest.TestCase): diff --git a/test/legacy_test/test_cholesky_op.py b/test/legacy_test/test_cholesky_op.py index ecb59fa5c4c50..daacb067cdff8 100644 --- a/test/legacy_test/test_cholesky_op.py +++ b/test/legacy_test/test_cholesky_op.py @@ -20,8 +20,8 @@ from gradient_checker import grad_check import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core @skip_check_grad_ci( @@ -61,9 +61,9 @@ def test_check_output(self): self.check_output() def 
test_check_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda() and (not core.is_compiled_with_rocm()): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -71,8 +71,8 @@ def test_check_grad(self): def func(self, place): # use small size since Jacobian gradients is time consuming root_data = self.root_data[..., :3, :3] - prog = fluid.Program() - with fluid.program_guard(prog): + prog = base.Program() + with base.program_guard(prog): root = paddle.create_parameter( dtype=root_data.dtype, shape=root_data.shape ) @@ -98,7 +98,7 @@ def init_config(self): class TestDygraph(unittest.TestCase): def test_dygraph(self): if core.is_compiled_with_rocm(): - paddle.disable_static(place=fluid.CPUPlace()) + paddle.disable_static(place=base.CPUPlace()) else: paddle.disable_static() a = np.random.rand(3, 3) @@ -110,12 +110,12 @@ def test_dygraph(self): class TestCholeskySingularAPI(unittest.TestCase): def setUp(self): - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] if core.is_compiled_with_cuda() and (not core.is_compiled_with_rocm()): - self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) def check_static_result(self, place, with_out=False): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input = paddle.static.data( name="input", shape=[4, 4], dtype="float64" ) @@ -123,10 +123,10 @@ def check_static_result(self, place, with_out=False): input_np = np.zeros([4, 4]).astype("float64") - exe = fluid.Executor(place) + exe = base.Executor(place) try: fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": input_np}, fetch_list=[result], ) @@ -141,14 +141,14 @@ def test_static(self): def test_dygraph(self): for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.array( [ [[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[10, 11, 12], [13, 14, 15], [16, 17, 18]], ] ).astype("float64") - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) try: result = paddle.cholesky(input) except RuntimeError as ex: diff --git a/test/legacy_test/test_cholesky_solve_op.py b/test/legacy_test/test_cholesky_solve_op.py index 37268c84bd4a3..413119ecc7587 100644 --- a/test/legacy_test/test_cholesky_solve_op.py +++ b/test/legacy_test/test_cholesky_solve_op.py @@ -23,8 +23,8 @@ from eager_op_test import OpTest import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard paddle.enable_static() @@ -171,7 +171,7 @@ def setUp(self): def check_static_result(self, place): paddle.enable_static() - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): x = paddle.static.data(name="x", shape=[10, 2], dtype=self.dtype) y = paddle.static.data(name="y", shape=[10, 10], dtype=self.dtype) z = paddle.linalg.cholesky_solve(x, y, upper=self.upper) @@ -185,9 +185,9 @@ def check_static_result(self, place): z_np = cholesky_solution(umat, x_np, upper=self.upper) z2_np = scipy_cholesky_solution(umat, x_np, upper=self.upper) - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x": x_np, "y": umat}, fetch_list=[z], ) @@ -243,11 +243,11 @@ def test_errors(self): 
paddle.enable_static() with program_guard(Program(), Program()): # The input type of solve_op must be Variable. - x1 = fluid.create_lod_tensor( - np.array([[-1]]), [[1]], fluid.CPUPlace() + x1 = base.create_lod_tensor( + np.array([[-1]]), [[1]], base.CPUPlace() ) - y1 = fluid.create_lod_tensor( - np.array([[-1]]), [[1]], fluid.CPUPlace() + y1 = base.create_lod_tensor( + np.array([[-1]]), [[1]], base.CPUPlace() ) self.assertRaises(TypeError, paddle.linalg.cholesky_solve, x1, y1) diff --git a/test/legacy_test/test_chunk_op.py b/test/legacy_test/test_chunk_op.py index aea019e630f44..f682ab0c80b05 100644 --- a/test/legacy_test/test_chunk_op.py +++ b/test/legacy_test/test_chunk_op.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import Program, program_guard +from paddle import base +from paddle.base import Program, program_guard class TestChunkOpError(unittest.TestCase): @@ -52,7 +52,7 @@ def test_axis_type_tensor(): self.assertRaises(TypeError, test_axis_type_tensor) - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): def test_0_chunks_tensor(): x = paddle.uniform([1, 1, 1], dtype='float32') @@ -63,7 +63,7 @@ def test_0_chunks_tensor(): class API_TestChunk(unittest.TestCase): def test_out(self): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): data1 = paddle.static.data( 'data1', shape=[4, 6, 6], dtype='float64' ) @@ -88,7 +88,7 @@ def test_out(self): class API_TestChunk1(unittest.TestCase): def test_out(self): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): data1 = paddle.static.data( 'data1', shape=[4, 6, 6], dtype='float64' ) @@ -109,10 +109,10 @@ def test_out(self): class API_TestDygraphChunk(unittest.TestCase): def test_out1(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_1 = np.random.random([4, 6, 6]).astype("int32") # input is a variable which shape is [4, 6, 6] - input = fluid.dygraph.to_variable(input_1) + input = base.dygraph.to_variable(input_1) x0, x1, x2 = paddle.chunk(input, chunks=3, axis=1) x0_out = x0.numpy() x1_out = x1.numpy() @@ -123,10 +123,10 @@ def test_out1(self): np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05) def test_out2(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_1 = np.random.random([4, 6, 6]).astype("bool") # input is a variable which shape is [4, 6, 6] - input = fluid.dygraph.to_variable(input_1) + input = base.dygraph.to_variable(input_1) x0, x1, x2 = paddle.chunk(input, chunks=3, axis=1) x0_out = x0.numpy() x1_out = x1.numpy() @@ -137,10 +137,10 @@ def test_out2(self): np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05) def test_axis_tensor_input(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_1 = np.random.random([4, 6, 6]).astype("int32") # input is a variable which shape is [4, 6, 6] - input = fluid.dygraph.to_variable(input_1) + input = base.dygraph.to_variable(input_1) num1 = paddle.full(shape=[1], fill_value=1, dtype='int32') x0, x1, x2 = paddle.chunk(input, chunks=3, axis=num1) x0_out = x0.numpy() diff --git a/test/legacy_test/test_class_center_sample_op.py b/test/legacy_test/test_class_center_sample_op.py index 8e064731e76b9..ffc7350e0f703 100644 --- a/test/legacy_test/test_class_center_sample_op.py +++ b/test/legacy_test/test_class_center_sample_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, paddle_static_guard import paddle -from paddle.fluid 
import Program, core, program_guard +from paddle.base import Program, core, program_guard def class_center_sample_numpy(label, classes_list, num_samples): @@ -131,9 +131,9 @@ def setUp(self): self.initParams() np.random.seed(self.seed) paddle.framework.random._manual_program_seed(2021) - self.places = [paddle.fluid.CPUPlace()] + self.places = [paddle.base.CPUPlace()] if core.is_compiled_with_cuda(): - self.places.append(paddle.fluid.CUDAPlace(0)) + self.places.append(paddle.base.CUDAPlace(0)) def initParams(self): self.batch_size = 10 @@ -173,9 +173,9 @@ def check_static_result(self, place): ) = class_center_sample_numpy( label_np, [self.num_classes], self.num_samples ) - exe = paddle.fluid.Executor(place) + exe = paddle.base.Executor(place) [remapped_label_res, sampled_class_index_res] = exe.run( - paddle.fluid.default_main_program(), + paddle.base.default_main_program(), feed={'label': label_np}, fetch_list=[remapped_label, sampled_class_index], ) @@ -192,7 +192,7 @@ def test_dynamic(self): self.check_dynamic_result(place=place) def check_dynamic_result(self, place): - with paddle.fluid.dygraph.guard(place): + with paddle.base.dygraph.guard(place): label_np = np.random.randint( 0, self.num_classes, (self.batch_size,), dtype=self.dtype ) @@ -230,9 +230,9 @@ class TestClassCenterSampleAPIError(unittest.TestCase): def setUp(self): self.initParams() np.random.seed(self.seed) - self.places = [paddle.fluid.CPUPlace()] + self.places = [paddle.base.CPUPlace()] if core.is_compiled_with_cuda(): - self.places.append(paddle.fluid.CUDAPlace(0)) + self.places.append(paddle.base.CUDAPlace(0)) def initParams(self): self.batch_size = 20 @@ -247,7 +247,7 @@ def init_dtype(self): def test_dynamic_errors(self): def test_num_samples(): for place in self.places: - with paddle.fluid.dygraph.guard(place): + with paddle.base.dygraph.guard(place): label_np = np.random.randint( 0, self.num_classes, @@ -270,9 +270,9 @@ class TestClassCenterSampleAPIError1(unittest.TestCase): def setUp(self): self.initParams() np.random.seed(self.seed) - self.places = [paddle.fluid.CPUPlace()] + self.places = [paddle.base.CPUPlace()] if core.is_compiled_with_cuda(): - self.places.append(paddle.fluid.CUDAPlace(0)) + self.places.append(paddle.base.CUDAPlace(0)) def initParams(self): self.batch_size = 5 @@ -287,7 +287,7 @@ def init_dtype(self): def test_dynamic_errors(self): def test_empty_label(): for place in self.places: - with paddle.fluid.dygraph.guard(place): + with paddle.base.dygraph.guard(place): label = paddle.to_tensor(np.array([], dtype=self.dtype)) ( @@ -299,7 +299,7 @@ def test_empty_label(): def test_group_value(): for place in self.places: - with paddle.fluid.dygraph.guard(place): + with paddle.base.dygraph.guard(place): label_np = np.random.randint( 0, self.num_classes, diff --git a/test/legacy_test/test_clip_by_norm_op.py b/test/legacy_test/test_clip_by_norm_op.py index da78ee681aac0..8319255bc925d 100644 --- a/test/legacy_test/test_clip_by_norm_op.py +++ b/test/legacy_test/test_clip_by_norm_op.py @@ -19,7 +19,7 @@ from op import Operator import paddle -from paddle.fluid import core +from paddle.base import core from paddle.nn import clip diff --git a/test/legacy_test/test_clip_op.py b/test/legacy_test/test_clip_op.py index b807d01ada068..354bc1ae95cad 100644 --- a/test/legacy_test/test_clip_op.py +++ b/test/legacy_test/test_clip_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle 
import base +from paddle.base import Program, core, program_guard class TestClipOp(OpTest): @@ -277,11 +277,11 @@ def test_clip(self): max = paddle.static.data(name='max', shape=[1], dtype='float32') place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) out_1 = self._executed_api(images, min=min, max=max) out_2 = self._executed_api(images, min=0.2, max=0.9) @@ -314,7 +314,7 @@ def test_clip(self): res10, res11, ) = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={ "image": data, "min": np.array([0.2]).astype('float32'), @@ -357,9 +357,9 @@ def test_clip(self): def test_clip_dygraph(self): paddle.disable_static() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) paddle.disable_static(place) data_shape = [1, 9, 9, 4] @@ -442,7 +442,7 @@ def test_fp16(self): min = paddle.static.data(name='min1', shape=[1], dtype='float16') max = paddle.static.data(name='max1', shape=[1], dtype='float16') out = paddle.clip(images, min, max) - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): place = paddle.CUDAPlace(0) exe = paddle.static.Executor(place) res1 = exe.run( diff --git a/test/legacy_test/test_coalesce_tensor_op.py b/test/legacy_test/test_coalesce_tensor_op.py index a4fa649694ca7..46be0e58bec2e 100644 --- a/test/legacy_test/test_coalesce_tensor_op.py +++ b/test/legacy_test/test_coalesce_tensor_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def coalesce_tensor_eager_api( @@ -59,7 +59,7 @@ class TestAllocContinuousSpace(OpTest): def setUp(self): self.python_api = coalesce_tensor_eager_api self.op_type = "coalesce_tensor" - self.dtype, self.fluid_dtype = self.init_dtype() + self.dtype, self.base_dtype = self.init_dtype() self.attrs = self.init_attr() self.Inputs = self.init_input() self.Outputs, self.FusedOutput = self.init_output( @@ -86,7 +86,7 @@ def init_attr(self): "copy_data": True, "set_constant": False, "constant": 0.0, - "dtype": self.fluid_dtype, + "dtype": self.base_dtype, } def init_output(self, input_list, set_constant, constant): @@ -114,9 +114,9 @@ def init_output(self, input_list, set_constant, constant): return outputs, coalesce_tensor_var def verify_output(self, place): - with fluid.dygraph.base.guard(place=place): + with base.dygraph.base.guard(place=place): tensor_input = [ - fluid.dygraph.base.to_variable(value=data[1]) + base.dygraph.base.to_variable(value=data[1]) for data in self.inputs["Input"] ] eager_outputs, eager_fused_output = coalesce_tensor_eager_api( @@ -177,7 +177,7 @@ def init_attr(self): "copy_data": False, "set_constant": True, "constant": 0.5, - "dtype": self.fluid_dtype, + "dtype": self.base_dtype, "user_defined_size_of_dtype": 2, } diff --git a/test/legacy_test/test_collective_api_base.py b/test/legacy_test/test_collective_api_base.py index b736b1bd82694..375653d5e7b19 100644 --- a/test/legacy_test/test_collective_api_base.py +++ b/test/legacy_test/test_collective_api_base.py @@ -28,8 +28,8 @@ import paddle import paddle.distributed as dist -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def 
create_bool_test_data(shape=None, seed=None): @@ -119,8 +119,8 @@ def get_model( ) def run_trainer(self, args): - train_prog = fluid.Program() - startup_prog = fluid.Program() + train_prog = base.Program() + startup_prog = base.Program() endpoints = args["endpoints"].split(",") rank = args["trainerid"] current_endpoint = args["currentendpoint"] @@ -131,14 +131,14 @@ def run_trainer(self, args): paddle.distributed.init_parallel_env() if args['backend'] == 'nccl': device_id = int(os.getenv("FLAGS_selected_gpus", "0")) - place = fluid.CUDAPlace( + place = base.CUDAPlace( device_id - ) # if args.use_gpu else fluid.CPUPlace() + ) # if args.use_gpu else base.CPUPlace() elif args['backend'] == 'bkcl': device_id = int(os.getenv("FLAGS_selected_xpus", "0")) - place = fluid.XPUPlace(device_id) + place = base.XPUPlace(device_id) else: - place = fluid.CPUPlace() + place = base.CPUPlace() indata = create_test_data( shape=(10, 1000), dtype=args["dtype"], seed=os.getpid() ) @@ -154,7 +154,7 @@ def run_trainer(self, args): if args["use_comm_context"] else self.get_model(train_prog, startup_prog, rank) ) - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(startup_prog) fetch_list = [] for elem in result: diff --git a/test/legacy_test/test_collective_base.py b/test/legacy_test/test_collective_base.py index 453d5abec61da..52bf62591a7b5 100644 --- a/test/legacy_test/test_collective_base.py +++ b/test/legacy_test/test_collective_base.py @@ -24,9 +24,9 @@ import numpy as np -import paddle.fluid.unique_name as nameGen -from paddle import fluid -from paddle.fluid import core +import paddle.base.unique_name as nameGen +from paddle import base +from paddle.base import core class TestCollectiveRunnerBase: @@ -104,8 +104,8 @@ def initCommunicator( ) def run_trainer(self, args): - train_prog = fluid.Program() - startup_prog = fluid.Program() + train_prog = base.Program() + startup_prog = base.Program() endpoints = args["endpoints"].split(",") rank = args["trainerid"] current_endpoint = args["currentendpoint"] @@ -116,10 +116,10 @@ def run_trainer(self, args): self.rank = rank result = self.get_model(train_prog, startup_prog) device_id = int(os.getenv("FLAGS_selected_gpus", "0")) - place = fluid.CUDAPlace( + place = base.CUDAPlace( device_id - ) # if args.use_gpu else fluid.CPUPlace() - exe = fluid.Executor(place) + ) # if args.use_gpu else base.CPUPlace() + exe = base.Executor(place) exe.run(startup_prog) np.random.seed(os.getpid()) indata = np.random.random((10, 1000)) diff --git a/test/legacy_test/test_communicator_geo.py b/test/legacy_test/test_communicator_geo.py index 1b1713c0601eb..e2b84702c8e94 100644 --- a/test/legacy_test/test_communicator_geo.py +++ b/test/legacy_test/test_communicator_geo.py @@ -21,7 +21,7 @@ import numpy import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet from paddle.distributed.fleet.base import role_maker from paddle.distributed.utils.launch_utils import find_free_ports @@ -39,7 +39,7 @@ def net(self): emb = paddle.static.nn.embedding( input=x1, size=[10000, 10], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( name="embedding", initializer=paddle.nn.initializer.Constant(value=0.01), ), @@ -78,8 +78,8 @@ def run_pserver(self, role, strategy): fleet.run_server() def run_trainer(self, role, strategy): - place = fluid.core.CPUPlace() - exe = fluid.Executor(place) + place = base.core.CPUPlace() + exe = base.Executor(place) fleet.init(role) avg_cost, x, z, y = self.net() @@ -87,15 +87,15 @@ def run_trainer(self, role, 
strategy): optimizer = fleet.distributed_optimizer(optimizer, strategy) optimizer.minimize(avg_cost) - exe.run(fluid.default_startup_program()) + exe.run(base.default_startup_program()) fleet.init_worker() train_reader = paddle.batch(self.fake_reader(), batch_size=24) - feeder = fluid.DataFeeder(place=place, feed_list=[x, z, y]) + feeder = base.DataFeeder(place=place, feed_list=[x, z, y]) for batch_id, data in enumerate(train_reader()): exe.run( - fluid.default_main_program(), + base.default_main_program(), feed=feeder.feed(data), fetch_list=[], ) diff --git a/test/legacy_test/test_communicator_ps_gpu.py b/test/legacy_test/test_communicator_ps_gpu.py index f487366e51693..4afc5eef90fee 100644 --- a/test/legacy_test/test_communicator_ps_gpu.py +++ b/test/legacy_test/test_communicator_ps_gpu.py @@ -21,7 +21,7 @@ paddle.enable_static() -from paddle import fluid +from paddle import base from paddle.distributed import fleet from paddle.distributed.fleet.base import role_maker @@ -80,7 +80,7 @@ def test_communicator_ps_gpu(self): dataset.load_into_memory(is_shuffle=True) os.environ["TEST_MODE"] = "1" - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) exe.run(startup_program) main_program._fleet_opt = {"stat_var_names": [x.name]} fleet.init_worker() diff --git a/test/legacy_test/test_compare_op.py b/test/legacy_test/test_compare_op.py index 2f4e12f2b4e40..85bdc6cd11838 100755 --- a/test/legacy_test/test_compare_op.py +++ b/test/legacy_test/test_compare_op.py @@ -19,8 +19,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard def create_test_class(op_type, typename, callback): @@ -73,7 +73,7 @@ def setUp(self): self.input_x = np.array([1, 2, 3, 4]).astype(np.int64) self.input_y = np.array([1, 3, 2, 4]).astype(np.int64) self.real_result = callback(self.input_x, self.input_y) - self.place = fluid.CPUPlace() + self.place = base.CPUPlace() if core.is_compiled_with_cuda(): self.place = paddle.CUDAPlace(0) @@ -84,7 +84,7 @@ def test_api(self): y = paddle.static.data(name='y', shape=[4], dtype='int64') op = eval("paddle.%s" % (self.op_type)) out = op(x, y) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) (res,) = exe.run( feed={"x": self.input_x, "y": self.input_y}, fetch_list=[out], @@ -99,7 +99,7 @@ def test_api_float(self): y = paddle.static.data(name='y', shape=[], dtype='int64') op = eval("paddle.%s" % (self.op_type)) out = op(x, y) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) (res,) = exe.run( feed={"x": self.input_x, "y": 1.0}, fetch_list=[out] ) @@ -481,8 +481,8 @@ def test_errors(self): with program_guard(Program(), Program()): # The input x and y of compare_op must be Variable. 
x = paddle.static.data(name='x', shape=[-1, 1], dtype="float32") - y = fluid.create_lod_tensor( - numpy.array([[-1]]), [[1]], fluid.CPUPlace() + y = base.create_lod_tensor( + numpy.array([[-1]]), [[1]], base.CPUPlace() ) self.assertRaises(TypeError, paddle.greater_equal, x, y) @@ -490,33 +490,33 @@ def test_errors(self): class API_TestElementwise_Equal(unittest.TestCase): def test_api(self): paddle.enable_static() - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): label = paddle.assign(np.array([3, 3], dtype="int32")) limit = paddle.assign(np.array([3, 2], dtype="int32")) out = paddle.equal(x=label, y=limit) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) (res,) = exe.run(fetch_list=[out]) self.assertEqual((res == np.array([True, False])).all(), True) - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): label = paddle.assign(np.array([3, 3], dtype="int32")) limit = paddle.assign(np.array([3, 3], dtype="int32")) out = paddle.equal(x=label, y=limit) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) (res,) = exe.run(fetch_list=[out]) self.assertEqual((res == np.array([True, True])).all(), True) def test_api_fp16(self): paddle.enable_static() - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): label = paddle.to_tensor([3, 3], dtype="float16") limit = paddle.to_tensor([3, 2], dtype="float16") out = paddle.equal(x=label, y=limit) if core.is_compiled_with_cuda(): place = paddle.CUDAPlace(0) - exe = fluid.Executor(place) + exe = base.Executor(place) (res,) = exe.run(fetch_list=[out]) self.assertEqual((res == np.array([True, False])).all(), True) @@ -546,7 +546,7 @@ def test_place_1(self): label = paddle.assign(np.array([3, 3], dtype="int32")) limit = paddle.assign(np.array([3, 2], dtype="int32")) out = paddle.less_than(label, limit) - exe = fluid.Executor(place) + exe = base.Executor(place) (res,) = exe.run(fetch_list=[out]) self.assertEqual((res == np.array([False, False])).all(), True) diff --git a/test/legacy_test/test_compiled_program.py b/test/legacy_test/test_compiled_program.py index ac29c33174fb7..f2a135e025f4e 100644 --- a/test/legacy_test/test_compiled_program.py +++ b/test/legacy_test/test_compiled_program.py @@ -19,8 +19,8 @@ from test_imperative_base import new_program_scope import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestCompiledProgram(unittest.TestCase): @@ -34,17 +34,17 @@ def setUp(self): paddle.seed(self.seed) paddle.framework.random._manual_program_seed(self.seed) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) loss = simple_fc_net() - exe.run(fluid.default_startup_program()) + exe.run(base.default_startup_program()) (loss_data,) = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"image": self.img, "label": self.label}, fetch_list=[loss.name], ) @@ -55,15 +55,15 @@ def test_compiled_program_base(self): paddle.seed(self.seed) paddle.framework.random._manual_program_seed(self.seed) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else 
base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) loss = simple_fc_net() - exe.run(fluid.default_startup_program()) - compiled_prog = fluid.CompiledProgram(fluid.default_main_program()) + exe.run(base.default_startup_program()) + compiled_prog = base.CompiledProgram(base.default_main_program()) (loss_data,) = exe.run( compiled_prog, @@ -75,7 +75,7 @@ def test_compiled_program_base(self): class TestCompiledProgramError(unittest.TestCase): def test_program_or_graph_error(self): - self.assertRaises(TypeError, fluid.CompiledProgram, "program") + self.assertRaises(TypeError, base.CompiledProgram, "program") def build_simple_model(self): img = paddle.static.data( @@ -89,14 +89,14 @@ def build_simple_model(self): avg_loss = paddle.mean(loss) def compile_program(self): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): # build model self.build_simple_model() # compile program - program = fluid.default_main_program() - compiled_program = fluid.CompiledProgram(program) - scope = fluid.global_scope() - place = fluid.CPUPlace() + program = base.default_main_program() + compiled_program = base.CompiledProgram(program) + scope = base.global_scope() + place = base.CPUPlace() compiled_program._compile(scope, place) return compiled_program, scope, place @@ -110,7 +110,7 @@ def test_compile_place_error(self): # need create different place if core.is_compiled_with_cuda(): compiled_program, scope, _ = self.compile_program() - new_place = fluid.CUDAPlace(0) + new_place = base.CUDAPlace(0) with self.assertRaises(ValueError): compiled_program._compile(scope, new_place) diff --git a/test/legacy_test/test_complex_abs.py b/test/legacy_test/test_complex_abs.py index 11c0fbc2b735e..845308b9bb026 100644 --- a/test/legacy_test/test_complex_abs.py +++ b/test/legacy_test/test_complex_abs.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest import paddle -import paddle.fluid.dygraph as dg +import paddle.base.dygraph as dg class TestComplexAbsOp(OpTest): @@ -30,7 +30,7 @@ def setUp(self): self.shape = (2, 3, 4, 5) self.init_input_output() - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(self.x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(self.x)} self.outputs = {'Out': self.out} def init_input_output(self): @@ -58,7 +58,7 @@ def setUp(self): self.shape = (2, 3, 4, 5) self.init_input_output() - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(self.x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(self.x)} self.outputs = {'Out': self.out} def init_input_output(self): @@ -102,7 +102,7 @@ def setUp(self): self.shape = (2, 3, 4, 5) self.init_input_output() - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(self.x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(self.x)} self.outputs = {'Out': self.out} def init_input_output(self): diff --git a/test/legacy_test/test_complex_elementwise_layers.py b/test/legacy_test/test_complex_elementwise_layers.py index cee39ea2d363e..fe23f28715535 100644 --- a/test/legacy_test/test_complex_elementwise_layers.py +++ b/test/legacy_test/test_complex_elementwise_layers.py @@ -18,8 +18,8 @@ from numpy.random import random as rand import paddle -import paddle.fluid.dygraph as dg -from paddle import fluid +import paddle.base.dygraph as dg +from paddle import base paddle_apis = { "add": paddle.add, @@ -33,7 +33,7 @@ class TestComplexElementwiseLayers(unittest.TestCase): def setUp(self): self._dtypes = ["float32", "float64"] self._places = [paddle.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): + if 
base.core.is_compiled_with_cuda(): self._places.append(paddle.CUDAPlace(0)) def paddle_calc(self, x, y, op, place): diff --git a/test/legacy_test/test_complex_getitem.py b/test/legacy_test/test_complex_getitem.py index 2214e9d7cc240..a3b249212e93d 100644 --- a/test/legacy_test/test_complex_getitem.py +++ b/test/legacy_test/test_complex_getitem.py @@ -16,15 +16,15 @@ import numpy as np -import paddle.fluid.dygraph as dg -from paddle import fluid +import paddle.base.dygraph as dg +from paddle import base class TestComplexGetitemLayer(unittest.TestCase): def setUp(self): - self._places = [fluid.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): - self._places.append(fluid.CUDAPlace(0)) + self._places = [base.CPUPlace()] + if base.core.is_compiled_with_cuda(): + self._places.append(base.CUDAPlace(0)) def test_case1(self): x_np = np.random.randn(2, 3, 4) + 1j * np.random.randn(2, 3, 4) diff --git a/test/legacy_test/test_complex_grad_accumulated.py b/test/legacy_test/test_complex_grad_accumulated.py index 0502a914da997..bf76f1d248fa5 100644 --- a/test/legacy_test/test_complex_grad_accumulated.py +++ b/test/legacy_test/test_complex_grad_accumulated.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core class Optimization_ex1(paddle.nn.Layer): diff --git a/test/legacy_test/test_complex_kron.py b/test/legacy_test/test_complex_kron.py index 8588afa842313..fe6a14bf56b89 100644 --- a/test/legacy_test/test_complex_kron.py +++ b/test/legacy_test/test_complex_kron.py @@ -17,8 +17,8 @@ import numpy as np import paddle -import paddle.fluid.dygraph as dg -from paddle import fluid +import paddle.base.dygraph as dg +from paddle import base class ComplexKronTestCase(unittest.TestCase): @@ -30,7 +30,7 @@ def __init__(self, methodName='runTest', x=None, y=None): def setUp(self): self.ref_result = np.kron(self.x, self.y) self._places = [paddle.CPUPlace()] - if fluid.is_compiled_with_cuda(): + if base.is_compiled_with_cuda(): self._places.append(paddle.CUDAPlace(0)) def runTest(self): diff --git a/test/legacy_test/test_complex_matmul.py b/test/legacy_test/test_complex_matmul.py index c6a89bb3cecbe..36c0e43511189 100644 --- a/test/legacy_test/test_complex_matmul.py +++ b/test/legacy_test/test_complex_matmul.py @@ -17,16 +17,16 @@ import numpy as np import paddle -import paddle.fluid.dygraph as dg -from paddle import fluid +import paddle.base.dygraph as dg +from paddle import base class TestComplexMatMulLayer(unittest.TestCase): def setUp(self): self._dtypes = ["float32", "float64"] - self._places = [fluid.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): - self._places.append(fluid.CUDAPlace(0)) + self._places = [base.CPUPlace()] + if base.core.is_compiled_with_cuda(): + self._places.append(base.CUDAPlace(0)) def compare_by_basic_api(self, x, y, np_result): for place in self._places: diff --git a/test/legacy_test/test_complex_op.py b/test/legacy_test/test_complex_op.py index e0e509d6d41de..910e8945afda4 100644 --- a/test/legacy_test/test_complex_op.py +++ b/test/legacy_test/test_complex_op.py @@ -19,7 +19,7 @@ import paddle from paddle import static -from paddle.fluid import dygraph +from paddle.base import dygraph paddle.enable_static() diff --git a/test/legacy_test/test_complex_reshape.py b/test/legacy_test/test_complex_reshape.py index d3a3f09ef8691..14eb0fd75e729 100644 --- a/test/legacy_test/test_complex_reshape.py +++ b/test/legacy_test/test_complex_reshape.py @@ -17,15 +17,15 @@ import numpy as np import paddle -import paddle.fluid.dygraph as 
dg -from paddle import fluid +import paddle.base.dygraph as dg +from paddle import base class TestComplexReshape(unittest.TestCase): def setUp(self): self._dtypes = ["float32", "float64"] self._places = [paddle.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self._places.append(paddle.CUDAPlace(0)) def test_shape_norm_dims(self): diff --git a/test/legacy_test/test_complex_simplenet.py b/test/legacy_test/test_complex_simplenet.py index 34c99234f4ff9..acedc7a3170a8 100644 --- a/test/legacy_test/test_complex_simplenet.py +++ b/test/legacy_test/test_complex_simplenet.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core class Optimization_ex1(paddle.nn.Layer): diff --git a/test/legacy_test/test_complex_sum_layer.py b/test/legacy_test/test_complex_sum_layer.py index 9b09fa192575f..2ecbdbef4cfaf 100644 --- a/test/legacy_test/test_complex_sum_layer.py +++ b/test/legacy_test/test_complex_sum_layer.py @@ -18,15 +18,15 @@ from numpy.random import random as rand import paddle -import paddle.fluid.dygraph as dg -from paddle import fluid, tensor +import paddle.base.dygraph as dg +from paddle import base, tensor class TestComplexSumLayer(unittest.TestCase): def setUp(self): self._dtypes = ["float32", "float64"] self._places = [paddle.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self._places.append(paddle.CUDAPlace(0)) def test_complex_basic_api(self): diff --git a/test/legacy_test/test_complex_trace_layer.py b/test/legacy_test/test_complex_trace_layer.py index c59917a49194a..3df257fe4e916 100644 --- a/test/legacy_test/test_complex_trace_layer.py +++ b/test/legacy_test/test_complex_trace_layer.py @@ -17,16 +17,16 @@ import numpy as np from numpy.random import random as rand -import paddle.fluid.dygraph as dg -from paddle import fluid, tensor +import paddle.base.dygraph as dg +from paddle import base, tensor class TestComplexTraceLayer(unittest.TestCase): def setUp(self): self._dtypes = ["float32", "float64"] - self._places = [fluid.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): - self._places.append(fluid.CUDAPlace(0)) + self._places = [base.CPUPlace()] + if base.core.is_compiled_with_cuda(): + self._places.append(base.CUDAPlace(0)) def test_basic_api(self): for dtype in self._dtypes: diff --git a/test/legacy_test/test_complex_transpose.py b/test/legacy_test/test_complex_transpose.py index 86c2fbc345e66..5ce77e6e00715 100644 --- a/test/legacy_test/test_complex_transpose.py +++ b/test/legacy_test/test_complex_transpose.py @@ -17,15 +17,15 @@ import numpy as np import paddle -import paddle.fluid.dygraph as dg -from paddle import fluid +import paddle.base.dygraph as dg +from paddle import base class TestComplexTransposeLayer(unittest.TestCase): def setUp(self): self._dtypes = ["float32", "float64"] self._places = [paddle.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self._places.append(paddle.CUDAPlace(0)) def test_transpose_by_complex_api(self): diff --git a/test/legacy_test/test_complex_variable.py b/test/legacy_test/test_complex_variable.py index 98dbaa2ba500c..83c4f634a78c5 100644 --- a/test/legacy_test/test_complex_variable.py +++ b/test/legacy_test/test_complex_variable.py @@ -17,10 +17,10 @@ import numpy as np import paddle -import paddle.fluid.dygraph as dg -from paddle.fluid import core -from paddle.fluid.data_feeder import convert_dtype -from paddle.fluid.framework import 
convert_np_dtype_to_dtype_ +import paddle.base.dygraph as dg +from paddle.base import core +from paddle.base.data_feeder import convert_dtype +from paddle.base.framework import convert_np_dtype_to_dtype_ class TestComplexVariable(unittest.TestCase): diff --git a/test/legacy_test/test_complex_view_op.py b/test/legacy_test/test_complex_view_op.py index 81a5dcba620e9..6055a9781419a 100644 --- a/test/legacy_test/test_complex_view_op.py +++ b/test/legacy_test/test_complex_view_op.py @@ -19,7 +19,7 @@ import paddle from paddle import static -from paddle.fluid import dygraph +from paddle.base import dygraph paddle.enable_static() diff --git a/test/legacy_test/test_concat_op.py b/test/legacy_test/test_concat_op.py index db848d7b5cff4..286f59797caed 100644 --- a/test/legacy_test/test_concat_op.py +++ b/test/legacy_test/test_concat_op.py @@ -20,8 +20,8 @@ from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard class TestConcatOp(OpTest): @@ -419,11 +419,11 @@ def test_errors(self): paddle.concat(x1) # The item in input must be Variable. - x2 = fluid.create_lod_tensor( - np.array([[-1]]), [[1]], fluid.CPUPlace() + x2 = base.create_lod_tensor( + np.array([[-1]]), [[1]], base.CPUPlace() ) - x3 = fluid.create_lod_tensor( - np.array([[-1]]), [[1]], fluid.CPUPlace() + x3 = base.create_lod_tensor( + np.array([[-1]]), [[1]], base.CPUPlace() ) self.assertRaises(TypeError, paddle.concat, [x2]) # The input dtype of concat_op must be float16, float32, float64, int32, int64. @@ -449,7 +449,7 @@ def test_input_same_dtype(): class TestConcatAPI(unittest.TestCase): - def test_fluid_api(self): + def test_base_api(self): paddle.enable_static() x_1 = paddle.static.data( shape=[None, 1, 4, 5], dtype='int32', name='x_1' @@ -466,9 +466,9 @@ def test_fluid_api(self): out_2 = paddle.concat([x_2, x_3], axis=positive_1_int32) out_3 = paddle.concat([x_2, x_3], axis=positive_1_int64) - exe = fluid.Executor(place=fluid.CPUPlace()) + exe = base.Executor(place=base.CPUPlace()) [res_1, res_2, res_3] = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x_1": input_2, "x_2": input_2, "x_3": input_3}, fetch_list=[out_1, out_2, out_3], ) @@ -539,11 +539,11 @@ def test_imperative(self): def test_errors(self): with program_guard(Program(), Program()): # The item in input must be Variable. - x2 = fluid.create_lod_tensor( - np.array([[-1]]), [[1]], fluid.CPUPlace() + x2 = base.create_lod_tensor( + np.array([[-1]]), [[1]], base.CPUPlace() ) - x3 = fluid.create_lod_tensor( - np.array([[-1]]), [[1]], fluid.CPUPlace() + x3 = base.create_lod_tensor( + np.array([[-1]]), [[1]], base.CPUPlace() ) self.assertRaises(TypeError, paddle.concat, [x2]) # The input dtype of concat_op must be float16, float32, float64, int32, int64. 
@@ -579,16 +579,16 @@ def setUp(self): self.input_shape = [2, 3] self.x = np.random.random(self.input_shape).astype("float32") self.place = ( - fluid.CUDAPlace(0) - if fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.is_compiled_with_cuda() + else base.CPUPlace() ) - def set_program(self, use_fluid_api): + def set_program(self, use_base_api): paddle.enable_static() - if use_fluid_api: - self.program = fluid.Program() - with fluid.program_guard(self.program): + if use_base_api: + self.program = base.Program() + with base.program_guard(self.program): input = paddle.assign(self.x) tensor_array = paddle.tensor.create_array(dtype='float32') zero = paddle.tensor.fill_constant( @@ -614,16 +614,16 @@ def set_program(self, use_fluid_api): self.out_var = paddle.concat(tensor_array, axis=self.axis) - def test_fluid_api(self): - self._run_static_mode(use_fluid_api=True) + def test_base_api(self): + self._run_static_mode(use_base_api=True) def test_paddle_api(self): - self._run_static_mode(use_fluid_api=False) + self._run_static_mode(use_base_api=False) - def _run_static_mode(self, use_fluid_api): - self.set_program(use_fluid_api) + def _run_static_mode(self, use_base_api): + self.set_program(use_base_api) self.assertTrue(self.out_var.shape[self.axis] == -1) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) res = exe.run(self.program, fetch_list=self.out_var) np.testing.assert_array_equal( res[0], np.concatenate([self.x] * self.iter_num, axis=self.axis) @@ -664,9 +664,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -705,9 +705,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) diff --git a/test/legacy_test/test_cond.py b/test/legacy_test/test_cond.py index 037ec5c5cce5c..55e6f8116cf33 100644 --- a/test/legacy_test/test_cond.py +++ b/test/legacy_test/test_cond.py @@ -18,10 +18,10 @@ from simple_nets import batchnorm_fc_with_inputs, simple_fc_net_with_inputs import paddle -from paddle import fluid -from paddle.fluid import core, framework -from paddle.fluid.backward import append_backward -from paddle.fluid.framework import Program, program_guard +from paddle import base +from paddle.base import core, framework +from paddle.base.backward import append_backward +from paddle.base.framework import Program, program_guard np.random.seed(123) @@ -63,11 +63,11 @@ def false_func(): # out is one tensor place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) (ret,) = exe.run(main_program, fetch_list=[out.name]) np.testing.assert_allclose( np.asarray(ret), np.full((3, 2), -1, np.int32), rtol=1e-05 @@ -101,11 +101,11 @@ def false_func(): # out is one tensor place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) (ret,) = exe.run(main_program, fetch_list=[out.name]) np.testing.assert_allclose(np.asarray(ret), np.array(2), rtol=1e-05) self.assertEqual(ret.shape, ()) @@ 
-138,11 +138,11 @@ def false_func(): # out is a tensor place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) (ret,) = exe.run(main_program, fetch_list=[out.name]) np.testing.assert_allclose( np.asarray(ret), np.full((3, 3), 2, np.int32), rtol=1e-05 @@ -170,12 +170,12 @@ def test_0d_tensor_backward(self): append_backward(out) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) ret = exe.run(main_program, fetch_list=[out.name, a.grad_name]) np.testing.assert_allclose( np.asarray(ret[0]), np.array(2.0), rtol=1e-05 @@ -244,11 +244,11 @@ def false_func(): # out is a tuple containing 2 tensors place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) ret = exe.run(main_program, fetch_list=out) np.testing.assert_allclose( np.asarray(ret[0]), np.full((1, 2), 1, np.int32), rtol=1e-05 @@ -290,11 +290,11 @@ def false_func(a, i): pred, lambda: true_func(a, i), lambda: false_func(a, i) ) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) for feed_i in range(5): expected_a = 7 * (feed_i + 1) if feed_i % 2 == 0 else 8 - feed_i (ret,) = exe.run( @@ -335,11 +335,11 @@ def false_func(): out2 = paddle.static.nn.cond(pred, None, false_func) out3 = paddle.static.nn.cond(pred, true_func, None) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) for feed_i in range(5): # Test that output is None is runnable exe.run(main_program, feed={'i': np.full((1), feed_i, np.int32)}) @@ -409,9 +409,9 @@ def func_return_two_tensors(): def test_extremely_simple_net_with_op_in_condition(self): paddle.enable_static() - main_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(main_program, startup_program): + main_program = base.Program() + startup_program = base.Program() + with base.program_guard(main_program, startup_program): a = paddle.tensor.fill_constant( shape=[1], dtype='float32', value=1.23 ) @@ -424,11 +424,11 @@ def test_extremely_simple_net_with_op_in_condition(self): append_backward(out) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) ret = exe.run( main_program, fetch_list=[out, b, a.grad_name, b.grad_name] ) @@ -488,11 +488,11 @@ def greater_equal_branch(i, a): append_backward(mean) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) for feed_i in range(0, 10): expected_a = 2.0 * feed_i if feed_i < 5: @@ -557,11 +557,11 @@ def greater_equal_branch(i, a): append_backward(out) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) ret = exe.run( main_program, fetch_list=[out.name, i.grad_name], 
@@ -577,10 +577,10 @@ def greater_equal_branch(i, a): def test_cond_op_in_condition(self): paddle.enable_static() - main_program = fluid.Program() - startup_program = fluid.Program() + main_program = base.Program() + startup_program = base.Program() - with fluid.program_guard(main_program, startup_program): + with base.program_guard(main_program, startup_program): a = paddle.tensor.fill_constant( shape=[1], dtype='float32', value=1.23 ) @@ -605,11 +605,11 @@ def test_cond_op_in_condition(self): append_backward(out) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) ret = exe.run(main_program, fetch_list=[out, a.grad_name, b.grad_name]) # Note: fill_constant has loss of precision, so we assertAlmostEqual. self.assertAlmostEqual(ret[0][0], 1.5252) @@ -638,8 +638,8 @@ def backward_value_helper(self, cond_func, use_cuda): i = paddle.static.data(name="i", shape=[1], dtype='int32') loss = cond_func(i, img, label) append_backward(loss) - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() + exe = base.Executor(place) exe.run(startup_program) num_devices = 1 @@ -698,8 +698,8 @@ def add_optimizer_helper(self, cond_func, use_cuda): optimizer = paddle.optimizer.SGD(learning_rate=0.1) optimizer.minimize(loss) - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() + exe = base.Executor(place) exe.run(startup_program) for feed_i in range(0, 10): diff --git a/test/legacy_test/test_conditional_block.py b/test/legacy_test/test_conditional_block.py index a5383dd6d10f9..90a8200375c65 100644 --- a/test/legacy_test/test_conditional_block.py +++ b/test/legacy_test/test_conditional_block.py @@ -17,17 +17,17 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.static import Executor, append_backward from paddle.static.nn.control_flow import ConditionalBlock class ConditionalBlockTest(unittest.TestCase): def test_forward(self): - main_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(main_program, startup_program): + main_program = base.Program() + startup_program = base.Program() + with base.program_guard(main_program, startup_program): data = paddle.static.data(name='X', shape=[-1, 1], dtype='float32') data.stop_gradient = False cond = ConditionalBlock(inputs=[data]) @@ -56,9 +56,9 @@ def test_forward(self): class TestConditionalBlockOpInferShape(unittest.TestCase): def test_infer_shape(self): - main_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(main_program, startup_program): + main_program = base.Program() + startup_program = base.Program() + with base.program_guard(main_program, startup_program): global_block = main_program.global_block() sub_block = main_program._create_block() main_program._rollback() diff --git a/test/legacy_test/test_conj_op.py b/test/legacy_test/test_conj_op.py index 8e9f90d08516e..4cad7bc276fc1 100644 --- a/test/legacy_test/test_conj_op.py +++ b/test/legacy_test/test_conj_op.py @@ -23,9 +23,9 @@ from eager_op_test import OpTest, convert_float_to_uint16 from numpy.random import random as rand -import paddle.fluid.dygraph as dg +import paddle.base.dygraph as dg from 
paddle import static -from paddle.fluid import core +from paddle.base import core paddle.enable_static() @@ -46,7 +46,7 @@ def init_input_output(self): ).astype(self.dtype) out = np.conj(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} def test_check_output(self): diff --git a/test/legacy_test/test_const_value.py b/test/legacy_test/test_const_value.py index b26892f8a3607..0bfa04bdb0ea4 100644 --- a/test/legacy_test/test_const_value.py +++ b/test/legacy_test/test_const_value.py @@ -14,7 +14,7 @@ import unittest -from paddle.fluid import framework +from paddle.base import framework class ConstantTest(unittest.TestCase): diff --git a/test/legacy_test/test_context_manager.py b/test/legacy_test/test_context_manager.py index 43ddca5fb83e1..afdd54b5ede57 100644 --- a/test/legacy_test/test_context_manager.py +++ b/test/legacy_test/test_context_manager.py @@ -14,23 +14,23 @@ import unittest -from paddle import fluid +from paddle import base class TestContextManagerRaiseException(unittest.TestCase): # When exception raised in 'with' context, we should safely exit the context def test_func1(self): def foo(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): print("raise error in context manager") raise TypeError("error") self.assertRaises(TypeError, foo) def test_func2(self): - # After test_func1 executed, if fluid.dygraph.guard() in test_func1 safely exited, - # fluid.in_dygraph_mode() should be false. - self.assertEqual(fluid.in_dygraph_mode(), False) + # After test_func1 executed, if base.dygraph.guard() in test_func1 safely exited, + # base.in_dygraph_mode() should be false. + self.assertEqual(base.in_dygraph_mode(), False) if __name__ == '__main__': diff --git a/test/legacy_test/test_conv1d_layer.py b/test/legacy_test/test_conv1d_layer.py index 5e3ce31f80640..e284c25568abf 100644 --- a/test/legacy_test/test_conv1d_layer.py +++ b/test/legacy_test/test_conv1d_layer.py @@ -17,9 +17,9 @@ import numpy as np import paddle -import paddle.fluid.dygraph as dg +import paddle.base.dygraph as dg import paddle.nn.functional as F -from paddle import fluid, nn +from paddle import base, nn class Conv1DTestCase(unittest.TestCase): @@ -84,10 +84,10 @@ def setUp(self): self.bias = None def functional(self, place): - main = fluid.Program() - start = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, start): + main = base.Program() + start = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, start): input_shape = ( (-1, self.num_channels, -1) if not self.channel_last @@ -115,7 +115,7 @@ def functional(self, place): feed_dict = {"input": self.input, "weight": self.weight} if self.bias is not None: feed_dict["bias"] = self.bias - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(start) (y_np,) = exe.run(main, feed=feed_dict, fetch_list=[y_var]) return y_np @@ -147,17 +147,17 @@ def _test_equivalence(self, place): np.testing.assert_array_almost_equal(result1, result2) def runTest(self): - place = fluid.CPUPlace() + place = base.CPUPlace() self._test_equivalence(place) - if fluid.core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) self._test_equivalence(place) class Conv1DErrorTestCase(Conv1DTestCase): def runTest(self): - place = fluid.CPUPlace() + place = base.CPUPlace() with dg.guard(place): with self.assertRaises(ValueError): self.paddle_nn_layer() @@ -165,7 +165,7 
@@ def runTest(self): class Conv1DTypeErrorTestCase(Conv1DTestCase): def runTest(self): - place = fluid.CPUPlace() + place = base.CPUPlace() with dg.guard(place): with self.assertRaises(TypeError): self.paddle_nn_layer() diff --git a/test/legacy_test/test_conv1d_transpose_layer.py b/test/legacy_test/test_conv1d_transpose_layer.py index 386b1fbc1a424..a5809cab6b6c0 100644 --- a/test/legacy_test/test_conv1d_transpose_layer.py +++ b/test/legacy_test/test_conv1d_transpose_layer.py @@ -17,9 +17,9 @@ import numpy as np import paddle -import paddle.fluid.dygraph as dg +import paddle.base.dygraph as dg import paddle.nn.functional as F -from paddle import fluid, nn +from paddle import base, nn class Conv1DTransposeTestCase(unittest.TestCase): @@ -90,10 +90,10 @@ def setUp(self): self.bias = None def functional(self, place): - main = fluid.Program() - start = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, start): + main = base.Program() + start = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, start): input_shape = ( (-1, self.in_channels, -1) if not self.channel_last @@ -123,7 +123,7 @@ def functional(self, place): feed_dict = {"input": self.input, "weight": self.weight} if self.bias is not None: feed_dict["bias"] = self.bias - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(start) (y_np,) = exe.run(main, feed=feed_dict, fetch_list=[y_var]) return y_np @@ -155,17 +155,17 @@ def _test_equivalence(self, place): np.testing.assert_array_almost_equal(result1, result2) def runTest(self): - place = fluid.CPUPlace() + place = base.CPUPlace() self._test_equivalence(place) - if fluid.core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) self._test_equivalence(place) class Conv1DTransposeErrorTestCase(Conv1DTransposeTestCase): def runTest(self): - place = fluid.CPUPlace() + place = base.CPUPlace() with dg.guard(place): with self.assertRaises(ValueError): self.paddle_nn_layer() diff --git a/test/legacy_test/test_conv2d_api.py b/test/legacy_test/test_conv2d_api.py index 418cb52776d4f..1b02bf550f6c4 100644 --- a/test/legacy_test/test_conv2d_api.py +++ b/test/legacy_test/test_conv2d_api.py @@ -19,8 +19,8 @@ import paddle paddle.enable_static() -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestConv2DAPI(unittest.TestCase): @@ -318,7 +318,7 @@ def run_1(): ) class TestConv2DEnviron(unittest.TestCase): def run1(self, place): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): inputs = paddle.static.data( shape=[2, 3, 5, 5], name="inputs", @@ -334,17 +334,17 @@ def run1(self, place): groups=1, data_format="NCHW", ) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + exe.run(base.default_startup_program()) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"inputs": self.input_np}, fetch_list=[result], ) def run2(self, place): - with fluid.dygraph.guard(place): - inputs = fluid.dygraph.to_variable(self.input_np) + with base.dygraph.guard(place): + inputs = base.dygraph.to_variable(self.input_np) conv = paddle.nn.Conv2D( in_channels=3, out_channels=4, @@ -360,9 +360,9 @@ def run_all(self, place): def test_environ(self): self.input_np = np.random.random([2, 3, 5, 5]).astype("float32") for place in [paddle.CPUPlace(), paddle.CUDAPlace(0)]: 
- fluid.set_flags({'FLAGS_conv2d_disable_cudnn': False}) + base.set_flags({'FLAGS_conv2d_disable_cudnn': False}) self.run_all(place) - fluid.set_flags({'FLAGS_conv2d_disable_cudnn': True}) + base.set_flags({'FLAGS_conv2d_disable_cudnn': True}) self.run_all(place) diff --git a/test/legacy_test/test_conv2d_fusion_op.py b/test/legacy_test/test_conv2d_fusion_op.py index 2aee55d2c0439..6aff6cb38b967 100644 --- a/test/legacy_test/test_conv2d_fusion_op.py +++ b/test/legacy_test/test_conv2d_fusion_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest from test_conv2d_op import conv2d_forward_naive -from paddle.fluid import core +from paddle.base import core def create_test_padding_SAME_class(parent): @@ -116,16 +116,16 @@ def setUp(self): self.output = self.output.astype(self.dtype) self.inputs = { - 'Input': OpTest.np_dtype_to_fluid_dtype(input), - 'Filter': OpTest.np_dtype_to_fluid_dtype(filter), - 'Bias': OpTest.np_dtype_to_fluid_dtype(bias), + 'Input': OpTest.np_dtype_to_base_dtype(input), + 'Filter': OpTest.np_dtype_to_base_dtype(filter), + 'Bias': OpTest.np_dtype_to_base_dtype(bias), } if self.add_residual_data: residual_data = np.random.random(self.output.shape).astype( self.dtype ) - self.inputs['ResidualData'] = OpTest.np_dtype_to_fluid_dtype( + self.inputs['ResidualData'] = OpTest.np_dtype_to_base_dtype( residual_data ) self.output += residual_data diff --git a/test/legacy_test/test_conv2d_layer.py b/test/legacy_test/test_conv2d_layer.py index 3c9a18419c2cb..4290a7352afed 100644 --- a/test/legacy_test/test_conv2d_layer.py +++ b/test/legacy_test/test_conv2d_layer.py @@ -17,9 +17,9 @@ import numpy as np import paddle -import paddle.fluid.dygraph as dg +import paddle.base.dygraph as dg import paddle.nn.functional as F -from paddle import fluid, nn +from paddle import base, nn def _reverse_repeat_list(t, n): @@ -98,11 +98,11 @@ def setUp(self): else: self.bias = None - def fluid_layer(self, place): - main = fluid.Program() - start = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, start): + def base_layer(self, place): + main = base.Program() + start = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, start): input_shape = ( (-1, -1, -1, self.num_channels) if self.channel_last @@ -141,16 +141,16 @@ def fluid_layer(self, place): ) feed_dict = {"input": self.input} - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(start) (y_np,) = exe.run(main, feed=feed_dict, fetch_list=[y_var]) return y_np def functional(self, place): - main = fluid.Program() - start = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, start): + main = base.Program() + start = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, start): input_shape = ( (-1, -1, -1, self.num_channels) if self.channel_last @@ -190,7 +190,7 @@ def functional(self, place): feed_dict = {"input": self.input, "weight": self.weight} if self.bias is not None: feed_dict["bias"] = self.bias - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(start) (y_np,) = exe.run(main, feed=feed_dict, fetch_list=[y_var]) return y_np @@ -219,8 +219,8 @@ def paddle_nn_layer(self): return y_np, t1 def _test_equivalence(self, place): - place = fluid.CPUPlace() - result1 = self.fluid_layer(place) + place = base.CPUPlace() + result1 = self.base_layer(place) result2 = self.functional(place) with dg.guard(place): result3, g1 = self.paddle_nn_layer() @@ -228,17 +228,17 @@ def _test_equivalence(self, place): 
np.testing.assert_array_almost_equal(result2, result3) def runTest(self): - place = fluid.CPUPlace() + place = base.CPUPlace() self._test_equivalence(place) - if fluid.core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) self._test_equivalence(place) class Conv2DErrorTestCase(Conv2DTestCase): def runTest(self): - place = fluid.CPUPlace() + place = base.CPUPlace() with dg.guard(place): with self.assertRaises(ValueError): self.paddle_nn_layer() diff --git a/test/legacy_test/test_conv2d_op.py b/test/legacy_test/test_conv2d_op.py index b7c83f2c6ec71..3d4ffe78b935c 100644 --- a/test/legacy_test/test_conv2d_op.py +++ b/test/legacy_test/test_conv2d_op.py @@ -19,8 +19,8 @@ from testsuite import create_op import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard def conv2d_forward_naive( @@ -467,14 +467,14 @@ def setUp(self): 'Filter': convert_float_to_uint16(filter), } self.inputs_fp32 = { - 'Input': OpTest.np_dtype_to_fluid_dtype(input), - 'Filter': OpTest.np_dtype_to_fluid_dtype(filter), + 'Input': OpTest.np_dtype_to_base_dtype(input), + 'Filter': OpTest.np_dtype_to_base_dtype(filter), } else: output = output.astype(self.dtype) self.inputs = { - 'Input': OpTest.np_dtype_to_fluid_dtype(input), - 'Filter': OpTest.np_dtype_to_fluid_dtype(filter), + 'Input': OpTest.np_dtype_to_base_dtype(input), + 'Filter': OpTest.np_dtype_to_base_dtype(filter), } self.attrs = { @@ -725,8 +725,8 @@ def test_errors(self): def test_Variable(): # the input of conv2d must be Variable. - x1 = fluid.create_lod_tensor( - np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + x1 = base.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], base.CPUPlace() ) paddle.static.nn.conv2d(x1, 1, 1) @@ -798,8 +798,8 @@ def setUp(self): output = output.astype(self.dtype) self.inputs = { - 'Input': OpTest.np_dtype_to_fluid_dtype(input), - 'Filter': OpTest.np_dtype_to_fluid_dtype(filter), + 'Input': OpTest.np_dtype_to_base_dtype(input), + 'Filter': OpTest.np_dtype_to_base_dtype(filter), } self.attrs = { 'strides': self.stride, diff --git a/test/legacy_test/test_conv2d_op_depthwise_conv.py b/test/legacy_test/test_conv2d_op_depthwise_conv.py index 0354446f1d8bb..19e5b335768b0 100644 --- a/test/legacy_test/test_conv2d_op_depthwise_conv.py +++ b/test/legacy_test/test_conv2d_op_depthwise_conv.py @@ -31,7 +31,7 @@ ) from testsuite import create_op -from paddle.fluid import core +from paddle.base import core # ----------------TestDepthwiseConv ----- diff --git a/test/legacy_test/test_conv2d_transpose_layer.py b/test/legacy_test/test_conv2d_transpose_layer.py index 6a7f77fdcdfc8..78634d5124929 100644 --- a/test/legacy_test/test_conv2d_transpose_layer.py +++ b/test/legacy_test/test_conv2d_transpose_layer.py @@ -17,9 +17,9 @@ import numpy as np import paddle -import paddle.fluid.dygraph as dg +import paddle.base.dygraph as dg import paddle.nn.functional as F -from paddle import fluid, nn +from paddle import base, nn class Conv2DTransposeTestCase(unittest.TestCase): @@ -89,11 +89,11 @@ def setUp(self): else: self.bias = None - def fluid_layer(self, place): - main = fluid.Program() - start = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, start): + def base_layer(self, place): + main = base.Program() + start = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, start): input_shape = ( 
(-1, -1, -1, self.num_channels) if self.channel_last @@ -122,16 +122,16 @@ def fluid_layer(self, place): data_format=self.data_format, ) feed_dict = {"input": self.input} - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(start) (y_np,) = exe.run(main, feed=feed_dict, fetch_list=[y_var]) return y_np def functional(self, place): - main = fluid.Program() - start = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, start): + main = base.Program() + start = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, start): input_shape = ( (-1, -1, -1, self.num_channels) if self.channel_last @@ -167,7 +167,7 @@ def functional(self, place): feed_dict = {"input": self.input, "weight": self.weight} if self.bias is not None: feed_dict["bias"] = self.bias - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(start) (y_np,) = exe.run(main, feed=feed_dict, fetch_list=[y_var]) return y_np @@ -199,9 +199,9 @@ def paddle_nn_layer(self): return y_np def _test_equivalence(self, place): - place = fluid.CPUPlace() + place = base.CPUPlace() - result1 = self.fluid_layer(place) + result1 = self.base_layer(place) result2 = self.functional(place) with dg.guard(place): @@ -211,17 +211,17 @@ def _test_equivalence(self, place): np.testing.assert_array_almost_equal(result2, result3) def runTest(self): - place = fluid.CPUPlace() + place = base.CPUPlace() self._test_equivalence(place) - if fluid.core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) self._test_equivalence(place) class Conv2DTransposeErrorTestCase(Conv2DTransposeTestCase): def runTest(self): - place = fluid.CPUPlace() + place = base.CPUPlace() with dg.guard(place): with self.assertRaises(ValueError): self.paddle_nn_layer() diff --git a/test/legacy_test/test_conv2d_transpose_op.py b/test/legacy_test/test_conv2d_transpose_op.py index b7fe99f85c3c2..28478c886ab7d 100644 --- a/test/legacy_test/test_conv2d_transpose_op.py +++ b/test/legacy_test/test_conv2d_transpose_op.py @@ -25,8 +25,8 @@ from test_attribute_var import UnittestBase from testsuite import create_op -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard def conv2dtranspose_forward_naive(input_, filter_, attrs): @@ -1172,10 +1172,10 @@ def test_case1(self): place = core.CUDAPlace(0) else: place = core.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + exe.run(base.default_startup_program()) results = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"data1": data1_np, "data2": data2_np}, fetch_list=[out1, out2, out3, out4, out5, out6, out7], return_numpy=True, diff --git a/test/legacy_test/test_conv3d_layer.py b/test/legacy_test/test_conv3d_layer.py index d3b1dd6960106..da1a21edbc435 100644 --- a/test/legacy_test/test_conv3d_layer.py +++ b/test/legacy_test/test_conv3d_layer.py @@ -17,9 +17,9 @@ import numpy as np import paddle -import paddle.fluid.dygraph as dg +import paddle.base.dygraph as dg import paddle.nn.functional as F -from paddle import fluid, nn +from paddle import base, nn class Conv3DTestCase(unittest.TestCase): @@ -85,11 +85,11 @@ def setUp(self): else: self.bias = None - def fluid_layer(self, place): - main = fluid.Program() - start = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, start): + def 
base_layer(self, place): + main = base.Program() + start = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, start): input_shape = ( (-1, -1, -1, -1, self.num_channels) if self.channel_last @@ -116,16 +116,16 @@ def fluid_layer(self, place): data_format=self.data_format, ) feed_dict = {"input": self.input} - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(start) (y_np,) = exe.run(main, feed=feed_dict, fetch_list=[y_var]) return y_np def functional(self, place): - main = fluid.Program() - start = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, start): + main = base.Program() + start = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, start): input_shape = ( (-1, -1, -1, -1, self.num_channels) if self.channel_last @@ -153,7 +153,7 @@ def functional(self, place): feed_dict = {"input": self.input, "weight": self.weight} if self.bias is not None: feed_dict["bias"] = self.bias - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(start) (y_np,) = exe.run(main, feed=feed_dict, fetch_list=[y_var]) return y_np @@ -181,8 +181,8 @@ def paddle_nn_layer(self): return y_np, t1 def _test_equivalence(self, place): - place = fluid.CPUPlace() - result1 = self.fluid_layer(place) + place = base.CPUPlace() + result1 = self.base_layer(place) result2 = self.functional(place) with dg.guard(place): result3, g1 = self.paddle_nn_layer() @@ -190,17 +190,17 @@ def _test_equivalence(self, place): np.testing.assert_array_almost_equal(result2, result3) def runTest(self): - place = fluid.CPUPlace() + place = base.CPUPlace() self._test_equivalence(place) - if fluid.core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) self._test_equivalence(place) class Conv3DErrorTestCase(Conv3DTestCase): def runTest(self): - place = fluid.CPUPlace() + place = base.CPUPlace() with dg.guard(place): with self.assertRaises(ValueError): self.paddle_nn_layer() diff --git a/test/legacy_test/test_conv3d_op.py b/test/legacy_test/test_conv3d_op.py index 0d96766323afa..5ae2fb3a0f888 100644 --- a/test/legacy_test/test_conv3d_op.py +++ b/test/legacy_test/test_conv3d_op.py @@ -24,7 +24,7 @@ from testsuite import create_op import paddle -from paddle.fluid import core +from paddle.base import core def conv3d_forward_naive( @@ -420,14 +420,14 @@ def setUp(self): 'Filter': convert_float_to_uint16(filter), } self.inputs_fp32 = { - 'Input': OpTest.np_dtype_to_fluid_dtype(input), - 'Filter': OpTest.np_dtype_to_fluid_dtype(filter), + 'Input': OpTest.np_dtype_to_base_dtype(input), + 'Filter': OpTest.np_dtype_to_base_dtype(filter), } else: output = output.astype(self.dtype) self.inputs = { - 'Input': OpTest.np_dtype_to_fluid_dtype(input), - 'Filter': OpTest.np_dtype_to_fluid_dtype(filter), + 'Input': OpTest.np_dtype_to_base_dtype(input), + 'Filter': OpTest.np_dtype_to_base_dtype(filter), } self.attrs = { @@ -751,8 +751,8 @@ def setUp(self): ).astype(self.dtype) self.inputs = { - 'Input': OpTest.np_dtype_to_fluid_dtype(input), - 'Filter': OpTest.np_dtype_to_fluid_dtype(filter), + 'Input': OpTest.np_dtype_to_base_dtype(input), + 'Filter': OpTest.np_dtype_to_base_dtype(filter), } self.attrs = { 'strides': self.stride, diff --git a/test/legacy_test/test_conv3d_transpose_layer.py b/test/legacy_test/test_conv3d_transpose_layer.py index 90ddc1ecfdb00..ecfad7710e4ba 100644 --- a/test/legacy_test/test_conv3d_transpose_layer.py +++ 
b/test/legacy_test/test_conv3d_transpose_layer.py @@ -17,9 +17,9 @@ import numpy as np import paddle -import paddle.fluid.dygraph as dg +import paddle.base.dygraph as dg import paddle.nn.functional as F -from paddle import fluid, nn +from paddle import base, nn class Conv3DTransposeTestCase(unittest.TestCase): @@ -87,11 +87,11 @@ def setUp(self): -1, 1, size=(self.num_filters,) ).astype(self.dtype) - def fluid_layer(self, place): - main = fluid.Program() - start = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, start): + def base_layer(self, place): + main = base.Program() + start = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, start): input_shape = ( (-1, -1, -1, -1, self.num_channels) if self.channel_last @@ -119,16 +119,16 @@ def fluid_layer(self, place): data_format=self.data_format, ) feed_dict = {"input": self.input} - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(start) (y_np,) = exe.run(main, feed=feed_dict, fetch_list=[y_var]) return y_np def functional(self, place): - main = fluid.Program() - start = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, start): + main = base.Program() + start = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, start): input_shape = ( (-1, -1, -1, -1, self.num_channels) if self.channel_last @@ -157,7 +157,7 @@ def functional(self, place): feed_dict = {"input": self.input, "weight": self.weight} if self.bias is not None: feed_dict["bias"] = self.bias - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(start) (y_np,) = exe.run(main, feed=feed_dict, fetch_list=[y_var]) return y_np @@ -182,8 +182,8 @@ def paddle_nn_layer(self): return y_np def _test_equivalence(self, place): - place = fluid.CPUPlace() - result1 = self.fluid_layer(place) + place = base.CPUPlace() + result1 = self.base_layer(place) result2 = self.functional(place) with dg.guard(place): result3 = self.paddle_nn_layer() @@ -191,17 +191,17 @@ def _test_equivalence(self, place): np.testing.assert_array_almost_equal(result2, result3) def runTest(self): - place = fluid.CPUPlace() + place = base.CPUPlace() self._test_equivalence(place) - if fluid.core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) self._test_equivalence(place) class Conv3DTransposeErrorTestCase(Conv3DTransposeTestCase): def runTest(self): - place = fluid.CPUPlace() + place = base.CPUPlace() with dg.guard(place): with self.assertRaises(ValueError): self.paddle_nn_layer() diff --git a/test/legacy_test/test_conv3d_transpose_op.py b/test/legacy_test/test_conv3d_transpose_op.py index c0814754cc231..e8cdb7ddf42c7 100644 --- a/test/legacy_test/test_conv3d_transpose_op.py +++ b/test/legacy_test/test_conv3d_transpose_op.py @@ -21,7 +21,7 @@ paddle.enable_static() from eager_op_test import OpTest, copy_bits_from_float_to_uint16 -from paddle.fluid import core +from paddle.base import core def convert_float_to_uint16(float_list, data_format="NCHW"): diff --git a/test/legacy_test/test_conv3d_transpose_part2_op.py b/test/legacy_test/test_conv3d_transpose_part2_op.py index 54d31a7bbd22b..f691e623e4254 100644 --- a/test/legacy_test/test_conv3d_transpose_part2_op.py +++ b/test/legacy_test/test_conv3d_transpose_part2_op.py @@ -22,8 +22,8 @@ ) import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class 
TestWithSymmetricPad_NHWC(TestConv3DTransposeOp): @@ -175,10 +175,10 @@ def test_case1(self): place = core.CUDAPlace(0) else: place = core.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + exe.run(base.default_startup_program()) results = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"data1": data1_np, "data2": data2_np}, fetch_list=[out1, out2, out3, out4, out5, out6, out7], return_numpy=True, diff --git a/test/legacy_test/test_conv_nn_grad.py b/test/legacy_test/test_conv_nn_grad.py index 0b9a1d5da8625..7fb2ba25372cc 100644 --- a/test/legacy_test/test_conv_nn_grad.py +++ b/test/legacy_test/test_conv_nn_grad.py @@ -19,8 +19,8 @@ from decorator_helper import prog_scope import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestConvDoubleGradCheck(unittest.TestCase): @@ -28,12 +28,12 @@ class TestConvDoubleGradCheck(unittest.TestCase): def func(self, place): shape = [2, 4, 3, 3] eps = 0.005 - dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 + dtype = np.float32 if base.core.is_compiled_with_rocm() else np.float64 x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv2d(x, 2, 1, groups=1, bias_attr=False) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) - w = fluid.default_main_program().global_block().all_parameters() + w = base.default_main_program().global_block().all_parameters() w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) @@ -42,10 +42,10 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -55,12 +55,12 @@ class TestConvDoubleGradCheckTest0(unittest.TestCase): def func(self, place): shape = [2, 4, 3, 3] eps = 0.005 - dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 + dtype = np.float32 if base.core.is_compiled_with_rocm() else np.float64 x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv2d(x, 2, 1, bias_attr=False) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) - w = fluid.default_main_program().global_block().all_parameters() + w = base.default_main_program().global_block().all_parameters() w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) @@ -69,9 +69,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -81,12 +81,12 @@ class TestConvDoubleGradCheckTest1(unittest.TestCase): def func(self, place): shape = [2, 3, 3, 3] eps = 0.005 - dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 + dtype = np.float32 if base.core.is_compiled_with_rocm() else np.float64 x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv2d(x, 2, 1, padding=1, bias_attr=False) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) - w = fluid.default_main_program().global_block().all_parameters() + w = base.default_main_program().global_block().all_parameters() w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) @@ -95,9 +95,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if 
core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -107,12 +107,12 @@ class TestConv3DDoubleGradCheck(unittest.TestCase): def func(self, place): shape = [2, 4, 3, 4, 2] eps = 0.005 - dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 + dtype = np.float32 if base.core.is_compiled_with_rocm() else np.float64 x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv3d(x, 2, 1, bias_attr=False) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) - w = fluid.default_main_program().global_block().all_parameters() + w = base.default_main_program().global_block().all_parameters() w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) @@ -121,10 +121,10 @@ def func(self, place): ) def test_grad(self): - # places = [fluid.CPUPlace()] + # places = [base.CPUPlace()] places = [] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -134,12 +134,12 @@ class TestConv3DDoubleGradCheckTest1(unittest.TestCase): def func(self, place): shape = [2, 4, 5, 3, 2] eps = 0.005 - dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 + dtype = np.float32 if base.core.is_compiled_with_rocm() else np.float64 x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv3d(x, 2, 1, padding=1, bias_attr=False) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) - w = fluid.default_main_program().global_block().all_parameters() + w = base.default_main_program().global_block().all_parameters() w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) @@ -148,9 +148,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -160,7 +160,7 @@ class TestConv2DoubleGradCheck_AsyPadding(unittest.TestCase): def func(self, place): shape = [2, 2, 3, 3] eps = 0.005 - dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 + dtype = np.float32 if base.core.is_compiled_with_rocm() else np.float64 x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv2d( input=x, @@ -172,7 +172,7 @@ def func(self, place): ) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) - w = fluid.default_main_program().global_block().all_parameters() + w = base.default_main_program().global_block().all_parameters() w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) @@ -181,9 +181,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -193,7 +193,7 @@ class TestConv2DoubleGradCheck_PaddingSAME(unittest.TestCase): def func(self, place): shape = [2, 2, 3, 3] eps = 0.005 - dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 + dtype = np.float32 if base.core.is_compiled_with_rocm() else np.float64 x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv2d( input=x, @@ -205,7 +205,7 @@ def func(self, place): ) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) - w = fluid.default_main_program().global_block().all_parameters() + w = base.default_main_program().global_block().all_parameters() w_arr = [] for p in w: 
w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) @@ -214,9 +214,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -226,7 +226,7 @@ class TestConv2DoubleGradCheck_PaddingVALID(unittest.TestCase): def func(self, place): shape = [2, 2, 3, 3] eps = 0.005 - dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 + dtype = np.float32 if base.core.is_compiled_with_rocm() else np.float64 x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv2d( input=x, @@ -238,7 +238,7 @@ def func(self, place): ) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) - w = fluid.default_main_program().global_block().all_parameters() + w = base.default_main_program().global_block().all_parameters() w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) @@ -247,9 +247,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -259,7 +259,7 @@ class TestConv2DoubleGradCheck_ChannelLast(unittest.TestCase): def func(self, place): shape = [2, 2, 3, 3] eps = 0.005 - dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 + dtype = np.float32 if base.core.is_compiled_with_rocm() else np.float64 x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv2d( input=x, @@ -273,7 +273,7 @@ def func(self, place): ) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) - w = fluid.default_main_program().global_block().all_parameters() + w = base.default_main_program().global_block().all_parameters() w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) @@ -282,9 +282,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -294,7 +294,7 @@ class TestConv2DoubleGradCheck_ChannelLast_AsyPadding(unittest.TestCase): def func(self, place): shape = [2, 2, 3, 3] eps = 0.005 - dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 + dtype = np.float32 if base.core.is_compiled_with_rocm() else np.float64 x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv2d( input=x, @@ -308,7 +308,7 @@ def func(self, place): ) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) - w = fluid.default_main_program().global_block().all_parameters() + w = base.default_main_program().global_block().all_parameters() w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) @@ -317,9 +317,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -329,7 +329,7 @@ class TestConv3DDoubleGradCheck_AsyPadding(unittest.TestCase): def func(self, place): shape = [2, 2, 2, 2, 2] eps = 0.005 - dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 + dtype = np.float32 if base.core.is_compiled_with_rocm() else np.float64 x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv3d( input=x, @@ -341,7 +341,7 
@@ def func(self, place): ) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) - w = fluid.default_main_program().global_block().all_parameters() + w = base.default_main_program().global_block().all_parameters() w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) @@ -350,9 +350,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -362,7 +362,7 @@ class TestConv3DoubleGradCheck_PaddingSAME(unittest.TestCase): def func(self, place): shape = [2, 2, 2, 2, 2] eps = 0.005 - dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 + dtype = np.float32 if base.core.is_compiled_with_rocm() else np.float64 x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv3d( input=x, @@ -375,7 +375,7 @@ def func(self, place): ) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) - w = fluid.default_main_program().global_block().all_parameters() + w = base.default_main_program().global_block().all_parameters() w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) @@ -384,9 +384,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -396,7 +396,7 @@ class TestConv3DoubleGradCheck_PaddingVALID(unittest.TestCase): def func(self, place): shape = [2, 2, 3, 3, 2] eps = 0.005 - dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 + dtype = np.float32 if base.core.is_compiled_with_rocm() else np.float64 x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv3d( input=x, @@ -408,7 +408,7 @@ def func(self, place): ) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) - w = fluid.default_main_program().global_block().all_parameters() + w = base.default_main_program().global_block().all_parameters() w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) @@ -417,9 +417,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -429,7 +429,7 @@ class TestConv3DDoubleGradCheck_ChannelLast(unittest.TestCase): def func(self, place): shape = [2, 2, 2, 2, 3] eps = 0.005 - dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 + dtype = np.float32 if base.core.is_compiled_with_rocm() else np.float64 x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv3d( input=x, @@ -443,7 +443,7 @@ def func(self, place): ) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) - w = fluid.default_main_program().global_block().all_parameters() + w = base.default_main_program().global_block().all_parameters() w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) @@ -452,9 +452,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -464,7 +464,7 @@ class TestConv3DDoubleGradCheck_ChannelLast_AsyPadding(unittest.TestCase): def func(self, place): shape = [2, 2, 2, 2, 3] eps = 
0.005 - dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 + dtype = np.float32 if base.core.is_compiled_with_rocm() else np.float64 x = paddle.static.data('x', shape, dtype) y = paddle.static.nn.conv3d( input=x, @@ -478,7 +478,7 @@ def func(self, place): ) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) - w = fluid.default_main_program().global_block().all_parameters() + w = base.default_main_program().global_block().all_parameters() w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) @@ -487,9 +487,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -499,7 +499,7 @@ class TestDepthWiseConvDoubleGradCheck(unittest.TestCase): def func(self, place): shape = [2, 4, 3, 3] eps = 0.005 - dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 + dtype = np.float32 if base.core.is_compiled_with_rocm() else np.float64 x = paddle.static.data('x', shape, dtype) # condition of depthwise conv: @@ -511,7 +511,7 @@ def func(self, place): ) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) - w = fluid.default_main_program().global_block().all_parameters() + w = base.default_main_program().global_block().all_parameters() w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) @@ -522,7 +522,7 @@ def func(self, place): def test_grad(self): places = [] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -536,7 +536,7 @@ def func(self, place): x_shape = [2, 4, 3, 3] w_shape = [4, 1, 3, 3] eps = 0.005 - dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 + dtype = np.float32 if base.core.is_compiled_with_rocm() else np.float64 x = paddle.static.data('x', x_shape, dtype) w = paddle.static.data('w', w_shape, dtype) @@ -563,7 +563,7 @@ def func(self, place): def test_grad(self): places = [] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -577,7 +577,7 @@ def func(self, place): x_shape = [2, 3, 8, 8, 8] w_shape = [6, 3, 3, 3, 3] eps = 0.005 - dtype = np.float32 if fluid.core.is_compiled_with_rocm() else np.float64 + dtype = np.float32 if base.core.is_compiled_with_rocm() else np.float64 x = paddle.static.data('x', x_shape, dtype) w = paddle.static.data('w', w_shape, dtype) x.persistable = True @@ -596,7 +596,7 @@ def func(self, place): def test_grad(self): places = [] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) diff --git a/test/legacy_test/test_conv_transpose_nn_grad.py b/test/legacy_test/test_conv_transpose_nn_grad.py index 6ea52dcffef26..eacea0637c69b 100644 --- a/test/legacy_test/test_conv_transpose_nn_grad.py +++ b/test/legacy_test/test_conv_transpose_nn_grad.py @@ -19,8 +19,8 @@ from decorator_helper import prog_scope import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestConvTransposeDoubleGradCheck(unittest.TestCase): @@ -40,7 +40,7 @@ def func(self, place): ) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) - w = fluid.default_main_program().global_block().all_parameters() + w = base.default_main_program().global_block().all_parameters() 
w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) @@ -70,7 +70,7 @@ def test_grad(self): places = [] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -101,7 +101,7 @@ def func(self, place): ) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) - w = fluid.default_main_program().global_block().all_parameters() + w = base.default_main_program().global_block().all_parameters() w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) @@ -154,7 +154,7 @@ def func(self, place): ) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) - w = fluid.default_main_program().global_block().all_parameters() + w = base.default_main_program().global_block().all_parameters() w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) @@ -207,7 +207,7 @@ def func(self, place): ) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) - w = fluid.default_main_program().global_block().all_parameters() + w = base.default_main_program().global_block().all_parameters() w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) @@ -262,7 +262,7 @@ def func(self, place): ) x_arr = np.random.uniform(-1, 1, shape).astype(dtype) - w = fluid.default_main_program().global_block().all_parameters() + w = base.default_main_program().global_block().all_parameters() w_arr = [] for p in w: w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype)) diff --git a/test/legacy_test/test_corr.py b/test/legacy_test/test_corr.py index cc7f7c12abdd5..128a8cf6a1ad3 100644 --- a/test/legacy_test/test_corr.py +++ b/test/legacy_test/test_corr.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base np_minor_version = int((np.__version__).split('.')[1]) @@ -35,9 +35,9 @@ def setUp(self): def test_tensor_corr_default(self): typelist = ['float64', 'float32'] - places = [fluid.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places = [base.CPUPlace()] + if base.core.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) for idx, p in enumerate(places): if idx == 0: paddle.set_device('cpu') @@ -60,9 +60,9 @@ def test_tensor_corr_default(self): def test_tensor_corr_rowvar(self): typelist = ['float64', 'float32'] - places = [fluid.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places = [base.CPUPlace()] + if base.core.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) for idx, p in enumerate(places): if idx == 0: diff --git a/test/legacy_test/test_cosine_similarity_api.py b/test/legacy_test/test_cosine_similarity_api.py index c4c9e7ec14a8f..7fe78e42c7ab1 100644 --- a/test/legacy_test/test_cosine_similarity_api.py +++ b/test/legacy_test/test_cosine_similarity_api.py @@ -19,7 +19,7 @@ import paddle import paddle.nn.functional as F from paddle import nn -from paddle.fluid import ( +from paddle.base import ( Executor, Program, core, diff --git a/test/legacy_test/test_cost_model.py b/test/legacy_test/test_cost_model.py index c7594f27e5d5c..1529d3432cd09 100644 --- a/test/legacy_test/test_cost_model.py +++ b/test/legacy_test/test_cost_model.py @@ -16,7 +16,7 @@ import paddle from paddle.cost_model import CostModel -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/legacy_test/test_count_nonzero_api.py b/test/legacy_test/test_count_nonzero_api.py index 
741ed0aa0b87d..1b3c78dca3687 100644 --- a/test/legacy_test/test_count_nonzero_api.py +++ b/test/legacy_test/test_count_nonzero_api.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core np.random.seed(10) diff --git a/test/legacy_test/test_cov.py b/test/legacy_test/test_cov.py index f464e3f1cb63f..43beb24cb6d1a 100644 --- a/test/legacy_test/test_cov.py +++ b/test/legacy_test/test_cov.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base def numpy_cov(np_arr, rowvar=True, ddof=1, fweights=None, aweights=None): @@ -37,9 +37,9 @@ def setUp(self): def test_tensor_cov_default(self): typelist = ['float64'] - places = [fluid.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places = [base.CPUPlace()] + if base.core.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) for idx, p in enumerate(places): if idx == 0: @@ -60,9 +60,9 @@ def test_tensor_cov_default(self): def test_tensor_cov_rowvar(self): typelist = ['float64'] - places = [fluid.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places = [base.CPUPlace()] + if base.core.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) for idx, p in enumerate(places): if idx == 0: @@ -87,9 +87,9 @@ def test_tensor_cov_rowvar(self): def test_tensor_cov_ddof(self): typelist = ['float64'] - places = [fluid.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places = [base.CPUPlace()] + if base.core.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) for idx, p in enumerate(places): if idx == 0: @@ -114,9 +114,9 @@ def test_tensor_cov_ddof(self): def test_tensor_cov_fweights(self): typelist = ['float64'] - places = [fluid.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places = [base.CPUPlace()] + if base.core.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) for idx, p in enumerate(places): if idx == 0: @@ -145,9 +145,9 @@ def test_tensor_cov_fweights(self): def test_tensor_cov_aweights(self): typelist = ['float64'] - places = [fluid.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places = [base.CPUPlace()] + if base.core.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) for idx, p in enumerate(places): if idx == 0: @@ -176,9 +176,9 @@ def test_tensor_cov_aweights(self): def test_tensor_cov_weights(self): typelist = ['float64'] - places = [fluid.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places = [base.CPUPlace()] + if base.core.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) for idx, p in enumerate(places): if idx == 0: diff --git a/test/legacy_test/test_create_global_var.py b/test/legacy_test/test_create_global_var.py index 413666a5c8e30..d051929e84536 100644 --- a/test/legacy_test/test_create_global_var.py +++ b/test/legacy_test/test_create_global_var.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid import Program, program_guard +from paddle.base import Program, program_guard class TestCreateGlobalVarError(unittest.TestCase): diff --git a/test/legacy_test/test_create_parameter.py b/test/legacy_test/test_create_parameter.py index 0993160ea6a79..895247931ba39 100644 --- a/test/legacy_test/test_create_parameter.py +++ b/test/legacy_test/test_create_parameter.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from 
paddle.fluid import Program, program_guard +from paddle.base import Program, program_guard class TestCreateParameterError(unittest.TestCase): diff --git a/test/legacy_test/test_cross_entropy_loss.py b/test/legacy_test/test_cross_entropy_loss.py index 6070f5622af51..f0cbc7cae9684 100644 --- a/test/legacy_test/test_cross_entropy_loss.py +++ b/test/legacy_test/test_cross_entropy_loss.py @@ -19,8 +19,8 @@ from test_softmax_with_cross_entropy_op import cross_entropy import paddle -from paddle import fluid -from paddle.fluid import Program, program_guard +from paddle import base +from paddle.base import Program, program_guard def log_softmax(x, axis=-1): @@ -177,7 +177,7 @@ def cross_entropy_soft_2d( class CrossEntropyLoss(unittest.TestCase): def setUp(self): self.dtype = ( - 'float32' if fluid.core.is_compiled_with_rocm() else 'float64' + 'float32' if base.core.is_compiled_with_rocm() else 'float64' ) # test for deprecated softmax_with_cross_entropy @@ -185,7 +185,7 @@ def test_softmax_with_cross_entropy(self): self.numeric_stable_mode = False self.soft_label = True self.dtype = ( - 'float32' if fluid.core.is_compiled_with_rocm() else 'float64' + 'float32' if base.core.is_compiled_with_rocm() else 'float64' ) self.axis = -1 self.ignore_index = -100 # should not be changed @@ -219,18 +219,18 @@ def test_softmax_with_cross_entropy(self): paddle.disable_static() paddle_loss_swce = paddle.nn.functional.softmax_with_cross_entropy( - fluid.dygraph.to_variable(self.logits), - fluid.dygraph.to_variable(self.labels), + base.dygraph.to_variable(self.logits), + base.dygraph.to_variable(self.labels), soft_label=True, axis=self.axis, ) paddle_loss_ce = paddle.nn.functional.cross_entropy( - fluid.dygraph.to_variable(self.logits), - fluid.dygraph.to_variable(self.labels), + base.dygraph.to_variable(self.logits), + base.dygraph.to_variable(self.labels), soft_label=True, axis=self.axis, - weight=fluid.dygraph.to_variable(self.weight) + weight=base.dygraph.to_variable(self.weight) if self.weight is not None else None, reduction=self.reduction, @@ -247,7 +247,7 @@ def test_cross_entropy_loss_soft_1d(self): self.numeric_stable_mode = False self.soft_label = True self.dtype = ( - 'float32' if fluid.core.is_compiled_with_rocm() else 'float64' + 'float32' if base.core.is_compiled_with_rocm() else 'float64' ) self.axis = -1 self.ignore_index = -100 # should not be changed @@ -282,11 +282,11 @@ def test_cross_entropy_loss_soft_1d(self): # 2. dygraph paddle.disable_static() paddle_loss_none_weight = paddle.nn.functional.cross_entropy( - fluid.dygraph.to_variable(self.logits), - fluid.dygraph.to_variable(self.labels), + base.dygraph.to_variable(self.logits), + base.dygraph.to_variable(self.labels), soft_label=True, axis=self.axis, - weight=fluid.dygraph.to_variable(self.weight) + weight=base.dygraph.to_variable(self.weight) if self.weight is not None else None, reduction=self.reduction, @@ -295,14 +295,14 @@ def test_cross_entropy_loss_soft_1d(self): # 3. 
static paddle.enable_static() - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[self.N, self.C], dtype=self.dtype ) @@ -315,7 +315,7 @@ def test_cross_entropy_loss_soft_1d(self): ) ret = cross_entropy_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) static_ret = exe.run( prog, feed={ @@ -335,7 +335,7 @@ def test_cross_entropy_loss_soft_1d_weight(self): self.numeric_stable_mode = False self.soft_label = True self.dtype = ( - 'float32' if fluid.core.is_compiled_with_rocm() else 'float64' + 'float32' if base.core.is_compiled_with_rocm() else 'float64' ) self.axis = -1 self.ignore_index = -100 # should not be changed @@ -380,25 +380,25 @@ def test_cross_entropy_loss_soft_1d_weight(self): # 2. dygraph paddle.disable_static() paddle_loss_none_weight = paddle.nn.functional.cross_entropy( - fluid.dygraph.to_variable(self.logits), - fluid.dygraph.to_variable(self.labels), + base.dygraph.to_variable(self.logits), + base.dygraph.to_variable(self.labels), soft_label=True, axis=self.axis, - weight=fluid.dygraph.to_variable(self.weight), + weight=base.dygraph.to_variable(self.weight), reduction=self.reduction, ) dy_ret_value = paddle_loss_none_weight.numpy() # 3.static paddle.enable_static() - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[self.N, self.C], dtype=self.dtype ) @@ -414,7 +414,7 @@ def test_cross_entropy_loss_soft_1d_weight(self): ) ret = cross_entropy_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) static_ret = exe.run( prog, feed={ @@ -435,7 +435,7 @@ def test_cross_entropy_loss_soft_1d_mean(self): self.numeric_stable_mode = False self.soft_label = True self.dtype = ( - 'float32' if fluid.core.is_compiled_with_rocm() else 'float64' + 'float32' if base.core.is_compiled_with_rocm() else 'float64' ) self.axis = -1 self.ignore_index = -100 # should not be changed @@ -471,8 +471,8 @@ def test_cross_entropy_loss_soft_1d_mean(self): # 2 dygraph paddle.disable_static() paddle_loss_mean = paddle.nn.functional.cross_entropy( - fluid.dygraph.to_variable(self.logits), - fluid.dygraph.to_variable(self.labels), + base.dygraph.to_variable(self.logits), + base.dygraph.to_variable(self.labels), soft_label=True, axis=self.axis, weight=self.weight, @@ -482,14 +482,14 @@ def test_cross_entropy_loss_soft_1d_mean(self): # 3. 
static paddle.enable_static() - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[self.N, self.C], dtype=self.dtype ) @@ -502,7 +502,7 @@ def test_cross_entropy_loss_soft_1d_mean(self): ) ret = cross_entropy_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) static_ret = exe.run( prog, feed={'input': self.logits, 'label': self.labels}, @@ -519,7 +519,7 @@ def test_cross_entropy_loss_soft_1d_weight_mean(self): self.numeric_stable_mode = False self.soft_label = True self.dtype = ( - 'float32' if fluid.core.is_compiled_with_rocm() else 'float64' + 'float32' if base.core.is_compiled_with_rocm() else 'float64' ) self.axis = -1 self.ignore_index = -100 # should not be changed @@ -555,25 +555,25 @@ def test_cross_entropy_loss_soft_1d_weight_mean(self): # 2. dygraph paddle_loss_none_weight = paddle.nn.functional.cross_entropy( - fluid.dygraph.to_variable(self.logits), - fluid.dygraph.to_variable(self.labels), + base.dygraph.to_variable(self.logits), + base.dygraph.to_variable(self.labels), soft_label=True, axis=self.axis, - weight=fluid.dygraph.to_variable(self.weight), + weight=base.dygraph.to_variable(self.weight), reduction=self.reduction, ) dy_ret_value = paddle_loss_none_weight.numpy() # 3. static paddle.enable_static() - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[self.N, self.C], dtype=self.dtype ) @@ -588,7 +588,7 @@ def test_cross_entropy_loss_soft_1d_weight_mean(self): weight=weight, reduction=self.reduction, soft_label=True ) ret = cross_entropy_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) static_ret = exe.run( prog, feed={ @@ -610,7 +610,7 @@ def inner_cross_entropy_loss_soft_2d(soft_label): self.numeric_stable_mode = False self.soft_label = soft_label self.dtype = ( - 'float32' if fluid.core.is_compiled_with_rocm() else 'float64' + 'float32' if base.core.is_compiled_with_rocm() else 'float64' ) self.axis = -1 self.ignore_index = -100 # should not be changed @@ -654,11 +654,11 @@ def inner_cross_entropy_loss_soft_2d(soft_label): # 2. dygraph paddle_loss_none_weight = paddle.nn.functional.cross_entropy( - fluid.dygraph.to_variable(self.logits), - fluid.dygraph.to_variable(self.labels), + base.dygraph.to_variable(self.logits), + base.dygraph.to_variable(self.labels), soft_label=True, axis=self.axis, - weight=fluid.dygraph.to_variable(self.weight) + weight=base.dygraph.to_variable(self.weight) if self.weight is not None else None, reduction=self.reduction, @@ -667,14 +667,14 @@ def inner_cross_entropy_loss_soft_2d(soft_label): # 3. 
static paddle.enable_static() - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[self.N, self.H, self.W, self.C], @@ -690,7 +690,7 @@ def inner_cross_entropy_loss_soft_2d(soft_label): reduction=self.reduction, soft_label=True ) ret = cross_entropy_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) static_ret = exe.run( prog, feed={ @@ -714,7 +714,7 @@ def test_cross_entropy_loss_soft_2d_weight_mean(self): self.numeric_stable_mode = False self.soft_label = True self.dtype = ( - 'float32' if fluid.core.is_compiled_with_rocm() else 'float64' + 'float32' if base.core.is_compiled_with_rocm() else 'float64' ) self.axis = -1 self.ignore_index = -100 # should not be changed @@ -754,25 +754,25 @@ def test_cross_entropy_loss_soft_2d_weight_mean(self): # 2. dygraph paddle_loss_none_weight = paddle.nn.functional.cross_entropy( - fluid.dygraph.to_variable(self.logits), - fluid.dygraph.to_variable(self.labels), + base.dygraph.to_variable(self.logits), + base.dygraph.to_variable(self.labels), soft_label=True, axis=self.axis, - weight=fluid.dygraph.to_variable(self.weight), + weight=base.dygraph.to_variable(self.weight), reduction=self.reduction, ) dy_ret_value = paddle_loss_none_weight.numpy() # 3. static paddle.enable_static() - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[self.N, self.H, self.W, self.C], @@ -791,7 +791,7 @@ def test_cross_entropy_loss_soft_2d_weight_mean(self): weight=weight, reduction=self.reduction, soft_label=True ) ret = cross_entropy_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) static_ret = exe.run( prog, feed={ @@ -814,14 +814,14 @@ def test_cross_entropy_loss_1d_with_mean_ignore(self): input_np = np.random.random([2, 4]).astype(self.dtype) label_np = np.random.randint(0, 4, size=(2)).astype(np.int64) paddle.enable_static() - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[2, 4], dtype=self.dtype ) @@ -829,7 +829,7 @@ def test_cross_entropy_loss_1d_with_mean_ignore(self): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(ignore_index=0) ret = cross_entropy_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) static_ret = exe.run( prog, feed={ @@ -841,13 +841,13 @@ def test_cross_entropy_loss_1d_with_mean_ignore(self): self.assertIsNotNone(static_ret) expected = cross_entropy_loss_1d(input_np, label_np)[0] - with fluid.dygraph.guard(): + with 
base.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( axis=1, ignore_index=0 ) dy_ret = cross_entropy_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np), + base.dygraph.to_variable(input_np), + base.dygraph.to_variable(label_np), ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) @@ -862,14 +862,14 @@ def test_cross_entropy_loss_1d_with_mean_ignore_negative(self): input_np = np.random.random([N, C]).astype(self.dtype) label_np = -np.ones(N).astype(np.int64) paddle.enable_static() - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[N, C], dtype=self.dtype ) @@ -878,7 +878,7 @@ def test_cross_entropy_loss_1d_with_mean_ignore_negative(self): ignore_index=-1 ) ret = cross_entropy_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) static_ret = exe.run( prog, feed={ @@ -889,13 +889,13 @@ def test_cross_entropy_loss_1d_with_mean_ignore_negative(self): ) self.assertIsNotNone(static_ret) - with fluid.dygraph.guard(): + with base.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( axis=1, ignore_index=-1 ) dy_ret = cross_entropy_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np), + base.dygraph.to_variable(input_np), + base.dygraph.to_variable(label_np), ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) @@ -912,14 +912,14 @@ def test_cross_entropy_loss_1d_with_weight_mean_ignore(self): label_np = np.random.randint(0, C, size=(N)).astype(np.int64) weight_np = np.random.random([C]).astype(self.dtype) paddle.enable_static() - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[N, C], dtype=self.dtype ) @@ -932,7 +932,7 @@ def test_cross_entropy_loss_1d_with_weight_mean_ignore(self): ) ret = cross_entropy_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) static_ret = exe.run( prog, feed={ @@ -944,15 +944,15 @@ def test_cross_entropy_loss_1d_with_weight_mean_ignore(self): ) self.assertIsNotNone(static_ret) - with fluid.dygraph.guard(): + with base.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - weight=fluid.dygraph.to_variable(weight_np), + weight=base.dygraph.to_variable(weight_np), axis=1, ignore_index=0, ) dy_ret = cross_entropy_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np), + base.dygraph.to_variable(input_np), + base.dygraph.to_variable(label_np), ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) @@ -972,13 +972,13 @@ def test_cross_entropy_loss_1d_with_weight_mean_ignore_exceedlabel(self): label_np[0] = 255 weight_np = np.random.random([C]).astype(self.dtype) - with fluid.dygraph.guard(): + with base.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - 
weight=fluid.dygraph.to_variable(weight_np), ignore_index=255 + weight=base.dygraph.to_variable(weight_np), ignore_index=255 ) dy_ret = cross_entropy_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np), + base.dygraph.to_variable(input_np), + base.dygraph.to_variable(label_np), ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) @@ -993,14 +993,14 @@ def test_cross_entropy_loss_1d_with_weight_mean(self): label_np = np.random.randint(0, 4, size=(2)).astype(np.int64) weight_np = np.random.random([4]).astype(self.dtype) # shape:C paddle.enable_static() - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[2, 4], dtype=self.dtype ) @@ -1011,7 +1011,7 @@ def test_cross_entropy_loss_1d_with_weight_mean(self): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss(weight=weight) ret = cross_entropy_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) static_ret = exe.run( prog, feed={ @@ -1026,13 +1026,13 @@ def test_cross_entropy_loss_1d_with_weight_mean(self): 0 ] - with fluid.dygraph.guard(): + with base.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - weight=fluid.dygraph.to_variable(weight_np), axis=1 + weight=base.dygraph.to_variable(weight_np), axis=1 ) dy_ret = cross_entropy_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np), + base.dygraph.to_variable(input_np), + base.dygraph.to_variable(label_np), ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) @@ -1048,14 +1048,14 @@ def test_cross_entropy_loss_1d_with_weight_sum(self): label_np = np.random.randint(0, 100, size=(100)).astype(np.int64) # N,1 weight_np = np.random.random([200]).astype(self.dtype) # C paddle.enable_static() - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[100, 200], dtype=self.dtype ) @@ -1068,7 +1068,7 @@ def test_cross_entropy_loss_1d_with_weight_sum(self): ) ret = cross_entropy_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) static_ret = exe.run( prog, feed={ @@ -1079,13 +1079,13 @@ def test_cross_entropy_loss_1d_with_weight_sum(self): fetch_list=[ret], ) self.assertIsNotNone(static_ret) - with fluid.dygraph.guard(): + with base.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - weight=fluid.dygraph.to_variable(weight_np), reduction='sum' + weight=base.dygraph.to_variable(weight_np), reduction='sum' ) dy_ret = cross_entropy_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np), + base.dygraph.to_variable(input_np), + base.dygraph.to_variable(label_np), ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) @@ -1102,14 +1102,14 @@ def test_cross_entropy_loss_1d_with_weight_none(self): weight_np = 
np.random.random([200]).astype(self.dtype) # C paddle.enable_static() - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[100, 200], dtype=self.dtype ) @@ -1123,7 +1123,7 @@ def test_cross_entropy_loss_1d_with_weight_none(self): ) ret = cross_entropy_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) static_ret = exe.run( prog, feed={ @@ -1135,13 +1135,13 @@ def test_cross_entropy_loss_1d_with_weight_none(self): ) static_ret = np.squeeze(static_ret) self.assertIsNotNone(static_ret) - with fluid.dygraph.guard(): + with base.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - weight=fluid.dygraph.to_variable(weight_np), reduction='none' + weight=base.dygraph.to_variable(weight_np), reduction='none' ) dy_ret = cross_entropy_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np), + base.dygraph.to_variable(input_np), + base.dygraph.to_variable(label_np), ) dy_ret_value = dy_ret.numpy() dy_ret_value = np.squeeze(dy_ret_value) @@ -1158,14 +1158,14 @@ def test_cross_entropy_loss_1d_with_weight_none_func(self): label_np = np.random.randint(0, 100, size=(100)).astype(np.int64) # N weight_np = np.random.random([200]).astype(self.dtype) # C paddle.enable_static() - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[100, 200], dtype=self.dtype ) @@ -1177,7 +1177,7 @@ def test_cross_entropy_loss_1d_with_weight_none_func(self): input, label, weight=weight, reduction='none' ) - exe = fluid.Executor(place) + exe = base.Executor(place) static_ret = exe.run( prog, feed={ @@ -1189,11 +1189,11 @@ def test_cross_entropy_loss_1d_with_weight_none_func(self): ) static_ret = np.squeeze(static_ret) self.assertIsNotNone(static_ret) - with fluid.dygraph.guard(): + with base.dygraph.guard(): dy_ret = paddle.nn.functional.cross_entropy( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np), - weight=fluid.dygraph.to_variable(weight_np), + base.dygraph.to_variable(input_np), + base.dygraph.to_variable(label_np), + weight=base.dygraph.to_variable(weight_np), reduction='none', ) dy_ret_value = dy_ret.numpy() @@ -1210,32 +1210,32 @@ def test_cross_entropy_loss_1d_mean(self): input_np = np.random.random([100, 200]).astype(self.dtype) # N,C label_np = np.random.randint(0, 100, size=(100)).astype(np.int64) # N,1 paddle.enable_static() - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[100, 200], 
dtype=self.dtype ) label = paddle.static.data(name='label', shape=[100], dtype='int64') cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss() ret = cross_entropy_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) static_ret = exe.run( prog, feed={'input': input_np, 'label': label_np}, fetch_list=[ret], ) self.assertIsNotNone(static_ret) - with fluid.dygraph.guard(): + with base.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss() dy_ret = cross_entropy_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np), + base.dygraph.to_variable(input_np), + base.dygraph.to_variable(label_np), ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) @@ -1248,14 +1248,14 @@ def test_cross_entropy_loss_1d_sum(self): input_np = np.random.random([100, 200]).astype(self.dtype) # N,C label_np = np.random.randint(0, 100, size=(100)).astype(np.int64) # N,1 paddle.enable_static() - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[100, 200], dtype=self.dtype ) @@ -1264,20 +1264,20 @@ def test_cross_entropy_loss_1d_sum(self): reduction='sum' ) ret = cross_entropy_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) static_ret = exe.run( prog, feed={'input': input_np, 'label': label_np}, fetch_list=[ret], ) self.assertIsNotNone(static_ret) - with fluid.dygraph.guard(): + with base.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( reduction='sum' ) dy_ret = cross_entropy_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np), + base.dygraph.to_variable(input_np), + base.dygraph.to_variable(label_np), ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) @@ -1290,14 +1290,14 @@ def test_cross_entropy_loss_1d_none(self): input_np = np.random.random([100, 200]).astype(self.dtype) # N,C label_np = np.random.randint(0, 100, size=(100)).astype(np.int64) # N,1 paddle.enable_static() - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[100, 200], dtype=self.dtype ) @@ -1306,7 +1306,7 @@ def test_cross_entropy_loss_1d_none(self): reduction='none' ) ret = cross_entropy_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) static_ret = exe.run( prog, feed={'input': input_np, 'label': label_np}, @@ -1314,13 +1314,13 @@ def test_cross_entropy_loss_1d_none(self): ) static_ret = np.squeeze(static_ret) self.assertIsNotNone(static_ret) - with fluid.dygraph.guard(): + with base.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( reduction='none' ) dy_ret = cross_entropy_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np), + base.dygraph.to_variable(input_np), + base.dygraph.to_variable(label_np), ) dy_ret_value = dy_ret.numpy() 
dy_ret_value = np.squeeze(dy_ret_value) @@ -1340,14 +1340,14 @@ def test_cross_entropy_loss_2d_with_weight_none(self): weight_np = np.random.random(size=(3,)).astype(self.dtype) # C paddle.enable_static() - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[2, 2, 2, 3], dtype=self.dtype ) @@ -1362,7 +1362,7 @@ def test_cross_entropy_loss_2d_with_weight_none(self): ) ret = cross_entropy_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) static_ret = exe.run( prog, feed={ @@ -1374,13 +1374,13 @@ def test_cross_entropy_loss_2d_with_weight_none(self): ) static_ret = np.squeeze(static_ret) self.assertIsNotNone(static_ret) - with fluid.dygraph.guard(): + with base.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - weight=fluid.dygraph.to_variable(weight_np), reduction='none' + weight=base.dygraph.to_variable(weight_np), reduction='none' ) dy_ret = cross_entropy_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np), + base.dygraph.to_variable(input_np), + base.dygraph.to_variable(label_np), ) dy_ret_value = dy_ret.numpy() dy_ret_value = np.squeeze(dy_ret_value) @@ -1402,14 +1402,14 @@ def test_cross_entropy_loss_2d_with_weight_axis_change_mean(self): weight_np = np.random.random(size=(3,)).astype(self.dtype) # C paddle.enable_static() - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[2, 3, 2, 2], dtype=self.dtype ) @@ -1425,7 +1425,7 @@ def test_cross_entropy_loss_2d_with_weight_axis_change_mean(self): # specify the class channels to axis 1 ret = cross_entropy_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) static_ret = exe.run( prog, feed={ @@ -1437,15 +1437,15 @@ def test_cross_entropy_loss_2d_with_weight_axis_change_mean(self): ) self.assertIsNotNone(static_ret) - with fluid.dygraph.guard(): + with base.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - weight=fluid.dygraph.to_variable(weight_np), + weight=base.dygraph.to_variable(weight_np), reduction='mean', axis=1, ) dy_ret = cross_entropy_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np), + base.dygraph.to_variable(input_np), + base.dygraph.to_variable(label_np), ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) @@ -1468,13 +1468,13 @@ def test_cross_entropy_loss_2d_with_weight_mean_ignore_exceedlabel(self): label_np = np.random.randint(0, C, size=(N, H, W)).astype(np.int64) label_np[0, 0, 0] = 255 weight_np = np.random.random([C]).astype(self.dtype) - with fluid.dygraph.guard(): + with base.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - weight=fluid.dygraph.to_variable(weight_np), ignore_index=255 + weight=base.dygraph.to_variable(weight_np), ignore_index=255 ) dy_ret = 
cross_entropy_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np), + base.dygraph.to_variable(input_np), + base.dygraph.to_variable(label_np), ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) @@ -1492,14 +1492,14 @@ def test_cross_entropy_loss_2d_with_weight_mean(self): ) # NHW weight_np = np.random.random(size=(3,)).astype(self.dtype) # C paddle.enable_static() - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[2, 2, 2, 3], dtype=self.dtype ) @@ -1514,7 +1514,7 @@ def test_cross_entropy_loss_2d_with_weight_mean(self): ) ret = cross_entropy_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) static_ret = exe.run( prog, feed={ @@ -1525,13 +1525,13 @@ def test_cross_entropy_loss_2d_with_weight_mean(self): fetch_list=[ret], ) self.assertIsNotNone(static_ret) - with fluid.dygraph.guard(): + with base.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - weight=fluid.dygraph.to_variable(weight_np), reduction='mean' + weight=base.dygraph.to_variable(weight_np), reduction='mean' ) dy_ret = cross_entropy_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np), + base.dygraph.to_variable(input_np), + base.dygraph.to_variable(label_np), ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) @@ -1552,14 +1552,14 @@ def test_cross_entropy_loss_2d_with_weight_sum(self): weight_np = np.random.random(size=(3,)).astype(self.dtype) # C paddle.enable_static() - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[2, 2, 2, 3], dtype=self.dtype ) @@ -1574,7 +1574,7 @@ def test_cross_entropy_loss_2d_with_weight_sum(self): ) ret = cross_entropy_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) static_ret = exe.run( prog, feed={ @@ -1585,13 +1585,13 @@ def test_cross_entropy_loss_2d_with_weight_sum(self): fetch_list=[ret], ) self.assertIsNotNone(static_ret) - with fluid.dygraph.guard(): + with base.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( - weight=fluid.dygraph.to_variable(weight_np), reduction='sum' + weight=base.dygraph.to_variable(weight_np), reduction='sum' ) dy_ret = cross_entropy_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np), + base.dygraph.to_variable(input_np), + base.dygraph.to_variable(label_np), ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) @@ -1610,14 +1610,14 @@ def test_cross_entropy_loss_2d_none(self): np.int64 ) # NHW paddle.enable_static() - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if 
base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[2, 2, 2, 3], dtype=self.dtype ) @@ -1628,7 +1628,7 @@ def test_cross_entropy_loss_2d_none(self): reduction='none' ) ret = cross_entropy_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) static_ret = exe.run( prog, feed={ @@ -1639,13 +1639,13 @@ def test_cross_entropy_loss_2d_none(self): ) static_ret = np.squeeze(static_ret) self.assertIsNotNone(static_ret) - with fluid.dygraph.guard(): + with base.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( reduction='none' ) dy_ret = cross_entropy_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np), + base.dygraph.to_variable(input_np), + base.dygraph.to_variable(label_np), ) dy_ret_value = dy_ret.numpy() dy_ret_value = np.squeeze(dy_ret_value) @@ -1663,14 +1663,14 @@ def test_cross_entropy_loss_2d_mean(self): np.int64 ) # NHW paddle.enable_static() - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[2, 2, 2, 3], dtype=self.dtype ) @@ -1682,7 +1682,7 @@ def test_cross_entropy_loss_2d_mean(self): ) ret = cross_entropy_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) static_ret = exe.run( prog, feed={ @@ -1692,13 +1692,13 @@ def test_cross_entropy_loss_2d_mean(self): fetch_list=[ret], ) self.assertIsNotNone(static_ret) - with fluid.dygraph.guard(): + with base.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( reduction='mean' ) dy_ret = cross_entropy_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np), + base.dygraph.to_variable(input_np), + base.dygraph.to_variable(label_np), ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) @@ -1717,14 +1717,14 @@ def test_cross_entropy_loss_2d_sum(self): np.int64 ) # NHW paddle.enable_static() - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[2, 2, 2, 3], dtype=self.dtype ) @@ -1736,7 +1736,7 @@ def test_cross_entropy_loss_2d_sum(self): ) ret = cross_entropy_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) static_ret = exe.run( prog, feed={ @@ -1746,13 +1746,13 @@ def test_cross_entropy_loss_2d_sum(self): fetch_list=[ret], ) self.assertIsNotNone(static_ret) - with fluid.dygraph.guard(): + with base.dygraph.guard(): cross_entropy_loss = paddle.nn.loss.CrossEntropyLoss( reduction='sum' ) dy_ret = cross_entropy_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np), + base.dygraph.to_variable(input_np), + base.dygraph.to_variable(label_np), ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) @@ -1818,14 +1818,14 @@ 
def static_test_WeightLength_NotEqual(): label_np = np.random.randint(0, 4, size=(2)).astype(np.int64) weight_np = np.random.random([3]).astype('float32') paddle.enable_static() - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[2, 4], dtype='float32' ) @@ -1840,7 +1840,7 @@ def static_test_WeightLength_NotEqual(): ) ret = cross_entropy_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) static_ret = exe.run( prog, feed={ diff --git a/test/legacy_test/test_cross_entropy_op.py b/test/legacy_test/test_cross_entropy_op.py index 0cf4e0a6c2fc8..4ad24ba762b09 100644 --- a/test/legacy_test/test_cross_entropy_op.py +++ b/test/legacy_test/test_cross_entropy_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, paddle_static_guard, randomize_probability import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard class TestCrossEntropyOp(OpTest): @@ -413,11 +413,11 @@ def test_errors(self): def test_Variable(): # the input of cross_entropy must be Variable. - x1 = fluid.create_lod_tensor( - np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + x1 = base.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], base.CPUPlace() ) - lab1 = fluid.create_lod_tensor( - np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + lab1 = base.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], base.CPUPlace() ) paddle.nn.functional.cross_entropy( x1, lab1, reduction='none', use_softmax=False diff --git a/test/legacy_test/test_cross_op.py b/test/legacy_test/test_cross_op.py index 1114bb0b69ffb..72f77a0adaf3b 100644 --- a/test/legacy_test/test_cross_op.py +++ b/test/legacy_test/test_cross_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard class TestCrossOp(OpTest): @@ -142,7 +142,7 @@ def test_cross_api(self): x = paddle.static.data(name='x', shape=[-1, 3], dtype="float32") y = paddle.static.data(name='y', shape=[-1, 3], dtype="float32") z = paddle.cross(x, y, axis=1) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) (res,) = exe.run( feed={'x': self.data_x, 'y': self.data_y}, fetch_list=[z.name], @@ -158,7 +158,7 @@ def test_cross_api(self): x = paddle.static.data(name='x', shape=[-1, 3], dtype="float32") y = paddle.static.data(name='y', shape=[-1, 3], dtype="float32") z = paddle.cross(x, y) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) (res,) = exe.run( feed={'x': self.data_x, 'y': self.data_y}, fetch_list=[z.name], @@ -180,9 +180,9 @@ def test_cross_api(self): def test_dygraph_api(self): self.input_data() # case 1: - # with fluid.dygraph.guard(): - # x = fluid.dygraph.to_variable(self.data_x) - # y = fluid.dygraph.to_variable(self.data_y) + # with base.dygraph.guard(): + # x = base.dygraph.to_variable(self.data_x) + # y = base.dygraph.to_variable(self.data_y) # z = paddle.cross(x, y) # np_z = z.numpy() # 
expect_out = np.array([[-1.0, -1.0, -1.0], [2.0, 2.0, 2.0], @@ -190,9 +190,9 @@ def test_dygraph_api(self): # np.testing.assert_allclose(expect_out, np_z, rtol=1e-05) # case 2: - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(self.data_x) - y = fluid.dygraph.to_variable(self.data_y) + with base.dygraph.guard(): + x = base.dygraph.to_variable(self.data_x) + y = base.dygraph.to_variable(self.data_y) z = paddle.cross(x, y, axis=1) np_z = z.numpy() expect_out = np.array( diff --git a/test/legacy_test/test_crypto.py b/test/legacy_test/test_crypto.py index 79777fa53373e..c0c4490e84c4f 100644 --- a/test/legacy_test/test_crypto.py +++ b/test/legacy_test/test_crypto.py @@ -14,7 +14,7 @@ import unittest -from paddle.fluid.core import CipherFactory, CipherUtils +from paddle.base.core import CipherFactory, CipherUtils class CipherUtilsTestCase(unittest.TestCase): diff --git a/test/legacy_test/test_cuda_graph_static_mode.py b/test/legacy_test/test_cuda_graph_static_mode.py index 3f433e469110a..15df4acea3de9 100644 --- a/test/legacy_test/test_cuda_graph_static_mode.py +++ b/test/legacy_test/test_cuda_graph_static_mode.py @@ -19,7 +19,7 @@ import paddle from paddle.device.cuda.graphs import CUDAGraph -from paddle.fluid.dygraph.base import switch_to_static_graph +from paddle.base.dygraph.base import switch_to_static_graph def can_use_cuda_graph(): diff --git a/test/legacy_test/test_cuda_graph_static_mode_error.py b/test/legacy_test/test_cuda_graph_static_mode_error.py index 6ee8af1ba69a6..f2ef98eab5f90 100644 --- a/test/legacy_test/test_cuda_graph_static_mode_error.py +++ b/test/legacy_test/test_cuda_graph_static_mode_error.py @@ -19,7 +19,7 @@ import paddle from paddle.device.cuda.graphs import CUDAGraph -from paddle.fluid.dygraph.base import switch_to_static_graph +from paddle.base.dygraph.base import switch_to_static_graph class TestCUDAGraphInFirstBatch(unittest.TestCase): diff --git a/test/legacy_test/test_cuda_max_memory_allocated.py b/test/legacy_test/test_cuda_max_memory_allocated.py index c7b6828a60c1d..b9f76325728b0 100644 --- a/test/legacy_test/test_cuda_max_memory_allocated.py +++ b/test/legacy_test/test_cuda_max_memory_allocated.py @@ -20,7 +20,7 @@ max_memory_allocated, memory_allocated, ) -from paddle.fluid import core +from paddle.base import core class TestMaxMemoryAllocated(unittest.TestCase): diff --git a/test/legacy_test/test_cuda_max_memory_reserved.py b/test/legacy_test/test_cuda_max_memory_reserved.py index 07d2b5a9ded19..e6ccaddbfb985 100644 --- a/test/legacy_test/test_cuda_max_memory_reserved.py +++ b/test/legacy_test/test_cuda_max_memory_reserved.py @@ -20,7 +20,7 @@ max_memory_reserved, memory_reserved, ) -from paddle.fluid import core +from paddle.base import core class TestMaxMemoryreserved(unittest.TestCase): diff --git a/test/legacy_test/test_cuda_memory_allocated.py b/test/legacy_test/test_cuda_memory_allocated.py index 252dd6f93117c..bd5b294e735f8 100644 --- a/test/legacy_test/test_cuda_memory_allocated.py +++ b/test/legacy_test/test_cuda_memory_allocated.py @@ -16,7 +16,7 @@ import paddle from paddle.device.cuda import device_count, memory_allocated -from paddle.fluid import core +from paddle.base import core class TestMemoryAllocated(unittest.TestCase): diff --git a/test/legacy_test/test_cuda_memory_reserved.py b/test/legacy_test/test_cuda_memory_reserved.py index 941ab27ecc5ba..0a119aa3c30a0 100644 --- a/test/legacy_test/test_cuda_memory_reserved.py +++ b/test/legacy_test/test_cuda_memory_reserved.py @@ -16,7 +16,7 @@ import paddle from paddle.device.cuda 
import device_count, memory_reserved -from paddle.fluid import core +from paddle.base import core class TestMemoryreserved(unittest.TestCase): diff --git a/test/legacy_test/test_cuda_random_seed.py b/test/legacy_test/test_cuda_random_seed.py index e14f4c2fd1657..c517bd33b22dd 100644 --- a/test/legacy_test/test_cuda_random_seed.py +++ b/test/legacy_test/test_cuda_random_seed.py @@ -21,8 +21,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core @unittest.skipIf( @@ -36,7 +36,7 @@ class TestGeneratorSeed(unittest.TestCase): def test_gen_dropout_dygraph(self): gen = paddle.seed(12343) - fluid.enable_dygraph() + base.enable_dygraph() gen.manual_seed(111111111) st = paddle.get_cuda_rng_state() @@ -65,7 +65,7 @@ def test_gen_dropout_dygraph(self): def test_generator_gaussian_random_dygraph(self): """Test Generator seed.""" - fluid.enable_dygraph() + base.enable_dygraph() st = paddle.get_cuda_rng_state() x1 = paddle.randn([120], dtype="float32") @@ -86,7 +86,7 @@ def test_generator_gaussian_random_dygraph(self): def test_generator_randint_dygraph(self): """Test Generator seed.""" - fluid.enable_dygraph() + base.enable_dygraph() paddle.seed(12312321111) x = paddle.randint(low=10, shape=[10], dtype="int32") @@ -106,14 +106,14 @@ def test_generator_randint_dygraph(self): np.testing.assert_allclose(x_np, x3_np, rtol=1e-05) def test_gen_TruncatedNormal_initializer(self): - fluid.disable_dygraph() + base.disable_dygraph() gen = paddle.seed(123123143) cur_state = paddle.get_cuda_rng_state() - startup_program = fluid.Program() - train_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): + startup_program = base.Program() + train_program = base.Program() + with base.program_guard(train_program, startup_program): # example 1: # attr shape is a list which doesn't contain tensor Variable. 
x = paddle.uniform(shape=[2, 10]) @@ -132,14 +132,14 @@ def test_gen_TruncatedNormal_initializer(self): ), ) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) exe.run(startup_program) out1 = exe.run( train_program, feed={}, fetch_list=[result_1, result_2] ) paddle.seed(123123143) - with fluid.program_guard(train_program, startup_program): + with base.program_guard(train_program, startup_program): exe.run(startup_program) out2 = exe.run( train_program, feed={}, fetch_list=[result_1, result_2] @@ -160,7 +160,7 @@ def test_generator_pickle(self): output_dir = tempfile.mkdtemp() random_file = os.path.join(output_dir, "random.pdmodel") - fluid.enable_dygraph() + base.enable_dygraph() x0 = paddle.randn([120], dtype="float32") st = paddle.get_cuda_rng_state() diff --git a/test/legacy_test/test_cummax_op.py b/test/legacy_test/test_cummax_op.py index 1ff5cb2442a63..53e1da6da45f8 100644 --- a/test/legacy_test/test_cummax_op.py +++ b/test/legacy_test/test_cummax_op.py @@ -19,8 +19,8 @@ from eager_op_test import OpTest import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def cummax_dim2(arr, axis=None): @@ -152,7 +152,7 @@ def run_cases(self): np.testing.assert_array_equal(ind, indices.numpy()) def run_static(self, use_gpu=False): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): data_np = np.random.random((100, 100)).astype(np.float32) x = paddle.static.data('x', [100, 100]) y1, indices1 = paddle.cummax(x) @@ -161,9 +161,9 @@ def run_static(self, use_gpu=False): y4, indices4 = paddle.cummax(x, axis=-2) y5, indices5 = paddle.cummax(x, axis=-2, dtype=np.int32) - place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + place = base.CUDAPlace(0) if use_gpu else base.CPUPlace() + exe = base.Executor(place) + exe.run(base.default_startup_program()) out = exe.run( feed={'x': data_np}, fetch_list=[ @@ -201,22 +201,22 @@ def run_static(self, use_gpu=False): np.testing.assert_allclose(ind, out[9], rtol=1e-05) def test_cpu(self): - paddle.disable_static(paddle.fluid.CPUPlace()) + paddle.disable_static(paddle.base.CPUPlace()) self.run_cases() paddle.enable_static() self.run_static() def test_gpu(self): - if not fluid.core.is_compiled_with_cuda(): + if not base.core.is_compiled_with_cuda(): return - paddle.disable_static(paddle.fluid.CUDAPlace(0)) + paddle.disable_static(paddle.base.CUDAPlace(0)) self.run_cases() paddle.enable_static() self.run_static(use_gpu=True) def test_errors(self): paddle.enable_static() - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): def test_x_type(): data = [1, 2, 3] diff --git a/test/legacy_test/test_cummin_op.py b/test/legacy_test/test_cummin_op.py index dc542ebe90077..95235dc33d442 100644 --- a/test/legacy_test/test_cummin_op.py +++ b/test/legacy_test/test_cummin_op.py @@ -19,8 +19,8 @@ from eager_op_test import OpTest import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def cummin_dim2(arr, axis=None): @@ -152,7 +152,7 @@ def run_cases(self): np.testing.assert_array_equal(ind, indices.numpy()) def run_static(self, use_gpu=False): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): data_np = np.random.random((100, 100)).astype(np.float32) x = paddle.static.data('x', [100, 100]) y1, indices1 = paddle.cummin(x) @@ -161,9 
+161,9 @@ def run_static(self, use_gpu=False): y4, indices4 = paddle.cummin(x, axis=-2) y5, indices5 = paddle.cummin(x, axis=-2, dtype=np.int32) - place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + place = base.CUDAPlace(0) if use_gpu else base.CPUPlace() + exe = base.Executor(place) + exe.run(base.default_startup_program()) out = exe.run( feed={'x': data_np}, fetch_list=[ @@ -201,22 +201,22 @@ def run_static(self, use_gpu=False): np.testing.assert_allclose(ind, out[9], rtol=1e-05) def test_cpu(self): - paddle.disable_static(paddle.fluid.CPUPlace()) + paddle.disable_static(paddle.base.CPUPlace()) self.run_cases() paddle.enable_static() self.run_static() def test_gpu(self): - if not fluid.core.is_compiled_with_cuda(): + if not base.core.is_compiled_with_cuda(): return - paddle.disable_static(paddle.fluid.CUDAPlace(0)) + paddle.disable_static(paddle.base.CUDAPlace(0)) self.run_cases() paddle.enable_static() self.run_static(use_gpu=True) def test_errors(self): paddle.enable_static() - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): def test_x_type(): data = [1, 2, 3] diff --git a/test/legacy_test/test_cumprod_op.py b/test/legacy_test/test_cumprod_op.py index 65b3c8da65870..f0569a7697239 100644 --- a/test/legacy_test/test_cumprod_op.py +++ b/test/legacy_test/test_cumprod_op.py @@ -19,7 +19,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core np.random.seed(0) diff --git a/test/legacy_test/test_cumsum_op.py b/test/legacy_test/test_cumsum_op.py index 4134d649044f4..6422852458138 100644 --- a/test/legacy_test/test_cumsum_op.py +++ b/test/legacy_test/test_cumsum_op.py @@ -21,8 +21,8 @@ import paddle import paddle.inference as paddle_infer -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestCumsumOp(unittest.TestCase): @@ -53,7 +53,7 @@ def run_cases(self): np.testing.assert_array_equal(z, y.numpy()) def run_static(self, use_gpu=False): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): data_np = np.random.random((100, 100)).astype(np.float32) x = paddle.static.data('X', [100, 100]) y = paddle.cumsum(x) @@ -63,9 +63,9 @@ def run_static(self, use_gpu=False): y5 = paddle.cumsum(x, dtype=np.int32) y6 = paddle.cumsum(x, axis=-2) - place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + place = base.CUDAPlace(0) if use_gpu else base.CPUPlace() + exe = base.Executor(place) + exe.run(base.default_startup_program()) out = exe.run( feed={'X': data_np}, fetch_list=[ @@ -90,23 +90,23 @@ def run_static(self, use_gpu=False): np.testing.assert_allclose(z, out[5], rtol=1e-05) def test_cpu(self): - paddle.disable_static(paddle.fluid.CPUPlace()) + paddle.disable_static(paddle.base.CPUPlace()) self.run_cases() paddle.enable_static() self.run_static() def test_gpu(self): - if not fluid.core.is_compiled_with_cuda(): + if not base.core.is_compiled_with_cuda(): return - paddle.disable_static(paddle.fluid.CUDAPlace(0)) + paddle.disable_static(paddle.base.CUDAPlace(0)) self.run_cases() paddle.enable_static() self.run_static(use_gpu=True) def test_name(self): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): x = paddle.static.data('x', [3, 4]) y = paddle.cumsum(x, name='out') self.assertTrue('out' in 
y.name) @@ -475,7 +475,7 @@ def test_check_grad(self): class BadInputTest(unittest.TestCase): def test_error(self): paddle.enable_static() - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): def test_bad_x(): data = [1, 2, 4] diff --git a/test/legacy_test/test_custom_grad_input.py b/test/legacy_test/test_custom_grad_input.py index cb7b673029fa5..cd16a32bcc7c9 100644 --- a/test/legacy_test/test_custom_grad_input.py +++ b/test/legacy_test/test_custom_grad_input.py @@ -17,7 +17,7 @@ import numpy as np import paddle -import paddle.fluid.dygraph as dg +import paddle.base.dygraph as dg class TestTensorBackward(unittest.TestCase): diff --git a/test/legacy_test/test_data.py b/test/legacy_test/test_data.py index 141d9729f61a9..f9dbab12354fd 100644 --- a/test/legacy_test/test_data.py +++ b/test/legacy_test/test_data.py @@ -15,7 +15,7 @@ import unittest import paddle -from paddle.fluid import Program, core, program_guard +from paddle.base import Program, core, program_guard class TestApiStaticDataError(unittest.TestCase): diff --git a/test/legacy_test/test_data_feeder.py b/test/legacy_test/test_data_feeder.py index 72bacedf97a6b..e8cda8eb45d63 100644 --- a/test/legacy_test/test_data_feeder.py +++ b/test/legacy_test/test_data_feeder.py @@ -15,7 +15,7 @@ import unittest import paddle -from paddle import fluid +from paddle import base paddle.enable_static() @@ -24,7 +24,7 @@ class TestDataFeeder(unittest.TestCase): def test_lod_level_0_converter(self): img = paddle.static.data(name='image', shape=[-1, 1, 28, 28]) label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') - feeder = fluid.DataFeeder([img, label], fluid.CPUPlace()) + feeder = base.DataFeeder([img, label], base.CPUPlace()) result = feeder.feed([([0] * 784, [9]), ([1] * 784, [1])]) self.assertEqual(result['image'].shape(), [2, 1, 28, 28]) @@ -45,7 +45,7 @@ def test_lod_level_1_converter(self): name='sentences', shape=[-1, 1], dtype='int64', lod_level=1 ) label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') - feeder = fluid.DataFeeder([sentences, label], fluid.CPUPlace()) + feeder = base.DataFeeder([sentences, label], base.CPUPlace()) # lod = [[0, 3, 5, 9]] # data = [[1, 2, 3], [4, 5], [6, 7, 8, 9]] @@ -68,7 +68,7 @@ def test_lod_level_2_converter(self): name='paragraphs', shape=[-1, 1], dtype='int64', lod_level=2 ) label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') - feeder = fluid.DataFeeder([paragraphs, label], fluid.CPUPlace()) + feeder = base.DataFeeder([paragraphs, label], base.CPUPlace()) # lod = [[0, 2, 3], [0, 3, 5, 9]] # data = [[[1, 2, 3], [4, 5]], [[6, 7, 8, 9]]] diff --git a/test/legacy_test/test_data_norm_op.py b/test/legacy_test/test_data_norm_op.py index 0839a778d30c1..251123e9abdaa 100644 --- a/test/legacy_test/test_data_norm_op.py +++ b/test/legacy_test/test_data_norm_op.py @@ -20,8 +20,8 @@ from op import Operator import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard def _reference_testing(x, batch_size, batch_sum, batch_square_sum, slot_dim=-1): @@ -120,21 +120,21 @@ def check_with_place( # create input x_tensor = create_or_get_tensor( - scope, "x_val", OpTest.np_dtype_to_fluid_dtype(x_val), place + scope, "x_val", OpTest.np_dtype_to_base_dtype(x_val), place ) batch_size_tensor = create_or_get_tensor( scope, "batch_size", - OpTest.np_dtype_to_fluid_dtype(batch_size), + OpTest.np_dtype_to_base_dtype(batch_size), place, ) 
batch_sum_tensor = create_or_get_tensor( - scope, "batch_sum", OpTest.np_dtype_to_fluid_dtype(batch_sum), place + scope, "batch_sum", OpTest.np_dtype_to_base_dtype(batch_sum), place ) batch_square_sum_tensor = create_or_get_tensor( scope, "batch_square_sum", - OpTest.np_dtype_to_fluid_dtype(batch_square_sum), + OpTest.np_dtype_to_base_dtype(batch_square_sum), place, ) @@ -165,10 +165,10 @@ def check_with_place( scale_w = np.ones(scale_shape).astype(np.float32) bias = np.zeros(scale_shape).astype(np.float32) scale_w_tensor = create_or_get_tensor( - scope, "scale_w", OpTest.np_dtype_to_fluid_dtype(scale_w), place + scope, "scale_w", OpTest.np_dtype_to_base_dtype(scale_w), place ) bias_tensor = create_or_get_tensor( - scope, "bias", OpTest.np_dtype_to_fluid_dtype(bias), place + scope, "bias", OpTest.np_dtype_to_base_dtype(bias), place ) data_norm_op = Operator( "data_norm", @@ -528,7 +528,7 @@ class TestDataNormOpErrorr(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): x2 = paddle.static.data(name='x2', shape=[-1, 3, 4], dtype="int32") - # self.assertRaises(TypeError, fluid.data_norm, x2) + # self.assertRaises(TypeError, base.data_norm, x2) paddle.static.nn.data_norm( input=x2, param_attr={}, enable_scale_and_shift=True ) @@ -543,10 +543,10 @@ def test_0_size(): paddle.enable_static() x = paddle.static.data(name='x', shape=[0, 3], dtype='float32') out = paddle.static.nn.data_norm(x, slot_dim=1) - cpu = fluid.core.CPUPlace() - exe = fluid.Executor(cpu) - exe.run(fluid.default_startup_program()) - test_program = fluid.default_main_program().clone(for_test=True) + cpu = base.core.CPUPlace() + exe = base.Executor(cpu) + exe.run(base.default_startup_program()) + test_program = base.default_main_program().clone(for_test=True) exe.run( test_program, fetch_list=out, diff --git a/test/legacy_test/test_dataloader_early_reset.py b/test/legacy_test/test_dataloader_early_reset.py index 1367faa4d94cf..1c826eb9cb89a 100644 --- a/test/legacy_test/test_dataloader_early_reset.py +++ b/test/legacy_test/test_dataloader_early_reset.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base def infinite_reader(): @@ -40,30 +40,30 @@ def build_network(self): optimizer.minimize(loss) def get_place(self): - if fluid.is_compiled_with_cuda(): - return fluid.CUDAPlace(0) + if base.is_compiled_with_cuda(): + return base.CUDAPlace(0) else: - return fluid.CPUPlace() + return base.CPUPlace() def create_data_loader(self): self.x = paddle.static.data(name='x', shape=[None, 32], dtype='float32') - return fluid.io.DataLoader.from_generator( + return base.io.DataLoader.from_generator( feed_list=[self.x], capacity=10, iterable=self.iterable ) def test_main(self): - with fluid.program_guard(fluid.Program(), fluid.Program()): - with fluid.scope_guard(fluid.Scope()): + with base.program_guard(base.Program(), base.Program()): + with base.scope_guard(base.Scope()): self.run_network() def run_network(self): loader = self.create_data_loader() self.build_network() - exe = fluid.Executor(self.get_place()) - exe.run(fluid.default_startup_program()) + exe = base.Executor(self.get_place()) + exe.run(base.default_startup_program()) - prog = fluid.default_main_program() + prog = base.default_main_program() loader.set_batch_generator(infinite_reader, places=self.get_place()) for epoch_id in range(10): diff --git a/test/legacy_test/test_dataloader_keep_order.py b/test/legacy_test/test_dataloader_keep_order.py index 1068093f9c4ae..a37e1b4939770 100644 --- 
a/test/legacy_test/test_dataloader_keep_order.py +++ b/test/legacy_test/test_dataloader_keep_order.py @@ -18,7 +18,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base def create_reader(shape, batch_number): @@ -46,7 +46,7 @@ def build_network(self, places): input_data = paddle.static.data( shape=self.shape, dtype='float32', name="input" ) - loader = fluid.io.DataLoader.from_generator( + loader = base.io.DataLoader.from_generator( capacity=16, feed_list=[input_data], iterable=self.iterable ) @@ -83,12 +83,12 @@ def assertInputData(self, batch_id, input_data, dev_cnt): start_val += 1 def get_places(self): - place_list = [fluid.cpu_places(1)] - if fluid.is_compiled_with_cuda(): + place_list = [base.cpu_places(1)] + if base.is_compiled_with_cuda(): if os.name == "nt": - place_list.extend([fluid.cuda_places(0)]) + place_list.extend([base.cuda_places(0)]) else: - place_list.extend([fluid.cuda_places(0)]) + place_list.extend([base.cuda_places(0)]) return place_list def test_main(self): @@ -96,18 +96,18 @@ def test_main(self): self.run_main_with_place(p) def run_main_with_place(self, places): - with fluid.scope_guard(fluid.Scope()): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.scope_guard(base.Scope()): + with base.program_guard(base.Program(), base.Program()): input_data, loss, loader = self.build_network(places) fetch_list = [input_data] - exe = fluid.Executor(places[0]) - exe.run(fluid.default_startup_program()) + exe = base.Executor(places[0]) + exe.run(base.default_startup_program()) dev_cnt = len(places) self.assertTrue(dev_cnt == 1) - main_program = fluid.default_main_program() + main_program = base.default_main_program() max_batch_num = min( self.break_num, int(self.batch_num / dev_cnt) @@ -151,7 +151,7 @@ def run_main_with_place(self, places): batch_id, fetch_val, dev_cnt ) batch_id += 1 - except fluid.core.EOFException: + except base.core.EOFException: loader.reset() self.assertEqual(batch_id, max_batch_num) diff --git a/test/legacy_test/test_dataloader_unkeep_order.py b/test/legacy_test/test_dataloader_unkeep_order.py index 12b5d4e0853bd..17e5257dffc01 100644 --- a/test/legacy_test/test_dataloader_unkeep_order.py +++ b/test/legacy_test/test_dataloader_unkeep_order.py @@ -18,8 +18,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid.reader import keep_data_loader_order +from paddle import base +from paddle.base.reader import keep_data_loader_order keep_data_loader_order(False) @@ -52,7 +52,7 @@ def build_network(self, places): input_data = paddle.static.data( shape=self.shape, dtype='float32', name="input" ) - loader = fluid.io.DataLoader.from_generator( + loader = base.io.DataLoader.from_generator( capacity=16, feed_list=[input_data], iterable=self.iterable ) @@ -103,12 +103,12 @@ def assertInputData( start_val += 1 def get_places(self): - place_list = [fluid.cpu_places(1)] - if fluid.is_compiled_with_cuda(): + place_list = [base.cpu_places(1)] + if base.is_compiled_with_cuda(): if os.name == "nt": - place_list.extend([fluid.cuda_places(0)]) + place_list.extend([base.cuda_places(0)]) else: - place_list.extend([fluid.cuda_places(0)]) + place_list.extend([base.cuda_places(0)]) return place_list def test_main(self): @@ -116,18 +116,18 @@ def test_main(self): self.run_main_with_place(p) def run_main_with_place(self, places): - with fluid.scope_guard(fluid.Scope()): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.scope_guard(base.Scope()): + with 
base.program_guard(base.Program(), base.Program()): input_data, loss, loader = self.build_network(places) fetch_list = [input_data] - exe = fluid.Executor(places[0]) - exe.run(fluid.default_startup_program()) + exe = base.Executor(places[0]) + exe.run(base.default_startup_program()) dev_cnt = len(places) self.assertTrue(dev_cnt == 1) - main_program = fluid.default_main_program() + main_program = base.default_main_program() max_batch_num = min( self.break_num, int(self.batch_num / dev_cnt) @@ -178,7 +178,7 @@ def run_main_with_place(self, places): batch_id, fetch_val, dev_cnt ) batch_id += 1 - except fluid.core.EOFException: + except base.core.EOFException: loader.reset() if dev_cnt == 1: diff --git a/test/legacy_test/test_dataset.py b/test/legacy_test/test_dataset.py index 04e38849da281..b9a94767334e2 100644 --- a/test/legacy_test/test_dataset.py +++ b/test/legacy_test/test_dataset.py @@ -21,8 +21,8 @@ import unittest import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestDataset(unittest.TestCase): @@ -60,7 +60,7 @@ def test_config(self): """ Testcase for python config. """ - dataset = fluid.InMemoryDataset() + dataset = base.InMemoryDataset() dataset.set_parse_ins_id(True) dataset.set_parse_content(True) dataset._set_trainer_num(1) @@ -138,7 +138,7 @@ def test_run_with_dump(self): def test_dataset_config(self): """Testcase for dataset configuration.""" - dataset = fluid.core.Dataset("MultiSlotDataset") + dataset = base.core.Dataset("MultiSlotDataset") dataset.set_thread_num(12) dataset.set_filelist(["a.txt", "b.txt", "c.txt"]) dataset.set_trainer_num(4) @@ -212,11 +212,11 @@ def test_set_download_cmd(self): exe = paddle.static.Executor(paddle.CPUPlace()) startup_program = paddle.static.Program() main_program = paddle.static.Program() - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) exe.run(startup_program) if self.use_data_loader: - data_loader = fluid.io.DataLoader.from_dataset( - dataset, fluid.cpu_places(), self.drop_last + data_loader = base.io.DataLoader.from_dataset( + dataset, base.cpu_places(), self.drop_last ) for i in range(self.epoch_num): for data in data_loader(): @@ -273,20 +273,20 @@ def test_in_memory_dataset_run(self): dataset.local_shuffle() dataset._set_generate_unique_feasigns(True, 15) dataset._generate_local_tables_unlock(0, 11, 1, 25, 15) - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) + exe = base.Executor(base.CPUPlace()) + exe.run(base.default_startup_program()) if self.use_data_loader: - data_loader = fluid.io.DataLoader.from_dataset( - dataset, fluid.cpu_places(), self.drop_last + data_loader = base.io.DataLoader.from_dataset( + dataset, base.cpu_places(), self.drop_last ) for i in range(self.epoch_num): for data in data_loader(): - exe.run(fluid.default_main_program(), feed=data) + exe.run(base.default_main_program(), feed=data) else: for i in range(self.epoch_num): try: exe.train_from_dataset( - fluid.default_main_program(), dataset + base.default_main_program(), dataset ) except Exception as e: self.assertTrue(False) @@ -297,7 +297,7 @@ def test_in_memory_dataset_gpugraph_mode(self): """ Testcase for InMemoryDataset in gpugraph mode. 
""" - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + dataset = base.DatasetFactory().create_dataset("InMemoryDataset") dataset.set_feed_type("SlotRecordInMemoryDataFeed") graph_config = { "walk_len": 24, @@ -347,9 +347,9 @@ def test_in_memory_dataset_masterpatch(self): slots = ["slot1", "slot2", "slot3", "slot4"] slots_vars = [] - train_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): + train_program = base.Program() + startup_program = base.Program() + with base.program_guard(train_program, startup_program): for slot in slots[:2]: var = paddle.static.data( name=slot, shape=[-1, 1], dtype="int64", lod_level=1 @@ -376,7 +376,7 @@ def test_in_memory_dataset_masterpatch(self): dataset.load_into_memory() dataset.local_shuffle() - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) exe.run(startup_program) for i in range(2): @@ -423,9 +423,9 @@ def test_in_memory_dataset_masterpatch1(self): f.write(data) slots_vars = [] - train_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): + train_program = base.Program() + startup_program = base.Program() + with base.program_guard(train_program, startup_program): var1 = paddle.static.data( name="slot1", shape=[-1, 1], dtype="int64", lod_level=0 ) @@ -454,7 +454,7 @@ def test_in_memory_dataset_masterpatch1(self): dataset.load_into_memory() dataset.local_shuffle() - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) exe.run(startup_program) for i in range(2): @@ -512,30 +512,30 @@ def test_in_memory_dataset_run_2(self): dataset.load_into_memory() dataset.local_shuffle() - exe = fluid.Executor( - fluid.CPUPlace() + exe = base.Executor( + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) - exe.run(fluid.default_startup_program()) + exe.run(base.default_startup_program()) for i in range(2): try: - exe.train_from_dataset(fluid.default_main_program(), dataset) + exe.train_from_dataset(base.default_main_program(), dataset) # exe.train_from_dataset( - # fluid.default_main_program(), dataset, thread=1 + # base.default_main_program(), dataset, thread=1 # ) exe.train_from_dataset( - fluid.default_main_program(), dataset, thread=2 + base.default_main_program(), dataset, thread=2 ) # exe.train_from_dataset( - # fluid.default_main_program(), dataset, thread=2 + # base.default_main_program(), dataset, thread=2 # ) # exe.train_from_dataset( - # fluid.default_main_program(), dataset, thread=3 + # base.default_main_program(), dataset, thread=3 # ) # exe.train_from_dataset( - # fluid.default_main_program(), dataset, thread=4 + # base.default_main_program(), dataset, thread=4 # ) except ImportError as e: pass @@ -543,17 +543,17 @@ def test_in_memory_dataset_run_2(self): self.assertTrue(False) if self.use_data_loader: - data_loader = fluid.io.DataLoader.from_dataset( - dataset, fluid.cpu_places(), self.drop_last + data_loader = base.io.DataLoader.from_dataset( + dataset, base.cpu_places(), self.drop_last ) for i in range(self.epoch_num): for data in data_loader(): - exe.run(fluid.default_main_program(), feed=data) + exe.run(base.default_main_program(), feed=data) else: for i in range(self.epoch_num): try: exe.train_from_dataset( - fluid.default_main_program(), dataset + base.default_main_program(), dataset ) except Exception as e: self.assertTrue(False) @@ -586,7 +586,7 @@ def 
test_in_memory_dataset_run_2(self): fleet_send_sleep_seconds=2, fea_eval=True, ) - fleet_ptr = fluid.core.Fleet() + fleet_ptr = base.core.Fleet() fleet_ptr.set_client2client_config(1, 1, 1) fleet_ptr.get_cache_threshold(0) @@ -626,20 +626,20 @@ def test_queue_dataset_run(self): ) dataset.set_filelist([filename1, filename2]) - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) + exe = base.Executor(base.CPUPlace()) + exe.run(base.default_startup_program()) if self.use_data_loader: - data_loader = fluid.io.DataLoader.from_dataset( - dataset, fluid.cpu_places(), self.drop_last + data_loader = base.io.DataLoader.from_dataset( + dataset, base.cpu_places(), self.drop_last ) for i in range(self.epoch_num): for data in data_loader(): - exe.run(fluid.default_main_program(), feed=data) + exe.run(base.default_main_program(), feed=data) else: for i in range(self.epoch_num): try: exe.train_from_dataset( - fluid.default_main_program(), dataset + base.default_main_program(), dataset ) except Exception as e: self.assertTrue(False) @@ -650,7 +650,7 @@ def test_queue_dataset_run(self): ) dataset.set_filelist([]) # try: - # exe.train_from_dataset(fluid.default_main_program(), dataset2) + # exe.train_from_dataset(base.default_main_program(), dataset2) # except ImportError as e: # print("warning: we skip trainer_desc_pb2 import problem in windows") # except Exception as e: @@ -694,24 +694,24 @@ def test_queue_dataset_run_2(self): ) dataset.set_filelist([filename1, filename2]) - exe = fluid.Executor( - fluid.CPUPlace() + exe = base.Executor( + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) - exe.run(fluid.default_startup_program()) + exe.run(base.default_startup_program()) if self.use_data_loader: - data_loader = fluid.io.DataLoader.from_dataset( - dataset, fluid.cpu_places(), self.drop_last + data_loader = base.io.DataLoader.from_dataset( + dataset, base.cpu_places(), self.drop_last ) for i in range(self.epoch_num): for data in data_loader(): - exe.run(fluid.default_main_program(), feed=data) + exe.run(base.default_main_program(), feed=data) else: for i in range(self.epoch_num): try: exe.train_from_dataset( - fluid.default_main_program(), dataset + base.default_main_program(), dataset ) except Exception as e: self.assertTrue(False) @@ -760,24 +760,24 @@ def test_queue_dataset_run_3(self): dataset.set_filelist([filename1, filename2]) dataset.load_into_memory() - exe = fluid.Executor( - fluid.CPUPlace() + exe = base.Executor( + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) - exe.run(fluid.default_startup_program()) + exe.run(base.default_startup_program()) if self.use_data_loader: - data_loader = fluid.io.DataLoader.from_dataset( - dataset, fluid.cpu_places(), self.drop_last + data_loader = base.io.DataLoader.from_dataset( + dataset, base.cpu_places(), self.drop_last ) for i in range(self.epoch_num): for data in data_loader(): - exe.run(fluid.default_main_program(), feed=data) + exe.run(base.default_main_program(), feed=data) else: for i in range(self.epoch_num): try: exe.train_from_dataset( - fluid.default_main_program(), dataset + base.default_main_program(), dataset ) except Exception as e: self.assertTrue(False) @@ -878,7 +878,7 @@ def test_cuda_in_memory_dataset_run(self): ) slots_vars.append(var) - dataset = fluid.DatasetFactory().create_dataset("InMemoryDataset") + dataset = base.DatasetFactory().create_dataset("InMemoryDataset") 
dataset.set_feed_type("SlotRecordInMemoryDataFeed") dataset.set_batch_size(1) dataset.set_pipe_command("cat") @@ -905,15 +905,15 @@ def test_cuda_in_memory_dataset_run(self): dataset.get_memory_data_size() - exe = fluid.Executor( - fluid.CPUPlace() + exe = base.Executor( + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) - exe.run(fluid.default_startup_program()) + exe.run(base.default_startup_program()) for i in range(self.epoch_num): try: - exe.train_from_dataset(fluid.default_main_program(), dataset) + exe.train_from_dataset(base.default_main_program(), dataset) except Exception as e: self.assertTrue(False) temp_dir.cleanup() @@ -1014,12 +1014,12 @@ def test_dataset_none(self): files = [self.filename1, self.filename2] dataset = self.get_dataset(slots_vars, files) - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) + exe = base.Executor(base.CPUPlace()) + exe.run(base.default_startup_program()) # test dataset->None try: - exe.train_from_dataset(fluid.default_main_program(), None) + exe.train_from_dataset(base.default_main_program(), None) except ImportError as e: print("warning: we skip trainer_desc_pb2 import problem in windows") except RuntimeError as e: @@ -1036,11 +1036,11 @@ def test_infer_from_dataset(self): files = [self.filename1, self.filename2] dataset = self.get_dataset(slots_vars, files) - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) + exe = base.Executor(base.CPUPlace()) + exe.run(base.default_startup_program()) try: - exe.infer_from_dataset(fluid.default_main_program(), dataset) + exe.infer_from_dataset(base.default_main_program(), dataset) except ImportError as e: print("warning: we skip trainer_desc_pb2 import problem in windows") except Exception as e: @@ -1054,15 +1054,15 @@ def test_fetch_handler(self): files = [self.filename1, self.filename2] dataset = self.get_dataset(slots_vars, files) - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) + exe = base.Executor(base.CPUPlace()) + exe.run(base.default_startup_program()) - fh = fluid.executor.FetchHandler(out.name) + fh = base.executor.FetchHandler(out.name) fh.help() try: exe.train_from_dataset( - program=fluid.default_main_program(), + program=base.default_main_program(), dataset=dataset, fetch_handler=fh, ) @@ -1110,14 +1110,14 @@ def test_dataset_fleet(self): data += "1 7 2 3 6 4 8 8 8 8 1 7\n" f.write(data) - train_program = fluid.Program() - startup_program = fluid.Program() - scope = fluid.Scope() + train_program = base.Program() + startup_program = base.Program() + scope = base.Scope() from paddle.incubate.distributed.fleet.parameter_server.distribute_transpiler import ( fleet, ) - with fluid.program_guard(train_program, startup_program): + with base.program_guard(train_program, startup_program): slots = ["slot1_ff", "slot2_ff", "slot3_ff", "slot4_ff"] slots_vars = [] for slot in slots: @@ -1127,9 +1127,9 @@ def test_dataset_fleet(self): slots_vars.append(var) fake_cost = paddle.subtract(slots_vars[0], slots_vars[-1]) fake_cost = paddle.mean(fake_cost) - with fluid.scope_guard(scope): - place = fluid.CPUPlace() - exe = fluid.Executor(place) + with base.scope_guard(scope): + place = base.CPUPlace() + exe = base.Executor(place) try: fleet.init() except ImportError as e: @@ -1182,14 +1182,14 @@ def test_dataset_fleet2(self): data += "1 7 2 3 6 4 8 8 8 8 1 7\n" f.write(data) - train_program = fluid.Program() - startup_program = fluid.Program() - scope = 
fluid.Scope() + train_program = base.Program() + startup_program = base.Program() + scope = base.Scope() from paddle.incubate.distributed.fleet.parameter_server.pslib import ( fleet, ) - with fluid.program_guard(train_program, startup_program): + with base.program_guard(train_program, startup_program): slots = ["slot1_ff", "slot2_ff", "slot3_ff", "slot4_ff"] slots_vars = [] for slot in slots: @@ -1199,9 +1199,9 @@ def test_dataset_fleet2(self): slots_vars.append(var) fake_cost = paddle.subtract(slots_vars[0], slots_vars[-1]) fake_cost = paddle.mean(fake_cost) - with fluid.scope_guard(scope): - place = fluid.CPUPlace() - exe = fluid.Executor(place) + with base.scope_guard(scope): + place = base.CPUPlace() + exe = base.Executor(place) try: fleet.init() except ImportError as e: @@ -1315,14 +1315,14 @@ def test_bosps_dataset_fleet2(self): data += "1 7 2 3 6 4 8 8 8 8 1 7\n" f.write(data) - train_program = fluid.Program() - startup_program = fluid.Program() - scope = fluid.Scope() + train_program = base.Program() + startup_program = base.Program() + scope = base.Scope() from paddle.incubate.distributed.fleet.parameter_server.pslib import ( fleet, ) - with fluid.program_guard(train_program, startup_program): + with base.program_guard(train_program, startup_program): slots = ["slot1_ff", "slot2_ff", "slot3_ff", "slot4_ff"] slots_vars = [] for slot in slots: @@ -1332,9 +1332,9 @@ def test_bosps_dataset_fleet2(self): slots_vars.append(var) fake_cost = paddle.subtract(slots_vars[0], slots_vars[-1]) fake_cost = paddle.mean(fake_cost) - with fluid.scope_guard(scope): - place = fluid.CPUPlace() - exe = fluid.Executor(place) + with base.scope_guard(scope): + place = base.CPUPlace() + exe = base.Executor(place) try: fleet.init() except ImportError as e: diff --git a/test/legacy_test/test_dataset_consistency_inspection.py b/test/legacy_test/test_dataset_consistency_inspection.py index 431c354c4b6b4..de9beede4a87b 100644 --- a/test/legacy_test/test_dataset_consistency_inspection.py +++ b/test/legacy_test/test_dataset_consistency_inspection.py @@ -21,12 +21,12 @@ import unittest import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet # paddle.enable_static() -# fluid.disable_dygraph() -fluid.disable_dygraph() +# base.disable_dygraph() +base.disable_dygraph() url_schema_len = 5 query_schema = [ 'Q_query_basic', diff --git a/test/legacy_test/test_dataset_dataloader.py b/test/legacy_test/test_dataset_dataloader.py index b17d2089c6d2c..03d27fdced05e 100644 --- a/test/legacy_test/test_dataset_dataloader.py +++ b/test/legacy_test/test_dataset_dataloader.py @@ -20,7 +20,7 @@ from simple_nets import simple_fc_net_with_inputs import paddle -from paddle import fluid +from paddle import base BATCH_SIZE = 32 BATCH_NUM = 10 @@ -31,12 +31,12 @@ def get_place_string(p): - if isinstance(p, (fluid.CPUPlace or fluid.CUDAPlace)): - tmp = fluid.core.Place() + if isinstance(p, (base.CPUPlace or base.CUDAPlace)): + tmp = base.core.Place() tmp.set_place(p) p = tmp - if p._type() == fluid.CPUPlace()._type(): + if p._type() == base.CPUPlace()._type(): return 'CPUPlace()' else: return 'CUDAPlace()' @@ -83,9 +83,9 @@ def tearDown(self): self.temp_dir.cleanup() def build_network(self): - main_prog = fluid.Program() - startup_prog = fluid.Program() - with fluid.program_guard(main_prog, startup_prog): + main_prog = base.Program() + startup_prog = base.Program() + with base.program_guard(main_prog, startup_prog): image = paddle.static.data( name='image', shape=[-1] + IMAGE_SHAPE, 
dtype='float32' ) @@ -105,14 +105,14 @@ def check_batch_number(self, place, randomize_batch_num=False): dataset = paddle.distributed.InMemoryDataset() dataset._set_batch_size(BATCH_SIZE) - if isinstance(place, fluid.CPUPlace): + if isinstance(place, base.CPUPlace): file_num = 1 os.environ['CPU_NUM'] = str(file_num) - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] use_cuda = False else: file_num = 1 - places = [fluid.CUDAPlace(0)] + places = [base.CUDAPlace(0)] use_cuda = True filelist = [] @@ -140,11 +140,11 @@ def check_batch_number(self, place, randomize_batch_num=False): if self.dataset_name == 'InMemoryDataset': dataset.load_into_memory() - dataloader = fluid.io.DataLoader.from_dataset( + dataloader = base.io.DataLoader.from_dataset( dataset=dataset, places=places, drop_last=self.drop_last ) - prog = fluid.CompiledProgram(main_prog) - exe = fluid.Executor(place) + prog = base.CompiledProgram(main_prog) + exe = base.Executor(place) exe.run(startup_prog) @@ -199,19 +199,19 @@ def check_batch_number(self, place, randomize_batch_num=False): self.assertTrue(has_complete_batch) def get_all_places(self): - p = [fluid.CPUPlace()] - if fluid.is_compiled_with_cuda(): - p.append(fluid.CUDAPlace(0)) + p = [base.CPUPlace()] + if base.is_compiled_with_cuda(): + p.append(base.CUDAPlace(0)) return p def test_batch_number_with_same_length_files(self): for p in self.get_all_places(): - with fluid.scope_guard(fluid.Scope()): + with base.scope_guard(base.Scope()): self.check_batch_number(place=p, randomize_batch_num=False) def test_batch_number_with_different_length_files(self): for p in self.get_all_places(): - with fluid.scope_guard(fluid.Scope()): + with base.scope_guard(base.Scope()): self.check_batch_number(place=p, randomize_batch_num=True) diff --git a/test/legacy_test/test_debugger.py b/test/legacy_test/test_debugger.py index 098e24a78f6eb..bddd9ad540b1d 100644 --- a/test/legacy_test/test_debugger.py +++ b/test/legacy_test/test_debugger.py @@ -15,8 +15,8 @@ import unittest from paddle.distributed.fleet.base.util_factory import draw_block_graphviz -from paddle.fluid import core -from paddle.fluid.framework import Program +from paddle.base import core +from paddle.base.framework import Program class TestDrawBlockGraphviz(unittest.TestCase): diff --git a/test/legacy_test/test_decoupled_py_reader.py b/test/legacy_test/test_decoupled_py_reader.py index 38ef00b1ddcf1..a28b2584a5ff6 100644 --- a/test/legacy_test/test_decoupled_py_reader.py +++ b/test/legacy_test/test_decoupled_py_reader.py @@ -18,7 +18,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base EPOCH_NUM = 5 BATCH_SIZE = 16 @@ -37,18 +37,18 @@ def random_reader(): def simple_fc_net(places, use_legacy_py_reader, use_double_buffer): paddle.seed(1) paddle.framework.random._manual_program_seed(1) - startup_prog = fluid.Program() - main_prog = fluid.Program() + startup_prog = base.Program() + main_prog = base.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main_prog, startup_prog): + with base.unique_name.guard(): + with base.program_guard(main_prog, startup_prog): image = paddle.static.data( name='image', shape=[-1, 784], dtype='float32' ) label = paddle.static.data( name='label', shape=[-1, 1], dtype='int64' ) - py_reader = fluid.io.PyReader( + py_reader = base.io.PyReader( feed_list=[image, label], capacity=4, iterable=not use_legacy_py_reader, @@ -60,7 +60,7 @@ def simple_fc_net(places, use_legacy_py_reader, use_double_buffer): hidden, size=hidden_size, activation='tanh', - 
bias_attr=fluid.ParamAttr( + bias_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=1.0) ), ) @@ -89,24 +89,24 @@ def run_main( places, use_double_buffer, ): - scope = fluid.Scope() - with fluid.scope_guard(scope): + scope = base.Scope() + with base.scope_guard(scope): startup_prog, main_prog, py_reader, loss = simple_fc_net( places, use_legacy_py_reader, use_double_buffer ) reader = paddle.batch(random_reader, batch_size=BATCH_SIZE) - ps = places if use_double_buffer else fluid.cpu_places(len(places)) + ps = places if use_double_buffer else base.cpu_places(len(places)) py_reader.decorate_sample_list_generator( reader, places=ps if py_reader.iterable else None ) - exe = fluid.Executor(place=places[0]) + exe = base.Executor(place=places[0]) exe.run(startup_prog) - prog = fluid.CompiledProgram(main_prog) + prog = base.CompiledProgram(main_prog) step = 0 step_list = [] @@ -125,7 +125,7 @@ def run_main( ) loss_list.append(np.mean(L)) step += 1 - except fluid.core.EOFException: + except base.core.EOFException: py_reader.reset() break step_list.append(step) @@ -161,10 +161,10 @@ def run_main( def prepare_places(self, with_cpu=True, with_gpu=True): places = [] if with_cpu: - places.append([fluid.CPUPlace()]) + places.append([base.CPUPlace()]) - if with_gpu and fluid.core.is_compiled_with_cuda(): - tmp = fluid.cuda_places() + if with_gpu and base.core.is_compiled_with_cuda(): + tmp = base.cuda_places() assert len(tmp) > 0, "no gpu detected" places.append([tmp[0]]) return places diff --git a/test/legacy_test/test_decoupled_py_reader_data_check.py b/test/legacy_test/test_decoupled_py_reader_data_check.py index 1c8742cd895b2..ff5ff423493d3 100644 --- a/test/legacy_test/test_decoupled_py_reader_data_check.py +++ b/test/legacy_test/test_decoupled_py_reader_data_check.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base class TestClass(unittest.TestCase): @@ -42,14 +42,14 @@ def fake_reader(): reader = paddle.reader.cache(fake_reader) batch_reader = paddle.batch(reader, batch_size=batch_size) - places = [fluid.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places = [base.CPUPlace()] + if base.core.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) for p in places: - main_prog = fluid.Program() - startup_prog = fluid.Program() - with fluid.program_guard(main_prog, startup_prog): + main_prog = base.Program() + startup_prog = base.Program() + with base.program_guard(main_prog, startup_prog): img = paddle.static.data( shape=[-1] + img_shape, dtype='float32', name='image' ) @@ -57,17 +57,17 @@ def fake_reader(): shape=[-1] + label_shape, dtype='int64', name='label' ) - feeder = fluid.DataFeeder(feed_list=[img, label], place=p) + feeder = base.DataFeeder(feed_list=[img, label], place=p) use_double_buffer = self.use_double_buffer if ( - p._type() != fluid.CPUPlace()._type() + p._type() != base.CPUPlace()._type() and not use_double_buffer ): use_double_buffer = True if self.use_py_reader: - py_reader = fluid.io.PyReader( + py_reader = base.io.PyReader( feed_list=[img, label], capacity=4, iterable=True, @@ -77,7 +77,7 @@ def fake_reader(): batch_reader, places=p ) else: - py_reader = fluid.io.DataLoader.from_generator( + py_reader = base.io.DataLoader.from_generator( feed_list=[img, label], capacity=4, iterable=True, diff --git a/test/legacy_test/test_default_scope_funcs.py b/test/legacy_test/test_default_scope_funcs.py index c6c0e73c5d2ee..779e20d2a7c64 100644 --- 
a/test/legacy_test/test_default_scope_funcs.py +++ b/test/legacy_test/test_default_scope_funcs.py @@ -14,7 +14,7 @@ import unittest -from paddle.fluid.default_scope_funcs import ( +from paddle.base.default_scope_funcs import ( enter_local_scope, find_var, get_cur_scope, diff --git a/test/legacy_test/test_deformable_conv_op.py b/test/legacy_test/test_deformable_conv_op.py index 84d0a7d7907da..db01a7c68a48b 100644 --- a/test/legacy_test/test_deformable_conv_op.py +++ b/test/legacy_test/test_deformable_conv_op.py @@ -178,10 +178,10 @@ def setUp(self): output = output.astype(self.dtype) self.inputs = { - 'Input': OpTest.np_dtype_to_fluid_dtype(input), - 'Offset': OpTest.np_dtype_to_fluid_dtype(offset), - 'Mask': OpTest.np_dtype_to_fluid_dtype(mask), - 'Filter': OpTest.np_dtype_to_fluid_dtype(filter), + 'Input': OpTest.np_dtype_to_base_dtype(input), + 'Offset': OpTest.np_dtype_to_base_dtype(offset), + 'Mask': OpTest.np_dtype_to_base_dtype(mask), + 'Filter': OpTest.np_dtype_to_base_dtype(filter), } self.attrs = { 'strides': self.stride, diff --git a/test/legacy_test/test_deformable_conv_v1_op.py b/test/legacy_test/test_deformable_conv_v1_op.py index 6a355811194f4..c4354ea9e8a88 100644 --- a/test/legacy_test/test_deformable_conv_v1_op.py +++ b/test/legacy_test/test_deformable_conv_v1_op.py @@ -173,9 +173,9 @@ def setUp(self): ) output = output.astype(self.dtype) self.inputs = { - 'Input': OpTest.np_dtype_to_fluid_dtype(input), - 'Offset': OpTest.np_dtype_to_fluid_dtype(offset), - 'Filter': OpTest.np_dtype_to_fluid_dtype(filter), + 'Input': OpTest.np_dtype_to_base_dtype(input), + 'Offset': OpTest.np_dtype_to_base_dtype(offset), + 'Filter': OpTest.np_dtype_to_base_dtype(filter), } self.attrs = { 'strides': self.stride, diff --git a/test/legacy_test/test_deg2rad.py b/test/legacy_test/test_deg2rad.py index 0f038e86f2522..350471f896e69 100644 --- a/test/legacy_test/test_deg2rad.py +++ b/test/legacy_test/test_deg2rad.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -33,22 +33,22 @@ def setUp(self): self.out_np = np.deg2rad(self.x_np) def test_static_graph(self): - startup_program = fluid.Program() - train_program = fluid.Program() - with fluid.program_guard(startup_program, train_program): + startup_program = base.Program() + train_program = base.Program() + with base.program_guard(startup_program, train_program): x = paddle.static.data( name='input', dtype=self.x_dtype, shape=self.x_shape ) out = paddle.deg2rad(x) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) res = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={'input': self.x_np}, fetch_list=[out], ) diff --git a/test/legacy_test/test_deprecated_decorator.py b/test/legacy_test/test_deprecated_decorator.py index 1e4bfe50515dd..81ae80a1f9bf7 100755 --- a/test/legacy_test/test_deprecated_decorator.py +++ b/test/legacy_test/test_deprecated_decorator.py @@ -90,7 +90,7 @@ def test_new_multiply(self): def test_ops_elementwise_mul(self): """ Test for new C++ elementwise_op, expected result should be True, - because not matter what fluid.layers.elementwise_mul is deprecated. + because not matter what base.layers.elementwise_mul is deprecated. 
""" a = np.random.uniform(0.1, 1, [51, 76]).astype(np.float32) @@ -118,7 +118,7 @@ def test_tensor_gradient(self): with warnings.catch_warnings(record=True) as w: grad = x.gradient() assert ( - 'API "paddle.fluid.dygraph.tensor_patch_methods.gradient" is ' + 'API "paddle.base.dygraph.tensor_patch_methods.gradient" is ' 'deprecated since 2.1.0' ) in str(w[-1].message) diff --git a/test/legacy_test/test_deprecated_memory_optimize_interfaces.py b/test/legacy_test/test_deprecated_memory_optimize_interfaces.py index bcb17baaee764..81fa915c9f1dd 100644 --- a/test/legacy_test/test_deprecated_memory_optimize_interfaces.py +++ b/test/legacy_test/test_deprecated_memory_optimize_interfaces.py @@ -17,7 +17,7 @@ from simple_nets import simple_fc_net import paddle -from paddle import fluid +from paddle import base from paddle.distributed import transpiler @@ -26,10 +26,10 @@ def setUp(self): self.method = transpiler.memory_optimize def build_network(self, call_interface): - startup_prog = fluid.Program() - main_prog = fluid.Program() - with fluid.program_guard(main_prog, startup_prog): - with fluid.unique_name.guard(): + startup_prog = base.Program() + main_prog = base.Program() + with base.program_guard(main_prog, startup_prog): + with base.unique_name.guard(): loss = simple_fc_net() opt = paddle.optimizer.Adam(learning_rate=1e-3) opt.minimize(loss) diff --git a/test/legacy_test/test_desc_clone.py b/test/legacy_test/test_desc_clone.py index 10f0134757a79..a106233b896c4 100644 --- a/test/legacy_test/test_desc_clone.py +++ b/test/legacy_test/test_desc_clone.py @@ -19,8 +19,8 @@ import nets import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core SEED = 1 DTYPE = "float32" @@ -29,7 +29,7 @@ # random seed must set before configuring the network. 
-# fluid.default_startup_program().random_seed = SEED +# base.default_startup_program().random_seed = SEED def cnn_model(data): conv_pool_1 = nets.simple_img_conv_pool( input=data, @@ -60,7 +60,7 @@ def cnn_model(data): x=conv_pool_2, size=SIZE, activation="softmax", - weight_attr=fluid.param_attr.ParamAttr( + weight_attr=base.param_attr.ParamAttr( initializer=paddle.nn.initializer.Normal(loc=0.0, scale=scale) ), ) @@ -87,7 +87,7 @@ def get_model(batch_size): input=predict, label=label, total=batch_size_tensor ) - inference_program = fluid.default_main_program().clone() + inference_program = base.default_main_program().clone() # Optimization opt = paddle.optimizer.Adam(learning_rate=0.001, beta1=0.9, beta2=0.999) @@ -114,7 +114,7 @@ def operator_equal(a, b): raise ValueError("In operator_equal not equal\n") for k, v in a.__dict__.items(): - if isinstance(v, (fluid.framework.Program, fluid.framework.Block)): + if isinstance(v, (base.framework.Program, base.framework.Block)): continue elif isinstance(v, core.OpDesc): @@ -136,7 +136,7 @@ def operator_equal(a, b): def block_equal(a, b): for k, v in a.__dict__.items(): if isinstance( - v, (core.ProgramDesc, fluid.framework.Program, core.BlockDesc) + v, (core.ProgramDesc, base.framework.Program, core.BlockDesc) ): continue elif k == "ops": @@ -177,9 +177,9 @@ def program_equal(a, b): class TestCloneWithStopGradient(unittest.TestCase): def test_clone_with_stop_gradient(self): - train_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): + train_program = base.Program() + startup_program = base.Program() + with base.program_guard(train_program, startup_program): img = paddle.static.data(name='image', shape=[-1, 784]) hidden1 = paddle.static.nn.fc(x=img, size=200, activation='relu') hidden1.stop_gradient = True @@ -207,9 +207,9 @@ def test_clone_with_stop_gradient(self): class TestCloneWithStopGradientInSubBlock(unittest.TestCase): def test_clone_with_stop_gradient(self): - train_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): + train_program = base.Program() + startup_program = base.Program() + with base.program_guard(train_program, startup_program): img = paddle.static.data(name='image', shape=[-1, 784]) true = paddle.ones(shape=[1], dtype="float32") hidden1 = paddle.static.nn.fc(x=img, size=200, activation='relu') @@ -254,9 +254,9 @@ def false_fn(): class TestCloneWithRaise(unittest.TestCase): def test_clone_with_stop_gradient(self): - train_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): + train_program = base.Program() + startup_program = base.Program() + with base.program_guard(train_program, startup_program): img = paddle.static.data(name='image', shape=[-1, 784]) true = paddle.ones(shape=[1], dtype="float32") hidden1 = paddle.static.nn.fc(x=img, size=200, activation='relu') diff --git a/test/legacy_test/test_detach.py b/test/legacy_test/test_detach.py index 4b3d25a1cde73..5bb336866733a 100644 --- a/test/legacy_test/test_detach.py +++ b/test/legacy_test/test_detach.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid.dygraph.base import to_variable +from paddle import base +from paddle.base.dygraph.base import to_variable from paddle.nn import Linear @@ -31,7 +31,7 @@ def generate_Data(self): def no_detach_multi(self): data = self.generate_Data() - with fluid.dygraph.guard(): + with 
base.dygraph.guard(): linear_w_param_attrs = paddle.ParamAttr( initializer=paddle.nn.initializer.Constant(5.0) ) @@ -79,7 +79,7 @@ def no_detach_multi(self): def no_detach_single(self): data = self.generate_Data() - with fluid.dygraph.guard(): + with base.dygraph.guard(): linear_w_param_attrs = paddle.ParamAttr( initializer=paddle.nn.initializer.Constant(5.0) ) @@ -115,11 +115,11 @@ def no_detach_single(self): def detach_multi(self): data = self.generate_Data() - with fluid.dygraph.guard(): + with base.dygraph.guard(): linear_w_param_attrs = paddle.ParamAttr( initializer=paddle.nn.initializer.Constant(5.0) ) - linear_b_param_attrs = fluid.ParamAttr( + linear_b_param_attrs = base.ParamAttr( initializer=paddle.nn.initializer.Constant(6.0) ) linear = Linear( @@ -131,7 +131,7 @@ def detach_multi(self): linear1_w_param_attrs = paddle.ParamAttr( initializer=paddle.nn.initializer.Constant(7.0) ) - linear1_b_param_attrs = fluid.ParamAttr( + linear1_b_param_attrs = base.ParamAttr( initializer=paddle.nn.initializer.Constant(8.0) ) linear1 = Linear( @@ -179,7 +179,7 @@ def test_NoDetachSingle_DetachMulti(self): class TestInplace(unittest.TestCase): def test_forward_version(self): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): var = paddle.to_tensor(np.ones((4, 2, 3)).astype(np.float32)) self.assertEqual(var.inplace_version, 0) detach_var_1 = var.detach() @@ -198,7 +198,7 @@ def test_forward_version(self): def test_backward_error(self): # It raises an error because the inplace operator will result # in incorrect gradient computation. - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): var_a = paddle.ones(shape=[4, 2, 3], dtype="float32") var_a.stop_gradient = False diff --git a/test/legacy_test/test_detection.py b/test/legacy_test/test_detection.py index e3aebb946692c..8aec394e21803 100644 --- a/test/legacy_test/test_detection.py +++ b/test/legacy_test/test_detection.py @@ -18,22 +18,22 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.dygraph import base -from paddle.fluid.framework import Program, program_guard +from paddle import base +from paddle.base import core +from paddle.base.dygraph import base +from paddle.base.framework import Program, program_guard paddle.enable_static() @contextlib.contextmanager def new_program_scope(main=None, startup=None, scope=None): - prog = main if main else fluid.Program() - startup_prog = startup if startup else fluid.Program() - scope = scope if scope else fluid.core.Scope() - with fluid.scope_guard(scope): - with fluid.program_guard(prog, startup_prog): - with fluid.unique_name.guard(): + prog = main if main else base.Program() + startup_prog = startup if startup else base.Program() + scope = scope if scope else base.core.Scope() + with base.scope_guard(scope): + with base.program_guard(prog, startup_prog): + with base.unique_name.guard(): yield @@ -58,17 +58,17 @@ def _get_place(self, force_to_use_cpu=False): @contextlib.contextmanager def static_graph(self): with new_program_scope(): - fluid.default_startup_program().random_seed = self.seed - fluid.default_main_program().random_seed = self.seed + base.default_startup_program().random_seed = self.seed + base.default_main_program().random_seed = self.seed yield def get_static_graph_result( self, feed, fetch_list, with_lod=False, force_to_use_cpu=False ): - exe = fluid.Executor(self._get_place(force_to_use_cpu)) - exe.run(fluid.default_startup_program()) + exe = 
base.Executor(self._get_place(force_to_use_cpu)) + exe.run(base.default_startup_program()) return exe.run( - fluid.default_main_program(), + base.default_main_program(), feed=feed, fetch_list=fetch_list, return_numpy=(not with_lod), @@ -76,11 +76,11 @@ def get_static_graph_result( @contextlib.contextmanager def dynamic_graph(self, force_to_use_cpu=False): - with fluid.dygraph.guard( + with base.dygraph.guard( self._get_place(force_to_use_cpu=force_to_use_cpu) ): - fluid.default_startup_program().random_seed = self.seed - fluid.default_main_program().random_seed = self.seed + base.default_startup_program().random_seed = self.seed + base.default_main_program().random_seed = self.seed yield diff --git a/test/legacy_test/test_device.py b/test/legacy_test/test_device.py index 8396b2a39d699..d054b333cb84e 100644 --- a/test/legacy_test/test_device.py +++ b/test/legacy_test/test_device.py @@ -15,8 +15,8 @@ import unittest import paddle -from paddle import fluid -from paddle.fluid import core, framework +from paddle import base +from paddle.base import core, framework class TestStaticDeviceManage(unittest.TestCase): @@ -28,7 +28,7 @@ def _test_device(self, device_name, device_class): out3 = paddle.concat(x=[out1, out2], axis=0) exe = paddle.static.Executor() - exe.run(paddle.fluid.default_startup_program()) + exe.run(paddle.base.default_startup_program()) res = exe.run(fetch_list=[out3]) device = paddle.get_device() @@ -49,7 +49,7 @@ def test_xpu_device(self): class TestImperativeDeviceManage(unittest.TestCase): def test_cpu(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): paddle.set_device('cpu') out1 = paddle.zeros(shape=[1, 3], dtype='float32') out2 = paddle.ones(shape=[1, 3], dtype='float32') @@ -63,7 +63,7 @@ def test_cpu(self): def test_gpu(self): if core.is_compiled_with_cuda(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): paddle.set_device('gpu:0') out1 = paddle.zeros(shape=[1, 3], dtype='float32') out2 = paddle.ones(shape=[1, 3], dtype='float32') @@ -79,7 +79,7 @@ def test_gpu(self): def test_xpu(self): if core.is_compiled_with_xpu(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): out = paddle.to_tensor([1, 2]) device = paddle.get_device() self.assertEqual( diff --git a/test/legacy_test/test_device_guard.py b/test/legacy_test/test_device_guard.py index 76ef6b6799a6a..336437333e692 100644 --- a/test/legacy_test/test_device_guard.py +++ b/test/legacy_test/test_device_guard.py @@ -16,7 +16,7 @@ import warnings import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/legacy_test/test_diag.py b/test/legacy_test/test_diag.py index 429682b84e8a7..96d5f9e53db79 100644 --- a/test/legacy_test/test_diag.py +++ b/test/legacy_test/test_diag.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest import paddle -from paddle.fluid import Program, core, program_guard +from paddle.base import Program, core, program_guard class TestDiagOp(OpTest): diff --git a/test/legacy_test/test_diag_embed.py b/test/legacy_test/test_diag_embed.py index 98efcf4b5ca4c..4e9e26d1b4ad4 100644 --- a/test/legacy_test/test_diag_embed.py +++ b/test/legacy_test/test_diag_embed.py @@ -19,8 +19,8 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestDiagEmbedOp(OpTest): @@ -61,9 +61,9 @@ def test_case1(self): out2 = F.diag_embed(data1, offset=1, dim1=-2, dim2=3) place = core.CPUPlace() - exe = fluid.Executor(place) + 
exe = base.Executor(place) results = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"data1": diag_embed}, fetch_list=[out1, out2], return_numpy=True, diff --git a/test/legacy_test/test_diag_v2.py b/test/legacy_test/test_diag_v2.py index c98daafaa6ead..bbe146245bf14 100644 --- a/test/legacy_test/test_diag_v2.py +++ b/test/legacy_test/test_diag_v2.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard class TestDiagV2Op(OpTest): @@ -226,9 +226,9 @@ def run_static(self, use_gpu=False): result12 = paddle.diag(x5, offset=-1) result13 = paddle.diag(x6, offset=-1) - place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + place = base.CUDAPlace(0) if use_gpu else base.CPUPlace() + exe = base.Executor(place) + exe.run(base.default_startup_program()) ( res0, res1, @@ -285,23 +285,23 @@ def run_static(self, use_gpu=False): np.testing.assert_allclose(res13, self.expected12, rtol=1e-05) def test_cpu(self): - paddle.disable_static(place=paddle.fluid.CPUPlace()) + paddle.disable_static(place=paddle.base.CPUPlace()) self.run_imperative() paddle.enable_static() - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): self.run_static() def test_gpu(self): - if not fluid.core.is_compiled_with_cuda(): + if not base.core.is_compiled_with_cuda(): return - paddle.disable_static(place=paddle.fluid.CUDAPlace(0)) + paddle.disable_static(place=paddle.base.CUDAPlace(0)) self.run_imperative() paddle.enable_static() - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): self.run_static(use_gpu=True) diff --git a/test/legacy_test/test_diagflat.py b/test/legacy_test/test_diagflat.py index db3137ae020e4..c391df1c8eb6c 100644 --- a/test/legacy_test/test_diagflat.py +++ b/test/legacy_test/test_diagflat.py @@ -105,7 +105,7 @@ def test_gpu(self): self.run_static(use_gpu=True) def test_fp16_with_gpu(self, use_gpu=False): - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): place = paddle.CUDAPlace(0) with paddle.static.program_guard( paddle.static.Program(), paddle.static.Program() diff --git a/test/legacy_test/test_diagonal_op.py b/test/legacy_test/test_diagonal_op.py index 3cce67889a87f..005a890b6ed07 100644 --- a/test/legacy_test/test_diagonal_op.py +++ b/test/legacy_test/test_diagonal_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/legacy_test/test_diff_op.py b/test/legacy_test/test_diff_op.py index 96101b58340d9..bf7313ae61237 100644 --- a/test/legacy_test/test_diff_op.py +++ b/test/legacy_test/test_diff_op.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestDiffOp(unittest.TestCase): @@ -79,11 +79,11 @@ def test_dygraph(self): def test_static(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for place in places: - with fluid.program_guard(fluid.Program(), fluid.Program()): + with 
base.program_guard(base.Program(), base.Program()): x = paddle.static.data( name="input", shape=self.input.shape, dtype=self.input.dtype ) @@ -105,12 +105,12 @@ def test_static(self): dtype=self.append.dtype, ) - exe = fluid.Executor(place) + exe = base.Executor(place) out = paddle.diff( x, n=self.n, axis=self.axis, prepend=prepend, append=append ) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={ "input": self.input, "prepend": self.prepend, @@ -240,7 +240,7 @@ def set_args(self): class TestDiffOpFp16(TestDiffOp): def test_fp16_with_gpu(self): paddle.enable_static() - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): place = paddle.CUDAPlace(0) with paddle.static.program_guard( paddle.static.Program(), paddle.static.Program() diff --git a/test/legacy_test/test_digamma_op.py b/test/legacy_test/test_digamma_op.py index 60eec154620d8..04fbefc1dc8be 100644 --- a/test/legacy_test/test_digamma_op.py +++ b/test/legacy_test/test_digamma_op.py @@ -19,8 +19,8 @@ from scipy.special import psi import paddle -from paddle import fluid, static -from paddle.fluid import core +from paddle import base, static +from paddle.base import core class TestDigammaOp(OpTest): @@ -126,7 +126,7 @@ def test_in_dynamic_mode(self): sc_res = psi(input) for place in self.places: # it is more convenient to use `guard` than `enable/disable_**` here - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_t = paddle.to_tensor(input) res = paddle.digamma(input_t).numpy() np.testing.assert_allclose(res, sc_res, rtol=1e-05) @@ -146,7 +146,7 @@ def test_dtype_error(self): # in dynamic mode with self.assertRaises(RuntimeError): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input = np.random.random(self._shape).astype("int32") input_t = paddle.to_tensor(input) res = paddle.digamma(input_t) diff --git a/test/legacy_test/test_dist_allreduce_op.py b/test/legacy_test/test_dist_allreduce_op.py index afde2ee0ebae5..cb77d5476cfaf 100644 --- a/test/legacy_test/test_dist_allreduce_op.py +++ b/test/legacy_test/test_dist_allreduce_op.py @@ -30,9 +30,9 @@ def _setup_config(self): self._nccl2_reduce_layer = True def test_dist_train(self): - from paddle import fluid + from paddle import base - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( "dist_allreduce_op.py", delta=1e-5, check_error_log=True ) diff --git a/test/legacy_test/test_dist_base.py b/test/legacy_test/test_dist_base.py index a1ea2d40a4b0f..8e6a1c3c9c9ee 100755 --- a/test/legacy_test/test_dist_base.py +++ b/test/legacy_test/test_dist_base.py @@ -28,11 +28,11 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.distributed.fleet.meta_optimizers import ( RawProgramOptimizer as RawProgram, ) -from paddle.fluid import compiler +from paddle.base import compiler from paddle.incubate.distributed.fleet import role_maker from paddle.incubate.distributed.fleet.collective import ( DistributedStrategy, @@ -149,7 +149,7 @@ def get_transpiler( nccl_comm_num=1, hogwild_mode=False, ): - # NOTE: import fluid until runtime, or else forking processes will cause error. + # NOTE: import base until runtime, or else forking processes will cause error. 
+ # NOTE: import base until runtime, or else forking processes will cause error.
config = paddle.distributed.transpiler.DistributeTranspilerConfig() config.enable_dc_asgd = dc_asgd config.sync_mode = sync_mode @@ -186,7 +186,7 @@ def run_pserver(self, args): t = self.get_transpiler( trainer_id=args.trainer_id, - main_program=fluid.default_main_program(), + main_program=base.default_main_program(), pserver_endpoints=args.endpoints, trainers=args.trainers, sync_mode=args.sync_mode, @@ -198,8 +198,8 @@ def run_pserver(self, args): args.current_endpoint, pserver_prog ) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) exe.run(startup_prog) print_to_err(type(self).__name__, "run pserver startup program done.") exe.run(pserver_prog) @@ -223,10 +223,10 @@ def run_pipeline_trainer(self, args): device_id = int(os.getenv("FLAGS_selected_gpus", "0")) eprint(type(self).__name__, "device_id: %d." % device_id) - place = fluid.CUDAPlace(device_id) + place = base.CUDAPlace(device_id) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + exe.run(base.default_startup_program()) eprint(type(self).__name__, "run worker startup program done.") data_loader.set_sample_list_generator(train_reader, place) @@ -234,7 +234,7 @@ def run_pipeline_trainer(self, args): print_to_err(type(self).__name__, "begin to train on trainer") out_losses = [] - main_program = fluid.default_main_program() + main_program = base.default_main_program() lr_scheduler = self.get_lr_scheduler(main_program) for i in range(RUN_STEP): loss = exe.run(main_program, fetch_list=[avg_cost]) @@ -269,24 +269,24 @@ def run_use_fleet_api_20_trainer(self, args): predict, ) = self.get_model(batch_size=args.batch_size) - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): device_id = int(os.getenv("FLAGS_selected_gpus", "0")) - place = fluid.CUDAPlace(device_id) - elif fluid.core.is_compiled_with_xpu(): + place = base.CUDAPlace(device_id) + elif base.core.is_compiled_with_xpu(): device_id = int(os.getenv("FLAGS_selected_xpus", "0")) - place = fluid.XPUPlace(device_id) + place = base.XPUPlace(device_id) else: raise ValueError( "fleet dygraph api must in paddlepaddle-xpu or paddlepaddle-gpu." 
) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + exe.run(base.default_startup_program()) eprint(type(self).__name__, "run worker startup program done.") feed_var_list = [ var - for var in fluid.default_main_program().global_block().vars.values() + for var in base.default_main_program().global_block().vars.values() if var.is_data ] @@ -295,7 +295,7 @@ def run_use_fleet_api_20_trainer(self, args): if feed_var_list[0].name == 'label': feed_var_list = feed_var_list[::-1] - feeder = fluid.DataFeeder(feed_var_list, place) + feeder = base.DataFeeder(feed_var_list, place) reader_generator = train_reader() def get_data(): @@ -319,7 +319,7 @@ def get_data(): out_losses = [] for i in range(RUN_STEP): (loss,) = exe.run( - fluid.default_main_program(), + base.default_main_program(), fetch_list=[avg_cost.name], feed=feeder.feed(get_data()), ) @@ -335,7 +335,7 @@ def run_use_fleet_api_trainer(self, args): self.lr = args.lr - exec_strategy = fluid.ExecutionStrategy() + exec_strategy = base.ExecutionStrategy() exec_strategy.num_threads = 1 dist_strategy = DistributedStrategy() @@ -369,19 +369,19 @@ def run_use_fleet_api_trainer(self, args): trainer_prog = fleet._origin_program dist_prog = fleet.main_program - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): device_id = int(os.getenv("FLAGS_selected_gpus", "0")) - place = fluid.CUDAPlace(device_id) - elif fluid.core.is_compiled_with_xpu(): + place = base.CUDAPlace(device_id) + elif base.core.is_compiled_with_xpu(): device_id = int(os.getenv("FLAGS_selected_xpus", "0")) - place = fluid.XPUPlace(device_id) + place = base.XPUPlace(device_id) else: raise ValueError( "fleet dygraph api must in paddlepaddle-xpu or paddlepaddle-gpu." ) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + exe.run(base.default_startup_program()) eprint(type(self).__name__, "run worker startup program done.") feed_var_list = [ @@ -397,7 +397,7 @@ def run_use_fleet_api_trainer(self, args): if feed_var_list[0].name == 'label': feed_var_list = feed_var_list[::-1] - feeder = fluid.DataFeeder(feed_var_list, place) + feeder = base.DataFeeder(feed_var_list, place) reader_generator = train_reader() def get_data(): @@ -428,37 +428,37 @@ def get_data(): if args.save_model: model_save_dir = "/tmp" if fleet.worker_index() == 0: - model_save_dir_fluid = os.path.join( - model_save_dir, "fluid_persistables" + model_save_dir_base = os.path.join( + model_save_dir, "base_persistables" ) model_save_dir_fleet = os.path.join( model_save_dir, "fleet_persistables" ) - infer_save_dir_fluid = os.path.join( - model_save_dir, "fluid_infer/infer" + infer_save_dir_base = os.path.join( + model_save_dir, "base_infer/infer" ) infer_save_dir_fleet = os.path.join( model_save_dir, "fleet_infer/infer" ) else: - model_save_dir_fluid = os.path.join( - model_save_dir, "fluid_persistables_2" + model_save_dir_base = os.path.join( + model_save_dir, "base_persistables_2" ) model_save_dir_fleet = os.path.join( model_save_dir, "fleet_persistables_2" ) - infer_save_dir_fluid = os.path.join( - model_save_dir, "fluid_infer_2/infer_2" + infer_save_dir_base = os.path.join( + model_save_dir, "base_infer_2/infer_2" ) infer_save_dir_fleet = os.path.join( model_save_dir, "fleet_infer_2/infer_2" ) paddle.distributed.io.save_persistables( - exe, model_save_dir_fluid, fleet._origin_program + exe, model_save_dir_base, fleet._origin_program ) fleet.save_persistables(executor=exe, 
dirname=model_save_dir_fleet) paddle.static.io.save_inference_model( - path_prefix=infer_save_dir_fluid, + path_prefix=infer_save_dir_base, feed_vars=feed_var_list, fetch_vars=[avg_cost], executor=exe, @@ -474,7 +474,7 @@ def run_trainer(self, args): old_stdout = sys.stdout sys.stdout = StringIO() - build_stra = fluid.BuildStrategy() + build_stra = base.BuildStrategy() # FIXME force disable enable_inplace and memory_optimize build_stra.enable_inplace = False build_stra.memory_optimize = False @@ -491,11 +491,11 @@ def run_trainer(self, args): if args.use_reduce: build_stra.reduce_strategy = ( - fluid.BuildStrategy.ReduceStrategy.Reduce + base.BuildStrategy.ReduceStrategy.Reduce ) else: build_stra.reduce_strategy = ( - fluid.BuildStrategy.ReduceStrategy.AllReduce + base.BuildStrategy.ReduceStrategy.AllReduce ) pass_builder = None if args.batch_merge_repeat > 1: @@ -554,7 +554,7 @@ def run_trainer(self, args): ) t = self.get_transpiler( trainer_id=args.trainer_id, - main_program=fluid.default_main_program(), + main_program=base.default_main_program(), pserver_endpoints=args.endpoints, trainers=args.trainers, sync_mode=args.sync_mode, @@ -589,21 +589,21 @@ def run_trainer(self, args): ) nccl2_t.transpile( args.trainer_id, - program=fluid.default_main_program(), - startup_program=fluid.default_startup_program(), + program=base.default_main_program(), + startup_program=base.default_startup_program(), trainers=args.endpoints, current_endpoint=args.current_endpoint, ) print_to_err( type(self).__name__, "get trainer program done. with nccl2 mode" ) - trainer_prog = fluid.default_main_program() + trainer_prog = base.default_main_program() else: print_to_err( type(self).__name__, "do nothing about main program, just use it", ) - trainer_prog = fluid.default_main_program() + trainer_prog = base.default_main_program() print_to_err(type(self).__name__, "use main program done.") # FIXME(gongwb):wait pserver initialization. @@ -611,15 +611,15 @@ def run_trainer(self, args): if args.use_cuda: device_id = int(os.getenv("FLAGS_selected_gpus", "0")) - place = fluid.CUDAPlace(device_id) + place = base.CUDAPlace(device_id) else: - place = fluid.CPUPlace() + place = base.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + exe.run(base.default_startup_program()) print_to_err(type(self).__name__, "run worker startup program done.") - exec_strategy = fluid.ExecutionStrategy() + exec_strategy = base.ExecutionStrategy() exec_strategy.num_threads = 1 print_to_err(type(self).__name__, "begin to compile with data parallel") @@ -634,7 +634,7 @@ def run_trainer(self, args): if var.is_data ] - feeder = fluid.DataFeeder(feed_var_list, place) + feeder = base.DataFeeder(feed_var_list, place) reader_generator = train_reader() def get_data(): @@ -715,19 +715,19 @@ def _get_data(self, batch, args): def run_trainer(self, args): seed = 90 if args.update_method == 'gloo': - place = fluid.CPUPlace() - elif fluid.core.is_compiled_with_cuda(): + place = base.CPUPlace() + elif base.core.is_compiled_with_cuda(): device_id = int(os.getenv("FLAGS_selected_gpus", "0")) - place = fluid.CUDAPlace(device_id) - elif fluid.core.is_compiled_with_xpu(): + place = base.CUDAPlace(device_id) + elif base.core.is_compiled_with_xpu(): device_id = int(os.getenv("FLAGS_selected_xpus", "0")) - place = fluid.XPUPlace(device_id) + place = base.XPUPlace(device_id) else: assert "Only support CUDAPlace or XPUPlace or CPU(Gloo) for now." 
- with fluid.dygraph.guard(place): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + with base.dygraph.guard(place): + base.default_startup_program().random_seed = seed + base.default_main_program().random_seed = seed np.random.seed(seed) import random @@ -976,7 +976,7 @@ def _after_setup_config(self): self.__use_xpu = True self._use_dgc = False else: - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.__use_cuda = True else: self.__use_cuda = False diff --git a/test/legacy_test/test_dist_fleet_a_sync_optimizer_async.py b/test/legacy_test/test_dist_fleet_a_sync_optimizer_async.py index 94daccf0c5991..96a07d4dc2479 100644 --- a/test/legacy_test/test_dist_fleet_a_sync_optimizer_async.py +++ b/test/legacy_test/test_dist_fleet_a_sync_optimizer_async.py @@ -37,11 +37,11 @@ def test_a_sync_optimizer_trainer(self): os.environ["TRAINING_ROLE"] = "TRAINER" from paddle.distributed import fleet - main_program = paddle.fluid.Program() - startup_program = paddle.fluid.Program() + main_program = paddle.base.Program() + startup_program = paddle.base.Program() - paddle.fluid.framework.switch_main_program(main_program) - paddle.fluid.framework.switch_startup_program(startup_program) + paddle.base.framework.switch_main_program(main_program) + paddle.base.framework.switch_startup_program(startup_program) fleet.init(role_maker.PaddleCloudRoleMaker()) @@ -58,7 +58,7 @@ def test_a_sync_optimizer_trainer(self): optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy) optimizer.minimize(avg_cost) - prog = paddle.fluid.default_main_program() + prog = paddle.base.default_main_program() self.assertEqual(prog.global_block().ops[-1].type, "send_barrier") sends = 0 @@ -75,11 +75,11 @@ def test_a_sync_optimizer_pserver(self): os.environ["TRAINING_ROLE"] = "PSERVER" from paddle.distributed import fleet - main_program = paddle.fluid.Program() - startup_program = paddle.fluid.Program() + main_program = paddle.base.Program() + startup_program = paddle.base.Program() - paddle.fluid.framework.switch_main_program(main_program) - paddle.fluid.framework.switch_startup_program(startup_program) + paddle.base.framework.switch_main_program(main_program) + paddle.base.framework.switch_startup_program(startup_program) fleet.init(role_maker.PaddleCloudRoleMaker()) diff --git a/test/legacy_test/test_dist_fleet_a_sync_optimizer_auto.py b/test/legacy_test/test_dist_fleet_a_sync_optimizer_auto.py index 54a8e7c1de27d..4218e567a33e7 100644 --- a/test/legacy_test/test_dist_fleet_a_sync_optimizer_auto.py +++ b/test/legacy_test/test_dist_fleet_a_sync_optimizer_auto.py @@ -37,11 +37,11 @@ def test_a_sync_optimizer1(self): os.environ["TRAINING_ROLE"] = "TRAINER" from paddle.distributed import fleet - main_program = paddle.fluid.Program() - startup_program = paddle.fluid.Program() + main_program = paddle.base.Program() + startup_program = paddle.base.Program() - paddle.fluid.framework.switch_main_program(main_program) - paddle.fluid.framework.switch_startup_program(startup_program) + paddle.base.framework.switch_main_program(main_program) + paddle.base.framework.switch_startup_program(startup_program) fleet.init(role_maker.PaddleCloudRoleMaker()) input_x = paddle.static.data(name="x", shape=[-1, 32], dtype='float32') diff --git a/test/legacy_test/test_dist_fleet_a_sync_optimizer_auto_async.py b/test/legacy_test/test_dist_fleet_a_sync_optimizer_auto_async.py index a5a95370168ac..67807c6673701 100644 --- 
a/test/legacy_test/test_dist_fleet_a_sync_optimizer_auto_async.py +++ b/test/legacy_test/test_dist_fleet_a_sync_optimizer_auto_async.py @@ -39,11 +39,11 @@ def test_a_sync_optimizer3(self): os.environ["TRAINING_ROLE"] = "TRAINER" from paddle.distributed import fleet - main_program = paddle.fluid.Program() - startup_program = paddle.fluid.Program() + main_program = paddle.base.Program() + startup_program = paddle.base.Program() - paddle.fluid.framework.switch_main_program(main_program) - paddle.fluid.framework.switch_startup_program(startup_program) + paddle.base.framework.switch_main_program(main_program) + paddle.base.framework.switch_startup_program(startup_program) fleet.init(role_maker.PaddleCloudRoleMaker()) input_x = paddle.static.data( @@ -56,7 +56,7 @@ def test_a_sync_optimizer3(self): is_distributed=False, input=input_x, size=[1000000000, 100000], - param_attr=paddle.fluid.ParamAttr( + param_attr=paddle.base.ParamAttr( name="embedding", initializer=paddle.paddle.nn.initializer.Constant(value=0.01), ), diff --git a/test/legacy_test/test_dist_fleet_a_sync_optimizer_auto_geo.py b/test/legacy_test/test_dist_fleet_a_sync_optimizer_auto_geo.py index 7bf6733c7c77b..104143d70949e 100644 --- a/test/legacy_test/test_dist_fleet_a_sync_optimizer_auto_geo.py +++ b/test/legacy_test/test_dist_fleet_a_sync_optimizer_auto_geo.py @@ -38,11 +38,11 @@ def test_a_sync_optimizer2(self): os.environ["TRAINING_ROLE"] = "TRAINER" from paddle.distributed import fleet - main_program = paddle.fluid.Program() - startup_program = paddle.fluid.Program() + main_program = paddle.base.Program() + startup_program = paddle.base.Program() - paddle.fluid.framework.switch_main_program(main_program) - paddle.fluid.framework.switch_startup_program(startup_program) + paddle.base.framework.switch_main_program(main_program) + paddle.base.framework.switch_startup_program(startup_program) fleet.init(role_maker.PaddleCloudRoleMaker()) diff --git a/test/legacy_test/test_dist_fleet_a_sync_optimizer_geo.py b/test/legacy_test/test_dist_fleet_a_sync_optimizer_geo.py index 7b565264f995b..475cba5081190 100755 --- a/test/legacy_test/test_dist_fleet_a_sync_optimizer_geo.py +++ b/test/legacy_test/test_dist_fleet_a_sync_optimizer_geo.py @@ -36,11 +36,11 @@ def test_a_sync_optimizer_trainer(self): os.environ["TRAINING_ROLE"] = "TRAINER" from paddle.distributed import fleet - main_program = paddle.fluid.Program() - startup_program = paddle.fluid.Program() + main_program = paddle.base.Program() + startup_program = paddle.base.Program() - paddle.fluid.framework.switch_main_program(main_program) - paddle.fluid.framework.switch_startup_program(startup_program) + paddle.base.framework.switch_main_program(main_program) + paddle.base.framework.switch_startup_program(startup_program) fleet.init(role_maker.PaddleCloudRoleMaker()) input_x = paddle.static.data(name="x", shape=[-1, 32], dtype='float32') @@ -67,11 +67,11 @@ def test_a_sync_optimizer_pserver(self): os.environ["TRAINING_ROLE"] = "PSERVER" from paddle.distributed import fleet - main_program = paddle.fluid.Program() - startup_program = paddle.fluid.Program() + main_program = paddle.base.Program() + startup_program = paddle.base.Program() - paddle.fluid.framework.switch_main_program(main_program) - paddle.fluid.framework.switch_startup_program(startup_program) + paddle.base.framework.switch_main_program(main_program) + paddle.base.framework.switch_startup_program(startup_program) fleet.init(role_maker.PaddleCloudRoleMaker()) input_x = paddle.static.data(name="x", shape=[-1, 32], 
dtype='float32') @@ -93,7 +93,7 @@ def test_a_sync_optimizer_pserver(self): optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy) optimizer.minimize(avg_cost) - prog = paddle.fluid.default_main_program() + prog = paddle.base.default_main_program() self.assertEqual(prog.global_block().ops[0].type, "listen_and_serv") diff --git a/test/legacy_test/test_dist_fleet_a_sync_optimizer_sync.py b/test/legacy_test/test_dist_fleet_a_sync_optimizer_sync.py index 69e751a04419e..90763b6fa74e2 100644 --- a/test/legacy_test/test_dist_fleet_a_sync_optimizer_sync.py +++ b/test/legacy_test/test_dist_fleet_a_sync_optimizer_sync.py @@ -50,7 +50,7 @@ def test_gradient_merge_optimizer(self): optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy) optimizer.minimize(avg_cost) - prog = paddle.fluid.default_main_program() + prog = paddle.base.default_main_program() self.assertEqual(prog.global_block().ops[-1].type, "send_barrier") sends = 0 diff --git a/test/legacy_test/test_dist_fleet_base.py b/test/legacy_test/test_dist_fleet_base.py index 45657b71d9910..ad421c228b0e5 100644 --- a/test/legacy_test/test_dist_fleet_base.py +++ b/test/legacy_test/test_dist_fleet_base.py @@ -28,7 +28,7 @@ from contextlib import closing import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet from paddle.distributed.fleet.base import role_maker from paddle.distributed.fleet.utils.ps_util import DistributedInfer @@ -129,8 +129,8 @@ def build_optimizer(self, avg_cost, strategy): optimizer = paddle.optimizer.SGD(scheduler, grad_clip=grad_clip) """ # learning rate decay method before 2.0 - optimizer = fluid.optimizer.SGD( - learning_rate=fluid.layers.exponential_decay( + optimizer = base.optimizer.SGD( + learning_rate=base.layers.exponential_decay( learning_rate=LEARNING_RATE, decay_steps=500, decay_rate=0.969, @@ -160,10 +160,10 @@ def get_executor(self): if self._exe is None: device_env = os.getenv("DEVICE", 'cpu') if device_env == 'cpu': - device = fluid.CPUPlace() + device = base.CPUPlace() elif device_env == 'gpu': - device = fluid.CUDAPlace(0) - self._exe = fluid.Executor(device) + device = base.CUDAPlace(0) + self._exe = base.Executor(device) return self._exe def do_dataset_training(self, fleet): diff --git a/test/legacy_test/test_dist_fleet_heter_program.py b/test/legacy_test/test_dist_fleet_heter_program.py index eeefe24d05191..150be7559906c 100644 --- a/test/legacy_test/test_dist_fleet_heter_program.py +++ b/test/legacy_test/test_dist_fleet_heter_program.py @@ -17,7 +17,7 @@ import unittest import paddle -from paddle import fluid +from paddle import base from paddle.distributed.fleet import fleet from paddle.distributed.fleet.base import role_maker @@ -86,7 +86,7 @@ def embedding_layer(input): return paddle.static.nn.sparse_embedding( input=input, size=[100001, 10], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( name="SparseFeatFactors", initializer=paddle.nn.initializer.Uniform(), ), @@ -96,12 +96,12 @@ def embedding_layer(input): concated = paddle.concat(sparse_embed_seq + inputs[0:1], axis=1) - with fluid.device_guard("gpu"): + with base.device_guard("gpu"): fc1 = paddle.static.nn.fc( x=concated, size=400, activation="relu", - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Normal( std=1 / math.sqrt(concated.shape[1]) ) @@ -109,12 +109,12 @@ def embedding_layer(input): name="fc1", ) - with fluid.device_guard("cpu"): + with base.device_guard("cpu"): fc2 = paddle.static.nn.fc( x=fc1, size=400, 
activation="relu", - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Normal( std=1 / math.sqrt(fc1.shape[1]) ) @@ -122,12 +122,12 @@ def embedding_layer(input): name="fc2", ) - with fluid.device_guard("gpu"): + with base.device_guard("gpu"): fc3 = paddle.static.nn.fc( x=fc2, size=400, activation="relu", - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Normal( std=1 / math.sqrt(fc2.shape[1]) ) @@ -135,19 +135,19 @@ def embedding_layer(input): name="fc3", ) - with fluid.device_guard("cpu"): + with base.device_guard("cpu"): predict = paddle.static.nn.fc( x=fc3, size=2, activation="softmax", - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Normal( std=1 / math.sqrt(fc3.shape[1]) ) ), ) - with fluid.device_guard("gpu"): + with base.device_guard("gpu"): labels = paddle.cast(inputs[-1], dtype="int64") cost = paddle.nn.functional.cross_entropy( input=predict, label=labels, reduction='none', use_softmax=False diff --git a/test/legacy_test/test_dist_fleet_minimize.py b/test/legacy_test/test_dist_fleet_minimize.py index cc317cd353ead..8313f5d40ecfd 100644 --- a/test/legacy_test/test_dist_fleet_minimize.py +++ b/test/legacy_test/test_dist_fleet_minimize.py @@ -16,7 +16,7 @@ import unittest import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet from paddle.distributed.fleet.base import role_maker @@ -78,7 +78,7 @@ def get_loss(cos_q_pt, cos_q_nt): q_emb = paddle.static.nn.sparse_embedding( input=q, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", learning_rate=emb_lr, @@ -94,7 +94,7 @@ def get_loss(cos_q_pt, cos_q_nt): q_fc = paddle.static.nn.fc( x=q_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__q_fc__", learning_rate=base_lr, @@ -110,7 +110,7 @@ def get_loss(cos_q_pt, cos_q_nt): pt_emb = paddle.static.nn.sparse_embedding( input=pt, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", learning_rate=emb_lr, @@ -126,12 +126,12 @@ def get_loss(cos_q_pt, cos_q_nt): pt_fc = paddle.static.nn.fc( x=pt_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__fc__", learning_rate=base_lr, ), - bias_attr=fluid.ParamAttr(name="__fc_b__"), + bias_attr=base.ParamAttr(name="__fc_b__"), ) # nt nt = paddle.static.data( @@ -141,7 +141,7 @@ def get_loss(cos_q_pt, cos_q_nt): nt_emb = paddle.static.nn.sparse_embedding( input=nt, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", learning_rate=emb_lr, @@ -157,12 +157,12 @@ def get_loss(cos_q_pt, cos_q_nt): nt_fc = paddle.static.nn.fc( x=nt_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__fc__", learning_rate=base_lr, ), - bias_attr=fluid.ParamAttr(name="__fc_b__"), + bias_attr=base.ParamAttr(name="__fc_b__"), ) cos_q_pt = paddle.nn.functional.cosine_similarity(q_fc, pt_fc) cos_q_nt = paddle.nn.functional.cosine_similarity(q_fc, nt_fc) diff --git a/test/legacy_test/test_dist_fleet_ps.py 
b/test/legacy_test/test_dist_fleet_ps.py index 1b6becb18f5ae..66612fa224aa7 100644 --- a/test/legacy_test/test_dist_fleet_ps.py +++ b/test/legacy_test/test_dist_fleet_ps.py @@ -18,7 +18,7 @@ paddle.enable_static() -from paddle import fluid +from paddle import base from paddle.distributed import fleet from paddle.distributed.fleet.base import role_maker @@ -79,7 +79,7 @@ def get_loss(cos_q_pt, cos_q_nt): input=q, is_distributed=is_distributed, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", learning_rate=emb_lr, @@ -96,7 +96,7 @@ def get_loss(cos_q_pt, cos_q_nt): q_fc = paddle.static.nn.fc( x=q_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__q_fc__", learning_rate=base_lr, @@ -113,7 +113,7 @@ def get_loss(cos_q_pt, cos_q_nt): input=pt, is_distributed=is_distributed, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", learning_rate=emb_lr, @@ -130,12 +130,12 @@ def get_loss(cos_q_pt, cos_q_nt): pt_fc = paddle.static.nn.fc( x=pt_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__fc__", learning_rate=base_lr, ), - bias_attr=fluid.ParamAttr(name="__fc_b__"), + bias_attr=base.ParamAttr(name="__fc_b__"), ) # nt nt = paddle.static.data( @@ -146,7 +146,7 @@ def get_loss(cos_q_pt, cos_q_nt): input=nt, is_distributed=is_distributed, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", learning_rate=emb_lr, @@ -163,12 +163,12 @@ def get_loss(cos_q_pt, cos_q_nt): nt_fc = paddle.static.nn.fc( x=nt_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__fc__", learning_rate=base_lr, ), - bias_attr=fluid.ParamAttr(name="__fc_b__"), + bias_attr=base.ParamAttr(name="__fc_b__"), ) cos_q_pt = paddle.nn.functional.cosine_similarity(q_fc, pt_fc) cos_q_nt = paddle.nn.functional.cosine_similarity(q_fc, nt_fc) diff --git a/test/legacy_test/test_dist_fleet_ps11.py b/test/legacy_test/test_dist_fleet_ps11.py index 44ad3514f64d8..380b0b93ac483 100755 --- a/test/legacy_test/test_dist_fleet_ps11.py +++ b/test/legacy_test/test_dist_fleet_ps11.py @@ -16,7 +16,7 @@ import unittest import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet from paddle.distributed.fleet.base import role_maker @@ -78,7 +78,7 @@ def get_loss(cos_q_pt, cos_q_nt): q_emb = paddle.static.nn.sparse_embedding( input=q, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", learning_rate=emb_lr, @@ -94,7 +94,7 @@ def get_loss(cos_q_pt, cos_q_nt): q_fc = paddle.static.nn.fc( x=q_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__q_fc__", learning_rate=base_lr, @@ -110,7 +110,7 @@ def get_loss(cos_q_pt, cos_q_nt): pt_emb = paddle.static.nn.sparse_embedding( input=pt, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", 
learning_rate=emb_lr, @@ -126,12 +126,12 @@ def get_loss(cos_q_pt, cos_q_nt): pt_fc = paddle.static.nn.fc( x=pt_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__fc__", learning_rate=base_lr, ), - bias_attr=fluid.ParamAttr(name="__fc_b__"), + bias_attr=base.ParamAttr(name="__fc_b__"), ) # nt nt = paddle.static.data( @@ -141,7 +141,7 @@ def get_loss(cos_q_pt, cos_q_nt): nt_emb = paddle.static.nn.sparse_embedding( input=nt, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", learning_rate=emb_lr, @@ -157,12 +157,12 @@ def get_loss(cos_q_pt, cos_q_nt): nt_fc = paddle.static.nn.fc( x=nt_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__fc__", learning_rate=base_lr, ), - bias_attr=fluid.ParamAttr(name="__fc_b__"), + bias_attr=base.ParamAttr(name="__fc_b__"), ) cos_q_pt = paddle.nn.functional.cosine_similarity(q_fc, pt_fc) cos_q_nt = paddle.nn.functional.cosine_similarity(q_fc, nt_fc) diff --git a/test/legacy_test/test_dist_fleet_ps12.py b/test/legacy_test/test_dist_fleet_ps12.py index 7bc2fdcd479ae..122d3c81c26dd 100644 --- a/test/legacy_test/test_dist_fleet_ps12.py +++ b/test/legacy_test/test_dist_fleet_ps12.py @@ -19,7 +19,7 @@ import unittest import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet from paddle.distributed.fleet.base import role_maker @@ -81,7 +81,7 @@ def get_loss(cos_q_pt, cos_q_nt): q_emb = paddle.static.nn.sparse_embedding( input=q, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", learning_rate=emb_lr, @@ -97,7 +97,7 @@ def get_loss(cos_q_pt, cos_q_nt): q_fc = paddle.static.nn.fc( x=q_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__q_fc__", learning_rate=base_lr, @@ -113,7 +113,7 @@ def get_loss(cos_q_pt, cos_q_nt): pt_emb = paddle.static.nn.sparse_embedding( input=pt, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", learning_rate=emb_lr, @@ -129,12 +129,12 @@ def get_loss(cos_q_pt, cos_q_nt): pt_fc = paddle.static.nn.fc( x=pt_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__fc__", learning_rate=base_lr, ), - bias_attr=fluid.ParamAttr(name="__fc_b__"), + bias_attr=base.ParamAttr(name="__fc_b__"), ) # nt nt = paddle.static.data( @@ -144,7 +144,7 @@ def get_loss(cos_q_pt, cos_q_nt): nt_emb = paddle.static.nn.sparse_embedding( input=nt, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", learning_rate=emb_lr, @@ -160,12 +160,12 @@ def get_loss(cos_q_pt, cos_q_nt): nt_fc = paddle.static.nn.fc( x=nt_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__fc__", learning_rate=base_lr, ), - bias_attr=fluid.ParamAttr(name="__fc_b__"), + bias_attr=base.ParamAttr(name="__fc_b__"), ) cos_q_pt = paddle.nn.functional.cosine_similarity(q_fc, pt_fc) cos_q_nt = 
paddle.nn.functional.cosine_similarity(q_fc, nt_fc) diff --git a/test/legacy_test/test_dist_fleet_ps13.py b/test/legacy_test/test_dist_fleet_ps13.py index edced3b5c8ec9..968c978be79d3 100644 --- a/test/legacy_test/test_dist_fleet_ps13.py +++ b/test/legacy_test/test_dist_fleet_ps13.py @@ -19,7 +19,7 @@ import unittest import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet from paddle.distributed.fleet.base import role_maker @@ -82,7 +82,7 @@ def get_loss(cos_q_pt, cos_q_nt): q_emb = paddle.static.nn.sparse_embedding( input=q, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", learning_rate=emb_lr, @@ -98,7 +98,7 @@ def get_loss(cos_q_pt, cos_q_nt): q_fc = paddle.static.nn.fc( x=q_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__q_fc__", learning_rate=base_lr, @@ -114,7 +114,7 @@ def get_loss(cos_q_pt, cos_q_nt): pt_emb = paddle.static.nn.sparse_embedding( input=pt, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", learning_rate=emb_lr, @@ -130,12 +130,12 @@ def get_loss(cos_q_pt, cos_q_nt): pt_fc = paddle.static.nn.fc( x=pt_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__fc__", learning_rate=base_lr, ), - bias_attr=fluid.ParamAttr(name="__fc_b__"), + bias_attr=base.ParamAttr(name="__fc_b__"), ) # nt nt = paddle.static.data( @@ -145,7 +145,7 @@ def get_loss(cos_q_pt, cos_q_nt): nt_emb = paddle.static.nn.sparse_embedding( input=nt, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", learning_rate=emb_lr, @@ -161,12 +161,12 @@ def get_loss(cos_q_pt, cos_q_nt): nt_fc = paddle.static.nn.fc( x=nt_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__fc__", learning_rate=base_lr, ), - bias_attr=fluid.ParamAttr(name="__fc_b__"), + bias_attr=base.ParamAttr(name="__fc_b__"), ) cos_q_pt = paddle.nn.functional.cosine_similarity(q_fc, pt_fc) cos_q_nt = paddle.nn.functional.cosine_similarity(q_fc, nt_fc) diff --git a/test/legacy_test/test_dist_fleet_ps2.py b/test/legacy_test/test_dist_fleet_ps2.py index d134a02d1fecb..7feb1e8ca7af6 100644 --- a/test/legacy_test/test_dist_fleet_ps2.py +++ b/test/legacy_test/test_dist_fleet_ps2.py @@ -19,7 +19,7 @@ import unittest import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet from paddle.distributed.fleet.base import role_maker @@ -81,7 +81,7 @@ def get_loss(cos_q_pt, cos_q_nt): q_emb = paddle.static.nn.sparse_embedding( input=q, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", learning_rate=emb_lr, @@ -98,7 +98,7 @@ def get_loss(cos_q_pt, cos_q_nt): q_fc = paddle.static.nn.fc( x=q_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__q_fc__", learning_rate=base_lr, @@ -114,7 +114,7 @@ def get_loss(cos_q_pt, cos_q_nt): pt_emb = paddle.static.nn.sparse_embedding( input=pt, size=[dict_dim, 
emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", learning_rate=emb_lr, @@ -130,12 +130,12 @@ def get_loss(cos_q_pt, cos_q_nt): pt_fc = paddle.static.nn.fc( x=pt_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__fc__", learning_rate=base_lr, ), - bias_attr=fluid.ParamAttr(name="__fc_b__"), + bias_attr=base.ParamAttr(name="__fc_b__"), ) # nt nt = paddle.static.data( @@ -145,7 +145,7 @@ def get_loss(cos_q_pt, cos_q_nt): nt_emb = paddle.static.nn.sparse_embedding( input=nt, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", learning_rate=emb_lr, @@ -161,12 +161,12 @@ def get_loss(cos_q_pt, cos_q_nt): nt_fc = paddle.static.nn.fc( x=nt_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__fc__", learning_rate=base_lr, ), - bias_attr=fluid.ParamAttr(name="__fc_b__"), + bias_attr=base.ParamAttr(name="__fc_b__"), ) cos_q_pt = paddle.nn.functional.cosine_similarity(q_fc, pt_fc) cos_q_nt = paddle.nn.functional.cosine_similarity(q_fc, nt_fc) diff --git a/test/legacy_test/test_dist_fleet_ps3.py b/test/legacy_test/test_dist_fleet_ps3.py index 286edccfebda2..d76af1a93059d 100644 --- a/test/legacy_test/test_dist_fleet_ps3.py +++ b/test/legacy_test/test_dist_fleet_ps3.py @@ -18,7 +18,7 @@ paddle.enable_static() -from paddle import fluid +from paddle import base from paddle.distributed import fleet from paddle.distributed.fleet.base import role_maker @@ -79,7 +79,7 @@ def get_loss(cos_q_pt, cos_q_nt): input=q, is_distributed=is_distributed, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", learning_rate=emb_lr, @@ -96,7 +96,7 @@ def get_loss(cos_q_pt, cos_q_nt): q_fc = paddle.static.nn.fc( x=q_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__q_fc__", learning_rate=base_lr, @@ -113,7 +113,7 @@ def get_loss(cos_q_pt, cos_q_nt): input=pt, is_distributed=is_distributed, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", learning_rate=emb_lr, @@ -130,12 +130,12 @@ def get_loss(cos_q_pt, cos_q_nt): pt_fc = paddle.static.nn.fc( x=pt_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__fc__", learning_rate=base_lr, ), - bias_attr=fluid.ParamAttr(name="__fc_b__"), + bias_attr=base.ParamAttr(name="__fc_b__"), ) # nt nt = paddle.static.data( @@ -146,7 +146,7 @@ def get_loss(cos_q_pt, cos_q_nt): input=nt, is_distributed=is_distributed, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", learning_rate=emb_lr, @@ -163,12 +163,12 @@ def get_loss(cos_q_pt, cos_q_nt): nt_fc = paddle.static.nn.fc( x=nt_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__fc__", learning_rate=base_lr, ), - bias_attr=fluid.ParamAttr(name="__fc_b__"), + 
bias_attr=base.ParamAttr(name="__fc_b__"), ) cos_q_pt = paddle.nn.functional.cosine_similarity(q_fc, pt_fc) cos_q_nt = paddle.nn.functional.cosine_similarity(q_fc, nt_fc) diff --git a/test/legacy_test/test_dist_fleet_ps4.py b/test/legacy_test/test_dist_fleet_ps4.py index 1f74970707723..2585ab99c7521 100644 --- a/test/legacy_test/test_dist_fleet_ps4.py +++ b/test/legacy_test/test_dist_fleet_ps4.py @@ -18,7 +18,7 @@ paddle.enable_static() -from paddle import fluid +from paddle import base from paddle.distributed import fleet from paddle.distributed.fleet.base import role_maker @@ -78,7 +78,7 @@ def get_loss(cos_q_pt, cos_q_nt): q_emb = paddle.static.nn.sparse_embedding( input=q, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", learning_rate=emb_lr, @@ -94,7 +94,7 @@ def get_loss(cos_q_pt, cos_q_nt): q_fc = paddle.static.nn.fc( x=q_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__q_fc__", learning_rate=base_lr, @@ -110,7 +110,7 @@ def get_loss(cos_q_pt, cos_q_nt): pt_emb = paddle.static.nn.sparse_embedding( input=pt, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", learning_rate=emb_lr, @@ -126,12 +126,12 @@ def get_loss(cos_q_pt, cos_q_nt): pt_fc = paddle.static.nn.fc( x=pt_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__fc__", learning_rate=base_lr, ), - bias_attr=fluid.ParamAttr(name="__fc_b__"), + bias_attr=base.ParamAttr(name="__fc_b__"), ) # nt nt = paddle.static.data( @@ -141,7 +141,7 @@ def get_loss(cos_q_pt, cos_q_nt): nt_emb = paddle.static.nn.sparse_embedding( input=nt, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", learning_rate=emb_lr, @@ -157,12 +157,12 @@ def get_loss(cos_q_pt, cos_q_nt): nt_fc = paddle.static.nn.fc( x=nt_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__fc__", learning_rate=base_lr, ), - bias_attr=fluid.ParamAttr(name="__fc_b__"), + bias_attr=base.ParamAttr(name="__fc_b__"), ) cos_q_pt = paddle.nn.functional.cosine_similarity(q_fc, pt_fc) cos_q_nt = paddle.nn.functional.cosine_similarity(q_fc, nt_fc) diff --git a/test/legacy_test/test_dist_fleet_ps5.py b/test/legacy_test/test_dist_fleet_ps5.py index 63290ee6f4d79..914c31134542e 100644 --- a/test/legacy_test/test_dist_fleet_ps5.py +++ b/test/legacy_test/test_dist_fleet_ps5.py @@ -18,7 +18,7 @@ paddle.enable_static() -from paddle import fluid +from paddle import base from paddle.distributed import fleet from paddle.distributed.fleet.base import role_maker @@ -79,7 +79,7 @@ def get_loss(cos_q_pt, cos_q_nt): input=q, is_distributed=is_distributed, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", learning_rate=emb_lr, @@ -96,7 +96,7 @@ def get_loss(cos_q_pt, cos_q_nt): q_fc = paddle.static.nn.fc( x=q_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__q_fc__", learning_rate=base_lr, @@ -113,7 +113,7 @@ def 
get_loss(cos_q_pt, cos_q_nt): input=pt, is_distributed=is_distributed, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", learning_rate=emb_lr, @@ -130,12 +130,12 @@ def get_loss(cos_q_pt, cos_q_nt): pt_fc = paddle.static.nn.fc( x=pt_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__fc__", learning_rate=base_lr, ), - bias_attr=fluid.ParamAttr(name="__fc_b__"), + bias_attr=base.ParamAttr(name="__fc_b__"), ) # nt nt = paddle.static.data( @@ -146,7 +146,7 @@ def get_loss(cos_q_pt, cos_q_nt): input=nt, is_distributed=is_distributed, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__tmp_", learning_rate=emb_lr, @@ -163,12 +163,12 @@ def get_loss(cos_q_pt, cos_q_nt): nt_fc = paddle.static.nn.fc( x=nt_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__fc__", learning_rate=base_lr, ), - bias_attr=fluid.ParamAttr(name="__fc_b__"), + bias_attr=base.ParamAttr(name="__fc_b__"), ) cos_q_pt = paddle.nn.functional.cosine_similarity(q_fc, pt_fc) cos_q_nt = paddle.nn.functional.cosine_similarity(q_fc, nt_fc) diff --git a/test/legacy_test/test_dist_fleet_ps6.py b/test/legacy_test/test_dist_fleet_ps6.py index 8b96ed41a31fd..bbda76ae3f32c 100644 --- a/test/legacy_test/test_dist_fleet_ps6.py +++ b/test/legacy_test/test_dist_fleet_ps6.py @@ -18,7 +18,7 @@ paddle.enable_static() -from paddle import fluid +from paddle import base from paddle.distributed import fleet from paddle.distributed.fleet.base import role_maker @@ -78,7 +78,7 @@ def get_loss(cos_q_pt, cos_q_nt): q_emb = paddle.static.nn.sparse_embedding( input=q, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", learning_rate=emb_lr, @@ -94,7 +94,7 @@ def get_loss(cos_q_pt, cos_q_nt): q_fc = paddle.static.nn.fc( x=q_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__q_fc__", learning_rate=base_lr, @@ -110,7 +110,7 @@ def get_loss(cos_q_pt, cos_q_nt): pt_emb = paddle.static.nn.sparse_embedding( input=pt, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", learning_rate=emb_lr, @@ -126,12 +126,12 @@ def get_loss(cos_q_pt, cos_q_nt): pt_fc = paddle.static.nn.fc( x=pt_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__fc__", learning_rate=base_lr, ), - bias_attr=fluid.ParamAttr(name="__fc_b__"), + bias_attr=base.ParamAttr(name="__fc_b__"), ) # nt nt = paddle.static.data( @@ -141,7 +141,7 @@ def get_loss(cos_q_pt, cos_q_nt): nt_emb = paddle.static.nn.sparse_embedding( input=nt, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", learning_rate=emb_lr, @@ -157,12 +157,12 @@ def get_loss(cos_q_pt, cos_q_nt): nt_fc = paddle.static.nn.fc( x=nt_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), 
name="__fc__", learning_rate=base_lr, ), - bias_attr=fluid.ParamAttr(name="__fc_b__"), + bias_attr=base.ParamAttr(name="__fc_b__"), ) cos_q_pt = paddle.nn.functional.cosine_similarity(q_fc, pt_fc) cos_q_nt = paddle.nn.functional.cosine_similarity(q_fc, nt_fc) diff --git a/test/legacy_test/test_dist_fleet_raw_program_optimizer.py b/test/legacy_test/test_dist_fleet_raw_program_optimizer.py index c19791a3c33a8..a0fde4c955ecc 100644 --- a/test/legacy_test/test_dist_fleet_raw_program_optimizer.py +++ b/test/legacy_test/test_dist_fleet_raw_program_optimizer.py @@ -34,9 +34,9 @@ def _setup_config(self): self._use_fleet_api_20 = True def test_dist_train(self): - from paddle import fluid + from paddle import base - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( "dist_fleet_raw_program_optimizer.py", delta=1e-5, @@ -50,9 +50,9 @@ def need_envs(self): return {'FLAGS_sync_before_allreduce': '1'} def test_dist_train(self): - from paddle import fluid + from paddle import base - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( "dist_fleet_raw_program_optimizer.py", delta=1e-5, diff --git a/test/legacy_test/test_dist_fleet_raw_program_optimizer_fuse_allreduce.py b/test/legacy_test/test_dist_fleet_raw_program_optimizer_fuse_allreduce.py index f80207e310c22..68c58ac9a5d90 100644 --- a/test/legacy_test/test_dist_fleet_raw_program_optimizer_fuse_allreduce.py +++ b/test/legacy_test/test_dist_fleet_raw_program_optimizer_fuse_allreduce.py @@ -34,9 +34,9 @@ def _setup_config(self): self._use_fleet_api_20 = True def test_dist_train(self): - from paddle import fluid + from paddle import base - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( "dist_fleet_raw_program_optimizer_fuse_allreduce.py", delta=1e-5, diff --git a/test/legacy_test/test_dist_fleet_sparse_embedding_ctr.py b/test/legacy_test/test_dist_fleet_sparse_embedding_ctr.py index 1995df11a09d3..c20c1afc0f5db 100644 --- a/test/legacy_test/test_dist_fleet_sparse_embedding_ctr.py +++ b/test/legacy_test/test_dist_fleet_sparse_embedding_ctr.py @@ -24,7 +24,7 @@ from dist_fleet_sparse_embedding_ctr import fake_ctr_reader from test_dist_fleet_base import TestFleetBase -from paddle import fluid +from paddle import base @unittest.skip(reason="Skip unstable ut, need paddle sync mode fix") @@ -218,7 +218,7 @@ def net(): input=dnn_data, size=[dnn_input_dim, dnn_layer_dims[0]], is_test=inference, - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( name="deep_embedding", initializer=init ), ) @@ -231,7 +231,7 @@ def net(): x=dnn_out, size=dim, activation="relu", - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01) ), name='dnn-fc-%d' % i, @@ -243,7 +243,7 @@ def net(): input=lr_data, size=[lr_input_dim, 1], is_test=inference, - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( name="wide_embedding", initializer=paddle.nn.initializer.Constant(value=0.01), ), @@ -260,15 +260,15 @@ def net(): reader = paddle.batch(fake_ctr_reader(), batch_size=4) datas, predict = net() - exe = fluid.Executor(fluid.CPUPlace()) - feeder = fluid.DataFeeder(place=fluid.CPUPlace(), feed_list=datas) - exe.run(fluid.default_startup_program()) + exe = base.Executor(base.CPUPlace()) + feeder = base.DataFeeder(place=base.CPUPlace(), feed_list=datas) + exe.run(base.default_startup_program()) paddle.distributed.io.load_persistables(exe, model_file) for batch_id, data 
in enumerate(reader()): score = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed=feeder.feed(data), fetch_list=[predict], ) diff --git a/test/legacy_test/test_dist_fleet_spmt.py b/test/legacy_test/test_dist_fleet_spmt.py index d9b5ad66b4fa3..2579e2576227d 100644 --- a/test/legacy_test/test_dist_fleet_spmt.py +++ b/test/legacy_test/test_dist_fleet_spmt.py @@ -16,7 +16,7 @@ import unittest import paddle -from paddle import fluid +from paddle import base paddle.enable_static() @@ -76,7 +76,7 @@ def get_loss(cos_q_pt, cos_q_nt): q_emb = paddle.static.nn.sparse_embedding( input=q, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", learning_rate=emb_lr, @@ -92,7 +92,7 @@ def get_loss(cos_q_pt, cos_q_nt): q_fc = paddle.static.nn.fc( x=q_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__q_fc__", learning_rate=base_lr, @@ -108,7 +108,7 @@ def get_loss(cos_q_pt, cos_q_nt): pt_emb = paddle.static.nn.sparse_embedding( input=pt, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", learning_rate=emb_lr, @@ -124,12 +124,12 @@ def get_loss(cos_q_pt, cos_q_nt): pt_fc = paddle.static.nn.fc( x=pt_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__fc__", learning_rate=base_lr, ), - bias_attr=fluid.ParamAttr(name="__fc_b__"), + bias_attr=base.ParamAttr(name="__fc_b__"), ) # nt nt = paddle.static.data( @@ -139,7 +139,7 @@ def get_loss(cos_q_pt, cos_q_nt): nt_emb = paddle.static.nn.sparse_embedding( input=nt, size=[dict_dim, emb_dim], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__emb__", learning_rate=emb_lr, @@ -155,12 +155,12 @@ def get_loss(cos_q_pt, cos_q_nt): nt_fc = paddle.static.nn.fc( x=nt_ss, size=hid_dim, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.01), name="__fc__", learning_rate=base_lr, ), - bias_attr=fluid.ParamAttr(name="__fc_b__"), + bias_attr=base.ParamAttr(name="__fc_b__"), ) cos_q_pt = paddle.nn.functional.cosine_similarity(q_fc, pt_fc) cos_q_nt = paddle.nn.functional.cosine_similarity(q_fc, nt_fc) @@ -235,10 +235,10 @@ def test_SingleProcessMultiThread(self): os.environ["PADDLE_FUSE_ALLREDUCE"] = "1" os.environ["PADDLE_LOSS_SCALE"] = "1" - startup_program = fluid.Program() - main_program = fluid.Program() - with fluid.program_guard(main_program, startup_program): - with fluid.unique_name.guard(): + startup_program = base.Program() + main_program = base.Program() + with base.program_guard(main_program, startup_program): + with base.unique_name.guard(): loss, acc, _ = self.net() optimizer = paddle.optimizer.Adam(learning_rate=0.01) optimizer.minimize(loss) diff --git a/test/legacy_test/test_dist_hapi_model.py b/test/legacy_test/test_dist_hapi_model.py index 314a7621f07fc..1e5ec1d341f71 100644 --- a/test/legacy_test/test_dist_hapi_model.py +++ b/test/legacy_test/test_dist_hapi_model.py @@ -18,7 +18,7 @@ import time import unittest -from paddle import fluid +from paddle import base from paddle.distributed.utils.launch_utils import ( TrainerProc, find_free_ports, @@ -105,7 +105,7 @@ def start_local_trainers( class 
TestMultipleGpus(unittest.TestCase): def run_mnist_2gpu(self, target_file_name): - if fluid.core.get_cuda_device_count() == 0: + if base.core.get_cuda_device_count() == 0: return selected_gpus = get_gpus('0,1') diff --git a/test/legacy_test/test_dist_lookup_sparse_table_fuse_ops.py b/test/legacy_test/test_dist_lookup_sparse_table_fuse_ops.py index 1a2df8979f712..b320b887d2298 100644 --- a/test/legacy_test/test_dist_lookup_sparse_table_fuse_ops.py +++ b/test/legacy_test/test_dist_lookup_sparse_table_fuse_ops.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -32,16 +32,16 @@ def test_fuse(self): self.check_with_place(place) def check_with_place(self, place): - scope = fluid.global_scope() + scope = base.global_scope() scope.var("LearningRate").get_tensor().set([0.01], place) scope.var("Ids").get_tensor().set(list(range(100)), place) - init_program = fluid.Program() + init_program = base.Program() lr = init_program.global_block().create_var( name="LearningRate", persistable=True, - type=fluid.core.VarDesc.VarType.LOD_TENSOR, + type=base.core.VarDesc.VarType.LOD_TENSOR, shape=[1], dtype="float32", ) @@ -49,14 +49,14 @@ def check_with_place(self, place): ids = init_program.global_block().create_var( name="Ids", persistable=True, - type=fluid.core.VarDesc.VarType.LOD_TENSOR, + type=base.core.VarDesc.VarType.LOD_TENSOR, shape=[100], dtype="int64", ) output = init_program.global_block().create_var( name="output", - type=fluid.core.VarDesc.VarType.LOD_TENSOR, + type=base.core.VarDesc.VarType.LOD_TENSOR, shape=[100, 8], dtype="float32", ) @@ -98,10 +98,10 @@ def check_with_place(self, place): }, ) - executor = fluid.Executor(place) + executor = base.Executor(place) executor.run(init_program) - training_program = fluid.Program() + training_program = base.Program() scope.var('Beta1Pow').get_tensor().set( np.array([0]).astype("float32"), place @@ -124,7 +124,7 @@ def check_with_place(self, place): lr = training_program.global_block().create_var( name="LearningRate", persistable=True, - type=fluid.core.VarDesc.VarType.LOD_TENSOR, + type=base.core.VarDesc.VarType.LOD_TENSOR, shape=[1], dtype="float32", ) @@ -132,7 +132,7 @@ def check_with_place(self, place): grads = training_program.global_block().create_var( name="Grad", persistable=True, - type=fluid.core.VarDesc.VarType.SELECTED_ROWS, + type=base.core.VarDesc.VarType.SELECTED_ROWS, shape=[100, 8], dtype="float32", ) @@ -140,7 +140,7 @@ def check_with_place(self, place): beta1 = training_program.global_block().create_var( name="Beta1Pow", persistable=True, - type=fluid.core.VarDesc.VarType.LOD_TENSOR, + type=base.core.VarDesc.VarType.LOD_TENSOR, shape=[1], dtype="float32", ) @@ -148,7 +148,7 @@ def check_with_place(self, place): beta2 = training_program.global_block().create_var( name="Beta2Pow", persistable=True, - type=fluid.core.VarDesc.VarType.LOD_TENSOR, + type=base.core.VarDesc.VarType.LOD_TENSOR, shape=[1], dtype="float32", ) diff --git a/test/legacy_test/test_dist_mnist_backward_deps.py b/test/legacy_test/test_dist_mnist_backward_deps.py index 10089293b3b08..5a55891242154 100644 --- a/test/legacy_test/test_dist_mnist_backward_deps.py +++ b/test/legacy_test/test_dist_mnist_backward_deps.py @@ -30,9 +30,9 @@ def _setup_config(self): self._enable_backward_deps = True def test_dist_train(self): - from paddle import fluid + from paddle import base - if fluid.core.is_compiled_with_cuda(): + if 
base.core.is_compiled_with_cuda(): self.check_with_place("dist_mnist.py", delta=1e-5) diff --git a/test/legacy_test/test_dist_mnist_fleet_save.py b/test/legacy_test/test_dist_mnist_fleet_save.py index 0cee8d58cc677..aa25a34b469f2 100644 --- a/test/legacy_test/test_dist_mnist_fleet_save.py +++ b/test/legacy_test/test_dist_mnist_fleet_save.py @@ -33,46 +33,46 @@ def _setup_config(self): self._save_model = True def _rm_temp_files(self, dirname): - fluid_model_path = os.path.join(dirname, 'fluid_persistables') + base_model_path = os.path.join(dirname, 'base_persistables') fleet_model_path = os.path.join(dirname, 'fleet_persistables') - fluid_infer_path = os.path.join(dirname, 'fluid_infer') + base_infer_path = os.path.join(dirname, 'base_infer') fleet_infer_path = os.path.join(dirname, 'fleet_infer') - fluid_model_path_2 = os.path.join(dirname, 'fluid_persistables_2') + base_model_path_2 = os.path.join(dirname, 'base_persistables_2') fleet_model_path_2 = os.path.join(dirname, 'fleet_persistables_2') - fluid_infer_path_2 = os.path.join(dirname, 'fluid_infer_2') + base_infer_path_2 = os.path.join(dirname, 'base_infer_2') fleet_infer_path_2 = os.path.join(dirname, 'fleet_infer_2') - shutil.rmtree(fluid_model_path) + shutil.rmtree(base_model_path) shutil.rmtree(fleet_model_path) - shutil.rmtree(fluid_infer_path) + shutil.rmtree(base_infer_path) shutil.rmtree(fleet_infer_path) - shutil.rmtree(fluid_model_path_2) + shutil.rmtree(base_model_path_2) shutil.rmtree(fleet_model_path_2) - shutil.rmtree(fluid_infer_path_2) + shutil.rmtree(base_infer_path_2) shutil.rmtree(fleet_infer_path_2) def _test_saved_files(self, dirname): - fluid_model_path = os.path.join(dirname, 'fluid_persistables') - fluid_persistables = sorted(os.listdir(fluid_model_path)) + base_model_path = os.path.join(dirname, 'base_persistables') + base_persistables = sorted(os.listdir(base_model_path)) fleet_model_path = os.path.join(dirname, 'fleet_persistables') fleet_persistables = sorted(os.listdir(fleet_model_path)) - fluid_infer_path = os.path.join(dirname, 'fluid_infer') - fluid_infer_files = sorted(os.listdir(fluid_infer_path)) + base_infer_path = os.path.join(dirname, 'base_infer') + base_infer_files = sorted(os.listdir(base_infer_path)) fleet_infer_path = os.path.join(dirname, 'fleet_infer') fleet_infer_files = sorted(os.listdir(fleet_infer_path)) - if len(fluid_persistables) != len(fleet_persistables): + if len(base_persistables) != len(fleet_persistables): self._rm_temp_files(dirname) raise ValueError("Test Failed.") - for i in range(len(fluid_persistables)): - if fluid_persistables[i] != fleet_persistables[i]: + for i in range(len(base_persistables)): + if base_persistables[i] != fleet_persistables[i]: self._rm_temp_files(dirname) raise ValueError("Test Failed.") - if len(fluid_infer_files) != len(fleet_infer_files): + if len(base_infer_files) != len(fleet_infer_files): self._rm_temp_files(dirname) raise ValueError("Test Failed.") - for i in range(len(fluid_infer_files)): - if fluid_infer_files[i] != fleet_infer_files[i]: + for i in range(len(base_infer_files)): + if base_infer_files[i] != fleet_infer_files[i]: self._rm_temp_files(dirname) raise ValueError("Test Failed.") self._rm_temp_files(dirname) @@ -100,9 +100,9 @@ def check_with_place( self._test_saved_files(dirname) def test_dist_train(self): - from paddle import fluid + from paddle import base - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place("dist_mnist.py", delta=1e-5) diff --git 
a/test/legacy_test/test_dist_mnist_fleetapi.py b/test/legacy_test/test_dist_mnist_fleetapi.py index 96435a8163778..8a79af12a8d54 100644 --- a/test/legacy_test/test_dist_mnist_fleetapi.py +++ b/test/legacy_test/test_dist_mnist_fleetapi.py @@ -31,9 +31,9 @@ def _setup_config(self): self._sync_batch_norm = True def test_dist_train(self): - from paddle import fluid + from paddle import base - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( "dist_mnist.py", delta=1e-5, @@ -44,14 +44,14 @@ def test_dist_train(self): class FleetCollectiveTest(unittest.TestCase): def test_open_sync_batch_norm(self): - from paddle import fluid + from paddle import base from paddle.incubate.distributed.fleet import role_maker from paddle.incubate.distributed.fleet.collective import ( DistributedStrategy, fleet, ) - if not fluid.core.is_compiled_with_cuda(): + if not base.core.is_compiled_with_cuda(): # Operator "gen_nccl_id" has not been registered return diff --git a/test/legacy_test/test_dist_mnist_fp16_allreduce.py b/test/legacy_test/test_dist_mnist_fp16_allreduce.py index 707f25beed0f8..799799a52598b 100644 --- a/test/legacy_test/test_dist_mnist_fp16_allreduce.py +++ b/test/legacy_test/test_dist_mnist_fp16_allreduce.py @@ -25,9 +25,9 @@ def _setup_config(self): self._nccl2_reduce_layer = True def test_dist_train(self): - from paddle import fluid + from paddle import base - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( "dist_mnist_fp16_allreduce.py", delta=1e-5, check_error_log=True ) diff --git a/test/legacy_test/test_dist_mnist_hallreduce.py b/test/legacy_test/test_dist_mnist_hallreduce.py index 8fc5181c63cd7..bc056ca3cc382 100644 --- a/test/legacy_test/test_dist_mnist_hallreduce.py +++ b/test/legacy_test/test_dist_mnist_hallreduce.py @@ -33,9 +33,9 @@ def _setup_config(self): self._use_hallreduce = True def test_dist_train(self): - from paddle import fluid + from paddle import base - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( "dist_mnist.py", delta=1e-5, diff --git a/test/legacy_test/test_dist_mnist_multi_comm.py b/test/legacy_test/test_dist_mnist_multi_comm.py index ae19548be40f9..d57515b000219 100644 --- a/test/legacy_test/test_dist_mnist_multi_comm.py +++ b/test/legacy_test/test_dist_mnist_multi_comm.py @@ -32,9 +32,9 @@ def _setup_config(self): self._nccl_comm_num = 3 def test_dist_train(self): - from paddle import fluid + from paddle import base - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( "dist_mnist.py", delta=1e-5, diff --git a/test/legacy_test/test_dist_mnist_pg.py b/test/legacy_test/test_dist_mnist_pg.py index cf46394f68ddc..38cae5f257bfc 100644 --- a/test/legacy_test/test_dist_mnist_pg.py +++ b/test/legacy_test/test_dist_mnist_pg.py @@ -29,9 +29,9 @@ def _setup_config(self): self._nccl2_mode = True def test_dist_train(self): - from paddle import fluid + from paddle import base - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( "dist_mnist.py", delta=1, diff --git a/test/legacy_test/test_dist_mnist_ring_allreduce.py b/test/legacy_test/test_dist_mnist_ring_allreduce.py index cb3fb5d34eb5e..fdb65440c8f63 100644 --- a/test/legacy_test/test_dist_mnist_ring_allreduce.py +++ b/test/legacy_test/test_dist_mnist_ring_allreduce.py @@ -29,9 +29,9 @@ def _setup_config(self): self._nccl2_mode = True def test_dist_train(self): - from 
paddle import fluid + from paddle import base - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place("dist_mnist.py", delta=1e-5) diff --git a/test/legacy_test/test_dist_mnist_with_program.py b/test/legacy_test/test_dist_mnist_with_program.py index 42b47648ba86a..9c29944cc3777 100644 --- a/test/legacy_test/test_dist_mnist_with_program.py +++ b/test/legacy_test/test_dist_mnist_with_program.py @@ -31,9 +31,9 @@ def _setup_config(self): self._use_local_sgd = True def test_dist_train(self): - from paddle import fluid + from paddle import base - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place("dist_mnist.py", delta=1e-5) @@ -47,9 +47,9 @@ def _setup_config(self): self._ut4grad_allreduce = True def test_dist_train(self): - from paddle import fluid + from paddle import base - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place("dist_mnist.py", delta=1e-5) diff --git a/test/legacy_test/test_dist_op.py b/test/legacy_test/test_dist_op.py index 958071cfc62a1..be7f05b638d8a 100644 --- a/test/legacy_test/test_dist_op.py +++ b/test/legacy_test/test_dist_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -246,9 +246,9 @@ def init_data_type(self): def test_api(self): self.init_data_type() - main_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(main_program, startup_program): + main_program = base.Program() + startup_program = base.Program() + with base.program_guard(main_program, startup_program): x = paddle.static.data( name='x', shape=[2, 3, 4, 5], dtype=self.data_type ) @@ -260,13 +260,13 @@ def test_api(self): y_i = np.random.random((3, 1, 5)).astype(self.data_type) result = paddle.dist(x, y, p) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={'x': x_i, 'y': y_i}, fetch_list=[result], ) diff --git a/test/legacy_test/test_dist_se_resnext_nccl.py b/test/legacy_test/test_dist_se_resnext_nccl.py index 64d63048525f5..a56009d8b9bd2 100644 --- a/test/legacy_test/test_dist_se_resnext_nccl.py +++ b/test/legacy_test/test_dist_se_resnext_nccl.py @@ -30,9 +30,9 @@ def _setup_config(self): self._nccl2_mode = True def test_dist_train(self): - from paddle import fluid + from paddle import base - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( "dist_se_resnext.py", delta=1e-5, @@ -49,9 +49,9 @@ def _setup_config(self): self._mp_mode = True def test_dist_train(self): - from paddle import fluid + from paddle import base - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( "dist_se_resnext.py", delta=1e-5, diff --git a/test/legacy_test/test_dist_sharding_save.py b/test/legacy_test/test_dist_sharding_save.py index ed994b813b59a..448cc900e3257 100755 --- a/test/legacy_test/test_dist_sharding_save.py +++ b/test/legacy_test/test_dist_sharding_save.py @@ -84,9 +84,9 @@ def check_with_place( self._test_saved_files(dirname) def test_dist_train(self): - from paddle import fluid + from paddle import base - if fluid.core.is_compiled_with_cuda(): + if 
base.core.is_compiled_with_cuda(): self.check_with_place("dist_sharding_save.py", delta=1e-5) diff --git a/test/legacy_test/test_dist_sparse_load_ps0.py b/test/legacy_test/test_dist_sparse_load_ps0.py index 985aa0d933771..4b697d3db2fef 100644 --- a/test/legacy_test/test_dist_sparse_load_ps0.py +++ b/test/legacy_test/test_dist_sparse_load_ps0.py @@ -20,7 +20,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.distributed.fleet import fleet from paddle.distributed.fleet.base import role_maker @@ -29,7 +29,7 @@ class SparseLoadOp(unittest.TestCase): """Test load operator.""" def net(self, emb_array, fc_array): - with fluid.unique_name.guard(): + with base.unique_name.guard(): dense_input = paddle.static.data( 'input', shape=[None, 1], dtype="int64" ) @@ -38,7 +38,7 @@ def net(self, emb_array, fc_array): input=dense_input, is_sparse=True, size=[10, 10], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( name="embedding", initializer=paddle.nn.initializer.Assign(emb_array), ), @@ -48,7 +48,7 @@ def net(self, emb_array, fc_array): x=emb, size=10, activation="relu", - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( name='fc', initializer=paddle.nn.initializer.Assign(fc_array), ), @@ -57,15 +57,15 @@ def net(self, emb_array, fc_array): return loss def save_origin_model(self, emb_array, fc_array): - startup_program = fluid.framework.Program() - test_program = fluid.framework.Program() - with fluid.framework.program_guard(test_program, startup_program): - with fluid.unique_name.guard(): + startup_program = base.framework.Program() + test_program = base.framework.Program() + with base.framework.program_guard(test_program, startup_program): + with base.unique_name.guard(): loss = self.net(emb_array, fc_array) optimizer = paddle.optimizer.Adam(1e-3) optimizer.minimize(loss) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) exe.run(startup_program) model_path = tempfile.mkdtemp() paddle.distributed.io.save_persistables( @@ -112,10 +112,10 @@ def test_2ps_0_load(self): optimizer.minimize(loss) fleet.init_server(model_path) - fc_w = np.array(fluid.global_scope().find_var("fc").get_tensor()) + fc_w = np.array(base.global_scope().find_var("fc").get_tensor()) emb = np.array( - fluid.global_scope().find_var("embedding.block0").get_tensor() + base.global_scope().find_var("embedding.block0").get_tensor() ) assert fc_w.all() == fc_array.all() diff --git a/test/legacy_test/test_dist_sparse_load_ps1.py b/test/legacy_test/test_dist_sparse_load_ps1.py index c5bb9b47c98b9..069eb70560a75 100644 --- a/test/legacy_test/test_dist_sparse_load_ps1.py +++ b/test/legacy_test/test_dist_sparse_load_ps1.py @@ -20,7 +20,7 @@ from test_dist_sparse_load_ps0 import SparseLoadOp import paddle -from paddle import fluid +from paddle import base from paddle.distributed.fleet import fleet from paddle.distributed.fleet.base import role_maker @@ -53,8 +53,8 @@ def test_2ps_0_load(self): fc_array = np.arange(0, 1, 0.1).repeat(10).reshape(10, 10) model_path = self.save_origin_model(emb_array, fc_array) - startup_program = fluid.framework.Program() - test_program = fluid.framework.Program() + startup_program = base.framework.Program() + test_program = base.framework.Program() role = role_maker.PaddleCloudRoleMaker() fleet.init(role) loss = self.net(emb_array, fc_array) @@ -65,7 +65,7 @@ def test_2ps_0_load(self): optimizer.minimize(loss) fleet.init_server(model_path) emb = np.array( - 
fluid.global_scope().find_var("embedding.block1").get_tensor() + base.global_scope().find_var("embedding.block1").get_tensor() ) assert emb.all() == emb_array[1::2].all() shutil.rmtree(model_path) diff --git a/test/legacy_test/test_dist_sparse_tensor_load_adagrad.py b/test/legacy_test/test_dist_sparse_tensor_load_adagrad.py index 88354c6d797fb..35dd48accd42a 100644 --- a/test/legacy_test/test_dist_sparse_tensor_load_adagrad.py +++ b/test/legacy_test/test_dist_sparse_tensor_load_adagrad.py @@ -17,7 +17,7 @@ from test_dist_sparse_tensor_load_sgd import TestSparseLoadProgram import paddle -from paddle import fluid +from paddle import base from paddle.distributed.fleet import fleet @@ -28,8 +28,8 @@ class TestSparseLoadProgramAdagrad(TestSparseLoadProgram): def test_server_init(self): scope, train_program, startup_program, loss = self.net() - with fluid.scope_guard(scope): - with fluid.program_guard(train_program, startup_program): + with base.scope_guard(scope): + with base.program_guard(train_program, startup_program): optimizer = paddle.optimizer.Adam(1e-3) optimizer = fleet.distributed_optimizer( optimizer, self.strategy diff --git a/test/legacy_test/test_dist_sparse_tensor_load_adam.py b/test/legacy_test/test_dist_sparse_tensor_load_adam.py index 1a2c60657c1a9..b5eae0e39807e 100644 --- a/test/legacy_test/test_dist_sparse_tensor_load_adam.py +++ b/test/legacy_test/test_dist_sparse_tensor_load_adam.py @@ -17,7 +17,7 @@ from test_dist_sparse_tensor_load_sgd import TestSparseLoadProgram import paddle -from paddle import fluid +from paddle import base from paddle.distributed.fleet import fleet @@ -28,8 +28,8 @@ class TestSparseLoadProgramAdam(TestSparseLoadProgram): def test_server_init(self): scope, train_program, startup_program, loss = self.net() - with fluid.scope_guard(scope): - with fluid.program_guard(train_program, startup_program): + with base.scope_guard(scope): + with base.program_guard(train_program, startup_program): optimizer = paddle.optimizer.Adam(1e-3) optimizer = fleet.distributed_optimizer( optimizer, self.strategy diff --git a/test/legacy_test/test_dist_sparse_tensor_load_ftrl.py b/test/legacy_test/test_dist_sparse_tensor_load_ftrl.py index b473a6ac08922..6a1f0175b1619 100644 --- a/test/legacy_test/test_dist_sparse_tensor_load_ftrl.py +++ b/test/legacy_test/test_dist_sparse_tensor_load_ftrl.py @@ -17,7 +17,7 @@ from test_dist_sparse_tensor_load_sgd import TestSparseLoadProgram import paddle -from paddle import fluid +from paddle import base from paddle.distributed.fleet import fleet @@ -28,8 +28,8 @@ class TestSparseLoadProgramFtrl(TestSparseLoadProgram): def test_server_init(self): scope, train_program, startup_program, loss = self.net() - with fluid.scope_guard(scope): - with fluid.program_guard(train_program, startup_program): + with base.scope_guard(scope): + with base.program_guard(train_program, startup_program): optimizer = paddle.optimizer.SGD(1e-3) optimizer = fleet.distributed_optimizer( optimizer, self.strategy diff --git a/test/legacy_test/test_dist_sparse_tensor_load_momentum.py b/test/legacy_test/test_dist_sparse_tensor_load_momentum.py index 6b45d5b72cc21..b7b590cbb3224 100644 --- a/test/legacy_test/test_dist_sparse_tensor_load_momentum.py +++ b/test/legacy_test/test_dist_sparse_tensor_load_momentum.py @@ -17,7 +17,7 @@ from test_dist_sparse_tensor_load_sgd import TestSparseLoadProgram import paddle -from paddle import fluid +from paddle import base from paddle.distributed.fleet import fleet @@ -28,8 +28,8 @@ class 
TestSparseLoadProgramMomentum(TestSparseLoadProgram): def test_server_init(self): scope, train_program, startup_program, loss = self.net() - with fluid.scope_guard(scope): - with fluid.program_guard(train_program, startup_program): + with base.scope_guard(scope): + with base.program_guard(train_program, startup_program): optimizer = paddle.optimizer.SGD(1e-3) optimizer = fleet.distributed_optimizer( optimizer, self.strategy diff --git a/test/legacy_test/test_dist_sparse_tensor_load_rmsprop.py b/test/legacy_test/test_dist_sparse_tensor_load_rmsprop.py index 999b9315c83eb..9ce8e211f1e67 100644 --- a/test/legacy_test/test_dist_sparse_tensor_load_rmsprop.py +++ b/test/legacy_test/test_dist_sparse_tensor_load_rmsprop.py @@ -17,7 +17,7 @@ from test_dist_sparse_tensor_load_sgd import TestSparseLoadProgram import paddle -from paddle import fluid +from paddle import base from paddle.distributed.fleet import fleet @@ -28,8 +28,8 @@ class TestSparseLoadProgramRmsprop(TestSparseLoadProgram): def test_server_init(self): scope, train_program, startup_program, loss = self.net() - with fluid.scope_guard(scope): - with fluid.program_guard(train_program, startup_program): + with base.scope_guard(scope): + with base.program_guard(train_program, startup_program): optimizer = paddle.optimizer.SGD(1e-3) optimizer = fleet.distributed_optimizer( optimizer, self.strategy diff --git a/test/legacy_test/test_dist_sparse_tensor_load_sgd.py b/test/legacy_test/test_dist_sparse_tensor_load_sgd.py index 5444ebdcae4c9..a8da5d52c61ec 100644 --- a/test/legacy_test/test_dist_sparse_tensor_load_sgd.py +++ b/test/legacy_test/test_dist_sparse_tensor_load_sgd.py @@ -16,7 +16,7 @@ import unittest import paddle -from paddle import fluid +from paddle import base from paddle.distributed.fleet import fleet from paddle.distributed.fleet.base import role_maker @@ -40,12 +40,12 @@ def setUp(self): self.strategy.a_sync = True def net(self): - train_program = fluid.Program() - startup_program = fluid.Program() - scope = fluid.Scope() - with fluid.scope_guard(scope): - with fluid.program_guard(train_program, startup_program): - with fluid.unique_name.guard(): + train_program = base.Program() + startup_program = base.Program() + scope = base.Scope() + with base.scope_guard(scope): + with base.program_guard(train_program, startup_program): + with base.unique_name.guard(): inputs = paddle.static.data( 'input', shape=[None, 1], dtype="int64" ) @@ -63,8 +63,8 @@ def net(self): class TestSparseLoadProgramSGD(TestSparseLoadProgram): def test_server_init(self): scope, train_program, startup_program, loss = self.net() - with fluid.scope_guard(scope): - with fluid.program_guard(train_program, startup_program): + with base.scope_guard(scope): + with base.program_guard(train_program, startup_program): optimizer = paddle.optimizer.SGD(1e-3) optimizer = fleet.distributed_optimizer( optimizer, self.strategy diff --git a/test/legacy_test/test_dist_train.py b/test/legacy_test/test_dist_train.py index 968594397c7d5..c1d8e5426db35 100644 --- a/test/legacy_test/test_dist_train.py +++ b/test/legacy_test/test_dist_train.py @@ -22,9 +22,9 @@ from dist_test_utils import remove_ps_flag import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.layers import ops +from paddle import base +from paddle.base import core +from paddle.base.layers import ops from paddle.incubate.nn.layer.io import ListenAndServ, Recv, Send RPC_OP_ROLE_ATTR_NAME = ( @@ -37,7 +37,7 @@ class TestSendOp(unittest.TestCase): def test_send(self): 
remove_ps_flag(os.getpid()) # Run init_serv in a thread - place = fluid.CPUPlace() + place = base.CPUPlace() # NOTE: python thread will not work here due to GIL. p = Process(target=self.init_serv, args=(place,)) p.daemon = True @@ -71,9 +71,9 @@ def _wait_ps_ready(self, pid): start_left_time -= sleep_time def init_serv(self, place): - main = fluid.Program() + main = base.Program() - with fluid.program_guard(main): + with base.program_guard(main): serv = ListenAndServ("127.0.0.1:0", ["X"], optimizer_mode=False) with serv.do(): out_var = main.global_block().create_var( @@ -92,12 +92,12 @@ def init_serv(self, place): ) ops._scale(x=x, scale=10.0, out=out_var) - self.server_exe = fluid.Executor(place) + self.server_exe = base.Executor(place) self.server_exe.run(main) def init_client(self, place, port): - main = fluid.Program() - with fluid.program_guard(main): + main = base.Program() + with base.program_guard(main): main.global_block().append_op( type="fetch_barrier", inputs={}, @@ -132,16 +132,16 @@ def init_client(self, place, port): Send("127.0.0.1:%d" % port, [x]) o = Recv("127.0.0.1:%d" % port, [get_var]) - exe = fluid.Executor(place) + exe = base.Executor(place) self.dist_out = exe.run(main, fetch_list=o) # o is a list def run_local(self, place): - main = fluid.Program() - with fluid.program_guard(main): + main = base.Program() + with base.program_guard(main): x = paddle.static.data(shape=[32, 32], dtype='float32', name='X') paddle.nn.initializer.Constant(value=2.3)(x, main.global_block()) o = paddle.scale(x=x, scale=10.0) - exe = fluid.Executor(place) + exe = base.Executor(place) self.local_out = exe.run(main, fetch_list=[o]) diff --git a/test/legacy_test/test_dist_transpiler.py b/test/legacy_test/test_dist_transpiler.py index 094bdc2f6cf68..89a306f84045e 100644 --- a/test/legacy_test/test_dist_transpiler.py +++ b/test/legacy_test/test_dist_transpiler.py @@ -22,7 +22,7 @@ gc.set_debug(gc.DEBUG_COLLECTABLE) import paddle -from paddle import fluid +from paddle import base class TranspilerTest(unittest.TestCase): @@ -42,8 +42,8 @@ def net_conf(self): y_predict = paddle.static.nn.fc( x, size=1000, - weight_attr=fluid.ParamAttr(name='fc_w'), - bias_attr=fluid.ParamAttr(name='fc_b'), + weight_attr=base.ParamAttr(name='fc_w'), + bias_attr=base.ParamAttr(name='fc_b'), ) y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y) @@ -52,20 +52,20 @@ def net_conf(self): sgd_optimizer.minimize(avg_cost) def get_main_program(self): - main = fluid.Program() + main = base.Program() main.random_seed = 1 - with fluid.program_guard(main): + with base.program_guard(main): self.net_conf() self.origin_prog = main.clone() return main def get_trainer(self, config=None, sync_mode=True): - src = fluid.default_startup_program().clone() + src = base.default_startup_program().clone() t = self._transpiler_instance(config, sync_mode=True) trainer_main = t.get_trainer_program(wait_port=False) - trainer_startup = fluid.default_startup_program() + trainer_startup = base.default_startup_program() assert src.num_blocks == 1 assert trainer_startup.num_blocks == src.num_blocks @@ -100,10 +100,10 @@ def transpiler_test_impl(self): pass def test_transpiler(self): - main = fluid.Program() - startup = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, startup): + main = base.Program() + startup = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, startup): self.transpiler_test_impl() # 
NOTE: run gc.collect to eliminate pybind side objects to # prevent random double-deallocate when inherited in python. @@ -300,8 +300,8 @@ def net_conf(self): y_predict = paddle.static.nn.fc( x, size=1000, - weight_attr=fluid.ParamAttr(name='fc_w'), - bias_attr=fluid.ParamAttr(name='fc_b'), + weight_attr=base.ParamAttr(name='fc_w'), + bias_attr=base.ParamAttr(name='fc_b'), ) y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y) @@ -356,7 +356,7 @@ def net_conf(self): input=inputs[0], is_sparse=True, size=[dict_size, embedding_size], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( name='emb', initializer=paddle.nn.initializer.Uniform( -init_width, init_width @@ -368,7 +368,7 @@ def net_conf(self): input=inputs[1], is_sparse=True, size=[dict_size, embedding_size], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( name='emb_w', initializer=paddle.nn.initializer.Constant(value=0.0), ), @@ -378,7 +378,7 @@ def net_conf(self): input=inputs[1], is_sparse=True, size=[dict_size, 1], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( name='emb_b', initializer=paddle.nn.initializer.Constant(value=0.0), ), @@ -391,7 +391,7 @@ def net_conf(self): input=neg_word_reshape, is_sparse=True, size=[dict_size, embedding_size], - param_attr=fluid.ParamAttr(name='emb_w', learning_rate=1.0), + param_attr=base.ParamAttr(name='emb_w', learning_rate=1.0), ) neg_emb_w_re = paddle.reshape( @@ -402,7 +402,7 @@ def net_conf(self): input=neg_word_reshape, is_sparse=True, size=[dict_size, 1], - param_attr=fluid.ParamAttr(name='emb_b', learning_rate=1.0), + param_attr=base.ParamAttr(name='emb_b', learning_rate=1.0), ) neg_emb_b_vec = paddle.reshape(neg_emb_b, shape=[-1, neg_num]) @@ -470,8 +470,8 @@ def net_conf(self): y_predict = paddle.static.nn.fc( x, size=1000, - weight_attr=fluid.ParamAttr(name='fc_w'), - bias_attr=fluid.ParamAttr(name='fc_b'), + weight_attr=base.ParamAttr(name='fc_w'), + bias_attr=base.ParamAttr(name='fc_b'), ) y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y) @@ -533,10 +533,10 @@ def net_conf(self): y_predict = paddle.static.nn.fc( x, size=1000, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( name='fc_w', regularizer=paddle.regularizer.L2Decay() ), - bias_attr=fluid.ParamAttr(name='fc_b'), + bias_attr=base.ParamAttr(name='fc_b'), ) y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y) @@ -571,8 +571,8 @@ def net_conf(self): y_predict = paddle.static.nn.fc( x, size=1000, - weight_attr=fluid.ParamAttr(name='fc_w'), - bias_attr=fluid.ParamAttr(name='fc_b'), + weight_attr=base.ParamAttr(name='fc_w'), + bias_attr=base.ParamAttr(name='fc_b'), ) y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y) @@ -646,7 +646,7 @@ def net_conf(self): y_predict = paddle.static.nn.fc( x, size=1000, - weight_attr=fluid.ParamAttr(name='fc_w'), + weight_attr=base.ParamAttr(name='fc_w'), bias_attr=False, ) y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') @@ -701,8 +701,8 @@ def emb_pool(ids, table_name, is_distributed): predict = paddle.static.nn.fc( x=fc0, size=2, - weight_attr=fluid.ParamAttr(name='fc_w'), - bias_attr=fluid.ParamAttr(name='fc_b'), + weight_attr=base.ParamAttr(name='fc_w'), + bias_attr=base.ParamAttr(name='fc_b'), ) label 
= paddle.static.data(name='label', shape=[-1, 1], dtype='int64') @@ -1088,8 +1088,8 @@ def net_conf(self): y_predict = paddle.static.nn.fc( x, size=1000, - weight_attr=fluid.ParamAttr(name='fc_w'), - bias_attr=fluid.ParamAttr(name='fc_b'), + weight_attr=base.ParamAttr(name='fc_w'), + bias_attr=base.ParamAttr(name='fc_b'), ) y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y) @@ -1120,8 +1120,8 @@ def net_conf(self): y_predict = paddle.static.nn.fc( x, size=1000, - weight_attr=fluid.ParamAttr(name='fc_w'), - bias_attr=fluid.ParamAttr(name='fc_b'), + weight_attr=base.ParamAttr(name='fc_w'), + bias_attr=base.ParamAttr(name='fc_b'), ) y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') cost = paddle.nn.functional.square_error_cost(input=y_predict, label=y) @@ -1178,10 +1178,10 @@ def transpiler_test_impl(self): class TestNCCL2Transpile(TranspilerTest): def test_nccl2_transpile(self): - if fluid.core.is_compiled_with_cuda(): # test nccl2 only with cuda - main = fluid.Program() - startup = fluid.Program() - with fluid.program_guard(main, startup): + if base.core.is_compiled_with_cuda(): # test nccl2 only with cuda + main = base.Program() + startup = base.Program() + with base.program_guard(main, startup): self.net_conf() config = paddle.distributed.transpiler.DistributeTranspilerConfig() @@ -1290,7 +1290,7 @@ def network_with_table(self, is_sparse, is_distributed): label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64") w_param = ( - fluid.default_main_program() + base.default_main_program() .global_block() .create_parameter( shape=[num_total_classes, 10], @@ -1300,7 +1300,7 @@ def network_with_table(self, is_sparse, is_distributed): ) ) b_param = ( - fluid.default_main_program() + base.default_main_program() .global_block() .create_parameter( shape=[num_total_classes, 1], @@ -1367,7 +1367,7 @@ def network_with_table(self, is_sparse, is_distributed): name='path_code', shape=[-1, 3], dtype='int64' ) w_param = ( - fluid.default_main_program() + base.default_main_program() .global_block() .create_parameter( shape=[num_total_classes, 10], @@ -1377,7 +1377,7 @@ def network_with_table(self, is_sparse, is_distributed): ) ) b_param = ( - fluid.default_main_program() + base.default_main_program() .global_block() .create_parameter( shape=[3, 1], @@ -1391,7 +1391,7 @@ def network_with_table(self, is_sparse, is_distributed): input=input, is_sparse=is_sparse, size=[3, 3], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Normal( scale=1 / math.sqrt(num_total_classes) ) diff --git a/test/legacy_test/test_dlpack.py b/test/legacy_test/test_dlpack.py index d908b316c065b..dbf9505cebc3a 100644 --- a/test/legacy_test/test_dlpack.py +++ b/test/legacy_test/test_dlpack.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestDLPack(unittest.TestCase): @@ -29,7 +29,7 @@ def test_dlpack_dygraph(self): out_from_dlpack = paddle.utils.dlpack.from_dlpack(dlpack) if paddle.in_dynamic_mode(): self.assertTrue( - isinstance(out_from_dlpack, paddle.fluid.core.eager.Tensor) + isinstance(out_from_dlpack, paddle.base.core.eager.Tensor) ) else: self.assertTrue(isinstance(out_from_dlpack, paddle.Tensor)) @@ -48,14 +48,14 @@ def test_dlpack_tensor_larger_than_2dim(self): def test_dlpack_static(self): paddle.enable_static() - tensor = fluid.create_lod_tensor( + 
tensor = base.create_lod_tensor( np.array([[1], [2], [3], [4]]).astype('int'), [[1, 3]], - fluid.CPUPlace(), + base.CPUPlace(), ) dlpack = paddle.utils.dlpack.to_dlpack(tensor) out_from_dlpack = paddle.utils.dlpack.from_dlpack(dlpack) - self.assertTrue(isinstance(out_from_dlpack, fluid.core.Tensor)) + self.assertTrue(isinstance(out_from_dlpack, base.core.Tensor)) np.testing.assert_array_equal( np.array(out_from_dlpack), np.array([[1], [2], [3], [4]]).astype('int'), @@ -63,14 +63,14 @@ def test_dlpack_static(self): # when build with cuda if core.is_compiled_with_cuda(): - gtensor = fluid.create_lod_tensor( + gtensor = base.create_lod_tensor( np.array([[1], [2], [3], [4]]).astype('int'), [[1, 3]], - fluid.CUDAPlace(0), + base.CUDAPlace(0), ) gdlpack = paddle.utils.dlpack.to_dlpack(gtensor) gout_from_dlpack = paddle.utils.dlpack.from_dlpack(gdlpack) - self.assertTrue(isinstance(gout_from_dlpack, fluid.core.Tensor)) + self.assertTrue(isinstance(gout_from_dlpack, base.core.Tensor)) np.testing.assert_array_equal( np.array(gout_from_dlpack), np.array([[1], [2], [3], [4]]).astype('int'), diff --git a/test/legacy_test/test_dot_op.py b/test/legacy_test/test_dot_op.py index a8b7d4dd53902..2fdaf3dd1598b 100644 --- a/test/legacy_test/test_dot_op.py +++ b/test/legacy_test/test_dot_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard class DotOp(OpTest): @@ -30,8 +30,8 @@ def setUp(self): self.init_input_output() self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), + 'X': OpTest.np_dtype_to_base_dtype(self.x), + 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.outputs = {'Out': self.out} self.attrs = {} @@ -164,17 +164,17 @@ def test_errors(self): class TestDygraph(unittest.TestCase): def test_dygraph(self): - with fluid.dygraph.guard(): - x1 = fluid.dygraph.to_variable(np.array([1, 3]).astype(np.float32)) - y1 = fluid.dygraph.to_variable(np.array([2, 5]).astype(np.float32)) + with base.dygraph.guard(): + x1 = base.dygraph.to_variable(np.array([1, 3]).astype(np.float32)) + y1 = base.dygraph.to_variable(np.array([2, 5]).astype(np.float32)) np.testing.assert_allclose( paddle.dot(x1, y1).numpy(), np.array([17]), rtol=1e-05 ) - x1 = fluid.dygraph.to_variable( + x1 = base.dygraph.to_variable( np.array([[1, 3], [3, 5]]).astype(np.float32) ) - y1 = fluid.dygraph.to_variable( + y1 = base.dygraph.to_variable( np.array([[2, 5], [6, 8]]).astype(np.float32) ) np.testing.assert_array_equal( @@ -225,8 +225,8 @@ def setUp(self): self.init_input_output() self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), + 'X': OpTest.np_dtype_to_base_dtype(self.x), + 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.outputs = {'Out': self.out} self.attrs = {} diff --git a/test/legacy_test/test_downpoursgd.py b/test/legacy_test/test_downpoursgd.py index 5bb65133b98a1..c2ae5f54ed4a0 100644 --- a/test/legacy_test/test_downpoursgd.py +++ b/test/legacy_test/test_downpoursgd.py @@ -21,8 +21,8 @@ import paddle import paddle.incubate.distributed.fleet.parameter_server.pslib.ps_pb2 as pslib -from paddle import fluid -from paddle.fluid.trainer_factory import TrainerFactory +from paddle import base +from paddle.base.trainer_factory import TrainerFactory from paddle.incubate.distributed.fleet.parameter_server.pslib.node import ( 
DownpourServer, DownpourWorker, @@ -67,11 +67,11 @@ def test_device_work_use_cvm(self): with open(f"{cache_path}/fleet_desc.prototxt") as f: text_format.Merge(f.read(), ps_param) fleet_desc = ps_param - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) + exe = base.Executor(base.CPUPlace()) + exe.run(base.default_startup_program()) opt_info = {} - main_program = fluid.default_main_program() + main_program = base.default_main_program() program_id = str(id(avg_cost.block.program)) program_configs = {} program_configs[program_id] = { @@ -131,11 +131,11 @@ def test_device_work(self): with open(f"{cache_path}/fleet_desc.prototxt") as f: text_format.Merge(f.read(), ps_param) fleet_desc = ps_param - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) + exe = base.Executor(base.CPUPlace()) + exe.run(base.default_startup_program()) opt_info = {} - main_program = fluid.default_main_program() + main_program = base.default_main_program() program_id = str(id(avg_cost.block.program)) program_configs = {} program_configs[program_id] = { @@ -193,11 +193,11 @@ def test_downpour_opt_work(self): with open(f"{cache_path}/fleet_desc.prototxt") as f: text_format.Merge(f.read(), ps_param) fleet_desc = ps_param - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) + exe = base.Executor(base.CPUPlace()) + exe.run(base.default_startup_program()) opt_info = {} - main_program = fluid.default_main_program() + main_program = base.default_main_program() program_id = str(id(avg_cost.block.program)) program_configs = {} program_configs[program_id] = { diff --git a/test/legacy_test/test_dropout_nd_op.py b/test/legacy_test/test_dropout_nd_op.py index 070e0f102a8d6..003977fe7ebe2 100644 --- a/test/legacy_test/test_dropout_nd_op.py +++ b/test/legacy_test/test_dropout_nd_op.py @@ -18,11 +18,11 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import _legacy_C_ops, fluid -from paddle.fluid import core -from paddle.fluid.data_feeder import check_variable_and_dtype -from paddle.fluid.framework import in_dygraph_mode -from paddle.fluid.layer_helper import LayerHelper +from paddle import _legacy_C_ops, base +from paddle.base import core +from paddle.base.data_feeder import check_variable_and_dtype +from paddle.base.framework import in_dygraph_mode +from paddle.base.layer_helper import LayerHelper from paddle.static import default_main_program @@ -178,14 +178,14 @@ class TestDropoutNdAPI(unittest.TestCase): def setUp(self): paddle.seed(123) np.random.seed(123) - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) def test_dygraph(self): paddle.disable_static() for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): in_np = np.random.random([4, 32, 16]).astype("float32") input = paddle.to_tensor(in_np) dropout_1 = paddle.incubate.nn.FusedDropout(p=0.0, axis=[0, 1]) diff --git a/test/legacy_test/test_dropout_op.py b/test/legacy_test/test_dropout_op.py index 088e9ce483014..dbfbfca070c31 100644 --- a/test/legacy_test/test_dropout_op.py +++ b/test/legacy_test/test_dropout_op.py @@ -19,8 +19,8 @@ from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci import paddle -from paddle import _C_ops, fluid, static -from paddle.fluid import Program, core, program_guard +from paddle import _C_ops, base, static +from paddle.base import 
Program, core, program_guard from paddle.incubate.autograd import primapi @@ -336,7 +336,7 @@ def setUp(self): x = np.random.random(self.input_size).astype("float16") out = x * (1.0 - self.prob) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.attrs = { 'dropout_prob': self.prob, 'fix_seed': self.fix_seed, @@ -457,10 +457,10 @@ def test_seed_cpu_place(self): attrs={'dropout_prob': 0.0}, outputs={'Out': x_out_var, 'Mask': mask_var}, ) - place = fluid.CPUPlace() + place = base.CPUPlace() if core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) + place = base.CUDAPlace(0) + exe = base.Executor(place) x_out, mask_out = exe.run( main_program, feed={}, @@ -477,8 +477,8 @@ def test_errors(self): def test_Variable(): # the input of dropout must be Variable. - x1 = fluid.create_lod_tensor( - np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + x1 = base.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], base.CPUPlace() ) paddle.nn.functional.dropout(x1, p=0.5) @@ -498,13 +498,13 @@ def test_dtype(): class TestDropoutFAPI(unittest.TestCase): def setUp(self): np.random.seed(123) - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) def check_static_result(self, place): paddle.enable_static() - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input = paddle.static.data( name="input", shape=[-1, -1], dtype="float32" ) @@ -571,7 +571,7 @@ def check_static_result(self, place): res_np = in_np res_np2 = np.zeros_like(in_np) - exe = fluid.Executor(place) + exe = base.Executor(place) res_list = [ res1, res2, @@ -587,19 +587,19 @@ def check_static_result(self, place): ] for res in res_list: fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": in_np}, fetch_list=[res], ) np.testing.assert_allclose(fetches[0], res_np, rtol=1e-05) fetches2 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": in_np}, fetch_list=[res10], ) np.testing.assert_allclose(fetches2[0], res_np2, rtol=1e-05) fetches3 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": in_np}, fetch_list=[res13], ) @@ -610,11 +610,11 @@ def test_static(self): def test_dygraph(self): for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): in_np = np.random.random([40, 40]).astype("float32") res_np = in_np res_np2 = np.zeros_like(in_np) - input = fluid.dygraph.to_variable(in_np) + input = base.dygraph.to_variable(in_np) res1 = paddle.nn.functional.dropout( x=input, p=0.0, training=False @@ -722,8 +722,8 @@ def test_errors(self): def test_Variable(): # the input of dropout must be Variable. - x1 = fluid.create_lod_tensor( - np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + x1 = base.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], base.CPUPlace() ) paddle.nn.functional.dropout(x1, p=0.5) @@ -731,8 +731,8 @@ def test_Variable(): def test_Variable2(): # the input of dropout must be Variable. 
- x1 = fluid.create_lod_tensor( - np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + x1 = base.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], base.CPUPlace() ) paddle.nn.functional.dropout(x1, p=0.5, axis=0) @@ -815,16 +815,16 @@ def test_axis_len(): class TestDropoutCAPI(unittest.TestCase): def setUp(self): np.random.seed(123) - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) def test_dygraph(self): for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([40, 40]).astype("float32") result_np = input_np - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) m = paddle.nn.Dropout(p=0.0) m.eval() result = m(input) @@ -836,13 +836,13 @@ def test_dygraph(self): class TestDropout2DFAPI(unittest.TestCase): def setUp(self): np.random.seed(123) - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) def check_static_result(self, place): paddle.enable_static() - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input = paddle.static.data( name="input", shape=[2, 3, 4, 5], dtype="float32" ) @@ -856,11 +856,11 @@ def check_static_result(self, place): in_np = np.random.random([2, 3, 4, 5]).astype("float32") res_np = in_np - exe = fluid.Executor(place) + exe = base.Executor(place) res_list = [res1, res2] for res in res_list: fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": in_np}, fetch_list=[res], ) @@ -872,10 +872,10 @@ def test_static(self): def test_dygraph(self): for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): in_np = np.random.random([2, 3, 4, 5]).astype("float32") res_np = in_np - input = fluid.dygraph.to_variable(in_np) + input = base.dygraph.to_variable(in_np) res1 = paddle.nn.functional.dropout2d( x=input, p=0.0, training=False, data_format='NCHW' @@ -916,16 +916,16 @@ def test_dataformat(): class TestDropout2DCAPI(unittest.TestCase): def setUp(self): np.random.seed(123) - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) def test_dygraph(self): for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([2, 3, 4, 5]).astype("float32") result_np = input_np - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) m = paddle.nn.Dropout2D(p=0.0) m.eval() result = m(input) @@ -934,7 +934,7 @@ def test_dygraph(self): ) def test_static_fp16_with_gpu(self): - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): place = paddle.CUDAPlace(0) paddle.enable_static() with paddle.static.program_guard( @@ -961,13 +961,13 @@ def test_static_fp16_with_gpu(self): class TestDropout3DFAPI(unittest.TestCase): def setUp(self): np.random.seed(123) - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) def check_static_result(self, place): paddle.enable_static() - with 
fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input = paddle.static.data( name="input", shape=[2, 3, 4, 5, 6], dtype="float32" ) @@ -981,11 +981,11 @@ def check_static_result(self, place): in_np = np.random.random([2, 3, 4, 5, 6]).astype("float32") res_np = in_np - exe = fluid.Executor(place) + exe = base.Executor(place) res_list = [res1, res2] for res in res_list: fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": in_np}, fetch_list=[res], ) @@ -997,10 +997,10 @@ def test_static(self): def test_dygraph(self): for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): in_np = np.random.random([2, 3, 4, 5, 6]).astype("float32") res_np = in_np - input = fluid.dygraph.to_variable(in_np) + input = base.dygraph.to_variable(in_np) res1 = paddle.nn.functional.dropout3d( x=input, p=0.0, training=False, data_format='NCDHW' @@ -1041,16 +1041,16 @@ def test_dataformat(): class TestDropout3DCAPI(unittest.TestCase): def setUp(self): np.random.seed(123) - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) def test_dygraph(self): for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([2, 3, 4, 5, 6]).astype("float32") result_np = input_np - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) m = paddle.nn.Dropout3D(p=0.0) m.eval() result = m(input) @@ -1062,12 +1062,12 @@ def test_dygraph(self): class TestAlphaDropoutFAPI(unittest.TestCase): def setUp(self): np.random.seed(123) - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) def check_static_result(self, place): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input = paddle.static.data( name="input", shape=[40, 40], dtype="float32" ) @@ -1081,17 +1081,17 @@ def check_static_result(self, place): res_np = in_np res_np3 = np.zeros_like(in_np) - exe = fluid.Executor(place) + exe = base.Executor(place) res_list = [res1, res2] for res in res_list: fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": in_np}, fetch_list=[res], ) np.testing.assert_allclose(fetches[0], res_np, rtol=1e-05) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": in_np}, fetch_list=[res3], ) @@ -1103,11 +1103,11 @@ def test_static(self): def test_dygraph(self): for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): in_np = np.random.random([40, 40]).astype("float32") res_np = in_np res_np3 = np.zeros_like(in_np) - input = fluid.dygraph.to_variable(in_np) + input = base.dygraph.to_variable(in_np) res1 = paddle.nn.functional.alpha_dropout(x=input, p=0.0) res2 = paddle.nn.functional.alpha_dropout( @@ -1127,8 +1127,8 @@ def test_errors(self): def test_Variable(): # the input of dropout must be Variable. 
- x1 = fluid.create_lod_tensor( - np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + x1 = base.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], base.CPUPlace() ) paddle.nn.functional.alpha_dropout(x1, p=0.5) @@ -1165,16 +1165,16 @@ def test_pvalue(): class TestAlphaDropoutCAPI(unittest.TestCase): def setUp(self): np.random.seed(123) - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) def test_dygraph(self): for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([40, 40]).astype("float32") result_np = input_np - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) m = paddle.nn.AlphaDropout(p=0.0) m.eval() result = m(input) @@ -1183,7 +1183,7 @@ def test_dygraph(self): ) def test_static_fp16_gpu(self): - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): place = paddle.CUDAPlace(0) with paddle.static.program_guard( paddle.static.Program(), paddle.static.Program() @@ -1260,9 +1260,9 @@ def test_static(self): class TestDropoutBackward(unittest.TestCase): def setUp(self): np.random.seed(123) - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) def cal_grad_upscale_train(self, mask, prob): return mask.astype("float32") / (1 - prob) @@ -1272,7 +1272,7 @@ def cal_grad_downscale_in_infer(self, mask): def test_backward_downscale_in_infer(self): for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input = paddle.uniform([40, 40], dtype="float32") input.stop_gradient = False out, mask = _C_ops.dropout( @@ -1287,7 +1287,7 @@ def test_backward_downscale_in_infer(self): def test_backward_upscale_train(self): for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): prob = 0.5 input = paddle.uniform([40, 40], dtype="float32") input.stop_gradient = False @@ -1304,7 +1304,7 @@ def test_backward_upscale_train(self): def test_backward_upscale_train_2(self): for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): prob = 0.3 input = paddle.uniform([40, 40], dtype="float32") input.stop_gradient = False @@ -1356,7 +1356,7 @@ def run_static(self, x): def run_dygraph(self, x): paddle.seed(2022) - with fluid.dygraph.guard(self.place): + with base.dygraph.guard(self.place): out = self.api_case(paddle.to_tensor(x)) return out @@ -1658,9 +1658,9 @@ def setUp(self): def get_eager_desire(self, place): paddle.disable_static() paddle.seed(self.seed) - if isinstance(place, fluid.CPUPlace): + if isinstance(place, base.CPUPlace): paddle.set_device("cpu") - if isinstance(place, fluid.CUDAPlace): + if isinstance(place, base.CUDAPlace): paddle.set_device("gpu") core.set_prim_eager_enabled(False) input_ = paddle.to_tensor( @@ -1682,7 +1682,7 @@ def test_static_comp(self): fwd_actual = [] rev_actual = [] mps = [] - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): for place in self.places: paddle.seed(self.seed) mp, sp = paddle.static.Program(), paddle.static.Program() @@ -1738,9 +1738,9 @@ def test_jit_comp(self): rev_actual = [] paddle.disable_static() for place in self.places: - if isinstance(place, fluid.CPUPlace): + if isinstance(place, 
base.CPUPlace): paddle.set_device("cpu") - if isinstance(place, fluid.CUDAPlace): + if isinstance(place, base.CUDAPlace): paddle.set_device("gpu") paddle.seed(self.seed) input_ = paddle.to_tensor( @@ -1779,7 +1779,7 @@ def test_jit_comp_with_cinn(self): rev_actual = [] paddle.disable_static() for place in self.places: - if not isinstance(place, fluid.CUDAPlace): + if not isinstance(place, base.CUDAPlace): continue paddle.set_device("gpu") paddle.seed(self.seed) @@ -1802,7 +1802,7 @@ def test_jit_comp_with_cinn(self): rev_actual.append(grad[0].numpy()) i = 0 for place in self.places: - if not isinstance(self.places[i], fluid.CUDAPlace): + if not isinstance(self.places[i], base.CUDAPlace): continue np.testing.assert_allclose( self.fwd_desire[i].sum(), diff --git a/test/legacy_test/test_dygraph_mnist_fp16.py b/test/legacy_test/test_dygraph_mnist_fp16.py index 6b95be0a5404d..0e2fb6570376b 100644 --- a/test/legacy_test/test_dygraph_mnist_fp16.py +++ b/test/legacy_test/test_dygraph_mnist_fp16.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.nn import Linear @@ -120,14 +120,14 @@ def forward(self, inputs, label): class TestMnist(unittest.TestCase): def func_mnist_fp16(self): - if not fluid.is_compiled_with_cuda(): + if not base.is_compiled_with_cuda(): return x = np.random.randn(1, 3, 224, 224).astype("float32") y = np.random.randint(10, size=[1, 1], dtype="int64") - with fluid.dygraph.guard(fluid.CUDAPlace(0)): + with base.dygraph.guard(base.CUDAPlace(0)): model = MNIST(dtype="float32") - x = fluid.dygraph.to_variable(x) - y = fluid.dygraph.to_variable(y) + x = base.dygraph.to_variable(x) + y = base.dygraph.to_variable(y) # using amp.auto_cast because paddle.nn.Conv2D doesn't suppport setting dtype with paddle.amp.auto_cast(dtype='float16'): diff --git a/test/legacy_test/test_dygraph_multi_forward.py b/test/legacy_test/test_dygraph_multi_forward.py index afcfe12c30f53..42729091d93b3 100644 --- a/test/legacy_test/test_dygraph_multi_forward.py +++ b/test/legacy_test/test_dygraph_multi_forward.py @@ -18,9 +18,9 @@ from test_imperative_base import new_program_scope import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.dygraph.base import to_variable +from paddle import base +from paddle.base import core +from paddle.base.dygraph.base import to_variable from paddle.nn import Linear SEED = 123123111 @@ -108,7 +108,7 @@ class TestDygraphMultiForward(unittest.TestCase): def test_mnist_forward_float32(self): epoch_num = 1 - with fluid.dygraph.guard(): + with base.dygraph.guard(): paddle.seed(SEED) paddle.framework.random._manual_program_seed(SEED) mnist = MNIST() @@ -151,10 +151,10 @@ def test_mnist_forward_float32(self): with new_program_scope(): paddle.seed(SEED) paddle.framework.random._manual_program_seed(SEED) - exe = fluid.Executor( - fluid.CPUPlace() + exe = base.Executor( + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) mnist = MNIST() @@ -182,7 +182,7 @@ def test_mnist_forward_float32(self): static_param_name_list.append(param.name) out = exe.run( - fluid.default_startup_program(), + base.default_startup_program(), fetch_list=static_param_name_list, ) @@ -202,7 +202,7 @@ def test_mnist_forward_float32(self): fetch_list = [avg_loss.name] out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"pixel": static_x_data, "label": y_data}, fetch_list=fetch_list, ) diff --git 
a/test/legacy_test/test_dygraph_weight_norm.py b/test/legacy_test/test_dygraph_weight_norm.py index 433f31d19e873..c8ac9db557e95 100644 --- a/test/legacy_test/test_dygraph_weight_norm.py +++ b/test/legacy_test/test_dygraph_weight_norm.py @@ -19,7 +19,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.nn.utils import remove_weight_norm, weight_norm @@ -121,7 +121,7 @@ def weight_normalize(self, w, dim=None): return g, v def test_check_output(self): - fluid.enable_imperative() + base.enable_imperative() linear = paddle.nn.Conv2D(2, 3, 3) before_weight = linear.weight.numpy() if self.dim is None: @@ -132,7 +132,7 @@ def test_check_output(self): wn = weight_norm(linear, dim=self.dim) outputs = [] for name, data in self.data.items(): - output = linear(fluid.dygraph.to_variable(data)) + output = linear(base.dygraph.to_variable(data)) outputs.append(output.numpy()) after_weight = linear.weight self.actual_outputs = [linear.weight_g.numpy(), linear.weight_v.numpy()] @@ -183,7 +183,7 @@ def init_test_case(self): self.dim = None def test_check_output(self): - fluid.enable_imperative() + base.enable_imperative() linear = paddle.nn.Conv2D(2, 3, 3) before_weight = linear.weight wn = weight_norm(linear, dim=self.dim) diff --git a/test/legacy_test/test_dynamic_rnn_stop_gradient.py b/test/legacy_test/test_dynamic_rnn_stop_gradient.py index 4f5048fd48391..5e61be6117ef1 100644 --- a/test/legacy_test/test_dynamic_rnn_stop_gradient.py +++ b/test/legacy_test/test_dynamic_rnn_stop_gradient.py @@ -17,15 +17,15 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.tensor.manipulation import tensor_array_to_tensor paddle.enable_static() def build_and_run_program(place, batch_size, beam_size, stop_gradient=False): - fluid.default_startup_program().random_seed = 1 - fluid.default_main_program().random_seed = 1 + base.default_startup_program().random_seed = 1 + base.default_main_program().random_seed = 1 np.random.seed(2) x = paddle.assign( @@ -64,7 +64,7 @@ def build_and_run_program(place, batch_size, beam_size, stop_gradient=False): loss = paddle.mean(out) opt = paddle.optimizer.Adam(0.01) opt.minimize(loss) - exe = fluid.Executor(place) + exe = base.Executor(place) data = np.random.random_integers( low=0, high=beam_size - 1, size=(batch_size, beam_size) ).astype("int64") @@ -79,8 +79,8 @@ def setUp(self): self.beam_size = 64 def run_main(self, place): - with fluid.program_guard(fluid.Program(), fluid.Program()): - with fluid.scope_guard(fluid.Scope()): + with base.program_guard(base.Program(), base.Program()): + with base.scope_guard(base.Scope()): value1 = build_and_run_program( place, self.batch_size, self.beam_size, False ) @@ -91,9 +91,9 @@ def run_main(self, place): np.testing.assert_array_equal(value1, value2) def test_check_main(self): - places = [fluid.CPUPlace()] - if fluid.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places = [base.CPUPlace()] + if base.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) for p in places: self.run_main(p) diff --git a/test/legacy_test/test_eager_deletion_conditional_block.py b/test/legacy_test/test_eager_deletion_conditional_block.py index 998c02db814eb..eb18a3f4133a8 100644 --- a/test/legacy_test/test_eager_deletion_conditional_block.py +++ b/test/legacy_test/test_eager_deletion_conditional_block.py @@ -14,9 +14,9 @@ import unittest -from paddle import fluid +from paddle import base -fluid.core._set_eager_deletion_mode(0.0, 1.0, True) 
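# --- Editor's note (illustrative, not part of the patch): this module-level toggle is
# --- only re-spelled under the new namespace; assuming `paddle.base` re-exports `core`,
# --- the file header after the rename reduces to:
#
#     from paddle import base
#     base.core._set_eager_deletion_mode(0.0, 1.0, True)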
+base.core._set_eager_deletion_mode(0.0, 1.0, True) if __name__ == '__main__': unittest.main() diff --git a/test/legacy_test/test_eager_deletion_delete_vars.py b/test/legacy_test/test_eager_deletion_delete_vars.py index 065de1feb0455..7420e15981c27 100644 --- a/test/legacy_test/test_eager_deletion_delete_vars.py +++ b/test/legacy_test/test_eager_deletion_delete_vars.py @@ -23,11 +23,11 @@ from functools import reduce import paddle -from paddle import fluid +from paddle import base paddle.enable_static() -fluid.core._set_eager_deletion_mode(0.0, 1.0, True) +base.core._set_eager_deletion_mode(0.0, 1.0, True) def simple_fc_net(): @@ -39,7 +39,7 @@ def simple_fc_net(): hidden, size=200, activation='tanh', - bias_attr=fluid.ParamAttr( + bias_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=1.0) ), ) @@ -70,15 +70,15 @@ def get_persistables_and_non_persistables(prog, fetch_list): class TestExecutor(unittest.TestCase): def test_executor_main(self): - places = [fluid.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places = [base.CPUPlace()] + if base.core.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) for p in places: self.place = p - with fluid.program_guard(fluid.Program(), fluid.Program()): - with fluid.scope_guard(fluid.Scope()): - with fluid.unique_name.guard(): + with base.program_guard(base.Program(), base.Program()): + with base.scope_guard(base.Scope()): + with base.unique_name.guard(): self.executor_main() def prepare_feed(self, image, label, dev_cnt=1): @@ -124,7 +124,7 @@ def assertScopeVar(self, scope, persitables, non_persistables): self.assertEqual(len(outline_np_vars), 0) def assert_gc_vars(self, program, skip_vars, non_persistable_vars): - gc_vars = fluid.core._get_eager_deletion_vars(program.desc, skip_vars) + gc_vars = base.core._get_eager_deletion_vars(program.desc, skip_vars) self.assertEqual(len(gc_vars), program.num_blocks) gc_vars = reduce(lambda x, y: x + y, gc_vars[0]) self.assertEqual(set(gc_vars), set(non_persistable_vars)) @@ -133,42 +133,42 @@ def executor_main(self): image, label, loss = simple_fc_net() loss.persistable = False persistables, non_persistables = get_persistables_and_non_persistables( - fluid.default_main_program(), [loss.name] + base.default_main_program(), [loss.name] ) print(f'Non-persistable var number {len(non_persistables)}') print(non_persistables) self.assert_gc_vars( - fluid.default_main_program(), [loss.name], non_persistables + base.default_main_program(), [loss.name], non_persistables ) - exe = fluid.Executor(self.place) - exe.run(fluid.default_startup_program()) + exe = base.Executor(self.place) + exe.run(base.default_startup_program()) - p = fluid.core.Place() + p = base.core.Place() p.set_place(self.place) - exe = fluid.core.Executor(p) + exe = base.core.Executor(p) for _ in range(10): image_np, label_np = self.prepare_feed(image, label) - fluid.global_scope().var(image.name).get_tensor().set( + base.global_scope().var(image.name).get_tensor().set( image_np, self.place ) - fluid.global_scope().var(label.name).get_tensor().set( + base.global_scope().var(label.name).get_tensor().set( label_np, self.place ) # exe.run would not create local scope # so that we can detect whether gc clears temporary variables exe.run( - fluid.default_main_program().desc, - fluid.global_scope(), + base.default_main_program().desc, + base.global_scope(), 0, False, True, [loss.name], ) self.assertScopeVar( - fluid.global_scope(), persistables, non_persistables + base.global_scope(), 
persistables, non_persistables ) diff --git a/test/legacy_test/test_eager_deletion_dynamic_rnn_base.py b/test/legacy_test/test_eager_deletion_dynamic_rnn_base.py index 5716eaa8b03ea..aac329502abd6 100644 --- a/test/legacy_test/test_eager_deletion_dynamic_rnn_base.py +++ b/test/legacy_test/test_eager_deletion_dynamic_rnn_base.py @@ -21,8 +21,8 @@ from fake_reader import fake_imdb_reader import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def train(network, use_cuda, batch_size=32, pass_num=2): @@ -45,16 +45,16 @@ def train(network, use_cuda, batch_size=32, pass_num=2): optimizer = paddle.optimizer.Adagrad(learning_rate=0.2) optimizer.minimize(cost) - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - feeder = fluid.DataFeeder(feed_list=[data, label], place=place) + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() + feeder = base.DataFeeder(feed_list=[data, label], place=place) reader = feeder.feed(train_reader()) - exe = fluid.Executor(place) - fluid.default_startup_program().random_seed = 1 - fluid.default_main_program().random_seed = 1 - exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + base.default_startup_program().random_seed = 1 + base.default_main_program().random_seed = 1 + exe.run(base.default_startup_program()) - train_cp = fluid.default_main_program() + train_cp = base.default_main_program() fetch_list = [cost] for pass_id in range(pass_num): @@ -80,6 +80,6 @@ def test_network(self): for use_cuda in [True, False]: print(f'network: {self.net.__name__}, use_cuda: {use_cuda}') - with fluid.program_guard(fluid.Program(), fluid.Program()): - with fluid.scope_guard(core.Scope()): + with base.program_guard(base.Program(), base.Program()): + with base.scope_guard(core.Scope()): train(self.net, use_cuda) diff --git a/test/legacy_test/test_eager_deletion_mnist.py b/test/legacy_test/test_eager_deletion_mnist.py index e9fbd2de4961b..888f1800a0635 100644 --- a/test/legacy_test/test_eager_deletion_mnist.py +++ b/test/legacy_test/test_eager_deletion_mnist.py @@ -14,9 +14,9 @@ import unittest -from paddle import fluid +from paddle import base -fluid.core._set_eager_deletion_mode(0.0, 1.0, True) +base.core._set_eager_deletion_mode(0.0, 1.0, True) if __name__ == '__main__': unittest.main() diff --git a/test/legacy_test/test_eager_deletion_padding_rnn.py b/test/legacy_test/test_eager_deletion_padding_rnn.py index 874731e35d27a..0bc835a86368f 100644 --- a/test/legacy_test/test_eager_deletion_padding_rnn.py +++ b/test/legacy_test/test_eager_deletion_padding_rnn.py @@ -18,8 +18,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid.executor import Executor +from paddle import base +from paddle.base.executor import Executor os.environ["CPU_NUM"] = "1" @@ -245,7 +245,7 @@ def encoder_static( size=[vocab_size, hidden_size], dtype='float32', is_sparse=False, - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( name='embedding_para', initializer=paddle.nn.initializer.Uniform( low=-init_scale, high=init_scale @@ -325,20 +325,20 @@ def setUp(self): # The default exec_strategy used for PaddingRNN. # You can change it in set_customed_config. - self.exec_strategy = fluid.ExecutionStrategy() + self.exec_strategy = base.ExecutionStrategy() self.exec_strategy.num_threads = self.device_count self.exec_strategy.num_iteration_per_drop_scope = 100 # The default build_strategy used for PaddingRNN. # You can change it in set_customed_config. 
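# --- Editor's note (illustrative sketch, not part of the patch): under the renamed
# --- namespace the strategy/executor setup touched by this hunk would be built roughly
# --- as follows, assuming `paddle.base` mirrors the old `paddle.fluid` API surface.
import paddle
from paddle import base
from paddle.base.executor import Executor

paddle.enable_static()
exec_strategy = base.ExecutionStrategy()   # was fluid.ExecutionStrategy()
exec_strategy.num_threads = 1
exec_strategy.num_iteration_per_drop_scope = 100
build_strategy = base.BuildStrategy()      # was fluid.BuildStrategy()
build_strategy.enable_inplace = True
build_strategy.memory_optimize = False
exe = Executor(base.CPUPlace())            # CPU executor by default, as in the test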
- self.build_strategy = fluid.BuildStrategy() + self.build_strategy = base.BuildStrategy() self.build_strategy.enable_inplace = True self.build_strategy.memory_optimize = False self.build_strategy.fuse_all_optimizer_ops = True # CPU executor is used for PaddingRNN default. # You can change to CUDA executor in set_customed_config. - self.exe = Executor(fluid.CPUPlace()) + self.exe = Executor(base.CPUPlace()) def set_customed_config(self): # This function will be called before training. @@ -347,10 +347,10 @@ def set_customed_config(self): def _prepare_program(self, config): paddle.seed(config.random_seed) - self.main_program = fluid.Program() - self.startup_program = fluid.Program() - with fluid.program_guard(self.main_program, self.startup_program): - with fluid.unique_name.guard(): + self.main_program = base.Program() + self.startup_program = base.Program() + with base.program_guard(self.main_program, self.startup_program): + with base.unique_name.guard(): res_vars = lm_model( config.hidden_size, config.vocab_size, diff --git a/test/legacy_test/test_eager_deletion_recurrent_op.py b/test/legacy_test/test_eager_deletion_recurrent_op.py index 7f98c7dbe387f..ef47eba277c70 100644 --- a/test/legacy_test/test_eager_deletion_recurrent_op.py +++ b/test/legacy_test/test_eager_deletion_recurrent_op.py @@ -18,13 +18,13 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base paddle.enable_static() np.random.seed(123) os.environ["CPU_NUM"] = "1" -fluid.core._set_eager_deletion_mode(0.0, 1.0, True) +base.core._set_eager_deletion_mode(0.0, 1.0, True) class RecurrentNet(paddle.nn.Layer): diff --git a/test/legacy_test/test_eager_deletion_while_op.py b/test/legacy_test/test_eager_deletion_while_op.py index 55d0e4ae15039..b909946d9704b 100644 --- a/test/legacy_test/test_eager_deletion_while_op.py +++ b/test/legacy_test/test_eager_deletion_while_op.py @@ -21,12 +21,12 @@ import numpy import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.executor import Executor +from paddle import base +from paddle.base import core +from paddle.base.executor import Executor paddle.enable_static() -fluid.core._set_eager_deletion_mode(0.0, 1.0, True) +base.core._set_eager_deletion_mode(0.0, 1.0, True) class TestEagerDeletionWhileOpBase(unittest.TestCase): @@ -38,8 +38,8 @@ def test_main(self): places.append(core.CUDAPlace(0)) for p in places: - with fluid.program_guard(fluid.Program(), fluid.Program()): - with fluid.scope_guard(fluid.Scope()): + with base.program_guard(base.Program(), base.Program()): + with base.scope_guard(base.Scope()): self.run_main(p) def run_main(self, place): @@ -121,14 +121,14 @@ def run_main(self, place): optim.minimize(loss) gc_vars = core._get_eager_deletion_vars( - fluid.default_main_program().desc, [loss.name] + base.default_main_program().desc, [loss.name] ) self.assertEqual(len(gc_vars), 3) exe = Executor(self.place) - exe.run(fluid.default_startup_program()) + exe.run(base.default_startup_program()) - prog = fluid.default_main_program() + prog = base.default_main_program() for _ in range(5): d = [] diff --git a/test/legacy_test/test_eager_run_program.py b/test/legacy_test/test_eager_run_program.py index e110383a42658..3014b791b47e1 100644 --- a/test/legacy_test/test_eager_run_program.py +++ b/test/legacy_test/test_eager_run_program.py @@ -18,9 +18,9 @@ import paddle from paddle import _legacy_C_ops -from paddle.fluid import core -from paddle.fluid.dygraph.base import switch_to_static_graph -from paddle.fluid.framework import 
Variable +from paddle.base import core +from paddle.base.dygraph.base import switch_to_static_graph +from paddle.base.framework import Variable def _append_backward_desc(main_program, outs): @@ -32,7 +32,7 @@ def _append_backward_desc(main_program, outs): targets.append(program.global_block().var(out.name)) if targets: - paddle.fluid.backward.gradients(targets=targets, inputs=[]) + paddle.base.backward.gradients(targets=targets, inputs=[]) return program @@ -76,7 +76,7 @@ def _add_build_strategy_for(input_program, start_op_index, end_op_index): compiled_program._compile( core.Scope(), paddle.framework._current_expected_place() ) - ir_graph = paddle.fluid.framework.IrGraph(compiled_program._graph) + ir_graph = paddle.base.framework.IrGraph(compiled_program._graph) builded_program = ir_graph.to_program() return builded_program diff --git a/test/legacy_test/test_eager_trace_op.py b/test/legacy_test/test_eager_trace_op.py index 91e95ff04a1f4..ee3ed9bb17989 100644 --- a/test/legacy_test/test_eager_trace_op.py +++ b/test/legacy_test/test_eager_trace_op.py @@ -24,18 +24,18 @@ def test_branches(self): data = np.random.random([1, 1]).astype(np.float32) x = paddle.to_tensor(data) - paddle.fluid.framework._dygraph_tracer().trace_op( + paddle.base.framework._dygraph_tracer().trace_op( 'broadcast_tensors', {'X': [x, x], 'Out': [x, x]}, {'Out': [x, x]}, {}, ) - paddle.fluid.framework._dygraph_tracer().trace_op( + paddle.base.framework._dygraph_tracer().trace_op( 'scale', {'X': x}, {'Out': x}, {'scale': 0.5} ) scale = paddle.to_tensor(np.random.random([1]).astype(np.float32)) - paddle.fluid.framework._dygraph_tracer().trace_op( + paddle.base.framework._dygraph_tracer().trace_op( 'instance_norm', {'Scale': [scale], 'X': [x]}, {'Y': [x]}, {} ) diff --git a/test/legacy_test/test_egr_python_api.py b/test/legacy_test/test_egr_python_api.py index 10f86571af101..9082956887e2b 100644 --- a/test/legacy_test/test_egr_python_api.py +++ b/test/legacy_test/test_egr_python_api.py @@ -18,8 +18,8 @@ import numpy as np import paddle -from paddle.fluid import core -from paddle.fluid.framework import ( +from paddle.base import core +from paddle.base.framework import ( EagerParamBase, _current_expected_place, in_dygraph_mode, @@ -167,7 +167,7 @@ def constructor(self, place): self.assertEqual(egr_tensor3.stop_gradient, True) self.assertTrue( egr_tensor3.place._equals( - paddle.fluid.framework._current_expected_place() + paddle.base.framework._current_expected_place() ) ) np.testing.assert_array_equal(egr_tensor3.numpy(), arr2) @@ -181,7 +181,7 @@ def constructor(self, place): self.assertEqual(egr_tensor4.stop_gradient, True) self.assertTrue( egr_tensor4.place._equals( - paddle.fluid.framework._current_expected_place() + paddle.base.framework._current_expected_place() ) ) np.testing.assert_array_equal(egr_tensor4.numpy(), egr_tensor3.numpy()) @@ -233,8 +233,8 @@ def constructor(self, place): np.testing.assert_array_equal(egr_tensor9.numpy(), arr4) x = np.random.rand(3, 3).astype('float32') - t = paddle.fluid.Tensor() - t.set(x, paddle.fluid.CPUPlace()) + t = paddle.base.Tensor() + t.set(x, paddle.base.CPUPlace()) egr_tensor10 = core.eager.Tensor(t, place) self.assertEqual(egr_tensor10.persistable, False) self.assertTrue("generated_tensor" in egr_tensor10.name) @@ -259,7 +259,7 @@ def constructor(self, place): self.assertEqual(egr_tensor12.shape, [3, 3]) self.assertEqual(egr_tensor12.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor12.stop_gradient, True) - 
self.assertTrue(egr_tensor12.place._equals(paddle.fluid.CPUPlace())) + self.assertTrue(egr_tensor12.place._equals(paddle.base.CPUPlace())) np.testing.assert_array_equal(egr_tensor12.numpy(), x) zero_dim_param = EagerParamBase(shape=[], dtype="float32") @@ -321,7 +321,7 @@ def constructor_with_kwargs(self, place): self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32]) self.assertTrue( egr_tensor0.place._equals( - paddle.fluid.framework._current_expected_place() + paddle.base.framework._current_expected_place() ) ) self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP32) @@ -507,7 +507,7 @@ def constructor_with_kwargs(self, place): self.assertEqual(egr_tensor15.stop_gradient, True) self.assertTrue( egr_tensor15.place._equals( - paddle.fluid.framework._current_expected_place() + paddle.base.framework._current_expected_place() ) ) np.testing.assert_array_equal(egr_tensor15.numpy(), egr_tensor4.numpy()) @@ -522,7 +522,7 @@ def constructor_with_kwargs(self, place): self.assertEqual(egr_tensor16.stop_gradient, True) self.assertTrue( egr_tensor16.place._equals( - paddle.fluid.framework._current_expected_place() + paddle.base.framework._current_expected_place() ) ) np.testing.assert_array_equal(egr_tensor16.numpy(), egr_tensor4.numpy()) @@ -568,8 +568,8 @@ def constructor_with_kwargs(self, place): # init eager tensor by framework tensor x = np.random.rand(3, 3).astype('float32') - t = paddle.fluid.Tensor() - t.set(x, paddle.fluid.CPUPlace()) + t = paddle.base.Tensor() + t.set(x, paddle.base.CPUPlace()) egr_tensor20 = core.eager.Tensor(value=t) self.assertEqual(egr_tensor20.persistable, False) self.assertTrue("generated_tensor" in egr_tensor20.name) @@ -578,7 +578,7 @@ def constructor_with_kwargs(self, place): self.assertEqual(egr_tensor20.stop_gradient, True) self.assertTrue( egr_tensor20.place._equals( - paddle.fluid.framework._current_expected_place() + paddle.base.framework._current_expected_place() ) ) np.testing.assert_array_equal(egr_tensor20.numpy(), x) @@ -800,13 +800,13 @@ def test_global_properties(self): def test_place_guard(self): if core.is_compiled_with_cuda(): paddle.set_device("gpu:0") - with paddle.fluid.framework._dygraph_place_guard(core.CPUPlace()): + with paddle.base.framework._dygraph_place_guard(core.CPUPlace()): self.assertTrue( isinstance(_current_expected_place(), type(core.CPUPlace())) ) else: paddle.set_device("cpu") - with paddle.fluid.framework._dygraph_place_guard(core.CPUPlace()): + with paddle.base.framework._dygraph_place_guard(core.CPUPlace()): self.assertTrue( isinstance(_current_expected_place(), type(core.CPUPlace())) ) @@ -820,7 +820,7 @@ def test_value(self): self.assertEqual(egr_tensor0.shape, [4, 16, 16, 32]) self.assertTrue( egr_tensor0.place._equals( - paddle.fluid.framework._current_expected_place() + paddle.base.framework._current_expected_place() ) ) self.assertEqual(egr_tensor0.dtype, core.VarDesc.VarType.FP64) @@ -831,7 +831,7 @@ def test_value(self): ) self.assertTrue( egr_tensor0.value().get_tensor()._place(), - paddle.fluid.framework._current_expected_place(), + paddle.base.framework._current_expected_place(), ) self.assertTrue(egr_tensor0.value().get_tensor()._is_initialized()) @@ -936,13 +936,13 @@ def func_fp16_initilaizer(self): return res def func_layer_helper_base(self, value): - base = paddle.fluid.layer_helper_base.LayerHelperBase( + base = paddle.base.layer_helper_base.LayerHelperBase( "test_layer", "test_layer" ) return base.to_variable(value).numpy() def func_base_to_variable(self, value): - 
paddle.fluid.dygraph.base.to_variable(value) + paddle.base.dygraph.base.to_variable(value) def test_backward_with_single_tensor(self): arr4 = np.random.rand(4, 16, 16, 32).astype('float32') @@ -954,7 +954,7 @@ def test_backward_with_single_tensor(self): self.assertEqual(egr_tensor12.shape, [4, 16, 16, 32]) self.assertEqual(egr_tensor12.dtype, core.VarDesc.VarType.FP32) self.assertEqual(egr_tensor12.stop_gradient, True) - self.assertTrue(egr_tensor12.place._equals(paddle.fluid.CPUPlace())) + self.assertTrue(egr_tensor12.place._equals(paddle.base.CPUPlace())) np.testing.assert_array_equal(egr_tensor12.numpy(), arr4) np.testing.assert_array_equal(egr_tensor12.gradient(), None) egr_tensor12.stop_gradient = False diff --git a/test/legacy_test/test_egr_string_tensor_api.py b/test/legacy_test/test_egr_string_tensor_api.py index 16388ee645808..2fad942cf3e58 100644 --- a/test/legacy_test/test_egr_string_tensor_api.py +++ b/test/legacy_test/test_egr_string_tensor_api.py @@ -16,7 +16,7 @@ import numpy as np -from paddle.fluid import core +from paddle.base import core class EagerStringTensorTestCase(unittest.TestCase): diff --git a/test/legacy_test/test_eig_op.py b/test/legacy_test/test_eig_op.py index 5e804be327ec6..da28ca66bc000 100644 --- a/test/legacy_test/test_eig_op.py +++ b/test/legacy_test/test_eig_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, skip_check_grad_ci import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core # cast output to complex for numpy.linalg.eig @@ -66,7 +66,7 @@ def setUp(self): self.python_api = paddle.linalg.eig self.__class__.op_type = self.op_type self.init_input() - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(self.x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(self.x)} self.outputs = {'Eigenvalues': self.out[0], 'Eigenvectors': self.out[1]} def init_input(self): @@ -243,15 +243,15 @@ def test_check_output_with_place(self): place = core.CPUPlace() input_np = np.random.random([3, 3]).astype('complex') expect_val, expect_vec = np.linalg.eig(input_np) - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input = paddle.static.data( name="input", shape=[3, 3], dtype='complex' ) act_val, act_vec = paddle.linalg.eig(input) - exe = fluid.Executor(place) + exe = base.Executor(place) fetch_val, fetch_vec = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": input_np}, fetch_list=[act_val, act_vec], ) @@ -326,8 +326,8 @@ def test_check_grad(self): grad_v = np.ones(real_v.shape, test_type) grad_x = eig_backward(real_w, real_v, grad_w, grad_v) - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(input_np) + with base.dygraph.guard(): + x = base.dygraph.to_variable(input_np) x.stop_gradient = False w, v = paddle.linalg.eig(x) (w.sum() + v.sum()).backward() diff --git a/test/legacy_test/test_eigvals_op.py b/test/legacy_test/test_eigvals_op.py index 8c367c31c340c..811eee672e1a5 100644 --- a/test/legacy_test/test_eigvals_op.py +++ b/test/legacy_test/test_eigvals_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest import paddle -from paddle.fluid import core +from paddle.base import core np.set_printoptions(threshold=np.inf) diff --git a/test/legacy_test/test_einsum.py b/test/legacy_test/test_einsum.py index a689c5f1e42fe..551c7948191f8 100644 --- a/test/legacy_test/test_einsum.py +++ b/test/legacy_test/test_einsum.py @@ -18,7 +18,7 @@ import numpy as np import paddle -from 
paddle.fluid import core +from paddle.base import core os.environ['FLAGS_new_einsum'] = "0" @@ -179,14 +179,14 @@ def test_forward(self): expected_result = np.einsum(self.sample["paradigm"], *operands) equation = self.sample["paradigm"] - with paddle.fluid.dygraph.guard( + with paddle.base.dygraph.guard( self._get_place(force_to_use_cpu=False) ): pd_operands = [paddle.to_tensor(operand) for operand in operands] result = paddle.einsum(equation, *pd_operands) self.check_output_equal(result.numpy(), expected_result) - with paddle.fluid.dygraph.guard(self._get_place(force_to_use_cpu=True)): + with paddle.base.dygraph.guard(self._get_place(force_to_use_cpu=True)): pd_operands = [paddle.to_tensor(operand) for operand in operands] result = paddle.einsum(equation, *pd_operands) self.check_output_equal(result.numpy(), expected_result) @@ -348,7 +348,7 @@ def check_output_equal(self, actual, expect, rtol=1.0e-5, atol=1.0e-8): def check_output(self, eqn, *ops): expect = np.einsum(eqn, *ops) - with paddle.fluid.dygraph.guard( + with paddle.base.dygraph.guard( self._get_place(force_to_use_cpu=False) ): pd_operands = [paddle.to_tensor(op) for op in ops] @@ -449,14 +449,14 @@ def test_large_nops(self): def test_static_graph(self): paddle.enable_static() - fluid = paddle.fluid - if fluid.core.is_compiled_with_cuda(): - self.place = fluid.CUDAPlace(0) + base = paddle.base + if base.core.is_compiled_with_cuda(): + self.place = base.CUDAPlace(0) else: - self.place = fluid.CPUPlace() - main = fluid.Program() - startup = fluid.Program() - with fluid.program_guard(main, startup): + self.place = base.CPUPlace() + main = base.Program() + startup = base.Program() + with base.program_guard(main, startup): a = paddle.static.data( name='a', shape=[3, None, None, None], dtype='float' ) @@ -479,7 +479,7 @@ def test_static_graph(self): outs.append(paddle.einsum('...kj, ...ik', d, e)) outs.append(paddle.einsum('ijk..., ikj', c, e)) outs.append(paddle.einsum('ijk..., ikj->...ij', c, e)) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) exe.run(startup) a = np.arange(72).reshape(3, 2, 3, 4).astype('float') b = np.arange(48).reshape(2, 2, 3, 4).astype('float') diff --git a/test/legacy_test/test_einsum_op.py b/test/legacy_test/test_einsum_op.py index b69bdcf075124..8595094ee0fed 100644 --- a/test/legacy_test/test_einsum_op.py +++ b/test/legacy_test/test_einsum_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core def einsum_wrapper(a, b): diff --git a/test/legacy_test/test_einsum_v2.py b/test/legacy_test/test_einsum_v2.py index 6b4bf6958b946..cb93963b7dd81 100644 --- a/test/legacy_test/test_einsum_v2.py +++ b/test/legacy_test/test_einsum_v2.py @@ -18,7 +18,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core os.environ['FLAGS_new_einsum'] = "1" @@ -181,14 +181,14 @@ def test_forward(self): expected_result = np.einsum(self.sample["paradigm"], *operands) equation = self.sample["paradigm"] - with paddle.fluid.dygraph.guard( + with paddle.base.dygraph.guard( self._get_place(force_to_use_cpu=False) ): pd_operands = [paddle.to_tensor(operand) for operand in operands] result = paddle.einsum(equation, *pd_operands) self.check_output_equal(result.numpy(), expected_result) - with paddle.fluid.dygraph.guard(self._get_place(force_to_use_cpu=True)): + with paddle.base.dygraph.guard(self._get_place(force_to_use_cpu=True)): pd_operands = [paddle.to_tensor(operand) for 
operand in operands] result = paddle.einsum(equation, *pd_operands) self.check_output_equal(result.numpy(), expected_result) @@ -390,7 +390,7 @@ def check_output_equal(self, actual, expect, rtol=1.0e-5, atol=1.0e-8): def check_output(self, eqn, *ops): expect = np.einsum(eqn, *ops) - with paddle.fluid.dygraph.guard( + with paddle.base.dygraph.guard( self._get_place(force_to_use_cpu=False) ): pd_operands = [paddle.to_tensor(op) for op in ops] @@ -471,14 +471,14 @@ def test_sums(self): def test_static_graph(self): paddle.enable_static() - fluid = paddle.fluid - if fluid.core.is_compiled_with_cuda(): - self.place = fluid.CUDAPlace(0) + base = paddle.base + if base.core.is_compiled_with_cuda(): + self.place = base.CUDAPlace(0) else: - self.place = fluid.CPUPlace() - main = fluid.Program() - startup = fluid.Program() - with fluid.program_guard(main, startup): + self.place = base.CPUPlace() + main = base.Program() + startup = base.Program() + with base.program_guard(main, startup): a = paddle.static.data( name='a', shape=[3, None, None, None], dtype='float' ) @@ -501,7 +501,7 @@ def test_static_graph(self): outs.append(paddle.einsum('...kj, ...ik', d, e)) outs.append(paddle.einsum('ijk..., ikj', c, e)) outs.append(paddle.einsum('ijk..., ikj->...ij', c, e)) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) exe.run(startup) a = np.arange(72).reshape(3, 2, 3, 4).astype('float') b = np.arange(48).reshape(2, 2, 3, 4).astype('float') diff --git a/test/legacy_test/test_elementwise_add_op.py b/test/legacy_test/test_elementwise_add_op.py index 5eb25c977e646..4a9ecc51aa73a 100644 --- a/test/legacy_test/test_elementwise_add_op.py +++ b/test/legacy_test/test_elementwise_add_op.py @@ -21,9 +21,9 @@ from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.layer_helper import LayerHelper +from paddle import base +from paddle.base import core +from paddle.base.layer_helper import LayerHelper class TestElementwiseAddOp(OpTest): @@ -43,8 +43,8 @@ def setUp(self): self.if_enable_cinn() self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), + 'X': OpTest.np_dtype_to_base_dtype(self.x), + 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} self.outputs = {'Out': self.out} @@ -187,10 +187,10 @@ def setUp(self): self.axis = -1 self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype( + 'X': OpTest.np_dtype_to_base_dtype( convert_float_to_uint16(self.x) ), - 'Y': OpTest.np_dtype_to_fluid_dtype( + 'Y': OpTest.np_dtype_to_base_dtype( convert_float_to_uint16(self.y) ), } @@ -581,7 +581,7 @@ def _executed_api(self, x, y, name=None): return paddle.add(x, y, name) def test_name(self): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): x = paddle.static.data(name="x", shape=[2, 3], dtype="float32") y = paddle.static.data(name='y', shape=[2, 3], dtype='float32') @@ -589,7 +589,7 @@ def test_name(self): self.assertEqual(('add_res' in y_1.name), True) def test_declarative(self): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): def gen_data(): return { @@ -601,18 +601,18 @@ def gen_data(): y = paddle.static.data(name="y", shape=[3], dtype='float32') z = self._executed_api(x, y) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) z_value = 
exe.run(feed=gen_data(), fetch_list=[z.name]) z_expected = np.array([3.0, 8.0, 6.0]) self.assertEqual((z_value == z_expected).all(), True) def test_dygraph(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): np_x = np.array([2, 3, 4]).astype('float64') np_y = np.array([1, 5, 2]).astype('float64') - x = fluid.dygraph.to_variable(np_x) - y = fluid.dygraph.to_variable(np_y) + x = base.dygraph.to_variable(np_x) + y = base.dygraph.to_variable(np_y) z = self._executed_api(x, y) np_z = z.numpy() z_expected = np.array([3.0, 8.0, 6.0]) @@ -691,8 +691,8 @@ def setUp(self): self.init_input_output() self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), + 'X': OpTest.np_dtype_to_base_dtype(self.x), + 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.attrs = {'axis': -1, 'use_mkldnn': False} self.outputs = {'Out': self.out} @@ -877,7 +877,7 @@ def _floa32_bfloat16_or_float16_add(self, y_dtype): class TestTensorFloa32Bfloat16Add(TestTensorFloa32Bfloat16OrFloat16Add): def test_floa32_bfloat16_add(self): place = core.CUDAPlace(0) - with fluid.dygraph.base.guard(place=place): + with base.dygraph.base.guard(place=place): self._floa32_bfloat16_or_float16_add(y_dtype=paddle.bfloat16) @@ -888,7 +888,7 @@ def test_floa32_bfloat16_add(self): class TestTensorFloa32Float16Add(TestTensorFloa32Bfloat16OrFloat16Add): def test_floa32_float16_add(self): place = core.CUDAPlace(0) - with fluid.dygraph.base.guard(place=place): + with base.dygraph.base.guard(place=place): self._floa32_bfloat16_or_float16_add(y_dtype=paddle.float16) diff --git a/test/legacy_test/test_elementwise_div_op.py b/test/legacy_test/test_elementwise_div_op.py index 7fb7ec87e4501..8a2adb65908bb 100644 --- a/test/legacy_test/test_elementwise_div_op.py +++ b/test/legacy_test/test_elementwise_div_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def broadcast_wrapper(shape=[1, 10, 12, 1]): @@ -472,13 +472,13 @@ def test_check_gradient(self): class TestElementwiseDivBroadcast(unittest.TestCase): def test_shape_with_batch_sizes(self): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): x_var = paddle.static.data( name='x', dtype='float32', shape=[None, 3, None, None] ) one = 2.0 out = one / x_var - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) x = np.random.uniform(0.1, 0.6, (1, 3, 32, 32)).astype("float32") (out_result,) = exe.run(feed={'x': x}, fetch_list=[out]) self.assertEqual((out_result == (2 / x)).all(), True) @@ -486,7 +486,7 @@ def test_shape_with_batch_sizes(self): class TestDivideOp(unittest.TestCase): def test_name(self): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): x = paddle.static.data(name="x", shape=[2, 3], dtype="float32") y = paddle.static.data(name='y', shape=[2, 3], dtype='float32') @@ -494,7 +494,7 @@ def test_name(self): self.assertEqual(('div_res' in y_1.name), True) def test_dygraph(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): np_x = np.array([2, 3, 4]).astype('float64') np_y = np.array([1, 5, 2]).astype('float64') x = paddle.to_tensor(np_x) @@ -513,8 +513,8 @@ def setUp(self): self.init_input_output() self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), + 'X': OpTest.np_dtype_to_base_dtype(self.x), + 
'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.attrs = {'axis': -1, 'use_mkldnn': False} self.outputs = {'Out': self.out} diff --git a/test/legacy_test/test_elementwise_floordiv_op.py b/test/legacy_test/test_elementwise_floordiv_op.py index 79f5c00deabed..17a5d0f2ed91a 100644 --- a/test/legacy_test/test_elementwise_floordiv_op.py +++ b/test/legacy_test/test_elementwise_floordiv_op.py @@ -20,7 +20,7 @@ from eager_op_test import OpTest, paddle_static_guard import paddle -from paddle import fluid +from paddle import base class TestElementwiseModOp(OpTest): @@ -38,8 +38,8 @@ def setUp(self): self.init_axis() self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), + 'X': OpTest.np_dtype_to_base_dtype(self.x), + 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} self.outputs = {'Out': self.out} @@ -106,7 +106,7 @@ def device_guard(device=None): class TestFloorDivideOp(unittest.TestCase): def test_name(self): with paddle_static_guard(): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): x = paddle.static.data(name="x", shape=[2, 3], dtype="int64") y = paddle.static.data(name='y', shape=[2, 3], dtype='int64') @@ -115,9 +115,9 @@ def test_name(self): def test_dygraph(self): paddle.disable_static() - places = [fluid.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places = [base.CPUPlace()] + if base.core.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) for p in places: for dtype in ( 'uint8', diff --git a/test/legacy_test/test_elementwise_floormod_op.py b/test/legacy_test/test_elementwise_floormod_op.py index 42082a3b7b2c8..9a3bb665f80fb 100644 --- a/test/legacy_test/test_elementwise_floormod_op.py +++ b/test/legacy_test/test_elementwise_floormod_op.py @@ -16,12 +16,12 @@ import unittest import paddle -from paddle import fluid +from paddle import base class TestFloorModOp(unittest.TestCase): def test_dygraph(self): - with fluid.dygraph.guard(fluid.CPUPlace()): + with base.dygraph.guard(base.CPUPlace()): # mod by zero x = paddle.to_tensor([59], dtype='int32') y = paddle.to_tensor([0], dtype='int32') diff --git a/test/legacy_test/test_elementwise_gradient_op.py b/test/legacy_test/test_elementwise_gradient_op.py index 2e518a2afb389..e06ac89200a7c 100644 --- a/test/legacy_test/test_elementwise_gradient_op.py +++ b/test/legacy_test/test_elementwise_gradient_op.py @@ -16,8 +16,8 @@ import numpy as np -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestElementWiseAddOp(unittest.TestCase): @@ -45,8 +45,8 @@ def test_with_place(place): var_names = ['x', 'y', 'out', 'y@GRAD', 'x@GRAD', 'out@GRAD'] ground_truth = {name: var_dict[name] for name in var_names} - program = fluid.Program() - with fluid.program_guard(program): + program = base.Program() + with base.program_guard(program): block = program.global_block() for name in ground_truth: block.create_var( @@ -83,7 +83,7 @@ def test_with_place(place): grad_var = block.desc.find_var(arg.encode("ascii")) grad_var.set_dtype(core.VarDesc.VarType.FP32) - exe = fluid.Executor(place) + exe = base.Executor(place) out = exe.run( program, feed={ diff --git a/test/legacy_test/test_elementwise_heaviside_op.py b/test/legacy_test/test_elementwise_heaviside_op.py index a034c74fb8415..5e02e6cd0a945 100644 --- a/test/legacy_test/test_elementwise_heaviside_op.py +++ 
b/test/legacy_test/test_elementwise_heaviside_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core def Heaviside_grad(x, y, dout, astype="float16", is_bfloat16=False): diff --git a/test/legacy_test/test_elementwise_max_op.py b/test/legacy_test/test_elementwise_max_op.py index 1283cfe287232..4c94992cec075 100644 --- a/test/legacy_test/test_elementwise_max_op.py +++ b/test/legacy_test/test_elementwise_max_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci import paddle -from paddle.fluid import core +from paddle.base import core class TestElementwiseOp(OpTest): diff --git a/test/legacy_test/test_elementwise_min_op.py b/test/legacy_test/test_elementwise_min_op.py index 9ba527ef8035a..9d2e5b76b9a48 100644 --- a/test/legacy_test/test_elementwise_min_op.py +++ b/test/legacy_test/test_elementwise_min_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() @@ -344,7 +344,7 @@ def test_check_output(self): def test_check_grad_normal(self): places = self._get_places() for place in places: - if type(place) is paddle.fluid.libpaddle.CPUPlace: + if type(place) is paddle.base.libpaddle.CPUPlace: check_prim = False else: check_prim = True @@ -369,7 +369,7 @@ def test_check_grad_normal(self): def test_check_grad_ingore_x(self): places = self._get_places() for place in places: - if isinstance(place, paddle.fluid.libpaddle.CPUPlace): + if isinstance(place, paddle.base.libpaddle.CPUPlace): check_prim = False else: check_prim = True @@ -394,7 +394,7 @@ def test_check_grad_ingore_x(self): def test_check_grad_ingore_y(self): places = self._get_places() for place in places: - if isinstance(place, paddle.fluid.libpaddle.CPUPlace): + if isinstance(place, paddle.base.libpaddle.CPUPlace): check_prim = False else: check_prim = True diff --git a/test/legacy_test/test_elementwise_mod_op.py b/test/legacy_test/test_elementwise_mod_op.py index 8f34328241fef..d75ae9e0a181e 100644 --- a/test/legacy_test/test_elementwise_mod_op.py +++ b/test/legacy_test/test_elementwise_mod_op.py @@ -23,8 +23,8 @@ ) import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestElementwiseModOp(OpTest): @@ -41,8 +41,8 @@ def setUp(self): self.init_axis() self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), + 'X': OpTest.np_dtype_to_base_dtype(self.x), + 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} self.outputs = {'Out': self.out} @@ -178,10 +178,10 @@ def setUp(self): self.init_axis() self.inputs = { 'X': convert_float_to_uint16( - OpTest.np_dtype_to_fluid_dtype(self.x) + OpTest.np_dtype_to_base_dtype(self.x) ), 'Y': convert_float_to_uint16( - OpTest.np_dtype_to_fluid_dtype(self.y) + OpTest.np_dtype_to_base_dtype(self.y) ), } self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} @@ -217,7 +217,7 @@ def _executed_api(self, x, y, name=None): return paddle.remainder(x, y, name) def test_name(self): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): x = paddle.static.data(name="x", shape=[2, 3], dtype="int64") y = paddle.static.data(name='y', shape=[2, 3], dtype='int64') @@ -225,7 +225,7 @@ def test_name(self): 
self.assertEqual(('div_res' in y_1.name), True) def test_dygraph(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): np_x = np.array([2, 3, 8, 7]).astype('int64') np_y = np.array([1, 5, 3, 3]).astype('int64') x = paddle.to_tensor(np_x) diff --git a/test/legacy_test/test_elementwise_mul_op.py b/test/legacy_test/test_elementwise_mul_op.py index 987d15419109c..281087ce26420 100644 --- a/test/legacy_test/test_elementwise_mul_op.py +++ b/test/legacy_test/test_elementwise_mul_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci import paddle -from paddle.fluid import core +from paddle.base import core class ElementwiseMulOp(OpTest): @@ -39,8 +39,8 @@ def setUp(self): self.if_enable_cinn() self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), + 'X': OpTest.np_dtype_to_base_dtype(self.x), + 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.outputs = {'Out': self.out} self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} @@ -107,8 +107,8 @@ def setUp(self): self.if_enable_cinn() self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), + 'X': OpTest.np_dtype_to_base_dtype(self.x), + 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.outputs = {'Out': self.out} self.attrs = {'axis': self.axis} @@ -182,10 +182,10 @@ def setUp(self): self.axis = -1 self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype( + 'X': OpTest.np_dtype_to_base_dtype( convert_float_to_uint16(self.x) ), - 'Y': OpTest.np_dtype_to_fluid_dtype( + 'Y': OpTest.np_dtype_to_base_dtype( convert_float_to_uint16(self.y) ), } @@ -302,8 +302,8 @@ def init_input_attr_output(self): self.y = np.random.uniform(0.1, 1, [17, 17]).astype(self.dtype) self.out = np.multiply(self.x, self.y) self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), + 'X': OpTest.np_dtype_to_base_dtype(self.x), + 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.outputs = {'Out': self.out} self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} @@ -327,8 +327,8 @@ def init_input_attr_output(self): self.y = np.random.rand(100).astype(self.dtype) self.out = self.x * self.y.reshape(100, 1, 1) self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), + 'X': OpTest.np_dtype_to_base_dtype(self.x), + 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.outputs = {'Out': self.out} self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} @@ -500,8 +500,8 @@ def setUp(self): self.init_input_output() self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), + 'X': OpTest.np_dtype_to_base_dtype(self.x), + 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.attrs = {'axis': -1, 'use_mkldnn': False} self.outputs = {'Out': self.out} diff --git a/test/legacy_test/test_elementwise_nn_grad.py b/test/legacy_test/test_elementwise_nn_grad.py index 9a4baa7c526c3..2f7b7c5fa41d0 100644 --- a/test/legacy_test/test_elementwise_nn_grad.py +++ b/test/legacy_test/test_elementwise_nn_grad.py @@ -19,8 +19,8 @@ from decorator_helper import prog_scope import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestElementwiseMulDoubleGradCheck(unittest.TestCase): @@ -45,9 +45,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = 
[base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -74,9 +74,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -103,9 +103,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -132,9 +132,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -171,9 +171,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -200,9 +200,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -241,9 +241,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -271,9 +271,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -300,9 +300,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -329,9 +329,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -368,9 +368,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -397,9 +397,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) diff --git a/test/legacy_test/test_elementwise_pow_op.py b/test/legacy_test/test_elementwise_pow_op.py index 1f7566df1bb19..ce8a63355c5c7 100644 --- a/test/legacy_test/test_elementwise_pow_op.py +++ b/test/legacy_test/test_elementwise_pow_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, convert_float_to_uint16, 
skip_check_grad_ci import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def pow_grad(x, y, dout): @@ -217,13 +217,13 @@ def setUp(self): ).astype("int") def test_grad(self): - places = [fluid.CPUPlace()] - if fluid.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places = [base.CPUPlace()] + if base.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) for place in places: - with fluid.dygraph.guard(place): - x = fluid.dygraph.to_variable(self.x, zero_copy=False) - y = fluid.dygraph.to_variable(self.y, zero_copy=False) + with base.dygraph.guard(place): + x = base.dygraph.to_variable(self.x, zero_copy=False) + y = base.dygraph.to_variable(self.y, zero_copy=False) x.stop_gradient = False y.stop_gradient = False res = x**y diff --git a/test/legacy_test/test_elementwise_sub_op.py b/test/legacy_test/test_elementwise_sub_op.py index a2f199fb4b21d..87abcc2b07e57 100644 --- a/test/legacy_test/test_elementwise_sub_op.py +++ b/test/legacy_test/test_elementwise_sub_op.py @@ -20,9 +20,9 @@ from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.layer_helper import LayerHelper +from paddle import base +from paddle.base import core +from paddle.base.layer_helper import LayerHelper class TestElementwiseOp(OpTest): @@ -789,8 +789,8 @@ def setUp(self): self.init_input_output() self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), + 'X': OpTest.np_dtype_to_base_dtype(self.x), + 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.attrs = {'axis': -1, 'use_mkldnn': False} self.outputs = {'Out': self.out} @@ -862,7 +862,7 @@ def _executed_api(self, x, y, name=None): return paddle.subtract(x, y, name) def test_name(self): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): x = paddle.static.data(name="x", shape=[2, 3], dtype="float32") y = paddle.static.data(name='y', shape=[2, 3], dtype=np.float32) @@ -870,7 +870,7 @@ def test_name(self): self.assertEqual(('subtract_res' in y_1.name), True) def test_declarative(self): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): def gen_data(): return { @@ -881,18 +881,18 @@ def gen_data(): x = paddle.static.data(name="x", shape=[3], dtype=np.float32) y = paddle.static.data(name="y", shape=[3], dtype=np.float32) z = self._executed_api(x, y) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) z_value = exe.run(feed=gen_data(), fetch_list=[z.name]) z_expected = np.array([1.0, -2.0, 2.0]) self.assertEqual((z_value == z_expected).all(), True) def test_dygraph(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): np_x = np.array([2, 3, 4]).astype('float64') np_y = np.array([1, 5, 2]).astype('float64') - x = fluid.dygraph.to_variable(np_x) - y = fluid.dygraph.to_variable(np_y) + x = base.dygraph.to_variable(np_x) + y = base.dygraph.to_variable(np_y) z = self._executed_api(x, y) np_z = z.numpy(False) z_expected = np.array([1.0, -2.0, 2.0]) diff --git a/test/legacy_test/test_ema.py b/test/legacy_test/test_ema.py index 54625de715e45..acfd4479fe096 100644 --- a/test/legacy_test/test_ema.py +++ b/test/legacy_test/test_ema.py @@ -17,21 +17,21 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base class TestExponentialMovingAverage(unittest.TestCase): def 
setUp(self): - self._places = [fluid.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): - self._places.append(fluid.CUDAPlace(0)) + self._places = [base.CPUPlace()] + if base.core.is_compiled_with_cuda(): + self._places.append(base.CUDAPlace(0)) self._ema_decay = 0.999 self._param_name = "fc.weight" - self._train_program = fluid.Program() - self._startup_prog = fluid.Program() - with fluid.program_guard(self._train_program, self._startup_prog): - with fluid.unique_name.guard(): + self._train_program = base.Program() + self._startup_prog = base.Program() + with base.program_guard(self._train_program, self._startup_prog): + with base.unique_name.guard(): data = paddle.static.data( name='x', shape=[-1, 5], dtype='float32' ) @@ -40,7 +40,7 @@ def setUp(self): ) cost = paddle.mean(hidden) - self._test_program = fluid.default_main_program().clone( + self._test_program = base.default_main_program().clone( for_test=True ) @@ -53,7 +53,7 @@ def setUp(self): self._ema.update() def train(self, place): - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(self._startup_prog) params = [] @@ -61,17 +61,17 @@ def train(self, place): for batch_id in range(3): data = np.random.random(size=(10, 5)).astype('float32') tmp_param = np.array( - fluid.global_scope().find_var(self._param_name).get_tensor() + base.global_scope().find_var(self._param_name).get_tensor() ) exe.run(program=self._train_program, feed={'x': data}) tmp_param = np.array( - fluid.global_scope().find_var(self._param_name).get_tensor() + base.global_scope().find_var(self._param_name).get_tensor() ) params.append(tmp_param) with self._ema.apply(exe): final_ema = np.array( - fluid.global_scope().find_var(self._param_name).get_tensor() + base.global_scope().find_var(self._param_name).get_tensor() ) data = np.random.random(size=(10, 5)).astype('float32') exe.run(program=self._test_program, feed={'x': data}) diff --git a/test/legacy_test/test_embedding_id_stop_gradient.py b/test/legacy_test/test_embedding_id_stop_gradient.py index eef33dd44ec5f..e39d9c4674c67 100644 --- a/test/legacy_test/test_embedding_id_stop_gradient.py +++ b/test/legacy_test/test_embedding_id_stop_gradient.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base class TestEmbeddingIdStopGradientBase(unittest.TestCase): @@ -26,9 +26,9 @@ def setUp(self): self.iteration = 10 def get_places(self): - places = [fluid.CPUPlace()] - if fluid.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places = [base.CPUPlace()] + if base.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) return places @@ -43,12 +43,12 @@ def run_program(self, place, stop_gradient=False): paddle.seed(1) paddle.framework.random._manual_program_seed(1) - startup_program = fluid.Program() - main_program = fluid.Program() + startup_program = base.Program() + main_program = base.Program() - scope = fluid.Scope() - with fluid.program_guard(main_program, startup_program): - with fluid.scope_guard(scope): + scope = base.Scope() + with base.program_guard(main_program, startup_program): + with base.scope_guard(scope): x_1 = paddle.static.data(name='x1', shape=[4, 1], dtype='int64') x_2 = paddle.static.data(name='x2', shape=[4, 1], dtype='int64') x = paddle.concat([x_1, x_2], axis=-1) @@ -65,7 +65,7 @@ def run_program(self, place, stop_gradient=False): optim = paddle.optimizer.SGD(learning_rate=0.001) optim.minimize(avg_cost) - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(startup_program) x1_data = np.random.randint(0, 
9, x_1.shape).astype('int64') diff --git a/test/legacy_test/test_empty_like_op.py b/test/legacy_test/test_empty_like_op.py index 164275b1a7d83..6074e74ae30f1 100644 --- a/test/legacy_test/test_empty_like_op.py +++ b/test/legacy_test/test_empty_like_op.py @@ -18,8 +18,8 @@ from eager_op_test import convert_uint16_to_float import paddle -from paddle.fluid import core -from paddle.fluid.data_feeder import convert_dtype +from paddle.base import core +from paddle.base.data_feeder import convert_dtype from paddle.static import Program, program_guard @@ -214,7 +214,7 @@ def init_config(self): def test_static_graph(self): paddle.enable_static() - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): place = paddle.CUDAPlace(0) with paddle.static.program_guard( paddle.static.Program(), paddle.static.Program() @@ -247,7 +247,7 @@ def init_config(self): def test_static_graph(self): paddle.enable_static() - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): place = paddle.CUDAPlace(0) with paddle.static.program_guard( paddle.static.Program(), paddle.static.Program() diff --git a/test/legacy_test/test_empty_op.py b/test/legacy_test/test_empty_op.py index bfd3184c4bb2b..19980e55d4718 100644 --- a/test/legacy_test/test_empty_op.py +++ b/test/legacy_test/test_empty_op.py @@ -18,9 +18,9 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.framework import convert_np_dtype_to_dtype_ +from paddle import base +from paddle.base import core +from paddle.base.framework import convert_np_dtype_to_dtype_ # Situation 1: Attr(shape) is a list(without tensor) @@ -263,7 +263,7 @@ def test_static_graph(self): place = paddle.CPUPlace() exe = paddle.static.Executor(place) res_1, res_2, res_3, res_4, res_5, res_6 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={ "shape_tensor_int32": np.array([200, 3]).astype("int32"), "shape_tensor_int64": np.array([200, 3]).astype("int64"), diff --git a/test/legacy_test/test_entry_attr.py b/test/legacy_test/test_entry_attr.py index a475adca2bc5c..fc73bdf9c399a 100644 --- a/test/legacy_test/test_entry_attr.py +++ b/test/legacy_test/test_entry_attr.py @@ -18,7 +18,7 @@ import unittest -from paddle import fluid +from paddle import base from paddle.distributed import ( CountFilterEntry, ProbabilityEntry, @@ -62,11 +62,11 @@ def showclick_entry(self): self.assertEqual("show_click_entry:show:click", ss) def spaese_layer(self): - prog = fluid.Program() - scope = fluid.core.Scope() + prog = base.Program() + scope = base.core.Scope() - with fluid.scope_guard(scope): - with fluid.program_guard(prog): + with base.scope_guard(scope): + with base.program_guard(prog): input = paddle.static.data( name="dnn_data", shape=[-1, 1], dtype="int64", lod_level=1 ) @@ -76,7 +76,7 @@ def spaese_layer(self): size=[100, 10], is_test=False, entry=prob, - param_attr=fluid.ParamAttr(name="deep_embedding"), + param_attr=base.ParamAttr(name="deep_embedding"), ) pool = paddle.static.nn.sequence_lod.sequence_pool( diff --git a/test/legacy_test/test_entry_attr2.py b/test/legacy_test/test_entry_attr2.py index 358e43c088cd2..c00013bb746e8 100644 --- a/test/legacy_test/test_entry_attr2.py +++ b/test/legacy_test/test_entry_attr2.py @@ -18,16 +18,16 @@ import unittest -from paddle import fluid +from paddle import base class EntryAttrChecks(unittest.TestCase): def embedding_layer(self): - prog = fluid.Program() - scope = 
fluid.core.Scope() + prog = base.Program() + scope = base.core.Scope() - with fluid.scope_guard(scope): - with fluid.program_guard(prog): + with base.scope_guard(scope): + with base.program_guard(prog): input = paddle.static.data( name="dnn_data", shape=[-1, 1], dtype="int64", lod_level=1 ) @@ -36,7 +36,7 @@ def embedding_layer(self): size=[100, 10], is_sparse=True, is_distributed=True, - param_attr=fluid.ParamAttr(name="deep_embedding"), + param_attr=base.ParamAttr(name="deep_embedding"), ) pool = paddle.static.nn.sequence_lod.sequence_pool( diff --git a/test/legacy_test/test_erf_op.py b/test/legacy_test/test_erf_op.py index 23ccec74c2386..e7eb19b156a70 100644 --- a/test/legacy_test/test_erf_op.py +++ b/test/legacy_test/test_erf_op.py @@ -19,8 +19,8 @@ from scipy.special import erf import paddle -import paddle.fluid.dygraph as dg -from paddle import fluid +import paddle.base.dygraph as dg +from paddle import base class TestErfOp(OpTest): @@ -65,14 +65,14 @@ def _test_case(self, place): np.testing.assert_allclose(y_ref, y_test, rtol=1e-05) def test_case(self): - with paddle.fluid.framework._static_guard(): - self._test_case(fluid.CPUPlace()) - if fluid.is_compiled_with_cuda(): - self._test_case(fluid.CUDAPlace(0)) + with paddle.base.framework._static_guard(): + self._test_case(base.CPUPlace()) + if base.is_compiled_with_cuda(): + self._test_case(base.CUDAPlace(0)) def test_name(self): - with paddle.fluid.framework._static_guard(): - with fluid.program_guard(fluid.Program()): + with paddle.base.framework._static_guard(): + with base.program_guard(base.Program()): x = paddle.static.data('x', [3, 4]) y = paddle.erf(x, name='erf') self.assertTrue('erf' in y.name) @@ -99,9 +99,9 @@ def test_check_grad(self): @unittest.skipIf( - not paddle.fluid.core.is_compiled_with_cuda() - or not paddle.fluid.core.is_bfloat16_supported( - paddle.fluid.core.CUDAPlace(0) + not paddle.base.core.is_compiled_with_cuda() + or not paddle.base.core.is_bfloat16_supported( + paddle.base.core.CUDAPlace(0) ), "core is not complied with CUDA and not support the bfloat16", ) @@ -119,11 +119,11 @@ def setUp(self): self.outputs = {'Out': convert_float_to_uint16(y_ref)} def test_check_output(self): - place = paddle.fluid.core.CUDAPlace(0) + place = paddle.base.core.CUDAPlace(0) self.check_output_with_place(place) def test_check_grad(self): - place = paddle.fluid.core.CUDAPlace(0) + place = paddle.base.core.CUDAPlace(0) self.check_grad_with_place(place, ['X'], 'Out', check_prim=True) diff --git a/test/legacy_test/test_erfinv_op.py b/test/legacy_test/test_erfinv_op.py index f6a51285de6e2..1a4f54230991a 100644 --- a/test/legacy_test/test_erfinv_op.py +++ b/test/legacy_test/test_erfinv_op.py @@ -23,7 +23,7 @@ from scipy.special import erfinv import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() np.random.seed(0) diff --git a/test/legacy_test/test_error_clip.py b/test/legacy_test/test_error_clip.py index 1fba67df22677..754410aeb3b72 100644 --- a/test/legacy_test/test_error_clip.py +++ b/test/legacy_test/test_error_clip.py @@ -15,16 +15,16 @@ import sys import paddle -from paddle import fluid +from paddle import base BATCH_SIZE = 128 CLIP_MAX = 2e-6 CLIP_MIN = -1e-6 paddle.enable_static() -prog = fluid.framework.Program() +prog = base.framework.Program() -with fluid.program_guard(main_program=prog): +with base.program_guard(main_program=prog): image = paddle.static.data(name='x', shape=[-1, 784], dtype='float32') hidden1 = paddle.static.nn.fc(x=image, size=128, activation='relu') @@ -44,8 
+44,8 @@ ) avg_cost_clip = prog_clip.block(0).var(avg_cost.name) -fluid.backward.append_backward(loss=avg_cost) -fluid.backward.append_backward( +base.backward.append_backward(loss=avg_cost) +base.backward.append_backward( loss=avg_cost_clip, callbacks=[paddle.nn.clip.error_clip_callback] ) @@ -60,10 +60,10 @@ batch_size=BATCH_SIZE, ) -place = fluid.CPUPlace() -exe = fluid.Executor(place) -feeder = fluid.DataFeeder(feed_list=[image, label], place=place) -exe.run(fluid.default_startup_program()) +place = base.CPUPlace() +exe = base.Executor(place) +feeder = base.DataFeeder(feed_list=[image, label], place=place) +exe.run(base.default_startup_program()) count = 0 for data in train_reader(): diff --git a/test/legacy_test/test_exception.py b/test/legacy_test/test_exception.py index 1c8e40cf94ed7..d8e998bf7f9eb 100644 --- a/test/legacy_test/test_exception.py +++ b/test/legacy_test/test_exception.py @@ -17,8 +17,8 @@ import numpy import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestException(unittest.TestCase): @@ -54,7 +54,7 @@ class TestExceptionNoCStack(unittest.TestCase): def setUp(self): paddle.enable_static() # test no C++ stack format - fluid.set_flags({'FLAGS_call_stack_level': 1}) + base.set_flags({'FLAGS_call_stack_level': 1}) def test_exception_in_static_mode(self): x = paddle.static.data(name='X', shape=[-1, 13], dtype='float32') @@ -65,26 +65,26 @@ def test_exception_in_static_mode(self): paddle.optimizer.SGD(learning_rate=0.01).minimize(avg_loss) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + place = base.CPUPlace() + exe = base.Executor(place) + exe.run(base.default_startup_program()) x = numpy.random.random(size=(8, 12)).astype('float32') y = numpy.random.random(size=(8, 1)).astype('float32') with self.assertRaises(ValueError): exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={'X': x, 'Y': y}, fetch_list=[avg_loss.name], ) def test_exception_in_dynamic_mode(self): - place = fluid.CPUPlace() - with fluid.dygraph.guard(place): + place = base.CPUPlace() + with base.dygraph.guard(place): x = numpy.random.random(size=(10, 2)).astype('float32') linear = paddle.nn.Linear(1, 10) - data = fluid.dygraph.to_variable(x) + data = base.dygraph.to_variable(x) with self.assertRaises(ValueError): res = linear(data) diff --git a/test/legacy_test/test_executor_and_mul.py b/test/legacy_test/test_executor_and_mul.py index b7770c17e206c..1b577289e8718 100644 --- a/test/legacy_test/test_executor_and_mul.py +++ b/test/legacy_test/test_executor_and_mul.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid.executor import Executor +from paddle.base.executor import Executor from paddle.static import data from paddle.tensor import array_write diff --git a/test/legacy_test/test_executor_and_use_program_cache.py b/test/legacy_test/test_executor_and_use_program_cache.py index f209407fcf2f2..3e6536e277e69 100644 --- a/test/legacy_test/test_executor_and_use_program_cache.py +++ b/test/legacy_test/test_executor_and_use_program_cache.py @@ -18,14 +18,14 @@ from test_eager_deletion_padding_rnn import PaddingRNNTestBase, RNNConfig import paddle -from paddle import fluid +from paddle import base class TestExecutor(unittest.TestCase): def test_mul(self): - main_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(main_program, startup_program): + main_program = base.Program() + startup_program = 
base.Program() + with base.program_guard(main_program, startup_program): a = paddle.static.data(name='a', shape=[-1, 784], dtype='float32') b = paddle.static.data(name='b', shape=[784, 100], dtype='float32') a.desc.set_need_check_feed(False) @@ -38,7 +38,7 @@ def test_mul(self): out_np = np.dot(a_np, b_np) place = paddle.CPUPlace() - exe = fluid.Executor(place) + exe = base.Executor(place) def _train(use_program_cache, max_iters=1): import time @@ -86,7 +86,7 @@ def train_and_save_inference_program( self, rnn_model="static", use_program_cache=True ): config = RNNConfig("test", rnn_model) - with fluid.scope_guard(fluid.Scope()): + with base.scope_guard(base.Scope()): self.train(config, use_program_cache) paddle.static.io.save_inference_model( path_prefix="padding_rnn." + rnn_model + ".inference_model", @@ -125,7 +125,7 @@ def test_inference_output(self): ).astype("float32") for use_program_cache in [False, True]: - with fluid.scope_guard(fluid.Scope()): + with base.scope_guard(base.Scope()): save_dirname = ( "padding_rnn." + rnn_model + ".inference_model" ) diff --git a/test/legacy_test/test_executor_check_feed.py b/test/legacy_test/test_executor_check_feed.py index 02529bce96479..78fb383885ec4 100644 --- a/test/legacy_test/test_executor_check_feed.py +++ b/test/legacy_test/test_executor_check_feed.py @@ -15,7 +15,7 @@ import unittest import paddle -from paddle import fluid +from paddle import base class TestExecutor(unittest.TestCase): @@ -34,13 +34,13 @@ def net(self): return paddle.to_tensor(lr), avg_cost def test_program_check_feed(self): - main_program = fluid.Program() - startup_program = fluid.Program() - scope = fluid.Scope() - with fluid.program_guard(main_program, startup_program): - with fluid.scope_guard(scope): - cpu = fluid.CPUPlace() - exe = fluid.Executor(cpu) + main_program = base.Program() + startup_program = base.Program() + scope = base.Scope() + with base.program_guard(main_program, startup_program): + with base.scope_guard(scope): + cpu = base.CPUPlace() + exe = base.Executor(cpu) lr, cost = self.net() exe.run(startup_program) train_data = [[1.0], [2.0], [3.0], [4.0]] @@ -55,16 +55,16 @@ def test_program_check_feed(self): ) def test_compiled_program_check_feed(self): - main_program = fluid.Program() - startup_program = fluid.Program() - scope = fluid.Scope() - with fluid.program_guard(main_program, startup_program): - with fluid.scope_guard(scope): - cpu = fluid.CPUPlace() - exe = fluid.Executor(cpu) + main_program = base.Program() + startup_program = base.Program() + scope = base.Scope() + with base.program_guard(main_program, startup_program): + with base.scope_guard(scope): + cpu = base.CPUPlace() + exe = base.Executor(cpu) lr, cost = self.net() exe.run(startup_program) - compiled_prog = fluid.CompiledProgram(main_program) + compiled_prog = base.CompiledProgram(main_program) train_data = [[1.0], [2.0], [3.0], [4.0]] y_true = [[2.0], [4.0], [6.0], [8.0]] a = 0 diff --git a/test/legacy_test/test_executor_feed_non_tensor.py b/test/legacy_test/test_executor_feed_non_tensor.py index d92b7370482c0..f6bec1de5d6b5 100644 --- a/test/legacy_test/test_executor_feed_non_tensor.py +++ b/test/legacy_test/test_executor_feed_non_tensor.py @@ -17,7 +17,7 @@ import numpy import paddle -from paddle import fluid +from paddle import base class TestExecutor(unittest.TestCase): @@ -36,13 +36,13 @@ def net(self): return paddle.to_tensor(lr), avg_cost def test_program_feed_float(self): - main_program = fluid.Program() - startup_program = fluid.Program() - scope = fluid.Scope() - with 
fluid.program_guard(main_program, startup_program): - with fluid.scope_guard(scope): - cpu = fluid.CPUPlace() - exe = fluid.Executor(cpu) + main_program = base.Program() + startup_program = base.Program() + scope = base.Scope() + with base.program_guard(main_program, startup_program): + with base.scope_guard(scope): + cpu = base.CPUPlace() + exe = base.Executor(cpu) lr, cost = self.net() exe.run(startup_program) train_data = numpy.array([[1.0], [2.0], [3.0], [4.0]]).astype( @@ -58,17 +58,17 @@ def test_program_feed_float(self): return_numpy=False, ) self.assertEqual(_lr._dtype(), lr.dtype) - self.assertEqual(_lr._dtype(), fluid.core.VarDesc.VarType.FP32) + self.assertEqual(_lr._dtype(), base.core.VarDesc.VarType.FP32) self.assertEqual(type(a), float) def test_program_feed_int(self): - main_program = fluid.Program() - startup_program = fluid.Program() - scope = fluid.Scope() - with fluid.program_guard(main_program, startup_program): - with fluid.scope_guard(scope): - cpu = fluid.CPUPlace() - exe = fluid.Executor(cpu) + main_program = base.Program() + startup_program = base.Program() + scope = base.Scope() + with base.program_guard(main_program, startup_program): + with base.scope_guard(scope): + cpu = base.CPUPlace() + exe = base.Executor(cpu) lr, cost = self.net() exe.run(startup_program) train_data = numpy.array([[1.0], [2.0], [3.0], [4.0]]).astype( @@ -84,17 +84,17 @@ def test_program_feed_int(self): return_numpy=False, ) self.assertEqual(_lr._dtype(), lr.dtype) - self.assertEqual(_lr._dtype(), fluid.core.VarDesc.VarType.FP32) + self.assertEqual(_lr._dtype(), base.core.VarDesc.VarType.FP32) self.assertEqual(type(a), int) def test_program_feed_list(self): - main_program = fluid.Program() - startup_program = fluid.Program() - scope = fluid.Scope() - with fluid.program_guard(main_program, startup_program): - with fluid.scope_guard(scope): - cpu = fluid.CPUPlace() - exe = fluid.Executor(cpu) + main_program = base.Program() + startup_program = base.Program() + scope = base.Scope() + with base.program_guard(main_program, startup_program): + with base.scope_guard(scope): + cpu = base.CPUPlace() + exe = base.Executor(cpu) lr, cost = self.net() exe.run(startup_program) train_data = [[1.0], [2.0], [3.0], [4.0]] @@ -106,20 +106,20 @@ def test_program_feed_list(self): return_numpy=False, ) self.assertEqual(_lr._dtype(), lr.dtype) - self.assertEqual(_lr._dtype(), fluid.core.VarDesc.VarType.FP32) + self.assertEqual(_lr._dtype(), base.core.VarDesc.VarType.FP32) self.assertEqual(type(y_true), list) def test_compiled_program_feed_scalar(self): - main_program = fluid.Program() - startup_program = fluid.Program() - scope = fluid.Scope() - with fluid.program_guard(main_program, startup_program): - with fluid.scope_guard(scope): + main_program = base.Program() + startup_program = base.Program() + scope = base.Scope() + with base.program_guard(main_program, startup_program): + with base.scope_guard(scope): lr, cost = self.net() - cpu = fluid.CPUPlace() - exe = fluid.Executor(cpu) + cpu = base.CPUPlace() + exe = base.Executor(cpu) exe.run(startup_program) - compiled_prog = fluid.CompiledProgram(main_program) + compiled_prog = base.CompiledProgram(main_program) train_data = numpy.array([[1.0], [2.0], [3.0], [4.0]]).astype( 'float32' ) @@ -134,61 +134,61 @@ def test_compiled_program_feed_scalar(self): return_numpy=False, ) self.assertEqual(_lr._dtype(), lr.dtype) - self.assertEqual(_lr._dtype(), fluid.core.VarDesc.VarType.FP32) + self.assertEqual(_lr._dtype(), base.core.VarDesc.VarType.FP32) 
self.assertEqual(type(a), float) class TestAsLodTensor(unittest.TestCase): def test_as_lodtensor_int32(self): - cpu = fluid.CPUPlace() - tensor = fluid.executor._as_lodtensor( - 1.0, cpu, fluid.core.VarDesc.VarType.INT32 + cpu = base.CPUPlace() + tensor = base.executor._as_lodtensor( + 1.0, cpu, base.core.VarDesc.VarType.INT32 ) - self.assertEqual(tensor._dtype(), fluid.core.VarDesc.VarType.INT32) + self.assertEqual(tensor._dtype(), base.core.VarDesc.VarType.INT32) def test_as_lodtensor_fp64(self): - cpu = fluid.CPUPlace() - tensor = fluid.executor._as_lodtensor( - 1, cpu, fluid.core.VarDesc.VarType.FP64 + cpu = base.CPUPlace() + tensor = base.executor._as_lodtensor( + 1, cpu, base.core.VarDesc.VarType.FP64 ) - self.assertEqual(tensor._dtype(), fluid.core.VarDesc.VarType.FP64) + self.assertEqual(tensor._dtype(), base.core.VarDesc.VarType.FP64) def test_as_lodtensor_assertion_error(self): - cpu = fluid.CPUPlace() - self.assertRaises(AssertionError, fluid.executor._as_lodtensor, 1, cpu) + cpu = base.CPUPlace() + self.assertRaises(AssertionError, base.executor._as_lodtensor, 1, cpu) def test_as_lodtensor_type_error(self): - cpu = fluid.CPUPlace() + cpu = base.CPUPlace() self.assertRaises( TypeError, - fluid.executor._as_lodtensor, + base.executor._as_lodtensor, {"a": 1}, cpu, - fluid.core.VarDesc.VarType.INT32, + base.core.VarDesc.VarType.INT32, ) def test_as_lodtensor_list(self): - cpu = fluid.CPUPlace() - tensor = fluid.executor._as_lodtensor( - [1, 2], cpu, fluid.core.VarDesc.VarType.FP64 + cpu = base.CPUPlace() + tensor = base.executor._as_lodtensor( + [1, 2], cpu, base.core.VarDesc.VarType.FP64 ) - self.assertEqual(tensor._dtype(), fluid.core.VarDesc.VarType.FP64) + self.assertEqual(tensor._dtype(), base.core.VarDesc.VarType.FP64) def test_as_lodtensor_tuple(self): - cpu = fluid.CPUPlace() - tensor = fluid.executor._as_lodtensor( - (1, 2), cpu, fluid.core.VarDesc.VarType.FP64 + cpu = base.CPUPlace() + tensor = base.executor._as_lodtensor( + (1, 2), cpu, base.core.VarDesc.VarType.FP64 ) - self.assertEqual(tensor._dtype(), fluid.core.VarDesc.VarType.FP64) + self.assertEqual(tensor._dtype(), base.core.VarDesc.VarType.FP64) def test_as_lodtensor_nested_list(self): - cpu = fluid.CPUPlace() + cpu = base.CPUPlace() self.assertRaises( TypeError, - fluid.executor._as_lodtensor, + base.executor._as_lodtensor, [{1.2, 1.2}, {1, 2}], cpu, - fluid.core.VarDesc.VarType.INT32, + base.core.VarDesc.VarType.INT32, ) diff --git a/test/legacy_test/test_executor_return_tensor_not_overwriting.py b/test/legacy_test/test_executor_return_tensor_not_overwriting.py index bcffb9ef5abfb..66fa0001ef030 100644 --- a/test/legacy_test/test_executor_return_tensor_not_overwriting.py +++ b/test/legacy_test/test_executor_return_tensor_not_overwriting.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, skip_check_grad_ci import paddle -from paddle import fluid +from paddle import base paddle.enable_static() @@ -33,8 +33,8 @@ def calc_add_out(self, place=None, parallel=None): self.y = np.random.random((2, 5)).astype(np.float32) self.out = np.add(self.x, self.y) self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), + 'X': OpTest.np_dtype_to_base_dtype(self.x), + 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.outputs = {'Out': self.out} self.op_type = "elementwise_add" @@ -47,8 +47,8 @@ def calc_mul_out(self, place=None, parallel=None): self.y = np.random.random((5, 2)).astype(np.float32) self.out = np.dot(self.x, self.y) self.inputs = { - 'X': 
OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), + 'X': OpTest.np_dtype_to_base_dtype(self.x), + 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.outputs = {'Out': self.out} self.op_type = "mul" @@ -57,9 +57,9 @@ def calc_mul_out(self, place=None, parallel=None): return outs def test_executor_run_twice(self): - places = [fluid.CPUPlace()] - if fluid.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places = [base.CPUPlace()] + if base.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) for place in places: for parallel in [True, False]: @@ -78,8 +78,8 @@ def calc_add_out(self, place=None): x = paddle.ones(shape=[3, 3], dtype='float32') y = paddle.ones(shape=[3, 3], dtype='float32') out = paddle.add(x=x, y=y) - program = fluid.default_main_program() - exe = fluid.Executor(place) + program = base.default_main_program() + exe = base.Executor(place) out = exe.run(program, fetch_list=[out], return_numpy=False) return out @@ -87,15 +87,15 @@ def calc_sub_out(self, place=None): x = paddle.ones(shape=[2, 2], dtype='float32') y = paddle.ones(shape=[2, 2], dtype='float32') out = paddle.subtract(x=x, y=y) - program = fluid.default_main_program() - exe = fluid.Executor(place) + program = base.default_main_program() + exe = base.Executor(place) out = exe.run(program, fetch_list=[out], return_numpy=False) return out def test_executor_run_twice(self): - places = [fluid.CPUPlace()] - if fluid.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places = [base.CPUPlace()] + if base.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) for place in places: add_out = self.calc_add_out(place) diff --git a/test/legacy_test/test_expand_as_v2_op.py b/test/legacy_test/test_expand_as_v2_op.py index 68c9801acb4d8..0f9d9d3da6477 100755 --- a/test/legacy_test/test_expand_as_v2_op.py +++ b/test/legacy_test/test_expand_as_v2_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestExpandAsBasic(OpTest): @@ -250,7 +250,7 @@ def test_check_grad(self): class TestExpandAsV2Error(unittest.TestCase): def test_errors(self): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): x1 = paddle.static.data(name='x1', shape=[-1, 4], dtype="uint8") x2 = paddle.static.data(name='x2', shape=[-1, 4], dtype="int32") self.assertRaises(TypeError, paddle.tensor.expand_as, x1, x2) @@ -274,9 +274,9 @@ def test_api(self): out_1 = paddle.expand_as(x, y=y) - exe = fluid.Executor(place=fluid.CPUPlace()) + exe = base.Executor(place=base.CPUPlace()) res_1 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x": input1, "target_tensor": input2}, fetch_list=[out_1], ) diff --git a/test/legacy_test/test_expand_op.py b/test/legacy_test/test_expand_op.py index f4d6258d82025..90fb5222dfae3 100644 --- a/test/legacy_test/test_expand_op.py +++ b/test/legacy_test/test_expand_op.py @@ -17,7 +17,7 @@ import numpy as np from eager_op_test import OpTest -from paddle import fluid +from paddle import base # Situation 1: expand_times is a list(without tensor) @@ -26,7 +26,7 @@ def setUp(self): self.op_type = "expand" self.init_data() self.dtype = ( - "float32" if fluid.core.is_compiled_with_rocm() else "float64" + "float32" if base.core.is_compiled_with_rocm() else "float64" ) self.inputs = {'X': 
np.random.random(self.ori_shape).astype(self.dtype)} @@ -81,7 +81,7 @@ def setUp(self): self.op_type = "expand" self.init_data() self.dtype = ( - "float32" if fluid.core.is_compiled_with_rocm() else "float64" + "float32" if base.core.is_compiled_with_rocm() else "float64" ) expand_times_tensor = [] @@ -130,7 +130,7 @@ def setUp(self): self.op_type = "expand" self.init_data() self.dtype = ( - "float32" if fluid.core.is_compiled_with_rocm() else "float64" + "float32" if base.core.is_compiled_with_rocm() else "float64" ) self.inputs = { diff --git a/test/legacy_test/test_expand_v2_op.py b/test/legacy_test/test_expand_v2_op.py index 9b9f6b631a2bf..9352b34f69aa0 100644 --- a/test/legacy_test/test_expand_v2_op.py +++ b/test/legacy_test/test_expand_v2_op.py @@ -20,8 +20,8 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard # Situation 1: shape is a list(without tensor) @@ -278,8 +278,8 @@ def test_check_grad(self): class TestExpandV2Error(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): - x1 = fluid.create_lod_tensor( - np.array([[-1]]), [[1]], fluid.CPUPlace() + x1 = base.create_lod_tensor( + np.array([[-1]]), [[1]], base.CPUPlace() ) shape = [2, 2] self.assertRaises(TypeError, paddle.tensor.expand, x1, shape) @@ -307,11 +307,11 @@ def test_api(self): out_2 = paddle.expand(x, shape=[positive_2, 14]) out_3 = paddle.expand(x, shape=expand_shape) - g0 = fluid.backward.calc_gradient(out_2, x) + g0 = base.backward.calc_gradient(out_2, x) - exe = fluid.Executor(place=fluid.CPUPlace()) + exe = base.Executor(place=base.CPUPlace()) res_1, res_2, res_3 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={ "x": input, "expand_shape": np.array([12, 14]).astype("int32"), @@ -340,7 +340,7 @@ def test_shape_with_var(self): # Test python Dygraph API class TestExpandV2DygraphAPI(unittest.TestCase): def test_expand_times_is_tensor(self): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): paddle.seed(1) a = paddle.rand([2, 5]) expand_1 = paddle.expand(a, shape=[2, 5]) @@ -373,9 +373,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -404,9 +404,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) diff --git a/test/legacy_test/test_exponential_op.py b/test/legacy_test/test_exponential_op.py index 2974e2d4e513b..4debe069b8235 100644 --- a/test/legacy_test/test_exponential_op.py +++ b/test/legacy_test/test_exponential_op.py @@ -22,7 +22,7 @@ ) import paddle -from paddle.fluid import core +from paddle.base import core class TestExponentialOp1(OpTest): diff --git a/test/legacy_test/test_eye_op.py b/test/legacy_test/test_eye_op.py index b7ef848b42fad..a0340dc2c5f56 100644 --- a/test/legacy_test/test_eye_op.py +++ b/test/legacy_test/test_eye_op.py @@ -20,9 +20,9 @@ from test_attribute_var import UnittestBase import paddle -from paddle import fluid -from paddle.fluid import core, framework -from paddle.fluid.framework import Program, 
program_guard +from paddle import base +from paddle.base import core, framework +from paddle.base.framework import Program, program_guard class TestEyeOp(OpTest): @@ -92,7 +92,7 @@ class API_TestTensorEye(unittest.TestCase): def test_out(self): with paddle.static.program_guard(paddle.static.Program()): data = paddle.eye(10) - place = fluid.CPUPlace() + place = base.CPUPlace() exe = paddle.static.Executor(place) (result,) = exe.run(fetch_list=[data]) expected_result = np.eye(10, dtype="float32") diff --git a/test/legacy_test/test_fake_dequantize_op.py b/test/legacy_test/test_fake_dequantize_op.py index a7a5801a4a06e..d47ba3fd91b55 100644 --- a/test/legacy_test/test_fake_dequantize_op.py +++ b/test/legacy_test/test_fake_dequantize_op.py @@ -319,11 +319,11 @@ def setUp(self): def _get_places(self): import paddle - from paddle.fluid import core + from paddle.base import core if core.is_compiled_with_cuda(): - place = paddle.fluid.core.CUDAPlace(0) - if paddle.fluid.core.is_float16_supported(place): + place = paddle.base.core.CUDAPlace(0) + if paddle.base.core.is_float16_supported(place): return [place] else: return [] diff --git a/test/legacy_test/test_fake_init_op.py b/test/legacy_test/test_fake_init_op.py index 30b934a7c93ce..4a8fe0b4ac997 100644 --- a/test/legacy_test/test_fake_init_op.py +++ b/test/legacy_test/test_fake_init_op.py @@ -16,7 +16,7 @@ from op import Operator -from paddle.fluid import core +from paddle.base import core class TestFakeInitOpSelectedRows(unittest.TestCase): diff --git a/test/legacy_test/test_fc_op.py b/test/legacy_test/test_fc_op.py index 44b8e2f599d8e..a59720e9145c8 100644 --- a/test/legacy_test/test_fc_op.py +++ b/test/legacy_test/test_fc_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, paddle_static_guard import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard SEED = 2020 @@ -158,11 +158,11 @@ def run_program(num_flatten_dims): ) place = ( - fluid.CPUPlace() + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) - exe = fluid.Executor(place=place) + exe = base.Executor(place=place) exe.run(startup_program) out = exe.run(main_program, feed={"x": input}, fetch_list=[out]) return out diff --git a/test/legacy_test/test_feed_data_check_shape_type.py b/test/legacy_test/test_feed_data_check_shape_type.py index c8a85e0556f1a..54607fb2575e4 100644 --- a/test/legacy_test/test_feed_data_check_shape_type.py +++ b/test/legacy_test/test_feed_data_check_shape_type.py @@ -19,8 +19,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core os.environ['CPU_NUM'] = str(4) np.random.seed(123) @@ -203,16 +203,16 @@ def _test_feed_lod_tensor(self, use_cuda): feed_in_data = np.random.uniform(size=[sum_length, 3, 4, 5]).astype( np.float32 ) - feed_data_tensor = fluid.LoDTensor() - feed_data_tensor.set(feed_in_data, fluid.CPUPlace()) + feed_data_tensor = base.LoDTensor() + feed_data_tensor.set(feed_in_data, base.CPUPlace()) feed_data_tensor.set_recursive_sequence_lengths(sequence_lengths) label_size = [device_count, 1] - feed_label_tensor = fluid.LoDTensor() + feed_label_tensor = base.LoDTensor() feed_label = np.random.randint( low=0, high=self.class_num, size=[sum_length, 1] ).astype(np.int64) - feed_label_tensor.set(feed_label, fluid.CPUPlace()) + feed_label_tensor.set(feed_label, base.CPUPlace()) 
feed_label_tensor.set_recursive_sequence_lengths(sequence_lengths) self._feed_data_in_executor( @@ -231,17 +231,17 @@ def _feed_data_in_executor( feed_label, use_cuda, ): - startup_program = fluid.Program() - main_program = fluid.Program() + startup_program = base.Program() + main_program = base.Program() - with fluid.program_guard(main_program, startup_program): + with base.program_guard(main_program, startup_program): in_data, label, loss = self._simple_fc_net( in_size, label_size, self.class_num, self.hidden_sizes ) - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(startup_program) train_program = main_program diff --git a/test/legacy_test/test_feed_fetch_method.py b/test/legacy_test/test_feed_fetch_method.py index 1ef4946b217f0..6f72928a05812 100644 --- a/test/legacy_test/test_feed_fetch_method.py +++ b/test/legacy_test/test_feed_fetch_method.py @@ -16,7 +16,7 @@ import numpy as np -from paddle.fluid import core +from paddle.base import core class TestFeedFetch(unittest.TestCase): diff --git a/test/legacy_test/test_fetch_handler.py b/test/legacy_test/test_fetch_handler.py index 9a43432930089..5a41a6ea33b97 100644 --- a/test/legacy_test/test_fetch_handler.py +++ b/test/legacy_test/test_fetch_handler.py @@ -17,9 +17,9 @@ import numpy as np -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.framework import Program +from paddle import base +from paddle.base import core +from paddle.base.framework import Program class TestFetchHandler(unittest.TestCase): @@ -35,24 +35,24 @@ def test_fetch_handler(self): var_emb = block.create_var(name='emb', type=core.VarDesc.VarType.FP32) var_emb3 = block.create_var(name='emb3', type=core.VarDesc.VarType.FP32) - class FH(fluid.executor.FetchHandler): + class FH(base.executor.FetchHandler): def handler(self, fetch_dict): assert len(fetch_dict) == 1 table_var = scope.var('emb').get_tensor() table_var.set(table, place) fh = FH(var_dict={'emb': var_emb}, period_secs=2) - fm = fluid.trainer_factory.FetchHandlerMonitor(scope, fh) + fm = base.trainer_factory.FetchHandlerMonitor(scope, fh) fm.start() time.sleep(3) fm.stop() - default_fh = fluid.executor.FetchHandler( + default_fh = base.executor.FetchHandler( var_dict={'emb': var_emb, 'emb2': None, 'emb3': var_emb3}, period_secs=1, ) - default_fm = fluid.trainer_factory.FetchHandlerMonitor( + default_fm = base.trainer_factory.FetchHandlerMonitor( scope, default_fh ) default_fm.start() diff --git a/test/legacy_test/test_fetch_lod_tensor_array.py b/test/legacy_test/test_fetch_lod_tensor_array.py index 8a8288476dd43..f9abc882f3af3 100644 --- a/test/legacy_test/test_fetch_lod_tensor_array.py +++ b/test/legacy_test/test_fetch_lod_tensor_array.py @@ -18,13 +18,13 @@ from simple_nets import simple_fc_net, simple_fc_net_with_inputs import paddle -from paddle import fluid +from paddle import base class TestFetchLoDTensorArray(unittest.TestCase): def build_program(self, main_program, startup_program): - with fluid.unique_name.guard(): - with fluid.program_guard(main_program, startup_program): + with base.unique_name.guard(): + with base.program_guard(main_program, startup_program): i = paddle.zeros(shape=[1], dtype='int64') img = paddle.static.data( name='image', shape=[-1, 784], dtype='float32' @@ -46,8 +46,8 @@ def build_program(self, main_program, startup_program): return loss, array def check_network(self, use_cuda=True): - main_program = fluid.Program() - 
startup_program = fluid.Program() + main_program = base.Program() + startup_program = base.Program() loss, array = self.build_program(main_program, startup_program) @@ -55,13 +55,13 @@ def check_network(self, use_cuda=True): image = np.random.normal(size=(batch_size, 784)).astype('float32') label = np.random.randint(0, 10, (batch_size, 1), dtype="int64") - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() + exe = base.Executor(place) exe.run(startup_program) feed_dict = {'image': image, 'label': label} - build_strategy = fluid.BuildStrategy() - binary = fluid.CompiledProgram( + build_strategy = base.BuildStrategy() + binary = base.CompiledProgram( main_program, build_strategy=build_strategy ) @@ -76,7 +76,7 @@ def check_network(self, use_cuda=True): np.testing.assert_allclose(loss_v, array_v[2], rtol=1e-05) def test_fetch_lod_tensor_array(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_network(use_cuda=True) self.check_network(use_cuda=False) diff --git a/test/legacy_test/test_fetch_var.py b/test/legacy_test/test_fetch_var.py index eb1bdcc4878c7..6978be07405de 100644 --- a/test/legacy_test/test_fetch_var.py +++ b/test/legacy_test/test_fetch_var.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base class TestFetchVar(unittest.TestCase): @@ -30,9 +30,9 @@ def test_fetch_var(self): dtype="int32", persistable=True, name="x" ) paddle.assign(self.val, output=x) - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(fluid.default_main_program(), feed={}, fetch_list=[]) - fetched_x = fluid.executor._fetch_var("x") + exe = base.Executor(base.CPUPlace()) + exe.run(base.default_main_program(), feed={}, fetch_list=[]) + fetched_x = base.executor._fetch_var("x") np.testing.assert_array_equal(fetched_x, self.val) self.assertEqual(fetched_x.dtype, self.val.dtype) diff --git a/test/legacy_test/test_fill_any_like_op.py b/test/legacy_test/test_fill_any_like_op.py index 31a3fa3836323..f32f07f39d07b 100644 --- a/test/legacy_test/test_fill_any_like_op.py +++ b/test/legacy_test/test_fill_any_like_op.py @@ -19,7 +19,7 @@ import paddle import paddle.framework.dtype as dtypes -from paddle.fluid import core +from paddle.base import core def fill_any_like_wrapper(x, value, out_dtype=None, name=None): diff --git a/test/legacy_test/test_fill_any_op.py b/test/legacy_test/test_fill_any_op.py index 2137cb1e22ea4..b2325d06dfa37 100644 --- a/test/legacy_test/test_fill_any_op.py +++ b/test/legacy_test/test_fill_any_op.py @@ -73,7 +73,7 @@ def init(self): class TestFillAnyInplace(unittest.TestCase): def test_fill_any_version(self): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): var = paddle.to_tensor(np.ones((4, 2, 3)).astype(np.float32)) self.assertEqual(var.inplace_version, 0) @@ -87,7 +87,7 @@ def test_fill_any_version(self): self.assertEqual(var.inplace_version, 3) def test_fill_any_eqaul(self): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): tensor = paddle.to_tensor( np.random.random((20, 30)).astype(np.float32) ) @@ -98,7 +98,7 @@ def test_fill_any_eqaul(self): self.assertEqual((tensor.numpy() == target).all().item(), True) def test_backward(self): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x = paddle.full([10, 10], -1.0, dtype='float32') x.stop_gradient = False y = 2 * x diff --git a/test/legacy_test/test_fill_constant_op.py 
b/test/legacy_test/test_fill_constant_op.py index 614cd29668d88..ee9d5e058c1e0 100644 --- a/test/legacy_test/test_fill_constant_op.py +++ b/test/legacy_test/test_fill_constant_op.py @@ -19,8 +19,8 @@ from op import Operator import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard def fill_wrapper(shape, value=0.0): @@ -328,9 +328,9 @@ def test_api(self): shape=shape_tensor_int64, dtype=np.float32, value=val2 ) - exe = fluid.Executor(place=fluid.CPUPlace()) + exe = base.Executor(place=base.CPUPlace()) res_1, res_2, res_3, res_4, res_5, res_6, res_7, res_8 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={ "shape_tensor_int32": np.array([1, 2]).astype("int32"), "shape_tensor_int64": np.array([1, 2]).astype("int64"), @@ -366,13 +366,13 @@ def test_api(self): class TestFillConstantImperative(unittest.TestCase): def test_api(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): data1 = np.array([1, 2]).astype('int32') data2 = np.array([1.1]).astype('float32') data3 = np.array([88]).astype('int32') - shape = fluid.dygraph.to_variable(data1) - val = fluid.dygraph.to_variable(data2) - value = fluid.dygraph.to_variable(data3) + shape = base.dygraph.to_variable(data1) + val = base.dygraph.to_variable(data2) + value = base.dygraph.to_variable(data3) res1 = paddle.tensor.fill_constant( shape=[1, 2], dtype='float32', value=1.1 ) @@ -399,17 +399,17 @@ def test_api(self): ) def test_nan(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): res = paddle.tensor.fill_constant([1], 'float32', np.nan) self.assertTrue(np.isnan(res.numpy().item(0))) def test_inf(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): res = paddle.tensor.fill_constant([1], 'float32', np.inf) self.assertTrue(np.isinf(res.numpy().item(0))) def test_ninf(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): res = paddle.tensor.fill_constant([1], 'float32', np.NINF) self.assertTrue(np.isinf(res.numpy().item(0))) self.assertEqual(np.NINF, res.numpy().item(0)) diff --git a/test/legacy_test/test_fill_diagonal_tensor_op.py b/test/legacy_test/test_fill_diagonal_tensor_op.py index a1217efee9a5b..be5f435b001ea 100644 --- a/test/legacy_test/test_fill_diagonal_tensor_op.py +++ b/test/legacy_test/test_fill_diagonal_tensor_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core def fill_diagonal_ndarray(x, value, offset=0, dim1=0, dim2=1): diff --git a/test/legacy_test/test_fill_op.py b/test/legacy_test/test_fill_op.py index bd7dc4dccf878..88686999b2d91 100644 --- a/test/legacy_test/test_fill_op.py +++ b/test/legacy_test/test_fill_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 from op import Operator -from paddle.fluid import core +from paddle.base import core class TestFillOp1(OpTest): diff --git a/test/legacy_test/test_fill_zeros_like2_op.py b/test/legacy_test/test_fill_zeros_like2_op.py index e6320ed0ab832..cbacc81d3985b 100644 --- a/test/legacy_test/test_fill_zeros_like2_op.py +++ b/test/legacy_test/test_fill_zeros_like2_op.py @@ -17,7 +17,7 @@ import numpy as np from eager_op_test import OpTest -from paddle.fluid.framework import convert_np_dtype_to_dtype_ +from paddle.base.framework import convert_np_dtype_to_dtype_ class TestFillZerosLike2Op(OpTest): diff --git a/test/legacy_test/test_flash_attention.py 
b/test/legacy_test/test_flash_attention.py index 979217d7d221a..245e51a36a0d0 100644 --- a/test/legacy_test/test_flash_attention.py +++ b/test/legacy_test/test_flash_attention.py @@ -20,8 +20,8 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.nn.functional.flash_attention import ( flash_attention, flash_attn_unpadded, @@ -177,7 +177,7 @@ def test_unpadded(self): self.return_softmax, ) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) fetches_result = exe.run( feed={ "q": query.astype('float16'), @@ -293,7 +293,7 @@ def test_all(self): qs, ks, vs, self.dropout, self.causal, self.return_softmax ) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) fetches_result = exe.run( feed={ "q": query.astype('float16'), diff --git a/test/legacy_test/test_flatten_contiguous_range_op.py b/test/legacy_test/test_flatten_contiguous_range_op.py index 658f03979a9c4..f8b76771b4e65 100644 --- a/test/legacy_test/test_flatten_contiguous_range_op.py +++ b/test/legacy_test/test_flatten_contiguous_range_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core class TestFlattenOp(OpTest): diff --git a/test/legacy_test/test_flatten_op.py b/test/legacy_test/test_flatten_op.py index 0803db60c73c0..d3756946b1d89 100644 --- a/test/legacy_test/test_flatten_op.py +++ b/test/legacy_test/test_flatten_op.py @@ -68,7 +68,7 @@ def init_test_case(self): class TestFlattenOpFP16(unittest.TestCase): def test_fp16_with_gpu(self): - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): with paddle_static_guard(): place = paddle.CUDAPlace(0) with paddle.static.program_guard( diff --git a/test/legacy_test/test_fleet.py b/test/legacy_test/test_fleet.py index 52d6df39b92ee..0e9eb0579cc98 100644 --- a/test/legacy_test/test_fleet.py +++ b/test/legacy_test/test_fleet.py @@ -33,7 +33,7 @@ def setUp(self): def test_pslib_1(self): """Test cases for pslib.""" import paddle - from paddle import fluid + from paddle import base from paddle.incubate.distributed.fleet.parameter_server.pslib import ( fleet, ) @@ -49,13 +49,13 @@ def test_pslib_1(self): os.environ["PADDLE_TRAINER_ID"] = "0" role_maker = GeneralRoleMaker() # role_maker.generate_role() - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) # fleet.init(role_maker) - train_program = fluid.Program() - startup_program = fluid.Program() - scope = fluid.Scope() - with fluid.program_guard(train_program, startup_program): + train_program = base.Program() + startup_program = base.Program() + scope = base.Scope() + with base.program_guard(train_program, startup_program): show = paddle.static.data( name="show", shape=[-1, 1], @@ -67,7 +67,7 @@ def test_pslib_1(self): size=[1, 1], is_sparse=True, is_distributed=True, - param_attr=fluid.ParamAttr(name="embedding"), + param_attr=base.ParamAttr(name="embedding"), ) bow = paddle.static.nn.sequence_lod.sequence_pool( input=emb, pool_type='sum' diff --git a/test/legacy_test/test_fleet_base.py b/test/legacy_test/test_fleet_base.py index 2164c8fade4d1..6a20b425f9d45 100644 --- a/test/legacy_test/test_fleet_base.py +++ b/test/legacy_test/test_fleet_base.py @@ -18,7 +18,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet from 
paddle.distributed.fleet.base import role_maker @@ -161,7 +161,7 @@ def setUp(self): def test_dygraph_method(self): paddle.disable_static() value = np.arange(26).reshape(2, 13).astype("float32") - a = fluid.dygraph.to_variable(value) + a = base.dygraph.to_variable(value) layer = paddle.nn.Linear(13, 5) adam = paddle.optimizer.Adam( learning_rate=0.01, parameters=layer.parameters() @@ -216,8 +216,8 @@ def test_single_error(): # in non_distributed mode(use `python` to launch), raise error if has multi cards if ( - fluid.core.is_compiled_with_cuda() - and fluid.core.get_cuda_device_count() > 1 + base.core.is_compiled_with_cuda() + and base.core.get_cuda_device_count() > 1 ): self.assertRaises(ValueError, test_single_error) else: diff --git a/test/legacy_test/test_fleet_base_2.py b/test/legacy_test/test_fleet_base_2.py index 667de8759f6b6..836f9486f8667 100644 --- a/test/legacy_test/test_fleet_base_2.py +++ b/test/legacy_test/test_fleet_base_2.py @@ -21,7 +21,7 @@ import os -from paddle import fluid +from paddle import base class TestFleetBase(unittest.TestCase): @@ -66,11 +66,11 @@ def test_ps_minimize(self): optimizer = fleet.distributed_optimizer(optimizer, strategy=strategy) optimizer.minimize(avg_cost) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) exe.run(paddle.static.default_startup_program()) - compiled_prog = fluid.compiler.CompiledProgram( - fluid.default_main_program() + compiled_prog = base.compiler.CompiledProgram( + base.default_main_program() ) temp_dir = tempfile.TemporaryDirectory() diff --git a/test/legacy_test/test_fleet_base_single.py b/test/legacy_test/test_fleet_base_single.py index 352b64b19155f..4efc7e240661f 100644 --- a/test/legacy_test/test_fleet_base_single.py +++ b/test/legacy_test/test_fleet_base_single.py @@ -24,7 +24,7 @@ import unittest import paddle -from paddle import fluid, nn +from paddle import base, nn from paddle.distributed import fleet @@ -95,12 +95,12 @@ def test_single_run_collective_minimize(self): optimizer.minimize(avg_cost) place = ( - fluid.CUDAPlace(0) - if paddle.fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if paddle.base.is_compiled_with_cuda() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(paddle.static.default_startup_program()) for i in range(10): @@ -139,13 +139,13 @@ def test_single_run_ps_minimize(self): fleet.init_server() fleet.run_server() elif fleet.is_worker(): - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) exe.run(paddle.static.default_startup_program()) step = 10 for i in range(step): cost_val = exe.run( - program=fluid.default_main_program(), + program=base.default_main_program(), feed=self.gen_data(), fetch_list=[avg_cost.name], ) diff --git a/test/legacy_test/test_fleet_exe_dist_model_run.py b/test/legacy_test/test_fleet_exe_dist_model_run.py index ebc790c403463..9f2d0a09da20a 100644 --- a/test/legacy_test/test_fleet_exe_dist_model_run.py +++ b/test/legacy_test/test_fleet_exe_dist_model_run.py @@ -19,7 +19,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/legacy_test/test_fleet_exe_dist_model_tensor.py b/test/legacy_test/test_fleet_exe_dist_model_tensor.py index b5c4af9fc763f..728ab1f5681f0 100644 --- a/test/legacy_test/test_fleet_exe_dist_model_tensor.py +++ b/test/legacy_test/test_fleet_exe_dist_model_tensor.py @@ -17,7 +17,7 @@ 
import numpy as np import paddle -from paddle.fluid.core import DistModelDataType, DistModelTensor +from paddle.base.core import DistModelDataType, DistModelTensor paddle.enable_static() diff --git a/test/legacy_test/test_fleet_executor.py b/test/legacy_test/test_fleet_executor.py index ef13706b89c63..9402602083a9b 100644 --- a/test/legacy_test/test_fleet_executor.py +++ b/test/legacy_test/test_fleet_executor.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base paddle.enable_static() @@ -44,7 +44,7 @@ def fake_fleet_opt(self): def run_fleet_executor(self, place, x_data, y_data): exe = paddle.static.Executor(place) empty_program = paddle.static.Program() - with fluid.program_guard(empty_program, empty_program): + with base.program_guard(empty_program, empty_program): x = paddle.static.data( name='x', shape=[-1] + list(x_data.shape), dtype=x_data.dtype ) @@ -82,13 +82,13 @@ def run_fleet_executor(self, place, x_data, y_data): return res def test_executor_on_single_device(self): - if fluid.is_compiled_with_cuda(): + if base.is_compiled_with_cuda(): shape = (10000, 3462) x_data = np.random.rand(*shape) y_data = np.random.rand(*shape) z_data = x_data + y_data a_data = 2 * x_data + 3 * y_data - res = self.run_fleet_executor(fluid.CUDAPlace(0), x_data, y_data) + res = self.run_fleet_executor(base.CUDAPlace(0), x_data, y_data) np.testing.assert_allclose(res[0], z_data, rtol=1e-05) np.testing.assert_allclose(res[1], a_data, rtol=1e-05) diff --git a/test/legacy_test/test_fleet_executor_cond_interceptor.py b/test/legacy_test/test_fleet_executor_cond_interceptor.py index 32fd99fef514f..f6c02b5d815b0 100644 --- a/test/legacy_test/test_fleet_executor_cond_interceptor.py +++ b/test/legacy_test/test_fleet_executor_cond_interceptor.py @@ -18,7 +18,7 @@ import paddle from paddle.distributed.fleet.fleet_executor_utils import TaskNode -from paddle.fluid import core +from paddle.base import core paddle.enable_static() @@ -58,7 +58,7 @@ def test_cond_interceptor(self): ) # loop length data = paddle.static.data(name='x', shape=[1]) - loader = paddle.fluid.io.DataLoader.from_generator( + loader = paddle.base.io.DataLoader.from_generator( feed_list=[data], capacity=num_micro_batches * 4, iterable=False ) loader.set_batch_generator( diff --git a/test/legacy_test/test_fleet_executor_multi_devices.py b/test/legacy_test/test_fleet_executor_multi_devices.py index 871dd68042cc9..6f68e0d9d2763 100644 --- a/test/legacy_test/test_fleet_executor_multi_devices.py +++ b/test/legacy_test/test_fleet_executor_multi_devices.py @@ -16,7 +16,7 @@ import unittest import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet paddle.enable_static() @@ -26,7 +26,7 @@ class TestFleetExecutor(unittest.TestCase): def run_fleet_executor(self, place, fleet_opt={}): exe = paddle.static.Executor(place) empty_program = paddle.static.Program() - with fluid.program_guard(empty_program, empty_program): + with base.program_guard(empty_program, empty_program): x = paddle.static.data( name='x', shape=[-1, 1], dtype=paddle.float32 ) @@ -52,7 +52,7 @@ def test_dist_executor_on_multi_devices(self): "dist_strategy": strategy.sharding_configs, "num_micro_batches": strategy.pipeline_configs["accumulate_steps"], } - if fluid.is_compiled_with_cuda(): + if base.is_compiled_with_cuda(): # TODO: Distribute test case is not supported for executor can not stop pass diff --git a/test/legacy_test/test_fleet_executor_origin_scheduler.py 
b/test/legacy_test/test_fleet_executor_origin_scheduler.py index a017f3283b0ba..8cb5941ba4407 100644 --- a/test/legacy_test/test_fleet_executor_origin_scheduler.py +++ b/test/legacy_test/test_fleet_executor_origin_scheduler.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base paddle.enable_static() @@ -44,7 +44,7 @@ def fake_fleet_opt(self): def run_fleet_executor(self, place, x_data, y_data): exe = paddle.static.Executor(place) empty_program = paddle.static.Program() - with fluid.program_guard(empty_program, empty_program): + with base.program_guard(empty_program, empty_program): x = paddle.static.data( name='x', shape=[-1] + list(x_data.shape), dtype=x_data.dtype ) @@ -82,13 +82,13 @@ def run_fleet_executor(self, place, x_data, y_data): return res def test_executor_on_single_device(self): - if fluid.is_compiled_with_cuda(): + if base.is_compiled_with_cuda(): shape = (10000, 3462) x_data = np.random.rand(*shape) y_data = np.random.rand(*shape) z_data = x_data + y_data a_data = 2 * x_data + 3 * y_data - res = self.run_fleet_executor(fluid.CUDAPlace(0), x_data, y_data) + res = self.run_fleet_executor(base.CUDAPlace(0), x_data, y_data) np.testing.assert_allclose(res[0], z_data, rtol=1e-05) np.testing.assert_allclose(res[1], a_data, rtol=1e-05) diff --git a/test/legacy_test/test_fleet_executor_task_node.py b/test/legacy_test/test_fleet_executor_task_node.py index 41795aa51a936..a15547f4f4551 100644 --- a/test/legacy_test/test_fleet_executor_task_node.py +++ b/test/legacy_test/test_fleet_executor_task_node.py @@ -16,7 +16,7 @@ import paddle from paddle.distributed.fleet.fleet_executor_utils import TaskNode -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/legacy_test/test_fleet_executor_with_task_nodes.py b/test/legacy_test/test_fleet_executor_with_task_nodes.py index 27b66a862c6ea..f7cad5f89362f 100644 --- a/test/legacy_test/test_fleet_executor_with_task_nodes.py +++ b/test/legacy_test/test_fleet_executor_with_task_nodes.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.distributed.fleet.fleet_executor_utils import TaskNode paddle.enable_static() @@ -27,7 +27,7 @@ class TestFleetExecutor(unittest.TestCase): def run_fleet_executor(self, place, x_data, y_data): exe = paddle.static.Executor(place) empty_program = paddle.static.Program() - with fluid.program_guard(empty_program, empty_program): + with base.program_guard(empty_program, empty_program): x = paddle.static.data( name='x', shape=[-1] + list(x_data.shape), dtype=x_data.dtype ) @@ -76,13 +76,13 @@ def run_fleet_executor(self, place, x_data, y_data): return res def test_executor_on_single_device(self): - if fluid.is_compiled_with_cuda(): + if base.is_compiled_with_cuda(): shape = (10000, 3462) x_data = np.random.rand(*shape) y_data = np.random.rand(*shape) z_data = x_data + y_data a_data = 2 * x_data + 3 * y_data - res = self.run_fleet_executor(fluid.CUDAPlace(0), x_data, y_data) + res = self.run_fleet_executor(base.CUDAPlace(0), x_data, y_data) np.testing.assert_allclose(res[0], z_data, rtol=1e-05) np.testing.assert_allclose(res[1], a_data, rtol=1e-05) diff --git a/test/legacy_test/test_fleet_gradient_scale.py b/test/legacy_test/test_fleet_gradient_scale.py index ef7741883c9d1..26483fee7433a 100644 --- a/test/legacy_test/test_fleet_gradient_scale.py +++ b/test/legacy_test/test_fleet_gradient_scale.py @@ -18,7 +18,7 @@ import numpy as np import paddle -from paddle import 
fluid +from paddle import base from paddle.distributed import fleet @@ -52,8 +52,8 @@ def test_single_gpu(self): startup_program = paddle.static.Program() strategy = fleet.DistributedStrategy() strategy.gradient_scale_configs = {'scale_strategy': 'sum'} - with fluid.program_guard(main_program, startup_program): - with fluid.unique_name.guard(): + with base.program_guard(main_program, startup_program): + with base.unique_name.guard(): input_x = paddle.static.data( name="x", shape=[None, 32], dtype='float32' ) diff --git a/test/legacy_test/test_fleet_metric.py b/test/legacy_test/test_fleet_metric.py index 4adaab8e90e93..d29bb9590edbd 100644 --- a/test/legacy_test/test_fleet_metric.py +++ b/test/legacy_test/test_fleet_metric.py @@ -18,7 +18,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet from paddle.distributed.fleet.base.util_factory import UtilBase from paddle.distributed.fleet.metrics import metric @@ -54,7 +54,7 @@ class FakeFleet: def __init__(self): """Init.""" - self.gloo = fluid.core.Gloo() + self.gloo = base.core.Gloo() self.gloo.set_rank(0) self.gloo.set_size(1) self.gloo.set_prefix("123") @@ -76,9 +76,9 @@ def _barrier(self, comm_world="worker"): def test_metric_1(self): """Test cases for metrics.""" - train = fluid.Program() - startup = fluid.Program() - with fluid.program_guard(train, startup): + train = base.Program() + startup = base.Program() + with base.program_guard(train, startup): t = paddle.static.create_global_var( shape=[1, 1], value=1, @@ -93,10 +93,10 @@ def test_metric_1(self): persistable=True, force_cpu=True, ) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - scope = fluid.Scope() - with fluid.scope_guard(scope): + place = base.CPUPlace() + exe = base.Executor(place) + scope = base.Scope() + with base.scope_guard(scope): exe.run(startup) metric.sum(t, scope, self.util) metric.max(t, scope, self.util) diff --git a/test/legacy_test/test_fleet_nocvm_1.py b/test/legacy_test/test_fleet_nocvm_1.py index f4014613525f5..d9962c1a27b38 100644 --- a/test/legacy_test/test_fleet_nocvm_1.py +++ b/test/legacy_test/test_fleet_nocvm_1.py @@ -33,7 +33,7 @@ def setUp(self): def test_pslib_1(self): """Test cases for pslib.""" - from paddle import fluid + from paddle import base from paddle.incubate.distributed.fleet.parameter_server.pslib import ( fleet, ) @@ -49,13 +49,13 @@ def test_pslib_1(self): os.environ["PADDLE_TRAINER_ID"] = "0" role_maker = GeneralRoleMaker() # role_maker.generate_role() - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) # fleet.init(role_maker) - train_program = fluid.Program() - startup_program = fluid.Program() - scope = fluid.Scope() - with fluid.program_guard(train_program, startup_program): + train_program = base.Program() + startup_program = base.Program() + scope = base.Scope() + with base.program_guard(train_program, startup_program): show = paddle.static.data( name="show", shape=[-1, 1], @@ -67,7 +67,7 @@ def test_pslib_1(self): size=[1, 1], is_sparse=True, is_distributed=True, - param_attr=fluid.ParamAttr(name="embedding"), + param_attr=base.ParamAttr(name="embedding"), ) fc = paddle.static.nn.fc(x=emb, size=1, activation=None) label = paddle.static.data( diff --git a/test/legacy_test/test_fleet_ps.py b/test/legacy_test/test_fleet_ps.py index 75cb4b595b309..941ef3776a32e 100644 --- a/test/legacy_test/test_fleet_ps.py +++ b/test/legacy_test/test_fleet_ps.py @@ -14,7 +14,7 @@ import unittest -from 
paddle.fluid.framework import default_main_program +from paddle.base.framework import default_main_program from paddle.incubate.distributed.fleet.parameter_server.ir.pserver_pass import ( _get_optimizer_input_shape, ) diff --git a/test/legacy_test/test_fleet_pyramid_hash.py b/test/legacy_test/test_fleet_pyramid_hash.py index 9c86ea8f770b9..cfd02ee72ced9 100644 --- a/test/legacy_test/test_fleet_pyramid_hash.py +++ b/test/legacy_test/test_fleet_pyramid_hash.py @@ -15,7 +15,7 @@ import unittest import paddle -from paddle import fluid +from paddle import base from paddle.incubate.distributed.fleet import role_maker from paddle.incubate.distributed.fleet.parameter_server.distribute_transpiler import ( fleet, @@ -47,11 +47,11 @@ def test_dist_geo_server_transpiler(self): black_list_len=2800, seed=3, lr=0.002, - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( name="PyramidHash_emb_0", learning_rate=0, ), - param_attr_wl=fluid.ParamAttr( + param_attr_wl=base.ParamAttr( name="Filter", learning_rate=0, ), diff --git a/test/legacy_test/test_fleet_rolemaker.py b/test/legacy_test/test_fleet_rolemaker.py index b9af57199fc36..7caf6452bfb14 100644 --- a/test/legacy_test/test_fleet_rolemaker.py +++ b/test/legacy_test/test_fleet_rolemaker.py @@ -62,7 +62,7 @@ def test_training_role(self): def test_pslib_1(self): """Test cases for pslib.""" - from paddle import fluid + from paddle import base from paddle.incubate.distributed.fleet.parameter_server.pslib import ( fleet, ) @@ -79,13 +79,13 @@ def test_pslib_1(self): role_maker = GeneralRoleMaker() # print("init rolemaker") # role_maker.generate_role() - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) # fleet.init(role_maker) - train_program = fluid.Program() - startup_program = fluid.Program() - scope = fluid.Scope() - with fluid.program_guard(train_program, startup_program): + train_program = base.Program() + startup_program = base.Program() + scope = base.Scope() + with base.program_guard(train_program, startup_program): show = paddle.static.data( name="show", shape=[-1, 1], dtype="float32", lod_level=1 ) diff --git a/test/legacy_test/test_fleet_rolemaker_2.py b/test/legacy_test/test_fleet_rolemaker_2.py index a43cebd4c66c3..b7ee8ed7a3049 100644 --- a/test/legacy_test/test_fleet_rolemaker_2.py +++ b/test/legacy_test/test_fleet_rolemaker_2.py @@ -34,7 +34,7 @@ def tearDown(self): def test_pslib_2(self): """Test cases for pslib.""" - from paddle import fluid + from paddle import base from paddle.incubate.distributed.fleet.parameter_server.distribute_transpiler import ( fleet, ) @@ -52,17 +52,17 @@ def test_pslib_2(self): os.environ["PADDLE_PSERVERS_IP_PORT_LIST"] = "127.0.0.1:36002" os.environ["PADDLE_TRAINER_ID"] = "0" os.environ["PADDLE_TRAINERS_NUM"] = "1" - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) try: fleet.init(None) except: print("no mpi4py, skip test_pslib_2") return - train_program = fluid.Program() - startup_program = fluid.Program() - scope = fluid.Scope() - with fluid.program_guard(train_program, startup_program): + train_program = base.Program() + startup_program = base.Program() + scope = base.Scope() + with base.program_guard(train_program, startup_program): show = paddle.static.data( name="show", shape=[-1, 1], dtype="float32", lod_level=1 ) diff --git a/test/legacy_test/test_fleet_rolemaker_3.py b/test/legacy_test/test_fleet_rolemaker_3.py index 88541d58b24c3..205b25c3b3911 100644 --- 
a/test/legacy_test/test_fleet_rolemaker_3.py +++ b/test/legacy_test/test_fleet_rolemaker_3.py @@ -33,7 +33,7 @@ def setUp(self): def test_pslib_1(self): """Test cases for pslib.""" - from paddle import fluid + from paddle import base from paddle.incubate.distributed.fleet.parameter_server.pslib import ( fleet, ) @@ -53,13 +53,13 @@ def test_pslib_1(self): http_ip_port="127.0.0.1:36003", ) # role_maker.generate_role() - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) # fleet.init(role_maker) - train_program = fluid.Program() - startup_program = fluid.Program() - scope = fluid.Scope() - with fluid.program_guard(train_program, startup_program): + train_program = base.Program() + startup_program = base.Program() + scope = base.Scope() + with base.program_guard(train_program, startup_program): show = paddle.static.data( name="show", shape=[-1, 1], dtype="float32", lod_level=1 ) diff --git a/test/legacy_test/test_fleet_unitaccessor.py b/test/legacy_test/test_fleet_unitaccessor.py index 8146bf90efd25..f6e33ed1ee6b3 100644 --- a/test/legacy_test/test_fleet_unitaccessor.py +++ b/test/legacy_test/test_fleet_unitaccessor.py @@ -33,7 +33,7 @@ def setUp(self): def test_pslib_1(self): """Test cases for pslib.""" - from paddle import fluid + from paddle import base from paddle.incubate.distributed.fleet.parameter_server.pslib import ( fleet, ) @@ -49,13 +49,13 @@ def test_pslib_1(self): os.environ["PADDLE_TRAINER_ID"] = "0" role_maker = GeneralRoleMaker() # role_maker.generate_role() - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) # fleet.init(role_maker) - train_program = fluid.Program() - startup_program = fluid.Program() - scope = fluid.Scope() - with fluid.program_guard(train_program, startup_program): + train_program = base.Program() + startup_program = base.Program() + scope = base.Scope() + with base.program_guard(train_program, startup_program): show = paddle.static.data( name="show", shape=[-1, 1], dtype="int64", lod_level=1 ) @@ -64,7 +64,7 @@ def test_pslib_1(self): size=[1, 1], is_sparse=True, is_distributed=True, - param_attr=fluid.ParamAttr(name="embedding"), + param_attr=base.ParamAttr(name="embedding"), ) fc = paddle.static.nn.fc(x=emb, size=1, activation=None) label = paddle.static.data( diff --git a/test/legacy_test/test_flip.py b/test/legacy_test/test_flip.py index e899511b150c1..d4125049f90eb 100644 --- a/test/legacy_test/test_flip.py +++ b/test/legacy_test/test_flip.py @@ -20,17 +20,17 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestFlipOp_API(unittest.TestCase): """Test flip api.""" def test_static_graph(self): - startup_program = fluid.Program() - train_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): + startup_program = base.Program() + train_program = base.Program() + with base.program_guard(train_program, startup_program): axis = [0] input = paddle.static.data( name='input', dtype='float32', shape=[2, 3] @@ -38,10 +38,10 @@ def test_static_graph(self): output = paddle.flip(input, axis) output = paddle.flip(output, -1) output = output.flip(0) - place = fluid.CPUPlace() - if fluid.core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) + place = base.CPUPlace() + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) + exe = 
base.Executor(place) exe.run(startup_program) img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32) res = exe.run( @@ -56,8 +56,8 @@ def test_static_graph(self): def test_dygraph(self): img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32) - with fluid.dygraph.guard(): - inputs = fluid.dygraph.to_variable(img) + with base.dygraph.guard(): + inputs = base.dygraph.to_variable(img) ret = paddle.flip(inputs, [0]) ret = ret.flip(0) ret = paddle.flip(ret, 1) @@ -247,9 +247,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -278,9 +278,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) diff --git a/test/legacy_test/test_fmax_op.py b/test/legacy_test/test_fmax_op.py index 0271854aebb72..0ea78cb41cc5e 100644 --- a/test/legacy_test/test_fmax_op.py +++ b/test/legacy_test/test_fmax_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core class ApiFMaxTest(unittest.TestCase): diff --git a/test/legacy_test/test_fmin_op.py b/test/legacy_test/test_fmin_op.py index 1956d5b4fc433..26e2c585355ca 100644 --- a/test/legacy_test/test_fmin_op.py +++ b/test/legacy_test/test_fmin_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/legacy_test/test_fold_op.py b/test/legacy_test/test_fold_op.py index 8fdb37deadf21..62f3a05311e63 100644 --- a/test/legacy_test/test_fold_op.py +++ b/test/legacy_test/test_fold_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -108,7 +108,7 @@ def calc_fold(self): def set_data(self): self.init_data() self.calc_fold() - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(self.x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(self.x)} self.attrs = { 'kernel_sizes': self.kernel_sizes, 'paddings': self.paddings, @@ -151,13 +151,13 @@ def setUp(self): self.op_type = 'fold' self.python_api = paddle.nn.functional.fold self.set_data() - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) def test_api(self): for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input = paddle.to_tensor(self.x) m = paddle.nn.Fold(**self.attrs) m.eval() @@ -172,7 +172,7 @@ def test_info(self): class TestFoldOpError(unittest.TestCase): def test_errors(self): - from paddle.fluid.framework import Program, program_guard + from paddle.base.framework import Program, program_guard from paddle.nn.functional import fold with program_guard(Program(), Program()): diff --git a/test/legacy_test/test_frac_api.py b/test/legacy_test/test_frac_api.py index 9b32a0a4686cc..26bc74225e54b 100644 --- a/test/legacy_test/test_frac_api.py +++ b/test/legacy_test/test_frac_api.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle 
import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard def ref_frac(x): @@ -45,10 +45,10 @@ def test_api_static(self): with program_guard(Program()): input = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out = paddle.frac(input) - place = fluid.CPUPlace() - if fluid.core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) + place = base.CPUPlace() + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) + exe = base.Executor(place) (res,) = exe.run(feed={'X': self.x_np}, fetch_list=[out]) out_ref = ref_frac(self.x_np) np.testing.assert_allclose(out_ref, res, rtol=1e-05) diff --git a/test/legacy_test/test_frame_op.py b/test/legacy_test/test_frame_op.py index 8f166f443462b..6f96d952cc63e 100644 --- a/test/legacy_test/test_frame_op.py +++ b/test/legacy_test/test_frame_op.py @@ -19,7 +19,7 @@ from numpy.lib.stride_tricks import as_strided import paddle -from paddle.fluid import core +from paddle.base import core def frame_from_librosa(x, frame_length, hop_length, axis=-1): diff --git a/test/legacy_test/test_framework_debug_str.py b/test/legacy_test/test_framework_debug_str.py index ef9f294055b46..24bc2d62c8632 100644 --- a/test/legacy_test/test_framework_debug_str.py +++ b/test/legacy_test/test_framework_debug_str.py @@ -14,7 +14,7 @@ import unittest -from paddle.fluid.framework import Program +from paddle.base.framework import Program class TestDebugStringFramework(unittest.TestCase): diff --git a/test/legacy_test/test_frexp_api.py b/test/legacy_test/test_frexp_api.py index f14216ef718c6..520b6a90785a4 100644 --- a/test/legacy_test/test_frexp_api.py +++ b/test/legacy_test/test_frexp_api.py @@ -16,7 +16,7 @@ import numpy as np import paddle -import paddle.fluid +import paddle.base class TestFrexpAPI(unittest.TestCase): diff --git a/test/legacy_test/test_ftrl_op.py b/test/legacy_test/test_ftrl_op.py index 4493f428aaf68..2659eb423905f 100644 --- a/test/legacy_test/test_ftrl_op.py +++ b/test/legacy_test/test_ftrl_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest from op import Operator -from paddle.fluid import core +from paddle.base import core def ftrl_step(param, grad, rows, sq_accum, lin_accum, lr, l1, l2, lr_power): diff --git a/test/legacy_test/test_full_like_op.py b/test/legacy_test/test_full_like_op.py index d0c326d7b19b1..38e2b426e4775 100644 --- a/test/legacy_test/test_full_like_op.py +++ b/test/legacy_test/test_full_like_op.py @@ -19,8 +19,8 @@ import paddle import paddle.framework.dtype as dtypes -from paddle.fluid import core -from paddle.fluid.framework import convert_np_dtype_to_dtype_ +from paddle.base import core +from paddle.base.framework import convert_np_dtype_to_dtype_ from paddle.static import Program, program_guard @@ -114,7 +114,7 @@ def setUp(self): bf16_flag = self.dtype == np.uint16 x = np.zeros(self.shape).astype(np.float32 if bf16_flag else self.dtype) - x = OpTest.np_dtype_to_fluid_dtype(x) + x = OpTest.np_dtype_to_base_dtype(x) out = np.full_like(x, self.fill_value, self.dtype) diff --git a/test/legacy_test/test_full_op.py b/test/legacy_test/test_full_op.py index 9a5c95044927f..74e928e58a52a 100644 --- a/test/legacy_test/test_full_op.py +++ b/test/legacy_test/test_full_op.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import Program, program_guard +from paddle import base +from paddle.base import Program, program_guard # Test python API @@ -64,9 +64,9 @@ def 
test_api(self): shape=shape_tensor_int64, dtype=np.float32, fill_value=val ) - exe = fluid.Executor(place=fluid.CPUPlace()) + exe = base.Executor(place=base.CPUPlace()) res_1, res_2, res_3, res_4, res_5, res_6, res_7 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={ "shape_tensor_int32": np.array([1, 2]).astype("int32"), "shape_tensor_int64": np.array([1, 2]).astype("int64"), @@ -97,7 +97,7 @@ def test_api(self): ) def test_api_eager(self): - with fluid.dygraph.base.guard(): + with base.dygraph.base.guard(): positive_2_int32 = paddle.tensor.fill_constant([1], "int32", 2) positive_2_int64 = paddle.tensor.fill_constant([1], "int64", 2) positive_4_int64 = paddle.tensor.fill_constant( diff --git a/test/legacy_test/test_functional_conv1d.py b/test/legacy_test/test_functional_conv1d.py index d050c6163900b..5100385497b50 100644 --- a/test/legacy_test/test_functional_conv1d.py +++ b/test/legacy_test/test_functional_conv1d.py @@ -18,7 +18,7 @@ import numpy as np import paddle -import paddle.fluid.dygraph as dg +import paddle.base.dygraph as dg import paddle.nn.functional as F diff --git a/test/legacy_test/test_functional_conv1d_transpose.py b/test/legacy_test/test_functional_conv1d_transpose.py index 865c848f8ba1d..1f1dbd05940e4 100644 --- a/test/legacy_test/test_functional_conv1d_transpose.py +++ b/test/legacy_test/test_functional_conv1d_transpose.py @@ -18,7 +18,7 @@ import numpy as np import paddle -import paddle.fluid.dygraph as dg +import paddle.base.dygraph as dg import paddle.nn.functional as F diff --git a/test/legacy_test/test_functional_conv2d.py b/test/legacy_test/test_functional_conv2d.py index 2d8484cbee1e2..5b68ef2d46c0b 100644 --- a/test/legacy_test/test_functional_conv2d.py +++ b/test/legacy_test/test_functional_conv2d.py @@ -18,9 +18,9 @@ import numpy as np import paddle -import paddle.fluid.dygraph as dg +import paddle.base.dygraph as dg import paddle.nn.functional as F -from paddle import fluid +from paddle import base class TestFunctionalConv2D(TestCase): @@ -72,10 +72,10 @@ def prepare(self): ) def static_graph_case_1(self): - main = fluid.Program() - start = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, start): + main = base.Program() + start = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, start): if self.channel_last: x = paddle.static.data( "input", @@ -103,16 +103,16 @@ def static_graph_case_1(self): act=self.act, data_format=self.data_format, ) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) exe.run(start) (out,) = exe.run(main, feed={"input": self.input}, fetch_list=[y]) return out def static_graph_case_2(self): - main = fluid.Program() - start = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, start): + main = base.Program() + start = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, start): if self.channel_last: x = x = paddle.static.data( "input", @@ -146,7 +146,7 @@ def static_graph_case_2(self): if self.act == 'sigmoid': y = F.sigmoid(y) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) exe.run(start) feed_dict = {"input": self.input, "weight": self.weight} if not self.no_bias: @@ -185,14 +185,14 @@ def _test_identity(self): np.testing.assert_array_almost_equal(out2, out3) def test_identity_cpu(self): - self.place = fluid.CPUPlace() + self.place = base.CPUPlace() self._test_identity() @unittest.skipIf( - not fluid.core.is_compiled_with_cuda(), "core is not compiled 
with CUDA" + not base.core.is_compiled_with_cuda(), "core is not compiled with CUDA" ) def test_identity_gpu(self): - self.place = fluid.CUDAPlace(0) + self.place = base.CUDAPlace(0) self._test_identity() @@ -230,10 +230,10 @@ def prepare(self): self.bias_shape = (self.out_channels,) def static_graph_case(self): - main = fluid.Program() - start = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, start): + main = base.Program() + start = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, start): self.channel_last = self.data_format == "NHWC" if self.channel_last: x = x = paddle.static.data( @@ -505,10 +505,10 @@ def setUp(self): self.data_format = "NCHW" def static_graph_case(self): - main = fluid.Program() - start = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, start): + main = base.Program() + start = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, start): x = paddle.static.data( "input", self.input.shape, dtype=paddle.float32 ) @@ -527,7 +527,7 @@ def static_graph_case(self): act=None, data_format=self.data_format, ) - exe = fluid.Executor() + exe = base.Executor() exe.run(start) (out,) = exe.run(main, feed={"input": self.input}, fetch_list=[y]) return out diff --git a/test/legacy_test/test_functional_conv2d_transpose.py b/test/legacy_test/test_functional_conv2d_transpose.py index dd708614b8818..3bd999962ef09 100644 --- a/test/legacy_test/test_functional_conv2d_transpose.py +++ b/test/legacy_test/test_functional_conv2d_transpose.py @@ -18,9 +18,9 @@ import numpy as np import paddle -import paddle.fluid.dygraph as dg +import paddle.base.dygraph as dg import paddle.nn.functional as F -from paddle import fluid +from paddle import base class TestFunctionalConv2D(TestCase): @@ -73,10 +73,10 @@ def prepare(self): ) def static_graph_case_1(self): - main = fluid.Program() - start = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, start): + main = base.Program() + start = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, start): if self.channel_last: x = paddle.static.data( "input", @@ -104,16 +104,16 @@ def static_graph_case_1(self): else paddle.nn.initializer.Assign(self.bias), data_format=self.data_format, ) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) exe.run(start) (out,) = exe.run(main, feed={"input": self.input}, fetch_list=[y]) return out def static_graph_case_2(self): - main = fluid.Program() - start = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, start): + main = base.Program() + start = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, start): if self.channel_last: x = x = paddle.static.data( "input", @@ -144,7 +144,7 @@ def static_graph_case_2(self): groups=self.groups, data_format=self.data_format, ) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) exe.run(start) feed_dict = {"input": self.input, "weight": self.weight} if not self.no_bias: @@ -180,14 +180,14 @@ def _test_identity(self): np.testing.assert_array_almost_equal(out2, out3) def test_identity_cpu(self): - self.place = fluid.CPUPlace() + self.place = base.CPUPlace() self._test_identity() @unittest.skipIf( - not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA" + not base.core.is_compiled_with_cuda(), "core is not compiled with CUDA" ) def test_identity_gpu(self): - self.place = fluid.CUDAPlace(0) + 
self.place = base.CUDAPlace(0) self._test_identity() @@ -226,10 +226,10 @@ def prepare(self): self.bias_shape = (self.out_channels,) def static_graph_case(self): - main = fluid.Program() - start = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, start): + main = base.Program() + start = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, start): self.channel_last = self.data_format == "NHWC" if self.channel_last: x = x = paddle.static.data( @@ -513,10 +513,10 @@ def setUp(self): self.data_format = "NCHW" def static_graph_case(self): - main = fluid.Program() - start = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, start): + main = base.Program() + start = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, start): x = paddle.static.data( "input", self.input.shape, dtype=paddle.float32 ) @@ -535,7 +535,7 @@ def static_graph_case(self): act=None, data_format=self.data_format, ) - exe = fluid.Executor() + exe = base.Executor() exe.run(start) (out,) = exe.run(main, feed={"input": self.input}, fetch_list=[y]) return out diff --git a/test/legacy_test/test_functional_conv3d.py b/test/legacy_test/test_functional_conv3d.py index 3f3415afdbf11..0a5eb925f5835 100644 --- a/test/legacy_test/test_functional_conv3d.py +++ b/test/legacy_test/test_functional_conv3d.py @@ -18,9 +18,9 @@ import numpy as np import paddle -import paddle.fluid.dygraph as dg +import paddle.base.dygraph as dg import paddle.nn.functional as F -from paddle import fluid +from paddle import base class TestFunctionalConv3D(TestCase): @@ -72,10 +72,10 @@ def prepare(self): ) def static_graph_case_1(self): - main = fluid.Program() - start = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, start): + main = base.Program() + start = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, start): if self.channel_last: x = paddle.static.data( "input", @@ -103,16 +103,16 @@ def static_graph_case_1(self): act=self.act, data_format=self.data_format, ) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) exe.run(start) (out,) = exe.run(main, feed={"input": self.input}, fetch_list=[y]) return out def static_graph_case_2(self): - main = fluid.Program() - start = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, start): + main = base.Program() + start = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, start): if self.channel_last: x = x = paddle.static.data( "input", @@ -146,7 +146,7 @@ def static_graph_case_2(self): if self.act == 'sigmoid': y = F.sigmoid(y) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) exe.run(start) feed_dict = {"input": self.input, "weight": self.weight} if not self.no_bias: @@ -185,14 +185,14 @@ def _test_identity(self): np.testing.assert_array_almost_equal(out2, out3) def test_identity_cpu(self): - self.place = fluid.CPUPlace() + self.place = base.CPUPlace() self._test_identity() @unittest.skipIf( - not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA" + not base.core.is_compiled_with_cuda(), "core is not compiled with CUDA" ) def test_identity_gpu(self): - self.place = fluid.CUDAPlace(0) + self.place = base.CUDAPlace(0) self._test_identity() @@ -230,10 +230,10 @@ def prepare(self): self.bias_shape = (self.out_channels,) def static_graph_case(self): - main = fluid.Program() - start = fluid.Program() - with 
fluid.unique_name.guard(): - with fluid.program_guard(main, start): + main = base.Program() + start = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, start): self.channel_last = self.data_format == "NDHWC" if self.channel_last: x = x = paddle.static.data( @@ -480,10 +480,10 @@ def setUp(self): self.data_format = "NCDHW" def static_graph_case(self): - main = fluid.Program() - start = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, start): + main = base.Program() + start = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, start): x = paddle.static.data( "input", self.input.shape, dtype=paddle.float32 ) @@ -502,7 +502,7 @@ def static_graph_case(self): act=None, data_format=self.data_format, ) - exe = fluid.Executor() + exe = base.Executor() exe.run(start) (out,) = exe.run(main, feed={"input": self.input}, fetch_list=[y]) return out diff --git a/test/legacy_test/test_functional_conv3d_transpose.py b/test/legacy_test/test_functional_conv3d_transpose.py index 22aaeb02a92f5..b0bd104d2935b 100644 --- a/test/legacy_test/test_functional_conv3d_transpose.py +++ b/test/legacy_test/test_functional_conv3d_transpose.py @@ -18,9 +18,9 @@ import numpy as np import paddle -import paddle.fluid.dygraph as dg +import paddle.base.dygraph as dg import paddle.nn.functional as F -from paddle import fluid +from paddle import base class TestFunctionalConv3DTranspose(TestCase): @@ -73,10 +73,10 @@ def prepare(self): ) def static_graph_case_1(self): - main = fluid.Program() - start = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, start): + main = base.Program() + start = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, start): if self.channel_last: x = paddle.static.data( "input", @@ -105,16 +105,16 @@ def static_graph_case_1(self): act=self.act, data_format=self.data_format, ) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) exe.run(start) (out,) = exe.run(main, feed={"input": self.input}, fetch_list=[y]) return out def static_graph_case_2(self): - main = fluid.Program() - start = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, start): + main = base.Program() + start = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, start): if self.channel_last: x = x = paddle.static.data( "input", @@ -147,7 +147,7 @@ def static_graph_case_2(self): ) if self.act == 'sigmoid': y = F.sigmoid(y) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) exe.run(start) feed_dict = {"input": self.input, "weight": self.weight} if not self.no_bias: @@ -185,14 +185,14 @@ def _test_identity(self): np.testing.assert_array_almost_equal(out2, out3) def test_identity_cpu(self): - self.place = fluid.CPUPlace() + self.place = base.CPUPlace() self._test_identity() @unittest.skipIf( - not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA" + not base.core.is_compiled_with_cuda(), "core is not compiled with CUDA" ) def test_identity_gpu(self): - self.place = fluid.CUDAPlace(0) + self.place = base.CUDAPlace(0) self._test_identity() @@ -231,10 +231,10 @@ def prepare(self): self.bias_shape = (self.out_channels,) def static_graph_case(self): - main = fluid.Program() - start = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, start): + main = base.Program() + start = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, 
start): self.channel_last = self.data_format == "NDHWC" if self.channel_last: x = x = paddle.static.data( @@ -538,10 +538,10 @@ def setUp(self): self.data_format = "NCDHW" def static_graph_case(self): - main = fluid.Program() - start = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, start): + main = base.Program() + start = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, start): x = paddle.static.data( "input", self.input.shape, dtype=paddle.float32 ) @@ -560,7 +560,7 @@ def static_graph_case(self): act=None, data_format=self.data_format, ) - exe = fluid.Executor() + exe = base.Executor() exe.run(start) (out,) = exe.run(main, feed={"input": self.input}, fetch_list=[y]) return out diff --git a/test/legacy_test/test_fuse_all_reduce_pass.py b/test/legacy_test/test_fuse_all_reduce_pass.py index b7ea594522b9f..0745844bda323 100644 --- a/test/legacy_test/test_fuse_all_reduce_pass.py +++ b/test/legacy_test/test_fuse_all_reduce_pass.py @@ -21,8 +21,8 @@ from simple_nets import bow_net, fc_with_batchnorm, init_data, simple_fc_net import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -142,8 +142,8 @@ def setUpClass(cls): cls.train_data = next(reader) def get_data_from_feeder(self): - place = fluid.CPUPlace() - feeder = fluid.DataFeeder(feed_list=["words", "label"], place=place) + place = base.CPUPlace() + feeder = base.DataFeeder(feed_list=["words", "label"], place=place) return feeder.feed(self.train_data) def _decorate_compare_fused_all_reduce(self, model, use_device): diff --git a/test/legacy_test/test_fuse_bn_act_pass.py b/test/legacy_test/test_fuse_bn_act_pass.py index 711b4061506d5..6faaff59b51ef 100644 --- a/test/legacy_test/test_fuse_bn_act_pass.py +++ b/test/legacy_test/test_fuse_bn_act_pass.py @@ -15,12 +15,12 @@ import unittest import paddle -from paddle import fluid +from paddle import base class TestFuseBatchNormActPass(unittest.TestCase): def build_program(self, main_program, startup_program, use_cuda, seed=1): - with fluid.program_guard(main_program, startup_program): + with base.program_guard(main_program, startup_program): x = paddle.static.data( name='x', shape=[-1, 1, 28, 28], dtype='float32' ) @@ -35,11 +35,11 @@ def build_program(self, main_program, startup_program, use_cuda, seed=1): bias_attr=False, data_format='NHWC', ) - param_attr = fluid.ParamAttr( + param_attr = base.ParamAttr( name='batch_norm_w', initializer=paddle.nn.initializer.Constant(value=1.0), ) - bias_attr = fluid.ParamAttr( + bias_attr = base.ParamAttr( name='batch_norm_b', initializer=paddle.nn.initializer.Constant(value=0.0), ) @@ -72,26 +72,26 @@ def build_program(self, main_program, startup_program, use_cuda, seed=1): def check(self, place, use_cuda): paddle.seed(1) paddle.framework.random._manual_program_seed(1) - main_program = fluid.Program() - startup_program = fluid.Program() + main_program = base.Program() + startup_program = base.Program() x, y, loss = self.build_program(main_program, startup_program, use_cuda) - exe = fluid.Executor(place) + exe = base.Executor(place) iters = 8 batch_size = 16 - feeder = fluid.DataFeeder(feed_list=[x, y], place=place) + feeder = base.DataFeeder(feed_list=[x, y], place=place) # close fused_bn_act_ops - build_strategy = fluid.BuildStrategy() + build_strategy = base.BuildStrategy() build_strategy.fuse_bn_act_ops = False - binary = fluid.CompiledProgram( + binary = base.CompiledProgram( main_program, 
build_strategy=build_strategy ) train_reader = paddle.batch( paddle.dataset.mnist.train(), batch_size=batch_size ) loss_vals = [] - scope = fluid.Scope() - with fluid.scope_guard(scope): + scope = base.Scope() + with base.scope_guard(scope): exe.run(startup_program) for _ in range(iters): data = next(train_reader()) @@ -101,17 +101,17 @@ def check(self, place, use_cuda): loss_vals.append(loss_v[0]) # open fused_bn_act_ops - build_strategy_fused = fluid.BuildStrategy() + build_strategy_fused = base.BuildStrategy() build_strategy_fused.fuse_bn_act_ops = True - binary_fused = fluid.CompiledProgram( + binary_fused = base.CompiledProgram( main_program, build_strategy=build_strategy_fused ) train_reader_fused = paddle.batch( paddle.dataset.mnist.train(), batch_size=batch_size ) loss_vals_fused = [] - scope_fused = fluid.Scope() - with fluid.scope_guard(scope_fused): + scope_fused = base.Scope() + with base.scope_guard(scope_fused): exe.run(startup_program) for _ in range(iters): data = next(train_reader_fused()) @@ -125,12 +125,12 @@ def check(self, place, use_cuda): self.assertAlmostEqual(loss_vals[i], loss_vals_fused[i], delta=1e-5) def test_fuse_bn_act_pass_cpu(self): - place = fluid.CPUPlace() + place = base.CPUPlace() self.check(place, use_cuda=False) def test_fuse_bn_act_pass_cuda(self): - if fluid.core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) self.check(place, use_cuda=True) diff --git a/test/legacy_test/test_fuse_bn_add_act_pass.py b/test/legacy_test/test_fuse_bn_add_act_pass.py index f0b11655916de..d121a211e7afd 100644 --- a/test/legacy_test/test_fuse_bn_add_act_pass.py +++ b/test/legacy_test/test_fuse_bn_add_act_pass.py @@ -18,8 +18,8 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -29,33 +29,33 @@ ) class TestFusedBnAddActAPI(unittest.TestCase): def setUp(self): - self.conv_param_attr1 = fluid.ParamAttr( + self.conv_param_attr1 = base.ParamAttr( name='conv2d_1.weight', initializer=paddle.nn.initializer.XavierNormal(), learning_rate=0.001, ) - self.conv_param_attr2 = fluid.ParamAttr( + self.conv_param_attr2 = base.ParamAttr( name='conv2d_2.weight', initializer=paddle.nn.initializer.XavierNormal(), learning_rate=0.001, ) - self.bn_param_attr1 = fluid.ParamAttr( + self.bn_param_attr1 = base.ParamAttr( name='batch_norm_w_1', initializer=paddle.nn.initializer.Constant(value=1.0), ) - self.bn_bias_attr1 = fluid.ParamAttr( + self.bn_bias_attr1 = base.ParamAttr( name='batch_norm_b_1', initializer=paddle.nn.initializer.Constant(value=0.0), ) - self.bn_param_attr2 = fluid.ParamAttr( + self.bn_param_attr2 = base.ParamAttr( name='batch_norm_w_2', initializer=paddle.nn.initializer.Constant(value=1.0), ) - self.bn_bias_attr2 = fluid.ParamAttr( + self.bn_bias_attr2 = base.ParamAttr( name='batch_norm_b_2', initializer=paddle.nn.initializer.Constant(value=0.0), ) - self.fc_param_attr = fluid.ParamAttr( + self.fc_param_attr = base.ParamAttr( name='fc.weight', initializer=paddle.nn.initializer.XavierNormal(), ) @@ -63,7 +63,7 @@ def setUp(self): def build_fused_program( self, main_program, startup_program, use_cuda, seed=1 ): - with fluid.program_guard(main_program, startup_program): + with base.program_guard(main_program, startup_program): x = paddle.static.data( name='x', shape=[-1, 1, 28, 28], dtype='float32' ) @@ -124,7 +124,7 @@ def build_fused_program( def build_origin_program( 
self, main_program, startup_program, use_cuda, seed=1 ): - with fluid.program_guard(main_program, startup_program): + with base.program_guard(main_program, startup_program): x = paddle.static.data( name='x', shape=[-1, 1, 28, 28], dtype='float32' ) @@ -191,22 +191,22 @@ def check(self, place, use_cuda): batch_size = 16 # build_fused_program: turn on fuse_bn_add_act_ops - main_program = fluid.Program() - startup_program = fluid.Program() + main_program = base.Program() + startup_program = base.Program() loss = self.build_origin_program( main_program, startup_program, use_cuda ) - build_strategy_fused = fluid.BuildStrategy() + build_strategy_fused = base.BuildStrategy() build_strategy_fused.fuse_bn_add_act_ops = True - binary_fused = fluid.CompiledProgram( + binary_fused = base.CompiledProgram( main_program, build_strategy=build_strategy_fused ) - exe = fluid.Executor(place) + exe = base.Executor(place) loss_vals_fused = [] x_data = [] y_data = [] - scope = fluid.Scope() - with fluid.scope_guard(scope): + scope = base.Scope() + with base.scope_guard(scope): exe.run(startup_program) for _ in range(iters): x = np.random.random((batch_size, 1, 28, 28)).astype("float32") @@ -219,14 +219,14 @@ def check(self, place, use_cuda): loss_vals_fused.append(loss_v[0]) # build_origin_program: turn off fused_bn_act_ops - build_strategy = fluid.BuildStrategy() + build_strategy = base.BuildStrategy() build_strategy.fuse_bn_add_act_ops = False - binary = fluid.CompiledProgram( + binary = base.CompiledProgram( main_program, build_strategy=build_strategy_fused ) loss_vals = [] - scope = fluid.Scope() - with fluid.scope_guard(scope): + scope = base.Scope() + with base.scope_guard(scope): exe.run(startup_program) for i in range(iters): loss_v = exe.run( @@ -241,20 +241,20 @@ def check(self, place, use_cuda): self.assertAlmostEqual(loss_vals[i], loss_vals_fused[i], delta=1e-5) def test_fuse_bn_add_act(self): - place = fluid.CUDAPlace(0) + place = base.CUDAPlace(0) self.check(place, use_cuda=True) def test_fuse_bn_add_act_API(self): # build_fused_program: use fused_bn_add_act python API - main_program = fluid.Program() - startup_program = fluid.Program() - place = fluid.CUDAPlace(0) + main_program = base.Program() + startup_program = base.Program() + place = base.CUDAPlace(0) x, y, loss = self.build_fused_program( main_program, startup_program, use_cuda=True ) - exe = fluid.Executor(place) - scope = fluid.Scope() - with fluid.scope_guard(scope): + exe = base.Executor(place) + scope = base.Scope() + with base.scope_guard(scope): exe.run(startup_program) for _ in range(5): x = np.random.random((4, 1, 28, 28)).astype("float32") diff --git a/test/legacy_test/test_fuse_elewise_add_act_pass.py b/test/legacy_test/test_fuse_elewise_add_act_pass.py index 02348fae60a0e..b9237a14bd108 100644 --- a/test/legacy_test/test_fuse_elewise_add_act_pass.py +++ b/test/legacy_test/test_fuse_elewise_add_act_pass.py @@ -21,8 +21,8 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestMNIST(TestParallelExecutorBase): @@ -111,23 +111,23 @@ def check(self, place): paddle.seed(1) numpy.random.seed(1) paddle.framework.random._manual_program_seed(1) - main_program = fluid.Program() - startup_program = fluid.Program() + main_program = base.Program() + startup_program = base.Program() X, Y, loss = self.build_program(main_program, startup_program) - exe = fluid.Executor(place) + exe = base.Executor(place) x = 
numpy.random.random(size=(3, 3)).astype('float32') y = numpy.random.random(size=(3, 3)).astype('float32') label = numpy.random.random(size=(3, 3)).astype('float32') # open fused_pass - build_strategy = fluid.BuildStrategy() + build_strategy = base.BuildStrategy() build_strategy.fuse_elewise_add_act_ops = True compiled_prog_fused = paddle.static.CompiledProgram( main_program, build_strategy=build_strategy ) - scope = fluid.Scope() - with fluid.scope_guard(scope): + scope = base.Scope() + with base.scope_guard(scope): exe.run(startup_program) loss_data_fused = exe.run( compiled_prog_fused, @@ -136,13 +136,13 @@ def check(self, place): ) # close fused_pass - build_strategy = fluid.BuildStrategy() + build_strategy = base.BuildStrategy() build_strategy.fuse_elewise_add_act_ops = False compiled_prog = paddle.static.CompiledProgram( main_program, build_strategy=build_strategy ) - scope = fluid.Scope() - with fluid.scope_guard(scope): + scope = base.Scope() + with base.scope_guard(scope): exe.run(startup_program) loss_data = exe.run( compiled_prog, feed={"X": x, "Y": y}, fetch_list=[loss.name] @@ -151,12 +151,12 @@ def check(self, place): self.assertEqual(loss_data_fused, loss_data) def test_fuse_act_add_grad_pass_cpu(self): - place = fluid.CPUPlace() + place = base.CPUPlace() self.check(place) def test_fuse_act_add_grad_pass_cuda(self): - if fluid.core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) self.check(place) diff --git a/test/legacy_test/test_fuse_gemm_epilogue_pass.py b/test/legacy_test/test_fuse_gemm_epilogue_pass.py index 13480e3d75ded..177ebfa6b1819 100644 --- a/test/legacy_test/test_fuse_gemm_epilogue_pass.py +++ b/test/legacy_test/test_fuse_gemm_epilogue_pass.py @@ -19,7 +19,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core def compare(ref, res, atol, rtol): diff --git a/test/legacy_test/test_fuse_optimizer_pass.py b/test/legacy_test/test_fuse_optimizer_pass.py index d85355306dfc3..3fa7f3d999a61 100644 --- a/test/legacy_test/test_fuse_optimizer_pass.py +++ b/test/legacy_test/test_fuse_optimizer_pass.py @@ -21,8 +21,8 @@ from simple_nets import bow_net, fc_with_batchnorm, init_data import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestFuseOptimizationOps(TestParallelExecutorBase): @@ -124,8 +124,8 @@ def setUpClass(cls): cls.train_data = next(reader) def _get_data_from_feeder(self): - place = fluid.CPUPlace() - feeder = fluid.DataFeeder(feed_list=["words", "label"], place=place) + place = base.CPUPlace() + feeder = base.DataFeeder(feed_list=["words", "label"], place=place) return feeder.feed(self.train_data) def _decorate_compare_fused_optimizer_ops( diff --git a/test/legacy_test/test_fuse_relu_depthwise_conv_pass.py b/test/legacy_test/test_fuse_relu_depthwise_conv_pass.py index 70487e22448f4..fd294535d55d7 100644 --- a/test/legacy_test/test_fuse_relu_depthwise_conv_pass.py +++ b/test/legacy_test/test_fuse_relu_depthwise_conv_pass.py @@ -19,7 +19,7 @@ import paddle import paddle.nn.functional as F -from paddle.fluid import core +from paddle.base import core def norm(*args, **kargs): diff --git a/test/legacy_test/test_fused_attention_no_dropout.py b/test/legacy_test/test_fused_attention_no_dropout.py index 4f18abd79e0fe..ca9b4d90f1dde 100644 --- a/test/legacy_test/test_fused_attention_no_dropout.py +++ b/test/legacy_test/test_fused_attention_no_dropout.py @@ -195,7 +195,7 @@ def 
set_configs(self): class TestFusedAttentionAPIError(unittest.TestCase): def test_invalid_x_rank(self): def test_x_rank_1(): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): layer = FusedMultiHeadAttention(embed_dim=1, num_heads=1) array = np.array([1.9], dtype=np.float32) x = paddle.to_tensor(np.reshape(array, [1]), dtype='float32') diff --git a/test/legacy_test/test_fused_attention_op.py b/test/legacy_test/test_fused_attention_op.py index 1feed2cccce3a..271b4ab6bec33 100644 --- a/test/legacy_test/test_fused_attention_op.py +++ b/test/legacy_test/test_fused_attention_op.py @@ -21,7 +21,7 @@ import paddle.incubate.nn.functional as incubate_f import paddle.nn.functional as F from paddle import tensor -from paddle.fluid.framework import default_main_program +from paddle.base.framework import default_main_program from paddle.nn.layer.common import Dropout, Linear from paddle.nn.layer.norm import LayerNorm from paddle.nn.layer.transformer import _convert_attention_mask diff --git a/test/legacy_test/test_fused_attention_pass.py b/test/legacy_test/test_fused_attention_pass.py index 3387662d75827..db938ab39974e 100644 --- a/test/legacy_test/test_fused_attention_pass.py +++ b/test/legacy_test/test_fused_attention_pass.py @@ -19,7 +19,7 @@ import paddle import paddle.nn.functional as F from paddle.distributed.passes import PassManager, new_pass -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/legacy_test/test_fused_bias_act_op.py b/test/legacy_test/test_fused_bias_act_op.py index 53ed9cc330693..054932f13f57d 100644 --- a/test/legacy_test/test_fused_bias_act_op.py +++ b/test/legacy_test/test_fused_bias_act_op.py @@ -20,7 +20,7 @@ import paddle import paddle.nn.functional as F -from paddle.fluid import core +from paddle.base import core def round_type_1_process(val): diff --git a/test/legacy_test/test_fused_bias_dropout_residual_layer_norm_op.py b/test/legacy_test/test_fused_bias_dropout_residual_layer_norm_op.py index 8d9295c27276e..87c7bcf4db908 100644 --- a/test/legacy_test/test_fused_bias_dropout_residual_layer_norm_op.py +++ b/test/legacy_test/test_fused_bias_dropout_residual_layer_norm_op.py @@ -19,7 +19,7 @@ import paddle import paddle.incubate.nn.functional as incubate_f -from paddle.fluid.framework import default_main_program +from paddle.base.framework import default_main_program from paddle.nn.layer.common import Dropout from paddle.nn.layer.norm import LayerNorm diff --git a/test/legacy_test/test_fused_dropout_add_op.py b/test/legacy_test/test_fused_dropout_add_op.py index efd1833a713cb..6466775f432da 100644 --- a/test/legacy_test/test_fused_dropout_add_op.py +++ b/test/legacy_test/test_fused_dropout_add_op.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.incubate.nn.functional import fused_dropout_add from paddle.incubate.nn.layer.fused_dropout_add import FusedDropoutAdd @@ -152,7 +152,7 @@ def test_static_op(self): outs = fused_dropout_add(xs, ys, p=0.5, training=True) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) out_s = exe.run( feed={ "xs": x_data.astype('float16'), diff --git a/test/legacy_test/test_fused_ec_moe_op.py b/test/legacy_test/test_fused_ec_moe_op.py index 87b065a9017ec..3ac780806c911 100644 --- a/test/legacy_test/test_fused_ec_moe_op.py +++ b/test/legacy_test/test_fused_ec_moe_op.py @@ -19,7 +19,7 @@ import paddle import paddle.nn.functional as F 
-from paddle.fluid.framework import default_main_program +from paddle.base.framework import default_main_program from paddle.incubate.nn.functional import fused_ec_moe from paddle.nn.layer.common import Linear diff --git a/test/legacy_test/test_fused_elemwise_activation_op.py b/test/legacy_test/test_fused_elemwise_activation_op.py index 44a4588a9ad34..28cffdcabd579 100644 --- a/test/legacy_test/test_fused_elemwise_activation_op.py +++ b/test/legacy_test/test_fused_elemwise_activation_op.py @@ -18,7 +18,7 @@ import numpy as np from eager_op_test import OpTest -from paddle.fluid import core +from paddle.base import core # TestFusedElementwiseActivationOp # TestFusedElementwiseActivationOp_scalar @@ -51,8 +51,8 @@ def setUp(self): self.intermediate_out = self.intermediate_out.astype(self.dtype) self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), + 'X': OpTest.np_dtype_to_base_dtype(self.x), + 'Y': OpTest.np_dtype_to_base_dtype(self.y), } if self.attrs["save_intermediate_out"]: self.outputs = { diff --git a/test/legacy_test/test_fused_emb_seq_pool_op.py b/test/legacy_test/test_fused_emb_seq_pool_op.py index 9a8e1f3913f22..4d2e774e5465c 100644 --- a/test/legacy_test/test_fused_emb_seq_pool_op.py +++ b/test/legacy_test/test_fused_emb_seq_pool_op.py @@ -108,7 +108,7 @@ class TestFusedEmbeddingSeqPoolApi(unittest.TestCase): def test_api(self): with paddle_static_guard(): if ver.mkl() == "ON" and 'Linux' in platform.platform(): - from paddle import fluid + from paddle import base dict_size = 20 data_t = paddle.static.data( @@ -123,11 +123,11 @@ def test_api(self): is_sparse=False, ) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + place = base.CPUPlace() + exe = base.Executor(place) + exe.run(base.default_startup_program()) # prepare input words' idx - x_tensor = fluid.core.LoDTensor() + x_tensor = base.core.LoDTensor() idxs = np.random.randint(1, 10, (8)).astype("int64") x_tensor.set(idxs, place) diff --git a/test/legacy_test/test_fused_fc_elementwise_layernorm_op.py b/test/legacy_test/test_fused_fc_elementwise_layernorm_op.py index f4812116d9385..330931fdc4170 100644 --- a/test/legacy_test/test_fused_fc_elementwise_layernorm_op.py +++ b/test/legacy_test/test_fused_fc_elementwise_layernorm_op.py @@ -19,7 +19,7 @@ from test_fc_op import MatrixGenerate, fc_refer from test_layer_norm_op import _reference_layer_norm_naive -from paddle.fluid import core +from paddle.base import core np.random.random(123) diff --git a/test/legacy_test/test_fused_feedforward_op.py b/test/legacy_test/test_fused_feedforward_op.py index c8739c38e7699..6aba4784ff6f6 100644 --- a/test/legacy_test/test_fused_feedforward_op.py +++ b/test/legacy_test/test_fused_feedforward_op.py @@ -19,7 +19,7 @@ import paddle import paddle.incubate.nn.functional as incubate_f import paddle.nn.functional as F -from paddle.fluid.framework import default_main_program +from paddle.base.framework import default_main_program from paddle.nn.layer import transformer from paddle.nn.layer.common import Dropout, Linear from paddle.nn.layer.norm import LayerNorm diff --git a/test/legacy_test/test_fused_feedforward_pass.py b/test/legacy_test/test_fused_feedforward_pass.py index 107e3f319d3d7..9fe700f6a683e 100644 --- a/test/legacy_test/test_fused_feedforward_pass.py +++ b/test/legacy_test/test_fused_feedforward_pass.py @@ -19,7 +19,7 @@ import paddle from paddle import nn from paddle.distributed.passes import PassManager, new_pass -from 
paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/legacy_test/test_fused_gate_attention_op.py b/test/legacy_test/test_fused_gate_attention_op.py index e88b923f287b0..7bb5790f0c766 100644 --- a/test/legacy_test/test_fused_gate_attention_op.py +++ b/test/legacy_test/test_fused_gate_attention_op.py @@ -30,7 +30,7 @@ import paddle import paddle.incubate.nn.functional as F from paddle import _legacy_C_ops, nn -from paddle.fluid import core +from paddle.base import core @unittest.skipIf( diff --git a/test/legacy_test/test_fused_gemm_epilogue_grad_op.py b/test/legacy_test/test_fused_gemm_epilogue_grad_op.py index f7a7d8dbf6608..0014f5a440cc9 100644 --- a/test/legacy_test/test_fused_gemm_epilogue_grad_op.py +++ b/test/legacy_test/test_fused_gemm_epilogue_grad_op.py @@ -19,7 +19,7 @@ from eager_op_test import OpTest, skip_check_grad_ci import paddle -from paddle.fluid import core +from paddle.base import core def get_outputs(DOut, X, Y): diff --git a/test/legacy_test/test_fused_gemm_epilogue_op.py b/test/legacy_test/test_fused_gemm_epilogue_op.py index 5aeae6671882e..6064536ec4a90 100644 --- a/test/legacy_test/test_fused_gemm_epilogue_op.py +++ b/test/legacy_test/test_fused_gemm_epilogue_op.py @@ -19,7 +19,7 @@ from eager_op_test import OpTest, skip_check_grad_ci, skip_check_inplace_ci import paddle -from paddle.fluid import core +from paddle.base import core from paddle.incubate.nn.functional import fused_linear_activation diff --git a/test/legacy_test/test_fused_layernorm_op.py b/test/legacy_test/test_fused_layernorm_op.py index a50f216c67315..35c897483550a 100644 --- a/test/legacy_test/test_fused_layernorm_op.py +++ b/test/legacy_test/test_fused_layernorm_op.py @@ -16,8 +16,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def quant_helper( @@ -433,7 +433,7 @@ def check_layernorm(self, x_np, gamma_np, beta_np, dtype): self.epsilon, begin_norm_axis=1, ) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) out_s = exe.run( feed={ "x_static": x_np.astype(dtype), @@ -483,7 +483,7 @@ def check_layernorm_int8(self, x_np, gamma_np, beta_np, dtype): quant_max_bound=self.quant_max_bound, quant_min_bound=self.quant_min_bound, ) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) out_s = exe.run( feed={ "x_static": x_np.astype(dtype), @@ -590,7 +590,7 @@ def check_residual_bias_layernorm( residual=residual_static, ) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) out_s = exe.run( feed={ "x_static": x_np.astype(dtype), @@ -664,7 +664,7 @@ def check_residual_bias_layernorm_int8( quant_min_bound=self.quant_min_bound, ) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) out_s = exe.run( feed={ "x_static": x_np.astype(dtype), diff --git a/test/legacy_test/test_fused_matmul_bias.py b/test/legacy_test/test_fused_matmul_bias.py index 6210ad56cd847..85666710b0e45 100644 --- a/test/legacy_test/test_fused_matmul_bias.py +++ b/test/legacy_test/test_fused_matmul_bias.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core from paddle.incubate.nn import FusedLinear from paddle.incubate.nn.functional import fused_linear, fused_matmul_bias diff --git a/test/legacy_test/test_fused_multi_transformer_int8_op.py b/test/legacy_test/test_fused_multi_transformer_int8_op.py index d54eff322b64d..ba4be8f9831a4 100644 --- 
a/test/legacy_test/test_fused_multi_transformer_int8_op.py +++ b/test/legacy_test/test_fused_multi_transformer_int8_op.py @@ -20,8 +20,8 @@ import paddle import paddle.nn.functional as F from paddle import _legacy_C_ops, tensor -from paddle.fluid import core -from paddle.fluid.framework import default_main_program +from paddle.base import core +from paddle.base.framework import default_main_program from paddle.nn.layer.common import Dropout from paddle.nn.layer.norm import LayerNorm from paddle.nn.layer.transformer import _convert_attention_mask diff --git a/test/legacy_test/test_fused_multi_transformer_op.py b/test/legacy_test/test_fused_multi_transformer_op.py index 5ae564dcdb0ff..dbe4fa6f014e8 100644 --- a/test/legacy_test/test_fused_multi_transformer_op.py +++ b/test/legacy_test/test_fused_multi_transformer_op.py @@ -21,7 +21,7 @@ import paddle import paddle.nn.functional as F from paddle import tensor -from paddle.fluid.framework import default_main_program +from paddle.base.framework import default_main_program from paddle.incubate.nn import FusedMultiTransformer from paddle.incubate.nn.functional import fused_multi_transformer from paddle.nn.layer.common import Dropout, Linear @@ -57,7 +57,7 @@ def setUp(self): # use autograd to check grad in this unittest. self.__class__.no_need_check_grad = False - bias_attr = paddle.fluid.ParamAttr( + bias_attr = paddle.base.ParamAttr( initializer=paddle.paddle.nn.initializer.Constant(value=0.0005) ) self.q_proj = Linear( @@ -996,13 +996,13 @@ def GetFusedMultiTransformerOutStatic(self): } if self.has_pre_cache: out = exe.run( - paddle.fluid.default_main_program(), + paddle.base.default_main_program(), feed=feed_data, fetch_list=[final_out[0].name], ) else: out = exe.run( - paddle.fluid.default_main_program(), + paddle.base.default_main_program(), feed=feed_data, fetch_list=[final_out.name], ) diff --git a/test/legacy_test/test_fused_multihead_matmul_op.py b/test/legacy_test/test_fused_multihead_matmul_op.py index d600aef0b98b4..ebf73fd6353fb 100644 --- a/test/legacy_test/test_fused_multihead_matmul_op.py +++ b/test/legacy_test/test_fused_multihead_matmul_op.py @@ -17,7 +17,7 @@ import numpy as np from eager_op_test import OpTest -from paddle.fluid import core +from paddle.base import core np.random.random(123) diff --git a/test/legacy_test/test_fused_rotary_position_embedding.py b/test/legacy_test/test_fused_rotary_position_embedding.py index de6355d56a5ee..5be92f6f9b705 100644 --- a/test/legacy_test/test_fused_rotary_position_embedding.py +++ b/test/legacy_test/test_fused_rotary_position_embedding.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core from paddle.incubate.nn.functional import fused_rotary_position_embedding diff --git a/test/legacy_test/test_fused_transformer_encoder_layer.py b/test/legacy_test/test_fused_transformer_encoder_layer.py index c4b0a47420a1a..ed73401252828 100644 --- a/test/legacy_test/test_fused_transformer_encoder_layer.py +++ b/test/legacy_test/test_fused_transformer_encoder_layer.py @@ -16,7 +16,7 @@ import numpy as np import paddle -from paddle.fluid.framework import default_main_program, in_dygraph_mode +from paddle.base.framework import default_main_program, in_dygraph_mode from paddle.incubate.nn import FusedTransformerEncoderLayer from paddle.nn import TransformerEncoderLayer diff --git a/test/legacy_test/test_fusion_transpose_flatten_concat_op.py b/test/legacy_test/test_fusion_transpose_flatten_concat_op.py index 873687963faba..cb461ec073651 
100644 --- a/test/legacy_test/test_fusion_transpose_flatten_concat_op.py +++ b/test/legacy_test/test_fusion_transpose_flatten_concat_op.py @@ -17,7 +17,7 @@ import numpy as np from eager_op_test import OpTest -from paddle.fluid import core +from paddle.base import core @unittest.skipIf( diff --git a/test/legacy_test/test_gather_nd_op.py b/test/legacy_test/test_gather_nd_op.py index 6102a0a8fcc69..d934b047ae149 100644 --- a/test/legacy_test/test_gather_nd_op.py +++ b/test/legacy_test/test_gather_nd_op.py @@ -22,8 +22,8 @@ ) import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestGatherNdOpWithEmptyIndex(OpTest): @@ -554,14 +554,14 @@ def test_index_dtype(): class TestGatherNdAPI2(unittest.TestCase): def test_static(self): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): data1 = paddle.static.data('data1', shape=[-1, 2], dtype='float64') data1.desc.set_need_check_feed(False) index = paddle.static.data('index', shape=[-1, 1], dtype='int32') index.desc.set_need_check_feed(False) out = paddle.gather_nd(data1, index) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) input = np.array([[1, 2], [3, 4], [5, 6]]) index_1 = np.array([[1]]).astype('int32') (result,) = exe.run( @@ -571,7 +571,7 @@ def test_static(self): np.testing.assert_allclose(result, expected_output, rtol=1e-05) def test_static_fp16_with_gpu(self): - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): place = paddle.CUDAPlace(0) with paddle.static.program_guard( paddle.static.Program(), paddle.static.Program() @@ -607,8 +607,8 @@ def test_imperative(self): paddle.disable_static() input_1 = np.array([[1, 2], [3, 4], [5, 6]]) index_1 = np.array([[1]]) - input = fluid.dygraph.to_variable(input_1) - index = fluid.dygraph.to_variable(index_1) + input = base.dygraph.to_variable(input_1) + index = base.dygraph.to_variable(index_1) output = paddle.gather(input, index) output_np = output.numpy() expected_output = np.array([[3, 4]]) diff --git a/test/legacy_test/test_gather_op.py b/test/legacy_test/test_gather_op.py index ff67a85484a86..8bc012883fc65 100644 --- a/test/legacy_test/test_gather_op.py +++ b/test/legacy_test/test_gather_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid.dygraph.base import switch_to_static_graph +from paddle import base +from paddle.base.dygraph.base import switch_to_static_graph from paddle.framework import core @@ -419,14 +419,14 @@ def config_dtype(self): class API_TestGather(unittest.TestCase): def test_out1(self): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): data1 = paddle.static.data('data1', shape=[-1, 2], dtype='float64') data1.desc.set_need_check_feed(False) index = paddle.static.data('index', shape=[-1, 1], dtype='int32') index.desc.set_need_check_feed(False) out = paddle.gather(data1, index) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) input = np.array([[1, 2], [3, 4], [5, 6]]) index_1 = np.array([1, 2]) (result,) = exe.run( @@ -500,7 +500,7 @@ def test_large_data(self): index = np.random.randint(0, 22682, size=(8859027)) def test_dygraph(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): gpu_out = paddle.gather( 
paddle.to_tensor(x), paddle.to_tensor(index) ) @@ -560,7 +560,7 @@ def test_axis_dtype1(): self.assertRaises(TypeError, test_axis_dtype1) def test_error2(self): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): shape = [8, 9, 6] x = paddle.static.data(shape=shape, dtype='int8', name='x') index = paddle.static.data(shape=shape, dtype='int32', name='mask') diff --git a/test/legacy_test/test_gather_tree_op.py b/test/legacy_test/test_gather_tree_op.py index 3bc12a1a40954..b59e30098aecc 100644 --- a/test/legacy_test/test_gather_tree_op.py +++ b/test/legacy_test/test_gather_tree_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest import paddle -from paddle.fluid.framework import Program, program_guard +from paddle.base.framework import Program, program_guard class TestGatherTreeOp(OpTest): diff --git a/test/legacy_test/test_gaussian_nll_loss.py b/test/legacy_test/test_gaussian_nll_loss.py index 1480c83eb26ae..4a33a19fb766a 100644 --- a/test/legacy_test/test_gaussian_nll_loss.py +++ b/test/legacy_test/test_gaussian_nll_loss.py @@ -18,7 +18,7 @@ import paddle import paddle.nn.functional as F -from paddle.fluid import core +from paddle.base import core np.random.seed(10) diff --git a/test/legacy_test/test_gaussian_random_op.py b/test/legacy_test/test_gaussian_random_op.py index d134d8578cbda..293e79d05fcea 100644 --- a/test/legacy_test/test_gaussian_random_op.py +++ b/test/legacy_test/test_gaussian_random_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, convert_uint16_to_float, paddle_static_guard import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.tensor import random @@ -75,7 +75,7 @@ def setUp(self): "mean": self.mean, "std": self.std, "seed": 10, - "dtype": paddle.fluid.core.VarDesc.VarType.FP16, + "dtype": paddle.base.core.VarDesc.VarType.FP16, "use_mkldnn": self.use_mkldnn, } paddle.seed(10) @@ -118,7 +118,7 @@ def setUp(self): "mean": self.mean, "std": self.std, "seed": 10, - "dtype": paddle.fluid.core.VarDesc.VarType.BF16, + "dtype": paddle.base.core.VarDesc.VarType.BF16, "use_mkldnn": self.use_mkldnn, } paddle.seed(10) @@ -308,9 +308,9 @@ def test_api(self): seed=10, ) - exe = fluid.Executor(place=fluid.CPUPlace()) + exe = base.Executor(place=base.CPUPlace()) res_1, res_2, res_3, res_4, res_5, res_6 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={ "shape_tensor_int32": np.array([2000, 500]).astype("int32"), "shape_tensor_int64": np.array([2000, 500]).astype("int64"), @@ -335,17 +335,17 @@ def test_default_dtype(self): def test_default_fp16(): paddle.framework.set_default_dtype('float16') out = paddle.tensor.random.gaussian([2, 3]) - self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP16) + self.assertEqual(out.dtype, base.core.VarDesc.VarType.FP16) def test_default_fp32(): paddle.framework.set_default_dtype('float32') out = paddle.tensor.random.gaussian([2, 3]) - self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP32) + self.assertEqual(out.dtype, base.core.VarDesc.VarType.FP32) def test_default_fp64(): paddle.framework.set_default_dtype('float64') out = paddle.tensor.random.gaussian([2, 3]) - self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64) + self.assertEqual(out.dtype, base.core.VarDesc.VarType.FP64) if paddle.is_compiled_with_cuda(): paddle.set_device('gpu') @@ -359,17 +359,17 @@ def test_default_dtype(self): def test_default_fp16(): 
paddle.framework.set_default_dtype('float16') out = paddle.tensor.random.standard_normal([2, 3]) - self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP16) + self.assertEqual(out.dtype, base.core.VarDesc.VarType.FP16) def test_default_fp32(): paddle.framework.set_default_dtype('float32') out = paddle.tensor.random.standard_normal([2, 3]) - self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP32) + self.assertEqual(out.dtype, base.core.VarDesc.VarType.FP32) def test_default_fp64(): paddle.framework.set_default_dtype('float64') out = paddle.tensor.random.standard_normal([2, 3]) - self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64) + self.assertEqual(out.dtype, base.core.VarDesc.VarType.FP64) if paddle.is_compiled_with_cuda(): paddle.set_device('gpu') diff --git a/test/legacy_test/test_gcd.py b/test/legacy_test/test_gcd.py index 738c040ea9890..a7ec34eca42c7 100644 --- a/test/legacy_test/test_gcd.py +++ b/test/legacy_test/test_gcd.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -31,9 +31,9 @@ def setUp(self): self.y_shape = [1] def test_static_graph(self): - startup_program = fluid.Program() - train_program = fluid.Program() - with fluid.program_guard(startup_program, train_program): + startup_program = base.Program() + train_program = base.Program() + with base.program_guard(startup_program, train_program): x = paddle.static.data( name='input1', dtype='int32', shape=self.x_shape ) @@ -43,13 +43,13 @@ def test_static_graph(self): out = paddle.gcd(x, y) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) res = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={'input1': self.x_np, 'input2': self.y_np}, fetch_list=[out], ) diff --git a/test/legacy_test/test_gelu_op.py b/test/legacy_test/test_gelu_op.py index d0ab6be7d89f0..acd3ef5aed796 100644 --- a/test/legacy_test/test_gelu_op.py +++ b/test/legacy_test/test_gelu_op.py @@ -18,9 +18,9 @@ from scipy.special import erf import paddle -import paddle.fluid.dygraph as dg +import paddle.base.dygraph as dg import paddle.nn.functional as F -from paddle import fluid +from paddle import base def gelu(x, approximate): @@ -43,7 +43,7 @@ def _test_case1_cpu(self, approximate): x = np.random.uniform(-1, 1, size=(11, 17)).astype(np.float32) y_ref = gelu(x, approximate) - place = fluid.CPUPlace() + place = base.CPUPlace() with dg.guard(place) as g: x_var = dg.to_variable(x) y_var = F.gelu(x_var, approximate) @@ -54,7 +54,7 @@ def _test_case1_gpu(self, approximate): x = np.random.uniform(-1, 1, size=(11, 17)).astype(np.float32) y_ref = gelu(x, approximate) - place = fluid.CUDAPlace(0) + place = base.CUDAPlace(0) with dg.guard(place) as g: x_var = dg.to_variable(x) y_var = F.gelu(x_var, approximate) @@ -64,7 +64,7 @@ def _test_case1_gpu(self, approximate): def test_cases(self): for approximate in [True, False]: self._test_case1_cpu(approximate) - if fluid.is_compiled_with_cuda(): + if base.is_compiled_with_cuda(): self._test_case1_gpu(approximate) def test_fast_math(self): diff --git a/test/legacy_test/test_generator_dataloader.py b/test/legacy_test/test_generator_dataloader.py index 9d89553852190..7de57eb3eb5ca 100644 --- a/test/legacy_test/test_generator_dataloader.py +++ b/test/legacy_test/test_generator_dataloader.py @@ -18,8 +18,8 @@ 
import numpy as np import paddle -from paddle import fluid -from paddle.fluid.reader import DataLoaderBase +from paddle import base +from paddle.base.reader import DataLoaderBase EPOCH_NUM = 20 BATCH_SIZE = 32 @@ -38,18 +38,18 @@ def random_reader(): def simple_fc_net(places, use_legacy_py_reader, use_double_buffer): paddle.seed(1) paddle.framework.random._manual_program_seed(1) - startup_prog = fluid.Program() - main_prog = fluid.Program() + startup_prog = base.Program() + main_prog = base.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main_prog, startup_prog): + with base.unique_name.guard(): + with base.program_guard(main_prog, startup_prog): image = paddle.static.data( name='image', shape=[-1, 784], dtype='float32' ) label = paddle.static.data( name='label', shape=[-1, 1], dtype='int64' ) - py_reader = fluid.io.DataLoader.from_generator( + py_reader = base.io.DataLoader.from_generator( feed_list=[image, label], capacity=4, iterable=not use_legacy_py_reader, @@ -61,7 +61,7 @@ def simple_fc_net(places, use_legacy_py_reader, use_double_buffer): hidden, size=hidden_size, activation='tanh', - bias_attr=fluid.ParamAttr( + bias_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=1.0) ), ) @@ -90,24 +90,24 @@ def run_main( places, use_double_buffer, ): - scope = fluid.Scope() - with fluid.scope_guard(scope): + scope = base.Scope() + with base.scope_guard(scope): startup_prog, main_prog, py_reader, loss = simple_fc_net( places, use_legacy_py_reader, use_double_buffer ) reader = paddle.batch(random_reader, batch_size=BATCH_SIZE) - ps = places if use_double_buffer else fluid.cpu_places(len(places)) + ps = places if use_double_buffer else base.cpu_places(len(places)) py_reader.set_sample_list_generator( reader, places=ps if py_reader.iterable else None ) - exe = fluid.Executor(place=places[0]) + exe = base.Executor(place=places[0]) exe.run(startup_prog) - prog = fluid.CompiledProgram(main_prog) + prog = base.CompiledProgram(main_prog) step = 0 step_list = [] @@ -126,7 +126,7 @@ def run_main( ) loss_list.append(np.mean(L)) step += 1 - except fluid.core.EOFException: + except base.core.EOFException: py_reader.reset() break step_list.append(step) @@ -164,10 +164,10 @@ def run_main( def prepare_places(self, with_cpu=True, with_gpu=True): places = [] if with_cpu: - places.append([fluid.CPUPlace()]) + places.append([base.CPUPlace()]) - if with_gpu and fluid.core.is_compiled_with_cuda(): - tmp = fluid.cuda_places() + if with_gpu and base.core.is_compiled_with_cuda(): + tmp = base.cuda_places() assert len(tmp) > 0, "no gpu detected" places.append([tmp[0]]) return places diff --git a/test/legacy_test/test_get_all_op_or_kernel_names.py b/test/legacy_test/test_get_all_op_or_kernel_names.py index 8ee2d2cb0f569..275cdbf8c81da 100644 --- a/test/legacy_test/test_get_all_op_or_kernel_names.py +++ b/test/legacy_test/test_get_all_op_or_kernel_names.py @@ -14,21 +14,21 @@ import unittest -from paddle.fluid import core +from paddle.base import core class TestGetAllRegisteredOpKernels(unittest.TestCase): - # reshape kernel is in fluid while not in phi + # reshape kernel is in base while not in phi def test_phi_kernels(self): self.assertTrue(core._get_all_register_op_kernels('phi')['sign']) with self.assertRaises(KeyError): core._get_all_register_op_kernels('phi')['reshape'] - # sign kernel is removed from fluid and added into phi - def test_fluid_kernels(self): - self.assertTrue(core._get_all_register_op_kernels('fluid')['reshape']) + # sign kernel is removed from base and 
added into phi + def test_base_kernels(self): + self.assertTrue(core._get_all_register_op_kernels('base')['reshape']) with self.assertRaises(KeyError): - core._get_all_register_op_kernels('fluid')['sign'] + core._get_all_register_op_kernels('base')['sign'] def test_all_kernels(self): self.assertTrue(core._get_all_register_op_kernels('all')['reshape']) @@ -42,11 +42,11 @@ class TestGetAllOpNames(unittest.TestCase): def test_get_all_op_names(self): all_op_names = core.get_all_op_names() all_op_with_phi_kernels = core.get_all_op_names("phi") - all_op_with_fluid_kernels = core.get_all_op_names("fluid") + all_op_with_base_kernels = core.get_all_op_names("base") self.assertTrue( len(all_op_names) - > len(set(all_op_with_phi_kernels) | set(all_op_with_fluid_kernels)) + > len(set(all_op_with_phi_kernels) | set(all_op_with_base_kernels)) ) self.assertTrue("scale" in all_op_with_phi_kernels) self.assertTrue("scale" in all_op_with_phi_kernels) diff --git a/test/legacy_test/test_get_device_properties.py b/test/legacy_test/test_get_device_properties.py index aa3cac5d9f34a..0cbfc4d16b0f4 100644 --- a/test/legacy_test/test_get_device_properties.py +++ b/test/legacy_test/test_get_device_properties.py @@ -15,7 +15,7 @@ import unittest from paddle.device.cuda import device_count, get_device_properties -from paddle.fluid import core +from paddle.base import core class TestGetDeviceProperties(unittest.TestCase): diff --git a/test/legacy_test/test_get_set_flags.py b/test/legacy_test/test_get_set_flags.py index 4b4cb9a7f7155..d732feb6c5f1d 100644 --- a/test/legacy_test/test_get_set_flags.py +++ b/test/legacy_test/test_get_set_flags.py @@ -14,7 +14,7 @@ import unittest -from paddle import fluid +from paddle import base class TestGetAndSetFlags(unittest.TestCase): @@ -24,13 +24,13 @@ def test_api(self): 'FLAGS_check_nan_inf': True, } - fluid.set_flags(flags) + base.set_flags(flags) flags_list = ['FLAGS_eager_delete_tensor_gb', 'FLAGS_check_nan_inf'] flag = 'FLAGS_eager_delete_tensor_gb' - res_list = fluid.get_flags(flags_list) - res = fluid.get_flags(flag) + res_list = base.get_flags(flags_list) + res = base.get_flags(flag) self.assertTrue(res_list['FLAGS_eager_delete_tensor_gb'], 1.0) self.assertTrue(res_list['FLAGS_check_nan_inf'], True) @@ -45,25 +45,25 @@ def test_errors(self): # flags type of set_flags should be dict. def test_set_flags_input_type(): - fluid.set_flags(flags_list) + base.set_flags(flags_list) self.assertRaises(TypeError, test_set_flags_input_type) # flags in set_flags should be public flags. def test_set_private_flag(): - fluid.set_flags(flag_private) + base.set_flags(flag_private) self.assertRaises(ValueError, test_set_private_flag) # flags type of set_flags should be list, tuple or string def test_get_flags_input_type(): - fluid.get_flags(flag) + base.get_flags(flag) self.assertRaises(TypeError, test_get_flags_input_type) # flags in get_flags should be public flags. 
def test_get_private_flag(): - fluid.get_flags('FLAGS_free_idle_chunk') + base.get_flags('FLAGS_free_idle_chunk') self.assertRaises(ValueError, test_get_private_flag) diff --git a/test/legacy_test/test_get_tensor_from_selected_rows_op.py b/test/legacy_test/test_get_tensor_from_selected_rows_op.py index dbbef2ce2cc74..e60b4711543ad 100644 --- a/test/legacy_test/test_get_tensor_from_selected_rows_op.py +++ b/test/legacy_test/test_get_tensor_from_selected_rows_op.py @@ -18,7 +18,7 @@ from op import Operator import paddle -from paddle.fluid import Program, core, program_guard +from paddle.base import Program, core, program_guard from paddle.nn import clip diff --git a/test/legacy_test/test_global_var_getter_setter.py b/test/legacy_test/test_global_var_getter_setter.py index 3da1e85dc22cf..5ad33b36d0a2b 100644 --- a/test/legacy_test/test_global_var_getter_setter.py +++ b/test/legacy_test/test_global_var_getter_setter.py @@ -14,7 +14,7 @@ import unittest -from paddle import fluid +from paddle import base class VarInfo: @@ -31,7 +31,7 @@ def test_main(self): VarInfo("FLAGS_eager_delete_tensor_gb", float, True), ] - g = fluid.core.globals() + g = base.core.globals() for var in var_infos: self.assertTrue(var.name in g) self.assertTrue(var.name in g.keys()) diff --git a/test/legacy_test/test_glu.py b/test/legacy_test/test_glu.py index 91fe30651bb54..9ffbc5a706181 100644 --- a/test/legacy_test/test_glu.py +++ b/test/legacy_test/test_glu.py @@ -17,8 +17,8 @@ import numpy as np import paddle -import paddle.fluid.dygraph as dg -from paddle import fluid +import paddle.base.dygraph as dg +from paddle import base from paddle.nn import functional as F @@ -47,9 +47,9 @@ def check_identity(self, place): np.testing.assert_allclose(y_np, self.out) def test_case(self): - self.check_identity(fluid.CPUPlace()) - if fluid.is_compiled_with_cuda(): - self.check_identity(fluid.CUDAPlace(0)) + self.check_identity(base.CPUPlace()) + if base.is_compiled_with_cuda(): + self.check_identity(base.CUDAPlace(0)) class TestGlu(unittest.TestCase): diff --git a/test/legacy_test/test_gpu_package_without_gpu_device.py b/test/legacy_test/test_gpu_package_without_gpu_device.py index 59db731117766..2429ff6c095f0 100644 --- a/test/legacy_test/test_gpu_package_without_gpu_device.py +++ b/test/legacy_test/test_gpu_package_without_gpu_device.py @@ -18,7 +18,7 @@ import tempfile import unittest -from paddle.fluid import core +from paddle.base import core class TestGPUPackagePaddle(unittest.TestCase): diff --git a/test/legacy_test/test_grad_clip_minimize.py b/test/legacy_test/test_grad_clip_minimize.py index 168eb701644bd..c6434d96e43dc 100644 --- a/test/legacy_test/test_grad_clip_minimize.py +++ b/test/legacy_test/test_grad_clip_minimize.py @@ -16,8 +16,8 @@ import numpy as np -from paddle import fluid -from paddle.fluid.dygraph.base import to_variable +from paddle import base +from paddle.base.dygraph.base import to_variable from paddle.nn import ClipGradByGlobalNorm, ClipGradByNorm, ClipGradByValue @@ -60,7 +60,7 @@ def get_numpy_global_norm_result(self): return new_np_p_g def get_dygrap_global_norm_result(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): gloabl_norm_clip = ClipGradByGlobalNorm(self.max_global_norm) p_g_var = [] for p, g in self.para_and_grad: @@ -132,7 +132,7 @@ def get_numpy_norm_result(self): return new_p_g def get_dygrap_norm_result(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): norm_clip = ClipGradByNorm(self.max_norm) p_g_var = [] for p, g in self.para_and_grad: @@ -200,7 +200,7 
@@ def get_numpy_clip_result(self): return new_p_g def get_dygrap_clip_result(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): value_clip = ClipGradByValue(max=self.max_value, min=self.min_value) p_g_var = [] for p, g in self.para_and_grad: diff --git a/test/legacy_test/test_gradient_clip.py b/test/legacy_test/test_gradient_clip.py index dae8b7b47d884..96c5de1bfe3a3 100644 --- a/test/legacy_test/test_gradient_clip.py +++ b/test/legacy_test/test_gradient_clip.py @@ -18,8 +18,8 @@ from fake_reader import fake_imdb_reader import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.nn.clip import _allow_pure_fp16_global_norm_clip paddle.enable_static() @@ -31,7 +31,7 @@ def bow_net( """ BOW net This model is from https://github.com/PaddlePaddle/models: - fluid/PaddleNLP/text_classification/nets.py + base/PaddleNLP/text_classification/nets.py """ emb = paddle.static.nn.embedding( input=data, is_sparse=True, size=[dict_dim, emb_dim] @@ -66,18 +66,18 @@ def init(self): pass def get_places(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) return places def check_clip_result(self, out, out_clip): pass def check_gradient_clip(self, place, dtype='float32'): - prog = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard( + prog = base.Program() + startup_program = base.Program() + with base.program_guard( main_program=prog, startup_program=startup_program ): image = paddle.static.data( @@ -105,12 +105,12 @@ def check_gradient_clip(self, place, dtype='float32'): prog_clip = prog.clone() avg_cost_clip = prog_clip.block(0).var(avg_cost.name) - p_g = fluid.backward.append_backward(loss=avg_cost) - p_g_clip = fluid.backward.append_backward(loss=avg_cost_clip) + p_g = base.backward.append_backward(loss=avg_cost) + p_g_clip = base.backward.append_backward(loss=avg_cost_clip) p_g = sorted(p_g, key=lambda x: x[0].name) p_g_clip = sorted(p_g_clip, key=lambda x: x[0].name) - with fluid.program_guard( + with base.program_guard( main_program=prog_clip, startup_program=startup_program ): p_g_clip = self.clip_gradient(p_g_clip) @@ -119,8 +119,8 @@ def check_gradient_clip(self, place, dtype='float32'): grad_clip_list = [elem[1] for elem in p_g_clip] train_reader = paddle.batch(paddle.dataset.mnist.train(), batch_size=3) - exe = fluid.Executor(place) - feeder = fluid.DataFeeder(feed_list=[image, label], place=place) + exe = base.Executor(place) + feeder = base.DataFeeder(feed_list=[image, label], place=place) exe.run(startup_program) data = next(train_reader()) @@ -131,9 +131,9 @@ def check_gradient_clip(self, place, dtype='float32'): self.check_clip_result(out, out_clip) def check_sparse_gradient_clip(self, place): - prog = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard( + prog = base.Program() + startup_program = base.Program() + with base.program_guard( main_program=prog, startup_program=startup_program ): data = paddle.static.data( @@ -146,8 +146,8 @@ def check_sparse_gradient_clip(self, place): self.backward_and_optimize(cost) - exe = fluid.Executor(place) - feeder = fluid.DataFeeder(feed_list=[data, label], place=place) + exe = base.Executor(place) + feeder = base.DataFeeder(feed_list=[data, label], place=place) exe.run(startup_program) data = next(self.train_data()) @@ -192,7 +192,7 @@ def func(params_grads): return 
paddle.nn.clip.append_gradient_clip_ops(params_grads) self.clip_gradient = func - self.check_gradient_clip(fluid.CPUPlace()) + self.check_gradient_clip(base.CPUPlace()) # test whether the output is right when use grad_clip def test_new_gradient_clip(self): @@ -201,7 +201,7 @@ def func(params_grads): return clip(params_grads) self.clip_gradient = func - self.check_gradient_clip(fluid.CPUPlace()) + self.check_gradient_clip(base.CPUPlace()) # test whether the output is right when use grad_clip under float64 def test_new_gradient_clip_fp64(self): @@ -210,7 +210,7 @@ def func(params_grads): return clip(params_grads) self.clip_gradient = func - self.check_gradient_clip(fluid.CPUPlace(), "float64") + self.check_gradient_clip(base.CPUPlace(), "float64") # invoke 'set_gradient_clip' in a wrong order def test_wrong_API_order(self): @@ -276,19 +276,19 @@ def test_none_grad_fp16(self): ) def _test_none_grad_helper(self, dtype): - prog = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard( + prog = base.Program() + startup_program = base.Program() + with base.program_guard( main_program=prog, startup_program=startup_program ): clip = paddle.nn.ClipGradByGlobalNorm(self.clip_norm) x = ( - fluid.default_main_program() + base.default_main_program() .global_block() .create_parameter(name="x", shape=[2, 3], dtype=dtype) ) y = ( - fluid.default_main_program() + base.default_main_program() .global_block() .create_parameter(name="y", shape=[2, 3], dtype=dtype) ) @@ -329,20 +329,20 @@ def func(params_grads): return clip(params_grads) self.clip_gradient = func - self.check_gradient_clip(fluid.CPUPlace()) + self.check_gradient_clip(base.CPUPlace()) # if grad is None or not need clip def test_none_grad(self): clip = paddle.nn.ClipGradByNorm(self.clip_norm) x = ( - fluid.default_main_program() + base.default_main_program() .global_block() .create_parameter( name="x", shape=[2, 3], dtype="float32", need_clip=False ) ) y = ( - fluid.default_main_program() + base.default_main_program() .global_block() .create_parameter( name="y", shape=[2, 3], dtype="float32", need_clip=False @@ -387,20 +387,20 @@ def func(params_grads): return clip(params_grads) self.clip_gradient = func - self.check_gradient_clip(fluid.CPUPlace()) + self.check_gradient_clip(base.CPUPlace()) # if grad is None or not need clip def test_none_grad(self): clip = paddle.nn.ClipGradByValue(self.max, self.min) x = ( - fluid.default_main_program() + base.default_main_program() .global_block() .create_parameter( name="x", shape=[2, 3], dtype="float32", need_clip=False ) ) y = ( - fluid.default_main_program() + base.default_main_program() .global_block() .create_parameter( name="y", shape=[2, 3], dtype="float32", need_clip=False @@ -422,10 +422,10 @@ def test_none_grad(self): class TestDygraphGradientClip(unittest.TestCase): def test_gradient_clip(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): linear = paddle.nn.Linear(5, 5) inputs = paddle.uniform([16, 5], min=-10, max=10).astype('float32') - out = linear(fluid.dygraph.to_variable(inputs)) + out = linear(base.dygraph.to_variable(inputs)) loss = paddle.mean(out) loss.backward() sgd_optimizer = paddle.optimizer.SGD( @@ -447,10 +447,10 @@ def setUp(self): def check_clip_result(self, loss, optimizer): # if grad is None - x = fluid.dygraph.to_variable( + x = base.dygraph.to_variable( np.array([2, 3]).astype("float32"), name="x" ) - y = fluid.dygraph.to_variable( + y = base.dygraph.to_variable( np.array([3, 4]).astype("float32"), name="y" ) assert len(self.clip1([(x, 
x), (x, y), (x, None)])) == 2 @@ -487,10 +487,10 @@ def setUp(self): def check_clip_result(self, loss, optimizer): # if grad is None - x = fluid.dygraph.to_variable(np.array([2, 3]).astype("float32")) + x = base.dygraph.to_variable(np.array([2, 3]).astype("float32")) assert len(self.clip([(x, None)])) == 0 # get params and grads from network - self.clip([(fluid.dygraph.to_variable(np.array([2, 3])), None)]) + self.clip([(base.dygraph.to_variable(np.array([2, 3])), None)]) opt, params_grads = optimizer.minimize(loss) _, grads = zip(*params_grads) params_grads = self.clip(params_grads) @@ -516,7 +516,7 @@ def setUp(self): def check_clip_result(self, loss, optimizer): # if grad is None - x = fluid.dygraph.to_variable(np.array([2, 3]).astype("float32")) + x = base.dygraph.to_variable(np.array([2, 3]).astype("float32")) assert len(self.clip([(x, None)])) == 0 # get params and grads from network opt, params_grads = optimizer.minimize(loss) @@ -549,8 +549,8 @@ def forward(self, x): class TestDygraphGradientClipFP16(unittest.TestCase): def test_gradient_clip(self): - if fluid.core.is_compiled_with_cuda(): - with fluid.dygraph.guard(): + if base.core.is_compiled_with_cuda(): + with base.dygraph.guard(): paddle.seed(10) model = SimpleNet() sgd_optimizer = paddle.optimizer.SGD( @@ -564,7 +564,7 @@ def test_gradient_clip(self): 'float32' ) with paddle.amp.auto_cast(level='O2'): - out = model(fluid.dygraph.to_variable(inputs)) + out = model(base.dygraph.to_variable(inputs)) loss = paddle.mean(out) scaled = scaler.scale(loss) scaled.backward() @@ -606,10 +606,10 @@ def test_gradient_clip(self): class TestDygraphGradientClipFP64(unittest.TestCase): def test_gradient_clip(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): inputs = paddle.uniform([16, 5], min=-10, max=10).astype('float32') linear = paddle.nn.Linear(5, 5) - out = linear(fluid.dygraph.to_variable(inputs)) + out = linear(base.dygraph.to_variable(inputs)) loss = paddle.mean(out) loss.backward() # before clip diff --git a/test/legacy_test/test_graph_khop_sampler.py b/test/legacy_test/test_graph_khop_sampler.py index 4070ad72a9442..cee848d549e72 100644 --- a/test/legacy_test/test_graph_khop_sampler.py +++ b/test/legacy_test/test_graph_khop_sampler.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base class TestGraphKhopSampler(unittest.TestCase): @@ -91,20 +91,20 @@ def test_sample_result(self): def test_uva_sample_result(self): paddle.disable_static() - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): row = None - if fluid.framework.in_dygraph_mode(): - row = paddle.fluid.core.eager.to_uva_tensor( + if base.framework.in_dygraph_mode(): + row = paddle.base.core.eager.to_uva_tensor( self.row.astype(self.row.dtype), 0 ) - sorted_eid = paddle.fluid.core.eager.to_uva_tensor( + sorted_eid = paddle.base.core.eager.to_uva_tensor( self.sorted_eid.astype(self.sorted_eid.dtype), 0 ) else: - row = paddle.fluid.core.to_uva_tensor( + row = paddle.base.core.to_uva_tensor( self.row.astype(self.row.dtype) ) - sorted_eid = paddle.fluid.core.to_uva_tensor( + sorted_eid = paddle.base.core.to_uva_tensor( self.sorted_eid.astype(self.sorted_eid.dtype) ) colptr = paddle.to_tensor(self.colptr) diff --git a/test/legacy_test/test_graph_sample_neighbors.py b/test/legacy_test/test_graph_sample_neighbors.py index 1f3829721e9ea..8616b88374bc4 100644 --- a/test/legacy_test/test_graph_sample_neighbors.py +++ b/test/legacy_test/test_graph_sample_neighbors.py @@ -17,7 
+17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base class TestGraphSampleNeighbors(unittest.TestCase): @@ -81,7 +81,7 @@ def test_sample_result(self): def test_sample_result_fisher_yates_sampling(self): paddle.disable_static() - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): row = paddle.to_tensor(self.row) colptr = paddle.to_tensor(self.colptr) nodes = paddle.to_tensor(self.nodes) @@ -322,7 +322,7 @@ def test_sample_result(self): def test_sample_result_fisher_yates_sampling(self): paddle.disable_static() - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): row = paddle.to_tensor(self.row) colptr = paddle.to_tensor(self.colptr) nodes = paddle.to_tensor(self.nodes) diff --git a/test/legacy_test/test_graph_send_ue_recv_op.py b/test/legacy_test/test_graph_send_ue_recv_op.py index abfabb3870b6f..10ce31514df36 100644 --- a/test/legacy_test/test_graph_send_ue_recv_op.py +++ b/test/legacy_test/test_graph_send_ue_recv_op.py @@ -19,7 +19,7 @@ from eager_op_test import OpTest import paddle -from paddle.fluid import core +from paddle.base import core def get_broadcast_shape(shp1, shp2): diff --git a/test/legacy_test/test_greater_equal_op.py b/test/legacy_test/test_greater_equal_op.py index 9ef5827e1440c..52b6e24e7d78f 100644 --- a/test/legacy_test/test_greater_equal_op.py +++ b/test/legacy_test/test_greater_equal_op.py @@ -19,7 +19,7 @@ import paddle from paddle import static -from paddle.fluid import core +from paddle.base import core class Test_Greater_Equal_Op_Fp16(unittest.TestCase): diff --git a/test/legacy_test/test_grid_sample_function.py b/test/legacy_test/test_grid_sample_function.py index 4f8042c8bad66..fff624c9bd95a 100644 --- a/test/legacy_test/test_grid_sample_function.py +++ b/test/legacy_test/test_grid_sample_function.py @@ -17,9 +17,9 @@ import numpy as np import paddle -import paddle.fluid.dygraph as dg +import paddle.base.dygraph as dg import paddle.nn.functional as F -from paddle import fluid +from paddle import base class GridSampleTestCase(unittest.TestCase): @@ -46,10 +46,10 @@ def setUp(self): self.grid = np.random.uniform(-1, 1, self.grid_shape).astype(self.dtype) def static_functional(self, place): - main = fluid.Program() - start = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, start): + main = base.Program() + start = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, start): x = paddle.static.data("x", self.x_shape, dtype=self.dtype) grid = paddle.static.data( "grid", self.grid_shape, dtype=self.dtype @@ -62,7 +62,7 @@ def static_functional(self, place): align_corners=self.align_corners, ) feed_dict = {"x": self.x, "grid": self.grid} - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(start) (y_np,) = exe.run(main, feed=feed_dict, fetch_list=[y_var]) return y_np @@ -87,17 +87,17 @@ def _test_equivalence(self, place): np.testing.assert_array_almost_equal(result1, result2) def runTest(self): - place = fluid.CPUPlace() + place = base.CPUPlace() self._test_equivalence(place) - if fluid.core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) self._test_equivalence(place) class GridSampleErrorTestCase(GridSampleTestCase): def runTest(self): - place = fluid.CPUPlace() + place = base.CPUPlace() with self.assertRaises(ValueError): self.static_functional(place) diff --git a/test/legacy_test/test_grid_sampler_op.py 
b/test/legacy_test/test_grid_sampler_op.py index 3670d3dd22e37..cbc40b12f59ba 100644 --- a/test/legacy_test/test_grid_sampler_op.py +++ b/test/legacy_test/test_grid_sampler_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, skip_check_grad_ci import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/legacy_test/test_group_norm_op.py b/test/legacy_test/test_group_norm_op.py index 543d98943c4a0..ef0c030f8f93e 100644 --- a/test/legacy_test/test_group_norm_op.py +++ b/test/legacy_test/test_group_norm_op.py @@ -26,8 +26,8 @@ from testsuite import create_op import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def group_norm_naive(x, scale, bias, epsilon, groups, data_layout): @@ -50,7 +50,7 @@ def group_norm_naive(x, scale, bias, epsilon, groups, data_layout): class TestGroupNormOpError(unittest.TestCase): def test_errors(self): with paddle_static_guard(): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): def test_x_type(): input = np.random.random(2, 100, 3, 5).astype('float32') @@ -106,9 +106,9 @@ def setUp(self): ) self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(input), - 'Scale': OpTest.np_dtype_to_fluid_dtype(scale), - 'Bias': OpTest.np_dtype_to_fluid_dtype(bias), + 'X': OpTest.np_dtype_to_base_dtype(input), + 'Scale': OpTest.np_dtype_to_base_dtype(scale), + 'Bias': OpTest.np_dtype_to_base_dtype(bias), } self.outputs = {'Y': output, 'Mean': mean, 'Variance': var} self.attrs['data_layout'] = self.data_format @@ -474,9 +474,9 @@ def test_case1(self): bias = np.array([0]).astype("float64") place = core.CPUPlace() - exe = fluid.Executor(place) + exe = base.Executor(place) results = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"data1": data1_np, "data2": data2_np}, fetch_list=[out1, out2], return_numpy=True, @@ -526,13 +526,13 @@ def test_dygraph_api(self): self.shape = (8, 32, 32) input = np.random.random(self.shape).astype(self.dtype) - with fluid.dygraph.guard(): - tensor_1 = fluid.dygraph.to_variable(input) + with base.dygraph.guard(): + tensor_1 = base.dygraph.to_variable(input) tensor_1.stop_gradient = False groupNorm = paddle.nn.GroupNorm(num_channels=32, num_groups=4) ret1 = groupNorm(tensor_1) ret1.backward() - tensor_eager_1 = fluid.dygraph.to_variable(input) + tensor_eager_1 = base.dygraph.to_variable(input) tensor_eager_1.stop_gradient = False groupNorm_eager = paddle.nn.GroupNorm(num_channels=32, num_groups=4) ret2 = groupNorm_eager(tensor_eager_1) @@ -546,13 +546,13 @@ def test_dygraph_api(self): self.shape = (8, 32, 32) input = np.random.random(self.shape).astype(self.dtype) - with fluid.dygraph.guard(): - tensor_1 = fluid.dygraph.to_variable(input) + with base.dygraph.guard(): + tensor_1 = base.dygraph.to_variable(input) tensor_1.stop_gradient = False groupNorm = paddle.nn.GroupNorm(num_channels=32, num_groups=4) ret1 = groupNorm(tensor_1) ret1.backward() - tensor_eager_1 = fluid.dygraph.to_variable(input) + tensor_eager_1 = base.dygraph.to_variable(input) tensor_eager_1.stop_gradient = False groupNorm_eager = paddle.nn.GroupNorm(num_channels=32, num_groups=4) ret2 = groupNorm_eager(tensor_eager_1) @@ -572,13 +572,13 @@ def test_dygraph_api(self): self.shape = (8, 32, 32) input = np.random.random(self.shape).astype(self.dtype) - with fluid.dygraph.guard(): - tensor_1 = fluid.dygraph.to_variable(input) + with base.dygraph.guard(): + 
tensor_1 = base.dygraph.to_variable(input) tensor_1.stop_gradient = False groupNorm = paddle.nn.GroupNorm(num_channels=32, num_groups=4) ret1 = groupNorm(tensor_1) ret1.backward() - tensor_eager_1 = fluid.dygraph.to_variable(input) + tensor_eager_1 = base.dygraph.to_variable(input) tensor_eager_1.stop_gradient = False groupNorm_eager = paddle.nn.GroupNorm(num_channels=32, num_groups=4) ret2 = groupNorm_eager(tensor_eager_1) @@ -1024,9 +1024,9 @@ def setUp(self): self.static_rev_desire[-1].append(rev[2]) def get_eager_desire(self, place): - if isinstance(place, fluid.CPUPlace): + if isinstance(place, base.CPUPlace): paddle.set_device("cpu") - if isinstance(place, fluid.CUDAPlace): + if isinstance(place, base.CUDAPlace): paddle.set_device("gpu") core.set_prim_eager_enabled(False) paddle.disable_static() @@ -1061,9 +1061,9 @@ def get_eager_desire(self, place): def get_static_desire(self, place): core._set_prim_all_enabled(False) paddle.enable_static() - if isinstance(place, fluid.CPUPlace): + if isinstance(place, base.CPUPlace): paddle.set_device("cpu") - if isinstance(place, fluid.CUDAPlace): + if isinstance(place, base.CUDAPlace): paddle.set_device("gpu") mp, sp = paddle.static.Program(), paddle.static.Program() @@ -1154,7 +1154,7 @@ def test_static_comp(self): if len(self.places) < 1: return - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): for place in self.places: fwd_actual.append([]) rev_actual.append([]) @@ -1394,7 +1394,7 @@ def test_jit_comp_with_cinn(self): fwd_actual = [] rev_actual = [] for place in self.places: - if not isinstance(place, fluid.CUDAPlace): + if not isinstance(place, base.CUDAPlace): continue input_ = paddle.to_tensor( data=self.x, dtype=self.dtype, place=place, stop_gradient=False @@ -1436,7 +1436,7 @@ def test_jit_comp_with_cinn(self): i = 0 for place in self.places: - if not isinstance(place, fluid.CUDAPlace): + if not isinstance(place, base.CUDAPlace): continue atol = self.threshold_list[i][2] rtol = self.threshold_list[i][2] diff --git a/test/legacy_test/test_group_norm_op_v2.py b/test/legacy_test/test_group_norm_op_v2.py index 0b3beee6797d7..94232e7f70a00 100644 --- a/test/legacy_test/test_group_norm_op_v2.py +++ b/test/legacy_test/test_group_norm_op_v2.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def group_norm_naive_for_general_dimension(x, scale, bias, epsilon, groups): @@ -48,9 +48,9 @@ def test_numerical_accuracy(self): (2, 6, 6, 6, 2, 3), ] np.random.seed(10) - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda() and core.op_support_gpu("group_norm"): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for place in places: for shape in shapes: @@ -87,9 +87,9 @@ def test_numerical_accuracy(self): (2, 6, 6, 6, 256, 3), ] np.random.seed(10) - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda() and core.op_support_gpu("group_norm"): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for place in places: for shape in shapes: diff --git a/test/legacy_test/test_gru_rnn_op.py b/test/legacy_test/test_gru_rnn_op.py index 450cb89dbd220..4a4420a8c335d 100644 --- a/test/legacy_test/test_gru_rnn_op.py +++ b/test/legacy_test/test_gru_rnn_op.py @@ -20,7 +20,7 @@ from eager_op_test import OpTest import paddle -from paddle.fluid import core +from paddle.base import core 
sys.path.append("../../test/rnn") from convert import get_params_for_net diff --git a/test/legacy_test/test_gru_unit_op.py b/test/legacy_test/test_gru_unit_op.py index f62163aaf56af..1359583ae1ade 100644 --- a/test/legacy_test/test_gru_unit_op.py +++ b/test/legacy_test/test_gru_unit_op.py @@ -18,7 +18,7 @@ import numpy as np from eager_op_test import OpTest -from paddle import fluid +from paddle import base class GRUActivationType(OpTest): @@ -118,7 +118,7 @@ def set_outputs(self, origin_mode=False): def setUp(self): self.dtype = ( - 'float32' if fluid.core.is_compiled_with_rocm() else 'float64' + 'float32' if base.core.is_compiled_with_rocm() else 'float64' ) self.set_inputs() self.set_outputs() @@ -136,7 +136,7 @@ def test_check_grad(self): class TestGRUUnitOpOriginMode(TestGRUUnitOp): def setUp(self): self.dtype = ( - 'float32' if fluid.core.is_compiled_with_rocm() else 'float64' + 'float32' if base.core.is_compiled_with_rocm() else 'float64' ) self.set_inputs(origin_mode=True) self.set_outputs(origin_mode=True) @@ -176,7 +176,7 @@ def test_check_grad_ingore_input(self): class TestGRUUnitOpWithBiasOriginMode(TestGRUUnitOpWithBias): def setUp(self): self.dtype = ( - 'float32' if fluid.core.is_compiled_with_rocm() else 'float64' + 'float32' if base.core.is_compiled_with_rocm() else 'float64' ) self.set_inputs(origin_mode=True) self.set_outputs(origin_mode=True) diff --git a/test/legacy_test/test_gumbel_softmax_op.py b/test/legacy_test/test_gumbel_softmax_op.py index c0396a7a02929..608b73739c902 100644 --- a/test/legacy_test/test_gumbel_softmax_op.py +++ b/test/legacy_test/test_gumbel_softmax_op.py @@ -17,7 +17,7 @@ import paddle import paddle.nn.functional as F -from paddle import fluid +from paddle import base paddle.enable_static() @@ -42,7 +42,7 @@ def setUp(self): np.random.seed(0) x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) out = np.zeros(self.shape).astype(self.dtype) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} def test_check_output(self): @@ -172,7 +172,7 @@ def setUp(self): batch_x = np.ones(self.shape) * single_x out = np.zeros(self.shape).astype(self.dtype) self.probs = self.softmax(single_x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(batch_x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(batch_x)} self.outputs = {'Out': out} def test_check_output(self): @@ -228,7 +228,7 @@ def setUp(self): self.count_expected = 24 self.place = ( paddle.CUDAPlace(0) - if paddle.fluid.core.is_compiled_with_cuda() + if paddle.base.core.is_compiled_with_cuda() else paddle.CPUPlace() ) @@ -243,7 +243,7 @@ def test_check_api(self): self.assertEqual(out_np.sum(), self.count_expected) # test dygrapg api - with paddle.fluid.dygraph.base.guard(): + with paddle.base.dygraph.base.guard(): x = paddle.to_tensor(self.x) y = paddle.nn.functional.gumbel_softmax(x, hard=True) out_np = np.array(y) @@ -255,8 +255,8 @@ def test_errors(self): paddle.disable_static() def test_Variable(): - x1 = fluid.create_lod_tensor( - np.zeros((100, 784)), [[10, 10, 10, 70]], fluid.CPUPlace() + x1 = base.create_lod_tensor( + np.zeros((100, 784)), [[10, 10, 10, 70]], base.CPUPlace() ) paddle.nn.functional.gumbel_softmax(x1) diff --git a/test/legacy_test/test_hapi_amp.py b/test/legacy_test/test_hapi_amp.py index 654f1351f3661..5f91b9a86baf4 100644 --- a/test/legacy_test/test_hapi_amp.py +++ b/test/legacy_test/test_hapi_amp.py @@ -23,7 +23,7 @@ import paddle import paddle.vision.transforms as T -from 
paddle import Model, fluid +from paddle import Model, base from paddle.nn.layer.loss import CrossEntropyLoss from paddle.static import InputSpec from paddle.vision.datasets import MNIST @@ -31,7 +31,7 @@ @unittest.skipIf( - not fluid.is_compiled_with_cuda(), 'CPU testing is not supported' + not base.is_compiled_with_cuda(), 'CPU testing is not supported' ) class TestHapiWithAmp(unittest.TestCase): def get_model(self, amp_config): @@ -100,7 +100,7 @@ def test_save_load(self): lenet_amp_path = os.path.join(temp_dir.name, './lenet_amp') model.save(lenet_amp_path) - with paddle.fluid.unique_name.guard(): + with paddle.base.unique_name.guard(): paddle.seed(2021) new_model = self.get_model(amp_level) train_dataset = MNIST(mode='train', transform=transform) @@ -143,7 +143,7 @@ def test_dynamic_check_input(self): {"level": "O1", "use_fp16_guard": True}, "O3", ] - if not fluid.is_compiled_with_cuda(): + if not base.is_compiled_with_cuda(): self.skipTest('module not tested when ONLY_CPU compling') paddle.set_device('gpu') net = LeNet() @@ -170,7 +170,7 @@ def test_dynamic_check_input(self): def test_static_check_input(self): paddle.enable_static() amp_configs = {"level": "O2", "use_pure_fp16": True} - if not fluid.is_compiled_with_cuda(): + if not base.is_compiled_with_cuda(): self.skipTest('module not tested when ONLY_CPU compling') paddle.set_device('gpu') diff --git a/test/legacy_test/test_histogram_op.py b/test/legacy_test/test_histogram_op.py index 36df3209ac76e..8ca5b4dd500da 100644 --- a/test/legacy_test/test_histogram_op.py +++ b/test/legacy_test/test_histogram_op.py @@ -18,25 +18,25 @@ from eager_op_test import OpTest import paddle -from paddle import fluid -from paddle.fluid import Program, program_guard +from paddle import base +from paddle.base import Program, program_guard class TestHistogramOpAPI(unittest.TestCase): """Test histogram api.""" def test_static_graph(self): - startup_program = fluid.Program() - train_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): + startup_program = base.Program() + train_program = base.Program() + with base.program_guard(train_program, startup_program): inputs = paddle.static.data( name='input', dtype='int64', shape=[2, 3] ) output = paddle.histogram(inputs, bins=5, min=1, max=5) - place = fluid.CPUPlace() - if fluid.core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) + place = base.CPUPlace() + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) + exe = base.Executor(place) exe.run(startup_program) img = np.array([[2, 4, 2], [2, 5, 4]]).astype(np.int64) res = exe.run( @@ -50,9 +50,9 @@ def test_static_graph(self): ) def test_dygraph(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): inputs_np = np.array([[2, 4, 2], [2, 5, 4]]).astype(np.int64) - inputs = fluid.dygraph.to_variable(inputs_np) + inputs = base.dygraph.to_variable(inputs_np) actual = paddle.histogram(inputs, bins=5, min=1, max=5) expected = np.array([0, 3, 0, 2, 1]).astype(np.int64) self.assertTrue( @@ -73,11 +73,11 @@ class TestHistogramOpError(unittest.TestCase): """Test histogram op error.""" def run_network(self, net_func): - main_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(main_program, startup_program): + main_program = base.Program() + startup_program = base.Program() + with base.program_guard(main_program, startup_program): net_func() - exe = fluid.Executor() + exe = base.Executor() exe.run(main_program) def test_bins_error(self): diff 
--git a/test/legacy_test/test_host_memory_stats.py b/test/legacy_test/test_host_memory_stats.py index a1ae645244094..35da81454dba2 100644 --- a/test/legacy_test/test_host_memory_stats.py +++ b/test/legacy_test/test_host_memory_stats.py @@ -15,7 +15,7 @@ import unittest import paddle -from paddle.fluid import core +from paddle.base import core paddle.set_device('cpu') diff --git a/test/legacy_test/test_hsigmoid_op.py b/test/legacy_test/test_hsigmoid_op.py index b9c4338fa4c05..6144285586055 100644 --- a/test/legacy_test/test_hsigmoid_op.py +++ b/test/legacy_test/test_hsigmoid_op.py @@ -20,7 +20,7 @@ import paddle import paddle.nn.functional as F -from paddle import fluid +from paddle import base paddle.enable_static() np.random.seed(100) @@ -298,7 +298,7 @@ def hs_net_conf(self, is_sparse): input=input_word, is_sparse=is_sparse, size=[3, 3], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Normal(std=1 / math.sqrt(3)) ), ) @@ -323,9 +323,9 @@ def hs_net_conf(self, is_sparse): return avg_cost, data_list def training_test(self, is_sparse): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): paddle.seed(1) - start_up = fluid.default_startup_program() + start_up = base.default_startup_program() x = np.arange(6).reshape(6) path_table = np.array([(1, 2, -1), (1, 2, -1)]).astype('int64') path_code = np.array([(1, 0, -1), (0, 0, -1)]).astype('int64') @@ -335,10 +335,10 @@ def training_test(self, is_sparse): optimizer = paddle.optimizer.SGD(learning_rate=1e-3) optimizer.minimize(loss) - main_program = fluid.default_main_program() - place = fluid.CPUPlace() - feeder = fluid.DataFeeder(feed_list=data_list, place=place) - exe = fluid.Executor(place) + main_program = base.default_main_program() + place = base.CPUPlace() + feeder = base.DataFeeder(feed_list=data_list, place=place) + exe = base.Executor(place) exe.run(start_up) result = [] @@ -619,10 +619,10 @@ def test_static_api(self): for ret in [ret1, ret2]: np.testing.assert_allclose(self.out_np, ret, rtol=1e-05) - def test_fluid_api(self): - train_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): + def test_base_api(self): + train_program = base.Program() + startup_program = base.Program() + with base.program_guard(train_program, startup_program): x = paddle.static.data('x', [-1, self.feature_size]) labels = paddle.static.data('labels', [-1, 1], 'int64') path_table = None @@ -647,7 +647,7 @@ def test_fluid_api(self): path_code=path_code, ) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) exe.run(startup_program) feed_dict = {'x': self.x_np, 'labels': self.labels_np} if self.is_custom: diff --git a/test/legacy_test/test_huber_loss_op.py b/test/legacy_test/test_huber_loss_op.py index 318e2bf68fc32..5b74de590dab9 100644 --- a/test/legacy_test/test_huber_loss_op.py +++ b/test/legacy_test/test_huber_loss_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core def huber_loss_forward(val, delta): diff --git a/test/legacy_test/test_i0_op.py b/test/legacy_test/test_i0_op.py index a30f67312c8d0..6019a868cf0d2 100644 --- a/test/legacy_test/test_i0_op.py +++ b/test/legacy_test/test_i0_op.py @@ -19,7 +19,7 @@ from scipy import special import paddle -from paddle.fluid import core +from paddle.base import core np.random.seed(100) paddle.seed(100) diff --git 
a/test/legacy_test/test_i0e_op.py b/test/legacy_test/test_i0e_op.py index 0543d53effec8..8a3df4ab77afd 100644 --- a/test/legacy_test/test_i0e_op.py +++ b/test/legacy_test/test_i0e_op.py @@ -19,7 +19,7 @@ from scipy import special import paddle -from paddle.fluid import core +from paddle.base import core np.random.seed(100) paddle.seed(100) diff --git a/test/legacy_test/test_i1_op.py b/test/legacy_test/test_i1_op.py index b5846d6e15647..d4ab42cd4b4fa 100644 --- a/test/legacy_test/test_i1_op.py +++ b/test/legacy_test/test_i1_op.py @@ -19,7 +19,7 @@ from scipy import special import paddle -from paddle.fluid import core +from paddle.base import core np.random.seed(42) paddle.seed(42) diff --git a/test/legacy_test/test_i1e_op.py b/test/legacy_test/test_i1e_op.py index a4c360ae9a36f..49379918b2666 100644 --- a/test/legacy_test/test_i1e_op.py +++ b/test/legacy_test/test_i1e_op.py @@ -19,7 +19,7 @@ from scipy import special import paddle -from paddle.fluid import core +from paddle.base import core np.random.seed(42) paddle.seed(42) diff --git a/test/legacy_test/test_identity_loss_op.py b/test/legacy_test/test_identity_loss_op.py index ce8fa7dbfd825..6fc0ce658ff41 100644 --- a/test/legacy_test/test_identity_loss_op.py +++ b/test/legacy_test/test_identity_loss_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest import paddle -from paddle import fluid -from paddle.fluid import Program, program_guard +from paddle import base +from paddle.base import Program, program_guard class TestIdentityLossOp(OpTest): @@ -114,7 +114,7 @@ class TestIdentityLossAPI(unittest.TestCase): def setUp(self): self.x_shape = [2, 3, 4, 5] self.x = np.random.uniform(-1, 1, self.x_shape).astype(np.float32) - self.place = fluid.CPUPlace() + self.place = base.CPUPlace() def identity_loss_ref(self, input, reduction): if reduction == 0 or reduction == "sum": diff --git a/test/legacy_test/test_image_classification_layer.py b/test/legacy_test/test_image_classification_layer.py index 4abb4312eb61b..75ca22fb28c97 100644 --- a/test/legacy_test/test_image_classification_layer.py +++ b/test/legacy_test/test_image_classification_layer.py @@ -17,8 +17,8 @@ import nets import paddle -from paddle import fluid -from paddle.fluid.framework import Program +from paddle import base +from paddle.base.framework import Program def conv_block(input, num_filter, groups, dropouts): @@ -39,7 +39,7 @@ class TestLayer(unittest.TestCase): def test_batch_norm_layer(self): main_program = Program() startup_program = Program() - with fluid.program_guard(main_program, startup_program): + with base.program_guard(main_program, startup_program): images = paddle.static.data( name='pixel', shape=[-1, 3, 48, 48], dtype='float32' ) @@ -54,7 +54,7 @@ def test_batch_norm_layer(self): def test_dropout_layer(self): main_program = Program() startup_program = Program() - with fluid.program_guard(main_program, startup_program): + with base.program_guard(main_program, startup_program): images = paddle.static.data( name='pixel', shape=[-1, 3, 48, 48], dtype='float32' ) @@ -66,7 +66,7 @@ def test_img_conv_group(self): main_program = Program() startup_program = Program() - with fluid.program_guard(main_program, startup_program): + with base.program_guard(main_program, startup_program): images = paddle.static.data( name='pixel', shape=[-1, 3, 48, 48], dtype='float32' ) @@ -78,7 +78,7 @@ def test_img_conv_group(self): def test_elementwise_add_with_act(self): main_program = Program() startup_program = Program() - with fluid.program_guard(main_program, startup_program): + with 
base.program_guard(main_program, startup_program): image1 = paddle.static.data( name='pixel1', shape=[-1, 3, 48, 48], dtype='float32' ) diff --git a/test/legacy_test/test_imperative_auto_prune.py b/test/legacy_test/test_imperative_auto_prune.py index 4491bf705a3d5..59f8da20287ce 100644 --- a/test/legacy_test/test_imperative_auto_prune.py +++ b/test/legacy_test/test_imperative_auto_prune.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.nn import Embedding from paddle.tensor import random @@ -167,24 +167,24 @@ def embed_linear0(self, x): class TestImperativeAutoPrune(unittest.TestCase): def test_auto_prune(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): case1 = AutoPruneLayer0(input_size=5) value1 = np.arange(25).reshape(5, 5).astype("float32") value2 = np.arange(25).reshape(5, 5).astype("float32") - v1 = fluid.dygraph.to_variable(value1) - v2 = fluid.dygraph.to_variable(value2) + v1 = base.dygraph.to_variable(value1) + v2 = base.dygraph.to_variable(value2) loss = case1(v1, v2) loss.backward() self.assertIsNotNone(case1.linear2.weight._grad_ivar()) self.assertIsNotNone(case1.linear1.weight._grad_ivar()) def test_auto_prune2(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): case2 = AutoPruneLayer1(input_size=5) value1 = np.arange(25).reshape(5, 5).astype("float32") value2 = np.arange(25).reshape(5, 5).astype("float32") - v1 = fluid.dygraph.to_variable(value1) - v2 = fluid.dygraph.to_variable(value2) + v1 = base.dygraph.to_variable(value1) + v2 = base.dygraph.to_variable(value2) loss = case2(v1, v2) loss.backward() @@ -193,12 +193,12 @@ def test_auto_prune2(self): # TODO(jiabin): Support this when we support better split tensor def test_auto_prune3(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): case3 = AutoPruneLayer3(input_size=784) value1 = np.arange(784).reshape(1, 784).astype("float32") value2 = np.arange(1).reshape(1, 1).astype("int64") - v1 = fluid.dygraph.to_variable(value1) - v2 = fluid.dygraph.to_variable(value2) + v1 = base.dygraph.to_variable(value1) + v2 = base.dygraph.to_variable(value2) loss, part2 = case3(v1, v2, 1) part2.retain_grads() loss.backward() @@ -206,12 +206,12 @@ def test_auto_prune3(self): self.assertTrue((part2.gradient() == 0).all()) def test_auto_prune4(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): case4 = AutoPruneLayer3(input_size=784) value1 = np.arange(784).reshape(1, 784).astype("float32") value2 = np.arange(1).reshape(1, 1).astype("int64") - v1 = fluid.dygraph.to_variable(value1) - v2 = fluid.dygraph.to_variable(value2) + v1 = base.dygraph.to_variable(value1) + v2 = base.dygraph.to_variable(value2) loss, part2 = case4(v1, v2, 1) part2.retain_grads() part2.backward() @@ -219,12 +219,12 @@ def test_auto_prune4(self): self.assertTrue((part2.gradient() == 1).all()) def test_auto_prune5(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): case4 = AutoPruneLayer3(input_size=784) value1 = np.arange(784).reshape(1, 784).astype("float32") value2 = np.arange(1).reshape(1, 1).astype("int64") - v1 = fluid.dygraph.to_variable(value1) - v2 = fluid.dygraph.to_variable(value2) + v1 = base.dygraph.to_variable(value1) + v2 = base.dygraph.to_variable(value2) loss, part1, part2 = case4(v1, v2, 2) part2.retain_grads() part1.backward() @@ -232,15 +232,15 @@ def test_auto_prune5(self): self.assertTrue((part2.gradient() == 0).all()) def test_auto_prune6(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): value0 = 
np.arange(26).reshape(2, 13).astype("float32") value1 = np.arange(6).reshape(2, 3).astype("float32") value2 = np.arange(10).reshape(2, 5).astype("float32") linear = paddle.nn.Linear(13, 5) linear2 = paddle.nn.Linear(3, 3) - a = fluid.dygraph.to_variable(value0) - b = fluid.dygraph.to_variable(value1) - c = fluid.dygraph.to_variable(value2) + a = base.dygraph.to_variable(value0) + b = base.dygraph.to_variable(value1) + c = base.dygraph.to_variable(value2) out1 = linear(a) out2 = linear2(b) out1.stop_gradient = True @@ -250,15 +250,15 @@ def test_auto_prune6(self): self.assertIsNone(out1.gradient()) def test_auto_prune7(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): value0 = np.arange(26).reshape(2, 13).astype("float32") value1 = np.arange(6).reshape(2, 3).astype("float32") value2 = np.arange(10).reshape(2, 5).astype("float32") linear = paddle.nn.Linear(13, 5) linear2 = paddle.nn.Linear(3, 3) - a = fluid.dygraph.to_variable(value0) - b = fluid.dygraph.to_variable(value1) - c = fluid.dygraph.to_variable(value2) + a = base.dygraph.to_variable(value0) + b = base.dygraph.to_variable(value1) + c = base.dygraph.to_variable(value2) out1 = linear(a) out2 = linear2(b) out1.stop_gradient = True @@ -268,15 +268,15 @@ def test_auto_prune7(self): self.assertIsNone(out1.gradient()) def test_auto_prune8(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): value0 = np.arange(26).reshape(2, 13).astype("float32") value1 = np.arange(6).reshape(2, 3).astype("float32") value2 = np.arange(10).reshape(2, 5).astype("float32") linear = paddle.nn.Linear(13, 5) linear2 = paddle.nn.Linear(5, 3) - a = fluid.dygraph.to_variable(value0) - b = fluid.dygraph.to_variable(value1) - c = fluid.dygraph.to_variable(value2) + a = base.dygraph.to_variable(value0) + b = base.dygraph.to_variable(value1) + c = base.dygraph.to_variable(value2) out1 = linear(a) linear_origin = linear.weight.numpy() out2 = linear2(out1) @@ -296,15 +296,15 @@ def test_auto_prune8(self): ) def test_auto_prune9(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): value0 = np.arange(26).reshape(2, 13).astype("float32") value1 = np.arange(6).reshape(2, 3).astype("float32") value2 = np.arange(10).reshape(2, 5).astype("float32") linear = paddle.nn.Linear(13, 5) linear2 = paddle.nn.Linear(5, 3) - a = fluid.dygraph.to_variable(value0) - b = fluid.dygraph.to_variable(value1) - c = fluid.dygraph.to_variable(value2) + a = base.dygraph.to_variable(value0) + b = base.dygraph.to_variable(value1) + c = base.dygraph.to_variable(value2) out1 = linear(a) linear_origin = linear.weight.numpy() out2 = linear2(out1) @@ -326,21 +326,21 @@ def test_auto_prune9(self): assert type(e) == ValueError def test_auto_prune10(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): value0 = np.arange(26).reshape(2, 13).astype("float32") value1 = np.arange(6).reshape(2, 3).astype("float32") value2 = np.arange(10).reshape(2, 5).astype("float32") linear = paddle.nn.Linear(13, 5) linear2 = paddle.nn.Linear(3, 3) - a = fluid.dygraph.to_variable(value0) - b = fluid.dygraph.to_variable(value1) - c = fluid.dygraph.to_variable(value2) + a = base.dygraph.to_variable(value0) + b = base.dygraph.to_variable(value1) + c = base.dygraph.to_variable(value2) out1 = linear(a) out2 = linear2(b) out1.stop_gradient = True out = paddle.concat([out1, out2, c], axis=1) - # TODO(jiabin): In Eager Mode we don't actually need sort_sum_gradient, this test should be removed when we don't support fluid anymore. 
- fluid.set_flags({'FLAGS_sort_sum_gradient': True}) + # TODO(jiabin): In Eager Mode we don't actually need sort_sum_gradient, this test should be removed when we don't support base anymore. + base.set_flags({'FLAGS_sort_sum_gradient': True}) out.backward() self.assertIsNone(linear.weight.gradient()) self.assertIsNone(out1.gradient()) @@ -355,15 +355,15 @@ def test_auto_prune_with_optimizer(self): ).astype("int64") embed = np.random.randn(batch_size, size).astype("float32") - place = fluid.CPUPlace() - with fluid.dygraph.guard(place): + place = base.CPUPlace() + with base.dygraph.guard(place): model = MyLayer(size, vocab_size, size) grad_clip = paddle.nn.ClipGradByGlobalNorm(0.001) optimizer = paddle.optimizer.Adam( 0.001, parameters=model.parameters(), grad_clip=grad_clip ) - indices = fluid.dygraph.to_variable(indices) - embed = fluid.dygraph.to_variable(embed) + indices = base.dygraph.to_variable(indices) + embed = base.dygraph.to_variable(embed) dummy_loss = model(embed) loss = model.embed_linear0(indices) @@ -375,15 +375,15 @@ def test_auto_prune_with_optimizer(self): assert model.embed1.weight._grad_ivar() is None assert model.linear_1.weight._grad_ivar() is None - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): model = MyLayer2(size, vocab_size, size) grad_clip = paddle.nn.ClipGradByGlobalNorm(0.001) optimizer = paddle.optimizer.Adam( 0.001, parameters=model.parameters(), grad_clip=grad_clip ) - indices = fluid.dygraph.to_variable(indices) - emebd = fluid.dygraph.to_variable(embed) + indices = base.dygraph.to_variable(indices) + emebd = base.dygraph.to_variable(embed) dummy_loss = model(indices) loss = model.embed_linear0(indices) @@ -396,11 +396,11 @@ def test_auto_prune_with_optimizer(self): assert model.linear_1.weight._grad_ivar() is None def test_case2_prune_no_grad_branch(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): value1 = np.arange(784).reshape(1, 784) value2 = np.arange(1).reshape(1, 1) - v1 = fluid.dygraph.to_variable(value1).astype("float32") - v2 = fluid.dygraph.to_variable(value2).astype("float32") + v1 = base.dygraph.to_variable(value1).astype("float32") + v2 = base.dygraph.to_variable(value2).astype("float32") case3 = AutoPruneLayer2(input_size=784) loss = case3(v1, v2) loss.backward() @@ -408,10 +408,10 @@ def test_case2_prune_no_grad_branch(self): self.assertIsNotNone(case3.linear.weight._grad_ivar()) def test_case3_prune_no_grad_branch2(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): value1 = np.arange(1).reshape(1, 1) linear = paddle.nn.Linear(1, 1) - label = fluid.dygraph.to_variable(value1).astype("float32") + label = base.dygraph.to_variable(value1).astype("float32") label = linear(label) label = paddle.cast(label, dtype="float32") label = paddle.cast(label, dtype='int64') @@ -421,7 +421,7 @@ def test_case3_prune_no_grad_branch2(self): self.assertIsNone(linear.weight._grad_ivar()) def test_case4_with_no_grad_op_maker(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): out = random.gaussian(shape=[20, 30]) loss = paddle.mean(out) loss.backward() diff --git a/test/legacy_test/test_imperative_base.py b/test/legacy_test/test_imperative_base.py index 9cd0a61bf35aa..f3a2d0dc503ff 100644 --- a/test/legacy_test/test_imperative_base.py +++ b/test/legacy_test/test_imperative_base.py @@ -14,15 +14,15 @@ import contextlib -from paddle import fluid +from paddle import base @contextlib.contextmanager def new_program_scope(main=None, startup=None, scope=None): - prog = main if main else fluid.Program() - 
startup_prog = startup if startup else fluid.Program() - scope = scope if scope else fluid.core.Scope() - with fluid.scope_guard(scope): - with fluid.program_guard(prog, startup_prog): - with fluid.unique_name.guard(): + prog = main if main else base.Program() + startup_prog = startup if startup else base.Program() + scope = scope if scope else base.core.Scope() + with base.scope_guard(scope): + with base.program_guard(prog, startup_prog): + with base.unique_name.guard(): yield diff --git a/test/legacy_test/test_imperative_container_layerlist.py b/test/legacy_test/test_imperative_container_layerlist.py index d920d2705bfeb..371e46ce1849d 100644 --- a/test/legacy_test/test_imperative_container_layerlist.py +++ b/test/legacy_test/test_imperative_container_layerlist.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base class MyLayer(paddle.nn.Layer): @@ -37,10 +37,10 @@ def paddle_imperative_list(self): [paddle.nn.Linear(2**i, 2 ** (i + 1)) for i in range(6)] ) - def layer_list(self, use_fluid_api): + def layer_list(self, use_base_api): data_np = np.random.uniform(-1, 1, [5, 1]).astype('float32') - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(data_np) + with base.dygraph.guard(): + x = base.dygraph.to_variable(data_np) layerlist = self.paddle_imperative_list() size = len(layerlist) diff --git a/test/legacy_test/test_imperative_container_parameterlist.py b/test/legacy_test/test_imperative_container_parameterlist.py index c6304f6a55e64..87cf1a6a2c8ef 100644 --- a/test/legacy_test/test_imperative_container_parameterlist.py +++ b/test/legacy_test/test_imperative_container_parameterlist.py @@ -17,11 +17,11 @@ import numpy as np import paddle -from paddle import _legacy_C_ops, fluid +from paddle import _legacy_C_ops, base class MyLayer(paddle.nn.Layer): - def __init__(self, num_stacked_param, use_fluid_api): + def __init__(self, num_stacked_param, use_base_api): super().__init__() # create ParameterList with iterable Parameters self.params = self.paddle_imperative_ParameterList(num_stacked_param) @@ -39,12 +39,12 @@ def forward(self, x): class TestImperativeContainerParameterList(unittest.TestCase): - def paramter_list(self, use_fluid_api): + def paramter_list(self, use_base_api): data_np = np.random.uniform(-1, 1, [5, 2]).astype('float32') - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(data_np) + with base.dygraph.guard(): + x = base.dygraph.to_variable(data_np) num_stacked_param = 4 - model = MyLayer(num_stacked_param, use_fluid_api) + model = MyLayer(num_stacked_param, use_base_api) self.assertEqual(len(model.params), num_stacked_param) res = model(x) self.assertListEqual(res.shape, [5, 2]) diff --git a/test/legacy_test/test_imperative_container_sequential.py b/test/legacy_test/test_imperative_container_sequential.py index d0ea6bd79a7fb..834fce4af4d78 100644 --- a/test/legacy_test/test_imperative_container_sequential.py +++ b/test/legacy_test/test_imperative_container_sequential.py @@ -17,15 +17,15 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.nn import Linear class TestImperativeContainerSequential(unittest.TestCase): def test_sequential(self): data = np.random.uniform(-1, 1, [5, 10]).astype('float32') - with fluid.dygraph.guard(): - data = fluid.dygraph.to_variable(data) + with base.dygraph.guard(): + data = base.dygraph.to_variable(data) model1 = paddle.nn.Sequential(Linear(10, 1), Linear(1, 2)) res1 = model1(data) self.assertListEqual(res1.shape, [5, 2]) @@ 
-58,8 +58,8 @@ def test_sequential(self): def test_sequential_list_params(self): data = np.random.uniform(-1, 1, [5, 10]).astype('float32') - with fluid.dygraph.guard(): - data = fluid.dygraph.to_variable(data) + with base.dygraph.guard(): + data = base.dygraph.to_variable(data) model1 = paddle.nn.Sequential(Linear(10, 1), Linear(1, 2)) res1 = model1(data) self.assertListEqual(res1.shape, [5, 2]) diff --git a/test/legacy_test/test_imperative_data_loader_base.py b/test/legacy_test/test_imperative_data_loader_base.py index 6104ebbb67485..11d0524744b2c 100644 --- a/test/legacy_test/test_imperative_data_loader_base.py +++ b/test/legacy_test/test_imperative_data_loader_base.py @@ -17,8 +17,8 @@ import numpy as np import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid.reader import use_pinned_memory +from paddle import base +from paddle.base.reader import use_pinned_memory def get_random_images_and_labels(image_shape, label_shape): @@ -52,32 +52,32 @@ def iter_loader_data(self, loader): self.assertEqual(relu.shape, [self.batch_size, 784]) def test_single_process_loader(self): - with fluid.dygraph.guard(): - loader = fluid.io.DataLoader.from_generator( + with base.dygraph.guard(): + loader = base.io.DataLoader.from_generator( capacity=self.capacity, iterable=False, use_multiprocess=False ) loader.set_sample_generator( sample_generator_creator(self.batch_size, self.batch_num), batch_size=self.batch_size, - places=fluid.CPUPlace(), + places=base.CPUPlace(), ) self.iter_loader_data(loader) def test_multi_process_loader(self): - with fluid.dygraph.guard(): - loader = fluid.io.DataLoader.from_generator( + with base.dygraph.guard(): + loader = base.io.DataLoader.from_generator( capacity=self.capacity, use_multiprocess=True ) loader.set_sample_generator( sample_generator_creator(self.batch_size, self.batch_num), batch_size=self.batch_size, - places=fluid.CPUPlace(), + places=base.CPUPlace(), ) self.iter_loader_data(loader) def test_generator_no_places(self): - with fluid.dygraph.guard(): - loader = fluid.io.DataLoader.from_generator(capacity=self.capacity) + with base.dygraph.guard(): + loader = base.io.DataLoader.from_generator(capacity=self.capacity) loader.set_sample_generator( sample_generator_creator(self.batch_size, self.batch_num), batch_size=self.batch_size, @@ -85,15 +85,15 @@ def test_generator_no_places(self): self.iter_loader_data(loader) def test_set_pin_memory(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): use_pinned_memory(False) - loader = fluid.io.DataLoader.from_generator( + loader = base.io.DataLoader.from_generator( capacity=self.capacity, iterable=False, use_multiprocess=False ) loader.set_sample_generator( sample_generator_creator(self.batch_size, self.batch_num), batch_size=self.batch_size, - places=fluid.CPUPlace(), + places=base.CPUPlace(), ) self.iter_loader_data(loader) use_pinned_memory(True) diff --git a/test/legacy_test/test_imperative_data_loader_exception.py b/test/legacy_test/test_imperative_data_loader_exception.py index 22717c0f03a25..af196e33d2cdb 100644 --- a/test/legacy_test/test_imperative_data_loader_exception.py +++ b/test/legacy_test/test_imperative_data_loader_exception.py @@ -18,8 +18,8 @@ import numpy as np import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def get_random_images_and_labels(image_shape, label_shape): @@ -36,11 +36,11 @@ def setUp(self): self.capacity = 5 def test_not_capacity(self): - with 
fluid.dygraph.guard(): + with base.dygraph.guard(): with self.assertRaisesRegex( ValueError, "Please give value to capacity." ): - fluid.io.DataLoader.from_generator() + base.io.DataLoader.from_generator() def test_single_process_with_thread_expection(self): def error_sample_genarator(batch_num): @@ -50,12 +50,12 @@ def __reader__(): return __reader__ - with fluid.dygraph.guard(): - loader = fluid.io.DataLoader.from_generator( + with base.dygraph.guard(): + loader = base.io.DataLoader.from_generator( capacity=self.capacity, iterable=False, use_multiprocess=False ) loader.set_batch_generator( - error_sample_genarator(self.batch_num), places=fluid.CPUPlace() + error_sample_genarator(self.batch_num), places=base.CPUPlace() ) exception = None try: @@ -74,12 +74,12 @@ def __reader__(): return __reader__ - with fluid.dygraph.guard(): - loader = fluid.io.DataLoader.from_generator( + with base.dygraph.guard(): + loader = base.io.DataLoader.from_generator( capacity=self.capacity, use_multiprocess=True ) loader.set_batch_generator( - error_sample_genarator(self.batch_num), places=fluid.CPUPlace() + error_sample_genarator(self.batch_num), places=base.CPUPlace() ) exception = None try: @@ -101,13 +101,13 @@ def __reader__(): return __reader__ - with fluid.dygraph.guard(): - loader = fluid.io.DataLoader.from_generator( + with base.dygraph.guard(): + loader = base.io.DataLoader.from_generator( capacity=self.capacity, use_multiprocess=True ) loader.set_batch_generator( slow_batch_generator_creator(self.batch_size, self.batch_num), - places=fluid.CPUPlace(), + places=base.CPUPlace(), ) exception = None try: diff --git a/test/legacy_test/test_imperative_data_loader_exit_func.py b/test/legacy_test/test_imperative_data_loader_exit_func.py index 7dd95299639e4..82505099a4b0f 100644 --- a/test/legacy_test/test_imperative_data_loader_exit_func.py +++ b/test/legacy_test/test_imperative_data_loader_exit_func.py @@ -18,7 +18,7 @@ import time import unittest -from paddle.fluid.reader import ( +from paddle.base.reader import ( CleanupFuncRegistrar, _cleanup, multiprocess_queue_set, diff --git a/test/legacy_test/test_imperative_data_loader_fds_clear.py b/test/legacy_test/test_imperative_data_loader_fds_clear.py index 5950ecb58235d..31b2306761af4 100644 --- a/test/legacy_test/test_imperative_data_loader_fds_clear.py +++ b/test/legacy_test/test_imperative_data_loader_fds_clear.py @@ -17,7 +17,7 @@ import numpy as np import paddle.nn.functional as F -from paddle import fluid +from paddle import base from paddle.io import DataLoader, Dataset @@ -60,12 +60,12 @@ def setUp(self): self.capacity = 50 def prepare_data_loader(self): - loader = fluid.io.DataLoader.from_generator( + loader = base.io.DataLoader.from_generator( capacity=self.capacity, use_multiprocess=True ) loader.set_batch_generator( batch_generator_creator(self.batch_size, self.batch_num), - places=fluid.CPUPlace(), + places=base.CPUPlace(), ) return loader @@ -80,14 +80,14 @@ def run_one_epoch_with_break(self, loader): break def test_data_loader_break(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): loader = self.prepare_data_loader() for _ in range(self.epoch_num): self.run_one_epoch_with_break(loader) break def test_data_loader_continue_break(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): loader = self.prepare_data_loader() for _ in range(self.epoch_num): self.run_one_epoch_with_break(loader) @@ -95,8 +95,8 @@ def test_data_loader_continue_break(self): class 
TestMultiProcessDataLoaderMmapFdsClear(TestDygraphDataLoaderMmapFdsClear): def prepare_data_loader(self): - place = fluid.CPUPlace() - with fluid.dygraph.guard(place): + place = base.CPUPlace() + with base.dygraph.guard(place): dataset = RandomDataset(self.batch_size * self.batch_num) loader = DataLoader( dataset, diff --git a/test/legacy_test/test_imperative_data_loader_process.py b/test/legacy_test/test_imperative_data_loader_process.py index c610050acfd7b..f1fbc1471a0b4 100644 --- a/test/legacy_test/test_imperative_data_loader_process.py +++ b/test/legacy_test/test_imperative_data_loader_process.py @@ -18,8 +18,8 @@ import numpy as np -from paddle import fluid -from paddle.fluid.reader import _reader_process_loop +from paddle import base +from paddle.base.reader import _reader_process_loop def get_random_images_and_labels(image_shape, label_shape): @@ -57,13 +57,13 @@ def __clear_process__(util_queue): except queue.Empty: break - with fluid.dygraph.guard(): - loader = fluid.io.DataLoader.from_generator( + with base.dygraph.guard(): + loader = base.io.DataLoader.from_generator( capacity=self.batch_num + 1, use_multiprocess=True ) loader.set_batch_generator( batch_generator_creator(self.batch_size, self.batch_num), - places=fluid.CPUPlace(), + places=base.CPUPlace(), ) loader._data_queue = queue.Queue(self.batch_num + 1) _reader_process_loop(loader._batch_reader, loader._data_queue) @@ -87,12 +87,12 @@ def __reader__(): return __reader__ - with fluid.dygraph.guard(): - loader = fluid.io.DataLoader.from_generator( + with base.dygraph.guard(): + loader = base.io.DataLoader.from_generator( capacity=self.batch_num + 1, use_multiprocess=True ) loader.set_batch_generator( - none_sample_genarator(self.batch_num), places=fluid.CPUPlace() + none_sample_genarator(self.batch_num), places=base.CPUPlace() ) loader._data_queue = queue.Queue(self.batch_num + 1) exception = None diff --git a/test/legacy_test/test_imperative_data_parallel.py b/test/legacy_test/test_imperative_data_parallel.py index c9ee7f1342cc8..7184a9e640357 100644 --- a/test/legacy_test/test_imperative_data_parallel.py +++ b/test/legacy_test/test_imperative_data_parallel.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.nn import Linear @@ -37,7 +37,7 @@ def forward(self, inputs): class TestDataParallelStateDict(unittest.TestCase): def test_data_parallel_state_dict(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): paddle.distributed.init_parallel_env() mlp = MLP() parallel_mlp = paddle.DataParallel(mlp) @@ -47,9 +47,9 @@ def test_data_parallel_state_dict(self): base_para = {} place = ( - fluid.CPUPlace() + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) for k, v in single_state.items(): self.assertTrue(k in parallel_state) diff --git a/test/legacy_test/test_imperative_decorator.py b/test/legacy_test/test_imperative_decorator.py index d02819ff2937c..6e4eb4af3cb14 100644 --- a/test/legacy_test/test_imperative_decorator.py +++ b/test/legacy_test/test_imperative_decorator.py @@ -18,8 +18,8 @@ from test_imperative_base import new_program_scope import paddle -from paddle import fluid -from paddle.fluid import framework +from paddle import base +from paddle.base import framework class TestTracerMode(unittest.TestCase): @@ -29,7 +29,7 @@ def setUp(self): def get_tracer_mode(self): assert framework.in_dygraph_mode(), "Dygraph mode must be enabled" 
- @fluid.dygraph.no_grad + @base.dygraph.no_grad def no_grad_func(self, a): self.assertEqual(self.tracer._has_grad, False) return a @@ -47,7 +47,7 @@ def check_not_support_rlt(self, ans): self.assertEqual(rlt, ans) def test_main(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): self.tracer = framework._dygraph_tracer() self.tracer._train_mode = self.init_mode @@ -57,7 +57,7 @@ def test_main(self): def need_no_grad_func(a, b=1): return a + b - decorated_func = fluid.dygraph.no_grad(need_no_grad_func) + decorated_func = base.dygraph.no_grad(need_no_grad_func) self.assertTrue( str(inspect.getfullargspec(decorated_func)) == str(inspect.getfullargspec(need_no_grad_func)) @@ -65,7 +65,7 @@ def need_no_grad_func(a, b=1): self.assertEqual(self.tracer._train_mode, self.init_mode) - with fluid.dygraph.guard(): + with base.dygraph.guard(): self.check_not_support_rlt(False) paddle.enable_static() diff --git a/test/legacy_test/test_imperative_deepcf.py b/test/legacy_test/test_imperative_deepcf.py index 7827e40974658..e80f9d2974dd8 100644 --- a/test/legacy_test/test_imperative_deepcf.py +++ b/test/legacy_test/test_imperative_deepcf.py @@ -21,9 +21,9 @@ from test_imperative_base import new_program_scope import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.dygraph.base import to_variable +from paddle import base +from paddle.base import core +from paddle.base.dygraph.base import to_variable from paddle.nn import Linear @@ -117,7 +117,7 @@ def __init__(self, num_users, num_items, matrix): self._num_users = num_users self._num_items = num_items self._rating_matrix = self.create_parameter( - attr=fluid.ParamAttr(trainable=False), + attr=base.ParamAttr(trainable=False), shape=matrix.shape, dtype=matrix.dtype, is_bias=False, @@ -257,10 +257,10 @@ def test_deefcf(self): ) = self.get_data() paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) - startup = fluid.Program() - main = fluid.Program() + startup = base.Program() + main = base.Program() - scope = fluid.core.Scope() + scope = base.core.Scope() with new_program_scope(main=main, startup=startup, scope=scope): users = paddle.static.data('users', [-1, 1], dtype='int32') items = paddle.static.data('items', [-1, 1], dtype='int32') @@ -272,10 +272,10 @@ def test_deefcf(self): adam = paddle.optimizer.Adam(0.01) adam.minimize(loss) - exe = fluid.Executor( - fluid.CPUPlace() + exe = base.Executor( + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) exe.run(startup) for e in range(self.num_epoches): @@ -302,7 +302,7 @@ def test_deefcf(self): )[0] sys.stderr.write('static loss %s\n' % static_loss) - with fluid.dygraph.guard(): + with base.dygraph.guard(): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) @@ -333,13 +333,13 @@ def test_deefcf(self): dy_loss = loss.numpy() sys.stderr.write(f'dynamic loss: {slice} {dy_loss}\n') - with fluid.dygraph.guard(): + with base.dygraph.guard(): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) deepcf2 = DeepCF(num_users, num_items, matrix) adam2 = paddle.optimizer.Adam(0.01, parameters=deepcf2.parameters()) - fluid.set_flags({'FLAGS_sort_sum_gradient': True}) + base.set_flags({'FLAGS_sort_sum_gradient': True}) for e in range(self.num_epoches): sys.stderr.write('epoch %d\n' % e) for slice in range( @@ -365,11 +365,11 @@ def test_deefcf(self): dy_loss2 = loss2.numpy() sys.stderr.write(f'dynamic loss: {slice} {dy_loss2}\n') - with fluid.dygraph.guard(): + with 
base.dygraph.guard(): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + base.default_startup_program().random_seed = seed + base.default_main_program().random_seed = seed deepcf = DeepCF(num_users, num_items, matrix) adam = paddle.optimizer.Adam(0.01, parameters=deepcf.parameters()) diff --git a/test/legacy_test/test_imperative_double_grad.py b/test/legacy_test/test_imperative_double_grad.py index 069a99b55875b..2f8d81d2e0ed2 100644 --- a/test/legacy_test/test_imperative_double_grad.py +++ b/test/legacy_test/test_imperative_double_grad.py @@ -19,17 +19,17 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid.wrapped_decorator import wrap_decorator +from paddle import base +from paddle.base.wrapped_decorator import wrap_decorator from paddle.vision.models import resnet50, resnet101 def _dygraph_guard_(func): def __impl__(*args, **kwargs): - if fluid.in_dygraph_mode(): + if base.in_dygraph_mode(): return func(*args, **kwargs) else: - with fluid.dygraph.guard(): + with base.dygraph.guard(): return func(*args, **kwargs) return __impl__ @@ -40,7 +40,7 @@ def __impl__(*args, **kwargs): def random_var(size, low=-1, high=1, dtype='float32'): x_np = np.random.uniform(low=low, high=high, size=size).astype(dtype) - return fluid.dygraph.to_variable(x_np) + return base.dygraph.to_variable(x_np) class TestEagerGrad(TestCase): @@ -52,7 +52,7 @@ def test_simple_example_eager_grad(self): x = paddle.to_tensor(np_x, dtype="float64", stop_gradient=False) y = paddle.to_tensor(np_y, dtype="float64", stop_gradient=False) out = paddle.matmul(x, y) - dx = fluid.dygraph.grad(out, x) + dx = base.dygraph.grad(out, x) dout = np.ones_like(np_y) expected_dx = np.matmul(dout, np.transpose(np_y)) @@ -73,7 +73,7 @@ def test_simple_example_eager_grad_allow_unused(self): out_z = paddle.nn.functional.sigmoid(z) out = paddle.matmul(x, y) - dx = fluid.dygraph.grad(out, [x, z], allow_unused=True) + dx = base.dygraph.grad(out, [x, z], allow_unused=True) dout = np.ones_like(np_y) expected_dx = np.matmul(dout, np.transpose(np_y)) np.testing.assert_allclose(dx[0].numpy(), expected_dx, rtol=1e-05) @@ -96,7 +96,7 @@ def test_simple_example_eager_grad_not_allow_unused(self): try: # allow_unused is false in default - dx = fluid.dygraph.grad(out, [x, z]) + dx = base.dygraph.grad(out, [x, z]) except ValueError as e: error_msg = str(e) assert error_msg.find("allow_unused") > 0 @@ -115,7 +115,7 @@ def test_simple_example_eager_grad_duplicate_input(self): try: # duplicate input will arise RuntimeError errors - dx = fluid.dygraph.grad(out, [x, x]) + dx = base.dygraph.grad(out, [x, x]) except RuntimeError as e: error_msg = str(e) assert error_msg.find("duplicate") > 0 @@ -134,7 +134,7 @@ def test_simple_example_eager_grad_duplicate_output(self): try: # duplicate output will arise RuntimeError errors - dx = fluid.dygraph.grad([out, out], [x]) + dx = base.dygraph.grad([out, out], [x]) except RuntimeError as e: error_msg = str(e) assert error_msg.find("duplicate") > 0 @@ -194,8 +194,8 @@ def grad( create_graph=False, allow_unused=False, ): - fluid.set_flags({'FLAGS_sort_sum_gradient': self.sort_sum_gradient}) - return fluid.dygraph.grad( + base.set_flags({'FLAGS_sort_sum_gradient': self.sort_sum_gradient}) + return base.dygraph.grad( outputs=outputs, inputs=inputs, grad_outputs=grad_outputs, @@ -322,7 +322,7 @@ def test_none_one_initial_gradient(self): ) np.random.shuffle(x_np) - x = 
fluid.dygraph.to_variable(x_np) + x = base.dygraph.to_variable(x_np) x.stop_gradient = False alpha = 0.2 @@ -533,17 +533,17 @@ def model_f(input): out = out + linear(input) return out - fluid.set_flags({'FLAGS_sort_sum_gradient': True}) + base.set_flags({'FLAGS_sort_sum_gradient': True}) - with fluid.dygraph.guard(): + with base.dygraph.guard(): paddle.seed(123) paddle.framework.random._manual_program_seed(123) - a = fluid.dygraph.to_variable(value) + a = base.dygraph.to_variable(value) a.stop_gradient = False out = model_f(a) - dx = fluid.dygraph.grad( + dx = base.dygraph.grad( outputs=[out], inputs=[a], create_graph=False, @@ -553,10 +553,10 @@ def model_f(input): grad_1 = dx[0].numpy() - with fluid.dygraph.guard(): + with base.dygraph.guard(): paddle.seed(123) paddle.framework.random._manual_program_seed(123) - a = fluid.dygraph.to_variable(value) + a = base.dygraph.to_variable(value) a.stop_gradient = False out = model_f(a) @@ -569,12 +569,12 @@ def model_f(input): class TestRaiseNoDoubleGradOp(TestCase): def test_no_grad_op(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): x = paddle.ones(shape=[2, 3, 2, 2], dtype='float32') x.stop_gradient = False y = paddle.static.nn.group_norm(x, groups=1) - dx = fluid.dygraph.grad( + dx = base.dygraph.grad( outputs=[y], inputs=[x], create_graph=True, retain_graph=True )[0] diff --git a/test/legacy_test/test_imperative_framework.py b/test/legacy_test/test_imperative_framework.py index 3c81da2c6d59f..33f53c8d3ab66 100644 --- a/test/legacy_test/test_imperative_framework.py +++ b/test/legacy_test/test_imperative_framework.py @@ -18,7 +18,7 @@ from test_imperative_base import new_program_scope import paddle -from paddle import fluid +from paddle import base class MLP(paddle.nn.Layer): @@ -68,6 +68,6 @@ def test_dygraph_backward(self): def test_dygraph_to_string(self): np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32) - with fluid.dygraph.guard(): - var_inp = fluid.dygraph.to_variable(np_inp) + with base.dygraph.guard(): + var_inp = base.dygraph.to_variable(np_inp) print(str(var_inp)) diff --git a/test/legacy_test/test_imperative_gan.py b/test/legacy_test/test_imperative_gan.py index 524ed9ec35f08..60261972956bd 100644 --- a/test/legacy_test/test_imperative_gan.py +++ b/test/legacy_test/test_imperative_gan.py @@ -18,9 +18,9 @@ from test_imperative_base import new_program_scope import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.dygraph.base import to_variable +from paddle import base +from paddle.base import core +from paddle.base.dygraph.base import to_variable from paddle.nn import Linear @@ -58,11 +58,11 @@ def test_gan_float32(self): seed = 90 paddle.seed(1) paddle.framework.random._manual_program_seed(1) - startup = fluid.Program() - discriminate_p = fluid.Program() - generate_p = fluid.Program() + startup = base.Program() + discriminate_p = base.Program() + generate_p = base.Program() - scope = fluid.core.Scope() + scope = base.core.Scope() with new_program_scope( main=discriminate_p, startup=startup, scope=scope ): @@ -116,13 +116,13 @@ def test_gan_float32(self): sgd = paddle.optimizer.SGD(learning_rate=1e-3) sgd.minimize(g_loss) - exe = fluid.Executor( - fluid.CPUPlace() + exe = base.Executor( + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) static_params = {} - with fluid.scope_guard(scope): + with base.scope_guard(scope): img = np.ones([2, 1], np.float32) noise = np.ones([2, 2], np.float32) exe.run(startup) @@ -142,7 
+142,7 @@ def test_gan_float32(self): ) dy_params = {} - with fluid.dygraph.guard(): + with base.dygraph.guard(): paddle.seed(1) paddle.framework.random._manual_program_seed(1) @@ -197,8 +197,8 @@ def test_gan_float32(self): dy_d_loss = d_loss.numpy() dy_params2 = {} - with fluid.dygraph.guard(): - fluid.set_flags({'FLAGS_sort_sum_gradient': True}) + with base.dygraph.guard(): + base.set_flags({'FLAGS_sort_sum_gradient': True}) paddle.seed(1) paddle.framework.random._manual_program_seed(1) discriminator2 = Discriminator() diff --git a/test/legacy_test/test_imperative_gnn.py b/test/legacy_test/test_imperative_gnn.py index 225dbe83b1df2..b68aa6926bf63 100644 --- a/test/legacy_test/test_imperative_gnn.py +++ b/test/legacy_test/test_imperative_gnn.py @@ -20,9 +20,9 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.dygraph.base import to_variable +from paddle import base +from paddle.base import core +from paddle.base.dygraph.base import to_variable from paddle.optimizer import Adam @@ -66,10 +66,10 @@ class TestDygraphGNN(unittest.TestCase): def test_gnn_float32(self): paddle.seed(90) paddle.framework.random._manual_program_seed(90) - startup = fluid.Program() - main = fluid.Program() + startup = base.Program() + main = base.Program() - scope = fluid.core.Scope() + scope = base.core.Scope() with new_program_scope(main=main, startup=startup, scope=scope): features = paddle.static.data( name='features', shape=[1, 100, 50], dtype='float32' @@ -94,10 +94,10 @@ def test_gnn_float32(self): adam = Adam(learning_rate=1e-3) adam.minimize(loss) - exe = fluid.Executor( - fluid.CPUPlace() + exe = base.Executor( + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) exe.run(startup) static_loss = exe.run( @@ -113,7 +113,7 @@ def test_gnn_float32(self): scope.find_var(model.gc.weight.name).get_tensor() ) - with fluid.dygraph.guard(): + with base.dygraph.guard(): paddle.seed(90) paddle.framework.random._manual_program_seed(90) @@ -139,7 +139,7 @@ def test_gnn_float32(self): loss_value = loss.numpy() model_gc_weight_value = model.gc.weight.numpy() - with fluid.dygraph.guard(): + with base.dygraph.guard(): paddle.seed(90) paddle.framework.random._manual_program_seed(90) diff --git a/test/legacy_test/test_imperative_group.py b/test/legacy_test/test_imperative_group.py index 2a1fdad88bb87..1e28d94e0b10d 100644 --- a/test/legacy_test/test_imperative_group.py +++ b/test/legacy_test/test_imperative_group.py @@ -15,8 +15,8 @@ import unittest import paddle -from paddle.fluid import core -from paddle.fluid.framework import in_dygraph_mode +from paddle.base import core +from paddle.base.framework import in_dygraph_mode class TestDataParallelGroup(unittest.TestCase): diff --git a/test/legacy_test/test_imperative_hook_for_layer.py b/test/legacy_test/test_imperative_hook_for_layer.py index 3b0fc95fadc11..22935f97afc68 100644 --- a/test/legacy_test/test_imperative_hook_for_layer.py +++ b/test/legacy_test/test_imperative_hook_for_layer.py @@ -17,9 +17,9 @@ import numpy as np from test_imperative_lod_tensor_to_selected_rows import SimpleNet -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.dygraph import base +from paddle import base +from paddle.base import core +from paddle.base.dygraph import base call_forward_post_hook = False call_forward_pre_hook = False @@ -49,15 +49,15 @@ class Test_Forward_Hook(unittest.TestCase): def test_forward_hook_return_value(self): seed = 90 
- places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for place in places: - with fluid.dygraph.guard(place): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed - fluid.set_flags({'FLAGS_sort_sum_gradient': True}) + with base.dygraph.guard(place): + base.default_startup_program().random_seed = seed + base.default_main_program().random_seed = seed + base.set_flags({'FLAGS_sort_sum_gradient': True}) input_word = ( np.array( @@ -131,15 +131,15 @@ def test_forward_hook_return_value(self): def test_forward_hook(self): seed = 90 - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for place in places: - with fluid.dygraph.guard(place): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed - fluid.set_flags({'FLAGS_sort_sum_gradient': True}) + with base.dygraph.guard(place): + base.default_startup_program().random_seed = seed + base.default_main_program().random_seed = seed + base.set_flags({'FLAGS_sort_sum_gradient': True}) global call_forward_post_hook global call_forward_pre_hook diff --git a/test/legacy_test/test_imperative_layer_apply.py b/test/legacy_test/test_imperative_layer_apply.py index 402d2351a0308..140cd8676ba14 100644 --- a/test/legacy_test/test_imperative_layer_apply.py +++ b/test/legacy_test/test_imperative_layer_apply.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid, nn +from paddle import base, nn class LeNetDygraph(paddle.nn.Layer): @@ -73,7 +73,7 @@ def init_weights(layer): class TestLayerApply(unittest.TestCase): def test_apply_init_weight(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): net = LeNetDygraph() net.apply(init_weights) diff --git a/test/legacy_test/test_imperative_layer_children.py b/test/legacy_test/test_imperative_layer_children.py index bf440d511c566..38d4291d1a973 100644 --- a/test/legacy_test/test_imperative_layer_children.py +++ b/test/legacy_test/test_imperative_layer_children.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid, nn +from paddle import base, nn class LeNetDygraph(paddle.nn.Layer): @@ -39,7 +39,7 @@ def forward(self, inputs): class TestLayerChildren(unittest.TestCase): def func_apply_init_weight(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): net = LeNetDygraph() net.eval() diff --git a/test/legacy_test/test_imperative_layer_trainable.py b/test/legacy_test/test_imperative_layer_trainable.py index 449b448da31f7..a436dfb6e0e02 100644 --- a/test/legacy_test/test_imperative_layer_trainable.py +++ b/test/legacy_test/test_imperative_layer_trainable.py @@ -17,13 +17,13 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import dygraph +from paddle import base +from paddle.base import dygraph class TestImperativeLayerTrainable(unittest.TestCase): def test_set_trainable(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): label = np.random.uniform(-1, 1, [10, 10]).astype(np.float32) label = dygraph.to_variable(label) diff --git a/test/legacy_test/test_imperative_load_static_param.py b/test/legacy_test/test_imperative_load_static_param.py index 0e956c55ae685..83dc19a3945cb 100644 --- a/test/legacy_test/test_imperative_load_static_param.py +++ b/test/legacy_test/test_imperative_load_static_param.py @@ 
-19,8 +19,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import framework +from paddle import base +from paddle.base import framework from paddle.nn import BatchNorm, Linear paddle.enable_static() @@ -147,12 +147,12 @@ def testLoadStaticModel(self): [20, 200], 'float32', name="weight_test_2" ) - para_list = fluid.default_main_program().list_vars() + para_list = base.default_main_program().list_vars() - exe = fluid.Executor( - fluid.CPUPlace() - if not fluid.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + exe = base.Executor( + base.CPUPlace() + if not base.is_compiled_with_cuda() + else base.CUDAPlace(0) ) out = exe.run(framework.default_startup_program()) @@ -174,7 +174,7 @@ def testLoadStaticModel(self): else: new_dict[k] = v - with fluid.dygraph.guard(): + with base.dygraph.guard(): class MyTest(paddle.nn.Layer): def __init__(self): diff --git a/test/legacy_test/test_imperative_lod_tensor_to_selected_rows.py b/test/legacy_test/test_imperative_lod_tensor_to_selected_rows.py index 52e378265547e..f2c54048e6d28 100644 --- a/test/legacy_test/test_imperative_lod_tensor_to_selected_rows.py +++ b/test/legacy_test/test_imperative_lod_tensor_to_selected_rows.py @@ -19,9 +19,9 @@ from utils import DyGraphProgramDescTracerTestHelper import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.dygraph.base import to_variable +from paddle import base +from paddle.base import core +from paddle.base.dygraph.base import to_variable class SimpleNet(paddle.nn.Layer): @@ -44,7 +44,7 @@ def __init__( vocab_size, hidden_size, sparse=is_sparse, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( name='embedding_para', initializer=paddle.nn.initializer.Uniform( low=-init_scale, high=init_scale @@ -52,7 +52,7 @@ def __init__( ), ) self.softmax_bias = self.create_parameter( - attr=fluid.ParamAttr(), + attr=base.ParamAttr(), shape=[self.vocab_size], dtype=dtype, default_initializer=paddle.nn.initializer.Uniform( @@ -87,9 +87,9 @@ def test_simple_net(self): self.simple_net_float32(is_sparse, dtype) def simple_net_float32(self, is_sparse, dtype): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for place in places: seed = 90 @@ -101,7 +101,7 @@ def simple_net_float32(self, is_sparse, dtype): batch_num = 200 for is_sort_sum_gradient in [True, False]: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) @@ -123,7 +123,7 @@ def simple_net_float32(self, is_sparse, dtype): dy_loss = None helper = DyGraphProgramDescTracerTestHelper(self) - fluid.set_flags( + base.set_flags( {'FLAGS_sort_sum_gradient': is_sort_sum_gradient} ) @@ -160,7 +160,7 @@ def simple_net_float32(self, is_sparse, dtype): dtype=dtype, ) - exe = fluid.Executor(place) + exe = base.Executor(place) sgd = paddle.optimizer.SGD(learning_rate=1e-3) x = paddle.static.data( name="x", shape=[-1, num_steps], dtype='int64' @@ -177,7 +177,7 @@ def simple_net_float32(self, is_sparse, dtype): static_param_name_list.append(param.name) out = exe.run( - fluid.default_startup_program(), + base.default_startup_program(), fetch_list=static_param_name_list, ) for i in range(len(static_param_name_list)): @@ -191,7 +191,7 @@ def simple_net_float32(self, is_sparse, dtype): fetch_list = [static_loss] fetch_list.extend(static_param_name_list) out = exe.run( - fluid.default_main_program(), + 
base.default_main_program(), feed={"x": x_data, "y": y_data}, fetch_list=fetch_list, ) diff --git a/test/legacy_test/test_imperative_mnist.py b/test/legacy_test/test_imperative_mnist.py index ff436db8d8084..b2e8779b22193 100644 --- a/test/legacy_test/test_imperative_mnist.py +++ b/test/legacy_test/test_imperative_mnist.py @@ -19,8 +19,8 @@ from utils import DyGraphProgramDescTracerTestHelper import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.nn import Linear @@ -119,23 +119,23 @@ def test_mnist_float32(self): traced_layer = None - with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + with base.dygraph.guard(): + base.default_startup_program().random_seed = seed + base.default_main_program().random_seed = seed mnist = MNIST() sgd = paddle.optimizer.SGD( learning_rate=1e-3, parameters=mnist.parameters() ) - batch_py_reader = fluid.io.PyReader(capacity=1) + batch_py_reader = base.io.PyReader(capacity=1) batch_py_reader.decorate_sample_list_generator( paddle.batch( self.reader_decorator(paddle.dataset.mnist.train()), batch_size=batch_size, drop_last=True, ), - places=fluid.CPUPlace(), + places=base.CPUPlace(), ) mnist.train() @@ -177,13 +177,13 @@ def test_mnist_float32(self): dy_param_value[param.name] = param.numpy() with new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + base.default_startup_program().random_seed = seed + base.default_main_program().random_seed = seed - exe = fluid.Executor( - fluid.CPUPlace() + exe = base.Executor( + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) mnist = MNIST() @@ -214,7 +214,7 @@ def test_mnist_float32(self): static_param_name_list.append(param.name) out = exe.run( - fluid.default_startup_program(), + base.default_startup_program(), fetch_list=static_param_name_list, ) @@ -241,7 +241,7 @@ def test_mnist_float32(self): traced_layer([static_x_data]) out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"pixel": static_x_data, "label": y_data}, fetch_list=fetch_list, ) diff --git a/test/legacy_test/test_imperative_mnist_sorted_gradient.py b/test/legacy_test/test_imperative_mnist_sorted_gradient.py index a85bdae98cdeb..9629858b2d8bf 100644 --- a/test/legacy_test/test_imperative_mnist_sorted_gradient.py +++ b/test/legacy_test/test_imperative_mnist_sorted_gradient.py @@ -19,9 +19,9 @@ from test_imperative_mnist import MNIST import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.dygraph.base import to_variable +from paddle import base +from paddle.base import core +from paddle.base.dygraph.base import to_variable class TestImperativeMnistSortGradient(unittest.TestCase): @@ -29,10 +29,10 @@ def test_mnist_sort_gradient_float32(self): seed = 90 epoch_num = 1 - with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed - fluid.set_flags({'FLAGS_sort_sum_gradient': True}) + with base.dygraph.guard(): + base.default_startup_program().random_seed = seed + base.default_main_program().random_seed = seed + base.set_flags({'FLAGS_sort_sum_gradient': True}) mnist2 = MNIST() sgd2 = paddle.optimizer.SGD( @@ -82,13 +82,13 @@ def test_mnist_sort_gradient_float32(self): break with new_program_scope(): - fluid.default_startup_program().random_seed = 
seed - fluid.default_main_program().random_seed = seed + base.default_startup_program().random_seed = seed + base.default_main_program().random_seed = seed - exe = fluid.Executor( - fluid.CPUPlace() + exe = base.Executor( + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) mnist = MNIST() @@ -117,7 +117,7 @@ def test_mnist_sort_gradient_float32(self): static_param_name_list.append(param.name) out = exe.run( - fluid.default_startup_program(), + base.default_startup_program(), fetch_list=static_param_name_list, ) @@ -138,7 +138,7 @@ def test_mnist_sort_gradient_float32(self): fetch_list = [avg_loss.name] fetch_list.extend(static_param_name_list) out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"pixel": static_x_data, "label": y_data}, fetch_list=fetch_list, ) diff --git a/test/legacy_test/test_imperative_named_members.py b/test/legacy_test/test_imperative_named_members.py index 89a3834ca4009..4680447469928 100644 --- a/test/legacy_test/test_imperative_named_members.py +++ b/test/legacy_test/test_imperative_named_members.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base class MyLayer(paddle.nn.Layer): @@ -34,7 +34,7 @@ def forward(self, x): class TestImperativeNamedSubLayers(unittest.TestCase): def test_named_sublayers(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): fc1 = paddle.nn.Linear(10, 3) fc2 = paddle.nn.Linear(3, 10, bias_attr=False) custom = MyLayer(3, 10) @@ -64,7 +64,7 @@ def test_named_sublayers(self): class TestImperativeNamedParameters(unittest.TestCase): def test_named_parameters(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): fc1 = paddle.nn.Linear(10, 3) fc2 = paddle.nn.Linear(3, 10, bias_attr=False) custom = MyLayer(3, 10) @@ -82,7 +82,7 @@ def test_named_parameters(self): self.assertListEqual(expected_named_parameters, named_parameters) def test_dir_layer(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): class Mymodel(paddle.nn.Layer): def __init__(self): @@ -91,12 +91,12 @@ def __init__(self): self.linear2 = paddle.nn.Linear(5, 5) self.conv2d = paddle.nn.Conv2D(3, 2, 3) self.embedding = paddle.nn.Embedding(128, 16) - self.h_0 = fluid.dygraph.to_variable( + self.h_0 = base.dygraph.to_variable( np.zeros([10, 10]).astype('float32') ) self.weight = self.create_parameter( shape=[2, 3], - attr=fluid.ParamAttr(), + attr=base.ParamAttr(), dtype="float32", is_bias=False, ) diff --git a/test/legacy_test/test_imperative_numpy_bridge.py b/test/legacy_test/test_imperative_numpy_bridge.py index 0adb69adcf878..31c286a234792 100644 --- a/test/legacy_test/test_imperative_numpy_bridge.py +++ b/test/legacy_test/test_imperative_numpy_bridge.py @@ -17,29 +17,29 @@ import numpy as np -from paddle import fluid +from paddle import base class TestImperativeNumpyBridge(unittest.TestCase): def test_tensor_from_numpy(self): data_np = np.array([[2, 3, 1]]).astype('float32') - with fluid.dygraph.guard(fluid.CPUPlace()): + with base.dygraph.guard(base.CPUPlace()): with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") - var = fluid.dygraph.to_variable(data_np, zero_copy=True) + var = base.dygraph.to_variable(data_np, zero_copy=True) assert ( "Currently, zero_copy is not supported, and it will be discarded." 
in str(w[-1].message) ) # Temporally diable zero_copy - # var = fluid.dygraph.to_variable(data_np, zero_copy=True) + # var = base.dygraph.to_variable(data_np, zero_copy=True) # np.testing.assert_array_equal(var.numpy(), data_np) # data_np[0][0] = 4 # self.assertEqual(data_np[0][0], 4) # self.assertEqual(var[0][0].numpy()[0], 4) # np.testing.assert_array_equal(var.numpy(), data_np) - var2 = fluid.dygraph.to_variable(data_np, zero_copy=False) + var2 = base.dygraph.to_variable(data_np, zero_copy=False) np.testing.assert_array_equal(var2.numpy(), data_np) data_np[0][0] = -1 self.assertEqual(data_np[0][0], -1) diff --git a/test/legacy_test/test_imperative_ocr_attention_model.py b/test/legacy_test/test_imperative_ocr_attention_model.py index 8b07c7652fad9..f39fc790d55c2 100644 --- a/test/legacy_test/test_imperative_ocr_attention_model.py +++ b/test/legacy_test/test_imperative_ocr_attention_model.py @@ -18,9 +18,9 @@ from test_imperative_base import new_program_scope import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.dygraph.base import to_variable +from paddle import base +from paddle.base import core +from paddle.base.dygraph.base import to_variable from paddle.nn import BatchNorm, Linear @@ -76,12 +76,12 @@ def __init__( filter_size = 3 conv_std_0 = (2.0 / (filter_size**2 * channels[0])) ** 0.5 - conv_param_0 = fluid.ParamAttr( + conv_param_0 = base.ParamAttr( initializer=paddle.nn.initializer.Normal(0.0, conv_std_0) ) conv_std_1 = (2.0 / (filter_size**2 * channels[1])) ** 0.5 - conv_param_1 = fluid.ParamAttr( + conv_param_1 = base.ParamAttr( initializer=paddle.nn.initializer.Normal(0.0, conv_std_1) ) @@ -199,14 +199,14 @@ def __init__( ): super().__init__() self.rnn_hidden_size = rnn_hidden_size - para_attr = fluid.ParamAttr( + para_attr = base.ParamAttr( initializer=paddle.nn.initializer.Normal(0.0, 0.02) ) - bias_attr = fluid.ParamAttr( + bias_attr = base.ParamAttr( initializer=paddle.nn.initializer.Normal(0.0, 0.02), learning_rate=2.0, ) - if fluid.framework.in_dygraph_mode(): + if base.framework.in_dygraph_mode(): h_0 = np.zeros( (Config.batch_size, rnn_hidden_size), dtype="float32" ) @@ -247,7 +247,7 @@ def __init__( def forward(self, inputs): conv_features = self.ocr_convs(inputs) - # sliced_feature = fluid.layers.im2sequence( + # sliced_feature = base.layers.im2sequence( # input=conv_features, # stride=[1, 1], # filter_size=[conv_features.shape[2], 1]) @@ -445,7 +445,7 @@ def test_ocr_test(self): ) def run_dygraph(): - fluid.set_flags({'FLAGS_sort_sum_gradient': True}) + base.set_flags({'FLAGS_sort_sum_gradient': True}) paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) ocr_attention = OCRAttention() @@ -506,10 +506,10 @@ def run_dygraph(): return dy_out, dy_param_init_value, dy_param_value - with fluid.dygraph.guard(): + with base.dygraph.guard(): dy_out, dy_param_init_value, dy_param_value = run_dygraph() - with fluid.dygraph.guard(): + with base.dygraph.guard(): ( eager_out, eager_param_init_value, @@ -519,10 +519,10 @@ def run_dygraph(): with new_program_scope(): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) - exe = fluid.Executor( - fluid.CPUPlace() + exe = base.Executor( + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) ocr_attention = OCRAttention() @@ -564,7 +564,7 @@ def run_dygraph(): use_softmax=False, ) static_avg_loss = paddle.sum(cost) - # param_grad_list = fluid.backward.append_backward(static_avg_loss) + # param_grad_list = 
base.backward.append_backward(static_avg_loss) optimizer.minimize(static_avg_loss) static_param_init_value = {} @@ -578,7 +578,7 @@ def run_dygraph(): ) out = exe.run( - fluid.default_startup_program(), + base.default_startup_program(), fetch_list=static_param_name_list, ) @@ -594,7 +594,7 @@ def run_dygraph(): static_label_out = label_out_np static_label_out = static_label_out.reshape((-1, 1)) out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={ "pixel": image_np, "label_in": static_label_in, diff --git a/test/legacy_test/test_imperative_optimizer.py b/test/legacy_test/test_imperative_optimizer.py index 170a3b7d18585..8f6e37fef04d0 100644 --- a/test/legacy_test/test_imperative_optimizer.py +++ b/test/legacy_test/test_imperative_optimizer.py @@ -18,9 +18,9 @@ from test_imperative_base import new_program_scope import paddle -from paddle import fluid +from paddle import base from paddle.distributed.fleet.meta_optimizers import DGCMomentumOptimizer -from paddle.fluid import core +from paddle.base import core # Note(wangzhongpu) # In dygraph, don't support ModelAverage, DGCMomentumOptimizer, ExponentialMovingAverage, PipelineOptimizer, LookaheadOptimizer, RecomputeOptimizer. @@ -63,12 +63,12 @@ def _check_exception(self, exception_message, place=None): batch_size = 128 if place is None: place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): try: paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) @@ -85,12 +85,12 @@ def _check_mlp(self, place=None): if place is None: place = ( - fluid.CPUPlace() + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) @@ -99,14 +99,14 @@ def _check_mlp(self, place=None): parameter_list=mlp.parameters() ) - batch_py_reader = fluid.io.PyReader(capacity=1) + batch_py_reader = base.io.PyReader(capacity=1) batch_py_reader.decorate_sample_list_generator( paddle.batch( self.reader_decorator(paddle.dataset.mnist.train()), batch_size=batch_size, drop_last=True, ), - places=fluid.CPUPlace(), + places=base.CPUPlace(), ) dy_param_init_value = {} @@ -140,12 +140,12 @@ def _check_mlp(self, place=None): if place is None: place = ( - fluid.CPUPlace() + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) - exe = fluid.Executor(place) + exe = base.Executor(place) mlp = MLP() optimizer = self.get_optimizer() @@ -171,7 +171,7 @@ def _check_mlp(self, place=None): static_param_name_list.append(param.name) out = exe.run( - fluid.default_startup_program(), + base.default_startup_program(), fetch_list=static_param_name_list, ) @@ -194,7 +194,7 @@ def _check_mlp(self, place=None): fetch_list = [avg_loss.name] fetch_list.extend(static_param_name_list) out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"pixel": static_x_data, "label": y_data}, fetch_list=fetch_list, ) @@ -229,12 +229,12 @@ def _check_mlp(self, place=None): class TestOptimizerLearningRate(unittest.TestCase): def test_constant_lr(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): a = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") linear = paddle.nn.Linear(10, 10) - a = fluid.dygraph.to_variable(a) + a = base.dygraph.to_variable(a) b = 
linear(a) @@ -253,12 +253,12 @@ def test_constant_lr(self): np.testing.assert_allclose(lr, 0.001, rtol=1e-06, atol=0.0) def test_lr_decay(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): a = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") linear = paddle.nn.Linear(10, 10) - a = fluid.dygraph.to_variable(a) + a = base.dygraph.to_variable(a) b = linear(a) @@ -284,12 +284,12 @@ def test_lr_decay(self): np.testing.assert_allclose(lr, ret[i], rtol=1e-06, atol=0.0) def test_lr_decay_natural_exp(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): a = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") linear = paddle.nn.Linear(10, 10) - a = fluid.dygraph.to_variable(a) + a = base.dygraph.to_variable(a) b = linear(a) @@ -319,12 +319,12 @@ def test_lr_decay_natural_exp(self): np.testing.assert_allclose(lr, ret[i], rtol=1e-06, atol=0.0) def test_set_lr(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): a = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") linear = paddle.nn.Linear(10, 10) - a = fluid.dygraph.to_variable(a) + a = base.dygraph.to_variable(a) b = linear(a) diff --git a/test/legacy_test/test_imperative_optimizer_v2.py b/test/legacy_test/test_imperative_optimizer_v2.py index e7e0f1c4a4782..3262c5a05dd8b 100644 --- a/test/legacy_test/test_imperative_optimizer_v2.py +++ b/test/legacy_test/test_imperative_optimizer_v2.py @@ -19,9 +19,9 @@ from test_imperative_base import new_program_scope import paddle -from paddle import fluid +from paddle import base from paddle.distributed.fleet.meta_optimizers import DGCMomentumOptimizer -from paddle.fluid import core +from paddle.base import core # Note(wangzhongpu) # In dygraph, don't support ModelAverage, DGCMomentumOptimizer, ExponentialMovingAverage, PipelineOptimizer, LookaheadOptimizer, RecomputeOptimizer. 
@@ -64,9 +64,9 @@ def _check_exception(self, exception_message, place=None): batch_size = 128 if place is None: place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) try: @@ -88,9 +88,9 @@ def _check_mlp(self, place=None): if place is None: place = ( - fluid.CPUPlace() + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) paddle.disable_static(place) @@ -100,14 +100,14 @@ def _check_mlp(self, place=None): mlp = MLP() optimizer = self.get_optimizer_dygraph(parameter_list=mlp.parameters()) - batch_py_reader = fluid.io.PyReader(capacity=1) + batch_py_reader = base.io.PyReader(capacity=1) batch_py_reader.decorate_sample_list_generator( paddle.batch( self.reader_decorator(paddle.dataset.mnist.train()), batch_size=batch_size, drop_last=True, ), - places=fluid.CPUPlace(), + places=base.CPUPlace(), ) dy_param_init_value = {} @@ -153,12 +153,12 @@ def _check_mlp(self, place=None): if place is None: place = ( - fluid.CPUPlace() + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) - exe = fluid.Executor(place) + exe = base.Executor(place) mlp = MLP() optimizer = self.get_optimizer() @@ -184,7 +184,7 @@ def _check_mlp(self, place=None): static_param_name_list.append(param.name) out = exe.run( - fluid.default_startup_program(), + base.default_startup_program(), fetch_list=static_param_name_list, ) @@ -207,7 +207,7 @@ def _check_mlp(self, place=None): fetch_list = [avg_loss.name] fetch_list.extend(static_param_name_list) out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"pixel": static_x_data, "label": y_data}, fetch_list=fetch_list, ) @@ -527,12 +527,12 @@ def test_sgd(self): class TestOptimizerLearningRate(unittest.TestCase): def test_constant_lr(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): a = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") linear = paddle.nn.Linear(10, 10) - a = fluid.dygraph.to_variable(a) + a = base.dygraph.to_variable(a) b = linear(a) @@ -551,12 +551,12 @@ def test_constant_lr(self): np.testing.assert_allclose(lr, 0.001, rtol=1e-06, atol=0.0) def test_lr_decay(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): a = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") linear = paddle.nn.Linear(10, 10) - a = fluid.dygraph.to_variable(a) + a = base.dygraph.to_variable(a) b = linear(a) @@ -580,11 +580,11 @@ def test_lr_decay(self): scheduler.step() def test_lr_scheduler_natural_exp(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): a = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") linear = paddle.nn.Linear(10, 10) - a = fluid.dygraph.to_variable(a) + a = base.dygraph.to_variable(a) b = linear(a) loss = paddle.mean(b) @@ -605,12 +605,12 @@ def test_lr_scheduler_natural_exp(self): scheduler.step() def test_set_lr(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): a = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") linear = paddle.nn.Linear(10, 10) - a = fluid.dygraph.to_variable(a) + a = base.dygraph.to_variable(a) b = linear(a) @@ -641,12 +641,12 @@ def test_set_lr(self): adam.set_lr(0.01) def test_set_lr_scheduler(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): a = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") linear = paddle.nn.Linear(10, 10) - a = fluid.dygraph.to_variable(a) + a = base.dygraph.to_variable(a) b = linear(a) @@ -865,7 +865,7 @@ 
def test_recompute(self): class TestImperativeOptimizerList(unittest.TestCase): def test_parameter_list(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): linear_1 = paddle.nn.Linear(10, 10) linear_2 = paddle.nn.Linear(10, 10) @@ -877,7 +877,7 @@ def test_parameter_list(self): ) in_np = np.random.uniform(-0.1, 0.1, [10, 10]).astype("float32") - in_data = fluid.dygraph.to_variable(in_np) + in_data = base.dygraph.to_variable(in_np) y = linear_1(in_data) y = linear_2(y) diff --git a/test/legacy_test/test_imperative_parallel_coalesce_split.py b/test/legacy_test/test_imperative_parallel_coalesce_split.py index 4ae37b2588849..db9a2e56a766e 100644 --- a/test/legacy_test/test_imperative_parallel_coalesce_split.py +++ b/test/legacy_test/test_imperative_parallel_coalesce_split.py @@ -19,9 +19,9 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.dygraph.base import to_variable +from paddle import base +from paddle.base import core +from paddle.base.dygraph.base import to_variable class MyLayer(paddle.nn.Layer): @@ -42,7 +42,7 @@ def test_coalesce_split(self): _split_tensors, ) - with fluid.dygraph.guard(): + with base.dygraph.guard(): test_layer = MyLayer("test_layer") strategy = core.ParallelStrategy() test_layer = paddle.DataParallel(test_layer, strategy) @@ -73,7 +73,7 @@ def test_coalesce_split(self): def test_reshape_inplace(self): from paddle.distributed.parallel import _reshape_inplace - with fluid.dygraph.guard(): + with base.dygraph.guard(): test_layer = MyLayer("test_layer") strategy = core.ParallelStrategy() test_layer = paddle.DataParallel(test_layer, strategy) diff --git a/test/legacy_test/test_imperative_partitial_backward.py b/test/legacy_test/test_imperative_partitial_backward.py index 77d531574a471..e61f24a36d27c 100644 --- a/test/legacy_test/test_imperative_partitial_backward.py +++ b/test/legacy_test/test_imperative_partitial_backward.py @@ -17,14 +17,14 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base class TestImperativePartitialBackward(unittest.TestCase): def test_partitial_backward(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): x = np.random.randn(2, 4, 5).astype("float32") - x = fluid.dygraph.to_variable(x) + x = base.dygraph.to_variable(x) linear1 = paddle.nn.Linear(5, 10) linear2 = paddle.nn.Linear(5, 10) diff --git a/test/legacy_test/test_imperative_ptb_rnn.py b/test/legacy_test/test_imperative_ptb_rnn.py index e3790fa2b1931..1847b13e92840 100644 --- a/test/legacy_test/test_imperative_ptb_rnn.py +++ b/test/legacy_test/test_imperative_ptb_rnn.py @@ -19,9 +19,9 @@ from utils import DyGraphProgramDescTracerTestHelper import paddle -from paddle import fluid -from paddle.fluid import core, framework -from paddle.fluid.dygraph.base import to_variable +from paddle import base +from paddle.base import core, framework +from paddle.base.dygraph.base import to_variable from paddle.nn import Embedding @@ -48,7 +48,7 @@ def _create_parameter(self): for i in range(self._num_layers): weight_1 = self.create_parameter( - attr=fluid.ParamAttr( + attr=base.ParamAttr( initializer=paddle.nn.initializer.Uniform( low=-self._init_scale, high=self._init_scale ) @@ -61,7 +61,7 @@ def _create_parameter(self): ) self.weight_1_arr.append(self.add_parameter('w_%d' % i, weight_1)) bias_1 = self.create_parameter( - attr=fluid.ParamAttr( + attr=base.ParamAttr( initializer=paddle.nn.initializer.Uniform( low=-self._init_scale, high=self._init_scale ) @@ -172,7 
+172,7 @@ def __init__( vocab_size, hidden_size, sparse=is_sparse, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( name='embedding_para', initializer=paddle.nn.initializer.Uniform( low=-init_scale, high=init_scale @@ -180,7 +180,7 @@ def __init__( ), ) self.softmax_weight = self.create_parameter( - attr=fluid.ParamAttr(), + attr=base.ParamAttr(), shape=[self.hidden_size, self.vocab_size], dtype="float32", default_initializer=paddle.nn.initializer.Uniform( @@ -188,7 +188,7 @@ def __init__( ), ) self.softmax_bias = self.create_parameter( - attr=fluid.ParamAttr(), + attr=base.ParamAttr(), shape=[self.vocab_size], dtype="float32", default_initializer=paddle.nn.initializer.Uniform( @@ -250,7 +250,7 @@ def ptb_rnn_cpu_float32(self, is_sparse): batch_num = 200 traced_layer = None - with fluid.dygraph.guard(): + with base.dygraph.guard(): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to @@ -320,10 +320,10 @@ def ptb_rnn_cpu_float32(self, is_sparse): is_sparse=is_sparse, ) - exe = fluid.Executor( - fluid.CPUPlace() + exe = base.Executor( + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) sgd = paddle.optimizer.SGD(learning_rate=1e-3) x = paddle.static.data( @@ -374,7 +374,7 @@ def ptb_rnn_cpu_float32(self, is_sparse): fetch_list = [static_loss, static_last_hidden, static_last_cell] fetch_list.extend(static_param_name_list) out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={ "x": x_data, "y": y_data, diff --git a/test/legacy_test/test_imperative_ptb_rnn_sorted_gradient.py b/test/legacy_test/test_imperative_ptb_rnn_sorted_gradient.py index 972c38f4b6fa5..c79abac4c757d 100644 --- a/test/legacy_test/test_imperative_ptb_rnn_sorted_gradient.py +++ b/test/legacy_test/test_imperative_ptb_rnn_sorted_gradient.py @@ -19,9 +19,9 @@ from test_imperative_ptb_rnn import PtbModel import paddle -from paddle import fluid -from paddle.fluid import core, framework -from paddle.fluid.dygraph.base import to_variable +from paddle import base +from paddle.base import core, framework +from paddle.base.dygraph.base import to_variable class TestDygraphPtbRnnSortGradient(unittest.TestCase): @@ -39,8 +39,8 @@ def ptb_rnn_sort_gradient_cpu_float32(self, is_sparse): batch_size = 4 batch_num = 200 - with fluid.dygraph.guard(): - fluid.set_flags({'FLAGS_sort_sum_gradient': True}) + with base.dygraph.guard(): + base.set_flags({'FLAGS_sort_sum_gradient': True}) paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) @@ -108,10 +108,10 @@ def ptb_rnn_sort_gradient_cpu_float32(self, is_sparse): is_sparse=is_sparse, ) - exe = fluid.Executor( - fluid.CPUPlace() + exe = base.Executor( + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) sgd = paddle.optimizer.SGD(learning_rate=1e-3) x = paddle.static.data( @@ -162,7 +162,7 @@ def ptb_rnn_sort_gradient_cpu_float32(self, is_sparse): fetch_list = [static_loss, static_last_hidden, static_last_cell] fetch_list.extend(static_param_name_list) out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={ "x": x_data, "y": y_data, diff --git a/test/legacy_test/test_imperative_recurrent_usage.py b/test/legacy_test/test_imperative_recurrent_usage.py index a399a9266e116..a17bf58472bdd 100644 --- a/test/legacy_test/test_imperative_recurrent_usage.py +++ b/test/legacy_test/test_imperative_recurrent_usage.py @@ -18,9 +18,9 @@ from test_imperative_base import 
new_program_scope import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.dygraph.base import to_variable +from paddle import base +from paddle.base import core +from paddle.base.dygraph.base import to_variable class RecurrentTest(paddle.nn.Layer): @@ -38,9 +38,9 @@ def test_recurrent_feed(self): seed = 90 original_np1 = np.arange(1, 5).reshape(2, 2).astype("float32") original_np2 = np.arange(5, 9).reshape(2, 2).astype("float32") - with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + with base.dygraph.guard(): + base.default_startup_program().random_seed = seed + base.default_main_program().random_seed = seed original_in1 = to_variable(original_np1) original_in2 = to_variable(original_np2) original_in1.stop_gradient = False @@ -57,9 +57,9 @@ def test_recurrent_feed(self): original_in1.stop_gradient = True rt.clear_gradients() - with fluid.dygraph.guard(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + with base.dygraph.guard(): + base.default_startup_program().random_seed = seed + base.default_main_program().random_seed = seed original_in1 = to_variable(original_np1) original_in2 = to_variable(original_np2) original_in1.stop_gradient = False @@ -77,30 +77,30 @@ def test_recurrent_feed(self): rt.clear_gradients() with new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + base.default_startup_program().random_seed = seed + base.default_main_program().random_seed = seed in1 = paddle.static.data(name="inp1", shape=[2, 2]) in1.stop_gradient = False in2 = paddle.static.data(name="inp2", shape=[2, 2]) in2.stop_gradient = False rt1 = RecurrentTest("RecurrentTest") static_sum_out, static_out = rt1(in1, in2) - fluid.backward.append_backward(static_sum_out) - exe = fluid.Executor( - fluid.CPUPlace() + base.backward.append_backward(static_sum_out) + exe = base.Executor( + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) static_dout = ( - fluid.default_main_program() + base.default_main_program() .block(0) ._find_var_recursive(static_out.name + "@GRAD") ) fetch_list = [static_sum_out, static_out, static_dout] for i in range(3): out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"inp1": original_np1, "inp2": original_np2}, fetch_list=fetch_list, ) diff --git a/test/legacy_test/test_imperative_reinforcement.py b/test/legacy_test/test_imperative_reinforcement.py index 481b78d613491..2959071cf6a9f 100644 --- a/test/legacy_test/test_imperative_reinforcement.py +++ b/test/legacy_test/test_imperative_reinforcement.py @@ -19,8 +19,8 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class Policy(paddle.nn.Layer): @@ -63,18 +63,18 @@ def run_dygraph(): policy = Policy(input_size=4) - dy_state = fluid.dygraph.base.to_variable(state) + dy_state = base.dygraph.base.to_variable(state) dy_state.stop_gradient = True loss_probs = policy(dy_state) - dy_mask = fluid.dygraph.base.to_variable(mask) + dy_mask = base.dygraph.base.to_variable(mask) dy_mask.stop_gradient = True loss_probs = paddle.log(loss_probs) loss_probs = paddle.multiply(loss_probs, dy_mask) loss_probs = paddle.sum(loss_probs, axis=-1) - dy_reward = fluid.dygraph.base.to_variable(reward) + dy_reward = 
base.dygraph.base.to_variable(reward) dy_reward.stop_gradient = True loss_probs = paddle.multiply(dy_reward, loss_probs) @@ -101,10 +101,10 @@ def run_dygraph(): return dy_out, dy_param_init_value, dy_param_value - with fluid.dygraph.guard(): + with base.dygraph.guard(): dy_out, dy_param_init_value, dy_param_value = run_dygraph() - with fluid.dygraph.guard(): + with base.dygraph.guard(): ( eager_out, eager_param_init_value, @@ -115,10 +115,10 @@ def run_dygraph(): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) - exe = fluid.Executor( - fluid.CPUPlace() + exe = base.Executor( + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) policy = Policy(input_size=4) @@ -153,7 +153,7 @@ def run_dygraph(): static_param_name_list.append(param.name) out = exe.run( - fluid.default_startup_program(), + base.default_startup_program(), fetch_list=static_param_name_list, ) @@ -164,7 +164,7 @@ def run_dygraph(): fetch_list.extend(static_param_name_list) out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"st_state": state, "st_reward": reward, "st_mask": mask}, fetch_list=fetch_list, ) diff --git a/test/legacy_test/test_imperative_resnet.py b/test/legacy_test/test_imperative_resnet.py index 41e270c67958a..b91f840a57bf0 100644 --- a/test/legacy_test/test_imperative_resnet.py +++ b/test/legacy_test/test_imperative_resnet.py @@ -19,10 +19,10 @@ from utils import DyGraphProgramDescTracerTestHelper import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.dygraph.base import to_variable -from paddle.fluid.layer_helper import LayerHelper +from paddle import base +from paddle.base import core +from paddle.base.dygraph.base import to_variable +from paddle.base.layer_helper import LayerHelper from paddle.nn import BatchNorm # NOTE(zhiqiu): run with FLAGS_cudnn_deterministic=1 @@ -58,14 +58,14 @@ def optimizer_setting(params, parameter_list=None): base_lr = params["lr"] lr = [] lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)] - if fluid.in_dygraph_mode(): + if base.in_dygraph_mode(): optimizer = paddle.optimizer.SGD( learning_rate=0.01, parameters=parameter_list ) else: optimizer = paddle.optimizer.SGD(learning_rate=0.01) # TODO(minqiyang): Add learning rate scheduler support to dygraph mode - # optimizer = fluid.optimizer.Momentum( + # optimizer = base.optimizer.Momentum( # learning_rate=params["lr"], # learning_rate=paddle.optimizer.lr.piecewise_decay( # boundaries=bd, values=lr), @@ -225,7 +225,7 @@ def __init__(self, layers=50, class_dim=102, use_cudnn=True): self.out = paddle.nn.Linear( self.pool2d_avg_output, class_dim, - weight_attr=fluid.param_attr.ParamAttr( + weight_attr=base.param_attr.ParamAttr( initializer=paddle.nn.initializer.Uniform(-stdv, stdv) ), ) @@ -260,7 +260,7 @@ def test_resnet_float32(self): traced_layer = None - with fluid.dygraph.guard(): + with base.dygraph.guard(): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) @@ -346,10 +346,10 @@ def test_resnet_float32(self): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) - exe = fluid.Executor( - fluid.CPUPlace() + exe = base.Executor( + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) resnet = ResNet() @@ -387,7 +387,7 @@ def test_resnet_float32(self): ) out = exe.run( - fluid.default_startup_program(), + base.default_startup_program(), fetch_list=static_param_name_list, ) @@ -414,7 +414,7 @@ def 
test_resnet_float32(self): fetch_list.extend(static_param_name_list) fetch_list.extend(static_grad_name_list) out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"pixel": static_x_data, "label": y_data}, fetch_list=fetch_list, ) diff --git a/test/legacy_test/test_imperative_resnet_sorted_gradient.py b/test/legacy_test/test_imperative_resnet_sorted_gradient.py index ba71a803fb650..08805d6e6f990 100644 --- a/test/legacy_test/test_imperative_resnet_sorted_gradient.py +++ b/test/legacy_test/test_imperative_resnet_sorted_gradient.py @@ -19,9 +19,9 @@ from test_imperative_resnet import ResNet import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.dygraph.base import to_variable +from paddle import base +from paddle.base import core +from paddle.base.dygraph.base import to_variable batch_size = 8 train_parameters = { @@ -54,14 +54,14 @@ def optimizer_setting(params, parameter_list=None): base_lr = params["lr"] lr = [] lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)] - if fluid.in_dygraph_mode(): + if base.in_dygraph_mode(): optimizer = paddle.optimizer.SGD( learning_rate=0.01, parameters=parameter_list ) else: optimizer = paddle.optimizer.SGD(learning_rate=0.01) # TODO(minqiyang): Add learning rate scheduler support to dygraph mode - # optimizer = fluid.optimizer.Momentum( + # optimizer = base.optimizer.Momentum( # learning_rate=params["lr"], # learning_rate=paddle.optimizer.lr.piecewise_decay( # boundaries=bd, values=lr), @@ -77,8 +77,8 @@ def test_resnet_sort_gradient_float32(self): batch_size = train_parameters["batch_size"] batch_num = 10 - with fluid.dygraph.guard(): - fluid.set_flags({'FLAGS_sort_sum_gradient': True}) + with base.dygraph.guard(): + base.set_flags({'FLAGS_sort_sum_gradient': True}) paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) @@ -152,10 +152,10 @@ def test_resnet_sort_gradient_float32(self): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) - exe = fluid.Executor( - fluid.CPUPlace() + exe = base.Executor( + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) resnet = ResNet() @@ -196,7 +196,7 @@ def test_resnet_sort_gradient_float32(self): ) out = exe.run( - fluid.default_startup_program(), + base.default_startup_program(), fetch_list=static_param_name_list, ) @@ -220,7 +220,7 @@ def test_resnet_sort_gradient_float32(self): fetch_list.extend(static_param_name_list) fetch_list.extend(static_grad_name_list) out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"pixel": static_x_data, "label": y_data}, fetch_list=fetch_list, ) diff --git a/test/legacy_test/test_imperative_save_load_v2.py b/test/legacy_test/test_imperative_save_load_v2.py index 11f3386df3461..473872f1aadb3 100644 --- a/test/legacy_test/test_imperative_save_load_v2.py +++ b/test/legacy_test/test_imperative_save_load_v2.py @@ -19,9 +19,9 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.dygraph.base import to_variable +from paddle import base +from paddle.base import core +from paddle.base.dygraph.base import to_variable from paddle.nn import Embedding from paddle.optimizer import Adam from paddle.optimizer.lr import LRScheduler @@ -47,7 +47,7 @@ def __init__( for i in range(self._num_layers): weight_1 = self.create_parameter( - attr=fluid.ParamAttr( + attr=base.ParamAttr( initializer=paddle.nn.initializer.Uniform( low=-self._init_scale, high=self._init_scale 
) @@ -60,7 +60,7 @@ def __init__( ) self.weight_1_arr.append(self.add_parameter('w_%d' % i, weight_1)) bias_1 = self.create_parameter( - attr=fluid.ParamAttr( + attr=base.ParamAttr( initializer=paddle.nn.initializer.Uniform( low=-self._init_scale, high=self._init_scale ) @@ -170,7 +170,7 @@ def __init__( vocab_size, hidden_size, sparse=False, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( name='embedding_para', initializer=paddle.nn.initializer.Uniform( low=-init_scale, high=init_scale @@ -179,7 +179,7 @@ def __init__( ) self.softmax_weight = self.create_parameter( - attr=fluid.ParamAttr(), + attr=base.ParamAttr(), shape=[self.hidden_size, self.vocab_size], dtype="float32", default_initializer=paddle.nn.initializer.Uniform( @@ -187,7 +187,7 @@ def __init__( ), ) self.softmax_bias = self.create_parameter( - attr=fluid.ParamAttr(), + attr=base.ParamAttr(), shape=[self.vocab_size], dtype="float32", default_initializer=paddle.nn.initializer.Uniform( @@ -251,7 +251,7 @@ def func_setUp(self): batch_size = 4 batch_num = 200 - with fluid.dygraph.guard(): + with base.dygraph.guard(): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to @@ -272,9 +272,9 @@ def func_setUp(self): lr_arr.append(new_lr) place = ( - fluid.CPUPlace() + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) scheduler = paddle.optimizer.lr.PiecewiseDecay( boundaries=bd, values=lr_arr @@ -354,7 +354,7 @@ def func_testLoadAndSetVarBase(self): batch_size = 4 batch_num = 200 - with fluid.dygraph.guard(): + with base.dygraph.guard(): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to @@ -375,9 +375,9 @@ def func_testLoadAndSetVarBase(self): lr_arr.append(new_lr) place = ( - fluid.CPUPlace() + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) scheduler = paddle.optimizer.lr.PiecewiseDecay( boundaries=bd, values=lr_arr @@ -476,7 +476,7 @@ def func_testSetVariable(self): batch_size = 4 batch_num = 200 - with fluid.dygraph.guard(): + with base.dygraph.guard(): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to @@ -497,9 +497,9 @@ def func_testSetVariable(self): lr_arr.append(new_lr) place = ( - fluid.CPUPlace() + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) scheduler = paddle.optimizer.lr.PiecewiseDecay( boundaries=bd, values=lr_arr @@ -594,7 +594,7 @@ def func_testSetNumpy(self): batch_size = 4 batch_num = 200 - with fluid.dygraph.guard(): + with base.dygraph.guard(): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to @@ -615,9 +615,9 @@ def func_testSetNumpy(self): lr_arr.append(new_lr) place = ( - fluid.CPUPlace() + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) scheduler = paddle.optimizer.lr.PiecewiseDecay( boundaries=bd, values=lr_arr @@ -718,7 +718,7 @@ def func_testSetVariableBeforeTrain(self): batch_size = 4 batch_num = 200 - with fluid.dygraph.guard(): + with base.dygraph.guard(): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to @@ -731,9 +731,9 @@ def func_testSetVariableBeforeTrain(self): ) place = ( - fluid.CPUPlace() + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) 
adam = Adam( learning_rate=0.0, @@ -806,7 +806,7 @@ def func_testLoadAndSetVarBaseBeforeTrain(self): batch_size = 4 batch_num = 200 - with fluid.dygraph.guard(): + with base.dygraph.guard(): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to @@ -828,9 +828,9 @@ def func_testLoadAndSetVarBaseBeforeTrain(self): lr_arr.append(new_lr) place = ( - fluid.CPUPlace() + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) adam = Adam( learning_rate=0.0, @@ -908,7 +908,7 @@ def func_testSetNumpyBeforeTrain(self): batch_size = 4 batch_num = 200 - with fluid.dygraph.guard(): + with base.dygraph.guard(): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) # TODO: marsyang1993 Change seed to @@ -930,9 +930,9 @@ def func_testSetNumpyBeforeTrain(self): lr_arr.append(new_lr) place = ( - fluid.CPUPlace() + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) scheduler = paddle.optimizer.lr.PiecewiseDecay( boundaries=bd, values=lr_arr @@ -1013,7 +1013,7 @@ def func_testSetNumpyBeforeTrain(self): np.testing.assert_array_equal(new_t, base_t) def func_testOnlyLoadParams(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): emb = paddle.nn.Embedding(10, 10) state_dict = emb.state_dict() paddle.save( @@ -1026,7 +1026,7 @@ def func_testOnlyLoadParams(self): ) def func_test_no_state_in_input_dict(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): emb = paddle.nn.Embedding(10, 10) state_dict = emb.state_dict() paddle.save( @@ -1042,7 +1042,7 @@ def func_test_no_state_in_input_dict(self): emb.set_state_dict(para_state_dict) def func_test_state_shape_mismatch(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): emb = paddle.nn.Embedding(10, 10) state_dict = emb.state_dict() paddle.save( diff --git a/test/legacy_test/test_imperative_se_resnext.py b/test/legacy_test/test_imperative_se_resnext.py index 09feff3444edd..24bc9e56d0e1d 100644 --- a/test/legacy_test/test_imperative_se_resnext.py +++ b/test/legacy_test/test_imperative_se_resnext.py @@ -18,9 +18,9 @@ from test_imperative_base import new_program_scope import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.layer_helper import LayerHelper +from paddle import base +from paddle.base import core +from paddle.base.layer_helper import LayerHelper from paddle.nn import BatchNorm batch_size = 8 @@ -54,7 +54,7 @@ def optimizer_setting(params, parameter_list=None): # bd = [step * e for e in ls["epochs"]] # base_lr = params["lr"] # lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)] - if fluid.in_dygraph_mode(): + if base.in_dygraph_mode(): optimizer = paddle.optimizer.SGD( learning_rate=0.01, parameters=parameter_list ) @@ -339,7 +339,7 @@ def run_dygraph(): ) np.random.seed(seed) - batch_py_reader = fluid.io.PyReader(capacity=1) + batch_py_reader = base.io.PyReader(capacity=1) batch_py_reader.decorate_sample_list_generator( paddle.batch( self.reader_decorator( @@ -348,7 +348,7 @@ def run_dygraph(): batch_size=batch_size, drop_last=True, ), - places=fluid.CPUPlace(), + places=base.CPUPlace(), ) dy_param_init_value = {} @@ -406,7 +406,7 @@ def run_dygraph(): dy_grad_value, ) - with fluid.dygraph.guard(): + with base.dygraph.guard(): ( dy_out, dy_param_init_value, @@ -414,7 +414,7 @@ def run_dygraph(): dy_grad_value, ) = run_dygraph() - with fluid.dygraph.guard(): + with base.dygraph.guard(): ( eager_out, eager_param_init_value, 
@@ -426,10 +426,10 @@ def run_dygraph(): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) - exe = fluid.Executor( - fluid.CPUPlace() + exe = base.Executor( + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) se_resnext = SeResNeXt() @@ -472,7 +472,7 @@ def run_dygraph(): ) out = exe.run( - fluid.default_startup_program(), + base.default_startup_program(), fetch_list=static_param_name_list, ) @@ -496,7 +496,7 @@ def run_dygraph(): fetch_list.extend(static_param_name_list) fetch_list.extend(static_grad_name_list) out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"pixel": static_x_data, "label": y_data}, fetch_list=fetch_list, ) diff --git a/test/legacy_test/test_imperative_selected_rows.py b/test/legacy_test/test_imperative_selected_rows.py index 5f04e8d28a19f..8d97666e0f862 100644 --- a/test/legacy_test/test_imperative_selected_rows.py +++ b/test/legacy_test/test_imperative_selected_rows.py @@ -17,9 +17,9 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.dygraph.base import to_variable +from paddle import base +from paddle.base import core +from paddle.base.dygraph.base import to_variable class SimpleNet(paddle.nn.Layer): @@ -39,15 +39,15 @@ def forward(self, input): class TestSimpleNet(unittest.TestCase): def test_selectedrows_gradient1(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for place in places: for dtype in ["float32", "float64"]: for sort_sum_gradient in [True, False]: paddle.disable_static(place) - fluid.set_flags( + base.set_flags( {'FLAGS_sort_sum_gradient': sort_sum_gradient} ) # grad_clip = paddle.nn.ClipGradByGlobalNorm(5.0) @@ -78,14 +78,14 @@ def test_selectedrows_gradient1(self): paddle.enable_static() def test_selectedrows_gradient2(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for place in places: for sort_sum_gradient in [True, False]: - with fluid.dygraph.guard(place): - fluid.set_flags( + with base.dygraph.guard(place): + base.set_flags( {'FLAGS_sort_sum_gradient': sort_sum_gradient} ) grad_clip = paddle.nn.ClipGradByGlobalNorm(5.0) diff --git a/test/legacy_test/test_imperative_selected_rows_to_lod_tensor.py b/test/legacy_test/test_imperative_selected_rows_to_lod_tensor.py index 4f7e4780e9336..19be1a8b7ee3f 100644 --- a/test/legacy_test/test_imperative_selected_rows_to_lod_tensor.py +++ b/test/legacy_test/test_imperative_selected_rows_to_lod_tensor.py @@ -18,9 +18,9 @@ from test_imperative_base import new_program_scope import paddle -from paddle import fluid -from paddle.fluid import core, framework -from paddle.fluid.dygraph.base import to_variable +from paddle import base +from paddle.base import core, framework +from paddle.base.dygraph.base import to_variable from paddle.nn import Embedding @@ -44,7 +44,7 @@ def __init__( vocab_size, hidden_size, sparse=is_sparse, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( name='embedding_para', initializer=paddle.nn.initializer.Uniform( low=-init_scale, high=init_scale @@ -52,7 +52,7 @@ def __init__( ), ) self.softmax_weight = self.create_parameter( - attr=fluid.ParamAttr(), + attr=base.ParamAttr(), shape=[self.hidden_size, self.hidden_size], dtype=dtype, 
default_initializer=paddle.nn.initializer.Uniform( @@ -60,7 +60,7 @@ def __init__( ), ) self.softmax_bias = self.create_parameter( - attr=fluid.ParamAttr(), + attr=base.ParamAttr(), shape=[self.hidden_size], dtype=dtype, default_initializer=paddle.nn.initializer.Uniform( @@ -96,9 +96,9 @@ def test_simple_net(self): self.simple_net_float(is_sparse, dtype) def simple_net_float(self, is_sparse, dtype): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for place in places: seed = 90 @@ -111,7 +111,7 @@ def simple_net_float(self, is_sparse, dtype): for is_sort_sum_gradient in [True, False]: traced_layer = None - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) @@ -132,7 +132,7 @@ def simple_net_float(self, is_sparse, dtype): dy_param_init = {} dy_loss = None - fluid.set_flags( + base.set_flags( {'FLAGS_sort_sum_gradient': is_sort_sum_gradient} ) @@ -170,7 +170,7 @@ def simple_net_float(self, is_sparse, dtype): dtype=dtype, ) - exe = fluid.Executor(place) + exe = base.Executor(place) sgd = paddle.optimizer.SGD(learning_rate=1e-3) x = paddle.static.data( name="x", shape=[-1, num_steps], dtype='int64' @@ -201,7 +201,7 @@ def simple_net_float(self, is_sparse, dtype): fetch_list = [static_loss] fetch_list.extend(static_param_name_list) out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x": x_data, "y": y_data}, fetch_list=fetch_list, ) diff --git a/test/legacy_test/test_imperative_signal_handler.py b/test/legacy_test/test_imperative_signal_handler.py index a5e44f953df77..8ed10104b1cd8 100644 --- a/test/legacy_test/test_imperative_signal_handler.py +++ b/test/legacy_test/test_imperative_signal_handler.py @@ -19,7 +19,7 @@ import time import unittest -from paddle.fluid import core +from paddle.base import core def set_child_signal_handler(self, child_pid): diff --git a/test/legacy_test/test_imperative_star_gan_with_gradient_penalty.py b/test/legacy_test/test_imperative_star_gan_with_gradient_penalty.py index 62476046e8ab2..63a8e160d7112 100644 --- a/test/legacy_test/test_imperative_star_gan_with_gradient_penalty.py +++ b/test/legacy_test/test_imperative_star_gan_with_gradient_penalty.py @@ -17,18 +17,18 @@ import numpy as np import paddle -from paddle import _legacy_C_ops, fluid +from paddle import _legacy_C_ops, base from paddle.tensor import random -if fluid.is_compiled_with_cuda(): - fluid.core.globals()['FLAGS_cudnn_deterministic'] = True +if base.is_compiled_with_cuda(): + base.core.globals()['FLAGS_cudnn_deterministic'] = True class Config: def __init__(self, place, sort_sum_gradient=True): self.place = place - if isinstance(place, fluid.CPUPlace): + if isinstance(place, base.CPUPlace): # CPU cases are extremely slow self.g_base_dims = 1 self.d_base_dims = 1 @@ -114,7 +114,7 @@ def __init__(self, num_channels, epsilon=1e-5): self.bias = self.create_parameter(shape=[num_channels], is_bias=True) def forward(self, input): - if fluid.in_dygraph_mode(): + if base.in_dygraph_mode(): out, _, _ = _legacy_C_ops.instance_norm( input, self.scale, self.bias, 'epsilon', self.epsilon ) @@ -123,8 +123,8 @@ def forward(self, input): return paddle.static.nn.instance_norm( input, epsilon=self.epsilon, - param_attr=fluid.ParamAttr(self.scale.name), - bias_attr=fluid.ParamAttr(self.bias.name), + param_attr=base.ParamAttr(self.scale.name), + bias_attr=base.ParamAttr(self.bias.name), ) 
@@ -387,15 +387,15 @@ def loss_cls(cls, label, cfg): def calc_gradients(outputs, inputs, no_grad_set): - if fluid.in_dygraph_mode(): - return fluid.dygraph.grad( + if base.in_dygraph_mode(): + return base.dygraph.grad( outputs=outputs, inputs=inputs, no_grad_vars=no_grad_set, create_graph=True, ) else: - return fluid.gradients( + return base.gradients( targets=outputs, inputs=inputs, no_grad_set=no_grad_set ) @@ -481,7 +481,7 @@ def build_optimizer(layer, cfg, loss=None): learning_rate = 1e-3 beta1 = 0.5 beta2 = 0.999 - if fluid.in_dygraph_mode(): + if base.in_dygraph_mode(): return paddle.optimizer.Adam( learning_rate=learning_rate, beta1=beta1, @@ -510,7 +510,7 @@ def __init__(self, cfg): self.cfg = cfg - fluid.set_flags({'FLAGS_sort_sum_gradient': cfg.sort_sum_gradient}) + base.set_flags({'FLAGS_sort_sum_gradient': cfg.sort_sum_gradient}) def clear_gradients(self): if self.g_optimizer: @@ -520,9 +520,9 @@ def clear_gradients(self): self.d_optimizer.clear_gradients() def run(self, image_real, label_org, label_trg): - image_real = fluid.dygraph.to_variable(image_real) - label_org = fluid.dygraph.to_variable(label_org) - label_trg = fluid.dygraph.to_variable(label_trg) + image_real = base.dygraph.to_variable(image_real) + label_org = base.dygraph.to_variable(label_org) + label_trg = base.dygraph.to_variable(label_trg) g_loss = get_generator_loss( image_real, @@ -575,11 +575,11 @@ def create_data_layer(): paddle.seed(cfg.seed) paddle.framework.random._manual_program_seed(cfg.seed) - self.gen_program = fluid.Program() - gen_startup_program = fluid.Program() + self.gen_program = base.Program() + gen_startup_program = base.Program() - with fluid.program_guard(self.gen_program, gen_startup_program): - with fluid.unique_name.guard(): + with base.program_guard(self.gen_program, gen_startup_program): + with base.unique_name.guard(): image_real, label_org, label_trg = create_data_layer() generator = Generator(cfg) discriminator = Discriminator(cfg) @@ -593,10 +593,10 @@ def create_data_layer(): ) build_optimizer(generator, cfg, loss=g_loss) - self.dis_program = fluid.Program() - dis_startup_program = fluid.Program() - with fluid.program_guard(self.dis_program, dis_startup_program): - with fluid.unique_name.guard(): + self.dis_program = base.Program() + dis_startup_program = base.Program() + with base.program_guard(self.dis_program, dis_startup_program): + with base.unique_name.guard(): image_real, label_org, label_trg = create_data_layer() generator = Generator(cfg) discriminator = Discriminator(cfg) @@ -610,10 +610,10 @@ def create_data_layer(): ) build_optimizer(discriminator, cfg, loss=d_loss) - self.executor = fluid.Executor(cfg.place) - self.scope = fluid.Scope() + self.executor = base.Executor(cfg.place) + self.scope = base.Scope() - with fluid.scope_guard(self.scope): + with base.scope_guard(self.scope): self.executor.run(gen_startup_program) self.executor.run(dis_startup_program) @@ -626,7 +626,7 @@ def run(self, image_real, label_org, label_trg): 'label_org': label_org, 'label_trg': label_trg, } - with fluid.scope_guard(self.scope): + with base.scope_guard(self.scope): g_loss_val = self.executor.run( self.gen_program, feed=feed, fetch_list=[self.g_loss] )[0] @@ -638,10 +638,10 @@ def run(self, image_real, label_org, label_trg): class TestStarGANWithGradientPenalty(unittest.TestCase): def func_main(self): - self.place_test(fluid.CPUPlace()) + self.place_test(base.CPUPlace()) - if fluid.is_compiled_with_cuda(): - self.place_test(fluid.CUDAPlace(0)) + if base.is_compiled_with_cuda(): + 
self.place_test(base.CUDAPlace(0)) def place_test(self, place): cfg = Config(place, False) @@ -649,17 +649,17 @@ def place_test(self, place): dataset = create_mnist_dataset(cfg) dataset = paddle.reader.cache(dataset) - fluid_dygraph_loss = [] - with fluid.dygraph.guard(cfg.place): - fluid_dygraph_model = DyGraphTrainModel(cfg) + base_dygraph_loss = [] + with base.dygraph.guard(cfg.place): + base_dygraph_model = DyGraphTrainModel(cfg) for batch_id, (image_real, label_org, label_trg) in enumerate( dataset() ): - loss = fluid_dygraph_model.run(image_real, label_org, label_trg) - fluid_dygraph_loss.append(loss) + loss = base_dygraph_model.run(image_real, label_org, label_trg) + base_dygraph_loss.append(loss) eager_dygraph_loss = [] - with fluid.dygraph.guard(cfg.place): + with base.dygraph.guard(cfg.place): eager_dygraph_model = DyGraphTrainModel(cfg) for batch_id, (image_real, label_org, label_trg) in enumerate( dataset() diff --git a/test/legacy_test/test_imperative_tensor_clear_gradient.py b/test/legacy_test/test_imperative_tensor_clear_gradient.py index 11864a5902724..fe947c5a55cd3 100644 --- a/test/legacy_test/test_imperative_tensor_clear_gradient.py +++ b/test/legacy_test/test_imperative_tensor_clear_gradient.py @@ -18,16 +18,16 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid.wrapped_decorator import wrap_decorator +from paddle import base +from paddle.base.wrapped_decorator import wrap_decorator def _dygraph_guard_(func): def __impl__(*args, **kwargs): - if fluid.in_dygraph_mode(): + if base.in_dygraph_mode(): return func(*args, **kwargs) else: - with fluid.dygraph.guard(): + with base.dygraph.guard(): return func(*args, **kwargs) return __impl__ @@ -46,7 +46,7 @@ def test_tensor_method_clear_gradient_case1(self): linear = paddle.nn.Linear(2, 3) out = linear(input) out.backward() - if not fluid.framework.in_dygraph_mode(): + if not base.framework.in_dygraph_mode(): linear.weight.clear_gradient() else: linear.weight._zero_grads() @@ -67,13 +67,13 @@ def test_tensor_method_clear_gradient_case2(self): linear.weight.clear_gradient(False) # before ._gradient_set_empty(False), # the return of ._is_gradient_set_empty() should be True - if not fluid.framework.in_dygraph_mode(): + if not base.framework.in_dygraph_mode(): self.assertTrue(linear.weight._is_gradient_set_empty()) else: self.assertIsNone(linear.weight.grad) # reset, because ClearGradient will call SetIsEmpty(True), but this is not our expectation. 
- if not fluid.framework.in_dygraph_mode(): + if not base.framework.in_dygraph_mode(): linear.weight._gradient_set_empty(False) # after ._gradient_set_empty(False), # the return of ._is_gradient_set_empty() should be False diff --git a/test/legacy_test/test_imperative_trace_non_persistable_inputs.py b/test/legacy_test/test_imperative_trace_non_persistable_inputs.py index b90b5d47b8994..a30064c60f93f 100644 --- a/test/legacy_test/test_imperative_trace_non_persistable_inputs.py +++ b/test/legacy_test/test_imperative_trace_non_persistable_inputs.py @@ -18,14 +18,14 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base class SimpleFCLayer(paddle.nn.Layer): def __init__(self, feature_size, batch_size, fc_size): super().__init__() self._linear = paddle.nn.Linear(feature_size, fc_size) - self._offset = fluid.dygraph.to_variable( + self._offset = base.dygraph.to_variable( np.random.random((batch_size, fc_size)).astype('float32') ) @@ -36,10 +36,10 @@ def forward(self, x): class TestTracedLayerRecordNonPersistableInput(unittest.TestCase): def test_main(self): - if fluid.framework.in_dygraph_mode(): + if base.framework.in_dygraph_mode(): return traced_layer = None - with fluid.dygraph.guard(): + with base.dygraph.guard(): feature_size = 3 batch_size = 4 fc_size = 2 @@ -55,13 +55,13 @@ def test_main(self): } for _ in range(10): - in_x = fluid.dygraph.to_variable( + in_x = base.dygraph.to_variable( np.random.random((batch_size, feature_size)).astype( 'float32' ) ) if traced_layer is None: - dygraph_out, traced_layer = fluid.dygraph.TracedLayer.trace( + dygraph_out, traced_layer = base.dygraph.TracedLayer.trace( layer, [in_x] ) else: diff --git a/test/legacy_test/test_imperative_transformer_sorted_gradient.py b/test/legacy_test/test_imperative_transformer_sorted_gradient.py index f85986fab3584..ea4fb2bb142fb 100644 --- a/test/legacy_test/test_imperative_transformer_sorted_gradient.py +++ b/test/legacy_test/test_imperative_transformer_sorted_gradient.py @@ -19,9 +19,9 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.dygraph import guard, to_variable +from paddle import base +from paddle.base import core +from paddle.base.dygraph import guard, to_variable from paddle.nn import Layer, Linear np.set_printoptions(suppress=True) @@ -398,10 +398,10 @@ def __init__(self, d_model, process_cmd, shape_len=None): if cmd == "n": self._layer_norm = paddle.nn.LayerNorm( normalized_shape=d_model, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(1.0) ), - bias_attr=fluid.ParamAttr( + bias_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(0.0) ), ) @@ -658,7 +658,7 @@ def __init__( src_vocab_size, src_emb_dim, sparse=is_sparse, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( name=word_emb_param_name, initializer=paddle.nn.initializer.Normal( 0.0, src_emb_dim**-0.5 @@ -674,7 +674,7 @@ def __init__( self._src_max_len, src_emb_dim, sparse=is_sparse, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( name=pos_enc_param_name, initializer=paddle.nn.initializer.Assign(pos_inp), trainable=False, @@ -1112,7 +1112,7 @@ def transformer_sort_gradient_float32(self, is_sparse): def run_dygraph(): # NOTE(xiongkun03): In new executor, the inplace strategy is on by default, which will cause result of sumop have some differences. So we disable inplace. 
- fluid.set_flags({'FLAGS_new_executor_use_inplace': False}) + base.set_flags({'FLAGS_new_executor_use_inplace': False}) paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) transformer = TransFormer( @@ -1140,7 +1140,7 @@ def run_dygraph(): lr_decay = paddle.optimizer.lr.noam_decay( ModelHyperParams.d_model, TrainTaskConfig.warmup_steps ) - with fluid.default_main_program()._lr_schedule_guard(): + with base.default_main_program()._lr_schedule_guard(): learning_rate = lr_decay * TrainTaskConfig.learning_rate optimizer = paddle.optimizer.Adam( learning_rate=learning_rate, @@ -1215,10 +1215,10 @@ def run_dygraph(): is_test=False, is_sparse=is_sparse, ) - exe = fluid.Executor( - fluid.CPUPlace() + exe = base.Executor( + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) optimizer = paddle.optimizer.SGD(learning_rate=0.003) @@ -1249,7 +1249,7 @@ def run_dygraph(): for param in transformer.parameters(): static_param_name_list.append(param.name) out = exe.run( - fluid.default_startup_program(), + base.default_startup_program(), fetch_list=static_param_name_list, ) for i in range(len(static_param_name_list)): @@ -1269,7 +1269,7 @@ def run_dygraph(): fetch_list.extend(static_param_name_list) out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed=feed_dict, fetch_list=fetch_list, ) @@ -1285,7 +1285,7 @@ def run_dygraph(): # compare eager result with imperative result with guard(): - fluid.set_flags({'FLAGS_sort_sum_gradient': False}) + base.set_flags({'FLAGS_sort_sum_gradient': False}) ( dy_avg_cost_value, dy_sum_cost_value, diff --git a/test/legacy_test/test_imperative_triple_grad.py b/test/legacy_test/test_imperative_triple_grad.py index 72e2689da774b..2ca8f40de1dae 100644 --- a/test/legacy_test/test_imperative_triple_grad.py +++ b/test/legacy_test/test_imperative_triple_grad.py @@ -18,16 +18,16 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid.wrapped_decorator import wrap_decorator +from paddle import base +from paddle.base.wrapped_decorator import wrap_decorator def _dygraph_guard_(func): def __impl__(*args, **kwargs): - if fluid.in_dygraph_mode(): + if base.in_dygraph_mode(): return func(*args, **kwargs) else: - with fluid.dygraph.guard(): + with base.dygraph.guard(): return func(*args, **kwargs) return __impl__ @@ -39,7 +39,7 @@ def __impl__(*args, **kwargs): def random_var(size, low=-1, high=1, dtype='float32'): np.random.seed(2021) x_np = np.random.uniform(low=low, high=high, size=size).astype(dtype) - return fluid.dygraph.to_variable(x_np) + return base.dygraph.to_variable(x_np) class TestDygraphTripleGradMatmul(TestCase): @@ -118,8 +118,8 @@ def grad( create_graph=False, allow_unused=False, ): - fluid.set_flags({'FLAGS_sort_sum_gradient': self.sort_sum_gradient}) - return fluid.dygraph.grad( + base.set_flags({'FLAGS_sort_sum_gradient': self.sort_sum_gradient}) + return base.dygraph.grad( outputs=outputs, inputs=inputs, grad_outputs=grad_outputs, @@ -244,8 +244,8 @@ def grad( create_graph=False, allow_unused=False, ): - fluid.set_flags({'FLAGS_sort_sum_gradient': self.sort_sum_gradient}) - return fluid.dygraph.grad( + base.set_flags({'FLAGS_sort_sum_gradient': self.sort_sum_gradient}) + return base.dygraph.grad( outputs=outputs, inputs=inputs, grad_outputs=grad_outputs, diff --git a/test/legacy_test/test_imperative_using_non_zero_gpu.py b/test/legacy_test/test_imperative_using_non_zero_gpu.py index b83bd2fcaf4b7..887a04320e225 100644 --- 
a/test/legacy_test/test_imperative_using_non_zero_gpu.py +++ b/test/legacy_test/test_imperative_using_non_zero_gpu.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid.dygraph import guard, to_variable +from paddle import base +from paddle.base.dygraph import guard, to_variable class TestImperativeUsingNonZeroGpu(unittest.TestCase): @@ -28,15 +28,15 @@ def run_main(self, np_arr, place): np.testing.assert_array_equal(np_arr, var.numpy()) def test_non_zero_gpu(self): - if not fluid.is_compiled_with_cuda(): + if not base.is_compiled_with_cuda(): return np_arr = np.random.random([11, 13]).astype('float32') if paddle.device.cuda.device_count() > 1: # should use non zero gpu if there are more than 1 gpu - self.run_main(np_arr, fluid.CUDAPlace(1)) + self.run_main(np_arr, base.CUDAPlace(1)) else: - self.run_main(np_arr, fluid.CUDAPlace(0)) + self.run_main(np_arr, base.CUDAPlace(0)) if __name__ == '__main__': diff --git a/test/legacy_test/test_increment.py b/test/legacy_test/test_increment.py index 5112d780bf58a..4887564e9b9bb 100755 --- a/test/legacy_test/test_increment.py +++ b/test/legacy_test/test_increment.py @@ -17,23 +17,23 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base class TestIncrement(unittest.TestCase): def test_api(self): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input = paddle.tensor.fill_constant( shape=[1], dtype='int64', value=5 ) expected_result = np.array([8], dtype='int64') output = paddle.tensor.math.increment(input, value=3) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) result = exe.run(fetch_list=[output]) self.assertEqual((result == expected_result).all(), True) - with fluid.dygraph.guard(): + with base.dygraph.guard(): input = paddle.ones(shape=[1], dtype='int64') expected_result = np.array([2], dtype='int64') output = paddle.tensor.math.increment(input, value=1) @@ -42,11 +42,11 @@ def test_api(self): class TestInplaceApiWithDataTransform(unittest.TestCase): def test_increment(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): paddle.enable_static() - with paddle.fluid.device_guard("gpu:0"): + with paddle.base.device_guard("gpu:0"): x = paddle.tensor.fill_constant([1], "float32", 0) - with paddle.fluid.device_guard("cpu"): + with paddle.base.device_guard("cpu"): x = paddle.increment(x) exe = paddle.static.Executor(paddle.CUDAPlace(0)) (a,) = exe.run(paddle.static.default_main_program(), fetch_list=[x]) diff --git a/test/legacy_test/test_index_add_op.py b/test/legacy_test/test_index_add_op.py index 0e9b15f45438f..39279305a15c7 100644 --- a/test/legacy_test/test_index_add_op.py +++ b/test/legacy_test/test_index_add_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle.fluid import Program, core +from paddle.base import Program, core def compute_index_add_ref( diff --git a/test/legacy_test/test_index_put_op.py b/test/legacy_test/test_index_put_op.py index 44e50c1567721..9ab02298f94dd 100644 --- a/test/legacy_test/test_index_put_op.py +++ b/test/legacy_test/test_index_put_op.py @@ -18,7 +18,7 @@ import numpy as np import paddle -from paddle.fluid import Program +from paddle.base import Program def compute_index_put_ref(x_np, indices_np, value_np, accumulate=False): diff --git a/test/legacy_test/test_index_sample_op.py b/test/legacy_test/test_index_sample_op.py index 
b2fee3e3fd648..569e27203dd24 100755 --- a/test/legacy_test/test_index_sample_op.py +++ b/test/legacy_test/test_index_sample_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestIndexSampleOp(OpTest): @@ -211,9 +211,9 @@ def test_shape(self): index = paddle.static.data(name='index', shape=[-1, 3], dtype='int32') output = paddle.index_sample(x=x, index=index) - place = fluid.CPUPlace() - exe = fluid.Executor(place=place) - exe.run(fluid.default_startup_program()) + place = base.CPUPlace() + exe = base.Executor(place=place) + exe.run(base.default_startup_program()) feed = {'x': x_np, 'index': index_np} res = exe.run(feed=feed, fetch_list=[output]) @@ -221,7 +221,7 @@ def test_shape(self): class TestIndexSampleDynamic(unittest.TestCase): def test_result(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): x = paddle.to_tensor( [ [1.0, 2.0, 3.0, 4.0], diff --git a/test/legacy_test/test_index_select_op.py b/test/legacy_test/test_index_select_op.py index 7bd578dcf68aa..7f6eac1a9423d 100644 --- a/test/legacy_test/test_index_select_op.py +++ b/test/legacy_test/test_index_select_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard np.random.seed(1024) @@ -88,8 +88,8 @@ def init_dtype_type(self): class TestIndexSelectOpCaseSingleThread(TestIndexSelectOp): def init_dtype_type(self): - if fluid.is_compiled_with_cuda(): - fluid.set_flags({'FLAGS_cudnn_deterministic': True}) + if base.is_compiled_with_cuda(): + base.set_flags({'FLAGS_cudnn_deterministic': True}) self.x_type = np.float32 self.index_type = np.int32 self.dim = -2 @@ -195,7 +195,7 @@ def test_index_select_api(self): x = paddle.static.data(name='x', shape=[-1, 4]) index = paddle.static.data(name='index', shape=[3], dtype='int32') z = paddle.index_select(x, index, axis=1) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) (res,) = exe.run( feed={'x': self.data_x, 'index': self.data_index}, fetch_list=[z.name], @@ -211,7 +211,7 @@ def test_index_select_api(self): x = paddle.static.data(name='x', shape=[-1, 4]) index = paddle.static.data(name='index', shape=[3], dtype='int32') z = paddle.index_select(x, index) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) (res,) = exe.run( feed={'x': self.data_x, 'index': self.data_index}, fetch_list=[z.name], @@ -226,9 +226,9 @@ def test_dygraph_api(self): paddle.disable_static() self.input_data() # case 1: - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(self.data_x) - index = fluid.dygraph.to_variable(self.data_index) + with base.dygraph.guard(): + x = base.dygraph.to_variable(self.data_x) + index = base.dygraph.to_variable(self.data_index) z = paddle.index_select(x, index) np_z = z.numpy() expect_out = np.array( @@ -237,9 +237,9 @@ def test_dygraph_api(self): np.testing.assert_allclose(expect_out, np_z, rtol=1e-05) # case 2: - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(self.data_x) - index = fluid.dygraph.to_variable(self.data_index) + with base.dygraph.guard(): + x = base.dygraph.to_variable(self.data_x) + index = base.dygraph.to_variable(self.data_index) z = paddle.index_select(x, index, axis=1) np_z = z.numpy() expect_out = np.array( diff 
--git a/test/legacy_test/test_infer_no_need_buffer_slots.py b/test/legacy_test/test_infer_no_need_buffer_slots.py index 53f9212c7db72..1ba17a9270c50 100644 --- a/test/legacy_test/test_infer_no_need_buffer_slots.py +++ b/test/legacy_test/test_infer_no_need_buffer_slots.py @@ -15,19 +15,19 @@ import unittest import paddle -from paddle import fluid -from paddle.fluid import core, framework +from paddle import base +from paddle.base import core, framework class TestInferNoNeedBufferSlots(unittest.TestCase): def net(self): x1 = ( - fluid.default_main_program() + base.default_main_program() .global_block() .create_var(dtype="float32", shape=[1], lod_level=0, name="x1") ) x2 = ( - fluid.default_main_program() + base.default_main_program() .global_block() .create_var(dtype="float32", shape=[1], lod_level=0, name="x2") ) @@ -37,7 +37,7 @@ def net(self): def test_infer_no_need_buffer_slots(self): program = framework.Program() startup_program = framework.Program() - with fluid.program_guard(program, startup_program): + with base.program_guard(program, startup_program): loss = self.net() sgd = paddle.optimizer.SGD(learning_rate=0.01) sgd.minimize(loss) diff --git a/test/legacy_test/test_infer_shape.py b/test/legacy_test/test_infer_shape.py index 2b4f50a82646b..ba554f99086d5 100644 --- a/test/legacy_test/test_infer_shape.py +++ b/test/legacy_test/test_infer_shape.py @@ -14,7 +14,7 @@ import unittest -from paddle.fluid import core +from paddle.base import core class TestInferShape(unittest.TestCase): diff --git a/test/legacy_test/test_inference_api.py b/test/legacy_test/test_inference_api.py index 4f8cb527f1c6b..86e3f5d43d4db 100644 --- a/test/legacy_test/test_inference_api.py +++ b/test/legacy_test/test_inference_api.py @@ -19,8 +19,8 @@ paddle.enable_static() import numpy as np -from paddle import fluid -from paddle.fluid.core import PaddleDType, PaddleTensor +from paddle import base +from paddle.base.core import PaddleDType, PaddleTensor from paddle.framework import core from paddle.inference import ( Config, @@ -74,12 +74,12 @@ def test_inference_api(self): def get_sample_model(): - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) - main_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(main_program, startup_program): + main_program = base.Program() + startup_program = base.Program() + with base.program_guard(main_program, startup_program): data = paddle.static.data( name="data", shape=[-1, 6, 64, 64], dtype="float32" ) @@ -181,10 +181,10 @@ def test_lod_tensor(): predictor = create_predictor(config) in_names = predictor.get_input_names() in_handle = predictor.get_input_handle(in_names[0]) - in_data = paddle.fluid.create_lod_tensor( + in_data = paddle.base.create_lod_tensor( np.full((1, 6, 32, 32), 1.0, "float32"), [[1]], - paddle.fluid.CPUPlace(), + paddle.base.CPUPlace(), ) in_handle.share_external_data(in_data) predictor.run() diff --git a/test/legacy_test/test_inference_model_io.py b/test/legacy_test/test_inference_model_io.py index 7db7a113b5284..2f54e934818f3 100644 --- a/test/legacy_test/test_inference_model_io.py +++ b/test/legacy_test/test_inference_model_io.py @@ -21,14 +21,14 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.distributed.io import ( load_inference_model_distributed, save_persistables, ) -from paddle.fluid import core, executor -from paddle.fluid.compiler import CompiledProgram -from paddle.fluid.framework import Program, 
program_guard +from paddle.base import core, executor +from paddle.base.compiler import CompiledProgram +from paddle.base.framework import Program, program_guard from paddle.static.io import load_inference_model, save_inference_model paddle.enable_static() @@ -264,8 +264,8 @@ class TestSaveInferenceModelNew(unittest.TestCase): def test_save_and_load_inference_model(self): root_path = tempfile.TemporaryDirectory() MODEL_DIR = os.path.join(root_path.name, "inference_model5") - init_program = fluid.default_startup_program() - program = fluid.default_main_program() + init_program = base.default_startup_program() + program = base.default_main_program() # fake program without feed/fetch with program_guard(program, init_program): @@ -443,8 +443,8 @@ def test_save_and_load_inference_model(self): self.assertRaises(TypeError, paddle.static.io._get_valid_program, cp) def test_serialize_program_and_persistables(self): - init_program = fluid.default_startup_program() - program = fluid.default_main_program() + init_program = base.default_startup_program() + program = base.default_main_program() # fake program without feed/fetch with program_guard(program, init_program): @@ -492,8 +492,8 @@ def test_serialize_program_and_persistables(self): ) def test_normalize_program(self): - init_program = fluid.default_startup_program() - program = fluid.default_main_program() + init_program = base.default_startup_program() + program = base.default_main_program() # fake program without feed/fetch with program_guard(program, init_program): diff --git a/test/legacy_test/test_initializer.py b/test/legacy_test/test_initializer.py index 79b4782e6b6a3..52b2e4d5024dd 100644 --- a/test/legacy_test/test_initializer.py +++ b/test/legacy_test/test_initializer.py @@ -18,9 +18,9 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import framework -from paddle.fluid.core import VarDesc +from paddle import base +from paddle.base import framework +from paddle.base.core import VarDesc from paddle.regularizer import L2Decay DELTA = 0.00001 @@ -664,10 +664,10 @@ def test_set_global_weight_initilizer(self): """Test Set Global Param initilizer with UniformInitializer""" main_prog = framework.Program() startup_prog = framework.Program() - fluid.set_global_initializer( + base.set_global_initializer( paddle.nn.initializer.Uniform(low=-0.5, high=0.5) ) - with fluid.program_guard(main_prog, startup_prog): + with base.program_guard(main_prog, startup_prog): x = paddle.static.data(name="x", shape=[1, 3, 32, 32]) # default initilizer of param in layers.conv2d is NormalInitializer conv = paddle.static.nn.conv2d(x, 5, 3) @@ -685,17 +685,17 @@ def test_set_global_weight_initilizer(self): self.assertAlmostEqual(param_init_op.attr('min'), -0.5, delta=DELTA) self.assertAlmostEqual(param_init_op.attr('max'), 0.5, delta=DELTA) self.assertEqual(param_init_op.attr('seed'), 0) - fluid.set_global_initializer(None) + base.set_global_initializer(None) def test_set_global_bias_initilizer(self): """Test Set Global Bias initilizer with NormalInitializer""" main_prog = framework.Program() startup_prog = framework.Program() - fluid.set_global_initializer( + base.set_global_initializer( paddle.nn.initializer.Uniform(low=-0.5, high=0.5), bias_init=paddle.nn.initializer.Normal(0.0, 2.0), ) - with fluid.program_guard(main_prog, startup_prog): + with base.program_guard(main_prog, startup_prog): x = paddle.static.data(name="x", shape=[1, 3, 32, 32]) # default initilizer of bias in layers.conv2d is ConstantInitializer conv = 
paddle.static.nn.conv2d(x, 5, 3) @@ -715,7 +715,7 @@ def test_set_global_bias_initilizer(self): self.assertAlmostEqual(param_init_op.attr('min'), -0.5, delta=DELTA) self.assertAlmostEqual(param_init_op.attr('max'), 0.5, delta=DELTA) self.assertEqual(param_init_op.attr('seed'), 0) - fluid.set_global_initializer(None) + base.set_global_initializer(None) class TestUniformInitializerDygraph(unittest.TestCase): diff --git a/test/legacy_test/test_initializer_nn.py b/test/legacy_test/test_initializer_nn.py index e6fd87c497c66..b0b0e0bef268d 100644 --- a/test/legacy_test/test_initializer_nn.py +++ b/test/legacy_test/test_initializer_nn.py @@ -17,9 +17,9 @@ import numpy as np import paddle -from paddle import fluid, nn -from paddle.fluid import framework -from paddle.fluid.core import VarDesc +from paddle import base, nn +from paddle.base import framework +from paddle.base.core import VarDesc from paddle.nn import initializer DELTA = 0.00001 @@ -71,7 +71,7 @@ def test_constant_initializer_default_value_static(self, dtype="float32"): def test_constant_initializer_default_value_dygraph(self, dtype="float32"): """Test constant initializer with supplied value in dygraph""" - with fluid.dygraph.guard(): + with base.dygraph.guard(): linear = nn.Linear(2, 4, weight_attr=nn.initializer.Constant()) mat_target = np.ones((2, 4), dtype=dtype) * 0.0 mat_linear = linear.weight.numpy() @@ -89,7 +89,7 @@ def test_constant_initializer_static(self, dtype="float32"): def test_constant_initializer_dygraph(self, dtype="float32"): """Test constant initializer with supplied value in dygraph""" - with fluid.dygraph.guard(): + with base.dygraph.guard(): linear = nn.Linear( 2, 4, weight_attr=nn.initializer.Constant(value=2.0) ) diff --git a/test/legacy_test/test_inplace.py b/test/legacy_test/test_inplace.py index 047db61ec0c77..422bcd35d1612 100644 --- a/test/legacy_test/test_inplace.py +++ b/test/legacy_test/test_inplace.py @@ -22,7 +22,7 @@ class TestInplace(unittest.TestCase): def test_forward_version(self): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): var = paddle.to_tensor(np.ones((4, 2, 3)).astype(np.float32)) self.assertEqual(var.inplace_version, 0) @@ -41,7 +41,7 @@ def test_forward_version(self): def test_backward_error(self): # It raises an error because the inplace operator will result # in incorrect gradient computation. - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): var_a = paddle.ones(shape=[4, 2, 3], dtype="float32") var_a.stop_gradient = False @@ -65,7 +65,7 @@ def test_backward_error(self): def test_backward_success_1(self): # var_b is modified inplace before using it, the inplace operator doesn't result # in incorrect gradient computation. - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): var_a = paddle.ones(shape=[4, 2, 3], dtype="float32") var_a.stop_gradient = False @@ -80,7 +80,7 @@ def test_backward_success_1(self): def test_backward_success_2(self): # Although var_b is modified inplace after using it, it does not used in gradient computation. # The inplace operator doesn't result in incorrect gradient computation. 
- with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): var_a = paddle.ones(shape=[4, 2, 3], dtype="float32") var_a.stop_gradient = False @@ -133,7 +133,7 @@ def test_forward_result(self): ) def test_forward_version(self): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): var = paddle.to_tensor(self.input_var_numpy).astype(self.dtype) self.assertEqual(var.inplace_version, 0) @@ -147,7 +147,7 @@ def test_forward_version(self): self.assertEqual(var.inplace_version, 3) def test_leaf_inplace_var_error(self): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): var = paddle.to_tensor(self.input_var_numpy).astype(self.dtype) var.stop_gradient = False @@ -159,7 +159,7 @@ def leaf_inplace_error(): def test_backward_error(self): # It raises an error because the inplace operator will result # in incorrect gradient computation. - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): var_a = paddle.to_tensor(self.input_var_numpy).astype(self.dtype) var_a.stop_gradient = False @@ -183,7 +183,7 @@ def test_backward_success_1(self): # var_b is modified inplace before using it, the inplace operator doesn't result # in incorrect gradient computation. grad_var_a, grad_var_a_inplace = 0, 1 - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): var_a = paddle.to_tensor(self.input_var_numpy).astype(self.dtype) var_a.stop_gradient = False @@ -199,7 +199,7 @@ def test_backward_success_1(self): loss.backward() grad_var_a_inplace = var_a.grad.numpy() - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): var_a = paddle.to_tensor(self.input_var_numpy).astype(self.dtype) var_a.stop_gradient = False @@ -216,7 +216,7 @@ def test_backward_success_2(self): # Although var_b is modified inplace after using it, it does not used in gradient computation. # The inplace operator doesn't result in incorrect gradient computation. grad_var_a, grad_var_a_inplace = 0, 1 - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): var_a = paddle.to_tensor(self.input_var_numpy).astype(self.dtype) var_a.stop_gradient = False @@ -235,7 +235,7 @@ def test_backward_success_2(self): loss.backward() grad_var_a_inplace = var_a.grad.numpy() - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): var_a = paddle.to_tensor(self.input_var_numpy).astype(self.dtype) var_a.stop_gradient = False @@ -275,7 +275,7 @@ def test_continuous_inplace_backward(self): # The api that only relies on input to calculate the gradient will copy input before # the inpalce calculation, so here supports continuous inpalce backward calculation. 
grad_var_a, grad_var_a_inplace = 0, 1 - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): var_a = paddle.to_tensor(self.input_var_numpy).astype(self.dtype) var_a.stop_gradient = False @@ -287,7 +287,7 @@ def test_continuous_inplace_backward(self): loss.backward() grad_var_a_inplace = var_a.grad.numpy() - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): var_a = paddle.to_tensor(self.input_var_numpy).astype(self.dtype) var_a.stop_gradient = False @@ -518,7 +518,7 @@ def test_backward_success_2(self): class TestLossIsInplaceVar(unittest.TestCase): def test_loss_is_inplace_var(self): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): var_a = paddle.ones((2, 2)) var_a.stop_gradient = False @@ -528,7 +528,7 @@ def test_loss_is_inplace_var(self): loss.backward() inplace_grad_var_a = var_a.grad.numpy() - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): var_a = paddle.ones((2, 2)) var_a.stop_gradient = False diff --git a/test/legacy_test/test_inplace_abn_op.py b/test/legacy_test/test_inplace_abn_op.py index 94c04ba41e4e1..d56a467a2ed79 100644 --- a/test/legacy_test/test_inplace_abn_op.py +++ b/test/legacy_test/test_inplace_abn_op.py @@ -18,8 +18,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestInplaceANBOpTraining(unittest.TestCase): @@ -42,12 +42,12 @@ def build_program( use_cuda=False, inplace=False, ): - main = fluid.Program() - startup = fluid.Program() + main = base.Program() + startup = base.Program() main.random_seed = seed startup.random_seed = seed - with fluid.unique_name.guard(): - with fluid.program_guard(main, startup): + with base.unique_name.guard(): + with base.program_guard(main, startup): data = paddle.static.data( name='input', shape=self.dshape, @@ -58,8 +58,8 @@ def build_program( bn = paddle.static.nn.batch_norm( data, - param_attr=fluid.ParamAttr(name='bn_scale'), - bias_attr=fluid.ParamAttr(name='bn_bias'), + param_attr=base.ParamAttr(name='bn_scale'), + bias_attr=base.ParamAttr(name='bn_bias'), moving_mean_name='bn_moving_mean', moving_variance_name='bn_moving_variance', data_layout=layout, @@ -103,7 +103,7 @@ def test_all_branches(self): use_cuda, False, ) - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(startup) exe.run(program=main, feed={'input': data}) diff --git a/test/legacy_test/test_inplace_addto_strategy.py b/test/legacy_test/test_inplace_addto_strategy.py index 225b460f1ae4a..657b870c1c097 100644 --- a/test/legacy_test/test_inplace_addto_strategy.py +++ b/test/legacy_test/test_inplace_addto_strategy.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base class ConvBNLayer(paddle.nn.Layer): @@ -54,9 +54,9 @@ def forward(self, inputs): def create_program(data_format="NCHW"): - main = fluid.Program() - startup = fluid.Program() - with fluid.program_guard(main, startup): + main = base.Program() + startup = base.Program() + with base.program_guard(main, startup): x = paddle.static.data(name='img', shape=[-1, 3, 224, 224]) x.stop_gradient = False if data_format == "NHWC": @@ -84,20 +84,20 @@ def run_program(enable_addto): np.random.seed(10) paddle.seed(10) paddle.framework.random._manual_program_seed(10) - if fluid.core.is_compiled_with_cuda(): - fluid.set_flags({"FLAGS_cudnn_deterministic": True}) - fluid.set_flags({"FLAGS_max_inplace_grad_add": 2}) + if base.core.is_compiled_with_cuda(): + 
base.set_flags({"FLAGS_cudnn_deterministic": True}) + base.set_flags({"FLAGS_max_inplace_grad_add": 2}) loss, main, startup, w = create_program(data_format=data_format) place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) - strategy = fluid.BuildStrategy() + strategy = base.BuildStrategy() strategy.enable_addto = enable_addto - compiled = fluid.CompiledProgram(main, build_strategy=strategy) + compiled = base.CompiledProgram(main, build_strategy=strategy) exe.run(startup) img = np.random.uniform(-128, 128, [8, 3, 224, 224]).astype( diff --git a/test/legacy_test/test_inplace_softmax_with_cross_entropy.py b/test/legacy_test/test_inplace_softmax_with_cross_entropy.py index 8f2263ee74c58..64bb0fbc669b4 100644 --- a/test/legacy_test/test_inplace_softmax_with_cross_entropy.py +++ b/test/legacy_test/test_inplace_softmax_with_cross_entropy.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base class TestSoftmaxWithXe(unittest.TestCase): @@ -35,8 +35,8 @@ def softmax_with_xe( self, x, y, place, inplace=True, numeric_stable_mode=True ): m, n = x.shape - with fluid.program_guard(fluid.Program(), fluid.Program()): - with fluid.scope_guard(fluid.Scope()): + with base.program_guard(base.Program(), base.Program()): + with base.scope_guard(base.Scope()): x_d = paddle.static.data( name='x', shape=[m, n], @@ -57,14 +57,14 @@ def softmax_with_xe( numeric_stable_mode=numeric_stable_mode, ) - exe = fluid.Executor(place) + exe = base.Executor(place) - exe.run(fluid.default_startup_program()) + exe.run(base.default_startup_program()) - build_strategy = fluid.BuildStrategy() + build_strategy = base.BuildStrategy() build_strategy.enable_inplace = inplace - prog = fluid.CompiledProgram( - fluid.default_main_program(), build_strategy=build_strategy + prog = base.CompiledProgram( + base.default_main_program(), build_strategy=build_strategy ) fetch_list = [z_d.name, s_d.name] @@ -113,9 +113,9 @@ def main_with_place(self, place): self.assertTrue((s1 == s2).all()) def test_main(self): - self.main_with_place(fluid.CPUPlace()) - if fluid.core.is_compiled_with_cuda(): - self.main_with_place(fluid.CUDAPlace(0)) + self.main_with_place(base.CPUPlace()) + if base.core.is_compiled_with_cuda(): + self.main_with_place(base.CUDAPlace(0)) class TestSoftmaxWithXe1(TestSoftmaxWithXe): diff --git a/test/legacy_test/test_input_spec.py b/test/legacy_test/test_input_spec.py index 7234e6414c8d7..47c461a2a1eab 100644 --- a/test/legacy_test/test_input_spec.py +++ b/test/legacy_test/test_input_spec.py @@ -19,8 +19,8 @@ import numpy as np import paddle -from paddle.fluid import core -from paddle.fluid.framework import convert_np_dtype_to_dtype_ +from paddle.base import core +from paddle.base.framework import convert_np_dtype_to_dtype_ from paddle.jit.dy2static.utils import _compatible_non_tensor_spec from paddle.static import InputSpec diff --git a/test/legacy_test/test_instance_norm_op.py b/test/legacy_test/test_instance_norm_op.py index fe8ed7bf150c1..b4bd1401ddea9 100644 --- a/test/legacy_test/test_instance_norm_op.py +++ b/test/legacy_test/test_instance_norm_op.py @@ -19,9 +19,9 @@ from eager_op_test import OpTest import paddle -from paddle import fluid, nn -from paddle.fluid import Program, core, program_guard -from paddle.fluid.dygraph import to_variable +from paddle import base, nn +from paddle.base import 
Program, core, program_guard +from paddle.base.dygraph import to_variable def _reference_instance_norm_naive(x, scale, bias, epsilon, mean, var): @@ -325,9 +325,9 @@ def setUp(self): self.static_rev_desire[-1].append(rev[2]) def get_eager_desire(self, place): - if isinstance(place, fluid.CPUPlace): + if isinstance(place, base.CPUPlace): paddle.set_device("cpu") - if isinstance(place, fluid.CUDAPlace): + if isinstance(place, base.CUDAPlace): paddle.set_device("gpu") core.set_prim_eager_enabled(False) paddle.disable_static() @@ -350,9 +350,9 @@ def get_eager_desire(self, place): def get_static_desire(self, place): core._set_prim_all_enabled(False) paddle.enable_static() - if isinstance(place, fluid.CPUPlace): + if isinstance(place, base.CPUPlace): paddle.set_device("cpu") - if isinstance(place, fluid.CUDAPlace): + if isinstance(place, base.CUDAPlace): paddle.set_device("gpu") mp, sp = paddle.static.Program(), paddle.static.Program() @@ -428,7 +428,7 @@ def test_static_comp(self): if len(self.places) < 1: return - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): for place in self.places: fwd_actual.append([]) rev_actual.append([]) @@ -783,8 +783,8 @@ def test_with_place(place, shape): ] ground_truth = {name: var_dict[name] for name in var_names} - program = fluid.Program() - with fluid.program_guard(program): + program = base.Program() + with base.program_guard(program): block = program.global_block() for name in ground_truth: block.create_var( @@ -827,7 +827,7 @@ def test_with_place(place, shape): program._sync_with_cpp() - exe = fluid.Executor(place) + exe = base.Executor(place) out = exe.run( program, feed={ @@ -871,8 +871,8 @@ def test_errors(self): paddle.enable_static() with program_guard(Program(), Program()): # the input of instance_norm must be Variable. 
- x1 = fluid.create_lod_tensor( - np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() + x1 = base.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], base.CPUPlace() ) self.assertRaises(TypeError, paddle.static.nn.instance_norm, x1) @@ -918,7 +918,7 @@ def test_norm(self): ) for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): instance_norm = paddle.nn.InstanceNorm2D( 5, weight_attr=False, bias_attr=False ) @@ -952,7 +952,7 @@ def test_norm(self): ) for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): instance_norm = paddle.nn.InstanceNorm2D( 3, weight_attr=True, bias_attr=True ) diff --git a/test/legacy_test/test_instance_norm_op_v2.py b/test/legacy_test/test_instance_norm_op_v2.py index 8b6745b17bb1d..9b59b2d813616 100644 --- a/test/legacy_test/test_instance_norm_op_v2.py +++ b/test/legacy_test/test_instance_norm_op_v2.py @@ -19,8 +19,8 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard def instance_norm_wrapper( @@ -79,27 +79,27 @@ def _reference_instance_norm_grad(x, scale, mean, var): class TestInstanceNorm(unittest.TestCase): def test_error(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda() and core.op_support_gpu( "instance_norm" ): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: def error1d(): x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32') instance_norm1d = paddle.nn.InstanceNorm1D(1) - instance_norm1d(fluid.dygraph.to_variable(x_data_4)) + instance_norm1d(base.dygraph.to_variable(x_data_4)) def error2d(): x_data_3 = np.random.random(size=(2, 1, 3)).astype('float32') instance_norm2d = paddle.nn.InstanceNorm2D(1) - instance_norm2d(fluid.dygraph.to_variable(x_data_3)) + instance_norm2d(base.dygraph.to_variable(x_data_3)) def error3d(): x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32') instance_norm3d = paddle.nn.InstanceNorm3D(1) - instance_norm3d(fluid.dygraph.to_variable(x_data_4)) + instance_norm3d(base.dygraph.to_variable(x_data_4)) def weight_bias_false(): x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32') @@ -107,31 +107,31 @@ def weight_bias_false(): 1, weight_attr=False, bias_attr=False ) - with fluid.dygraph.guard(p): + with base.dygraph.guard(p): weight_bias_false() self.assertRaises(ValueError, error1d) self.assertRaises(ValueError, error2d) self.assertRaises(ValueError, error3d) def test_dygraph(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda() and core.op_support_gpu( "instance_norm" ): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: shape = [4, 10, 4, 4] def compute_v1(x): - with fluid.dygraph.guard(p): + with base.dygraph.guard(p): bn = paddle.nn.InstanceNorm2D(shape[1]) - y = bn(fluid.dygraph.to_variable(x)) + y = bn(base.dygraph.to_variable(x)) return y.numpy() def compute_v2(x): - with fluid.dygraph.guard(p): + with base.dygraph.guard(p): bn = paddle.nn.InstanceNorm2D(shape[1]) - y = bn(fluid.dygraph.to_variable(x)) + y = bn(base.dygraph.to_variable(x)) return y.numpy() x = np.random.randn(*shape).astype("float32") @@ -140,14 +140,14 @@ def compute_v2(x): np.testing.assert_allclose(y1, y2, rtol=1e-05) def test_static(self): - with paddle.fluid.framework._static_guard(): 
- places = [fluid.CPUPlace()] + with paddle.base.framework._static_guard(): + places = [base.CPUPlace()] if core.is_compiled_with_cuda() and core.op_support_gpu( "instance_norm" ): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: - exe = fluid.Executor(p) + exe = base.Executor(p) shape = [4, 10, 16, 16] def compute_v1(x_np): @@ -157,7 +157,7 @@ def compute_v1(x_np): name='x', shape=x_np.shape, dtype=x_np.dtype ) y = ins(x) - exe.run(fluid.default_startup_program()) + exe.run(base.default_startup_program()) r = exe.run(feed={'x': x_np}, fetch_list=[y])[0] return r @@ -168,7 +168,7 @@ def compute_v2(x_np): name='x', shape=x_np.shape, dtype=x_np.dtype ) y = ins(x) - exe.run(fluid.default_startup_program()) + exe.run(base.default_startup_program()) r = exe.run(feed={'x': x_np}, fetch_list=[y])[0] return r @@ -392,7 +392,7 @@ def train(self, use_amp, data_layout="NCHW"): def test_amp_nchw(self): if not isinstance( - paddle.fluid.framework._current_expected_place(), core.CPUPlace + paddle.base.framework._current_expected_place(), core.CPUPlace ): expected = self.train(False) actual = self.train(True) diff --git a/test/legacy_test/test_inverse_op.py b/test/legacy_test/test_inverse_op.py index de0527e9661be..da4c74e62814f 100644 --- a/test/legacy_test/test_inverse_op.py +++ b/test/legacy_test/test_inverse_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestInverseOp(OpTest): @@ -90,12 +90,12 @@ def config(self): class TestInverseAPI(unittest.TestCase): def setUp(self): np.random.seed(123) - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) def check_static_result(self, place): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input = paddle.static.data( name="input", shape=[4, 4], dtype="float64" ) @@ -103,9 +103,9 @@ def check_static_result(self, place): input_np = np.random.random([4, 4]).astype("float64") result_np = np.linalg.inv(input_np) - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": input_np}, fetch_list=[result], ) @@ -119,9 +119,9 @@ def test_static(self): def test_dygraph(self): for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([4, 4]).astype("float64") - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) result = paddle.inverse(input) np.testing.assert_allclose( result.numpy(), np.linalg.inv(input_np), rtol=1e-05 @@ -156,12 +156,12 @@ def test_errors(self): class TestInverseSingularAPI(unittest.TestCase): def setUp(self): - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) def check_static_result(self, place): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input = paddle.static.data( name="input", shape=[4, 4], dtype="float64" ) @@ -169,10 +169,10 @@ def check_static_result(self, place): input_np = np.zeros([4, 4]).astype("float64") - exe = fluid.Executor(place) + exe = base.Executor(place) try: 
fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": input_np}, fetch_list=[result], ) @@ -187,9 +187,9 @@ def test_static(self): def test_dygraph(self): for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.ones([4, 4]).astype("float64") - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) try: result = paddle.inverse(input) except RuntimeError as ex: diff --git a/test/legacy_test/test_io_save_load.py b/test/legacy_test/test_io_save_load.py index a6e125d3d3298..b108f63e75e54 100644 --- a/test/legacy_test/test_io_save_load.py +++ b/test/legacy_test/test_io_save_load.py @@ -17,8 +17,8 @@ import unittest import paddle -from paddle import fluid, static -from paddle.fluid import core +from paddle import base, static +from paddle.base import core class TestSaveLoadAPIError(unittest.TestCase): @@ -32,7 +32,7 @@ def tearDown(self): def test_get_valid_program_error(self): # case 1: CompiledProgram no program graph = core.Graph(core.ProgramDesc()) - compiled_program = fluid.CompiledProgram(graph) + compiled_program = base.CompiledProgram(graph) with self.assertRaises(TypeError): paddle.static.io._get_valid_program(compiled_program) @@ -41,8 +41,8 @@ def test_get_valid_program_error(self): paddle.static.io._get_valid_program("program") def test_load_vars_error(self): - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) # case 1: main_program type error when vars None with self.assertRaises(TypeError): static.io.load_vars( @@ -67,14 +67,14 @@ def tearDown(self): self.temp_dir.cleanup() def test_useless_feeded_var_names(self): - start_prog = fluid.Program() - main_prog = fluid.Program() - with fluid.program_guard(main_prog, start_prog): + start_prog = base.Program() + main_prog = base.Program() + with base.program_guard(main_prog, start_prog): x = paddle.static.data(name='x', shape=[10, 16], dtype='float32') y = paddle.static.data(name='y', shape=[10, 16], dtype='float32') z = paddle.static.nn.fc(x, 4) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) exe.run(start_prog) with self.assertRaisesRegex( ValueError, "not involved in the target_vars calculation" diff --git a/test/legacy_test/test_ir_graph.py b/test/legacy_test/test_ir_graph.py index b563024e5bf2a..79db30cfd16b2 100644 --- a/test/legacy_test/test_ir_graph.py +++ b/test/legacy_test/test_ir_graph.py @@ -14,7 +14,7 @@ import unittest -from paddle import fluid +from paddle import base class TestIRGraph(unittest.TestCase): @@ -51,23 +51,23 @@ def test_erase(self): self.assertFalse(graph.has("test")) def test_create_var_node(self): - prog = fluid.core.ProgramDesc() + prog = base.core.ProgramDesc() block = prog.block(0) shape = [10, 20] x1 = block.var(b'x1') - x1.set_type(fluid.core.VarDesc.VarType.LOD_TENSOR) + x1.set_type(base.core.VarDesc.VarType.LOD_TENSOR) x1.set_shape(shape) - graph = fluid.core.Graph(prog) + graph = base.core.Graph(prog) node = graph.create_var_node(x1) - self.assertTrue(node.node_type() == fluid.core.Node.Type.Variable) + self.assertTrue(node.node_type() == base.core.Node.Type.Variable) def test_create_op_node(self): - prog = fluid.core.ProgramDesc() + prog = base.core.ProgramDesc() block = prog.block(0) sum_op_desc = block.append_op() - graph = fluid.core.Graph(prog) + graph = base.core.Graph(prog) node = graph.create_op_node(sum_op_desc) - self.assertTrue(node.node_type() == 
fluid.core.Node.Type.Operation) + self.assertTrue(node.node_type() == base.core.Node.Type.Operation) def test_create_control_dep_var(self): graph = build_graph() @@ -76,11 +76,11 @@ def test_create_control_dep_var(self): self.assertTrue(node.name() == name) def test_create_empty_node(self): - prog = fluid.core.ProgramDesc() - graph = fluid.core.Graph(prog) - n1 = graph.create_empty_node('x', fluid.core.Node.Type.Operation) + prog = base.core.ProgramDesc() + graph = base.core.Graph(prog) + n1 = graph.create_empty_node('x', base.core.Node.Type.Operation) self.assertTrue(n1.name() == 'x') - n2 = graph.create_empty_node('y', fluid.core.Node.Type.Variable) + n2 = graph.create_empty_node('y', base.core.Node.Type.Variable) self.assertTrue(n2.name() == 'y') def test_release_nodes(self): @@ -117,21 +117,21 @@ def resolve_hazard(self): def build_graph(): - prog = fluid.core.ProgramDesc() + prog = base.core.ProgramDesc() block = prog.block(0) shape = [10, 20] # prepare input/output x1 = block.var(b'x1') - x1.set_type(fluid.core.VarDesc.VarType.LOD_TENSOR) + x1.set_type(base.core.VarDesc.VarType.LOD_TENSOR) x1.set_shape(shape) x2 = block.var(b'x2') - x2.set_type(fluid.core.VarDesc.VarType.LOD_TENSOR) + x2.set_type(base.core.VarDesc.VarType.LOD_TENSOR) x2.set_shape(shape) out = block.var(b'out') - out.set_type(fluid.core.VarDesc.VarType.LOD_TENSOR) + out.set_type(base.core.VarDesc.VarType.LOD_TENSOR) sum_op_desc = block.append_op() sum_op_desc.set_type("sum") @@ -140,7 +140,7 @@ def build_graph(): sum_op_desc.check_attrs() sum_op_desc.infer_shape(block) - graph = fluid.core.Graph(prog) + graph = base.core.Graph(prog) return graph diff --git a/test/legacy_test/test_ir_inplace_pass.py b/test/legacy_test/test_ir_inplace_pass.py index 4aed1bc642fef..c5a5be1168f87 100644 --- a/test/legacy_test/test_ir_inplace_pass.py +++ b/test/legacy_test/test_ir_inplace_pass.py @@ -19,8 +19,8 @@ from parallel_executor_test_base import DeviceType, TestParallelExecutorBase import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def fc_with_batchnorm(use_feed): @@ -33,7 +33,7 @@ def fc_with_batchnorm(use_feed): hidden, size=200, activation='tanh', - bias_attr=fluid.ParamAttr( + bias_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=1.0) ), ) diff --git a/test/legacy_test/test_ir_memory_optimize_pass.py b/test/legacy_test/test_ir_memory_optimize_pass.py index 4fff201519ca2..6112d0aedd7ad 100644 --- a/test/legacy_test/test_ir_memory_optimize_pass.py +++ b/test/legacy_test/test_ir_memory_optimize_pass.py @@ -18,7 +18,7 @@ from parallel_executor_test_base import DeviceType, TestParallelExecutorBase import paddle -from paddle.fluid import core +from paddle.base import core def _feed_data_helper(): diff --git a/test/legacy_test/test_ir_memory_optimize_transformer.py b/test/legacy_test/test_ir_memory_optimize_transformer.py index 6c87727c4bb30..b3dc82c12e636 100644 --- a/test/legacy_test/test_ir_memory_optimize_transformer.py +++ b/test/legacy_test/test_ir_memory_optimize_transformer.py @@ -15,7 +15,7 @@ import os import unittest -from paddle.fluid import core +from paddle.base import core os.environ['FLAGS_eager_delete_tensor_gb'] = "0.0" diff --git a/test/legacy_test/test_isclose_op.py b/test/legacy_test/test_isclose_op.py index 2074a160c5b3d..5f17a9b6e7055 100644 --- a/test/legacy_test/test_isclose_op.py +++ b/test/legacy_test/test_isclose_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest import paddle -from paddle.fluid import core 
+from paddle.base import core class TestIscloseOp(OpTest): @@ -118,9 +118,9 @@ def test_api_case(self): paddle.enable_static() x_data = np.random.rand(10, 10) y_data = np.random.rand(10, 10) - places = [paddle.fluid.CPUPlace()] - if paddle.fluid.core.is_compiled_with_cuda(): - places.append(paddle.fluid.CUDAPlace(0)) + places = [paddle.base.CPUPlace()] + if paddle.base.core.is_compiled_with_cuda(): + places.append(paddle.base.CUDAPlace(0)) for place in places: with paddle.static.program_guard( paddle.static.Program(), paddle.static.Program() @@ -132,9 +132,9 @@ def test_api_case(self): name='y', shape=[10, 10], dtype='float64' ) result = paddle.isclose(x, y) - exe = paddle.fluid.Executor(place) + exe = paddle.base.Executor(place) fetches = exe.run( - paddle.fluid.default_main_program(), + paddle.base.default_main_program(), feed={"x": x_data, "y": y_data}, fetch_list=[result], ) @@ -145,7 +145,7 @@ def test_api_case(self): class TestIscloseDygraph(unittest.TestCase): def test_api_case(self): places = [paddle.CPUPlace()] - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): places.append(paddle.CUDAPlace(0)) for place in places: paddle.disable_static() diff --git a/test/legacy_test/test_isfinite_op.py b/test/legacy_test/test_isfinite_op.py index d5a409489d866..74bb329a8df39 100755 --- a/test/legacy_test/test_isfinite_op.py +++ b/test/legacy_test/test_isfinite_op.py @@ -17,7 +17,7 @@ import numpy as np from eager_op_test import OpTest, convert_float_to_uint16 -from paddle.fluid import core +from paddle.base import core class TestInf(OpTest): diff --git a/test/legacy_test/test_isfinite_v2_op.py b/test/legacy_test/test_isfinite_v2_op.py index 6c3a780aeda03..5c9faa507897d 100644 --- a/test/legacy_test/test_isfinite_v2_op.py +++ b/test/legacy_test/test_isfinite_v2_op.py @@ -17,18 +17,18 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base def run_static(x_np, dtype, op_str, use_gpu=False): paddle.enable_static() - startup_program = fluid.Program() - main_program = fluid.Program() + startup_program = base.Program() + main_program = base.Program() place = paddle.CPUPlace() - if use_gpu and fluid.core.is_compiled_with_cuda(): + if use_gpu and base.core.is_compiled_with_cuda(): place = paddle.CUDAPlace(0) - exe = fluid.Executor(place) - with fluid.program_guard(main_program, startup_program): + exe = base.Executor(place) + with base.program_guard(main_program, startup_program): x = paddle.static.data(name='x', shape=x_np.shape, dtype=dtype) res = getattr(paddle.tensor, op_str)(x) exe.run(startup_program) @@ -40,7 +40,7 @@ def run_static(x_np, dtype, op_str, use_gpu=False): def run_dygraph(x_np, op_str, use_gpu=True): place = paddle.CPUPlace() - if use_gpu and fluid.core.is_compiled_with_cuda(): + if use_gpu and base.core.is_compiled_with_cuda(): place = paddle.CUDAPlace(0) paddle.disable_static(place) x = paddle.to_tensor(x_np) @@ -49,9 +49,9 @@ def run_dygraph(x_np, op_str, use_gpu=True): def run_eager(x_np, op_str, use_gpu=True): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): place = paddle.CPUPlace() - if use_gpu and fluid.core.is_compiled_with_cuda(): + if use_gpu and base.core.is_compiled_with_cuda(): place = paddle.CUDAPlace(0) x = paddle.to_tensor(x_np) @@ -152,7 +152,7 @@ def test_finite(self): class TestError(unittest.TestCase): def test_bad_input(self): paddle.enable_static() - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): def test_isinf_bad_x(): x = 
[1, 2, 3] diff --git a/test/legacy_test/test_jit_layer.py b/test/legacy_test/test_jit_layer.py index e36cea92de780..e488995764539 100644 --- a/test/legacy_test/test_jit_layer.py +++ b/test/legacy_test/test_jit_layer.py @@ -19,7 +19,7 @@ import numpy as np import paddle -from paddle.fluid.framework import _dygraph_place_guard +from paddle.base.framework import _dygraph_place_guard from paddle.jit.layer import Layer from paddle.static import InputSpec diff --git a/test/legacy_test/test_jit_save_load.py b/test/legacy_test/test_jit_save_load.py index 40bf389b5f190..1edb99e1ea77f 100644 --- a/test/legacy_test/test_jit_save_load.py +++ b/test/legacy_test/test_jit_save_load.py @@ -22,8 +22,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import unique_name +from paddle import base +from paddle.base import unique_name from paddle.jit.api import to_static from paddle.jit.translated_layer import INFER_PARAMS_INFO_SUFFIX from paddle.nn import Linear @@ -304,7 +304,7 @@ def train(layer, input_size=784, label_size=1): learning_rate=0.01, parameter_list=layer.parameters() ) # create data loader - train_loader = fluid.io.DataLoader.from_generator(capacity=5) + train_loader = base.io.DataLoader.from_generator(capacity=5) train_loader.set_batch_generator( random_batch_reader(input_size, label_size) ) @@ -332,7 +332,7 @@ def train_with_label(layer, input_size=784, label_size=1): learning_rate=0.01, parameters=layer.parameters() ) # create data loader - train_loader = fluid.io.DataLoader.from_generator(capacity=5) + train_loader = base.io.DataLoader.from_generator(capacity=5) train_loader.set_batch_generator( random_batch_reader(input_size, label_size) ) @@ -356,7 +356,7 @@ def setUp(self): self.temp_dir.name, "test_jit_save_load/model" ) # enable dygraph mode - fluid.enable_dygraph() + base.enable_dygraph() # config seed paddle.seed(SEED) paddle.framework.random._manual_program_seed(SEED) @@ -389,7 +389,7 @@ def load_and_inference(self, train_layer, infer_layer): train_layer.eval() infer_layer.eval() # inference & compare - x = fluid.dygraph.to_variable( + x = base.dygraph.to_variable( np.random.random((1, 784)).astype('float32') ) np.testing.assert_array_equal( @@ -417,7 +417,7 @@ def load_dygraph_state_dict(self, train_layer): new_layer.set_state_dict(load_state_dict) new_layer.eval() # inference & compare - x = fluid.dygraph.to_variable( + x = base.dygraph.to_variable( np.random.random((1, 784)).astype('float32') ) np.testing.assert_array_equal( @@ -442,14 +442,14 @@ def test_jit_load_no_path(self): class TestSaveLoadWithNestOut(unittest.TestCase): def setUp(self): # enable dygraph mode - fluid.enable_dygraph() + base.enable_dygraph() self.temp_dir = tempfile.TemporaryDirectory() def tearDown(self): self.temp_dir.cleanup() def test_nest_output(self): - x = fluid.dygraph.to_variable( + x = base.dygraph.to_variable( np.random.random((4, 8)).astype('float32') ) @@ -478,8 +478,8 @@ def test_dict_input(self): net = LinearNetWithDictInput(8, 8) # net.forward.concrete_program.inputs: # (<__main__.LinearNetWithDictInput object at 0x7f2655298a98>, - # {'img': var img : fluid.VarType.LOD_TENSOR.shape(-1, 8).astype(VarType.FP32)}, - # {'label': var label : fluid.VarType.LOD_TENSOR.shape(-1, 1).astype(VarType.INT64)}) + # {'img': var img : base.VarType.LOD_TENSOR.shape(-1, 8).astype(VarType.FP32)}, + # {'label': var label : base.VarType.LOD_TENSOR.shape(-1, 1).astype(VarType.INT64)}) self.assertEqual(len(net.forward.concrete_program.inputs), 3) temp_dir = 
tempfile.TemporaryDirectory() path = os.path.join( @@ -539,7 +539,7 @@ def test_dict_input(self): class TestSaveLoadWithInputSpec(unittest.TestCase): def setUp(self): # enable dygraph mode - fluid.enable_dygraph() + base.enable_dygraph() self.temp_dir = tempfile.TemporaryDirectory() def tearDown(self): @@ -567,7 +567,7 @@ def test_with_input_spec(self): # 2. load to infer infer_layer = paddle.jit.load(model_path) - x = fluid.dygraph.to_variable( + x = base.dygraph.to_variable( np.random.random((4, 8)).astype('float32') ) pred = infer_layer(x) @@ -591,10 +591,10 @@ def test_multi_in_out(self): # 3. load to infer infer_layer = paddle.jit.load(model_path) - x = fluid.dygraph.to_variable( + x = base.dygraph.to_variable( np.random.random((4, 8)).astype('float32') ) - y = fluid.dygraph.to_variable( + y = base.dygraph.to_variable( np.random.random((4, 8)).astype('float32') ) # 4. predict @@ -633,10 +633,10 @@ def test_multi_in_out1(self): # 3. load to infer infer_layer = paddle.jit.load(model_path) - x = fluid.dygraph.to_variable( + x = base.dygraph.to_variable( np.random.random((4, 8)).astype('float32') ) - y = fluid.dygraph.to_variable( + y = base.dygraph.to_variable( np.random.random((4, 8)).astype('float32') ) # 4. predict @@ -666,7 +666,7 @@ def test_multi_in_out1(self): class TestJitSaveLoadConfig(unittest.TestCase): def setUp(self): # enable dygraph mode - fluid.enable_dygraph() + base.enable_dygraph() # config seed paddle.seed(SEED) paddle.framework.random._manual_program_seed(SEED) @@ -680,7 +680,7 @@ def test_output_spec(self): adam = paddle.optimizer.Adam( learning_rate=0.1, parameters=train_layer.parameters() ) - x = fluid.dygraph.to_variable( + x = base.dygraph.to_variable( np.random.random((4, 8)).astype('float32') ) for i in range(10): @@ -702,7 +702,7 @@ def test_output_spec(self): train_layer.eval() infer_layer = paddle.jit.load(model_path) - x = fluid.dygraph.to_variable( + x = base.dygraph.to_variable( np.random.random((4, 8)).astype('float32') ) np.testing.assert_array_equal( @@ -739,7 +739,7 @@ def setUp(self): self.temp_dir.name, "jit_multi_load/model" ) # enable dygraph mode - fluid.enable_dygraph() + base.enable_dygraph() # config seed paddle.seed(SEED) paddle.framework.random._manual_program_seed(SEED) @@ -775,7 +775,7 @@ def setUp(self): self.temp_dir.name, "jit_prune_model_and_load/model" ) # enable dygraph mode - fluid.enable_dygraph() + base.enable_dygraph() # config seed paddle.seed(SEED) paddle.framework.random._manual_program_seed(SEED) @@ -788,7 +788,7 @@ def train_and_save(self): adam = paddle.optimizer.Adam( learning_rate=0.1, parameters=train_layer.parameters() ) - x = fluid.dygraph.to_variable( + x = base.dygraph.to_variable( np.random.random((4, 8)).astype('float32') ) for i in range(10): @@ -813,7 +813,7 @@ def test_load_pruned_model(self): infer_layer = paddle.jit.load(self.model_path) - x = fluid.dygraph.to_variable( + x = base.dygraph.to_variable( np.random.random((4, 8)).astype('float32') ) np.testing.assert_array_equal( @@ -838,7 +838,7 @@ def test_load_var_not_in_extra_var_info(self): class TestJitSaveMultiCases(unittest.TestCase): def setUp(self): # enable dygraph mode - fluid.enable_dygraph() + base.enable_dygraph() # config seed paddle.seed(SEED) paddle.framework.random._manual_program_seed(SEED) diff --git a/test/legacy_test/test_kldiv_loss_op.py b/test/legacy_test/test_kldiv_loss_op.py index f16cd781c2bbe..02dc6151d32b8 100644 --- a/test/legacy_test/test_kldiv_loss_op.py +++ b/test/legacy_test/test_kldiv_loss_op.py @@ -89,7 +89,7 @@ def 
run_kl_loss(self, reduction, shape=(5, 20)): target = np.random.uniform(-10, 10, shape).astype('float64') gt_loss = kldiv_loss(x, target, reduction) - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): kldiv_criterion = paddle.nn.KLDivLoss(reduction) pred_loss = kldiv_criterion( paddle.to_tensor(x), paddle.to_tensor(target) @@ -123,7 +123,7 @@ def test_kl_loss_static_api(self): class TestKLDivLossTypePromotion(unittest.TestCase): def test_kl_div_promotion(self): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x1 = paddle.rand([5, 20], dtype='float32') target1 = paddle.rand([5, 20], dtype='float64') diff --git a/test/legacy_test/test_kron_op.py b/test/legacy_test/test_kron_op.py index 735e8bd1e3203..d5eac58233f12 100644 --- a/test/legacy_test/test_kron_op.py +++ b/test/legacy_test/test_kron_op.py @@ -18,9 +18,9 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -import paddle.fluid.dygraph as dg -from paddle import fluid -from paddle.fluid import core +import paddle.base.dygraph as dg +from paddle import base +from paddle.base import core class TestKronOp(OpTest): @@ -123,7 +123,7 @@ def test_case(self): a = np.random.randn(10, 10).astype(np.float64) b = np.random.randn(10, 10).astype(np.float64) - place = fluid.CPUPlace() + place = base.CPUPlace() with dg.guard(place): a_var = dg.to_variable(a) b_var = dg.to_variable(b) @@ -134,16 +134,16 @@ def test_case_with_output(self): a = np.random.randn(10, 10).astype(np.float64) b = np.random.randn(10, 10).astype(np.float64) - main = fluid.Program() - start = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, start): + main = base.Program() + start = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, start): a_var = paddle.static.data("a", [-1, -1], dtype="float64") b_var = paddle.static.data("b", [-1, -1], dtype="float64") out_var = paddle.kron(a_var, b_var) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) exe.run(start) (c,) = exe.run(main, feed={'a': a, 'b': b}, fetch_list=[out_var]) np.testing.assert_allclose(c, np.kron(a, b)) @@ -160,8 +160,8 @@ def setUp(self): self.init_input_output() self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), + 'X': OpTest.np_dtype_to_base_dtype(self.x), + 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.attrs = {'axis': -1, 'use_mkldnn': False} self.outputs = {'Out': self.out} diff --git a/test/legacy_test/test_kthvalue_op.py b/test/legacy_test/test_kthvalue_op.py index e2fa225fd8f7e..463ee34b880e8 100644 --- a/test/legacy_test/test_kthvalue_op.py +++ b/test/legacy_test/test_kthvalue_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def cal_kthvalue(x, k, axis, keepdim=False): @@ -141,7 +141,7 @@ def test_gpu_kernel(): ) test_cpu_kernel() - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): test_gpu_kernel() @@ -168,7 +168,7 @@ def test_nan_in_gpu_kernel(): self.assertEqual(inds[0, 2].numpy(), nan_position) test_nan_in_cpu_kernel() - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): test_nan_in_gpu_kernel() diff --git a/test/legacy_test/test_l1_loss.py b/test/legacy_test/test_l1_loss.py index 20295d1a9a92b..651d55977b34c 100644 --- 
a/test/legacy_test/test_l1_loss.py +++ b/test/legacy_test/test_l1_loss.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base class TestFunctionalL1Loss(unittest.TestCase): @@ -55,9 +55,9 @@ def run_static(self, use_gpu=False): result2 = paddle.nn.functional.l1_loss(input, label, reduction='none') y = paddle.nn.functional.l1_loss(input, label, name='aaa') - place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + place = base.CUDAPlace(0) if use_gpu else base.CPUPlace() + exe = base.Executor(place) + exe.run(base.default_startup_program()) static_result = exe.run( feed={"input": self.input_np, "label": self.label_np}, fetch_list=[result0, result1, result2], @@ -73,22 +73,22 @@ def run_static(self, use_gpu=False): self.assertTrue('aaa' in y.name) def test_cpu(self): - paddle.disable_static(place=paddle.fluid.CPUPlace()) + paddle.disable_static(place=paddle.base.CPUPlace()) self.run_imperative() paddle.enable_static() - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): self.run_static() def test_gpu(self): - if not fluid.core.is_compiled_with_cuda(): + if not base.core.is_compiled_with_cuda(): return - paddle.disable_static(place=paddle.fluid.CUDAPlace(0)) + paddle.disable_static(place=paddle.base.CUDAPlace(0)) self.run_imperative() paddle.enable_static() - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): self.run_static(use_gpu=True) # test case the raise message @@ -149,9 +149,9 @@ def run_static(self, use_gpu=False): l1_loss = paddle.nn.loss.L1Loss(name='aaa') result3 = l1_loss(input, label) - place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + place = base.CUDAPlace(0) if use_gpu else base.CPUPlace() + exe = base.Executor(place) + exe.run(base.default_startup_program()) static_result = exe.run( feed={"input": self.input_np, "label": self.label_np}, fetch_list=[result0, result1, result2], @@ -166,22 +166,22 @@ def run_static(self, use_gpu=False): self.assertTrue('aaa' in result3.name) def test_cpu(self): - paddle.disable_static(place=paddle.fluid.CPUPlace()) + paddle.disable_static(place=paddle.base.CPUPlace()) self.run_imperative() paddle.enable_static() - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): self.run_static() def test_gpu(self): - if not fluid.core.is_compiled_with_cuda(): + if not base.core.is_compiled_with_cuda(): return - paddle.disable_static(place=paddle.fluid.CUDAPlace(0)) + paddle.disable_static(place=paddle.base.CUDAPlace(0)) self.run_imperative() paddle.enable_static() - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): self.run_static(use_gpu=True) # test case the raise message diff --git a/test/legacy_test/test_label_smooth_functional.py b/test/legacy_test/test_label_smooth_functional.py index 621559286ea6d..81f868c83c895 100644 --- a/test/legacy_test/test_label_smooth_functional.py +++ b/test/legacy_test/test_label_smooth_functional.py @@ -17,9 +17,9 @@ import numpy as np import paddle -import paddle.fluid.dygraph as dg +import paddle.base.dygraph as dg import paddle.nn.functional as F -from paddle import fluid +from paddle import base class LabelSmoothTestCase(unittest.TestCase): @@ -41,12 +41,12 @@ def __init__( def setUp(self): self.label = np.random.randn(*(self.label_shape)).astype(self.dtype) - def 
fluid_layer(self, place): + def base_layer(self, place): paddle.enable_static() - main = fluid.Program() - start = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, start): + main = base.Program() + start = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, start): label_var = paddle.static.data( "input", self.label_shape, dtype=self.dtype ) @@ -56,17 +56,17 @@ def fluid_layer(self, place): epsilon=self.epsilon, ) feed_dict = {"input": self.label} - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(start) (y_np,) = exe.run(main, feed=feed_dict, fetch_list=[y_var]) return y_np def functional(self, place): paddle.enable_static() - main = fluid.Program() - start = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, start): + main = base.Program() + start = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, start): label_var = paddle.static.data( "input", self.label_shape, dtype=self.dtype ) @@ -74,7 +74,7 @@ def functional(self, place): label_var, prior_dist=self.prior_dist, epsilon=self.epsilon ) feed_dict = {"input": self.label} - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(start) (y_np,) = exe.run(main, feed=feed_dict, fetch_list=[y_var]) return y_np @@ -89,24 +89,24 @@ def paddle_dygraph_layer(self): return y_np def _test_equivalence(self, place): - place = fluid.CPUPlace() - result1 = self.fluid_layer(place) + place = base.CPUPlace() + result1 = self.base_layer(place) result2 = self.functional(place) result3 = self.paddle_dygraph_layer() np.testing.assert_array_almost_equal(result1, result2) np.testing.assert_array_almost_equal(result2, result3) def runTest(self): - place = fluid.CPUPlace() + place = base.CPUPlace() self._test_equivalence(place) - if fluid.core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) self._test_equivalence(place) class LabelSmoothErrorTestCase(LabelSmoothTestCase): def runTest(self): - place = fluid.CPUPlace() + place = base.CPUPlace() with dg.guard(place): with self.assertRaises(ValueError): self.paddle_dygraph_layer() diff --git a/test/legacy_test/test_label_smooth_op.py b/test/legacy_test/test_label_smooth_op.py index 5c45b6a9d89e2..d7cd2169a8ba7 100644 --- a/test/legacy_test/test_label_smooth_op.py +++ b/test/legacy_test/test_label_smooth_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core class TestLabelSmoothOp(OpTest): diff --git a/test/legacy_test/test_lamb_op.py b/test/legacy_test/test_lamb_op.py index 33b2ab65a8372..535b09bd8a448 100644 --- a/test/legacy_test/test_lamb_op.py +++ b/test/legacy_test/test_lamb_op.py @@ -19,7 +19,7 @@ from op import Operator import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/legacy_test/test_lambv2_op.py b/test/legacy_test/test_lambv2_op.py index b10eab7997055..b55d6dceb1319 100644 --- a/test/legacy_test/test_lambv2_op.py +++ b/test/legacy_test/test_lambv2_op.py @@ -17,14 +17,14 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.dygraph.base import switch_to_static_graph +from paddle import base +from paddle.base import core +from paddle.base.dygraph.base import switch_to_static_graph class LAMBOptimizer(paddle.optimizer.Lamb): def _append_optimize_op(self, block, 
param_and_grad): - assert isinstance(block, fluid.framework.Block) + assert isinstance(block, base.framework.Block) block.program._use_lamb = True m = moment1 = self._get_accumulator( @@ -119,7 +119,7 @@ def test_lamb_op_with_multi_steps(self): paddle.enable_static() def _build_static_model(main, startup, seed=100): - with fluid.program_guard(main, startup): + with base.program_guard(main, startup): main.random_seed = seed startup.random_seed = seed x = paddle.static.data( @@ -133,21 +133,21 @@ def _build_static_model(main, startup, seed=100): avg_loss = paddle.mean(loss) return avg_loss - place = fluid.CPUPlace() + place = base.CPUPlace() num_steps = 10 for i in range(num_steps): feed_x = np.random.random(size=(10, 13)).astype('float32') feed_y = np.random.random(size=(10, 1)).astype('float32') - main_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(main_program, startup_program): + main_program = base.Program() + startup_program = base.Program() + with base.program_guard(main_program, startup_program): avg_loss = _build_static_model(main_program, startup_program) lamb_kernel = paddle.optimizer.Lamb(learning_rate=0.2) lamb_kernel.minimize(avg_loss) - executor = fluid.Executor(place) + executor = base.Executor(place) executor.run(startup_program) output = executor.run( program=main_program, @@ -155,14 +155,14 @@ def _build_static_model(main, startup, seed=100): fetch_list=[avg_loss.name], ) - main = fluid.Program() - startup = fluid.Program() - with fluid.program_guard(main, startup): + main = base.Program() + startup = base.Program() + with base.program_guard(main, startup): loss = _build_static_model(main, startup) lamb = LAMBOptimizer(learning_rate=0.2) lamb.minimize(loss) - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(startup) out = exe.run( program=main, diff --git a/test/legacy_test/test_layer_norm_op.py b/test/legacy_test/test_layer_norm_op.py index 0572ec64fe37d..76ce8bb2934b5 100644 --- a/test/legacy_test/test_layer_norm_op.py +++ b/test/legacy_test/test_layer_norm_op.py @@ -25,8 +25,8 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard from paddle.static.amp.fp16_utils import _keep_layer_norm_scale_bias_to_fp32 paddle.enable_static() @@ -584,8 +584,8 @@ def test_with_place( var_names += ['bias'] ground_truth = {name: var_dict[name] for name in var_names} - program = fluid.Program() - with fluid.program_guard(program): + program = base.Program() + with base.program_guard(program): block = program.global_block() for name in ground_truth: block.create_var( @@ -638,7 +638,7 @@ def test_with_place( grad_var.set_dtype(core.VarDesc.VarType.FP32) program._sync_with_cpp() - exe = fluid.Executor(place) + exe = base.Executor(place) out = exe.run( program, feed={ @@ -859,7 +859,7 @@ def check_main(self, x_np, weight_np, bias_np, dtype): bias = paddle.to_tensor(bias_np) if dtype == "bfloat16": - x = x.cast(paddle.fluid.core.VarDesc.VarType.BF16) + x = x.cast(paddle.base.core.VarDesc.VarType.BF16) x.stop_gradient = False weight.stop_gradient = False @@ -919,7 +919,7 @@ def check_layer_norm( x = paddle.to_tensor(x_np) if dtype == "bfloat16": - x = x.cast(paddle.fluid.core.VarDesc.VarType.BF16) + x = x.cast(paddle.base.core.VarDesc.VarType.BF16) x.stop_gradient = True bias = paddle.to_tensor(bias_np) if has_scale else None diff --git a/test/legacy_test/test_layer_norm_op_v2.py 
b/test/legacy_test/test_layer_norm_op_v2.py index 9087fe267808a..6e8cdfe728cfe 100644 --- a/test/legacy_test/test_layer_norm_op_v2.py +++ b/test/legacy_test/test_layer_norm_op_v2.py @@ -17,26 +17,26 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard class TestDygraphLayerNormv2(unittest.TestCase): def test_dygraph(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda() and core.op_support_gpu("layer_norm"): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: shape = [4, 10, 4, 4] def compute_v1(x): - with fluid.dygraph.guard(p): + with base.dygraph.guard(p): ln = paddle.nn.LayerNorm(shape[1:]) y = ln(paddle.to_tensor(x)) return y.numpy() def compute_v2(x): - with fluid.dygraph.guard(p): + with base.dygraph.guard(p): ln = paddle.nn.LayerNorm(shape[1:]) y = ln(paddle.to_tensor(x)) return y.numpy() @@ -47,14 +47,14 @@ def compute_v2(x): np.testing.assert_allclose(y1, y2, rtol=1e-05) def test_eager(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda() and core.op_support_gpu("layer_norm"): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: shape = [4, 10, 4, 4] def compute_v1(x): - with fluid.dygraph.guard(p): + with base.dygraph.guard(p): ln = paddle.nn.LayerNorm(shape[1:]) x1 = paddle.to_tensor(x) x1.stop_gradient = False @@ -63,7 +63,7 @@ def compute_v1(x): return y.numpy(), x1.gradient() def compute_v2(x): - with fluid.dygraph.guard(p): + with base.dygraph.guard(p): ln = paddle.nn.LayerNorm(shape[1:]) x1 = paddle.to_tensor(x) x1.stop_gradient = False @@ -79,11 +79,11 @@ def compute_v2(x): def test_static(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda() and core.op_support_gpu("layer_norm"): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: - exe = fluid.Executor(p) + exe = base.Executor(p) shape = [4, 10, 16, 16] def compute_v1(x_np): @@ -93,7 +93,7 @@ def compute_v1(x_np): name='x', shape=x_np.shape, dtype=x_np.dtype ) y = ln(x) - exe.run(fluid.default_startup_program()) + exe.run(base.default_startup_program()) r = exe.run(feed={'x': x_np}, fetch_list=[y])[0] return r @@ -104,7 +104,7 @@ def compute_v2(x_np): name='x', shape=x_np.shape, dtype=x_np.dtype ) y = ln(x) - exe.run(fluid.default_startup_program()) + exe.run(base.default_startup_program()) r = exe.run(feed={'x': x_np}, fetch_list=[y])[0] return r @@ -116,38 +116,38 @@ def compute_v2(x_np): class TestLayerNormFunction(unittest.TestCase): def test_dygraph(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda() and core.op_support_gpu("layer_norm"): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: shape = [4, 10, 4, 4] def compute_v0(x): - with fluid.dygraph.guard(p): + with base.dygraph.guard(p): ln = paddle.nn.LayerNorm(shape[1:]) y = ln(paddle.to_tensor(x)) return y.numpy() def compute_v1(x): - with fluid.dygraph.guard(p): + with base.dygraph.guard(p): x = paddle.to_tensor(x) y = paddle.nn.functional.layer_norm(x, shape[1:]) return y.numpy() def compute_v2(x): - with fluid.dygraph.guard(p): + with base.dygraph.guard(p): x = paddle.to_tensor(x) y = paddle.nn.functional.layer_norm(x, tuple(shape[1:])) return y.numpy() def compute_v3(x): - with 
fluid.dygraph.guard(p): + with base.dygraph.guard(p): ln = paddle.nn.LayerNorm(shape[-1]) y = ln(paddle.to_tensor(x)) return y.numpy() def compute_v4(x): - with fluid.dygraph.guard(p): + with base.dygraph.guard(p): x = paddle.to_tensor(x) y = paddle.nn.functional.layer_norm(x, shape[-1]) return y.numpy() diff --git a/test/legacy_test/test_layers.py b/test/legacy_test/test_layers.py index 25d5ad42bea48..a4d7d22ab677a 100644 --- a/test/legacy_test/test_layers.py +++ b/test/legacy_test/test_layers.py @@ -23,10 +23,10 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.dygraph import base, to_variable -from paddle.fluid.framework import Program, default_main_program, program_guard +from paddle import base +from paddle.base import core +from paddle.base.dygraph import base, to_variable +from paddle.base.framework import Program, default_main_program, program_guard from paddle.incubate.layers.nn import ( batch_fc, partial_concat, @@ -65,10 +65,10 @@ def static_graph(self): def get_static_graph_result( self, feed, fetch_list, with_lod=False, force_to_use_cpu=False ): - exe = fluid.Executor(self._get_place(force_to_use_cpu)) - exe.run(fluid.default_startup_program()) + exe = base.Executor(self._get_place(force_to_use_cpu)) + exe.run(base.default_startup_program()) return exe.run( - fluid.default_main_program(), + base.default_main_program(), feed=feed, fetch_list=fetch_list, return_numpy=(not with_lod), @@ -76,7 +76,7 @@ def get_static_graph_result( @contextlib.contextmanager def dynamic_graph(self, force_to_use_cpu=False): - with fluid.dygraph.guard( + with base.dygraph.guard( self._get_place(force_to_use_cpu=force_to_use_cpu) ): paddle.seed(self.seed) @@ -440,7 +440,7 @@ def test_conv2d_transpose(self): with self.dynamic_graph(): images = np.ones([2, 3, 5, 5], dtype='float32') custom_weight = np.random.randn(3, 3, 2, 2).astype("float32") - weight_attr = fluid.ParamAttr( + weight_attr = base.ParamAttr( initializer=paddle.nn.initializer.Assign(custom_weight) ) conv2d1 = paddle.nn.Conv2DTranspose(3, 3, [2, 2]) @@ -570,7 +570,7 @@ def test_bilinear_tensor_product(self): with self.dynamic_graph(): custom_weight = np.random.randn(6, 3, 3).astype("float32") - weight_attr = fluid.ParamAttr( + weight_attr = base.ParamAttr( initializer=paddle.nn.initializer.Assign(custom_weight) ) btp1 = paddle.nn.Bilinear(3, 3, 6) @@ -642,7 +642,7 @@ def test_embeding(self): with self.dynamic_graph(): custom_weight = np.random.randn(dict_size, 32).astype("float32") - weight_attr = fluid.ParamAttr( + weight_attr = base.ParamAttr( initializer=paddle.nn.initializer.Assign(custom_weight) ) emb1 = paddle.nn.Embedding(dict_size, 32, sparse=False) @@ -665,10 +665,10 @@ def test_embeding(self): def test_one_hot(self): with self.dynamic_graph(): - label = fluid.dygraph.to_variable(np.array([[1], [1], [3], [0]])) + label = base.dygraph.to_variable(np.array([[1], [1], [3], [0]])) one_hot_label1 = paddle.nn.functional.one_hot(label, 4) one_hot_label2 = paddle.nn.functional.one_hot( - label, fluid.dygraph.to_variable(np.array([4])) + label, base.dygraph.to_variable(np.array([4])) ) np.testing.assert_array_equal( one_hot_label1.numpy(), one_hot_label2.numpy() @@ -676,22 +676,22 @@ def test_one_hot(self): def test_split(self): with self.dynamic_graph(): - input = fluid.dygraph.to_variable(np.random.random((3, 8, 5))) + input = base.dygraph.to_variable(np.random.random((3, 8, 5))) x0, x1 = paddle.split(input, num_or_sections=2, axis=1) x00, x11 = paddle.split( 
input, num_or_sections=2, - axis=fluid.dygraph.to_variable(np.array([1])), + axis=base.dygraph.to_variable(np.array([1])), ) np.testing.assert_array_equal(x0.numpy(), x00.numpy()) np.testing.assert_array_equal(x1.numpy(), x11.numpy()) def test_topk(self): with self.dynamic_graph(): - input = fluid.dygraph.to_variable(np.random.random((13, 11))) + input = base.dygraph.to_variable(np.random.random((13, 11))) top5_values1, top5_indices1 = paddle.topk(input, k=5) top5_values2, top5_indices2 = paddle.topk( - input, k=fluid.dygraph.to_variable(np.array([5])) + input, k=base.dygraph.to_variable(np.array([5])) ) np.testing.assert_array_equal( top5_values1.numpy(), top5_values2.numpy() @@ -740,7 +740,7 @@ def test_conv3d(self): with self.dynamic_graph(): images = np.ones([2, 3, 6, 6, 6], dtype='float32') custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32") - weight_attr = fluid.ParamAttr( + weight_attr = base.ParamAttr( initializer=paddle.nn.initializer.Assign(custom_weight) ) conv3d1 = paddle.nn.Conv3D( @@ -801,7 +801,7 @@ def test_group_norm(self): ) static_ret = self.get_static_graph_result( feed={ - 'X': fluid.create_lod_tensor( + 'X': base.create_lod_tensor( data=input, recursive_seq_lens=[[1, 1]], place=place ) }, @@ -822,7 +822,7 @@ def test_group_norm(self): ret = groupNorm(X) static_ret2 = self.get_static_graph_result( feed={ - 'X': fluid.create_lod_tensor( + 'X': base.create_lod_tensor( data=input, recursive_seq_lens=[[1, 1]], place=place ) }, @@ -917,7 +917,7 @@ def test_spectral_norm(self): ) static_ret = self.get_static_graph_result( feed={ - 'Weight': fluid.create_lod_tensor( + 'Weight': base.create_lod_tensor( data=input, recursive_seq_lens=[[1, 1]], place=place ), }, @@ -933,7 +933,7 @@ def test_spectral_norm(self): ret = spectralNorm(Weight) static_ret2 = self.get_static_graph_result( feed={ - 'Weight': fluid.create_lod_tensor( + 'Weight': base.create_lod_tensor( data=input, recursive_seq_lens=[[1, 1]], place=place ) }, @@ -987,7 +987,7 @@ def test_conv3d_transpose(self): with self.dynamic_graph(): images = np.ones([2, 3, 6, 6, 6], dtype='float32') custom_weight = np.random.randn(3, 3, 2, 2, 2).astype("float32") - weight_attr = fluid.ParamAttr( + weight_attr = base.ParamAttr( initializer=paddle.nn.initializer.Assign(custom_weight) ) conv3d1 = paddle.nn.Conv3DTranspose( @@ -1190,17 +1190,17 @@ def greater_equal_branch(a, b): lambda: less_than_branch(a, b), ) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) ret = exe.run(fetch_list=[out]) static_res = ret[0] with self.dynamic_graph(): - a = fluid.dygraph.to_variable(np.array([0.1]).astype('float32')) - b = fluid.dygraph.to_variable(np.array([0.23]).astype('float32')) + a = base.dygraph.to_variable(np.array([0.1]).astype('float32')) + b = base.dygraph.to_variable(np.array([0.23]).astype('float32')) out = paddle.static.nn.cond( a < b, lambda: less_than_branch(a, b), @@ -1260,11 +1260,11 @@ def fn_3(): ) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) static_res1, static_res2 = exe.run(fetch_list=[out_1, out_2]) with self.dynamic_graph(): @@ -1334,11 +1334,11 @@ def fn_3(): ) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = 
base.Executor(place) static_res1, static_res2, static_res3 = exe.run( fetch_list=[out_1, out_2, out_3] ) @@ -1424,10 +1424,10 @@ def test_accuracy(self): fc_out = paddle.nn.Linear(32 * 32, 10)(data_new) predict = paddle.nn.functional.softmax(fc_out) result = paddle.static.accuracy(input=predict, label=label, k=5) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) - exe.run(fluid.default_startup_program()) + exe.run(base.default_startup_program()) # x = np.random.rand(3, 32, 32).astype("float32") # y = np.array([[1], [0], [1]]) @@ -1559,8 +1559,8 @@ def _get_data( def make_fit_a_line(self): with program_guard( - fluid.default_main_program(), - startup_program=fluid.default_startup_program(), + base.default_main_program(), + startup_program=base.default_startup_program(), ): x = self._get_data(name='x', shape=[13], dtype='float32') y_predict = paddle.nn.Linear(13, 1)(x) @@ -1573,7 +1573,7 @@ def make_fit_a_line(self): def make_recognize_digits_mlp(self): with program_guard( - fluid.default_main_program(), fluid.default_startup_program() + base.default_main_program(), base.default_startup_program() ): # Change g_program, so the rest layers use `g_program` images = self._get_data(name='pixel', shape=[784], dtype='float32') @@ -1594,7 +1594,7 @@ def make_recognize_digits_mlp(self): def make_conv2d_transpose(self): with program_guard( - fluid.default_main_program(), fluid.default_startup_program() + base.default_main_program(), base.default_startup_program() ): img = self._get_data(name='pixel', shape=[3, 2, 2], dtype='float32') return paddle.static.nn.conv2d_transpose( @@ -1603,7 +1603,7 @@ def make_conv2d_transpose(self): def make_recognize_digits_conv(self): with program_guard( - fluid.default_main_program(), fluid.default_startup_program() + base.default_main_program(), base.default_startup_program() ): images = self._get_data( name='pixel', shape=[1, 28, 28], dtype='float32' @@ -1650,7 +1650,7 @@ def make_recognize_digits_conv(self): def make_word_embedding(self): with program_guard( - fluid.default_main_program(), fluid.default_startup_program() + base.default_main_program(), base.default_startup_program() ): dict_size = 10000 embed_size = 32 @@ -1710,7 +1710,7 @@ def make_word_embedding(self): def make_pool2d(self): with program_guard( - fluid.default_main_program(), fluid.default_startup_program() + base.default_main_program(), base.default_startup_program() ): x = self._get_data(name='x', shape=[3, 224, 224], dtype='float32') return paddle.nn.functional.max_pool2d( @@ -1719,7 +1719,7 @@ def make_pool2d(self): def make_pool2d_infershape(self): with program_guard( - fluid.default_main_program(), fluid.default_startup_program() + base.default_main_program(), base.default_startup_program() ): theta = self._get_data("theta", shape=[2, 3], dtype='float32') x = paddle.nn.functional.affine_grid( @@ -1731,7 +1731,7 @@ def make_pool2d_infershape(self): def make_softmax(self): with program_guard( - fluid.default_main_program(), fluid.default_startup_program() + base.default_main_program(), base.default_startup_program() ): data = self._get_data(name='data', shape=[10], dtype='float32') hid = paddle.nn.Linear(10, 20)(data) @@ -1776,7 +1776,7 @@ def make_nce(self): def make_multiplex(self): with program_guard( - fluid.default_main_program(), fluid.default_startup_program() + base.default_main_program(), base.default_startup_program() ): x1 = self._get_data(name='x1', shape=[4], dtype='float32') x2 = self._get_data(name='x2', shape=[4], 
dtype='float32') @@ -1786,7 +1786,7 @@ def make_multiplex(self): def make_softmax_with_cross_entropy(self): with program_guard( - fluid.default_main_program(), fluid.default_startup_program() + base.default_main_program(), base.default_startup_program() ): x = self._get_data(name='x', shape=[16], dtype='float32') y = self._get_data(name='label', shape=[1], dtype='int64') @@ -1823,7 +1823,7 @@ def make_softmax_with_cross_entropy(self): def make_scatter(self): with program_guard( - fluid.default_main_program(), fluid.default_startup_program() + base.default_main_program(), base.default_startup_program() ): x = self._get_data( name='x', shape=[3, 3], append_batch_size=False, dtype='float32' @@ -1841,7 +1841,7 @@ def make_scatter(self): return out def make_one_hot(self): - with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()): + with base.framework._dygraph_place_guard(place=base.CPUPlace()): label = self._get_data(name="label", shape=[1], dtype="int32") one_hot_label = paddle.nn.functional.one_hot(label, 10) return one_hot_label @@ -1849,7 +1849,7 @@ def make_one_hot(self): def make_label_smooth(self): # TODO(minqiyang): support gpu ut self._force_to_use_cpu = True - with fluid.framework._dygraph_place_guard(place=fluid.CPUPlace()): + with base.framework._dygraph_place_guard(place=base.CPUPlace()): label = self._get_data(name="label", shape=[1], dtype="int32") one_hot_label = paddle.nn.functional.one_hot(label, 10) smooth_label = F.label_smooth(label=one_hot_label, epsilon=0.1) @@ -1857,7 +1857,7 @@ def make_label_smooth(self): def make_topk(self): with program_guard( - fluid.default_main_program(), fluid.default_startup_program() + base.default_main_program(), base.default_startup_program() ): data = self._get_data(name="label", shape=[200], dtype="float32") values, indices = paddle.topk(data, k=5) @@ -1866,7 +1866,7 @@ def make_topk(self): def make_l2_normalize(self): with program_guard( - fluid.default_main_program(), fluid.default_startup_program() + base.default_main_program(), base.default_startup_program() ): x = self._get_data(name='x', shape=[8, 7, 10], dtype="float32") output = paddle.nn.functional.normalize(x, axis=1) @@ -1874,7 +1874,7 @@ def make_l2_normalize(self): def make_shape(self): with program_guard( - fluid.default_main_program(), fluid.default_startup_program() + base.default_main_program(), base.default_startup_program() ): input = self._get_data( name="input", shape=[3, 100, 100], dtype="float32" @@ -1884,7 +1884,7 @@ def make_shape(self): def make_pad2d(self): with program_guard( - fluid.default_main_program(), fluid.default_startup_program() + base.default_main_program(), base.default_startup_program() ): input = self._get_data( name="input", shape=[3, 100, 100], dtype="float32" @@ -1901,7 +1901,7 @@ def make_pad2d(self): def make_mish(self): with program_guard( - fluid.default_main_program(), fluid.default_startup_program() + base.default_main_program(), base.default_startup_program() ): input = self._get_data(name="input", shape=[16], dtype="float32") out = paddle.nn.functional.mish(input, name='mish') @@ -1909,7 +1909,7 @@ def make_mish(self): def make_cross_entropy(self): with program_guard( - fluid.default_main_program(), fluid.default_startup_program() + base.default_main_program(), base.default_startup_program() ): x = self._get_data(name="x", shape=[30, 10], dtype="float32") label = self._get_data(name="label", shape=[30, 1], dtype="int64") @@ -1926,7 +1926,7 @@ def make_cross_entropy(self): def make_uniform_random_batch_size_like(self): with 
program_guard( - fluid.default_main_program(), fluid.default_startup_program() + base.default_main_program(), base.default_startup_program() ): input = self._get_data( name="input", shape=[13, 11], dtype='float32' @@ -1936,14 +1936,14 @@ def make_uniform_random_batch_size_like(self): def make_gaussian_random(self): with program_guard( - fluid.default_main_program(), fluid.default_startup_program() + base.default_main_program(), base.default_startup_program() ): out = random.gaussian(shape=[20, 30]) return out def make_sum(self): with program_guard( - fluid.default_main_program(), fluid.default_startup_program() + base.default_main_program(), base.default_startup_program() ): input = self._get_data( name="input", shape=[13, 11], dtype='float32' @@ -1958,7 +1958,7 @@ def make_slice(self): axes = [0, 1, 2] with program_guard( - fluid.default_main_program(), fluid.default_startup_program() + base.default_main_program(), base.default_startup_program() ): input = self._get_data( name="input", shape=[3, 4, 5, 6], dtype='float32' @@ -1969,7 +1969,7 @@ def make_slice(self): def make_scale_variable(self): with program_guard( - fluid.default_main_program(), fluid.default_startup_program() + base.default_main_program(), base.default_startup_program() ): input = self._get_data( name="input", shape=[3, 4, 5, 6], dtype='float32' @@ -1985,7 +1985,7 @@ def make_scale_variable(self): def make_bilinear_tensor_product_layer(self): with program_guard( - fluid.default_main_program(), fluid.default_startup_program() + base.default_main_program(), base.default_startup_program() ): data = self._get_data(name='data', shape=[4], dtype="float32") @@ -1997,7 +1997,7 @@ def make_bilinear_tensor_product_layer(self): def make_batch_norm(self): with program_guard( - fluid.default_main_program(), fluid.default_startup_program() + base.default_main_program(), base.default_startup_program() ): data = self._get_data( name='data', shape=[32, 128, 128], dtype="float32" @@ -2007,7 +2007,7 @@ def make_batch_norm(self): def make_batch_norm_momentum_variable(self): with program_guard( - fluid.default_main_program(), fluid.default_startup_program() + base.default_main_program(), base.default_startup_program() ): data = self._get_data( name='data', shape=[32, 128, 128], dtype="float32" @@ -2023,7 +2023,7 @@ def make_batch_norm_momentum_variable(self): def make_range(self): with program_guard( - fluid.default_main_program(), fluid.default_startup_program() + base.default_main_program(), base.default_startup_program() ): paddle.arange(0, 10, 2, 'int32') paddle.arange(0.1, 10.0, 0.2, 'float32') @@ -2042,7 +2042,7 @@ def make_range(self): def make_spectral_norm(self): with program_guard( - fluid.default_main_program(), fluid.default_startup_program() + base.default_main_program(), base.default_startup_program() ): weight = self._get_data( name='weight', @@ -2055,7 +2055,7 @@ def make_spectral_norm(self): def make_kldiv_loss(self): with program_guard( - fluid.default_main_program(), fluid.default_startup_program() + base.default_main_program(), base.default_startup_program() ): x = self._get_data( name='x', @@ -2076,7 +2076,7 @@ def make_kldiv_loss(self): def make_pixel_shuffle(self): with program_guard( - fluid.default_main_program(), fluid.default_startup_program() + base.default_main_program(), base.default_startup_program() ): x = self._get_data(name="X", shape=[9, 4, 4], dtype="float32") out = paddle.nn.functional.pixel_shuffle(x, upscale_factor=3) @@ -2084,7 +2084,7 @@ def make_pixel_shuffle(self): def make_mse_loss(self): with 
program_guard( - fluid.default_main_program(), fluid.default_startup_program() + base.default_main_program(), base.default_startup_program() ): x = self._get_data(name="X", shape=[1], dtype="float32") y = self._get_data(name="Y", shape=[1], dtype="float32") @@ -2093,7 +2093,7 @@ def make_mse_loss(self): def make_square_error_cost(self): with program_guard( - fluid.default_main_program(), fluid.default_startup_program() + base.default_main_program(), base.default_startup_program() ): x = self._get_data(name="X", shape=[1], dtype="float32") y = self._get_data(name="Y", shape=[1], dtype="float32") @@ -2161,13 +2161,13 @@ def test_batch_fc(self): out = batch_fc( input=input, param_size=[16, 3, 10], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( learning_rate=1.0, name="w_0", initializer=paddle.nn.initializer.XavierNormal(), ), bias_size=[16, 10], - bias_attr=fluid.ParamAttr( + bias_attr=base.ParamAttr( learning_rate=1.0, name="b_0", initializer=paddle.nn.initializer.XavierNormal(), @@ -2188,7 +2188,7 @@ def test_rank_attention(self): input=input, rank_offset=rank_offset, rank_param_shape=[18, 3], - rank_param_attr=fluid.ParamAttr( + rank_param_attr=base.ParamAttr( learning_rate=1.0, name="ubm_rank_param.w_0", initializer=paddle.nn.initializer.XavierNormal(), @@ -2261,7 +2261,7 @@ def test_partial_concat(self): def test_addmm(self): with program_guard( - fluid.default_main_program(), fluid.default_startup_program() + base.default_main_program(), base.default_startup_program() ): input = paddle.static.data( name='input_data', @@ -2313,14 +2313,14 @@ def forward(self): class TestLayerParameterTrainableSet(unittest.TestCase): def test_layer_parameter_set(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): net = ExampleNet() self.assertFalse(net.weight.trainable) class TestLayerTrainingAttribute(unittest.TestCase): def test_set_train_eval_in_dynamic_mode(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): net = paddle.nn.Dropout() net.train() self.assertTrue(net.training) @@ -2359,7 +2359,7 @@ def forward(self, input): class TestSubLayerCount(unittest.TestCase): def test_sublayer(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): mySuperlayer = MySuperLayer() self.assertTrue(len(mySuperlayer.sublayers()) == 3) self.assertTrue(len(mySuperlayer.sublayers(include_self=True)) == 4) diff --git a/test/legacy_test/test_layout_autotune.py b/test/legacy_test/test_layout_autotune.py index b4297e98817a8..68e85bc866e84 100644 --- a/test/legacy_test/test_layout_autotune.py +++ b/test/legacy_test/test_layout_autotune.py @@ -44,11 +44,11 @@ def forward(self, image): class LayoutAutoTune(unittest.TestCase): def test_config(self): - paddle.fluid.core.enable_layout_autotune() + paddle.base.core.enable_layout_autotune() if self.use_autoune(): - self.assertEqual(paddle.fluid.core.use_layout_autotune(), True) - paddle.fluid.core.disable_layout_autotune() - self.assertEqual(paddle.fluid.core.use_layout_autotune(), False) + self.assertEqual(paddle.base.core.use_layout_autotune(), True) + paddle.base.core.disable_layout_autotune() + self.assertEqual(paddle.base.core.use_layout_autotune(), False) self.use_autoune() def setUp(self): @@ -59,7 +59,7 @@ def use_autoune(self): paddle.incubate.autotune.set_config( config={"layout": {"enable": True}} ) - return paddle.fluid.core.use_layout_autotune() + return paddle.base.core.use_layout_autotune() else: config = {"layout": {"enable": False}} tfile = tempfile.NamedTemporaryFile(mode="w+", delete=False) @@ -67,7 +67,7 @@ def 
use_autoune(self): tfile.close() paddle.incubate.autotune.set_config(tfile.name) os.remove(tfile.name) - return paddle.fluid.core.use_layout_autotune() + return paddle.base.core.use_layout_autotune() def train(self, data_format): model = SimpleNet(data_format="NCHW", class_num=2) diff --git a/test/legacy_test/test_lazy_init.py b/test/legacy_test/test_lazy_init.py index 8b2bef91c00e8..37bf8b6fe19ae 100644 --- a/test/legacy_test/test_lazy_init.py +++ b/test/legacy_test/test_lazy_init.py @@ -18,7 +18,7 @@ import paddle from paddle import LazyGuard -from paddle.fluid import unique_name +from paddle.base import unique_name from paddle.nn import Layer, Linear from paddle.nn.initializer import ( Constant, diff --git a/test/legacy_test/test_lcm.py b/test/legacy_test/test_lcm.py index 478853d8bab8f..706c2dc23c32e 100644 --- a/test/legacy_test/test_lcm.py +++ b/test/legacy_test/test_lcm.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -31,9 +31,9 @@ def setUp(self): self.y_shape = [] def test_static_graph(self): - startup_program = fluid.Program() - train_program = fluid.Program() - with fluid.program_guard(startup_program, train_program): + startup_program = base.Program() + train_program = base.Program() + with base.program_guard(startup_program, train_program): x1 = paddle.static.data( name='input1', dtype='int32', shape=self.x_shape ) @@ -43,13 +43,13 @@ def test_static_graph(self): out = paddle.lcm(x1, x2) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) res = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={'input1': self.x_np, 'input2': self.y_np}, fetch_list=[out], ) diff --git a/test/legacy_test/test_ldexp.py b/test/legacy_test/test_ldexp.py index b2a9976dd9c18..76e1b454a2e2c 100644 --- a/test/legacy_test/test_ldexp.py +++ b/test/legacy_test/test_ldexp.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core from paddle.static import Program, program_guard DYNAMIC = 1 diff --git a/test/legacy_test/test_learning_rate_scheduler.py b/test/legacy_test/test_learning_rate_scheduler.py index 8898fb59b87b1..fd92acaf719b1 100644 --- a/test/legacy_test/test_learning_rate_scheduler.py +++ b/test/legacy_test/test_learning_rate_scheduler.py @@ -19,8 +19,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core, framework +from paddle import base +from paddle.base import core, framework def exponential_decay( @@ -118,10 +118,10 @@ def lambda_decay(global_step, learning_rate, lr_lambda): class TestLearningRateDecayDygraph(unittest.TestCase): def test_LR_state_dict(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): x = np.random.uniform(-1, 1, [3, 10]).astype("float32") linear = paddle.nn.Linear(10, 10) - input = fluid.dygraph.to_variable(x) + input = base.dygraph.to_variable(x) Exponential_scheduler = paddle.optimizer.lr.ExponentialDecay( learning_rate=0.1, @@ -235,7 +235,7 @@ def test_LR_state_dict(self): ) def test_NoamDecay(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): d_model = 0.01 warmup_steps = 200 learning_rate = 2.0 @@ -248,18 +248,18 @@ def test_NoamDecay(self): step, d_model, warmup_steps, learning_rate ) lr.step() - fluid_result = lr() + base_result = lr() 
self.assertAlmostEqual( right_result, - fluid_result, + base_result, msg='Failed lr scheduler in step {}, Python result is {}, Fluid result is {}'.format( - step, right_result, fluid_result + step, right_result, base_result ), ) def test_LinearLrWarmup(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): lr = paddle.optimizer.lr.PolynomialDecay( learning_rate=1.0, decay_steps=10, @@ -288,7 +288,7 @@ def test_LinearLrWarmup(self): ) def test_MultiStepDecay(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): learning_rate = 0.5 milestones = [2, 4, 8] decay_rate = 0.2 @@ -305,14 +305,14 @@ def test_MultiStepDecay(self): right_result = multi_step_decay( epoch, learning_rate, milestones, decay_rate ) - fluid_result = adam.get_lr() + base_result = adam.get_lr() adam.step() scheduler.step() self.assertAlmostEqual( right_result, - fluid_result, + base_result, msg='Failed lr scheduler in epoch {}, Python result is {}, Fluid result is {}'.format( - epoch, right_result, fluid_result + epoch, right_result, base_result ), ) @@ -333,7 +333,7 @@ def test_MultiStepDecay(self): lr = paddle.optimizer.lr.MultiStepDecay(-1, [20, 30, 50]) def test_StepDecay(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): learning_rate = 0.5 step_size = 3 decay_rate = 0.2 @@ -344,14 +344,14 @@ def test_StepDecay(self): right_result = step_decay( epoch, learning_rate, step_size, decay_rate ) - fluid_result = scheduler() + base_result = scheduler() scheduler.get_lr() scheduler.step() self.assertAlmostEqual( right_result, - fluid_result, + base_result, msg='Failed lr scheduler in epoch {}, Python result is {}, Fluid result is {}'.format( - epoch, right_result, fluid_result + epoch, right_result, base_result ), ) @@ -362,7 +362,7 @@ def test_StepDecay(self): lr = paddle.optimizer.lr.StepDecay(learning_rate, 20, 2) def test_LambdaDecay(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): learning_rate = 0.5 lr_lambda = lambda x: 0.95**x scheduler = paddle.optimizer.lr.LambdaDecay( @@ -376,14 +376,14 @@ def test_LambdaDecay(self): for epoch in range(30): right_result = lambda_decay(epoch, learning_rate, lr_lambda) - fluid_result = scheduler() + base_result = scheduler() scheduler.get_lr() scheduler.step() self.assertAlmostEqual( right_result, - fluid_result, + base_result, msg='Failed lr scheduler in epoch {}, Python result is {}, Fluid result is {}'.format( - epoch, right_result, fluid_result + epoch, right_result, base_result ), ) @@ -392,26 +392,26 @@ def test_LambdaDecay(self): class TestLearningRateDecay(unittest.TestCase): - def check_decay(self, python_decay_fn, fluid_decay_fn, kwargs): - places = [fluid.CPUPlace()] + def check_decay(self, python_decay_fn, base_decay_fn, kwargs): + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for place in places: self.check_decay_with_place( - place, python_decay_fn, fluid_decay_fn, kwargs + place, python_decay_fn, base_decay_fn, kwargs ) def check_decay_with_place( - self, place, python_decay_fn, fluid_decay_fn, kwargs + self, place, python_decay_fn, base_decay_fn, kwargs ): - main_prog = fluid.Program() - startup_prog = fluid.Program() + main_prog = base.Program() + startup_prog = base.Program() - with fluid.program_guard(main_prog, startup_prog): - decayed_lr = fluid_decay_fn(**kwargs) + with base.program_guard(main_prog, startup_prog): + decayed_lr = base_decay_fn(**kwargs) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = 
base.CPUPlace() + exe = base.Executor(place) exe.run(startup_prog) @@ -502,7 +502,7 @@ def test_decay(self): ), ] - for py_decay_fn, fluid_decay_fn, kwargs in decay_fns: + for py_decay_fn, base_decay_fn, kwargs in decay_fns: print( "class=" + self.__class__.__name__ @@ -514,32 +514,32 @@ def test_decay(self): main_program = framework.Program() startup_program = framework.Program() with framework.program_guard(main_program, startup_program): - self.check_decay(py_decay_fn, fluid_decay_fn, kwargs) + self.check_decay(py_decay_fn, base_decay_fn, kwargs) class TestLinearWamrupLearningRateDecay(unittest.TestCase): def check_decay_with_place( - self, place, python_decay_fn, fluid_decay_fn, kwargs + self, place, python_decay_fn, base_decay_fn, kwargs ): - main_prog = fluid.Program() - startup_prog = fluid.Program() + main_prog = base.Program() + startup_prog = base.Program() warmup_steps = 10 start_lr = 0.1 / 3.0 end_lr = 0.1 - with fluid.program_guard(main_prog, startup_prog): + with base.program_guard(main_prog, startup_prog): decayed_lr = paddle.optimizer.lr.linear_lr_warmup( - fluid_decay_fn(**kwargs), warmup_steps, start_lr, end_lr + base_decay_fn(**kwargs), warmup_steps, start_lr, end_lr ) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) exe.run(startup_prog) for step in range(20): # Step of NoamDecay starts from 1. - if fluid_decay_fn.__name__ == 'noam_decay': + if base_decay_fn.__name__ == 'noam_decay': step += 1 (lr_val,) = exe.run(main_prog, feed={}, fetch_list=[decayed_lr]) if step < warmup_steps: @@ -564,17 +564,17 @@ def check_decay_with_place( class TestLinearWamrupLearningRateDecayWithScalarInput(unittest.TestCase): def run_scalar_lr(self, place, lr, start_lr, end_lr): - main_prog = fluid.Program() - startup_prog = fluid.Program() + main_prog = base.Program() + startup_prog = base.Program() warmup_steps = 10 - with fluid.program_guard(main_prog, startup_prog): + with base.program_guard(main_prog, startup_prog): decayed_lr = paddle.optimizer.lr.linear_lr_warmup( lr, warmup_steps, start_lr, end_lr ) - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(startup_prog) for step in range(20): @@ -595,9 +595,9 @@ def run_scalar_lr(self, place, lr, start_lr, end_lr): def test_scalar_lr(self): def run_places(lr, start_lr, end_lr): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.run_scalar_lr(p, lr, start_lr, end_lr) diff --git a/test/legacy_test/test_lerp_op.py b/test/legacy_test/test_lerp_op.py index 7966e9a4b98f5..f85062940e31d 100644 --- a/test/legacy_test/test_lerp_op.py +++ b/test/legacy_test/test_lerp_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() np.random.seed(0) diff --git a/test/legacy_test/test_less_equal_op.py b/test/legacy_test/test_less_equal_op.py index 98b25bac4ea47..12473936c7085 100644 --- a/test/legacy_test/test_less_equal_op.py +++ b/test/legacy_test/test_less_equal_op.py @@ -19,7 +19,7 @@ import paddle from paddle import static -from paddle.fluid import core +from paddle.base import core class Test_Less_Equal_Op_Fp16(unittest.TestCase): diff --git a/test/legacy_test/test_less_than_op.py b/test/legacy_test/test_less_than_op.py index ee5adcb97470a..dccb4576db60b 100644 --- a/test/legacy_test/test_less_than_op.py +++ 
b/test/legacy_test/test_less_than_op.py @@ -19,7 +19,7 @@ import paddle from paddle import static -from paddle.fluid import core +from paddle.base import core class Test_Less_Than_Op_Fp16(unittest.TestCase): diff --git a/test/legacy_test/test_lgamma_op.py b/test/legacy_test/test_lgamma_op.py index 52c079f8a62a1..3fbbb45f235d6 100644 --- a/test/legacy_test/test_lgamma_op.py +++ b/test/legacy_test/test_lgamma_op.py @@ -20,7 +20,7 @@ from scipy import special import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/legacy_test/test_limit_by_capacity_op.py b/test/legacy_test/test_limit_by_capacity_op.py index 00fbf9b8dc68a..a897f35578a5b 100644 --- a/test/legacy_test/test_limit_by_capacity_op.py +++ b/test/legacy_test/test_limit_by_capacity_op.py @@ -18,7 +18,7 @@ import paddle from paddle.distributed.models.moe import utils -from paddle.fluid import core +from paddle.base import core def limit_by_capacity(expert_count, _capacity, n_worker): diff --git a/test/legacy_test/test_linalg_lstsq_op.py b/test/legacy_test/test_linalg_lstsq_op.py index 69792b368cf6f..ca9ec7dfb26d5 100644 --- a/test/legacy_test/test_linalg_lstsq_op.py +++ b/test/legacy_test/test_linalg_lstsq_op.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class LinalgLstsqTestCase(unittest.TestCase): @@ -95,8 +95,8 @@ def test_static(self): paddle.enable_static() for dev in self.devices: paddle.set_device(dev) - place = fluid.CPUPlace() if dev == "cpu" else fluid.CUDAPlace(0) - with fluid.program_guard(fluid.Program(), fluid.Program()): + place = base.CPUPlace() if dev == "cpu" else base.CUDAPlace(0) + with base.program_guard(base.Program(), base.Program()): x = paddle.static.data( name="x", shape=self._input_shape_1, @@ -110,9 +110,9 @@ def test_static(self): results = paddle.linalg.lstsq( x, y, rcond=self.rcond, driver=self.driver ) - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x": self._input_data_1, "y": self._input_data_2}, fetch_list=[results], ) diff --git a/test/legacy_test/test_linalg_pinv_op.py b/test/legacy_test/test_linalg_pinv_op.py index 4420d5b1faff3..fecd97e2f0b24 100644 --- a/test/legacy_test/test_linalg_pinv_op.py +++ b/test/legacy_test/test_linalg_pinv_op.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class LinalgPinvTestCase(unittest.TestCase): @@ -63,11 +63,11 @@ def test_dygraph(self): def test_static(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for place in places: - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): x = paddle.static.data( name="input", shape=self._input_shape, @@ -76,9 +76,9 @@ def test_static(self): out = paddle.linalg.pinv( x, rcond=self.rcond, hermitian=self.hermitian ) - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": self._input_data}, fetch_list=[out], ) diff --git a/test/legacy_test/test_linear.py b/test/legacy_test/test_linear.py index 8823a4f40be2e..016cb50e453d8 100644 --- 
a/test/legacy_test/test_linear.py +++ b/test/legacy_test/test_linear.py @@ -18,8 +18,8 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class LinearTestCase(unittest.TestCase): @@ -45,14 +45,14 @@ def functional(self, place): def paddle_nn_layer(self, place): paddle.disable_static(place) input = paddle.to_tensor(self.input) - weight_attr = fluid.ParamAttr( + weight_attr = base.ParamAttr( name="linear_weight", learning_rate=1.0, trainable=False, regularizer=None, initializer=paddle.nn.initializer.Constant(value=1.0), ) - bias_attr = fluid.ParamAttr( + bias_attr = base.ParamAttr( name="linear_bias", learning_rate=1.0, trainable=False, diff --git a/test/legacy_test/test_linear_interp_op.py b/test/legacy_test/test_linear_interp_op.py index 34f0eb7c25f43..d7b6848c3cbe5 100755 --- a/test/legacy_test/test_linear_interp_op.py +++ b/test/legacy_test/test_linear_interp_op.py @@ -19,8 +19,8 @@ from eager_op_test import OpTest import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard def linear_interp_np( @@ -255,8 +255,8 @@ def test_case(self): align_corners=False, data_format='NCW', ) - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(x_data) + with base.dygraph.guard(): + x = base.dygraph.to_variable(x_data) interp = us_1(x) expect = linear_interp_np( diff --git a/test/legacy_test/test_linear_interp_v2_op.py b/test/legacy_test/test_linear_interp_v2_op.py index 389b0dee1d22f..87cf56e3eb1b4 100755 --- a/test/legacy_test/test_linear_interp_v2_op.py +++ b/test/legacy_test/test_linear_interp_v2_op.py @@ -19,8 +19,8 @@ from eager_op_test import OpTest, convert_float_to_uint16, paddle_static_guard import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard from paddle.nn.functional import interpolate @@ -326,8 +326,8 @@ def test_case(self): align_corners=False, data_format='NCW', ) - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(x_data) + with base.dygraph.guard(): + x = base.dygraph.to_variable(x_data) interp = us_1(x) expect = linear_interp_np( @@ -531,7 +531,7 @@ def out_shape_error(): @unittest.skipIf( - not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA" + not base.core.is_compiled_with_cuda(), "core is not compiled with CUDA" ) class TestLinearInterpOpForFloat16(unittest.TestCase): def init_test_case(self): diff --git a/test/legacy_test/test_linspace.py b/test/legacy_test/test_linspace.py index 6468ad08c8fb5..88a2ef57acc5d 100644 --- a/test/legacy_test/test_linspace.py +++ b/test/legacy_test/test_linspace.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, convert_float_to_uint16, paddle_static_guard import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard class TestLinspaceOpCommonCase(OpTest): @@ -147,8 +147,8 @@ def test_variable_input1(self): stop = paddle.full(shape=[1], fill_value=10, dtype='float32') num = paddle.full(shape=[1], fill_value=5, dtype='int32') out = paddle.linspace(start, stop, num, dtype='float32') - exe = fluid.Executor(place=fluid.CPUPlace()) - res = exe.run(fluid.default_main_program(), fetch_list=[out]) + exe = base.Executor(place=base.CPUPlace()) + res = 
exe.run(base.default_main_program(), fetch_list=[out]) np_res = np.linspace(0, 10, 5, dtype='float32') self.assertEqual((res == np_res).all(), True) @@ -165,9 +165,9 @@ def test_dtype(self): out_1 = paddle.linspace(0, 10, 5, dtype='float32') out_2 = paddle.linspace(0, 10, 5, dtype=np.float32) out_3 = paddle.linspace(0, 10, 5, dtype=core.VarDesc.VarType.FP32) - exe = fluid.Executor(place=fluid.CPUPlace()) + exe = base.Executor(place=base.CPUPlace()) res_1, res_2, res_3 = exe.run( - fluid.default_main_program(), fetch_list=[out_1, out_2, out_3] + base.default_main_program(), fetch_list=[out_1, out_2, out_3] ) np.testing.assert_array_equal(res_1, res_2) diff --git a/test/legacy_test/test_listen_and_serv_op.py b/test/legacy_test/test_listen_and_serv_op.py index 2ff2bddfcf2bf..121490c8ae4fc 100644 --- a/test/legacy_test/test_listen_and_serv_op.py +++ b/test/legacy_test/test_listen_and_serv_op.py @@ -24,7 +24,7 @@ from multiprocessing import Process import paddle -from paddle import fluid +from paddle import base paddle.enable_static() @@ -43,8 +43,8 @@ def run_pserver(use_cuda, sync_mode, ip, port, trainers, trainer_id): sgd_optimizer = paddle.optimizer.SGD(learning_rate=0.001) sgd_optimizer.minimize(avg_cost) - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() + exe = base.Executor(place) pserver_endpoints = ip + ":" + port current_endpoint = ip + ":" + port @@ -80,8 +80,8 @@ def run_pserver_with_empty_block( sgd_optimizer = paddle.optimizer.SGD(learning_rate=0.001) sgd_optimizer.minimize(avg_cost) - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() + exe = base.Executor(place) ps1 = ip + ":" + str(int(port) + 1) ps2 = ip + ":" + port diff --git a/test/legacy_test/test_load_state_dict_from_old_format.py b/test/legacy_test/test_load_state_dict_from_old_format.py index e107a03baff6c..3b36afe0ea35f 100644 --- a/test/legacy_test/test_load_state_dict_from_old_format.py +++ b/test/legacy_test/test_load_state_dict_from_old_format.py @@ -21,8 +21,8 @@ from test_imperative_base import new_program_scope import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def convolutional_neural_network(img): @@ -78,8 +78,8 @@ def tearDown(self): def train_and_save_model(self): with new_program_scope(): - startup_program = fluid.default_startup_program() - main_program = fluid.default_main_program() + startup_program = base.default_startup_program() + main_program = base.default_main_program() img = paddle.static.data( name='img', shape=[None, 1, 28, 28], dtype='float32' @@ -91,14 +91,14 @@ def train_and_save_model(self): prediction, avg_loss = static_train_net(img, label) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) - feeder = fluid.DataFeeder(feed_list=[img, label], place=place) + feeder = base.DataFeeder(feed_list=[img, label], place=place) exe.run(startup_program) train_reader = paddle.batch( @@ -120,8 +120,8 @@ def train_and_save_model(self): break static_param_dict = {} - for param in fluid.default_main_program().all_parameters(): - static_param_dict[param.name] = fluid.executor._fetch_var( + for param in base.default_main_program().all_parameters(): + static_param_dict[param.name] = 
base.executor._fetch_var( param.name ) diff --git a/test/legacy_test/test_lod_array_length_op.py b/test/legacy_test/test_lod_array_length_op.py index f357043e0c8fb..110493a99940b 100644 --- a/test/legacy_test/test_lod_array_length_op.py +++ b/test/legacy_test/test_lod_array_length_op.py @@ -17,8 +17,8 @@ import numpy import paddle -from paddle.fluid import Program, core, program_guard -from paddle.fluid.executor import Executor +from paddle.base import Program, core, program_guard +from paddle.base.executor import Executor class TestLoDArrayLength(unittest.TestCase): diff --git a/test/legacy_test/test_lod_tensor.py b/test/legacy_test/test_lod_tensor.py index b3f34dccd1c03..4bfe899787e8b 100644 --- a/test/legacy_test/test_lod_tensor.py +++ b/test/legacy_test/test_lod_tensor.py @@ -16,9 +16,9 @@ import numpy as np -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.lod_tensor import ( +from paddle import base +from paddle.base import core +from paddle.base.lod_tensor import ( create_lod_tensor, create_random_int_lodtensor, ) @@ -26,7 +26,7 @@ class TestLoDTensor(unittest.TestCase): def test_pybind_recursive_seq_lens(self): - tensor = fluid.LoDTensor() + tensor = base.LoDTensor() recursive_seq_lens = [] tensor.set_recursive_sequence_lengths(recursive_seq_lens) recursive_seq_lens = [[], [1], [3]] @@ -43,9 +43,9 @@ def test_pybind_recursive_seq_lens(self): self.assertEqual( tensor.recursive_sequence_lengths(), recursive_seq_lens ) - tensor.set(np.random.random([6, 1]), fluid.CPUPlace()) + tensor.set(np.random.random([6, 1]), base.CPUPlace()) self.assertTrue(tensor.has_valid_recursive_sequence_lengths()) - tensor.set(np.random.random([9, 1]), fluid.CPUPlace()) + tensor.set(np.random.random([9, 1]), base.CPUPlace()) self.assertFalse(tensor.has_valid_recursive_sequence_lengths()) # Each level's sum should be equal to the number of items in the next level @@ -55,12 +55,12 @@ def test_pybind_recursive_seq_lens(self): self.assertEqual( tensor.recursive_sequence_lengths(), recursive_seq_lens ) - tensor.set(np.random.random([8, 1]), fluid.CPUPlace()) + tensor.set(np.random.random([8, 1]), base.CPUPlace()) self.assertFalse(tensor.has_valid_recursive_sequence_lengths()) recursive_seq_lens = [[2, 3], [1, 3, 1, 2, 1]] tensor.set_recursive_sequence_lengths(recursive_seq_lens) self.assertTrue(tensor.has_valid_recursive_sequence_lengths()) - tensor.set(np.random.random([9, 1]), fluid.CPUPlace()) + tensor.set(np.random.random([9, 1]), base.CPUPlace()) self.assertFalse(tensor.has_valid_recursive_sequence_lengths()) def test_create_lod_tensor(self): @@ -76,10 +76,10 @@ def test_create_lod_tensor(self): create_lod_tensor, data, wrong_recursive_seq_lens, - fluid.CPUPlace(), + base.CPUPlace(), ) tensor = create_lod_tensor( - data, correct_recursive_seq_lens, fluid.CPUPlace() + data, correct_recursive_seq_lens, base.CPUPlace() ) self.assertEqual( tensor.recursive_sequence_lengths(), correct_recursive_seq_lens @@ -94,7 +94,7 @@ def test_create_lod_tensor(self): # Create LoDTensor from numpy array data = np.random.random([10, 1]).astype('float64') recursive_seq_lens = [[2, 1], [3, 3, 4]] - tensor = create_lod_tensor(data, recursive_seq_lens, fluid.CPUPlace()) + tensor = create_lod_tensor(data, recursive_seq_lens, base.CPUPlace()) self.assertEqual( tensor.recursive_sequence_lengths(), recursive_seq_lens ) @@ -105,7 +105,7 @@ def test_create_lod_tensor(self): # Create LoDTensor from another LoDTensor, they are differnt instances new_recursive_seq_lens = [[2, 2, 1], [1, 2, 2, 3, 2]] new_tensor = 
create_lod_tensor( - tensor, new_recursive_seq_lens, fluid.CPUPlace() + tensor, new_recursive_seq_lens, base.CPUPlace() ) self.assertEqual( tensor.recursive_sequence_lengths(), recursive_seq_lens @@ -122,7 +122,7 @@ def test_create_random_int_lodtensor(self): low = 0 high = dict_size - 1 tensor = create_random_int_lodtensor( - recursive_seq_lens, shape, fluid.CPUPlace(), low, high + recursive_seq_lens, shape, base.CPUPlace(), low, high ) self.assertEqual( tensor.recursive_sequence_lengths(), recursive_seq_lens @@ -136,51 +136,51 @@ def test_print_lodtensor(self): low = 0 high = dict_size - 1 tensor = create_random_int_lodtensor( - recursive_seq_lens, shape, fluid.CPUPlace(), low, high + recursive_seq_lens, shape, base.CPUPlace(), low, high ) print(tensor) self.assertTrue(isinstance(str(tensor), str)) if core.is_compiled_with_cuda(): gtensor = create_random_int_lodtensor( - recursive_seq_lens, shape, fluid.CUDAPlace(0), low, high + recursive_seq_lens, shape, base.CUDAPlace(0), low, high ) print(gtensor) self.assertTrue(isinstance(str(gtensor), str)) def test_dlpack_support(self): - tensor = fluid.create_lod_tensor( + tensor = base.create_lod_tensor( np.array([[1], [2], [3], [4]]).astype('int'), [[1, 3]], - fluid.CPUPlace(), + base.CPUPlace(), ) dltensor = tensor._to_dlpack() - tensor_from_dlpack = fluid.core.from_dlpack(dltensor) - self.assertTrue(isinstance(tensor_from_dlpack, fluid.core.Tensor)) + tensor_from_dlpack = base.core.from_dlpack(dltensor) + self.assertTrue(isinstance(tensor_from_dlpack, base.core.Tensor)) np.testing.assert_array_equal( np.array(tensor_from_dlpack), np.array([[1], [2], [3], [4]]).astype('int'), ) # when build with cuda if core.is_compiled_with_cuda(): - gtensor = fluid.create_lod_tensor( + gtensor = base.create_lod_tensor( np.array([[1], [2], [3], [4]]).astype('int'), [[1, 3]], - fluid.CUDAPlace(0), + base.CUDAPlace(0), ) gdltensor = gtensor._to_dlpack() - gtensor_from_dlpack = fluid.core.from_dlpack(gdltensor) - self.assertTrue(isinstance(gtensor_from_dlpack, fluid.core.Tensor)) + gtensor_from_dlpack = base.core.from_dlpack(gdltensor) + self.assertTrue(isinstance(gtensor_from_dlpack, base.core.Tensor)) np.testing.assert_array_equal( np.array(gtensor_from_dlpack), np.array([[1], [2], [3], [4]]).astype('int'), ) def test_as_type(self): - tensor = fluid.create_lod_tensor( + tensor = base.create_lod_tensor( np.array([[1], [2], [3], [4]]).astype('int'), [[1, 3]], - fluid.CPUPlace(), + base.CPUPlace(), ) fp32_tensor = tensor._as_type(core.VarDesc.VarType.FP32) print(fp32_tensor) diff --git a/test/legacy_test/test_lod_tensor_array.py b/test/legacy_test/test_lod_tensor_array.py index c9d2f1554bda5..cafa79717e8ac 100644 --- a/test/legacy_test/test_lod_tensor_array.py +++ b/test/legacy_test/test_lod_tensor_array.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core class TestLoDTensorArray(unittest.TestCase): diff --git a/test/legacy_test/test_log_softmax.py b/test/legacy_test/test_log_softmax.py index 8c951ca8c8965..70da5577485ff 100644 --- a/test/legacy_test/test_log_softmax.py +++ b/test/legacy_test/test_log_softmax.py @@ -19,7 +19,7 @@ import paddle import paddle.nn.functional as F -from paddle.fluid import core +from paddle.base import core np.random.seed(10) @@ -167,7 +167,7 @@ def setUp(self): self.x = np.random.uniform(-1.0, 1.0, self.x_shape).astype(np.float32) self.place = ( paddle.CUDAPlace(0) - if paddle.fluid.core.is_compiled_with_cuda() + if paddle.base.core.is_compiled_with_cuda() else 
paddle.CPUPlace() ) @@ -201,7 +201,7 @@ def setUp(self): self.x = np.random.uniform(-1, 1, self.x_shape).astype(np.float32) self.place = ( paddle.CUDAPlace(0) - if paddle.fluid.core.is_compiled_with_cuda() + if paddle.base.core.is_compiled_with_cuda() else paddle.CPUPlace() ) diff --git a/test/legacy_test/test_logaddexp.py b/test/legacy_test/test_logaddexp.py index 1c9e66fb129ff..a55727b774dd9 100644 --- a/test/legacy_test/test_logaddexp.py +++ b/test/legacy_test/test_logaddexp.py @@ -33,7 +33,7 @@ class TestLogsumexpAPI(unittest.TestCase): def setUp(self): self.place = ( paddle.CUDAPlace(0) - if paddle.fluid.core.is_compiled_with_cuda() + if paddle.base.core.is_compiled_with_cuda() else paddle.CPUPlace() ) diff --git a/test/legacy_test/test_logcumsumexp_op.py b/test/legacy_test/test_logcumsumexp_op.py index 2643b19f68e24..ab0df6cf73a19 100644 --- a/test/legacy_test/test_logcumsumexp_op.py +++ b/test/legacy_test/test_logcumsumexp_op.py @@ -24,8 +24,8 @@ ) import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def np_naive_logcumsumexp(x: np.ndarray, axis: Optional[int] = None): @@ -149,7 +149,7 @@ def run_imperative(self): np.testing.assert_allclose(z, y.numpy(), rtol=1e-05) def run_static(self, use_gpu=False): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): data_np = np.random.random((5, 4)).astype(np.float32) x = paddle.static.data('X', [5, 4]) y = paddle.logcumsumexp(x) @@ -158,9 +158,9 @@ def run_static(self, use_gpu=False): y4 = paddle.logcumsumexp(x, dtype='float64') y5 = paddle.logcumsumexp(x, axis=-2) - place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + place = base.CUDAPlace(0) if use_gpu else base.CPUPlace() + exe = base.Executor(place) + exe.run(base.default_startup_program()) out = exe.run( feed={'X': data_np}, fetch_list=[ @@ -183,37 +183,37 @@ def run_static(self, use_gpu=False): np.testing.assert_allclose(z, out[4], rtol=1e-05) def test_cpu(self): - paddle.disable_static(paddle.fluid.CPUPlace()) + paddle.disable_static(paddle.base.CPUPlace()) self.run_imperative() paddle.enable_static() self.run_static() def test_gpu(self): - if not fluid.core.is_compiled_with_cuda(): + if not base.core.is_compiled_with_cuda(): return - paddle.disable_static(paddle.fluid.CUDAPlace(0)) + paddle.disable_static(paddle.base.CUDAPlace(0)) self.run_imperative() paddle.enable_static() self.run_static(use_gpu=True) def test_name(self): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): x = paddle.static.data('x', [3, 4]) y = paddle.logcumsumexp(x, name='out') self.assertTrue('out' in y.name) def test_type_error(self): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): with self.assertRaises(TypeError): data_np = np.random.random((100, 100), dtype=np.int32) x = paddle.static.data('X', [100, 100], dtype='int32') y = paddle.logcumsumexp(x) - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + place = base.CUDAPlace(0) + exe = base.Executor(place) + exe.run(base.default_startup_program()) out = exe.run(feed={'X': data_np}, fetch_list=[y.name]) diff --git a/test/legacy_test/test_logit_op.py b/test/legacy_test/test_logit_op.py index edec9fb1d309a..e2123bfa8f9c5 100644 --- a/test/legacy_test/test_logit_op.py +++ b/test/legacy_test/test_logit_op.py @@ -18,7 +18,7 @@ from eager_op_test import 
OpTest, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core np.random.seed(10) @@ -148,7 +148,7 @@ def setUp(self): self.x = np.random.uniform(0.0, 1.0, self.x_shape).astype(np.float32) self.place = ( paddle.CUDAPlace(0) - if paddle.fluid.core.is_compiled_with_cuda() + if paddle.base.core.is_compiled_with_cuda() else paddle.CPUPlace() ) diff --git a/test/legacy_test/test_logspace.py b/test/legacy_test/test_logspace.py index 0587846bc4841..402442ca4bc06 100644 --- a/test/legacy_test/test_logspace.py +++ b/test/legacy_test/test_logspace.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core class TestLogspaceOpCommonCase(OpTest): diff --git a/test/legacy_test/test_logsumexp.py b/test/legacy_test/test_logsumexp.py index a5184650f36d7..44c958fef6323 100644 --- a/test/legacy_test/test_logsumexp.py +++ b/test/legacy_test/test_logsumexp.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core def ref_logsumexp(x, axis=None, keepdim=False, reduce_all=False): @@ -125,7 +125,7 @@ def set_attrs(self): self.axis = [0, 1, 2, 3] def set_attrs_addition(self): - if paddle.fluid.core.is_compiled_with_rocm(): + if paddle.base.core.is_compiled_with_rocm(): self.user_defined_grads = [self.calc_grad()] self.user_defined_grad_outputs = [np.ones(1, dtype=self.dtype)] @@ -140,7 +140,7 @@ def set_attrs(self): self.reduce_all = True def set_attrs_addition(self): - if paddle.fluid.core.is_compiled_with_rocm(): + if paddle.base.core.is_compiled_with_rocm(): self.user_defined_grads = [self.calc_grad()] self.user_defined_grad_outputs = [np.ones(1, dtype=self.dtype)] @@ -239,7 +239,7 @@ def setUp(self): self.x = np.random.uniform(-1, 1, self.shape).astype(np.float32) self.place = ( paddle.CUDAPlace(0) - if paddle.fluid.core.is_compiled_with_cuda() + if paddle.base.core.is_compiled_with_cuda() else paddle.CPUPlace() ) @@ -281,7 +281,7 @@ def test_alias(self): # Test logsumexp bug class TestLogZeroError(unittest.TestCase): def test_errors(self): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): def test_0_size(): array = np.array([], dtype=np.float32) diff --git a/test/legacy_test/test_lookahead.py b/test/legacy_test/test_lookahead.py index bfcd7afd4b795..f9eeee2f651a4 100644 --- a/test/legacy_test/test_lookahead.py +++ b/test/legacy_test/test_lookahead.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid, nn +from paddle import base, nn LOOKAHEAD_K = 5 LOOKAHEAD_ALPHA = 0.2 @@ -27,13 +27,13 @@ class TestLookAhead(unittest.TestCase): def test_lookahead_static(self): paddle.enable_static() - place = fluid.CPUPlace() + place = base.CPUPlace() shape = [2, 3, 8, 8] - exe = fluid.Executor(place) - train_program = fluid.Program() - startup = fluid.Program() - with fluid.program_guard(train_program, startup): - with fluid.unique_name.guard(): + exe = base.Executor(place) + train_program = base.Program() + startup = base.Program() + with base.program_guard(train_program, startup): + with base.unique_name.guard(): data = paddle.static.data( name='X', shape=[None, 1], dtype='float32' ) diff --git a/test/legacy_test/test_lookup_table_bf16_op.py b/test/legacy_test/test_lookup_table_bf16_op.py index 48cb484f0d81d..204992f0c1c42 100644 --- a/test/legacy_test/test_lookup_table_bf16_op.py +++ b/test/legacy_test/test_lookup_table_bf16_op.py @@ -24,8 
+24,8 @@ from op import Operator import paddle -from paddle import enable_static, fluid -from paddle.fluid import core +from paddle import enable_static, base +from paddle.base import core def _lookup(weights, ids, flat_ids, op_version="lookup_table"): @@ -227,25 +227,25 @@ def setUp(self): self.flat_ids = self.ids.flatten() self.value = 3.0 self.w_fp32 = np.full(self.w_shape, self.value) - self.place = fluid.CPUPlace() - self.prog = fluid.Program() - self.startup_prog = fluid.Program() + self.place = base.CPUPlace() + self.prog = base.Program() + self.startup_prog = base.Program() self.set_initializer() - with fluid.program_guard(self.prog, self.startup_prog): + with base.program_guard(self.prog, self.startup_prog): x = paddle.static.data( name='x', shape=self.ids_shape, dtype='int64' ) self.emb = paddle.static.nn.embedding( input=x, size=self.w_shape, - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( name="emb_weight", initializer=self.initializer ), is_sparse=False, dtype="uint16", ) # bfloat16 - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) exe.run(self.startup_prog) self.result = exe.run( self.prog, feed={'x': self.ids}, fetch_list=['emb_weight', self.emb] diff --git a/test/legacy_test/test_lookup_table_op.py b/test/legacy_test/test_lookup_table_op.py index 04ac09bdce996..8a3eb0ef2da1f 100644 --- a/test/legacy_test/test_lookup_table_op.py +++ b/test/legacy_test/test_lookup_table_op.py @@ -25,7 +25,7 @@ import paddle import paddle.nn.functional as F -from paddle.fluid import Program, core, program_guard +from paddle.base import Program, core, program_guard class TestLookupTableOp(OpTest): diff --git a/test/legacy_test/test_lookup_table_v2_bf16_op.py b/test/legacy_test/test_lookup_table_v2_bf16_op.py index 54cf40210c2b6..13b15053a6e67 100644 --- a/test/legacy_test/test_lookup_table_v2_bf16_op.py +++ b/test/legacy_test/test_lookup_table_v2_bf16_op.py @@ -25,8 +25,8 @@ ) import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestLookupTableV2BF16Op(TestLookupTableBF16Op): @@ -101,25 +101,25 @@ def setUp(self): self.flat_ids = self.ids.flatten() self.value = 3.0 self.w_fp32 = np.full(self.w_shape, self.value) - self.place = fluid.CPUPlace() - self.prog = fluid.Program() - self.startup_prog = fluid.Program() + self.place = base.CPUPlace() + self.prog = base.Program() + self.startup_prog = base.Program() self.set_initializer() - with fluid.program_guard(self.prog, self.startup_prog): + with base.program_guard(self.prog, self.startup_prog): x = paddle.static.data( name='x', shape=[-1] + self.ids_shape, dtype='int64' ) self.emb = paddle.static.nn.embedding( input=x, size=self.w_shape, - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( name="emb_weight", initializer=self.initializer ), is_sparse=False, dtype="uint16", ) # bfloat16 - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) exe.run(self.startup_prog) self.result = exe.run( self.prog, feed={'x': self.ids}, fetch_list=['emb_weight', self.emb] diff --git a/test/legacy_test/test_lookup_table_v2_op.py b/test/legacy_test/test_lookup_table_v2_op.py index c02ce3f95be84..5000c3486b5e6 100644 --- a/test/legacy_test/test_lookup_table_v2_op.py +++ b/test/legacy_test/test_lookup_table_v2_op.py @@ -19,8 +19,8 @@ from op import Operator import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard class 
TestStaticGraphSupportMultipleInt(unittest.TestCase): @@ -204,14 +204,14 @@ def init_data(self): def get_w_grad(self, is_sparse): self.init_data() - main_program = fluid.Program() - with fluid.program_guard(main_program, fluid.Program()): + main_program = base.Program() + with base.program_guard(main_program, base.Program()): x = paddle.static.data(name='x', shape=[-1, 5], dtype='int64') y_ = paddle.static.data(name='y_', shape=[-1, 5], dtype='float32') emb = paddle.static.nn.embedding( input=x, size=[10, 16], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( name="emb_weight", learning_rate=10, initializer=paddle.nn.initializer.Assign(self.w_data), @@ -226,9 +226,9 @@ def get_w_grad(self, is_sparse): sgd_optimizer = paddle.optimizer.SGD(learning_rate=1e-4) sgd_optimizer.minimize(loss) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + place = base.CPUPlace() + exe = base.Executor(place) + exe.run(base.default_startup_program()) ret = exe.run( feed={'x': self.x_data, 'y_': self.y_data}, fetch_list=['emb_weight'], @@ -253,11 +253,11 @@ def test_api(self): x = paddle.static.data(name='x', shape=[-1, 20], dtype='int64') emb = paddle.static.nn.embedding(input=x, size=[128, 64]) - place = fluid.CPUPlace() + place = base.CPUPlace() x_data = np.random.randint(0, 127, [2, 20]).astype("int64") - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + exe.run(base.default_startup_program()) ret = exe.run( feed={ 'x': x_data, diff --git a/test/legacy_test/test_lr_scheduler.py b/test/legacy_test/test_lr_scheduler.py index 5a5ce8aa166dc..54484ecc6ad2c 100644 --- a/test/legacy_test/test_lr_scheduler.py +++ b/test/legacy_test/test_lr_scheduler.py @@ -18,7 +18,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core def reduce_lr_on_plateau( diff --git a/test/legacy_test/test_lrn_op.py b/test/legacy_test/test_lrn_op.py index df9b1ebccf481..2e65b92ebe532 100644 --- a/test/legacy_test/test_lrn_op.py +++ b/test/legacy_test/test_lrn_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, paddle_static_guard import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard class TestLRNOp(OpTest): @@ -110,13 +110,13 @@ def init_test_case(self): class TestLocalResponseNormFAPI(unittest.TestCase): def setUp(self): np.random.seed(123) - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) def check_static_3d_input(self, place): with paddle_static_guard(): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): in_np1 = np.random.random([3, 40, 40]).astype("float32") in_np2 = np.transpose(in_np1, (0, 2, 1)) @@ -132,9 +132,9 @@ def check_static_3d_input(self, place): res2 = paddle.nn.functional.local_response_norm( x=input2, size=5, data_format='NLC' ) - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input1": in_np1, "input2": in_np2}, fetch_list=[res1, res2], ) @@ -146,7 +146,7 @@ def check_static_3d_input(self, place): def check_static_4d_input(self, place): with paddle_static_guard(): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with 
base.program_guard(base.Program(), base.Program()): input1 = paddle.static.data( name="input1", shape=[3, 3, 40, 40], dtype="float32" ) @@ -164,9 +164,9 @@ def check_static_4d_input(self, place): in_np1 = np.random.random([3, 3, 40, 40]).astype("float32") in_np2 = np.transpose(in_np1, (0, 2, 3, 1)) - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input1": in_np1, "input2": in_np2}, fetch_list=[res1, res2], ) @@ -178,7 +178,7 @@ def check_static_4d_input(self, place): def check_static_5d_input(self, place): with paddle_static_guard(): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input1 = paddle.static.data( name="input1", shape=[3, 3, 3, 40, 40], dtype="float32" ) @@ -195,9 +195,9 @@ def check_static_5d_input(self, place): in_np1 = np.random.random([3, 3, 3, 40, 40]).astype("float32") in_np2 = np.transpose(in_np1, (0, 2, 3, 4, 1)) - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input1": in_np1, "input2": in_np2}, fetch_list=[res1, res2], ) @@ -215,7 +215,7 @@ def test_static(self): self.check_static_5d_input(place=place) def check_dygraph_3d_input(self, place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): in_np1 = np.random.random([3, 40, 40]).astype("float32") in_np2 = np.transpose(in_np1, (0, 2, 1)) @@ -233,7 +233,7 @@ def check_dygraph_3d_input(self, place): np.testing.assert_allclose(res1.numpy(), res2_tran, rtol=1e-05) def check_dygraph_4d_input(self, place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): in_np1 = np.random.random([3, 3, 40, 40]).astype("float32") in_np2 = np.transpose(in_np1, (0, 2, 3, 1)) @@ -251,7 +251,7 @@ def check_dygraph_4d_input(self, place): np.testing.assert_allclose(res1.numpy(), res2_tran, rtol=1e-05) def check_dygraph_5d_input(self, place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): in_np1 = np.random.random([3, 3, 3, 40, 40]).astype("float32") in_np2 = np.transpose(in_np1, (0, 2, 3, 4, 1)) @@ -282,10 +282,10 @@ def test_errors(self): def test_Variable(): # the input of lrn must be Variable. 
- x1 = fluid.create_lod_tensor( + x1 = base.create_lod_tensor( np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], - fluid.CPUPlace(), + base.CPUPlace(), ) paddle.nn.functional.local_response_norm(x1, size=5) @@ -327,13 +327,13 @@ def test_shape(): class TestLocalResponseNormCAPI(unittest.TestCase): def setUp(self): np.random.seed(123) - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) def test_dygraph(self): for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): in1 = paddle.rand(shape=(3, 3, 40, 40), dtype="float32") in2 = paddle.transpose(in1, [0, 2, 3, 1]) @@ -347,7 +347,7 @@ def test_dygraph(self): np.testing.assert_allclose(res1.numpy(), res2_tran, rtol=1e-05) def test_static_fp16_gpu(self): - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): place = paddle.CUDAPlace(0) with paddle_static_guard(): with paddle.static.program_guard( diff --git a/test/legacy_test/test_lstm_cudnn_op.py b/test/legacy_test/test_lstm_cudnn_op.py index 2e171913915aa..20b0066dd55a7 100644 --- a/test/legacy_test/test_lstm_cudnn_op.py +++ b/test/legacy_test/test_lstm_cudnn_op.py @@ -20,7 +20,7 @@ from eager_op_test import OpTest import paddle -from paddle.fluid import core +from paddle.base import core random.seed(2) np.set_printoptions(threshold=np.inf) diff --git a/test/legacy_test/test_lu_op.py b/test/legacy_test/test_lu_op.py index 27f6505641c63..f12d4ae6c893a 100644 --- a/test/legacy_test/test_lu_op.py +++ b/test/legacy_test/test_lu_op.py @@ -22,8 +22,8 @@ from eager_op_test import OpTest import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def scipy_lu(A, pivot): @@ -202,9 +202,9 @@ def run_lu_dygraph(shape, dtype): min_mn = min(m, n) pivot = True - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for place in places: paddle.disable_static(place) batch_size = a.size // (a.shape[-1] * a.shape[-2]) @@ -253,11 +253,11 @@ def run_lu_static(shape, dtype): pivot = True places = [] - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for place in places: - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): batch_size = a.size // (a.shape[-1] * a.shape[-2]) sP, sl, sU = scipy_lu(a, pivot) sL = np.tril(sl, -1) @@ -282,9 +282,9 @@ def run_lu_static(shape, dtype): name="input", shape=shape, dtype=dtype ) lu, p = paddle.linalg.lu(x, pivot=pivot) - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": a}, fetch_list=[lu, p], ) @@ -310,7 +310,7 @@ def run_lu_static(shape, dtype): class TestLUAPIError(unittest.TestCase): def test_errors(self): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): # The size of input in lu should not be 0. 
def test_0_size(): array = np.array([], dtype=np.float32) diff --git a/test/legacy_test/test_lu_unpack_op.py b/test/legacy_test/test_lu_unpack_op.py index 06cc404c8846c..9c6486f29cd6e 100644 --- a/test/legacy_test/test_lu_unpack_op.py +++ b/test/legacy_test/test_lu_unpack_op.py @@ -22,8 +22,8 @@ from eager_op_test import OpTest import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def scipy_lu_unpack(A): @@ -138,17 +138,17 @@ def setUp(self): lu = lu.numpy() pivots = pivots.numpy() else: - with fluid.program_guard(fluid.Program(), fluid.Program()): - place = fluid.CPUPlace() + with base.program_guard(base.Program(), base.Program()): + place = base.CPUPlace() if core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) + place = base.CUDAPlace(0) xv = paddle.static.data( name="input", shape=self.x_shape, dtype=self.dtype ) lu, p = paddle.linalg.lu(xv) - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": x}, fetch_list=[lu, p], ) @@ -228,9 +228,9 @@ def run_lu_unpack_dygraph(shape, dtype): n = a.shape[-1] min_mn = min(m, n) - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for place in places: paddle.disable_static(place) @@ -271,11 +271,11 @@ def run_lu_static(shape, dtype): n = a.shape[-1] min_mn = min(m, n) - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for place in places: - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): sP, sL, sU = scipy_lu_unpack(a) x = paddle.static.data( @@ -283,9 +283,9 @@ def run_lu_static(shape, dtype): ) lu, p = paddle.linalg.lu(x) pP, pL, pU = paddle.linalg.lu_unpack(lu, p) - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": a}, fetch_list=[pP, pL, pU], ) diff --git a/test/legacy_test/test_manual_seed.py b/test/legacy_test/test_manual_seed.py index 7af57833312e4..dccf88ae29565 100644 --- a/test/legacy_test/test_manual_seed.py +++ b/test/legacy_test/test_manual_seed.py @@ -17,13 +17,13 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.tensor import random class TestManualSeed(unittest.TestCase): def test_seed(self): - fluid.enable_dygraph() + base.enable_dygraph() gen = paddle.seed(12312321111) x = random.gaussian([10], dtype="float32") @@ -38,7 +38,7 @@ def test_seed(self): x2_np = x2.numpy() x3_np = x3.numpy() - if not fluid.core.is_compiled_with_cuda(): + if not base.core.is_compiled_with_cuda(): np.testing.assert_allclose(x1_np, x2_np, rtol=1e-05) np.testing.assert_allclose(x_np, x3_np, rtol=1e-05) diff --git a/test/legacy_test/test_margin_cross_entropy_op.py b/test/legacy_test/test_margin_cross_entropy_op.py index 439a449b58788..9c27be97fc577 100644 --- a/test/legacy_test/test_margin_cross_entropy_op.py +++ b/test/legacy_test/test_margin_cross_entropy_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16, paddle_static_guard import paddle -from paddle.fluid import Program, core, program_guard +from paddle.base import Program, core, program_guard def stable_softmax_comm(x): @@ -322,7 +322,7 @@ def setUp(self): 
paddle.framework.random._manual_program_seed(self.seed) self.places = [] if core.is_compiled_with_cuda(): - self.places.append(paddle.fluid.CUDAPlace(0)) + self.places.append(paddle.base.CUDAPlace(0)) def initParams(self): self.python_out_sig = ["Loss"] @@ -402,9 +402,9 @@ def check_static_result(self, place): reduction=self.reduction, ) - exe = paddle.fluid.Executor(place) + exe = paddle.base.Executor(place) [loss_res, softmax_res] = exe.run( - paddle.fluid.default_main_program(), + paddle.base.default_main_program(), feed={'logits': logits_np, 'label': labels_np}, fetch_list=[loss, softmax], ) @@ -416,7 +416,7 @@ def test_dynamic(self): self.check_dynamic_result(place=place) def check_dynamic_result(self, place): - with paddle.fluid.dygraph.guard(place): + with paddle.base.dygraph.guard(place): datas = np.random.uniform( -0.99, 0.99, [self.batch_dim, self.feat_dim] ).astype(self.dtype) @@ -492,7 +492,7 @@ def setUp(self): paddle.framework.random._manual_program_seed(self.seed) self.places = [] if core.is_compiled_with_cuda(): - self.places.append(paddle.fluid.CUDAPlace(0)) + self.places.append(paddle.base.CUDAPlace(0)) def initParams(self): self.python_api = python_api @@ -517,7 +517,7 @@ def init_dtype(self): def test_dynamic_errors(self): def test_dim(): for place in self.places: - with paddle.fluid.dygraph.guard(place): + with paddle.base.dygraph.guard(place): labels_np = np.random.randint( 0, self.num_class, (self.batch_dim, 2), dtype="int64" ) @@ -540,7 +540,7 @@ def test_dim(): def test_label_type(): for place in self.places: - with paddle.fluid.dygraph.guard(place): + with paddle.base.dygraph.guard(place): labels_np = np.random.uniform( 0, self.num_class, (self.batch_dim, 1) ).astype(self.dtype) @@ -563,7 +563,7 @@ def test_label_type(): def test_group_value(): for place in self.places: - with paddle.fluid.dygraph.guard(place): + with paddle.base.dygraph.guard(place): labels_np = np.random.randint( 0, self.num_class, (self.batch_dim,), dtype="int64" ) diff --git a/test/legacy_test/test_margin_rank_loss_op.py b/test/legacy_test/test_margin_rank_loss_op.py index 2b3c3b51ff383..ad40b546b81a5 100644 --- a/test/legacy_test/test_margin_rank_loss_op.py +++ b/test/legacy_test/test_margin_rank_loss_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, paddle_static_guard import paddle -from paddle import fluid +from paddle import base class TestMarginRankLossOp(OpTest): @@ -75,19 +75,19 @@ def setUp(self): self.loss = loss def test_identity(self): - place = fluid.CPUPlace() + place = base.CPUPlace() self.check_identity(place) - if fluid.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) + if base.is_compiled_with_cuda(): + place = base.CUDAPlace(0) self.check_identity(place) def check_identity(self, place): with paddle_static_guard(): - main = fluid.Program() - start = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, start): + main = base.Program() + start = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, start): label = paddle.static.data( "label", (self.batch_size, 1), "float32" ) @@ -101,7 +101,7 @@ def check_identity(self, place): x1, x2, label, self.margin, 'none' ) - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(start) (out_np,) = exe.run( main, diff --git a/test/legacy_test/test_masked_select_op.py b/test/legacy_test/test_masked_select_op.py index 6d89ced3ecc07..572e0ecb17583 100644 --- a/test/legacy_test/test_masked_select_op.py +++ b/test/legacy_test/test_masked_select_op.py @@ -18,7 +18,7 
@@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core def np_masked_select(x, mask): diff --git a/test/legacy_test/test_math_op_patch.py b/test/legacy_test/test_math_op_patch.py index 7eab8dcf4fd4b..55b3f55c45451 100644 --- a/test/legacy_test/test_math_op_patch.py +++ b/test/legacy_test/test_math_op_patch.py @@ -19,7 +19,7 @@ from decorator_helper import prog_scope import paddle -from paddle import fluid +from paddle import base class TestMathOpPatches(unittest.TestCase): @@ -36,11 +36,11 @@ def test_add_scalar(self): c = ab + 10 d = ab + a # e = a + ab - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) a_np = np.random.random(size=[10, 1]).astype('float32') b_np, c_np, d_np = exe.run( - fluid.default_main_program(), feed={"a": a_np}, fetch_list=[b, c, d] + base.default_main_program(), feed={"a": a_np}, fetch_list=[b, c, d] ) np.testing.assert_allclose(a_np + 10, b_np, rtol=1e-05) ab_np = np.concatenate([a_np, b_np], axis=1) @@ -52,11 +52,11 @@ def test_add_scalar(self): def test_radd_scalar(self): a = paddle.static.data(name="a", shape=[-1, 1]) b = 10 + a - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) a_np = np.random.random(size=[10, 1]).astype('float32') (b_np,) = exe.run( - fluid.default_main_program(), feed={"a": a_np}, fetch_list=[b] + base.default_main_program(), feed={"a": a_np}, fetch_list=[b] ) np.testing.assert_allclose(a_np + 10, b_np, rtol=1e-05) @@ -64,11 +64,11 @@ def test_radd_scalar(self): def test_sub_scalar(self): a = paddle.static.data(name="a", shape=[-1, 1]) b = a - 10 - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) a_np = np.random.random(size=[10, 1]).astype('float32') (b_np,) = exe.run( - fluid.default_main_program(), feed={"a": a_np}, fetch_list=[b] + base.default_main_program(), feed={"a": a_np}, fetch_list=[b] ) np.testing.assert_allclose(a_np - 10, b_np, rtol=1e-05) @@ -76,11 +76,11 @@ def test_sub_scalar(self): def test_rsub_scalar(self): a = paddle.static.data(name="a", shape=[-1, 1]) b = 10 - a - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) a_np = np.random.random(size=[10, 1]).astype('float32') (b_np,) = exe.run( - fluid.default_main_program(), feed={"a": a_np}, fetch_list=[b] + base.default_main_program(), feed={"a": a_np}, fetch_list=[b] ) np.testing.assert_allclose(10 - a_np, b_np, rtol=1e-05) @@ -88,11 +88,11 @@ def test_rsub_scalar(self): def test_mul_scalar(self): a = paddle.static.data(name="a", shape=[-1, 1]) b = a * 10 - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) a_np = np.random.random(size=[10, 1]).astype('float32') (b_np,) = exe.run( - fluid.default_main_program(), feed={"a": a_np}, fetch_list=[b] + base.default_main_program(), feed={"a": a_np}, fetch_list=[b] ) np.testing.assert_allclose(a_np * 10, b_np, rtol=1e-05) @@ -100,11 +100,11 @@ def test_mul_scalar(self): def test_rmul_scalar(self): a = paddle.static.data(name="a", shape=[-1, 1]) b = 10 * a - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) a_np = np.random.random(size=[10, 1]).astype('float32') (b_np,) = exe.run( - fluid.default_main_program(), feed={"a": a_np}, fetch_list=[b] + base.default_main_program(), feed={"a": a_np}, 
fetch_list=[b] ) np.testing.assert_allclose(10 * a_np, b_np, rtol=1e-05) @@ -112,11 +112,11 @@ def test_rmul_scalar(self): def test_div_scalar(self): a = paddle.static.data(name="a", shape=[-1, 1]) b = a / 10 - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) a_np = np.random.random(size=[10, 1]).astype('float32') (b_np,) = exe.run( - fluid.default_main_program(), feed={"a": a_np}, fetch_list=[b] + base.default_main_program(), feed={"a": a_np}, fetch_list=[b] ) np.testing.assert_allclose(a_np / 10, b_np, rtol=1e-05) @@ -124,12 +124,12 @@ def test_div_scalar(self): def test_rdiv_scalar(self): a = paddle.static.data(name="a", shape=[-1, 1]) b = 10 / a - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) a_np = np.random.random(size=[10, 1]).astype('float32') + 1e-2 (b_np,) = exe.run( - fluid.default_main_program(), feed={"a": a_np}, fetch_list=[b] + base.default_main_program(), feed={"a": a_np}, fetch_list=[b] ) np.testing.assert_allclose(10 / a_np, b_np, rtol=1e-05) @@ -138,12 +138,12 @@ def test_div_two_tensor(self): a = paddle.static.data(name="a", shape=[-1, 1]) b = paddle.static.data(name="b", shape=[-1, 1]) c = a / b - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) a_np = np.random.random(size=[10, 1]).astype('float32') b_np = np.random.random(size=[10, 1]).astype('float32') + 1e-2 (c_np,) = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"a": a_np, 'b': b_np}, fetch_list=[c], ) @@ -154,12 +154,12 @@ def test_mul_two_tensor(self): a = paddle.static.data(name="a", shape=[-1, 1]) b = paddle.static.data(name="b", shape=[-1, 1]) c = a * b - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) a_np = np.random.random(size=[10, 1]).astype('float32') b_np = np.random.random(size=[10, 1]).astype('float32') (c_np,) = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"a": a_np, 'b': b_np}, fetch_list=[c], ) @@ -170,12 +170,12 @@ def test_add_two_tensor(self): a = paddle.static.data(name="a", shape=[-1, 1]) b = paddle.static.data(name="b", shape=[-1, 1]) c = a + b - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) a_np = np.random.random(size=[10, 1]).astype('float32') b_np = np.random.random(size=[10, 1]).astype('float32') (c_np,) = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"a": a_np, 'b': b_np}, fetch_list=[c], ) @@ -186,12 +186,12 @@ def test_sub_two_tensor(self): a = paddle.static.data(name="a", shape=[-1, 1]) b = paddle.static.data(name="b", shape=[-1, 1]) c = a - b - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) a_np = np.random.random(size=[10, 1]).astype('float32') b_np = np.random.random(size=[10, 1]).astype('float32') (c_np,) = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"a": a_np, 'b': b_np}, fetch_list=[c], ) @@ -201,11 +201,11 @@ def test_sub_two_tensor(self): def test_integer_div(self): a = paddle.static.data(name="a", shape=[-1, 1], dtype='int64') b = a / 7 - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) a_np = np.array([3, 4, 10, 14, 9, 18]).astype('int64') (b_np,) = exe.run( - fluid.default_main_program(), feed={"a": a_np}, 
fetch_list=[b] + base.default_main_program(), feed={"a": a_np}, fetch_list=[b] ) b_np_actual = (a_np / 7).astype('float32') @@ -217,19 +217,19 @@ def test_equal(self): b = paddle.static.data(name="b", shape=[-1, 1], dtype='float32') c = a == b - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) a_np = np.array([3, 4, 10, 14, 9, 18]).astype('float32') b_np = np.array([3, 4, 11, 15, 8, 18]).astype('float32') (c_np,) = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"a": a_np, "b": b_np}, fetch_list=[c], ) np.testing.assert_array_equal(c_np, a_np == b_np) - self.assertEqual(c.dtype, fluid.core.VarDesc.VarType.BOOL) + self.assertEqual(c.dtype, base.core.VarDesc.VarType.BOOL) @prog_scope() def test_equal_and_cond(self): @@ -242,12 +242,12 @@ def test_equal_and_cond(self): cond = one == zero c = paddle.static.nn.cond(cond, lambda: a + b, lambda: a - b) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) a_np = np.array([3, 4, 10, 14, 9, 18]).astype('float32') b_np = np.array([3, 4, 11, 15, 8, 18]).astype('float32') (c_np,) = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"a": a_np, "b": b_np}, fetch_list=[c], ) @@ -259,12 +259,12 @@ def test_neg(self): a = paddle.static.data(name="a", shape=[-1, 10, 1], dtype='float32') a.desc.set_need_check_feed(False) b = -a - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) a_np = np.random.uniform(-1, 1, size=[10, 1]).astype('float32') (b_np,) = exe.run( - fluid.default_main_program(), feed={"a": a_np}, fetch_list=[b] + base.default_main_program(), feed={"a": a_np}, fetch_list=[b] ) np.testing.assert_allclose(-a_np, b_np, rtol=1e-05) @@ -273,12 +273,12 @@ def test_astype(self): a = paddle.static.data(name="a", shape=[-1, 10, 1]) a.desc.set_need_check_feed(False) b = a.astype('float32') - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) a_np = np.random.uniform(-1, 1, size=[10, 1]).astype('float64') (b_np,) = exe.run( - fluid.default_main_program(), feed={"a": a_np}, fetch_list=[b] + base.default_main_program(), feed={"a": a_np}, fetch_list=[b] ) np.testing.assert_allclose(a_np.astype('float32'), b_np, rtol=1e-05) @@ -291,9 +291,9 @@ def test_bitwise_and(self): y = paddle.static.data(name="y", shape=[2, 3, 5], dtype="int32") z = x & y - exe = fluid.Executor() + exe = base.Executor() out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x": x_np, "y": y_np}, fetch_list=[z], ) @@ -309,9 +309,9 @@ def test_bitwise_or(self): y = paddle.static.data(name="y", shape=[2, 3, 5], dtype="int32") z = x | y - exe = fluid.Executor() + exe = base.Executor() out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x": x_np, "y": y_np}, fetch_list=[z], ) @@ -327,9 +327,9 @@ def test_bitwise_xor(self): y = paddle.static.data(name="y", shape=[2, 3, 5], dtype="int32") z = x ^ y - exe = fluid.Executor() + exe = base.Executor() out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x": x_np, "y": y_np}, fetch_list=[z], ) @@ -343,9 +343,9 @@ def test_bitwise_not(self): x = paddle.static.data(name="x", shape=[2, 3, 5], dtype="int32") z = ~x - exe = fluid.Executor() + exe = base.Executor() out = exe.run( - fluid.default_main_program(), feed={"x": x_np}, fetch_list=[z] + base.default_main_program(), 
feed={"x": x_np}, fetch_list=[z] ) np.testing.assert_array_equal(out[0], out_np) @@ -357,9 +357,9 @@ def test_T(self): x = paddle.static.data(name="x", shape=[2, 8, 5, 3], dtype="int32") z = x.T - exe = fluid.Executor() + exe = base.Executor() out = exe.run( - fluid.default_main_program(), feed={"x": x_np}, fetch_list=[z] + base.default_main_program(), feed={"x": x_np}, fetch_list=[z] ) np.testing.assert_array_equal(out[0], out_np) diff --git a/test/legacy_test/test_math_op_patch_var_base.py b/test/legacy_test/test_math_op_patch_var_base.py index c392fb972697d..7a2970289fcd2 100644 --- a/test/legacy_test/test_math_op_patch_var_base.py +++ b/test/legacy_test/test_math_op_patch_var_base.py @@ -18,7 +18,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base class TestMathOpPatchesVarBase(unittest.TestCase): @@ -29,76 +29,76 @@ def setUp(self): def test_add(self): a_np = np.random.random(self.shape).astype(self.dtype) b_np = np.random.random(self.shape).astype(self.dtype) - with fluid.dygraph.guard(): - a = fluid.dygraph.to_variable(a_np) - b = fluid.dygraph.to_variable(b_np) + with base.dygraph.guard(): + a = base.dygraph.to_variable(a_np) + b = base.dygraph.to_variable(b_np) res = a + b np.testing.assert_array_equal(res.numpy(), a_np + b_np) def test_sub(self): a_np = np.random.random(self.shape).astype(self.dtype) b_np = np.random.random(self.shape).astype(self.dtype) - with fluid.dygraph.guard(): - a = fluid.dygraph.to_variable(a_np) - b = fluid.dygraph.to_variable(b_np) + with base.dygraph.guard(): + a = base.dygraph.to_variable(a_np) + b = base.dygraph.to_variable(b_np) res = a - b np.testing.assert_array_equal(res.numpy(), a_np - b_np) def test_mul(self): a_np = np.random.random(self.shape).astype(self.dtype) b_np = np.random.random(self.shape).astype(self.dtype) - with fluid.dygraph.guard(): - a = fluid.dygraph.to_variable(a_np) - b = fluid.dygraph.to_variable(b_np) + with base.dygraph.guard(): + a = base.dygraph.to_variable(a_np) + b = base.dygraph.to_variable(b_np) res = a * b np.testing.assert_array_equal(res.numpy(), a_np * b_np) def test_div(self): a_np = np.random.random(self.shape).astype(self.dtype) b_np = np.random.random(self.shape).astype(self.dtype) - with fluid.dygraph.guard(): - a = fluid.dygraph.to_variable(a_np) - b = fluid.dygraph.to_variable(b_np) + with base.dygraph.guard(): + a = base.dygraph.to_variable(a_np) + b = base.dygraph.to_variable(b_np) res = a / b # NOTE: Not sure why array_equal fails on windows, allclose is acceptable np.testing.assert_allclose(res.numpy(), a_np / b_np, rtol=1e-05) def test_add_scalar(self): a_np = np.random.random(self.shape).astype(self.dtype) - with fluid.dygraph.guard(): - a = fluid.dygraph.to_variable(a_np) + with base.dygraph.guard(): + a = base.dygraph.to_variable(a_np) b = 0.1 res = a + b np.testing.assert_array_equal(res.numpy(), a_np + b) def test_add_scalar_reverse(self): a_np = np.random.random(self.shape).astype(self.dtype) - with fluid.dygraph.guard(): - a = fluid.dygraph.to_variable(a_np) + with base.dygraph.guard(): + a = base.dygraph.to_variable(a_np) b = 0.1 res = b + a np.testing.assert_array_equal(res.numpy(), b + a_np) def test_sub_scalar(self): a_np = np.random.random(self.shape).astype(self.dtype) - with fluid.dygraph.guard(): - a = fluid.dygraph.to_variable(a_np) + with base.dygraph.guard(): + a = base.dygraph.to_variable(a_np) b = 0.1 res = a - b np.testing.assert_array_equal(res.numpy(), a_np - b) def test_sub_scalar_reverse(self): a_np = 
np.random.random(self.shape).astype(self.dtype) - with fluid.dygraph.guard(): - a = fluid.dygraph.to_variable(a_np) + with base.dygraph.guard(): + a = base.dygraph.to_variable(a_np) b = 0.1 res = b - a np.testing.assert_array_equal(res.numpy(), b - a_np) def test_mul_scalar(self): a_np = np.random.random(self.shape).astype(self.dtype) - with fluid.dygraph.guard(): - a = fluid.dygraph.to_variable(a_np) + with base.dygraph.guard(): + a = base.dygraph.to_variable(a_np) b = 0.1 res = a * b np.testing.assert_array_equal(res.numpy(), a_np * b) @@ -106,8 +106,8 @@ def test_mul_scalar(self): # div_scalar, not equal def test_div_scalar(self): a_np = np.random.random(self.shape).astype(self.dtype) - with fluid.dygraph.guard(): - a = fluid.dygraph.to_variable(a_np) + with base.dygraph.guard(): + a = base.dygraph.to_variable(a_np) b = 0.1 res = a / b np.testing.assert_allclose(res.numpy(), a_np / b, rtol=1e-05) @@ -116,27 +116,27 @@ def test_div_scalar(self): def test_pow(self): a_np = np.random.random(self.shape).astype(self.dtype) b_np = np.random.random(self.shape).astype(self.dtype) - with fluid.dygraph.guard(): - a = fluid.dygraph.to_variable(a_np) - b = fluid.dygraph.to_variable(b_np) + with base.dygraph.guard(): + a = base.dygraph.to_variable(a_np) + b = base.dygraph.to_variable(b_np) res = a**b np.testing.assert_allclose(res.numpy(), a_np**b_np, rtol=1e-05) def test_floor_div(self): a_np = np.random.randint(1, 100, size=self.shape) b_np = np.random.randint(1, 100, size=self.shape) - with fluid.dygraph.guard(): - a = fluid.dygraph.to_variable(a_np) - b = fluid.dygraph.to_variable(b_np) + with base.dygraph.guard(): + a = base.dygraph.to_variable(a_np) + b = base.dygraph.to_variable(b_np) res = a // b np.testing.assert_array_equal(res.numpy(), a_np // b_np) def test_mod(self): a_np = np.random.randint(1, 100, size=self.shape) b_np = np.random.randint(1, 100, size=self.shape) - with fluid.dygraph.guard(): - a = fluid.dygraph.to_variable(a_np) - b = fluid.dygraph.to_variable(b_np) + with base.dygraph.guard(): + a = base.dygraph.to_variable(a_np) + b = base.dygraph.to_variable(b_np) res = a % b np.testing.assert_array_equal(res.numpy(), a_np % b_np) @@ -170,10 +170,10 @@ def test_equal(self): a_np = np.asarray([1, 2, 3, 4, 5]) b_np = np.asarray([1, 2, 3, 4, 5]) c_np = np.asarray([1, 2, 2, 4, 5]) - with fluid.dygraph.guard(): - a = fluid.dygraph.to_variable(a_np) - b = fluid.dygraph.to_variable(b_np) - c = fluid.dygraph.to_variable(c_np) + with base.dygraph.guard(): + a = base.dygraph.to_variable(a_np) + b = base.dygraph.to_variable(b_np) + c = base.dygraph.to_variable(c_np) res1 = a == b res2 = a == c np.testing.assert_array_equal(res1.numpy(), a_np == b_np) @@ -183,10 +183,10 @@ def test_not_equal(self): a_np = np.asarray([1, 2, 3, 4, 5]) b_np = np.asarray([1, 2, 3, 4, 5]) c_np = np.asarray([1, 2, 2, 4, 5]) - with fluid.dygraph.guard(): - a = fluid.dygraph.to_variable(a_np) - b = fluid.dygraph.to_variable(b_np) - c = fluid.dygraph.to_variable(c_np) + with base.dygraph.guard(): + a = base.dygraph.to_variable(a_np) + b = base.dygraph.to_variable(b_np) + c = base.dygraph.to_variable(c_np) res1 = a != b res2 = a != c np.testing.assert_array_equal(res1.numpy(), a_np != b_np) @@ -195,49 +195,49 @@ def test_not_equal(self): def test_less_than(self): a_np = np.random.random(self.shape).astype(self.dtype) b_np = np.random.random(self.shape).astype(self.dtype) - with fluid.dygraph.guard(): - a = fluid.dygraph.to_variable(a_np) - b = fluid.dygraph.to_variable(b_np) + with base.dygraph.guard(): + a = 
base.dygraph.to_variable(a_np) + b = base.dygraph.to_variable(b_np) res = a < b np.testing.assert_array_equal(res.numpy(), a_np < b_np) def test_less_equal(self): a_np = np.random.random(self.shape).astype(self.dtype) b_np = np.random.random(self.shape).astype(self.dtype) - with fluid.dygraph.guard(): - a = fluid.dygraph.to_variable(a_np) - b = fluid.dygraph.to_variable(b_np) + with base.dygraph.guard(): + a = base.dygraph.to_variable(a_np) + b = base.dygraph.to_variable(b_np) res = a <= b np.testing.assert_array_equal(res.numpy(), a_np <= b_np) def test_greater_than(self): a_np = np.random.random(self.shape).astype(self.dtype) b_np = np.random.random(self.shape).astype(self.dtype) - with fluid.dygraph.guard(): - a = fluid.dygraph.to_variable(a_np) - b = fluid.dygraph.to_variable(b_np) + with base.dygraph.guard(): + a = base.dygraph.to_variable(a_np) + b = base.dygraph.to_variable(b_np) res = a > b np.testing.assert_array_equal(res.numpy(), a_np > b_np) def test_greater_equal(self): a_np = np.random.random(self.shape).astype(self.dtype) b_np = np.random.random(self.shape).astype(self.dtype) - with fluid.dygraph.guard(): - a = fluid.dygraph.to_variable(a_np) - b = fluid.dygraph.to_variable(b_np) + with base.dygraph.guard(): + a = base.dygraph.to_variable(a_np) + b = base.dygraph.to_variable(b_np) res = a >= b np.testing.assert_array_equal(res.numpy(), a_np >= b_np) def test_neg(self): a_np = np.random.uniform(-1, 1, self.shape).astype(self.dtype) - with fluid.dygraph.guard(): - a = fluid.dygraph.to_variable(a_np) + with base.dygraph.guard(): + a = base.dygraph.to_variable(a_np) res = -a np.testing.assert_array_equal(res.numpy(), -a_np) def test_float_int_long(self): - with fluid.dygraph.guard(): - a = fluid.dygraph.to_variable(np.array([100.1])) + with base.dygraph.guard(): + a = base.dygraph.to_variable(np.array([100.1])) self.assertTrue(float(a) == 100.1) self.assertTrue(int(a) == 100) self.assertTrue(int(a) == 100) @@ -249,13 +249,13 @@ def test_float_int_long(self): def test_len(self): a_np = np.random.uniform(-1, 1, self.shape).astype(self.dtype) - with fluid.dygraph.guard(): - a = fluid.dygraph.to_variable(a_np) + with base.dygraph.guard(): + a = base.dygraph.to_variable(a_np) self.assertTrue(len(a) == 10) def test_index(self): - with fluid.dygraph.guard(): - var1 = fluid.dygraph.to_variable(np.array([2])) + with base.dygraph.guard(): + var1 = base.dygraph.to_variable(np.array([2])) i_tmp = 0 for i in range(var1): self.assertTrue(i == i_tmp) @@ -276,7 +276,7 @@ def test_index(self): self.assertTrue(str1[var1] == 's') def test_np_left_mul(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): t = np.sqrt(2.0 * np.pi) x = paddle.ones((2, 2), dtype="float32") y = t * x @@ -291,16 +291,16 @@ def test_np_left_mul(self): def test_add_different_dtype(self): a_np = np.random.random(self.shape).astype(np.float32) b_np = np.random.random(self.shape).astype(np.float16) - with fluid.dygraph.guard(): - a = fluid.dygraph.to_variable(a_np) - b = fluid.dygraph.to_variable(b_np) + with base.dygraph.guard(): + a = base.dygraph.to_variable(a_np) + b = base.dygraph.to_variable(b_np) res = a + b np.testing.assert_array_equal(res.numpy(), a_np + b_np) def test_floordiv_different_dtype(self): a_np = np.full(self.shape, 10, np.int64) b_np = np.full(self.shape, 2, np.int32) - with fluid.dygraph.guard(): + with base.dygraph.guard(): a = paddle.to_tensor(a_np) b = paddle.to_tensor(b_np) res = a // b @@ -308,11 +308,11 @@ def test_floordiv_different_dtype(self): def test_astype(self): a_np = 
np.random.uniform(-1, 1, self.shape).astype(self.dtype) - with fluid.dygraph.guard(): - a = fluid.dygraph.to_variable(a_np) + with base.dygraph.guard(): + a = base.dygraph.to_variable(a_np) res1 = a.astype(np.float16) res2 = a.astype('float16') - res3 = a.astype(fluid.core.VarDesc.VarType.FP16) + res3 = a.astype(base.core.VarDesc.VarType.FP16) self.assertEqual(res1.dtype, res2.dtype) self.assertEqual(res1.dtype, res3.dtype) @@ -323,11 +323,11 @@ def test_astype(self): def test_conpare_op_broadcast(self): a_np = np.random.uniform(-1, 1, [10, 1, 10]).astype(self.dtype) b_np = np.random.uniform(-1, 1, [1, 1, 10]).astype(self.dtype) - with fluid.dygraph.guard(): - a = fluid.dygraph.to_variable(a_np) - b = fluid.dygraph.to_variable(b_np) + with base.dygraph.guard(): + a = base.dygraph.to_variable(a_np) + b = base.dygraph.to_variable(b_np) - self.assertEqual((a != b).dtype, fluid.core.VarDesc.VarType.BOOL) + self.assertEqual((a != b).dtype, base.core.VarDesc.VarType.BOOL) np.testing.assert_array_equal((a != b).numpy(), a_np != b_np) def test_tensor_patch_method(self): @@ -400,7 +400,7 @@ def test_tensor_patch_method(self): ) d = d.matmul(d.t()) # ROCM not support cholesky - if not fluid.core.is_compiled_with_rocm(): + if not base.core.is_compiled_with_rocm(): np.testing.assert_array_equal( d.cholesky().numpy(), paddle.cholesky(d).numpy() ) @@ -611,8 +611,8 @@ def test_tensor_patch_method(self): def test_complex_scalar(self): a_np = np.random.random(self.shape).astype(self.dtype) - with fluid.dygraph.guard(): - a = fluid.dygraph.to_variable(a_np) + with base.dygraph.guard(): + a = base.dygraph.to_variable(a_np) res = 1j * a np.testing.assert_array_equal(res.numpy(), 1j * a_np) @@ -621,7 +621,7 @@ def test_matmul(self): y_np = np.random.uniform(-1, 1, [3, 2]).astype(self.dtype) except_out = x_np @ y_np - with fluid.dygraph.guard(): + with base.dygraph.guard(): x = paddle.to_tensor(x_np) y = paddle.to_tensor(y_np) out = x @ y diff --git a/test/legacy_test/test_matmul_int8_op.py b/test/legacy_test/test_matmul_int8_op.py index b1aa9d3286845..f482f7b194446 100644 --- a/test/legacy_test/test_matmul_int8_op.py +++ b/test/legacy_test/test_matmul_int8_op.py @@ -18,7 +18,7 @@ from test_sparse_attention_op import get_cuda_version import paddle -from paddle.fluid import core +from paddle.base import core paddle.disable_static() diff --git a/test/legacy_test/test_matmul_op.py b/test/legacy_test/test_matmul_op.py index b5ac23f3e38ad..cf2fd10d5dcf2 100644 --- a/test/legacy_test/test_matmul_op.py +++ b/test/legacy_test/test_matmul_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, paddle_static_guard import paddle -from paddle import fluid +from paddle import base def generate_compatible_shapes(dim_X, dim_Y, transpose_X, transpose_Y): @@ -168,11 +168,11 @@ def generate_compatible_shapes_ndim(dim, transpose_X, transpose_Y): class API_TestMm(unittest.TestCase): def test_out(self): with paddle_static_guard(): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): x = paddle.static.data(name="x", shape=[2], dtype="float64") y = paddle.static.data(name='y', shape=[2], dtype='float64') result = paddle.mm(x, y) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) data1 = np.random.rand(2) data2 = np.random.rand(2) np_res = exe.run( @@ -191,12 +191,12 @@ def test_out(self): ) def test_dygraph_without_out(self): - device = fluid.CPUPlace() - with fluid.dygraph.guard(device): + device = base.CPUPlace() + with base.dygraph.guard(device): input_array1 = 
np.random.rand(3, 4).astype("float64") input_array2 = np.random.rand(4, 3).astype("float64") - data1 = fluid.dygraph.to_variable(input_array1) - data2 = fluid.dygraph.to_variable(input_array2) + data1 = base.dygraph.to_variable(input_array1) + data2 = base.dygraph.to_variable(input_array2) out = paddle.mm(data1, data2) expected_result = np.matmul(input_array1, input_array2) np.testing.assert_allclose(expected_result, out.numpy(), rtol=1e-05) @@ -204,12 +204,12 @@ def test_dygraph_without_out(self): class Test_API_Matmul(unittest.TestCase): def test_dygraph_without_out(self): - device = fluid.CPUPlace() - with fluid.dygraph.guard(device): + device = base.CPUPlace() + with base.dygraph.guard(device): input_array1 = np.random.rand(3, 4).astype("float64") input_array2 = np.random.rand(4, 3).astype("float64") - data1 = fluid.dygraph.to_variable(input_array1) - data2 = fluid.dygraph.to_variable(input_array2) + data1 = base.dygraph.to_variable(input_array1) + data2 = base.dygraph.to_variable(input_array2) out = paddle.matmul(data1, data2) expected_result = np.matmul(input_array1, input_array2) np.testing.assert_allclose(expected_result, out.numpy(), rtol=1e-05) @@ -220,7 +220,7 @@ def test_errors(self): with paddle_static_guard(): def test_error1(): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): data1 = paddle.static.data( name="data1", shape=[10, 2], dtype="float32" ) @@ -232,7 +232,7 @@ def test_error1(): self.assertRaises(ValueError, test_error1) def test_error2(): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): data1 = paddle.static.data( name="data1", shape=[-1, 10, 2], dtype="float32" ) @@ -244,7 +244,7 @@ def test_error2(): test_error2() def test_error3(): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): data1 = paddle.static.data( name="data1", shape=[10, 10, 2], dtype="float32" ) diff --git a/test/legacy_test/test_matmul_v2_op.py b/test/legacy_test/test_matmul_v2_op.py index fa939fd29d48e..681a24fd243c4 100644 --- a/test/legacy_test/test_matmul_v2_op.py +++ b/test/legacy_test/test_matmul_v2_op.py @@ -19,8 +19,8 @@ from testsuite import create_op import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def reference_matmul(X, Y, transpose_X=False, transpose_Y=False): @@ -494,12 +494,12 @@ def test_check_grad(self): class TestMatMulV2API(unittest.TestCase): def setUp(self): - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) def check_static_result(self, place): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input_x = paddle.static.data( name="input_x", shape=[4, 3], dtype="float32" ) @@ -512,9 +512,9 @@ def check_static_result(self, place): x_np = np.random.random([4, 3]).astype("float32") y_np = np.random.random([3, 4]).astype("float32") - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input_x": x_np, "input_y": y_np}, fetch_list=[result], ) @@ -525,7 +525,7 @@ def test_static(self): def test_dygraph(self): for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): 
input_x = np.random.random([4, 3]).astype("float64") input_y = np.random.random([3, 4]).astype("float64") x = paddle.to_tensor(input_x) @@ -536,7 +536,7 @@ def test_dygraph_fp16(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_x = np.random.random([4, 3]).astype("float16") input_y = np.random.random([3, 4]).astype("float16") x = paddle.to_tensor(input_x) @@ -547,7 +547,7 @@ def test_compute_type_fp32(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): paddle.set_flags( {'FLAGS_gemm_use_half_precision_compute_type': False} ) @@ -573,7 +573,7 @@ def test_compute_type_fp16_nan(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) if core.is_float16_supported(place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): paddle.set_flags( {'FLAGS_gemm_use_half_precision_compute_type': True} ) @@ -605,8 +605,8 @@ def setUp(self): self.init_input_output() self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), + 'X': OpTest.np_dtype_to_base_dtype(self.x), + 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.attrs = {'axis': -1, 'use_mkldnn': False} self.outputs = {'Out': self.out} @@ -658,8 +658,8 @@ def setUp(self): self.init_input_output() self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), + 'X': OpTest.np_dtype_to_base_dtype(self.x), + 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.attrs = {'axis': -1, 'use_mkldnn': False} self.outputs = {'Out': self.out} @@ -720,8 +720,8 @@ def setUp(self): self.init_input_output() self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), + 'X': OpTest.np_dtype_to_base_dtype(self.x), + 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.attrs = {'axis': -1, 'use_mkldnn': False} self.outputs = {'Out': self.out} @@ -746,8 +746,8 @@ def setUp(self): self.init_input_output() self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), + 'X': OpTest.np_dtype_to_base_dtype(self.x), + 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.attrs = {'axis': -1, 'use_mkldnn': False} self.outputs = {'Out': self.out} @@ -772,8 +772,8 @@ def setUp(self): self.init_input_output() self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), + 'X': OpTest.np_dtype_to_base_dtype(self.x), + 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.attrs = {'axis': -1, 'use_mkldnn': False} self.outputs = {'Out': self.out} @@ -798,8 +798,8 @@ def setUp(self): self.init_input_output() self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), + 'X': OpTest.np_dtype_to_base_dtype(self.x), + 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.attrs = {'axis': -1, 'use_mkldnn': False} self.outputs = {'Out': self.out} diff --git a/test/legacy_test/test_matrix_nms_op.py b/test/legacy_test/test_matrix_nms_op.py index 80ac3aa4d1775..9500e6fbf6f06 100644 --- a/test/legacy_test/test_matrix_nms_op.py +++ b/test/legacy_test/test_matrix_nms_op.py @@ -19,7 +19,7 @@ from eager_op_test import OpTest import paddle -from paddle.fluid import Program, program_guard +from paddle.base import Program, program_guard def 
python_matrix_nms( diff --git a/test/legacy_test/test_matrix_power_op.py b/test/legacy_test/test_matrix_power_op.py index cc4be16fdfaf9..e9967a7ec3e1c 100644 --- a/test/legacy_test/test_matrix_power_op.py +++ b/test/legacy_test/test_matrix_power_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -243,12 +243,12 @@ def config(self): class TestMatrixPowerAPI(unittest.TestCase): def setUp(self): np.random.seed(123) - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) def check_static_result(self, place): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input_x = paddle.static.data( name="input_x", shape=[4, 4], dtype="float64" ) @@ -256,9 +256,9 @@ def check_static_result(self, place): input_np = np.random.random([4, 4]).astype("float64") result_np = np.linalg.matrix_power(input_np, -2) - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input_x": input_np}, fetch_list=[result], ) @@ -272,7 +272,7 @@ def test_static(self): def test_dygraph(self): for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([4, 4]).astype("float64") input = paddle.to_tensor(input_np) result = paddle.linalg.matrix_power(input, -2) @@ -338,12 +338,12 @@ def test_errors(self): class TestMatrixPowerSingularAPI(unittest.TestCase): def setUp(self): - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) def check_static_result(self, place): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input = paddle.static.data( name="input", shape=[4, 4], dtype="float64" ) @@ -351,10 +351,10 @@ def check_static_result(self, place): input_np = np.zeros([4, 4]).astype("float64") - exe = fluid.Executor(place) + exe = base.Executor(place) try: fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": input_np}, fetch_list=[result], ) @@ -371,9 +371,9 @@ def test_static(self): def test_dygraph(self): for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.ones([4, 4]).astype("float64") - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) try: result = paddle.linalg.matrix_power(input, -2) except RuntimeError as ex: diff --git a/test/legacy_test/test_matrix_rank_op.py b/test/legacy_test/test_matrix_rank_op.py index 860447cb556b3..1a8bbb5b4636c 100644 --- a/test/legacy_test/test_matrix_rank_op.py +++ b/test/legacy_test/test_matrix_rank_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() SEED = 2049 @@ -167,12 +167,12 @@ def test_dygraph(self): def test_static(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for 
place in places: - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): x_np = np.random.rand(3, 4, 7, 7).astype(np.float64) tol_np = np.random.random([3, 4]).astype(np.float32) x_pd = paddle.static.data( @@ -185,41 +185,41 @@ def test_static(self): rank_pd = paddle.linalg.matrix_rank( x_pd, tol_pd, hermitian=False ) - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"X": x_np, "TolTensor": tol_np}, fetch_list=[rank_pd], ) np.testing.assert_allclose(fetches[0], rank_np, rtol=1e-05) for place in places: - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): x_np = np.random.rand(3, 4, 7, 7).astype(np.float64) x_pd = paddle.static.data( name="X", shape=[3, 4, 7, 7], dtype='float64' ) rank_np = np.linalg.matrix_rank(x_np, hermitian=True) rank_pd = paddle.linalg.matrix_rank(x_pd, hermitian=True) - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"X": x_np}, fetch_list=[rank_pd], ) np.testing.assert_allclose(fetches[0], rank_np, rtol=1e-05) for place in places: - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): x_np = np.random.rand(3, 4, 7, 7).astype(np.float64) x_pd = paddle.static.data( name="X", shape=[3, 4, 7, 7], dtype='float64' ) rank_np = np.linalg.matrix_rank(x_np, 0.1, hermitian=False) rank_pd = paddle.linalg.matrix_rank(x_pd, 0.1, hermitian=False) - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"X": x_np}, fetch_list=[rank_pd], ) diff --git a/test/legacy_test/test_max_min_amax_amin_op.py b/test/legacy_test/test_max_min_amax_amin_op.py index 400a0c1eeb776..b5184bd3acd20 100644 --- a/test/legacy_test/test_max_min_amax_amin_op.py +++ b/test/legacy_test/test_max_min_amax_amin_op.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -28,9 +28,9 @@ def setUp(self): self.init_case() self.cal_np_out_and_gradient() self.place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) def init_case(self): @@ -92,18 +92,18 @@ def _choose_paddle_func(self, func, x): # We check the output between paddle API and numpy in static graph. 
def test_static_graph(self): def _test_static_graph(func): - startup_program = fluid.Program() - train_program = fluid.Program() - with fluid.program_guard(startup_program, train_program): + startup_program = base.Program() + train_program = base.Program() + with base.program_guard(startup_program, train_program): x = paddle.static.data( name='input', dtype=self.dtype, shape=self.shape ) x.stop_gradient = False out = self._choose_paddle_func(func, x) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) res = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={'input': self.x_np}, fetch_list=[out], ) diff --git a/test/legacy_test/test_max_op.py b/test/legacy_test/test_max_op.py index 90fe8561b866a..580b8423bffef 100644 --- a/test/legacy_test/test_max_op.py +++ b/test/legacy_test/test_max_op.py @@ -19,7 +19,7 @@ from test_sum_op import TestReduceOPTensorAxisBase import paddle -from paddle.fluid import core +from paddle.base import core class ApiMaxTest(unittest.TestCase): diff --git a/test/legacy_test/test_maximum_op.py b/test/legacy_test/test_maximum_op.py index 544f2c090096b..818bdb65fee68 100644 --- a/test/legacy_test/test_maximum_op.py +++ b/test/legacy_test/test_maximum_op.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core class ApiMaximumTest(unittest.TestCase): diff --git a/test/legacy_test/test_maxout_op.py b/test/legacy_test/test_maxout_op.py index b6d339c3aab28..06b855ce2364a 100644 --- a/test/legacy_test/test_maxout_op.py +++ b/test/legacy_test/test_maxout_op.py @@ -19,7 +19,7 @@ import paddle import paddle.nn.functional as F -from paddle.fluid import core +from paddle.base import core paddle.enable_static() np.random.seed(1) diff --git a/test/legacy_test/test_mean_op.py b/test/legacy_test/test_mean_op.py index 5f56926304183..023a8aceddde3 100644 --- a/test/legacy_test/test_mean_op.py +++ b/test/legacy_test/test_mean_op.py @@ -21,8 +21,8 @@ from test_sum_op import TestReduceOPTensorAxisBase import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard np.random.seed(10) @@ -106,7 +106,7 @@ def test_check_output(self): def test_checkout_grad(self): place = core.CUDAPlace(0) if core.is_float16_supported(place): - with fluid.dygraph.guard(): + with base.dygraph.guard(): x_np = np.random.random((10, 10)).astype(self.dtype) x = paddle.to_tensor(x_np) x.stop_gradient = False @@ -461,19 +461,19 @@ def test_case(x, axis=None, keepdim=False): test_case(self.x, [0, 1, 2, 3]) paddle.enable_static() - def test_fluid_api(self): - with fluid.program_guard(fluid.Program(), fluid.Program()): + def test_base_api(self): + with base.program_guard(base.Program(), base.Program()): x = paddle.static.data("x", shape=[10, 10], dtype="float32") out = paddle.mean(x=x, axis=1) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) x_np = np.random.rand(10, 10).astype(np.float32) res = exe.run(feed={"x": x_np}, fetch_list=[out]) np.testing.assert_allclose(res[0], np.mean(x_np, axis=1), rtol=1e-05) - with fluid.dygraph.guard(): + with base.dygraph.guard(): x_np = np.random.rand(10, 10).astype(np.float32) - x = fluid.dygraph.to_variable(x_np) + x = base.dygraph.to_variable(x_np) out = paddle.mean(x=x, axis=1) np.testing.assert_allclose( out.numpy(), np.mean(x_np, axis=1), rtol=1e-05 @@ -537,9 +537,9 @@ def func(self, place): def 
test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -568,9 +568,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) diff --git a/test/legacy_test/test_memcpy_op.py b/test/legacy_test/test_memcpy_op.py index 8dd8e363af792..768c1bec79c9d 100755 --- a/test/legacy_test/test_memcpy_op.py +++ b/test/legacy_test/test_memcpy_op.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard class TestMemcpy_FillConstant(unittest.TestCase): @@ -72,8 +72,8 @@ def test_gpu_copy_to_pinned(self): outputs={'Out': pinned_var}, attrs={'dst_place_type': 2}, ) - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) + place = base.CUDAPlace(0) + exe = base.Executor(place) gpu_, pinned_ = exe.run( main_program, feed={}, fetch_list=[gpu_var.name, pinned_var.name] ) @@ -88,8 +88,8 @@ def test_pinned_copy_gpu(self): outputs={'Out': gpu_var}, attrs={'dst_place_type': 1}, ) - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) + place = base.CUDAPlace(0) + exe = base.Executor(place) gpu_, pinned_ = exe.run( main_program, feed={}, fetch_list=[gpu_var.name, pinned_var.name] ) @@ -144,8 +144,8 @@ def test_hip_copy_bool_value(self): outputs={'Out': gpu_var}, attrs={'dst_place_type': 1}, ) - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) + place = base.CUDAPlace(0) + exe = base.Executor(place) gpu_, pinned_ = exe.run( main_program, feed={}, @@ -187,7 +187,7 @@ def test_SELECTED_ROWS(self): name="selected_row_0", dtype="float32", persistable=False, - type=fluid.core.VarDesc.VarType.SELECTED_ROWS, + type=base.core.VarDesc.VarType.SELECTED_ROWS, stop_gradient=True, ) main_program.global_block().append_op( @@ -207,8 +207,8 @@ def test_SELECTED_ROWS(self): outputs={'Out': pinned_var}, attrs={'dst_place_type': 2}, ) - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) + place = base.CUDAPlace(0) + exe = base.Executor(place) selected_row_var_, pinned_ = exe.run( main_program, feed={}, diff --git a/test/legacy_test/test_memory_efficient_attention.py b/test/legacy_test/test_memory_efficient_attention.py index c4220fe5b8077..24e1d5115f44f 100644 --- a/test/legacy_test/test_memory_efficient_attention.py +++ b/test/legacy_test/test_memory_efficient_attention.py @@ -23,7 +23,7 @@ import paddle import paddle.incubate.nn.attn_bias as ab import paddle.nn.functional as F -from paddle.fluid import core +from paddle.base import core from paddle.incubate.nn.memory_efficient_attention import ( memory_efficient_attention, ) diff --git a/test/legacy_test/test_memory_reuse_exclude_feed_var.py b/test/legacy_test/test_memory_reuse_exclude_feed_var.py index 205369bc820c5..de0f08efc99e0 100644 --- a/test/legacy_test/test_memory_reuse_exclude_feed_var.py +++ b/test/legacy_test/test_memory_reuse_exclude_feed_var.py @@ -18,7 +18,7 @@ import paddle import paddle.nn.functional as F -from paddle import fluid +from paddle import base class TestMemoryReuseExcludeFeedVar(unittest.TestCase): @@ -33,18 +33,18 @@ def main_impl(self, place): relu_image = F.relu(image) loss = paddle.mean(relu_image) 
- build_strategy = fluid.BuildStrategy() + build_strategy = base.BuildStrategy() build_strategy.enable_inplace = True build_strategy.memory_optimize = True - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + exe.run(base.default_startup_program()) - compiled_prog = fluid.CompiledProgram( - fluid.default_main_program(), build_strategy=build_strategy + compiled_prog = base.CompiledProgram( + base.default_main_program(), build_strategy=build_strategy ) - image_tensor = fluid.LoDTensor() + image_tensor = base.LoDTensor() np_image = np.random.uniform( low=-10, high=10, size=self.image_shape ).astype('float32') @@ -57,14 +57,14 @@ def main_impl(self, place): np.testing.assert_array_equal(np.array(image_tensor), np_image) def test_main(self): - places = [fluid.CPUPlace()] - if fluid.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places = [base.CPUPlace()] + if base.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) for p in places: - with fluid.program_guard(fluid.Program(), fluid.Program()): - with fluid.unique_name.guard(): - with fluid.scope_guard(fluid.Scope()): + with base.program_guard(base.Program(), base.Program()): + with base.unique_name.guard(): + with base.scope_guard(base.Scope()): self.main_impl(p) diff --git a/test/legacy_test/test_merge_selectedrows_op.py b/test/legacy_test/test_merge_selectedrows_op.py index 7c7f3a86da88c..023fadf927523 100644 --- a/test/legacy_test/test_merge_selectedrows_op.py +++ b/test/legacy_test/test_merge_selectedrows_op.py @@ -17,7 +17,7 @@ import numpy as np from op import Operator -from paddle.fluid import core +from paddle.base import core class TestMergeSelectedRows(unittest.TestCase): diff --git a/test/legacy_test/test_merged_adam_op.py b/test/legacy_test/test_merged_adam_op.py index 6ff2cb90bc039..9f254a89d6ee8 100644 --- a/test/legacy_test/test_merged_adam_op.py +++ b/test/legacy_test/test_merged_adam_op.py @@ -18,7 +18,7 @@ import paddle from paddle import _C_ops, _legacy_C_ops -from paddle.fluid.framework import in_dygraph_mode +from paddle.base.framework import in_dygraph_mode def run_adam_op( @@ -47,15 +47,15 @@ def run_adam_op( paddle.disable_static() paddle.set_device(place) - param_vars = [paddle.fluid.dygraph.to_variable(p) for p in params] - grad_vars = [paddle.fluid.dygraph.to_variable(g) for g in grads] - lr_vars = [paddle.fluid.dygraph.to_variable(l) for l in lrs] - moment1_vars = [paddle.fluid.dygraph.to_variable(m) for m in moment1s] - moment2_vars = [paddle.fluid.dygraph.to_variable(m) for m in moment2s] - beta1_pow_vars = [paddle.fluid.dygraph.to_variable(b) for b in beta1_pows] - beta2_pow_vars = [paddle.fluid.dygraph.to_variable(b) for b in beta2_pows] + param_vars = [paddle.base.dygraph.to_variable(p) for p in params] + grad_vars = [paddle.base.dygraph.to_variable(g) for g in grads] + lr_vars = [paddle.base.dygraph.to_variable(l) for l in lrs] + moment1_vars = [paddle.base.dygraph.to_variable(m) for m in moment1s] + moment2_vars = [paddle.base.dygraph.to_variable(m) for m in moment2s] + beta1_pow_vars = [paddle.base.dygraph.to_variable(b) for b in beta1_pows] + beta2_pow_vars = [paddle.base.dygraph.to_variable(b) for b in beta2_pows] master_param_vars = [ - paddle.fluid.dygraph.to_variable(m_p) for m_p in master_params + paddle.base.dygraph.to_variable(m_p) for m_p in master_params ] if not use_merged: diff --git a/test/legacy_test/test_merged_momentum_op.py b/test/legacy_test/test_merged_momentum_op.py index 08d200a911f3c..289c86fef3b4e 100644 --- 
a/test/legacy_test/test_merged_momentum_op.py +++ b/test/legacy_test/test_merged_momentum_op.py @@ -18,7 +18,7 @@ import numpy as np import paddle -from paddle.fluid.layer_helper import LayerHelper +from paddle.base.layer_helper import LayerHelper def run_momentum_op( diff --git a/test/legacy_test/test_meshgrid_op.py b/test/legacy_test/test_meshgrid_op.py index d2f7b0c2eca89..97c896e7cb8da 100644 --- a/test/legacy_test/test_meshgrid_op.py +++ b/test/legacy_test/test_meshgrid_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def meshgrid_wrapper(x): @@ -155,10 +155,10 @@ def test_api(self): out_2 = np.reshape(input_2, [1, 200]) out_2 = np.broadcast_to(out_2, [100, 200]) - exe = fluid.Executor(place=fluid.CPUPlace()) + exe = base.Executor(place=base.CPUPlace()) grid_x, grid_y = paddle.tensor.meshgrid(x, y) res_1, res_2 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={'x': input_1, 'y': input_2}, fetch_list=[grid_x, grid_y], ) @@ -191,10 +191,10 @@ def test_list_input(self): out_2 = np.reshape(input_2, [1, 200]) out_2 = np.broadcast_to(out_2, [100, 200]) - exe = fluid.Executor(place=fluid.CPUPlace()) + exe = base.Executor(place=base.CPUPlace()) grid_x, grid_y = paddle.tensor.meshgrid([x, y]) res_1, res_2 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={'x': input_1, 'y': input_2}, fetch_list=[grid_x, grid_y], ) @@ -228,10 +228,10 @@ def test_tuple_input(self): out_2 = np.reshape(input_2, [1, 200]) out_2 = np.broadcast_to(out_2, [100, 200]) - exe = fluid.Executor(place=fluid.CPUPlace()) + exe = base.Executor(place=base.CPUPlace()) grid_x, grid_y = paddle.tensor.meshgrid((x, y)) res_1, res_2 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={'x': input_1, 'y': input_2}, fetch_list=[grid_x, grid_y], ) @@ -257,9 +257,9 @@ def test_api_with_dygraph(self): ], ).astype('int32') - with fluid.dygraph.guard(): - tensor_3 = fluid.dygraph.to_variable(input_3) - tensor_4 = fluid.dygraph.to_variable(input_4) + with base.dygraph.guard(): + tensor_3 = base.dygraph.to_variable(input_3) + tensor_4 = base.dygraph.to_variable(input_4) res_3, res_4 = paddle.tensor.meshgrid(tensor_3, tensor_4) np.testing.assert_array_equal(res_3.shape, [100, 200]) @@ -283,9 +283,9 @@ def test_api_with_dygraph_list_input(self): ], ).astype('int32') - with fluid.dygraph.guard(): - tensor_3 = fluid.dygraph.to_variable(input_3) - tensor_4 = fluid.dygraph.to_variable(input_4) + with base.dygraph.guard(): + tensor_3 = base.dygraph.to_variable(input_3) + tensor_4 = base.dygraph.to_variable(input_4) res_3, res_4 = paddle.tensor.meshgrid([tensor_3, tensor_4]) np.testing.assert_array_equal(res_3.shape, [100, 200]) @@ -309,9 +309,9 @@ def test_api_with_dygraph_tuple_input(self): ], ).astype('int32') - with fluid.dygraph.guard(): - tensor_3 = fluid.dygraph.to_variable(input_3) - tensor_4 = fluid.dygraph.to_variable(input_4) + with base.dygraph.guard(): + tensor_3 = base.dygraph.to_variable(input_3) + tensor_4 = base.dygraph.to_variable(input_4) res_3, res_4 = paddle.tensor.meshgrid((tensor_3, tensor_4)) np.testing.assert_array_equal(res_3.shape, [100, 200]) @@ -360,16 +360,16 @@ def test_dygraph_api(self): ], ).astype('int32') - with fluid.dygraph.guard(): - tensor_1 = fluid.dygraph.to_variable(input_1) - tensor_2 = fluid.dygraph.to_variable(input_2) + with base.dygraph.guard(): + tensor_1 = 
base.dygraph.to_variable(input_1) + tensor_2 = base.dygraph.to_variable(input_2) tensor_1.stop_gradient = False tensor_2.stop_gradient = False res_1, res_2 = paddle.tensor.meshgrid((tensor_1, tensor_2)) sum = paddle.add_n([res_1, res_2]) sum.backward() - tensor_eager_1 = fluid.dygraph.to_variable(input_1) - tensor_eager_2 = fluid.dygraph.to_variable(input_2) + tensor_eager_1 = base.dygraph.to_variable(input_1) + tensor_eager_2 = base.dygraph.to_variable(input_2) tensor_eager_1.stop_gradient = False tensor_eager_2.stop_gradient = False res_eager_1, res_eager_2 = paddle.tensor.meshgrid( diff --git a/test/legacy_test/test_metrics.py b/test/legacy_test/test_metrics.py index eb7af9365af3b..805ac0bad28b0 100644 --- a/test/legacy_test/test_metrics.py +++ b/test/legacy_test/test_metrics.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.hapi.model import to_list @@ -158,7 +158,7 @@ def random_pred_label(self): return label, pred_one_hot def test_main(self): - with fluid.dygraph.guard(fluid.CPUPlace()): + with base.dygraph.guard(base.CPUPlace()): acc = paddle.metric.Accuracy(topk=self.topk, name=self.name) for _ in range(10): label, pred = self.random_pred_label() @@ -200,11 +200,11 @@ def setUp(self): def test_main(self): paddle.enable_static() - main_prog = fluid.Program() - startup_prog = fluid.Program() + main_prog = base.Program() + startup_prog = base.Program() main_prog.random_seed = 1024 startup_prog.random_seed = 1024 - with fluid.program_guard(main_prog, startup_prog): + with base.program_guard(main_prog, startup_prog): pred = paddle.static.data( name='pred', shape=[None, self.class_num], dtype='float32' ) @@ -214,8 +214,8 @@ def test_main(self): acc = paddle.metric.Accuracy(topk=self.topk, name=self.name) state = acc.compute(pred, label) - exe = fluid.Executor(fluid.CPUPlace()) - compiled_main_prog = fluid.CompiledProgram(main_prog) + exe = base.Executor(base.CPUPlace()) + compiled_main_prog = base.CompiledProgram(main_prog) for _ in range(10): label, pred = self.random_pred_label() diff --git a/test/legacy_test/test_min_op.py b/test/legacy_test/test_min_op.py index 5495f44f12dee..4d51ce3fbbf28 100644 --- a/test/legacy_test/test_min_op.py +++ b/test/legacy_test/test_min_op.py @@ -19,8 +19,8 @@ from test_sum_op import TestReduceOPTensorAxisBase import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class ApiMinTest(unittest.TestCase): @@ -120,7 +120,7 @@ def init_data(self): class TestMinAPIWithEmptyTensor(unittest.TestCase): def test_empty_tensor(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): with self.assertRaises(ValueError): data = np.array([], dtype=np.float32) data = np.reshape(data, [0, 0, 0, 0, 0, 0, 0]) diff --git a/test/legacy_test/test_minimum_op.py b/test/legacy_test/test_minimum_op.py index f1de0f0f10d85..6267b78b4cf9d 100644 --- a/test/legacy_test/test_minimum_op.py +++ b/test/legacy_test/test_minimum_op.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core class ApiMinimumTest(unittest.TestCase): diff --git a/test/legacy_test/test_mix_precision_all_reduce_fuse.py b/test/legacy_test/test_mix_precision_all_reduce_fuse.py index ef81e5cc3bf71..6887b2d0de631 100644 --- a/test/legacy_test/test_mix_precision_all_reduce_fuse.py +++ b/test/legacy_test/test_mix_precision_all_reduce_fuse.py @@ -20,7 +20,7 @@ from simple_nets import init_data import paddle -from 
paddle.fluid import core +from paddle.base import core batch_size = 12 img_shape = [1, 28, 28] diff --git a/test/legacy_test/test_mode_op.py b/test/legacy_test/test_mode_op.py index 15c376f57abb6..63e67882e23e1 100644 --- a/test/legacy_test/test_mode_op.py +++ b/test/legacy_test/test_mode_op.py @@ -22,8 +22,8 @@ ) import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def _mode1D(a): @@ -226,7 +226,7 @@ def test_gpu_kernel(): paddle.disable_static() test_cpu_kernel() - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): test_gpu_kernel() @@ -267,7 +267,7 @@ def test_run_static(self): class TestModeZeroError(unittest.TestCase): def test_errors(self): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): def test_0_size(): array = np.array([], dtype=np.float32) diff --git a/test/legacy_test/test_model.py b/test/legacy_test/test_model.py index 35649cce6bc54..683b002e05815 100644 --- a/test/legacy_test/test_model.py +++ b/test/legacy_test/test_model.py @@ -20,7 +20,7 @@ import numpy as np import paddle -from paddle import Model, fluid, jit, to_tensor +from paddle import Model, base, jit, to_tensor from paddle.hapi.model import prepare_distributed_context from paddle.io import Dataset, DistributedBatchSampler from paddle.metric import Accuracy @@ -163,7 +163,7 @@ def dynamic_train(model, dataloader): def dynamic_evaluate(model, dataloader): - with fluid.dygraph.no_grad(): + with base.dygraph.no_grad(): model.eval() cnt = 0 for inputs, labels in dataloader: @@ -182,15 +182,15 @@ def dynamic_evaluate(model, dataloader): @unittest.skipIf( - not fluid.is_compiled_with_cuda(), 'CPU testing is not supported' + not base.is_compiled_with_cuda(), 'CPU testing is not supported' ) class TestModel(unittest.TestCase): @classmethod def setUpClass(cls): - if not fluid.is_compiled_with_cuda(): + if not base.is_compiled_with_cuda(): cls().skipTest('module not tested when ONLY_CPU compling') cls.device = paddle.set_device('gpu') - fluid.enable_dygraph(cls.device) + base.enable_dygraph(cls.device) sp_num = 1280 cls.train_dataset = MnistDataset(mode='train', sample_num=sp_num) @@ -228,7 +228,7 @@ def setUpClass(cls): cls.weight_path = os.path.join(cls.save_dir, 'lenet') paddle.save(dy_lenet.state_dict(), cls.weight_path + '.pdparams') - fluid.disable_dygraph() + base.disable_dygraph() @classmethod def tearDownClass(cls): @@ -274,7 +274,7 @@ def test_prepare_context(self): prepare_distributed_context() def fit(self, dynamic, num_replicas=None, rank=None, num_iters=None): - fluid.enable_dygraph(self.device) if dynamic else None + base.enable_dygraph(self.device) if dynamic else None seed = 555 paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) @@ -337,10 +337,10 @@ def fit(self, dynamic, num_replicas=None, rank=None, num_iters=None): ) model.fit(train_loader, val_loader) - fluid.disable_dygraph() if dynamic else None + base.disable_dygraph() if dynamic else None def fit_with_tuple_input(self, dynamic, num_replicas=None, rank=None): - fluid.enable_dygraph(self.device) if dynamic else None + base.enable_dygraph(self.device) if dynamic else None seed = 555 paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) @@ -390,10 +390,10 @@ def fit_with_tuple_input(self, dynamic, num_replicas=None, rank=None): ) model.fit(train_loader, val_loader) - fluid.disable_dygraph() if dynamic else None + base.disable_dygraph() if dynamic else None def evaluate(self, dynamic): - 
fluid.enable_dygraph(self.device) if dynamic else None + base.enable_dygraph(self.device) if dynamic else None model = Model(LeNet(), self.inputs, self.labels) model.prepare(metrics=Accuracy()) model.load(self.weight_path) @@ -413,10 +413,10 @@ def evaluate(self, dynamic): model.evaluate(val_loader) - fluid.disable_dygraph() if dynamic else None + base.disable_dygraph() if dynamic else None def predict(self, dynamic): - fluid.enable_dygraph(self.device) if dynamic else None + base.enable_dygraph(self.device) if dynamic else None model = Model(LeNet(), self.inputs) model.prepare() model.load(self.weight_path) @@ -441,10 +441,10 @@ def predict(self, dynamic): model.evaluate(test_loader) - fluid.disable_dygraph() if dynamic else None + base.disable_dygraph() if dynamic else None def test_predict_without_inputs(self): - fluid.enable_dygraph(self.device) + base.enable_dygraph(self.device) model = Model(LeNet()) model.prepare() model.load(self.weight_path) @@ -453,7 +453,7 @@ def test_predict_without_inputs(self): self.test_dataset, batch_size=64, stack_outputs=True ) np.testing.assert_equal(output[0].shape[0], len(self.test_dataset)) - fluid.disable_dygraph() + base.disable_dygraph() def test_summary_gpu(self): paddle.disable_static(self.device) @@ -494,7 +494,7 @@ def test_train_batch(self, dynamic=True): label = np.random.randint(0, 10, size=(4, 1)).astype(np.int64) def get_expect(): - fluid.enable_dygraph(fluid.CPUPlace()) + base.enable_dygraph(base.CPUPlace()) self.set_seed() m = MyModel() optim = paddle.optimizer.SGD( @@ -507,13 +507,13 @@ def get_expect(): avg_loss.backward() optim.minimize(avg_loss) m.clear_gradients() - fluid.disable_dygraph() + base.disable_dygraph() return avg_loss.numpy() ref = get_expect() for dynamic in [True, False]: device = paddle.set_device('cpu') - fluid.enable_dygraph(device) if dynamic else None + base.enable_dygraph(device) if dynamic else None self.set_seed() net = MyModel() @@ -527,25 +527,25 @@ def get_expect(): model.prepare(optim2, loss=CrossEntropyLoss(reduction="sum")) (loss,) = model.train_batch([data], [label]) np.testing.assert_allclose(loss.flatten(), ref.flatten()) - fluid.disable_dygraph() if dynamic else None + base.disable_dygraph() if dynamic else None def test_test_batch(self): dim = 20 data = np.random.random(size=(4, dim)).astype(np.float32) def get_expect(): - fluid.enable_dygraph(fluid.CPUPlace()) + base.enable_dygraph(base.CPUPlace()) self.set_seed() m = MyModel() m.eval() output = m(to_tensor(data)) - fluid.disable_dygraph() + base.disable_dygraph() return output.numpy() ref = get_expect() for dynamic in [True, False]: device = paddle.set_device('cpu') - fluid.enable_dygraph(device) if dynamic else None + base.enable_dygraph(device) if dynamic else None self.set_seed() net = MyModel() inputs = [InputSpec([None, dim], 'float32', 'x')] @@ -554,7 +554,7 @@ def get_expect(): (out,) = model.predict_batch([data]) np.testing.assert_allclose(out, ref, rtol=1e-6) - fluid.disable_dygraph() if dynamic else None + base.disable_dygraph() if dynamic else None def test_save_load(self): path = os.path.join(tempfile.mkdtemp(), '.cache_test_save_load') @@ -562,7 +562,7 @@ def test_save_load(self): os.makedirs(path) for dynamic in [True, False]: device = paddle.set_device('cpu') - fluid.enable_dygraph(device) if dynamic else None + base.enable_dygraph(device) if dynamic else None net = MyModel() inputs = [InputSpec([None, 20], 'float32', 'x')] labels = [InputSpec([None, 1], 'int64', 'label')] @@ -575,7 +575,7 @@ def test_save_load(self): ) 
model.save(path) model.load(path) - fluid.disable_dygraph() if dynamic else None + base.disable_dygraph() if dynamic else None shutil.rmtree(path) def test_dynamic_load(self): @@ -616,14 +616,14 @@ def test_dynamic_save_static_load(self): os.makedirs(path) # dynamic saving device = paddle.set_device('cpu') - fluid.enable_dygraph(device) + base.enable_dygraph(device) model = Model(MyModel()) optim = paddle.optimizer.SGD( learning_rate=0.001, parameters=model.parameters() ) model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum")) model.save(path) - fluid.disable_dygraph() + base.disable_dygraph() inputs = [InputSpec([None, 20], 'float32', 'x')] labels = [InputSpec([None, 1], 'int64', 'label')] @@ -652,7 +652,7 @@ def test_static_save_dynamic_load(self): model.save(path) device = paddle.set_device('cpu') - fluid.enable_dygraph(device) # if dynamic else None + base.enable_dygraph(device) # if dynamic else None net = MyModel() inputs = [InputSpec([None, 20], 'float32', 'x')] @@ -664,12 +664,12 @@ def test_static_save_dynamic_load(self): model.prepare(optimizer=optim, loss=CrossEntropyLoss(reduction="sum")) model.load(path) shutil.rmtree(path) - fluid.disable_dygraph() + base.disable_dygraph() def test_parameters(self): for dynamic in [True, False]: device = paddle.set_device('cpu') - fluid.enable_dygraph(device) if dynamic else None + base.enable_dygraph(device) if dynamic else None net = MyModel() inputs = [InputSpec([None, 20], 'float32', 'x')] model = Model(net, inputs) @@ -677,7 +677,7 @@ def test_parameters(self): params = model.parameters() self.assertTrue(params[0].shape[0] == 20) self.assertTrue(params[0].shape[1] == 10) - fluid.disable_dygraph() if dynamic else None + base.disable_dygraph() if dynamic else None def test_summary(self): def _get_param_from_state_dict(state_dict): @@ -688,7 +688,7 @@ def _get_param_from_state_dict(state_dict): for dynamic in [True, False]: device = paddle.set_device('cpu') - fluid.enable_dygraph(device) if dynamic else None + base.enable_dygraph(device) if dynamic else None net = MyModel() inputs = [InputSpec([None, 20], 'float32', 'x')] model = Model(net, inputs) @@ -837,16 +837,16 @@ def test_export_deploy_model(self): model.save(save_dir, training=False) ori_results = model.predict_batch(tensor_img) - fluid.disable_dygraph() if dynamic else None + base.disable_dygraph() if dynamic else None place = ( - fluid.CPUPlace() - if not fluid.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + base.CPUPlace() + if not base.is_compiled_with_cuda() + else base.CUDAPlace(0) ) - new_scope = fluid.Scope() - with fluid.scope_guard(new_scope): - exe = fluid.Executor(place) + new_scope = base.Scope() + with base.scope_guard(new_scope): + exe = base.Executor(place) [ inference_program, feed_target_names, @@ -984,7 +984,7 @@ def make_optimizer(parameters=None): # dynamic test device = paddle.set_device('cpu') - fluid.enable_dygraph(device) + base.enable_dygraph(device) net = MyModel() inputs = [InputSpec([None, 20], 'float32', 'x')] labels = [InputSpec([None, 1], 'int64', 'label')] @@ -1047,7 +1047,7 @@ def make_optimizer(parameters=None): # dynamic test device = paddle.set_device('cpu') - fluid.enable_dygraph(device) + base.enable_dygraph(device) net = MyModel() inputs = [InputSpec([None, 20], 'float32', 'x')] labels = [InputSpec([None, 1], 'int64', 'label')] diff --git a/test/legacy_test/test_modelaverage.py b/test/legacy_test/test_modelaverage.py index 920167906f70d..29d192eeebad9 100644 --- a/test/legacy_test/test_modelaverage.py +++ 
b/test/legacy_test/test_modelaverage.py @@ -17,20 +17,20 @@ import numpy as np import paddle -from paddle import fluid, nn +from paddle import base, nn class TestModelAverage(unittest.TestCase): def test_model_average_static(self): paddle.enable_static() - place = fluid.CPUPlace() + place = base.CPUPlace() shape = [2, 3, 8, 8] - exe = fluid.Executor(place) - train_program = fluid.Program() - startup = fluid.Program() - test_program = fluid.Program() - with fluid.program_guard(train_program, startup): - with fluid.unique_name.guard(): + exe = base.Executor(place) + train_program = base.Program() + startup = base.Program() + test_program = base.Program() + with base.program_guard(train_program, startup): + with base.unique_name.guard(): data = paddle.static.data( name='X', shape=[None, 1], dtype='float32' ) diff --git a/test/legacy_test/test_momentum_op.py b/test/legacy_test/test_momentum_op.py index b23183996c025..67296e516ab78 100644 --- a/test/legacy_test/test_momentum_op.py +++ b/test/legacy_test/test_momentum_op.py @@ -20,8 +20,8 @@ from op import Operator import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def calculate_momentum_by_numpy( @@ -249,7 +249,7 @@ def setUp(self): def test_check_output(self): paddle.enable_static() if core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) + place = base.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place(place, check_dygraph=False) @@ -528,7 +528,7 @@ def init_args(self): def test_sparse_momentum(self): if core.is_compiled_with_cuda(): - self.check_with_place(fluid.CUDAPlace(0)) + self.check_with_place(base.CUDAPlace(0)) class TestSparseMomentumOpWithMultiPrecision2( @@ -555,9 +555,9 @@ def test_momentum_dygraph(self): def test_momentum(self): paddle.enable_static() - place = fluid.CPUPlace() - main = fluid.Program() - with fluid.program_guard(main): + place = base.CPUPlace() + main = base.Program() + with base.program_guard(main): x = paddle.static.data(name='x', shape=[-1, 13], dtype='float32') y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') y_predict = paddle.static.nn.fc(x, size=1, activation=None) @@ -575,9 +575,9 @@ def test_momentum(self): train_reader = paddle.batch( paddle.dataset.uci_housing.train(), batch_size=1 ) - feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + feeder = base.DataFeeder(place=place, feed_list=[x, y]) + exe = base.Executor(place) + exe.run(base.default_startup_program()) for data in train_reader(): exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list) @@ -692,9 +692,9 @@ def test_momentum_dygraph_1(self): def test_momentum_static(self): paddle.enable_static() - place = fluid.CPUPlace() - main = fluid.Program() - with fluid.program_guard(main): + place = base.CPUPlace() + main = base.Program() + with base.program_guard(main): x = paddle.static.data(name='x', shape=[-1, 13], dtype='float32') y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') y_predict = paddle.static.nn.fc(x, size=1, activation=None) @@ -712,9 +712,9 @@ def test_momentum_static(self): train_reader = paddle.batch( paddle.dataset.uci_housing.train(), batch_size=1 ) - feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + feeder = base.DataFeeder(place=place, feed_list=[x, y]) + exe = base.Executor(place) + exe.run(base.default_startup_program()) for 
data in train_reader(): exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list) @@ -810,7 +810,7 @@ def __update_params(self, momentum, linear): momentum.minimize(loss) linear.clear_gradients() - def __test_vs(self, place=fluid.CPUPlace()): + def __test_vs(self, place=base.CPUPlace()): paddle.disable_static(place=place) linear_old = paddle.nn.Linear( @@ -847,10 +847,10 @@ def __test_vs(self, place=fluid.CPUPlace()): 'the param weight updated by two Momentum optimizers should equal', ) - def test_vs(self, place=fluid.CPUPlace()): - places = [fluid.CPUPlace()] - if paddle.fluid.core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + def test_vs(self, place=base.CPUPlace()): + places = [base.CPUPlace()] + if paddle.base.core.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) for place in places: self.__test_vs(place=place) diff --git a/test/legacy_test/test_monitor.py b/test/legacy_test/test_monitor.py index 37fb9cb898a63..8f856c2ea1aae 100644 --- a/test/legacy_test/test_monitor.py +++ b/test/legacy_test/test_monitor.py @@ -23,8 +23,8 @@ import tempfile import unittest -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestDatasetWithStat(unittest.TestCase): @@ -76,21 +76,21 @@ def test_dataset_run_with_stat(self): dataset._set_fea_eval(1, True) dataset.slots_shuffle(["slot1"]) - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) + exe = base.Executor(base.CPUPlace()) + exe.run(base.default_startup_program()) if self.use_data_loader: - data_loader = fluid.io.DataLoader.from_dataset( - dataset, fluid.cpu_places(), self.drop_last + data_loader = base.io.DataLoader.from_dataset( + dataset, base.cpu_places(), self.drop_last ) for i in range(self.epoch_num): for data in data_loader(): - exe.run(fluid.default_main_program(), feed=data) + exe.run(base.default_main_program(), feed=data) else: for i in range(self.epoch_num): try: exe.train_from_dataset( - fluid.default_main_program(), + base.default_main_program(), dataset, fetch_list=[embs[0], embs[1]], fetch_info=["emb0", "emb1"], diff --git a/test/legacy_test/test_mse_loss.py b/test/legacy_test/test_mse_loss.py index 37fb0a6a95883..688895240a374 100644 --- a/test/legacy_test/test_mse_loss.py +++ b/test/legacy_test/test_mse_loss.py @@ -17,9 +17,9 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.executor import Executor +from paddle import base +from paddle.base import core +from paddle.base.executor import Executor class TestMseLoss(unittest.TestCase): @@ -41,10 +41,10 @@ def test_mse_loss(self): for use_cuda in ( [False, True] if core.is_compiled_with_cuda() else [False] ): - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() exe = Executor(place) (result,) = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": input_val, "label": label_val}, fetch_list=[output], ) @@ -79,14 +79,14 @@ def test_NNMseLoss_mean(self): input_np = np.random.uniform(0.1, 0.5, dim).astype("float32") label_np = np.random.uniform(0.1, 0.5, dim).astype("float32") paddle.enable_static() - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with 
fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[-1] + dim, dtype='float32' ) @@ -98,18 +98,18 @@ def test_NNMseLoss_mean(self): mse_loss = paddle.nn.loss.MSELoss() ret = mse_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) (static_result,) = exe.run( prog, feed={"input": input_np, "label": label_np}, fetch_list=[ret], ) - with fluid.dygraph.guard(): + with base.dygraph.guard(): mse_loss = paddle.nn.loss.MSELoss() dy_ret = mse_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np), + base.dygraph.to_variable(input_np), + base.dygraph.to_variable(label_np), ) dy_result = dy_ret.numpy() @@ -125,14 +125,14 @@ def test_NNMseLoss_sum(self): input_np = np.random.uniform(0.1, 0.5, dim).astype("float32") label_np = np.random.uniform(0.1, 0.5, dim).astype("float32") paddle.enable_static() - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[-1] + dim, dtype='float32' ) @@ -144,18 +144,18 @@ def test_NNMseLoss_sum(self): mse_loss = paddle.nn.loss.MSELoss(reduction='sum') ret = mse_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) (static_result,) = exe.run( prog, feed={"input": input_np, "label": label_np}, fetch_list=[ret], ) - with fluid.dygraph.guard(): + with base.dygraph.guard(): mse_loss = paddle.nn.loss.MSELoss(reduction='sum') dy_ret = mse_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np), + base.dygraph.to_variable(input_np), + base.dygraph.to_variable(label_np), ) dy_result = dy_ret.numpy() @@ -171,14 +171,14 @@ def test_NNMseLoss_none(self): input_np = np.random.uniform(0.1, 0.5, dim).astype("float32") label_np = np.random.uniform(0.1, 0.5, dim).astype("float32") paddle.enable_static() - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[-1] + dim, dtype='float32' ) @@ -190,18 +190,18 @@ def test_NNMseLoss_none(self): mse_loss = paddle.nn.loss.MSELoss(reduction='none') ret = mse_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) (static_result,) = exe.run( prog, feed={"input": input_np, "label": label_np}, fetch_list=[ret], ) - with fluid.dygraph.guard(): + with base.dygraph.guard(): mse_loss = paddle.nn.loss.MSELoss(reduction='none') dy_ret = mse_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np), + base.dygraph.to_variable(input_np), + base.dygraph.to_variable(label_np), ) dy_result = dy_ret.numpy() diff --git a/test/legacy_test/test_mul_nn_grad.py b/test/legacy_test/test_mul_nn_grad.py index 10ea654c84305..eb30a0e1a5068 100644 --- a/test/legacy_test/test_mul_nn_grad.py +++ b/test/legacy_test/test_mul_nn_grad.py @@ -19,8 +19,8 @@ from decorator_helper import prog_scope 
import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -57,9 +57,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) diff --git a/test/legacy_test/test_mul_op.py b/test/legacy_test/test_mul_op.py index 9ce95d354d7af..d1b5d5f02843b 100644 --- a/test/legacy_test/test_mul_op.py +++ b/test/legacy_test/test_mul_op.py @@ -17,7 +17,7 @@ import numpy as np -from paddle.fluid import core +from paddle.base import core sys.path.append("..") from eager_op_test import OpTest, convert_float_to_uint16 diff --git a/test/legacy_test/test_multi_dot_op.py b/test/legacy_test/test_multi_dot_op.py index dc6f5c4810cf9..8a3557b6d7f14 100644 --- a/test/legacy_test/test_multi_dot_op.py +++ b/test/legacy_test/test_multi_dot_op.py @@ -19,7 +19,7 @@ from numpy.linalg import multi_dot import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/legacy_test/test_multi_label_soft_margin_loss.py b/test/legacy_test/test_multi_label_soft_margin_loss.py index af8481df1719c..c2e68191ffc36 100644 --- a/test/legacy_test/test_multi_label_soft_margin_loss.py +++ b/test/legacy_test/test_multi_label_soft_margin_loss.py @@ -96,7 +96,7 @@ def test_static( def test_dygraph( place, input_np, label_np, weight=None, reduction='mean', functional=False ): - with paddle.fluid.dygraph.base.guard(): + with paddle.base.dygraph.base.guard(): input = paddle.to_tensor(input_np) label = paddle.to_tensor(label_np) if weight is not None: diff --git a/test/legacy_test/test_multiclass_nms_op.py b/test/legacy_test/test_multiclass_nms_op.py index 6878513ce8e2b..2059eff7d738b 100644 --- a/test/legacy_test/test_multiclass_nms_op.py +++ b/test/legacy_test/test_multiclass_nms_op.py @@ -20,8 +20,8 @@ import paddle from paddle import _C_ops -from paddle.fluid import core -from paddle.fluid.layer_helper import LayerHelper +from paddle.base import core +from paddle.base.layer_helper import LayerHelper def multiclass_nms3( diff --git a/test/legacy_test/test_multinomial_op.py b/test/legacy_test/test_multinomial_op.py index 882437f73fa68..de58d6e8dd903 100644 --- a/test/legacy_test/test_multinomial_op.py +++ b/test/legacy_test/test_multinomial_op.py @@ -20,8 +20,8 @@ from test_attribute_var import UnittestBase import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard def sample_output_one_dimension(out, dim): @@ -306,16 +306,16 @@ def test_dygraph4(self): def test_static(self): paddle.enable_static() - startup_program = fluid.Program() - train_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): + startup_program = base.Program() + train_program = base.Program() + with base.program_guard(train_program, startup_program): x = paddle.static.data('x', shape=[4], dtype='float32') out = paddle.multinomial(x, num_samples=100000, replacement=True) - place = fluid.CPUPlace() - if fluid.core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) + place = base.CPUPlace() + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) + exe = base.Executor(place) exe.run(startup_program) x_np = np.random.rand(4).astype('float32') diff --git 
a/test/legacy_test/test_multiplex_op.py b/test/legacy_test/test_multiplex_op.py index 64886a232f68f..c860825c66e40 100644 --- a/test/legacy_test/test_multiplex_op.py +++ b/test/legacy_test/test_multiplex_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest import paddle -from paddle import fluid +from paddle import base class TestMultiplexOp(OpTest): @@ -62,7 +62,7 @@ def test_check_grad_ignore_x3(self): class TestMultiplexOpError(unittest.TestCase): def test_errors(self): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): x1 = paddle.static.data(name='x1', shape=[None, 2], dtype='int64') x2 = paddle.static.data(name='x2', shape=[None, 2], dtype='int64') index = paddle.static.data( @@ -111,7 +111,7 @@ def test_multiplex_dygraph(self): paddle.enable_static() def test_dygraph_api(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): img1 = np.array([[1, 2], [3, 4]]).astype(np.float32) img2 = np.array([[5, 6], [7, 8]]).astype(np.float32) inputs = [paddle.to_tensor(img1), paddle.to_tensor(img2)] diff --git a/test/legacy_test/test_multiprocess_dataloader_dataset.py b/test/legacy_test/test_multiprocess_dataloader_dataset.py index 7eb99ef7006e7..d10d51d6a0241 100755 --- a/test/legacy_test/test_multiprocess_dataloader_dataset.py +++ b/test/legacy_test/test_multiprocess_dataloader_dataset.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.io import ( ChainDataset, ComposeDataset, @@ -61,7 +61,7 @@ def run_main(self, num_workers, places): paddle.static.default_startup_program().random_seed = 1 paddle.static.default_main_program().random_seed = 1 place = paddle.CPUPlace() - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([16, 3, 4]).astype('float32') input = paddle.to_tensor(input_np) label_np = np.random.random([16, 1]).astype('int32') @@ -82,8 +82,8 @@ def run_main(self, num_workers, places): assert len(label) == 1 assert input.shape == [1, 3, 4] assert label.shape == [1, 1] - assert isinstance(input, fluid.core.eager.Tensor) - assert isinstance(label, fluid.core.eager.Tensor) + assert isinstance(input, base.core.eager.Tensor) + assert isinstance(label, base.core.eager.Tensor) np.testing.assert_allclose(input.numpy(), input_np[i]) np.testing.assert_allclose(label.numpy(), label_np[i]) @@ -180,8 +180,8 @@ def assert_basic(input, label): assert len(label) == 1 assert input.shape == [1, 3, 4] assert label.shape == [1, 1] - assert isinstance(input, fluid.core.eager.Tensor) - assert isinstance(label, fluid.core.eager.Tensor) + assert isinstance(input, base.core.eager.Tensor) + assert isinstance(label, base.core.eager.Tensor) elements_list = [] for _, (input, label) in enumerate(dataloader()): @@ -261,7 +261,7 @@ def run_main(self, num_workers, places): paddle.static.default_startup_program().random_seed = 1 paddle.static.default_main_program().random_seed = 1 place = paddle.CPUPlace() - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): dataset = NumpyMixTensorDataset(16) assert len(dataset) == 16 dataloader = DataLoader( @@ -277,8 +277,8 @@ def run_main(self, num_workers, places): assert len(label) == 1 assert input.shape == [1, IMAGE_SIZE] assert label.shape == [1, 1] - assert isinstance(input, fluid.core.eager.Tensor) - assert isinstance(label, fluid.core.eager.Tensor) + assert isinstance(input, base.core.eager.Tensor) + assert isinstance(label, base.core.eager.Tensor) class 
ComplextDataset(Dataset): @@ -306,7 +306,7 @@ def run_main(self, num_workers): paddle.static.default_startup_program().random_seed = 1 paddle.static.default_main_program().random_seed = 1 place = paddle.CPUPlace() - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): dataset = ComplextDataset(16) assert len(dataset) == 16 dataloader = DataLoader( @@ -362,7 +362,7 @@ def run_main(self, num_workers): paddle.static.default_startup_program().random_seed = 1 paddle.static.default_main_program().random_seed = 1 place = paddle.CPUPlace() - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): self.init_dataset() dataloader = DataLoader( self.dataset, @@ -373,7 +373,7 @@ def run_main(self, num_workers): ) for i, data in enumerate(dataloader()): - assert isinstance(data, fluid.core.eager.Tensor) + assert isinstance(data, base.core.eager.Tensor) assert data.shape == [2, 2, 3] def test_main(self): diff --git a/test/legacy_test/test_multiprocess_dataloader_dynamic.py b/test/legacy_test/test_multiprocess_dataloader_dynamic.py index debc1cbf20630..c60d92153e530 100644 --- a/test/legacy_test/test_multiprocess_dataloader_dynamic.py +++ b/test/legacy_test/test_multiprocess_dataloader_dynamic.py @@ -29,7 +29,7 @@ ) import paddle -from paddle import fluid +from paddle import base from paddle.io import DataLoader from paddle.nn import Linear @@ -76,9 +76,9 @@ def forward(self, image): class TestDygraphDataLoader(unittest.TestCase): def run_main(self, num_workers, places, persistent_workers): - fluid.default_startup_program().random_seed = 1 - fluid.default_main_program().random_seed = 1 - with fluid.dygraph.guard(places[0]): + base.default_startup_program().random_seed = 1 + base.default_main_program().random_seed = 1 + with base.dygraph.guard(places[0]): fc_net = SimpleFCNet() optimizer = paddle.optimizer.Adam(parameters=fc_net.parameters()) @@ -147,9 +147,9 @@ def test_main(self): class TestDygraphDataLoaderWithBatchedDataset(TestDygraphDataLoader): def run_main(self, num_workers, places, persistent_workers): - fluid.default_startup_program().random_seed = 1 - fluid.default_main_program().random_seed = 1 - with fluid.dygraph.guard(places[0]): + base.default_startup_program().random_seed = 1 + base.default_main_program().random_seed = 1 + with base.dygraph.guard(places[0]): fc_net = SimpleFCNet() optimizer = paddle.optimizer.Adam(parameters=fc_net.parameters()) diff --git a/test/legacy_test/test_multiprocess_dataloader_exception.py b/test/legacy_test/test_multiprocess_dataloader_exception.py index bfd08f703c4f6..398e3bf4b99be 100644 --- a/test/legacy_test/test_multiprocess_dataloader_exception.py +++ b/test/legacy_test/test_multiprocess_dataloader_exception.py @@ -17,8 +17,8 @@ import numpy as np -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.io import BatchSampler, DataLoader, Dataset, IterableDataset from paddle.io.dataloader.worker import _worker_loop @@ -39,8 +39,8 @@ def __len__(self): class TestDataLoaderAssert(unittest.TestCase): def test_main(self): - place = fluid.cpu_places()[0] - with fluid.dygraph.guard(place): + place = base.cpu_places()[0] + with base.dygraph.guard(place): dataset = RandomDataset(100) batch_sampler = BatchSampler(dataset=dataset, batch_size=4) @@ -147,8 +147,8 @@ def test_main(self): class TestDataLoaderWorkerLoop(unittest.TestCase): def run_without_worker_done(self, use_shared_memory=True): try: - place = fluid.cpu_places()[0] - with fluid.dygraph.guard(place): + place = 
base.cpu_places()[0] + with base.dygraph.guard(place): dataset = RandomDataset(800) # test init_fn @@ -204,8 +204,8 @@ def _collate_fn(sample_list): def run_with_worker_done(self, use_shared_memory=True): try: - place = fluid.CPUPlace() - with fluid.dygraph.guard(place): + place = base.CPUPlace() + with base.dygraph.guard(place): dataset = RandomDataset(800) # test init_fn diff --git a/test/legacy_test/test_multiprocess_dataloader_iterable_dataset_dynamic.py b/test/legacy_test/test_multiprocess_dataloader_iterable_dataset_dynamic.py index 5ee06b58674c3..164a6531d3ecc 100644 --- a/test/legacy_test/test_multiprocess_dataloader_iterable_dataset_dynamic.py +++ b/test/legacy_test/test_multiprocess_dataloader_iterable_dataset_dynamic.py @@ -29,7 +29,7 @@ ) import paddle -from paddle import fluid +from paddle import base from paddle.io import DataLoader from paddle.nn import Linear @@ -77,9 +77,9 @@ def forward(self, image): class TestDygraphDataLoader(unittest.TestCase): def run_main(self, num_workers, places, persistent_workers): - fluid.default_startup_program().random_seed = 1 - fluid.default_main_program().random_seed = 1 - with fluid.dygraph.guard(places[0]): + base.default_startup_program().random_seed = 1 + base.default_main_program().random_seed = 1 + with base.dygraph.guard(places[0]): fc_net = SimpleFCNet() optimizer = paddle.optimizer.Adam(parameters=fc_net.parameters()) @@ -146,9 +146,9 @@ def test_main(self): class TestDygraphDataLoaderWithBatchedDataset(TestDygraphDataLoader): def run_main(self, num_workers, places, persistent_workers): - fluid.default_startup_program().random_seed = 1 - fluid.default_main_program().random_seed = 1 - with fluid.dygraph.guard(places[0]): + base.default_startup_program().random_seed = 1 + base.default_main_program().random_seed = 1 + with base.dygraph.guard(places[0]): fc_net = SimpleFCNet() optimizer = paddle.optimizer.Adam(parameters=fc_net.parameters()) diff --git a/test/legacy_test/test_multiprocess_dataloader_iterable_dataset_split.py b/test/legacy_test/test_multiprocess_dataloader_iterable_dataset_split.py index 94d25c82ac541..a158194992060 100644 --- a/test/legacy_test/test_multiprocess_dataloader_iterable_dataset_split.py +++ b/test/legacy_test/test_multiprocess_dataloader_iterable_dataset_split.py @@ -17,7 +17,7 @@ import numpy as np -from paddle import fluid +from paddle import base from paddle.io import DataLoader, IterableDataset, get_worker_info @@ -47,8 +47,8 @@ def __iter__(self): class TestDynamicDataLoaderIterSplit(unittest.TestCase): def test_main(self): - place = fluid.CPUPlace() - with fluid.dygraph.guard(place): + place = base.CPUPlace() + with base.dygraph.guard(place): dataset = RangeIterableDatasetSplit(0, 10) dataloader = DataLoader( dataset, @@ -77,8 +77,8 @@ def __iter__(self): class TestDynamicDataLoaderIterInitFuncSplit(unittest.TestCase): def test_main(self): - place = fluid.CPUPlace() - with fluid.dygraph.guard(place): + place = base.CPUPlace() + with base.dygraph.guard(place): dataset = RangeIterableDataset(0, 10) def worker_spliter(worker_id): diff --git a/test/legacy_test/test_multiprocess_dataloader_iterable_dataset_static.py b/test/legacy_test/test_multiprocess_dataloader_iterable_dataset_static.py index a2c0ec547fbcb..cf7ff971ca711 100644 --- a/test/legacy_test/test_multiprocess_dataloader_iterable_dataset_static.py +++ b/test/legacy_test/test_multiprocess_dataloader_iterable_dataset_static.py @@ -19,7 +19,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.io 
import DataLoader, IterableDataset EPOCH_NUM = 2 @@ -45,13 +45,13 @@ def __iter__(self): def simple_fc_net_static(): - startup_prog = fluid.Program() - main_prog = fluid.Program() + startup_prog = base.Program() + main_prog = base.Program() startup_prog.random_seed = 1 main_prog.random_seed = 1 - with fluid.unique_name.guard(): - with fluid.program_guard(main_prog, startup_prog): + with base.unique_name.guard(): + with base.program_guard(main_prog, startup_prog): image = paddle.static.data( name='image', shape=[None, IMAGE_SIZE], dtype='float32' ) @@ -59,10 +59,10 @@ def simple_fc_net_static(): name='label', shape=[None, 1], dtype='int64' ) hidden = image - param_attr = fluid.ParamAttr( + param_attr = base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.8) ) - bias_attr = fluid.ParamAttr( + bias_attr = base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.5) ) for hidden_size in [10, 20, 30]: @@ -98,10 +98,10 @@ def simple_fc_net_static(): def prepare_places(with_cpu=False, with_gpu=True): places = [] if with_cpu: - places.append([fluid.CPUPlace()]) + places.append([base.CPUPlace()]) - if with_gpu and fluid.core.is_compiled_with_cuda(): - tmp = fluid.cuda_places()[:2] + if with_gpu and base.core.is_compiled_with_cuda(): + tmp = base.cuda_places()[:2] assert len(tmp) > 0, "no gpu detected" places.append([tmp[0]]) return places @@ -109,8 +109,8 @@ def prepare_places(with_cpu=False, with_gpu=True): class TestStaticDataLoader(unittest.TestCase): def run_main(self, num_workers, places, persistent_workers): - scope = fluid.Scope() - with fluid.scope_guard(scope): + scope = base.Scope() + with base.scope_guard(scope): startup_prog, main_prog, image, label, loss = simple_fc_net_static() dataset = RandomDataset(SAMPLE_NUM, CLASS_NUM) @@ -126,10 +126,10 @@ def run_main(self, num_workers, places, persistent_workers): ) # assert len(dataloader) == int(SAMPLE_NUM / BATCH_SIZE) - exe = fluid.Executor(place=places[0]) + exe = base.Executor(place=places[0]) exe.run(startup_prog) - prog = fluid.CompiledProgram(main_prog) + prog = base.CompiledProgram(main_prog) step_list = [] loss_list = [] @@ -212,8 +212,8 @@ def __iter__(self): class TestStaticDataLoaderWithBatchedDataset(TestStaticDataLoader): def run_main(self, num_workers, places, persistent_workers): - scope = fluid.Scope() - with fluid.scope_guard(scope): + scope = base.Scope() + with base.scope_guard(scope): startup_prog, main_prog, image, label, loss = simple_fc_net_static() dataset = RandomBatchedDataset(SAMPLE_NUM, CLASS_NUM) @@ -228,7 +228,7 @@ def run_main(self, num_workers, places, persistent_workers): persistent_workers=persistent_workers, ) - exe = fluid.Executor(place=places[0]) + exe = base.Executor(place=places[0]) exe.run(startup_prog) prog = main_prog diff --git a/test/legacy_test/test_multiprocess_dataloader_static.py b/test/legacy_test/test_multiprocess_dataloader_static.py index 478aec47f7f25..fed4534c52a02 100644 --- a/test/legacy_test/test_multiprocess_dataloader_static.py +++ b/test/legacy_test/test_multiprocess_dataloader_static.py @@ -19,7 +19,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.io import DataLoader, Dataset EPOCH_NUM = 3 @@ -45,13 +45,13 @@ def __len__(self): def simple_fc_net_static(): - startup_prog = fluid.Program() - main_prog = fluid.Program() + startup_prog = base.Program() + main_prog = base.Program() startup_prog.random_seed = 1 main_prog.random_seed = 1 - with fluid.unique_name.guard(): - with fluid.program_guard(main_prog, 
startup_prog): + with base.unique_name.guard(): + with base.program_guard(main_prog, startup_prog): image = paddle.static.data( name='image', shape=[None, IMAGE_SIZE], dtype='float32' ) @@ -59,10 +59,10 @@ def simple_fc_net_static(): name='label', shape=[None, 1], dtype='int64' ) hidden = image - param_attr = fluid.ParamAttr( + param_attr = base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.8) ) - bias_attr = fluid.ParamAttr( + bias_attr = base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.5) ) for hidden_size in [10, 20, 30]: @@ -98,10 +98,10 @@ def simple_fc_net_static(): def prepare_places(with_cpu=False, with_gpu=True): places = [] if with_cpu: - places.append([fluid.CPUPlace()]) + places.append([base.CPUPlace()]) - if with_gpu and fluid.core.is_compiled_with_cuda(): - tmp = fluid.cuda_places()[:2] + if with_gpu and base.core.is_compiled_with_cuda(): + tmp = base.cuda_places()[:2] assert len(tmp) > 0, "no gpu detected" places.append([tmp[0]]) return places @@ -109,8 +109,8 @@ def prepare_places(with_cpu=False, with_gpu=True): class TestStaticDataLoader(unittest.TestCase): def run_main(self, num_workers, places, persistent_workers): - scope = fluid.Scope() - with fluid.scope_guard(scope): + scope = base.Scope() + with base.scope_guard(scope): startup_prog, main_prog, image, label, loss = simple_fc_net_static() dataset = RandomDataset(SAMPLE_NUM, CLASS_NUM) @@ -126,7 +126,7 @@ def run_main(self, num_workers, places, persistent_workers): ) assert len(dataloader) == int(SAMPLE_NUM / BATCH_SIZE) - exe = fluid.Executor(place=places[0]) + exe = base.Executor(place=places[0]) exe.run(startup_prog) prog = main_prog @@ -193,12 +193,12 @@ def test_main(self): class TestStaticDataLoaderReturnList(unittest.TestCase): def run_single_place(self, num_workers): - scope = fluid.Scope() + scope = base.Scope() image = paddle.static.data( name='image', shape=[None, IMAGE_SIZE], dtype='float32' ) label = paddle.static.data(name='label', shape=[None, 1], dtype='int64') - with fluid.scope_guard(scope): + with base.scope_guard(scope): dataset = RandomDataset(SAMPLE_NUM, CLASS_NUM) dataloader = DataLoader( dataset, @@ -216,19 +216,19 @@ def run_single_place(self, num_workers): assert not isinstance(d[1], list) def run_multi_place(self, num_workers): - scope = fluid.Scope() + scope = base.Scope() image = paddle.static.data( name='image', shape=[None, IMAGE_SIZE], dtype='float32' ) label = paddle.static.data(name='label', shape=[None, 1], dtype='int64') - with fluid.scope_guard(scope): + with base.scope_guard(scope): dataset = RandomDataset(SAMPLE_NUM, CLASS_NUM) dataloader = DataLoader( dataset, feed_list=[image, label], num_workers=num_workers, batch_size=BATCH_SIZE, - places=[fluid.CPUPlace()] * 2, + places=[base.CPUPlace()] * 2, drop_last=True, return_list=True, ) @@ -270,8 +270,8 @@ def __len__(self): class TestStaticDataLoaderWithBatchedDataset(TestStaticDataLoader): def run_main(self, num_workers, places, persistent_workers): - scope = fluid.Scope() - with fluid.scope_guard(scope): + scope = base.Scope() + with base.scope_guard(scope): startup_prog, main_prog, image, label, loss = simple_fc_net_static() dataset = RandomBatchedDataset(SAMPLE_NUM, CLASS_NUM) @@ -287,10 +287,10 @@ def run_main(self, num_workers, places, persistent_workers): ) assert len(dataloader) == int(SAMPLE_NUM / BATCH_SIZE) - exe = fluid.Executor(place=places[0]) + exe = base.Executor(place=places[0]) exe.run(startup_prog) - prog = fluid.CompiledProgram(main_prog) + prog = base.CompiledProgram(main_prog) 
step_list = [] loss_list = [] diff --git a/test/legacy_test/test_multiprocess_reader_exception.py b/test/legacy_test/test_multiprocess_reader_exception.py index a32552c282d69..e885cffcb4fb0 100644 --- a/test/legacy_test/test_multiprocess_reader_exception.py +++ b/test/legacy_test/test_multiprocess_reader_exception.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.reader import multiprocess_reader @@ -31,10 +31,10 @@ def setUp(self): self.raise_exception = False def places(self): - if fluid.is_compiled_with_cuda(): - return [fluid.CPUPlace(), fluid.CUDAPlace(0)] + if base.is_compiled_with_cuda(): + return [base.CPUPlace(), base.CUDAPlace(0)] else: - return [fluid.CPUPlace()] + return [base.CPUPlace()] def main_impl(self, place, iterable): sample_num = 40 @@ -52,11 +52,11 @@ def __impl__(): return __impl__ - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): image = paddle.static.data( name='image', dtype='float32', shape=[None, 10] ) - reader = fluid.io.DataLoader.from_generator( + reader = base.io.DataLoader.from_generator( feed_list=[image], capacity=2, iterable=iterable ) @@ -66,21 +66,21 @@ def __impl__(): [fake_reader(), fake_reader()], use_pipe=self.use_pipe ) - if isinstance(place, fluid.CUDAPlace): + if isinstance(place, base.CUDAPlace): reader.set_sample_generator( decorated_reader, batch_size=batch_size, - places=fluid.cuda_places(0), + places=base.cuda_places(0), ) else: reader.set_sample_generator( decorated_reader, batch_size=batch_size, - places=fluid.cpu_places(1), + places=base.cpu_places(1), ) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + exe.run(base.default_startup_program()) batch_num = int(sample_num * 2 / batch_size) @@ -103,7 +103,7 @@ def __impl__(): while True: exe.run(fetch_list=[image_p_1]) num += 1 - except fluid.core.EOFException: + except base.core.EOFException: reader.reset() self.assertFalse(self.raise_exception) self.assertEqual(num, batch_num) @@ -116,7 +116,7 @@ def test_main(self): for p in self.places(): for iterable in [False, True]: try: - with fluid.scope_guard(fluid.Scope()): + with base.scope_guard(base.Scope()): self.main_impl(p, iterable) self.assertTrue(not self.raise_exception) diff --git a/test/legacy_test/test_naive_best_fit_gpu_memory_limit.py b/test/legacy_test/test_naive_best_fit_gpu_memory_limit.py index dba09d70a6e07..de601cb89c484 100644 --- a/test/legacy_test/test_naive_best_fit_gpu_memory_limit.py +++ b/test/legacy_test/test_naive_best_fit_gpu_memory_limit.py @@ -16,34 +16,34 @@ import numpy as np -from paddle import fluid +from paddle import base -fluid.core.globals()['FLAGS_allocator_strategy'] = 'naive_best_fit' +base.core.globals()['FLAGS_allocator_strategy'] = 'naive_best_fit' -if fluid.is_compiled_with_cuda(): - fluid.core.globals()['FLAGS_gpu_memory_limit_mb'] = 10 +if base.is_compiled_with_cuda(): + base.core.globals()['FLAGS_gpu_memory_limit_mb'] = 10 class TestBase(unittest.TestCase): def setUp(self): - if fluid.is_compiled_with_cuda(): - self._limit = fluid.core.globals()['FLAGS_gpu_memory_limit_mb'] + if base.is_compiled_with_cuda(): + self._limit = base.core.globals()['FLAGS_gpu_memory_limit_mb'] def test_allocate(self): - if not fluid.is_compiled_with_cuda(): + if not base.is_compiled_with_cuda(): return other_dim = int(1024 * 1024 / 4) - place = fluid.CUDAPlace(0) - t = fluid.LoDTensor() + place = base.CUDAPlace(0) + t = 
base.LoDTensor() t.set( np.ndarray([int(self._limit / 2), other_dim], dtype='float32'), place, ) del t - t = fluid.LoDTensor() + t = base.LoDTensor() large_np = np.ndarray([2 * self._limit, other_dim], dtype='float32') try: diff --git a/test/legacy_test/test_name_scope.py b/test/legacy_test/test_name_scope.py index af8617cb99908..4b3e5dd0ff9df 100644 --- a/test/legacy_test/test_name_scope.py +++ b/test/legacy_test/test_name_scope.py @@ -15,24 +15,24 @@ import unittest import paddle -from paddle import fluid +from paddle import base class TestNameScope(unittest.TestCase): def test_name_scope(self): - with fluid.name_scope("s1"): + with base.name_scope("s1"): a = paddle.static.data(name='data', shape=[-1, 1], dtype='int32') b = a + 1 - with fluid.name_scope("s2"): + with base.name_scope("s2"): c = b * 1 - with fluid.name_scope("s3"): + with base.name_scope("s3"): d = c / 1 - with fluid.name_scope("s1"): + with base.name_scope("s1"): f = paddle.pow(d, 2.0) - with fluid.name_scope("s4"): + with base.name_scope("s4"): g = f - 1 - for op in fluid.default_main_program().block(0).ops: + for op in base.default_main_program().block(0).ops: if op.type == 'elementwise_add': self.assertEqual(op.desc.attr("op_namescope"), '/s1/') elif op.type == 'elementwise_mul': diff --git a/test/legacy_test/test_nan_inf.py b/test/legacy_test/test_nan_inf.py index cf5ad32daf195..e6f6ddb770144 100644 --- a/test/legacy_test/test_nan_inf.py +++ b/test/legacy_test/test_nan_inf.py @@ -117,7 +117,7 @@ def test_nan_inf_dynamic(self): self.run_check_nan_inf(cmd, self.dygraph_expected_op_count) # Test on GPU. - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): cmd = f"{self._python_interp} {filepath} --use_cuda --check_nan_inf_level {self.check_nan_inf_level}" self.run_check_nan_inf(cmd, self.dygraph_expected_op_count) @@ -233,7 +233,7 @@ def _check_num_nan_inf(use_cuda): {"FLAGS_check_nan_inf": 1, "FLAGS_check_nan_inf_level": 0} ) _check_num_nan_inf(use_cuda=False) - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): _check_num_nan_inf(use_cuda=True) def run_check_nan_inf_level(self, use_cuda, dtype, level): @@ -257,7 +257,7 @@ def test_check_nan_inf_level_float32(self): self.run_check_nan_inf_level( use_cuda=False, dtype="float32", level=level ) - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): self.run_check_nan_inf_level( use_cuda=True, dtype="float32", level=level ) @@ -267,7 +267,7 @@ def test_check_nan_inf_level_float16(self): self.run_check_nan_inf_level( use_cuda=False, dtype="float32", level=level ) - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): self.run_check_nan_inf_level( use_cuda=True, dtype="float16", level=level ) @@ -279,7 +279,7 @@ def test_eager(self): x_np, y_np = self.generate_inputs(shape, "float32") device_list = ["cpu"] - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): device_list.append("gpu:0") for device in device_list: diff --git a/test/legacy_test/test_nan_inf_dir.py b/test/legacy_test/test_nan_inf_dir.py index 122ddb74f4123..180e84044b8b0 100644 --- a/test/legacy_test/test_nan_inf_dir.py +++ b/test/legacy_test/test_nan_inf_dir.py @@ -110,7 +110,7 @@ def test_num_nan_inf(self): self.check_num_nan_inf( x_np, use_cuda=False, subdir="check_nan_inf_dir_cpu" ) - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): self.check_num_nan_inf( x_np, 
use_cuda=True, subdir="check_nan_inf_dir_gpu" ) diff --git a/test/legacy_test/test_nan_to_num_op.py b/test/legacy_test/test_nan_to_num_op.py index a5e0bbe62f4e2..c8a6d4fc67bef 100644 --- a/test/legacy_test/test_nan_to_num_op.py +++ b/test/legacy_test/test_nan_to_num_op.py @@ -18,7 +18,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core # from eager_op_test import OpTest @@ -87,7 +87,7 @@ def test_static(self): def test_dygraph(self): paddle.disable_static(place=self.place) - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): # NOTE(tiancaishaonvjituizi): float64 input fails the test x_np = np.array([[1, np.nan, -2], [np.inf, 0, -np.inf]]).astype( np.float32 diff --git a/test/legacy_test/test_nanmean_api.py b/test/legacy_test/test_nanmean_api.py index f89756e7d8f2b..58c47a77702f3 100644 --- a/test/legacy_test/test_nanmean_api.py +++ b/test/legacy_test/test_nanmean_api.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core np.random.seed(10) diff --git a/test/legacy_test/test_nanmedian.py b/test/legacy_test/test_nanmedian.py index ba3fe7f5039bd..9a55a91ac625b 100644 --- a/test/legacy_test/test_nanmedian.py +++ b/test/legacy_test/test_nanmedian.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core np.random.seed(102) @@ -175,7 +175,7 @@ def test_duplicated_axis(): def test_dygraph(self): paddle.disable_static(place=self.place) - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): data = self.fake_data["col_nan_odd"] out = paddle.nanmedian(paddle.to_tensor(data), keepdim=True) np_res = np.nanmedian(data, keepdims=True) diff --git a/test/legacy_test/test_nansum_api.py b/test/legacy_test/test_nansum_api.py index f0f255fe6e120..050ff6be2c8b2 100644 --- a/test/legacy_test/test_nansum_api.py +++ b/test/legacy_test/test_nansum_api.py @@ -17,15 +17,15 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base class API_Test_Nansum(unittest.TestCase): def test_static_graph(self): paddle.enable_static() - startup_program = fluid.Program() - train_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): + startup_program = base.Program() + train_program = base.Program() + with base.program_guard(train_program, startup_program): input = paddle.static.data( name='input', dtype='float32', shape=[2, 4] ) @@ -33,10 +33,10 @@ def test_static_graph(self): out2 = paddle.nansum(input, axis=0) out3 = paddle.nansum(input, axis=-1) out4 = paddle.nansum(input, axis=1, keepdim=True) - place = fluid.CPUPlace() - if fluid.core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) + place = base.CPUPlace() + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) + exe = base.Executor(place) exe.run(startup_program) x = np.array( @@ -87,7 +87,7 @@ def test_static_graph_fp16(self): out2 = paddle.nansum(input, axis=0) out3 = paddle.nansum(input, axis=-1) out4 = paddle.nansum(input, axis=1, keepdim=True) - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): place = paddle.CUDAPlace(0) exe = paddle.static.Executor(place) exe.run(startup_program) @@ -131,8 +131,8 @@ def test_dygraph(self): x = np.array( [[float('nan'), 3, 5, 9], [1, 2, float('-nan'), 7]] ).astype(np.float32) - with fluid.dygraph.guard(): - inputs = fluid.dygraph.to_variable(x) + with 
base.dygraph.guard(): + inputs = base.dygraph.to_variable(x) out = paddle.nansum(inputs) out_ref = np.array([27]).astype(np.float32) diff --git a/test/legacy_test/test_nce.py b/test/legacy_test/test_nce.py index f91abbff55248..8332f07b85c3a 100644 --- a/test/legacy_test/test_nce.py +++ b/test/legacy_test/test_nce.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, paddle_static_guard import paddle -from paddle import fluid -from paddle.fluid import Program, program_guard +from paddle import base +from paddle.base import Program, program_guard def nce( @@ -161,7 +161,7 @@ def setUp(self): @staticmethod def get_place(): - place = fluid.core.CPUPlace() + place = base.core.CPUPlace() return place @staticmethod @@ -195,7 +195,7 @@ def train_network( ) w_param = ( - fluid.default_main_program() + base.default_main_program() .global_block() .create_parameter( shape=[num_total_classes, 10], @@ -205,7 +205,7 @@ def train_network( ) ) b_param = ( - fluid.default_main_program() + base.default_main_program() .global_block() .create_parameter( shape=[num_total_classes, 1], @@ -238,7 +238,7 @@ def train_network( def test_input_is_selected_rows(self): with paddle_static_guard(): place = self.get_place() - exe = fluid.Executor(place) + exe = base.Executor(place) data = self.get_train_data(self.batch_size) nid_freq_arr = np.random.dirichlet(np.ones(20) * 1000).astype( @@ -247,17 +247,17 @@ def test_input_is_selected_rows(self): rets = [] # for dense - dense_scope = fluid.core.Scope() - dense_startup_program = fluid.framework.Program() - dense_train_program = fluid.framework.Program() - with fluid.scope_guard(dense_scope): - with fluid.program_guard( + dense_scope = base.core.Scope() + dense_startup_program = base.framework.Program() + dense_train_program = base.framework.Program() + with base.scope_guard(dense_scope): + with base.program_guard( dense_train_program, dense_startup_program ): cost, feeds = self.train_network( 20, 5, "custom_dist", nid_freq_arr.tolist(), False ) - feeder = fluid.DataFeeder(feed_list=feeds, place=place) + feeder = base.DataFeeder(feed_list=feeds, place=place) paddle.enable_static() exe.run(dense_startup_program) loss_val = exe.run( @@ -268,17 +268,17 @@ def test_input_is_selected_rows(self): rets.append(np.mean(loss_val)) # for sparse - sparse_scope = fluid.core.Scope() - sparse_startup_program = fluid.framework.Program() - sparse_train_program = fluid.framework.Program() - with fluid.scope_guard(sparse_scope): - with fluid.program_guard( + sparse_scope = base.core.Scope() + sparse_startup_program = base.framework.Program() + sparse_train_program = base.framework.Program() + with base.scope_guard(sparse_scope): + with base.program_guard( sparse_train_program, sparse_startup_program ): cost, feeds = self.train_network( 20, 5, "custom_dist", nid_freq_arr.tolist(), True ) - feeder = fluid.DataFeeder(feed_list=feeds, place=place) + feeder = base.DataFeeder(feed_list=feeds, place=place) paddle.enable_static() exe.run(sparse_startup_program) loss_val = exe.run( @@ -295,10 +295,10 @@ class TestNCE_OpError(unittest.TestCase): def test_errors(self): with paddle_static_guard(): with program_guard(Program(), Program()): - input1 = fluid.create_lod_tensor( + input1 = base.create_lod_tensor( np.array([0.0, 3.0, 2.0, 4.0]), [[1, 1, 2]], - fluid.CPUPlace(), + base.CPUPlace(), ) label1 = paddle.static.data( name='label1', shape=[-1, 4], dtype="int64" @@ -311,10 +311,10 @@ def test_errors(self): input2 = paddle.static.data( name='input2', shape=[-1, 4], dtype="float32" ) - label2 = 
fluid.create_lod_tensor( + label2 = base.create_lod_tensor( np.array([0.0, 3.0, 2.0, 4.0]), [[1, 1, 2]], - fluid.CPUPlace(), + base.CPUPlace(), ) # the input(label) of nce layer must be Variable. self.assertRaises( diff --git a/test/legacy_test/test_nearest_interp_op.py b/test/legacy_test/test_nearest_interp_op.py index f1cad2f66899b..6ef7c23589ed3 100755 --- a/test/legacy_test/test_nearest_interp_op.py +++ b/test/legacy_test/test_nearest_interp_op.py @@ -17,7 +17,7 @@ import numpy as np from eager_op_test import OpTest -from paddle.fluid import core +from paddle.base import core def nearest_neighbor_interp_np( diff --git a/test/legacy_test/test_nearest_interp_v2_op.py b/test/legacy_test/test_nearest_interp_v2_op.py index 72b371b9e3469..15902efc28e53 100755 --- a/test/legacy_test/test_nearest_interp_v2_op.py +++ b/test/legacy_test/test_nearest_interp_v2_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.nn.functional import interpolate paddle.enable_static() @@ -969,7 +969,7 @@ def test_case(self): place = core.CUDAPlace(0) else: place = core.CPUPlace() - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_data = np.random.random((2, 3, 6, 6)).astype("int64") scale_np = np.array([2, 2]).astype("int64") input_x = paddle.to_tensor(input_data) @@ -994,7 +994,7 @@ def test_case(self): place = core.CUDAPlace(0) else: place = core.CPUPlace() - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_data = np.random.random((2, 2, 6, 6, 6)).astype("int64") scale_np = np.array([2, 2, 2]).astype("int64") input_x = paddle.to_tensor(input_data) @@ -1013,7 +1013,7 @@ def test_case(self): @unittest.skipIf( - not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA" + not base.core.is_compiled_with_cuda(), "core is not compiled with CUDA" ) class TestNearestInterp3DOpForFloat16(unittest.TestCase): def init_test_case(self): @@ -1054,7 +1054,7 @@ def test_main(self): @unittest.skipIf( - not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA" + not base.core.is_compiled_with_cuda(), "core is not compiled with CUDA" ) class TestNearestInterpOpForFloat16(unittest.TestCase): def init_test_case(self): diff --git a/test/legacy_test/test_neg_op.py b/test/legacy_test/test_neg_op.py index ea748a57aeb84..8e17344dfe1b9 100644 --- a/test/legacy_test/test_neg_op.py +++ b/test/legacy_test/test_neg_op.py @@ -57,7 +57,7 @@ def test_cpu(self): self.run_static() def test_gpu(self): - if not paddle.fluid.core.is_compiled_with_cuda(): + if not paddle.base.core.is_compiled_with_cuda(): return paddle.disable_static(place=paddle.CUDAPlace(0)) diff --git a/test/legacy_test/test_network_with_dtype.py b/test/legacy_test/test_network_with_dtype.py index 07f38a1e80d9c..69d3bfb7f9d7f 100644 --- a/test/legacy_test/test_network_with_dtype.py +++ b/test/legacy_test/test_network_with_dtype.py @@ -15,8 +15,8 @@ import unittest import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core BATCH_SIZE = 20 @@ -27,9 +27,9 @@ def setUp(self): self.init_dtype() def run_net_on_place(self, place): - main = fluid.Program() - startup = fluid.Program() - with fluid.program_guard(main, startup): + main = base.Program() + startup = base.Program() + with base.program_guard(main, startup): x = paddle.static.data(name='x', shape=[-1, 13], dtype=self.dtype) y 
= paddle.static.data(name='y', shape=[-1, 1], dtype=self.dtype) y_predict = paddle.static.nn.fc(x, size=1, activation=None) @@ -44,8 +44,8 @@ def run_net_on_place(self, place): train_reader = paddle.batch( paddle.dataset.uci_housing.train(), batch_size=BATCH_SIZE ) - feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) - exe = fluid.Executor(place) + feeder = base.DataFeeder(place=place, feed_list=[x, y]) + exe = base.Executor(place) exe.run(startup) for data in train_reader(): exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list) @@ -56,13 +56,13 @@ def init_dtype(self): pass def test_cpu(self): - place = fluid.CPUPlace() + place = base.CPUPlace() self.run_net_on_place(place) def test_gpu(self): if not core.is_compiled_with_cuda(): return - place = fluid.CUDAPlace(0) + place = base.CUDAPlace(0) self.run_net_on_place(place) diff --git a/test/legacy_test/test_nll_loss.py b/test/legacy_test/test_nll_loss.py index e0bb3882806c1..e225bbebd9a0d 100644 --- a/test/legacy_test/test_nll_loss.py +++ b/test/legacy_test/test_nll_loss.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest import paddle -from paddle import fluid +from paddle import base def nll_loss_1d( @@ -82,15 +82,15 @@ def test_NLLLoss_1D_mean(self): input_np = np.random.random(size=(10, 10)).astype(np.float64) np.random.seed(200) label_np = np.random.randint(0, 10, size=(10,)).astype(np.int64) - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - # place = fluid.CPUPlace() - with fluid.program_guard(prog, startup_prog): + # place = base.CPUPlace() + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[10, 10], dtype='float64' ) @@ -98,21 +98,21 @@ def test_NLLLoss_1D_mean(self): nll_loss = paddle.nn.loss.NLLLoss() res = nll_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) (static_result,) = exe.run( prog, feed={"input": input_np, "label": label_np}, fetch_list=[res], ) - with fluid.dygraph.guard(): + with base.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss() dy_res = nll_loss( paddle.to_tensor(input_np), paddle.to_tensor(label_np) ) dy_result = dy_res.numpy() - with fluid.dygraph.guard(): + with base.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss() eager_res = nll_loss( paddle.to_tensor(input_np), paddle.to_tensor(label_np) @@ -130,15 +130,15 @@ def test_NLLLoss_1D_sum(self): input_np = np.random.random(size=(10, 10)).astype(np.float64) np.random.seed(200) label_np = np.random.randint(0, 10, size=(10,)).astype(np.int64) - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - # place = fluid.CPUPlace() - with fluid.program_guard(prog, startup_prog): + # place = base.CPUPlace() + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[10, 10], dtype='float64' ) @@ -146,14 +146,14 @@ def test_NLLLoss_1D_sum(self): nll_loss = paddle.nn.loss.NLLLoss(reduction='sum') res = nll_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) (static_result,) = exe.run( prog, feed={"input": input_np, "label": label_np}, 
fetch_list=[res], ) - with fluid.dygraph.guard(): + with base.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss(reduction='sum') dy_res = nll_loss( paddle.to_tensor(input_np), paddle.to_tensor(label_np) @@ -181,15 +181,15 @@ def test_NLLLoss_1D_with_weight_mean(self): np.random.seed(200) label_np = np.random.randint(0, 10, size=(10,)).astype(np.int64) weight_np = np.random.random(size=(10,)).astype(np.float64) - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - # place = fluid.CPUPlace() - with fluid.program_guard(prog, startup_prog): + # place = base.CPUPlace() + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[10, 10], dtype='float64' ) @@ -200,7 +200,7 @@ def test_NLLLoss_1D_with_weight_mean(self): nll_loss = paddle.nn.loss.NLLLoss(weight=weight) res = nll_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) (static_result,) = exe.run( prog, feed={ @@ -211,7 +211,7 @@ def test_NLLLoss_1D_with_weight_mean(self): fetch_list=[res], ) - with fluid.dygraph.guard(): + with base.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss( weight=paddle.to_tensor(weight_np) ) @@ -243,15 +243,15 @@ def test_NLLLoss_1D_with_weight_sum(self): np.random.seed(200) label_np = np.random.randint(0, 10, size=(10,)).astype(np.int64) weight_np = np.random.random(size=(10,)).astype(np.float64) - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - # place = fluid.CPUPlace() - with fluid.program_guard(prog, startup_prog): + # place = base.CPUPlace() + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[10, 10], dtype='float64' ) @@ -262,7 +262,7 @@ def test_NLLLoss_1D_with_weight_sum(self): nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='sum') res = nll_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) (static_result,) = exe.run( prog, feed={ @@ -273,7 +273,7 @@ def test_NLLLoss_1D_with_weight_sum(self): fetch_list=[res], ) - with fluid.dygraph.guard(): + with base.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss( weight=paddle.to_tensor(weight_np), reduction='sum' ) @@ -295,10 +295,10 @@ def test_NLLLoss_1D_with_weight_mean_cpu(self): np.random.seed(200) label_np = np.random.randint(0, 10, size=(10,)).astype(np.int64) weight_np = np.random.random(size=(10,)).astype(np.float64) - prog = fluid.Program() - startup_prog = fluid.Program() - place = fluid.CPUPlace() - with fluid.program_guard(prog, startup_prog): + prog = base.Program() + startup_prog = base.Program() + place = base.CPUPlace() + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[10, 10], dtype='float64' ) @@ -309,7 +309,7 @@ def test_NLLLoss_1D_with_weight_mean_cpu(self): nll_loss = paddle.nn.loss.NLLLoss(weight=weight) res = nll_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) (static_result,) = exe.run( prog, feed={ @@ -320,7 +320,7 @@ def test_NLLLoss_1D_with_weight_mean_cpu(self): fetch_list=[res], ) - with fluid.dygraph.guard(): + with 
base.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss( weight=paddle.to_tensor(weight_np) ) @@ -340,10 +340,10 @@ def test_NLLLoss_1D_with_weight_no_reduce_cpu(self): np.random.seed(200) label_np = np.random.randint(0, 10, size=(10,)).astype(np.int64) weight_np = np.random.random(size=(10,)).astype(np.float64) - prog = fluid.Program() - startup_prog = fluid.Program() - place = fluid.CPUPlace() - with fluid.program_guard(prog, startup_prog): + prog = base.Program() + startup_prog = base.Program() + place = base.CPUPlace() + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[10, 10], dtype='float64' ) @@ -354,7 +354,7 @@ def test_NLLLoss_1D_with_weight_no_reduce_cpu(self): nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='none') res = nll_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) (static_result,) = exe.run( prog, feed={ @@ -365,7 +365,7 @@ def test_NLLLoss_1D_with_weight_no_reduce_cpu(self): fetch_list=[res], ) - with fluid.dygraph.guard(): + with base.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss( weight=paddle.to_tensor(weight_np), reduction='none' ) @@ -386,15 +386,15 @@ def test_NLLLoss_2D_mean(self): input_np = np.random.random(size=(5, 3, 5, 5)).astype(np.float64) np.random.seed(200) label_np = np.random.randint(0, 3, size=(5, 5, 5)).astype(np.int64) - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - # place = fluid.CPUPlace() - with fluid.program_guard(prog, startup_prog): + # place = base.CPUPlace() + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[5, 3, 5, 5], dtype='float64' ) @@ -404,14 +404,14 @@ def test_NLLLoss_2D_mean(self): nll_loss = paddle.nn.loss.NLLLoss() res = nll_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) (static_result,) = exe.run( prog, feed={"input": input_np, "label": label_np}, fetch_list=[res], ) - with fluid.dygraph.guard(): + with base.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss() dy_res = nll_loss( paddle.to_tensor(input_np), paddle.to_tensor(label_np) @@ -429,15 +429,15 @@ def test_NLLLoss_2D_sum(self): input_np = np.random.random(size=(5, 3, 5, 5)).astype(np.float64) np.random.seed(200) label_np = np.random.randint(0, 3, size=(5, 5, 5)).astype(np.int64) - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - # place = fluid.CPUPlace() - with fluid.program_guard(prog, startup_prog): + # place = base.CPUPlace() + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[5, 3, 5, 5], dtype='float64' ) @@ -447,14 +447,14 @@ def test_NLLLoss_2D_sum(self): nll_loss = paddle.nn.loss.NLLLoss(reduction='sum') res = nll_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) (static_result,) = exe.run( prog, feed={"input": input_np, "label": label_np}, fetch_list=[res], ) - with fluid.dygraph.guard(): + with base.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss(reduction='sum') dy_res = nll_loss( paddle.to_tensor(input_np), paddle.to_tensor(label_np) @@ 
-473,15 +473,15 @@ def test_NLLLoss_2D_with_weight_mean(self): np.random.seed(200) label_np = np.random.randint(0, 3, size=(5, 5, 5)).astype(np.int64) weight_np = np.random.random(size=(3,)).astype(np.float64) - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - # place = fluid.CPUPlace() - with fluid.program_guard(prog, startup_prog): + # place = base.CPUPlace() + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[5, 3, 5, 5], dtype='float64' ) @@ -495,7 +495,7 @@ def test_NLLLoss_2D_with_weight_mean(self): nll_loss = paddle.nn.loss.NLLLoss(weight=weight) res = nll_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) (static_result,) = exe.run( prog, feed={ @@ -506,7 +506,7 @@ def test_NLLLoss_2D_with_weight_mean(self): fetch_list=[res], ) - with fluid.dygraph.guard(): + with base.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss( weight=paddle.to_tensor(weight_np) ) @@ -527,10 +527,10 @@ def test_NLLLoss_2D_with_weight_mean_cpu(self): np.random.seed(200) label_np = np.random.randint(0, 3, size=(5, 5, 5)).astype(np.int64) weight_np = np.random.random(size=(3,)).astype(np.float64) - prog = fluid.Program() - startup_prog = fluid.Program() - place = fluid.CPUPlace() - with fluid.program_guard(prog, startup_prog): + prog = base.Program() + startup_prog = base.Program() + place = base.CPUPlace() + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[5, 3, 5, 5], dtype='float64' ) @@ -544,7 +544,7 @@ def test_NLLLoss_2D_with_weight_mean_cpu(self): nll_loss = paddle.nn.loss.NLLLoss(weight=weight) res = nll_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) (static_result,) = exe.run( prog, feed={ @@ -555,7 +555,7 @@ def test_NLLLoss_2D_with_weight_mean_cpu(self): fetch_list=[res], ) - with fluid.dygraph.guard(): + with base.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss( weight=paddle.to_tensor(weight_np) ) @@ -576,14 +576,14 @@ def test_NLLLoss_2D_with_weight_sum(self): np.random.seed(200) label_np = np.random.randint(0, 3, size=(5, 5, 5)).astype(np.int64) weight_np = np.random.random(size=(3,)).astype(np.float64) - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[5, 3, 5, 5], dtype='float64' ) @@ -597,7 +597,7 @@ def test_NLLLoss_2D_with_weight_sum(self): nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='sum') res = nll_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) (static_result,) = exe.run( prog, feed={ @@ -608,7 +608,7 @@ def test_NLLLoss_2D_with_weight_sum(self): fetch_list=[res], ) - with fluid.dygraph.guard(): + with base.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss( weight=paddle.to_tensor(weight_np), reduction='sum' ) @@ -630,15 +630,15 @@ def test_NLLLoss_in_dims_not_2or4_mean(self): input_np = np.random.random(size=(5, 3, 5, 5, 5)).astype(np.float64) np.random.seed(200) label_np = 
np.random.randint(0, 3, size=(5, 5, 5, 5)).astype(np.int64) - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - # place = fluid.CPUPlace() - with fluid.program_guard(prog, startup_prog): + # place = base.CPUPlace() + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[5, 3, 5, 5, 5], dtype='float64' ) @@ -648,14 +648,14 @@ def test_NLLLoss_in_dims_not_2or4_mean(self): nll_loss = paddle.nn.loss.NLLLoss() res = nll_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) (static_result,) = exe.run( prog, feed={"input": input_np, "label": label_np}, fetch_list=[res], ) - with fluid.dygraph.guard(): + with base.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss() dy_res = nll_loss( paddle.to_tensor(input_np), paddle.to_tensor(label_np) @@ -680,15 +680,15 @@ def test_NLLLoss_in_dims_not_2or4_with_weight_mean(self): np.random.seed(200) label_np = np.random.randint(0, 3, size=(5, 5, 5, 5)).astype(np.int64) weight_np = np.random.random(size=(3,)).astype(np.float64) - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - # place = fluid.CPUPlace() - with fluid.program_guard(prog, startup_prog): + # place = base.CPUPlace() + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[5, 3, 5, 5, 5], dtype='float64' ) @@ -701,7 +701,7 @@ def test_NLLLoss_in_dims_not_2or4_with_weight_mean(self): nll_loss = paddle.nn.loss.NLLLoss(weight=weight) res = nll_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) (static_result,) = exe.run( prog, feed={ @@ -712,7 +712,7 @@ def test_NLLLoss_in_dims_not_2or4_with_weight_mean(self): fetch_list=[res], ) - with fluid.dygraph.guard(): + with base.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss( weight=paddle.to_tensor(weight_np) ) @@ -741,15 +741,15 @@ def test_NLLLoss_in_dims_not_2or4_with_weight_sum(self): np.random.seed(200) label_np = np.random.randint(0, 3, size=(5, 5, 5, 5)).astype(np.int64) weight_np = np.random.random(size=(3,)).astype(np.float64) - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - place = fluid.CPUPlace() - with fluid.program_guard(prog, startup_prog): + place = base.CPUPlace() + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[5, 3, 5, 5, 5], dtype='float64' ) @@ -762,7 +762,7 @@ def test_NLLLoss_in_dims_not_2or4_with_weight_sum(self): nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='sum') res = nll_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) (static_result,) = exe.run( prog, feed={ @@ -773,7 +773,7 @@ def test_NLLLoss_in_dims_not_2or4_with_weight_sum(self): fetch_list=[res], ) - with fluid.dygraph.guard(): + with base.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss( weight=paddle.to_tensor(weight_np), 
reduction='sum' ) @@ -805,15 +805,15 @@ def test_NLLLoss_in_dims_not_2or4_with_weight_no_reduce(self): np.random.seed(200) label_np = np.random.randint(0, 3, size=(5, 5, 5, 5)).astype(np.int64) weight_np = np.random.random(size=(3,)).astype(np.float64) - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - # place = fluid.CPUPlace() - with fluid.program_guard(prog, startup_prog): + # place = base.CPUPlace() + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[5, 3, 5, 5, 5], dtype='float64' ) @@ -826,7 +826,7 @@ def test_NLLLoss_in_dims_not_2or4_with_weight_no_reduce(self): nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='none') res = nll_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) (static_result,) = exe.run( prog, feed={ @@ -837,7 +837,7 @@ def test_NLLLoss_in_dims_not_2or4_with_weight_no_reduce(self): fetch_list=[res], ) - with fluid.dygraph.guard(): + with base.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss( weight=paddle.to_tensor(weight_np), reduction='none' ) @@ -870,10 +870,10 @@ def test_NLLLoss_in_dims_not_2or4_with_weight_no_reduce_cpu(self): np.random.seed(200) label_np = np.random.randint(0, 3, size=(5, 5, 5, 5)).astype(np.int64) weight_np = np.random.random(size=(3,)).astype(np.float64) - prog = fluid.Program() - startup_prog = fluid.Program() - place = fluid.CPUPlace() - with fluid.program_guard(prog, startup_prog): + prog = base.Program() + startup_prog = base.Program() + place = base.CPUPlace() + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[5, 3, 5, 5, 5], dtype='float64' ) @@ -886,7 +886,7 @@ def test_NLLLoss_in_dims_not_2or4_with_weight_no_reduce_cpu(self): nll_loss = paddle.nn.loss.NLLLoss(weight=weight, reduction='none') res = nll_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) (static_result,) = exe.run( prog, feed={ @@ -897,7 +897,7 @@ def test_NLLLoss_in_dims_not_2or4_with_weight_no_reduce_cpu(self): fetch_list=[res], ) - with fluid.dygraph.guard(): + with base.dygraph.guard(): nll_loss = paddle.nn.loss.NLLLoss( weight=paddle.to_tensor(weight_np), reduction='none' ) @@ -966,10 +966,10 @@ def test_check_output_with_weight(self): def test_check_grad(self): self.with_weight = True - place = fluid.CPUPlace() + place = base.CPUPlace() self.check_grad_with_place(place, ['X'], 'Out') - if fluid.core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) self.check_grad_with_place(place, ['X'], 'Out') def init_test_case(self): @@ -1017,10 +1017,10 @@ def test_check_output_with_weight(self): def test_check_grad(self): self.with_weight = True - place = fluid.CPUPlace() + place = base.CPUPlace() self.check_grad_with_place(place, ['X'], 'Out') - if fluid.core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) self.check_grad_with_place(place, ['X'], 'Out') def init_test_case(self): @@ -1067,10 +1067,10 @@ def test_check_output_with_weight(self): def test_check_grad(self): self.with_weight = True - place = fluid.CPUPlace() + place = base.CPUPlace() self.check_grad_with_place(place, ['X'], 'Out') - if fluid.core.is_compiled_with_cuda(): - 
place = fluid.CUDAPlace(0) + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) self.check_grad_with_place(place, ['X'], 'Out') def init_test_case(self): @@ -1118,10 +1118,10 @@ def test_check_output_with_weight(self): def test_check_grad(self): self.with_weight = True - place = fluid.CPUPlace() + place = base.CPUPlace() self.check_grad_with_place(place, ['X'], 'Out') - if fluid.core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) self.check_grad_with_place(place, ['X'], 'Out') def init_test_case(self): @@ -1169,7 +1169,7 @@ def test_x_dim_lt_2(): self.assertRaises(ValueError, test_x_dim_lt_2) def test_x_dim_imperative_lt_2(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): x_np = np.random.random(size=(5,)).astype(np.float64) label_np = np.random.randint(0, 10, size=(5,)).astype(np.int64) x = paddle.to_tensor(x_np) @@ -1228,7 +1228,7 @@ def test_NLLLoss_reduction_not_sum_mean_none(): self.assertRaises(ValueError, test_NLLLoss_reduction_not_sum_mean_none) def test_NLLLoss_reduction_imperative_not_sum_mean_none(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): x_np = np.random.random(size=(5, 3)).astype(np.float64) label_np = np.random.randint(0, 3, size=(5,)).astype(np.int64) x = paddle.to_tensor(x_np) @@ -1258,7 +1258,7 @@ def test_nll_loss_function_reduction_not_sum_mean_none(): ) def test_nll_loss_function_reduction_imperative_not_sum_mean_none(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): x_np = np.random.random(size=(5, 3)).astype(np.float64) label_np = np.random.randint(0, 3, size=(5,)).astype(np.int64) x = paddle.to_tensor(x_np) diff --git a/test/legacy_test/test_nn_functional_embedding_static.py b/test/legacy_test/test_nn_functional_embedding_static.py index 1cf9198504738..49935b1b1ec41 100644 --- a/test/legacy_test/test_nn_functional_embedding_static.py +++ b/test/legacy_test/test_nn_functional_embedding_static.py @@ -17,21 +17,21 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.nn import functional class EmbeddingStatic(unittest.TestCase): def test_1(self): - prog = fluid.Program() - with fluid.program_guard(prog): + prog = base.Program() + with base.program_guard(prog): def test_bad_x(): initializer = paddle.nn.initializer.Assign( np.random.random(size=(128, 100)) ) - param_attr = fluid.ParamAttr( + param_attr = base.ParamAttr( name="emb_weight", learning_rate=0.5, initializer=initializer, @@ -55,15 +55,15 @@ def test_bad_x(): test_bad_x() def test_2(self): - prog = fluid.Program() - with fluid.program_guard(prog): + prog = base.Program() + with base.program_guard(prog): def test_bad_x(): initializer = paddle.nn.initializer.Assign( np.random.random(size=(128, 100)) ) - param_attr = fluid.ParamAttr( + param_attr = base.ParamAttr( name="emb_weight", learning_rate=0.5, initializer=initializer, diff --git a/test/legacy_test/test_nn_functional_hot_op.py b/test/legacy_test/test_nn_functional_hot_op.py index 0a4fb0942f8c1..e320a991c74bc 100644 --- a/test/legacy_test/test_nn_functional_hot_op.py +++ b/test/legacy_test/test_nn_functional_hot_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.nn import functional @@ -131,9 +131,9 @@ def test_api_with_dygraph(self): label = np.array( [np.random.randint(0, num_classes - 1) for i in range(6)] ).reshape([6, 1]) - with 
fluid.dygraph.guard(): + with base.dygraph.guard(): one_hot_label = functional.one_hot( - x=fluid.dygraph.to_variable(label), num_classes=num_classes + x=base.dygraph.to_variable(label), num_classes=num_classes ) def _run(self, num_classes): @@ -141,13 +141,13 @@ def _run(self, num_classes): label.desc.set_need_check_feed(False) one_hot_label = functional.one_hot(x=label, num_classes=num_classes) - place = fluid.CPUPlace() + place = base.CPUPlace() label_data = np.array( [np.random.randint(0, 10 - 1) for i in range(6)] ).reshape([6, 1]) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + exe.run(base.default_startup_program()) ret = exe.run( feed={ 'label': label_data, @@ -159,7 +159,7 @@ def _run(self, num_classes): class BadInputTestOnehotV2(unittest.TestCase): def test_error(self): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): def test_bad_x(): label = paddle.static.data( diff --git a/test/legacy_test/test_nn_grad.py b/test/legacy_test/test_nn_grad.py index 398b3d9c888e0..592f4d8c0c922 100644 --- a/test/legacy_test/test_nn_grad.py +++ b/test/legacy_test/test_nn_grad.py @@ -19,8 +19,8 @@ from decorator_helper import prog_scope import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -47,9 +47,9 @@ def config(self): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for place in places: self.func(place) @@ -82,9 +82,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -106,9 +106,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -131,9 +131,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -162,9 +162,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -193,9 +193,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -225,9 +225,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -257,9 +257,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -284,9 +284,9 @@ def func(self, place): ) def test_grad(self): - places = 
[fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -306,9 +306,9 @@ def func(self, place): gradient_checker.double_grad_check([x], out, x_init=x_arr, place=place) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -328,9 +328,9 @@ def func(self, place): gradient_checker.double_grad_check([x], out, x_init=x_arr, place=place) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -361,9 +361,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -413,9 +413,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -438,9 +438,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -474,9 +474,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -509,9 +509,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -540,9 +540,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) diff --git a/test/legacy_test/test_nn_margin_rank_loss.py b/test/legacy_test/test_nn_margin_rank_loss.py index f125c1cbe878f..35967c9390936 100644 --- a/test/legacy_test/test_nn_margin_rank_loss.py +++ b/test/legacy_test/test_nn_margin_rank_loss.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.static import Program, program_guard @@ -42,7 +42,7 @@ def setUp(self): "float64" ) self.places = [] - self.places.append(fluid.CPUPlace()) + self.places.append(base.CPUPlace()) if core.is_compiled_with_cuda(): self.places.append(paddle.CUDAPlace(0)) diff --git a/test/legacy_test/test_nn_matmul_v2_grad.py b/test/legacy_test/test_nn_matmul_v2_grad.py index 1924045c0e88c..62350f9532ea1 100644 --- a/test/legacy_test/test_nn_matmul_v2_grad.py +++ b/test/legacy_test/test_nn_matmul_v2_grad.py @@ -19,8 +19,8 @@ from decorator_helper import prog_scope import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -57,9 +57,9 @@ def func(self, 
place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -72,9 +72,9 @@ def init_test(self): self.transpose_y = True def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -111,9 +111,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -150,9 +150,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -190,9 +190,9 @@ def func(self, place): def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -229,9 +229,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -268,9 +268,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -307,9 +307,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -346,9 +346,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -385,9 +385,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -424,9 +424,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -463,9 +463,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -502,9 +502,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -541,9 +541,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + 
places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -580,9 +580,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -619,9 +619,9 @@ def func(self, place): ) def test_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) diff --git a/test/legacy_test/test_nn_sigmoid_op.py b/test/legacy_test/test_nn_sigmoid_op.py index 298e5cf1f4a6b..a038ff7b94b8f 100644 --- a/test/legacy_test/test_nn_sigmoid_op.py +++ b/test/legacy_test/test_nn_sigmoid_op.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid, nn -from paddle.fluid import core +from paddle import base, nn +from paddle.base import core from paddle.nn import functional @@ -45,7 +45,7 @@ def check_static_api(self, place): x = paddle.static.data(name='x', shape=self.x_shape) x.stop_gradient = False y = mysigmoid(x) - fluid.backward.append_backward(paddle.mean(y)) + base.backward.append_backward(paddle.mean(y)) exe = paddle.static.Executor(place) out = exe.run(main_program, feed={'x': self.x}, fetch_list=[y]) np.testing.assert_allclose(out[0], self.y, rtol=1e-05) @@ -59,9 +59,9 @@ def check_dynamic_api(self, place): np.testing.assert_allclose(y.numpy(), self.y, rtol=1e-05) def test_check_api(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for place in places: self.check_dynamic_api(place) self.check_static_api(place) @@ -85,7 +85,7 @@ def check_static_api(self, place): with paddle.static.program_guard(main_program): x = paddle.static.data(name='x', shape=self.x_shape) y = functional.sigmoid(x, name="api_sigmoid") - exe = paddle.static.Executor(fluid.CPUPlace()) + exe = paddle.static.Executor(base.CPUPlace()) out = exe.run(main_program, feed={'x': self.x}, fetch_list=[y]) np.testing.assert_allclose(out[0], self.y, rtol=1e-05) @@ -96,9 +96,9 @@ def check_dynamic_api(self): np.testing.assert_allclose(y.numpy(), self.y, rtol=1e-05) def test_check_api(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for place in places: self.check_static_api(place) self.check_dynamic_api() diff --git a/test/legacy_test/test_noamdecay_op.py b/test/legacy_test/test_noamdecay_op.py index 62312c7a8b9f0..aafa50e0e74af 100644 --- a/test/legacy_test/test_noamdecay_op.py +++ b/test/legacy_test/test_noamdecay_op.py @@ -19,7 +19,7 @@ class TestSparseEmbeddingAPIError(unittest.TestCase): def test_errors(self): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): # The size of input in sparse_embedding should not be 0. 
def test_0_d_model(): schedular = paddle.optimizer.lr.NoamDecay( diff --git a/test/legacy_test/test_nonzero_api.py b/test/legacy_test/test_nonzero_api.py index c78de5a24f6be..6dbe9cfed9de1 100644 --- a/test/legacy_test/test_nonzero_api.py +++ b/test/legacy_test/test_nonzero_api.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import Program, program_guard +from paddle import base +from paddle.base import Program, program_guard def call_nonzero(x): @@ -37,7 +37,7 @@ def test_nonzero_api_as_tuple(self): self.assertEqual(type(y), tuple) self.assertEqual(len(y), 2) z = paddle.concat(list(y), axis=1) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) (res,) = exe.run( feed={'x': data}, fetch_list=[z.name], return_numpy=False @@ -53,7 +53,7 @@ def test_nonzero_api_as_tuple(self): self.assertEqual(type(y), tuple) self.assertEqual(len(y), 1) z = paddle.concat(list(y), axis=1) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) (res,) = exe.run( feed={'x': data}, fetch_list=[z.name], return_numpy=False ) @@ -66,7 +66,7 @@ def test_nonzero_api(self): x = paddle.static.data(name='x', shape=[-1, 2], dtype='float32') x.desc.set_need_check_feed(False) y = paddle.nonzero(x) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) (res,) = exe.run( feed={'x': data}, fetch_list=[y.name], return_numpy=False ) @@ -78,7 +78,7 @@ def test_nonzero_api(self): x = paddle.static.data(name='x', shape=[-1], dtype='float32') x.desc.set_need_check_feed(False) y = paddle.nonzero(x) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) (res,) = exe.run( feed={'x': data}, fetch_list=[y.name], return_numpy=False ) @@ -87,8 +87,8 @@ def test_nonzero_api(self): def test_dygraph_api(self): data_x = np.array([[True, False], [False, True]]) - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(data_x) + with base.dygraph.guard(): + x = base.dygraph.to_variable(data_x) z = paddle.nonzero(x) np_z = z.numpy() expect_out = np.array([[0, 0], [1, 1]]) diff --git a/test/legacy_test/test_norm_all.py b/test/legacy_test/test_norm_all.py index 75441aa5dfab4..44001abdea81f 100644 --- a/test/legacy_test/test_norm_all.py +++ b/test/legacy_test/test_norm_all.py @@ -18,9 +18,9 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import _C_ops, fluid -from paddle.fluid import core -from paddle.fluid.framework import in_dygraph_mode +from paddle import _C_ops, base +from paddle.base import core +from paddle.base.framework import in_dygraph_mode # hack method for test p_norm final state @@ -412,11 +412,11 @@ def calc_gradient(self): def run_fro(self, p, axis, shape_x, dtype, keep_dim, check_dim=False): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): data = paddle.static.data(name="X", shape=shape_x, dtype=dtype) out = paddle.norm(x=data, p=p, axis=axis, keepdim=keep_dim) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) np_input = (np.random.rand(*shape_x) + 1.0).astype(dtype) expected_result = numpy_frobenius_norm( np_input, axis=axis, keepdims=keep_dim @@ -434,11 +434,11 @@ def run_fro(self, p, axis, shape_x, dtype, keep_dim, check_dim=False): def run_pnorm(self, p, axis, shape_x, dtype, keep_dim, check_dim=False): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): data = 
paddle.static.data(name="X", shape=shape_x, dtype=dtype) out = paddle.norm(x=data, p=p, axis=axis, keepdim=keep_dim) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) np_input = (np.random.rand(*shape_x) + 1.0).astype(dtype) expected_result = p_norm( np_input, porder=p, axis=axis, keepdims=keep_dim @@ -637,7 +637,7 @@ def test_dygraph(self): run_graph(self, p='fro', axis=None, shape_x=[2, 3, 4], dtype="float32") def test_name(self): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): x = paddle.static.data(name="x", shape=[10, 10], dtype="float32") y_1 = paddle.norm(x, p='fro', name='frobenius_name') y_2 = paddle.norm(x, p=2, name='pnorm_name') @@ -645,7 +645,7 @@ def test_name(self): self.assertEqual(('pnorm_name' in y_2.name), True) def test_errors(self): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): def err_dtype(p, shape_x, xdtype, out=None): data = paddle.static.data(shape=shape_x, dtype=xdtype) @@ -674,7 +674,7 @@ def err_dtype(p, shape_x, xdtype, out=None): ValueError, paddle.norm, data, p='unspport', axis=[-3, -2, -1] ) - with fluid.dygraph.guard(): + with base.dygraph.guard(): # The size of input in Norm should not be 0. def test_0_size(): array = np.array([], dtype=np.float32) diff --git a/test/legacy_test/test_norm_nn_grad.py b/test/legacy_test/test_norm_nn_grad.py index 361929d49b8e0..3bd8f1b61ee05 100644 --- a/test/legacy_test/test_norm_nn_grad.py +++ b/test/legacy_test/test_norm_nn_grad.py @@ -19,15 +19,15 @@ from decorator_helper import prog_scope import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestInstanceNormDoubleGradCheck(unittest.TestCase): @prog_scope() def func(self, place): - prog = fluid.Program() - with fluid.program_guard(prog): + prog = base.Program() + with base.program_guard(prog): np.random.seed() shape = [2, 3, 4, 5] dtype = "float32" @@ -42,9 +42,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -54,8 +54,8 @@ class TestInstanceNormDoubleGradCheckWithoutParamBias( ): @prog_scope() def func(self, place): - prog = fluid.Program() - with fluid.program_guard(prog): + prog = base.Program() + with base.program_guard(prog): np.random.seed() shape = [2, 3, 4, 5] dtype = "float32" @@ -77,8 +77,8 @@ def instance_norm_wrapper(self, x): @prog_scope() def func(self, place): - prog = fluid.Program() - with fluid.program_guard(prog): + prog = base.Program() + with base.program_guard(prog): np.random.seed() shape = [2, 3, 4, 5] dtype = "float32" @@ -103,9 +103,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -119,8 +119,8 @@ def instance_norm_wrapper(self, x): @prog_scope() def func(self, place): - prog = fluid.Program() - with fluid.program_guard(prog): + prog = base.Program() + with base.program_guard(prog): np.random.seed() shape = [2, 3, 4, 5] dtype = "float32" @@ -164,8 +164,8 @@ def batch_norm_wrapper(self, x): @prog_scope() def func(self, place): - prog = fluid.Program() - with 
fluid.program_guard(prog): + prog = base.Program() + with base.program_guard(prog): np.random.seed() dtype = "float32" eps = 0.005 @@ -191,9 +191,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -241,8 +241,8 @@ def batch_norm_wrapper(self, x): class TestBatchNormDoubleGradCheckCase5(TestBatchNormDoubleGradCheck): @prog_scope() def func(self, place): - prog = fluid.Program() - with fluid.program_guard(prog): + prog = base.Program() + with base.program_guard(prog): np.random.seed(37) dtype = "float32" eps = 0.005 diff --git a/test/legacy_test/test_norm_op.py b/test/legacy_test/test_norm_op.py index 3144ec189ed4e..a486fba065e10 100644 --- a/test/legacy_test/test_norm_op.py +++ b/test/legacy_test/test_norm_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def l2_norm(x, axis, epsilon): @@ -113,7 +113,7 @@ def test_check_grad(self): @unittest.skipIf( - not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA" + not base.core.is_compiled_with_cuda(), "core is not compiled with CUDA" ) class TestNormOp7(TestNormOp): def init_dtype(self): @@ -121,12 +121,12 @@ def init_dtype(self): def test_check_output(self): self.check_output_with_place( - fluid.core.CUDAPlace(0), atol=5e-2, check_cinn=True + base.core.CUDAPlace(0), atol=5e-2, check_cinn=True ) def test_check_grad(self): self.check_grad_with_place( - fluid.core.CUDAPlace(0), + base.core.CUDAPlace(0), ['X'], 'Out', max_relative_error=0.05, @@ -203,7 +203,7 @@ def init_test_case(self): class API_NormTest(unittest.TestCase): def test_errors(self): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): def test_norm_x_type(): data = paddle.static.data(name="x", shape=[3, 3], dtype="int64") diff --git a/test/legacy_test/test_normal.py b/test/legacy_test/test_normal.py index 6009580a8cde7..d03e311f8c1c3 100644 --- a/test/legacy_test/test_normal.py +++ b/test/legacy_test/test_normal.py @@ -33,7 +33,7 @@ def setUp(self): self.dtype = self.get_dtype() self.place = ( paddle.CUDAPlace(0) - if paddle.fluid.core.is_compiled_with_cuda() + if paddle.base.core.is_compiled_with_cuda() else paddle.CPUPlace() ) diff --git a/test/legacy_test/test_normalization_wrapper.py b/test/legacy_test/test_normalization_wrapper.py index 4b58a4b21564b..dccade9412e5c 100644 --- a/test/legacy_test/test_normalization_wrapper.py +++ b/test/legacy_test/test_normalization_wrapper.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestNormalization(unittest.TestCase): @@ -43,7 +43,7 @@ def set_program(self, axis, epsilon): ) out = paddle.sum(l2_norm, axis=None) - fluid.backward.append_backward(loss=out) + base.backward.append_backward(loss=out) self.fetch_list = [l2_norm] def run_program(self): @@ -54,10 +54,10 @@ def run_program(self): for place in places: self.set_inputs(place) - exe = fluid.Executor(place) + exe = base.Executor(place) (output,) = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed=self.inputs, fetch_list=self.fetch_list, return_numpy=True, @@ -67,7 +67,7 @@ def run_program(self): def 
set_inputs(self, place): """Set the randomly generated data to the test program.""" self.inputs = {} - tensor = fluid.Tensor() + tensor = base.Tensor() tensor.set(self.data, place) self.inputs[self.data_desc["name"]] = tensor diff --git a/test/legacy_test/test_normalize.py b/test/legacy_test/test_normalize.py index b9e34e599eb39..9c0c482f767b5 100644 --- a/test/legacy_test/test_normalize.py +++ b/test/legacy_test/test_normalize.py @@ -18,7 +18,7 @@ import paddle import paddle.nn.functional as F -from paddle import fluid +from paddle import base def p_normalize(x, axis=1, p=2, epsilon=1e-12, keepdims=True): @@ -63,9 +63,9 @@ def run_static(self, use_gpu=False): result3 = F.normalize(x, name='aaa') result4 = F.normalize(x2, axis=0) - place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + place = base.CUDAPlace(0) if use_gpu else base.CPUPlace() + exe = base.Executor(place) + exe.run(base.default_startup_program()) static_result = exe.run( feed={"input": self.input_np, "input2": self.input_np2}, fetch_list=[result0, result1, result2, result4], @@ -79,26 +79,26 @@ def run_static(self, use_gpu=False): self.assertRaises(ValueError, F.normalize, x2) def test_cpu(self): - paddle.disable_static(place=paddle.fluid.CPUPlace()) + paddle.disable_static(place=paddle.base.CPUPlace()) self.run_imperative() paddle.enable_static() - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): self.run_static() def test_gpu(self): - if not fluid.core.is_compiled_with_cuda(): + if not base.core.is_compiled_with_cuda(): return - paddle.disable_static(place=paddle.fluid.CUDAPlace(0)) + paddle.disable_static(place=paddle.base.CUDAPlace(0)) self.run_imperative() paddle.enable_static() - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): self.run_static(use_gpu=True) def test_errors(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): # The size of input in Normalize should not be 0. 
def test_0_size(): array = np.array([], dtype=np.float32) diff --git a/test/legacy_test/test_npair_loss_op.py b/test/legacy_test/test_npair_loss_op.py index b49cdf0547c47..79a003762e35a 100755 --- a/test/legacy_test/test_npair_loss_op.py +++ b/test/legacy_test/test_npair_loss_op.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard def npairloss(anchor, positive, labels, l2_reg=0.002): @@ -63,8 +63,8 @@ def test_npair_loss(self): num_data, feat_dim, num_classes = 18, 6, 3 place = core.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + exe.run(base.default_startup_program()) embeddings_anchor = np.random.rand(num_data, feat_dim).astype( np.float32 @@ -199,7 +199,7 @@ def test_labels_type(): class TestNpairLossZeroError(unittest.TestCase): def test_errors(self): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): def test_anchor_0_size(): array = np.array([], dtype=np.float32) diff --git a/test/legacy_test/test_number_count_op.py b/test/legacy_test/test_number_count_op.py index 78acc7948e15e..b877a40026dc2 100644 --- a/test/legacy_test/test_number_count_op.py +++ b/test/legacy_test/test_number_count_op.py @@ -19,7 +19,7 @@ import paddle from paddle.distributed.models.moe import utils -from paddle.fluid import core +from paddle.base import core def count(x, upper_num): diff --git a/test/legacy_test/test_numel_op.py b/test/legacy_test/test_numel_op.py index 57f07fa86981a..d04326971ed00 100644 --- a/test/legacy_test/test_numel_op.py +++ b/test/legacy_test/test_numel_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestNumelOp(OpTest): @@ -149,9 +149,9 @@ def init(self): class TestNumelAPI(unittest.TestCase): def test_numel_static(self): - main_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(main_program, startup_program): + main_program = base.Program() + startup_program = base.Program() + with base.program_guard(main_program, startup_program): shape1 = [2, 1, 4, 5] shape2 = [1, 4, 5] x_1 = paddle.static.data(shape=shape1, dtype='int32', name='x_1') @@ -188,9 +188,9 @@ def test_numel_imperative(self): paddle.enable_static() def test_error(self): - main_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(main_program, startup_program): + main_program = base.Program() + startup_program = base.Program() + with base.program_guard(main_program, startup_program): def test_x_type(): shape = [1, 4, 5] diff --git a/test/legacy_test/test_one_hot_v2_op.py b/test/legacy_test/test_one_hot_v2_op.py index 8bb28ed4e3f86..f8a7c3a59140e 100644 --- a/test/legacy_test/test_one_hot_v2_op.py +++ b/test/legacy_test/test_one_hot_v2_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def one_hot_wrapper(x, depth_tensor, **keargs): @@ -141,9 +141,9 @@ def test_api_with_dygraph(self): label = np.array( [np.random.randint(0, depth - 1) for i in range(6)] ).reshape([6, 1]) - with fluid.dygraph.guard(): + with base.dygraph.guard(): one_hot_label = paddle.nn.functional.one_hot( - fluid.dygraph.to_variable(label), 
depth + base.dygraph.to_variable(label), depth ) one_hot_label = paddle.nn.functional.one_hot( paddle.to_tensor(label), depth @@ -154,13 +154,13 @@ def _run(self, depth): label.desc.set_need_check_feed(False) one_hot_label = paddle.nn.functional.one_hot(x=label, num_classes=depth) - place = fluid.CPUPlace() + place = base.CPUPlace() label_data = np.array( [np.random.randint(0, 10 - 1) for i in range(6)] ).reshape([6, 1]) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + exe.run(base.default_startup_program()) ret = exe.run( feed={ 'label': label_data, @@ -172,7 +172,7 @@ def _run(self, depth): class BadInputTestOnehotV2(unittest.TestCase): def test_error(self): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): def test_bad_x(): label = paddle.static.data( diff --git a/test/legacy_test/test_ones_like.py b/test/legacy_test/test_ones_like.py index fad132e6093cc..17048414c3896 100644 --- a/test/legacy_test/test_ones_like.py +++ b/test/legacy_test/test_ones_like.py @@ -17,9 +17,9 @@ import numpy as np import paddle -from paddle import _C_ops, fluid, ones_like -from paddle.fluid import Program, core, program_guard -from paddle.fluid.framework import convert_np_dtype_to_dtype_ +from paddle import _C_ops, base, ones_like +from paddle.base import Program, core, program_guard +from paddle.base.framework import convert_np_dtype_to_dtype_ class TestOnesLikeAPIError(unittest.TestCase): @@ -45,11 +45,11 @@ def test_api(self): out5 = ones_like(x, 'int64') place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) outs = exe.run( train_program, feed={'X': np.ones(shape).astype('float32')}, @@ -67,9 +67,9 @@ class TestOnesAPI(unittest.TestCase): def test_api(self): shape = [3, 4] place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) paddle.disable_static(place) diff --git a/test/legacy_test/test_ones_op.py b/test/legacy_test/test_ones_op.py index 22e3d3d737d44..4dff895aceb8a 100644 --- a/test/legacy_test/test_ones_op.py +++ b/test/legacy_test/test_ones_op.py @@ -45,7 +45,7 @@ def test_paddle_ones(self): expected_result = np.ones(10, dtype="int64") self.assertEqual((result == expected_result).all(), True) - def test_fluid_ones(self): + def test_base_ones(self): with paddle.static.program_guard(paddle.static.Program()): ones = paddle.ones(shape=[10], dtype="int64") place = paddle.CPUPlace() diff --git a/test/legacy_test/test_op_function_generator.py b/test/legacy_test/test_op_function_generator.py index 0ad5a2e4a6692..e5e3d3957a739 100644 --- a/test/legacy_test/test_op_function_generator.py +++ b/test/legacy_test/test_op_function_generator.py @@ -18,7 +18,7 @@ import paddle import paddle.nn.functional as F -from paddle import _legacy_C_ops, fluid +from paddle import _legacy_C_ops, base class TestTracedLayer(paddle.nn.Layer): @@ -36,11 +36,11 @@ def setUp(self): self.array = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) def test_elementwise_add(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): a = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) b = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) - x = fluid.dygraph.to_variable(a) - y = fluid.dygraph.to_variable(b) + x = base.dygraph.to_variable(a) + y = base.dygraph.to_variable(b) x.stop_gradient = False res1 = 
paddle.add(x, y) @@ -49,11 +49,11 @@ def test_elementwise_add(self): np.testing.assert_array_equal(res1.numpy(), res2.numpy()) def test_elementwise_mul(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): a = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) b = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) - x = fluid.dygraph.to_variable(a) - y = fluid.dygraph.to_variable(b) + x = base.dygraph.to_variable(a) + y = base.dygraph.to_variable(b) res1 = paddle.multiply(x, y) res2 = _legacy_C_ops.elementwise_mul(x, y) @@ -61,9 +61,9 @@ def test_elementwise_mul(self): np.testing.assert_array_equal(res1.numpy(), res2.numpy()) def test_relu(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): a = np.random.uniform(-1, 1, self.shape).astype(self.dtype) - x = fluid.dygraph.to_variable(a) + x = base.dygraph.to_variable(a) res1 = F.relu(x) res2 = _legacy_C_ops.relu(x) @@ -71,11 +71,11 @@ def test_relu(self): np.testing.assert_array_equal(res1.numpy(), res2.numpy()) def test_trace_backward(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): a = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) b = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) - x = fluid.dygraph.to_variable(a) - y = fluid.dygraph.to_variable(b) + x = base.dygraph.to_variable(a) + y = base.dygraph.to_variable(b) x.stop_gradient = False y.stop_gradient = False x.retain_grads() diff --git a/test/legacy_test/test_op_name_conflict.py b/test/legacy_test/test_op_name_conflict.py index 5114e81c24c37..fbb717eda9609 100644 --- a/test/legacy_test/test_op_name_conflict.py +++ b/test/legacy_test/test_op_name_conflict.py @@ -17,24 +17,24 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base class TestOpNameConflict(unittest.TestCase): def test_conflict(self): paddle.enable_static() - main = fluid.Program() - startup = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, startup): + main = base.Program() + startup = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, startup): x = paddle.static.data(name="x", shape=[1], dtype='float32') y = paddle.static.data(name="y", shape=[1], dtype='float32') m = paddle.log2(x, name="log2") n = paddle.log2(y, name="log2") - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) m_v, n_v = exe.run( feed={ "x": np.ones((1), "float32") * 1, diff --git a/test/legacy_test/test_op_support_gpu.py b/test/legacy_test/test_op_support_gpu.py index 88bb417e0911c..46561b4014df2 100644 --- a/test/legacy_test/test_op_support_gpu.py +++ b/test/legacy_test/test_op_support_gpu.py @@ -14,7 +14,7 @@ import unittest -from paddle.fluid import core +from paddle.base import core class TestOpSupportGPU(unittest.TestCase): diff --git a/test/legacy_test/test_op_version.py b/test/legacy_test/test_op_version.py index 5fadf6b24f4c4..56953aaf4a545 100644 --- a/test/legacy_test/test_op_version.py +++ b/test/legacy_test/test_op_version.py @@ -14,7 +14,7 @@ import unittest -from paddle import fluid, utils +from paddle import base, utils class OpLastCheckpointCheckerTest(unittest.TestCase): @@ -24,7 +24,7 @@ def __init__(self, methodName='runTest'): self.fake_op = 'for_pybind_test__' def test_op_attr_info(self): - update_type = fluid.core.OpUpdateType.kNewAttr + update_type = base.core.OpUpdateType.kNewAttr info_list = self.checker.filter_updates( self.fake_op, update_type, 'STRINGS' ) @@ -34,7 +34,7 @@ def 
test_op_attr_info(self): self.assertEqual(info_list[0].remark(), 'std::vector') def test_op_input_output_info(self): - update_type = fluid.core.OpUpdateType.kNewInput + update_type = base.core.OpUpdateType.kNewInput info_list = self.checker.filter_updates( self.fake_op, update_type, 'NewInput' ) @@ -43,7 +43,7 @@ def test_op_input_output_info(self): self.assertEqual(info_list[0].remark(), 'NewInput_') def test_op_bug_fix_info(self): - update_type = fluid.core.OpUpdateType.kBugfixWithBehaviorChanged + update_type = base.core.OpUpdateType.kBugfixWithBehaviorChanged info_list = self.checker.filter_updates(self.fake_op, update_type) self.assertTrue(info_list) self.assertEqual(info_list[0].remark(), 'BugfixWithBehaviorChanged_') @@ -52,7 +52,7 @@ def test_op_bug_fix_info(self): class OpVersionTest(unittest.TestCase): def __init__(self, methodName='runTest'): super().__init__(methodName) - self.vmap = fluid.core.get_op_version_map() + self.vmap = base.core.get_op_version_map() self.fake_op = 'for_pybind_test__' def test_checkpoints(self): diff --git a/test/legacy_test/test_operator.py b/test/legacy_test/test_operator.py index da08e898ed82e..19f75afd74757 100644 --- a/test/legacy_test/test_operator.py +++ b/test/legacy_test/test_operator.py @@ -17,7 +17,7 @@ import numpy as np import op -from paddle.fluid.proto import framework_pb2 +from paddle.base.proto import framework_pb2 class TestGetAllProtos(unittest.TestCase): diff --git a/test/legacy_test/test_operator_desc.py b/test/legacy_test/test_operator_desc.py index e79ef1e951eb5..d0ee95854cd94 100644 --- a/test/legacy_test/test_operator_desc.py +++ b/test/legacy_test/test_operator_desc.py @@ -14,8 +14,8 @@ import unittest -from paddle.fluid import core -from paddle.fluid.framework import Program, default_startup_program +from paddle.base import core +from paddle.base.framework import Program, default_startup_program main_program = default_startup_program() diff --git a/test/legacy_test/test_optimizer.py b/test/legacy_test/test_optimizer.py index 568d50eab22bd..c7e6d21124176 100644 --- a/test/legacy_test/test_optimizer.py +++ b/test/legacy_test/test_optimizer.py @@ -20,10 +20,10 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core, framework -from paddle.fluid.backward import append_backward -from paddle.fluid.framework import ( +from paddle import base +from paddle.base import core, framework +from paddle.base.backward import append_backward +from paddle.base.framework import ( Program, convert_np_dtype_to_dtype_, program_guard, @@ -830,8 +830,8 @@ def mlp(input_x, input_y): main_program = Program() startup_program = Program() - scope = fluid.Scope() - with fluid.scope_guard(scope): + scope = base.Scope() + with base.scope_guard(scope): with program_guard(main_program, startup_program): input_x = paddle.static.data( name="x", shape=[-1, 3], dtype='float32' @@ -845,13 +845,13 @@ def mlp(input_x, input_y): sgd._set_checkpoints([prediction]) sgd.minimize(cost) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + place = base.CPUPlace() + exe = base.Executor(place) + exe.run(base.default_startup_program()) feed_data = gen_data() drop_vec = exe.run( feed=feed_data, - program=fluid.default_main_program(), + program=base.default_main_program(), fetch_list=[ "dropout_with_seed_cpu.tmp_1", "dropout_with_seed_cpu.tmp_1.subprog_0", @@ -895,8 +895,8 @@ def mlp(input_x, input_y): main_program = Program() startup_program = Program() - scope = fluid.Scope() - with 
fluid.scope_guard(scope): + scope = base.Scope() + with base.scope_guard(scope): with program_guard(main_program, startup_program): input_x = paddle.static.data( name="x", shape=[-1, 3], dtype='float32' @@ -910,13 +910,13 @@ def mlp(input_x, input_y): sgd._set_checkpoints([prediction]) sgd.minimize(cost) - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + place = base.CUDAPlace(0) + exe = base.Executor(place) + exe.run(base.default_startup_program()) feed_data = gen_data() drop_vec = exe.run( feed=feed_data, - program=fluid.default_main_program(), + program=base.default_main_program(), fetch_list=[ "dropout_with_seed_gpu.tmp_1", "dropout_with_seed_gpu.tmp_1.subprog_0", @@ -1028,7 +1028,7 @@ def __init__(self, dtype): def forward(self, x): return x * self._w + self._b - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): model = MyLayer(dtype) x = paddle.rand([10, 2, 3], dtype=dtype) loss = model(x) @@ -1138,7 +1138,7 @@ def __len__(self): def test_with_state_dict(self): if core.is_compiled_with_cuda(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): out_use_state_dict = self.check_with_opt_state_dict( use_save_load=True ) diff --git a/test/legacy_test/test_optimizer_for_varbase.py b/test/legacy_test/test_optimizer_for_varbase.py index a8e4b03341837..abeebc2ec3e04 100644 --- a/test/legacy_test/test_optimizer_for_varbase.py +++ b/test/legacy_test/test_optimizer_for_varbase.py @@ -88,7 +88,7 @@ def test_optimizer_with_varbase_input(self): optimizer.Adam(learning_rate=self.lr, parameters=x) def test_create_param_lr_with_1_for_coverage(self): - x = paddle.fluid.framework.EagerParamBase( + x = paddle.base.framework.EagerParamBase( dtype="float32", shape=[5, 10], lod_level=0, @@ -97,7 +97,7 @@ def test_create_param_lr_with_1_for_coverage(self): ) x.value().get_tensor().set( np.random.random((5, 10)).astype('float32'), - paddle.fluid.framework._current_expected_place(), + paddle.base.framework._current_expected_place(), ) y = paddle.ones([5, 10]) @@ -107,7 +107,7 @@ def test_create_param_lr_with_1_for_coverage(self): opt.step() def test_create_param_lr_with_no_1_value_for_coverage(self): - x = paddle.fluid.framework.EagerParamBase( + x = paddle.base.framework.EagerParamBase( dtype="float32", shape=[5, 10], lod_level=0, @@ -116,7 +116,7 @@ def test_create_param_lr_with_no_1_value_for_coverage(self): ) x.value().get_tensor().set( np.random.random((5, 10)).astype('float32'), - paddle.fluid.framework._current_expected_place(), + paddle.base.framework._current_expected_place(), ) y = paddle.ones([5, 10]) diff --git a/test/legacy_test/test_optimizer_grad.py b/test/legacy_test/test_optimizer_grad.py index 842c5d0766326..dad2e783124ab 100644 --- a/test/legacy_test/test_optimizer_grad.py +++ b/test/legacy_test/test_optimizer_grad.py @@ -18,8 +18,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid.backward import _append_grad_suffix_ +from paddle import base +from paddle.base.backward import _append_grad_suffix_ paddle.enable_static() @@ -79,20 +79,20 @@ def build_net(self, cond_i, use_bf16=False): param_x = paddle.create_parameter( dtype="float32", shape=self.shape, - attr=fluid.ParamAttr(learning_rate=self.param_lr, name="param_x"), + attr=base.ParamAttr(learning_rate=self.param_lr, name="param_x"), default_initializer=paddle.nn.initializer.Assign(self.x), ) param_y = paddle.create_parameter( dtype="float32", shape=self.shape, - attr=fluid.ParamAttr(learning_rate=self.param_lr, name="param_y"), 
+ attr=base.ParamAttr(learning_rate=self.param_lr, name="param_y"), default_initializer=paddle.nn.initializer.Assign(self.y), ) param_z = paddle.create_parameter( dtype="float32", shape=self.shape, - attr=fluid.ParamAttr(learning_rate=self.param_lr, name="param_z"), + attr=base.ParamAttr(learning_rate=self.param_lr, name="param_z"), default_initializer=paddle.nn.initializer.Assign(self.z), ) @@ -193,9 +193,9 @@ def _check_grads(self, use_bf16=False): """ main logic code to check the validity of apply_optimize. """ - places = [fluid.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places = [base.CPUPlace()] + if base.core.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) # test on CPU and GPU for place in places: for param_lr in self.param_lr: @@ -206,9 +206,9 @@ def _check_grads(self, use_bf16=False): ) self._init_param_attr() - main_program = fluid.Program() - init_program = fluid.Program() - with fluid.program_guard(main_program, init_program): + main_program = base.Program() + init_program = base.Program() + with base.program_guard(main_program, init_program): # reset optimizer._accumulators to avoid duplicate name in loop. self.optimizer._accumulators = defaultdict( lambda: {} @@ -223,7 +223,7 @@ def _check_grads(self, use_bf16=False): if use_bf16: self.optimizer = decorated_optimizer - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(init_program) if use_bf16: self.optimizer.amp_init(exe.place) @@ -245,7 +245,7 @@ def _check_grads(self, use_bf16=False): @unittest.skipIf( - not fluid.core.supports_bfloat16(), "place does not support BF16 evaluation" + not base.core.supports_bfloat16(), "place does not support BF16 evaluation" ) class TestSGDOptimizer(TestOptimizer): def test_optimizer_multiblock_except(self): diff --git a/test/legacy_test/test_optimizer_in_control_flow.py b/test/legacy_test/test_optimizer_in_control_flow.py index 5e1fca418b7fe..84adcf75cfef8 100644 --- a/test/legacy_test/test_optimizer_in_control_flow.py +++ b/test/legacy_test/test_optimizer_in_control_flow.py @@ -17,9 +17,9 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.framework import Program, program_guard +from paddle import base +from paddle.base import core +from paddle.base.framework import Program, program_guard BATCH_SIZE = 1 INPUT_SIZE = 784 @@ -47,10 +47,10 @@ def double_fc_net(image): image, size=FC_SIZE, activation='relu', - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.99) ), - bias_attr=fluid.ParamAttr( + bias_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.5) ), name="hidden", @@ -60,10 +60,10 @@ def double_fc_net(image): hidden, size=CLASS_NUM, activation='softmax', - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=1.2) ), - bias_attr=fluid.ParamAttr( + bias_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.8) ), name="prediction", @@ -121,8 +121,8 @@ def fn_2(opt, avg_loss=None, pred=None, label=None): lambda: fn_2(sgd, avg_loss_2), ) - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() + exe = base.Executor(place) exe.run(startup_program) for epoch in range(EPOCH_NUM): @@ -173,10 +173,10 @@ def forward(self, inputs): def dynamic(train_data, use_cuda=False, use_parallel_exe=False): - place = 
fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - with fluid.dygraph.guard(place): - fluid.default_startup_program().random_seed = SEED - fluid.default_main_program().random_seed = SEED + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() + with base.dygraph.guard(place): + base.default_startup_program().random_seed = SEED + base.default_main_program().random_seed = SEED dy_layer = DygraphLayer() adam = paddle.optimizer.Adam( learning_rate=LR, parameters=dy_layer.parameters() @@ -187,8 +187,8 @@ def dynamic(train_data, use_cuda=False, use_parallel_exe=False): for epoch in range(EPOCH_NUM): image_data, label = train_data[epoch] - var_input = fluid.dygraph.to_variable(image_data) - var_label = fluid.dygraph.to_variable(label) + var_input = base.dygraph.to_variable(image_data) + var_label = base.dygraph.to_variable(label) hidden, prediction = dy_layer(var_input) if epoch % 2 == 0: diff --git a/test/legacy_test/test_overlap_add_op.py b/test/legacy_test/test_overlap_add_op.py index 98d4ce10aaabb..90b10c6b9a6c0 100644 --- a/test/legacy_test/test_overlap_add_op.py +++ b/test/legacy_test/test_overlap_add_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core def overlap_add(x, hop_length, axis=-1): diff --git a/test/legacy_test/test_pad3d_op.py b/test/legacy_test/test_pad3d_op.py index 7e6cefcc3132d..cbd8d142e6467 100644 --- a/test/legacy_test/test_pad3d_op.py +++ b/test/legacy_test/test_pad3d_op.py @@ -20,7 +20,7 @@ import paddle import paddle.nn.functional as F from paddle import nn -from paddle.fluid import ( +from paddle.base import ( Executor, Program, core, diff --git a/test/legacy_test/test_pad_op.py b/test/legacy_test/test_pad_op.py index 3cec8719e137c..f33a317e944c6 100644 --- a/test/legacy_test/test_pad_op.py +++ b/test/legacy_test/test_pad_op.py @@ -20,7 +20,7 @@ from test_attribute_var import UnittestBase import paddle -from paddle.fluid import Program, core, program_guard +from paddle.base import Program, core, program_guard def pad_wrapper(x, paddings, pad_value): @@ -115,7 +115,7 @@ def test_check_grad_normal(self): class TestPadOpError(unittest.TestCase): def test_errors(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with program_guard(Program(), Program()): input_data = np.random.random((2, 2)).astype("float32") @@ -136,7 +136,7 @@ def init_info(self): self.save_path = os.path.join(self.temp_dir.name, self.path_prefix()) def test_static(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): main_prog = Program() starup_prog = Program() with program_guard(main_prog, starup_prog): @@ -196,7 +196,7 @@ def call_func(self, x): class TestPaddingValueTensor3(unittest.TestCase): def test_static(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): np_x = np.random.random((16, 16)).astype('float32') main_prog = Program() starup_prog = Program() diff --git a/test/legacy_test/test_paddle_imperative_double_grad.py b/test/legacy_test/test_paddle_imperative_double_grad.py index 99bb5bf714413..32e65fff99f54 100644 --- a/test/legacy_test/test_paddle_imperative_double_grad.py +++ b/test/legacy_test/test_paddle_imperative_double_grad.py @@ -19,8 +19,8 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid.wrapped_decorator import wrap_decorator +from paddle import base +from paddle.base.wrapped_decorator 
import wrap_decorator def _dygraph_guard_(func): @@ -28,7 +28,7 @@ def __impl__(*args, **kwargs): if paddle.in_dynamic_mode(): return func(*args, **kwargs) else: - with fluid.dygraph.guard(): + with base.dygraph.guard(): return func(*args, **kwargs) return __impl__ @@ -39,7 +39,7 @@ def __impl__(*args, **kwargs): def random_var(size, low=-1, high=1, dtype='float32'): x_np = np.random.uniform(low=low, high=high, size=size).astype(dtype) - return fluid.dygraph.to_variable(x_np) + return base.dygraph.to_variable(x_np) class TestDygraphDoubleGrad(TestCase): @@ -152,7 +152,7 @@ def test_none_one_initial_gradient(self): ) np.random.shuffle(x_np) - x = fluid.dygraph.to_variable(x_np) + x = base.dygraph.to_variable(x_np) x.stop_gradient = False alpha = 0.2 diff --git a/test/legacy_test/test_paddle_multiprocessing.py b/test/legacy_test/test_paddle_multiprocessing.py index 9117a2d86a09d..f3b74596b3326 100644 --- a/test/legacy_test/test_paddle_multiprocessing.py +++ b/test/legacy_test/test_paddle_multiprocessing.py @@ -198,7 +198,7 @@ def test_pass_empty(self): class TestMultiprocessingGpu(TestMultiprocessingBase): @unittest.skipIf( - not paddle.fluid.core.is_compiled_with_cuda(), + not paddle.base.core.is_compiled_with_cuda(), "core is not compiled with CUDA", ) def func_test_pass_tensor(self): diff --git a/test/legacy_test/test_paddle_save_load.py b/test/legacy_test/test_paddle_save_load.py index be7eb0febeb62..f5d87ab70ea0f 100644 --- a/test/legacy_test/test_paddle_save_load.py +++ b/test/legacy_test/test_paddle_save_load.py @@ -22,8 +22,8 @@ import paddle import paddle.optimizer as opt -from paddle import fluid, nn -from paddle.fluid import framework +from paddle import base, nn +from paddle.base import framework from paddle.optimizer import Adam from paddle.optimizer.lr import LRScheduler @@ -168,7 +168,7 @@ def tearDown(self): def set_zero(self, prog, place, scope=None): if scope is None: - scope = fluid.global_scope() + scope = base.global_scope() for var in prog.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: ten = scope.find_var(var.name).get_tensor() @@ -225,31 +225,31 @@ def test_replace_static_save_load(self): for var in prog.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) base_map[var.name] = t path = os.path.join( self.temp_dir.name, "test_replace_static_save_load", "model" ) - # paddle.save, legacy paddle.fluid.load + # paddle.save, legacy paddle.base.load self.replace_static_save(prog, path) self.set_zero(prog, place) paddle.static.load(prog, path) for var in prog.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, np.array(base_t)) - # legacy paddle.fluid.save, paddle.load + # legacy paddle.base.save, paddle.load paddle.static.save(prog, path) self.set_zero(prog, place) self.replace_static_load(prog, path) for var in prog.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) @@ -257,13 +257,13 @@ def test_replace_static_save_load(self): path_vars = 
'test_replace_save_load_return_tensor_static/model' for var in prog.list_vars(): if var.persistable: - tensor = var.get_value(fluid.global_scope()) + tensor = var.get_value(base.global_scope()) paddle.save( tensor, os.path.join(self.temp_dir.name, path_vars, var.name), ) with self.assertRaises(TypeError): - var.get_value('fluid.global_scope()') + var.get_value('base.global_scope()') with self.assertRaises(ValueError): x.get_value() with self.assertRaises(TypeError): @@ -285,7 +285,7 @@ def test_replace_static_save_load(self): ) var.set_value(tensor) new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) @@ -360,7 +360,7 @@ def test_single_pickle_var_dygraph(self): self.assertTrue( isinstance( t_dygraph, - paddle.fluid.core.eager.Tensor, + paddle.base.core.eager.Tensor, ) ) np.testing.assert_array_equal(tensor.numpy(), np_dygraph) @@ -368,7 +368,7 @@ def test_single_pickle_var_dygraph(self): paddle.enable_static() lod_static = paddle.load(path) np_static = paddle.load(path, return_numpy=True) - self.assertTrue(isinstance(lod_static, paddle.fluid.core.LoDTensor)) + self.assertTrue(isinstance(lod_static, paddle.base.core.LoDTensor)) np.testing.assert_array_equal(tensor.numpy(), np_static) np.testing.assert_array_equal(tensor.numpy(), np.array(lod_static)) @@ -383,9 +383,9 @@ def test_single_pickle_var_static(self): z = paddle.static.nn.fc(x, 128) loss = paddle.mean(z) place = ( - fluid.CPUPlace() - if not paddle.fluid.core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + base.CPUPlace() + if not paddle.base.core.is_compiled_with_cuda() + else base.CUDAPlace(0) ) exe = paddle.static.Executor(place) exe.run(paddle.static.default_startup_program()) @@ -394,7 +394,7 @@ def test_single_pickle_var_static(self): if list(var.shape) == [IMAGE_SIZE, 128]: tensor = var.get_value() break - scope = fluid.global_scope() + scope = base.global_scope() origin_tensor = np.array(tensor) path = os.path.join( self.temp_dir.name, 'test_single_pickle_var_static/var' @@ -438,9 +438,9 @@ def test_dygraph_save_static_load(self): y_static = layer(data) program = paddle.static.default_main_program() place = ( - fluid.CPUPlace() - if not paddle.fluid.core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + base.CPUPlace() + if not paddle.base.core.is_compiled_with_cuda() + else base.CUDAPlace(0) ) exe = paddle.static.Executor(paddle.CPUPlace()) exe.run(paddle.static.default_startup_program()) @@ -571,7 +571,7 @@ def test_save_load_complex_object_dygraph_save(self): self.assertTrue(load_tensor2['epoch'] == 123) self.assertTrue( - isinstance(load_tensor3[0], paddle.fluid.core.LoDTensor) + isinstance(load_tensor3[0], paddle.base.core.LoDTensor) ) np.testing.assert_array_equal( np.array(load_tensor3[0]), obj3[0].numpy() @@ -582,7 +582,7 @@ def test_save_load_complex_object_dygraph_save(self): self.assertTrue( isinstance( load_tensor3[2]["state_dict"][k], - paddle.fluid.core.LoDTensor, + paddle.base.core.LoDTensor, ) ) np.testing.assert_array_equal( @@ -592,14 +592,14 @@ def test_save_load_complex_object_dygraph_save(self): for k, v in state_dict.items(): self.assertTrue( isinstance( - load_tensor3[2]["opt"][k], paddle.fluid.core.LoDTensor + load_tensor3[2]["opt"][k], paddle.base.core.LoDTensor ) ) np.testing.assert_array_equal( np.array(load_tensor3[2]['opt'][k]), v.numpy() ) - self.assertTrue(load_tensor4[0], paddle.fluid.core.LoDTensor) + self.assertTrue(load_tensor4[0], 
paddle.base.core.LoDTensor) np.testing.assert_array_equal(np.array(load_tensor4[0]), obj4[0]) load_array1 = paddle.load(path1, return_numpy=True) @@ -641,9 +641,9 @@ def test_save_load_complex_object_static_save(self): z = paddle.static.nn.fc(z, 128, bias_attr=False) loss = paddle.mean(z) place = ( - fluid.CPUPlace() - if not paddle.fluid.core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + base.CPUPlace() + if not paddle.base.core.is_compiled_with_cuda() + else base.CUDAPlace(0) ) prog = paddle.static.default_main_program() exe = paddle.static.Executor(place) @@ -705,15 +705,15 @@ def test_save_load_complex_object_static_save(self): ) self.assertTrue(load_tensor2['epoch'] == 123) - self.assertTrue(isinstance(load_tensor3[0], fluid.core.LoDTensor)) + self.assertTrue(isinstance(load_tensor3[0], base.core.LoDTensor)) np.testing.assert_array_equal(np.array(load_tensor3[0]), obj3[0]) - self.assertTrue(isinstance(load_tensor3[1], fluid.core.LoDTensor)) + self.assertTrue(isinstance(load_tensor3[1], base.core.LoDTensor)) np.testing.assert_array_equal(np.array(load_tensor3[1]), obj3[1]) for k, v in state_dict.items(): self.assertTrue( isinstance( - load_tensor3[2]["state_dict"][k], fluid.core.LoDTensor + load_tensor3[2]["state_dict"][k], base.core.LoDTensor ) ) np.testing.assert_array_equal( @@ -722,13 +722,13 @@ def test_save_load_complex_object_static_save(self): for k, v in state_dict.items(): self.assertTrue( - isinstance(load_tensor3[2]["opt"][k], fluid.core.LoDTensor) + isinstance(load_tensor3[2]["opt"][k], base.core.LoDTensor) ) np.testing.assert_array_equal( np.array(load_tensor3[2]['opt'][k]), np.array(v) ) - self.assertTrue(isinstance(load_tensor4[0], fluid.core.LoDTensor)) + self.assertTrue(isinstance(load_tensor4[0], base.core.LoDTensor)) np.testing.assert_array_equal(np.array(load_tensor4[0]), obj4[0]) load_array1 = paddle.load(path1, return_numpy=True) @@ -788,14 +788,14 @@ def test_save_load_complex_object_static_save(self): self.assertTrue( isinstance( load_tensor3[0], - fluid.core.eager.Tensor, + base.core.eager.Tensor, ) ) np.testing.assert_array_equal(load_tensor3[0].numpy(), obj3[0]) self.assertTrue( isinstance( load_tensor3[1], - fluid.core.eager.Tensor, + base.core.eager.Tensor, ) ) np.testing.assert_array_equal(load_tensor3[1].numpy(), obj3[1]) @@ -804,7 +804,7 @@ def test_save_load_complex_object_static_save(self): self.assertTrue( isinstance( load_tensor3[2]["state_dict"][k], - fluid.core.eager.Tensor, + base.core.eager.Tensor, ) ) np.testing.assert_array_equal( @@ -815,7 +815,7 @@ def test_save_load_complex_object_static_save(self): self.assertTrue( isinstance( load_tensor3[2]["opt"][k], - fluid.core.eager.Tensor, + base.core.eager.Tensor, ) ) np.testing.assert_array_equal( @@ -825,7 +825,7 @@ def test_save_load_complex_object_static_save(self): self.assertTrue( isinstance( load_tensor4[0], - fluid.core.eager.Tensor, + base.core.eager.Tensor, ) ) np.testing.assert_array_equal(load_tensor4[0].numpy(), obj4[0]) @@ -874,8 +874,8 @@ def test_varbase_binary_var(self): load_tensor = paddle.load(path, return_numpy=False) origin_array = varbase.numpy() load_tensor_array = load_tensor.numpy() - if paddle.fluid.core.is_compiled_with_cuda(): - fluid.core._cuda_synchronize(paddle.CUDAPlace(0)) + if paddle.base.core.is_compiled_with_cuda(): + base.core._cuda_synchronize(paddle.CUDAPlace(0)) np.testing.assert_array_equal(origin_array, load_array) np.testing.assert_array_equal(origin_array, load_tensor_array) @@ -916,9 +916,9 @@ def test_static_save_to_memory(self): z = 
paddle.static.nn.fc(z, 128, bias_attr=False) loss = paddle.mean(z) place = ( - fluid.CPUPlace() - if not paddle.fluid.core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + base.CPUPlace() + if not paddle.base.core.is_compiled_with_cuda() + else base.CUDAPlace(0) ) prog = paddle.static.default_main_program() exe = paddle.static.Executor(place) diff --git a/test/legacy_test/test_paddle_save_load_binary.py b/test/legacy_test/test_paddle_save_load_binary.py index a4f3709c556be..df7304cf1d19e 100644 --- a/test/legacy_test/test_paddle_save_load_binary.py +++ b/test/legacy_test/test_paddle_save_load_binary.py @@ -22,8 +22,8 @@ from test_imperative_base import new_program_scope import paddle -from paddle import fluid -from paddle.fluid import framework +from paddle import base +from paddle.base import framework IMAGE_SIZE = 784 @@ -39,7 +39,7 @@ def tearDown(self): def set_zero(self, prog, place, scope=None): if scope is None: - scope = fluid.global_scope() + scope = base.global_scope() for var in prog.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: ten = scope.find_var(var.name).get_tensor() @@ -81,9 +81,9 @@ def test_replace_save_load_vars(self): z = paddle.static.nn.fc(z, 128, bias_attr=False) loss = paddle.mean(z) place = ( - fluid.CPUPlace() - if not paddle.fluid.core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + base.CPUPlace() + if not paddle.base.core.is_compiled_with_cuda() + else base.CUDAPlace(0) ) exe = paddle.static.Executor(place) exe.run(paddle.static.default_startup_program()) @@ -92,7 +92,7 @@ def test_replace_save_load_vars(self): for var in prog.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) # make sure all the paramerter or optimizer var have been update self.assertTrue(np.sum(np.abs(t)) != 0) @@ -114,7 +114,7 @@ def test_replace_save_load_vars(self): for var in prog.list_vars(): if var.persistable: new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) base_t = base_map[var.name] @@ -131,7 +131,7 @@ def test_replace_save_load_vars(self): for var in prog.list_vars(): if var.persistable: new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) base_t = base_map[var.name] @@ -149,15 +149,15 @@ def test_save_load_lod_tensor(self): OUTPUT_NUM, name='fc_vars', ) - prog = fluid.default_main_program() + prog = base.default_main_program() place = ( - fluid.CPUPlace() - if not paddle.fluid.core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + base.CPUPlace() + if not paddle.base.core.is_compiled_with_cuda() + else base.CUDAPlace(0) ) - exe = fluid.Executor(place) + exe = base.Executor(place) prog = paddle.static.default_main_program() - exe.run(fluid.default_startup_program()) + exe.run(base.default_startup_program()) dirname = os.path.join( self.temp_dir.name, 'test_save_load_lod_tensor1/tensor_' @@ -178,7 +178,7 @@ def test_save_load_lod_tensor(self): is_zeros = np.array(var.get_value()) loaded_tensor = paddle.load(dirname + 'fc_vars.w_0') - self.assertTrue(isinstance(loaded_tensor, fluid.core.LoDTensor)) + self.assertTrue(isinstance(loaded_tensor, base.core.LoDTensor)) self.assertTrue( list(loaded_tensor.shape()) == [IMAGE_SIZE, OUTPUT_NUM] ) @@ -199,11 +199,11 @@ def test_save_load_lod_tensor(self): paddle.load(path) with self.assertRaises(ValueError): - 
temp_lod = fluid.core.LoDTensor() + temp_lod = base.core.LoDTensor() paddle.save(temp_lod, path, use_binary_format=True) with self.assertRaises(RuntimeError): - fluid.core.save_lod_tensor( + base.core.save_lod_tensor( temp_lod, os.path.join( self.temp_dir.name, @@ -212,7 +212,7 @@ def test_save_load_lod_tensor(self): ) with self.assertRaises(RuntimeError): - fluid.core.load_lod_tensor( + base.core.load_lod_tensor( temp_lod, os.path.join( self.temp_dir.name, @@ -237,14 +237,14 @@ def test_save_load_lod_tensor(self): def test_save_load_selected_rows(self): paddle.enable_static() place = ( - fluid.CPUPlace() - if not paddle.fluid.core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + base.CPUPlace() + if not paddle.base.core.is_compiled_with_cuda() + else base.CUDAPlace(0) ) height = 10 rows = [0, 4, 7] row_numel = 12 - selected_rows = fluid.core.SelectedRows(rows, height) + selected_rows = base.core.SelectedRows(rows, height) path = os.path.join( self.temp_dir.name, 'test_paddle_save_load_selected_rows/sr.pdsr' ) @@ -259,13 +259,13 @@ def test_save_load_selected_rows(self): paddle.save(selected_rows, path, use_binary_format=True) load_sr = paddle.load(path) - self.assertTrue(isinstance(load_sr, fluid.core.SelectedRows)) + self.assertTrue(isinstance(load_sr, base.core.SelectedRows)) self.assertTrue(list(load_sr.rows()) == rows) self.assertTrue(load_sr.height() == height) np.testing.assert_array_equal(np.array(load_sr.get_tensor()), np_array) with self.assertRaises(RuntimeError): - fluid.core.save_selected_rows( + base.core.save_selected_rows( selected_rows, os.path.join( self.temp_dir.name, @@ -273,7 +273,7 @@ def test_save_load_selected_rows(self): ), ) with self.assertRaises(RuntimeError): - fluid.core.load_selected_rows( + base.core.load_selected_rows( selected_rows, os.path.join( self.temp_dir.name, @@ -288,7 +288,7 @@ def test_save_load_selected_rows(self): # load from memory selected_rows_mem = paddle.load(byio) to_array_mem = np.array(selected_rows_mem) - self.assertTrue(isinstance(selected_rows_mem, fluid.core.SelectedRows)) + self.assertTrue(isinstance(selected_rows_mem, base.core.SelectedRows)) self.assertTrue(list(selected_rows_mem.rows()) == rows) self.assertTrue(selected_rows_mem.height() == height) np.testing.assert_array_equal( diff --git a/test/legacy_test/test_paddlescience.py b/test/legacy_test/test_paddlescience.py index e3fbe0c25dbe7..06c5abe80df89 100644 --- a/test/legacy_test/test_paddlescience.py +++ b/test/legacy_test/test_paddlescience.py @@ -15,10 +15,10 @@ import unittest import paddle -from paddle import fluid, jit, nn +from paddle import base, jit, nn paddle.jit.enable_to_static(True) -fluid.core._set_prim_all_enabled(True) +base.core._set_prim_all_enabled(True) x = paddle.randn([4, 1]) y = paddle.randn([4, 1]) diff --git a/test/legacy_test/test_pairwise_distance.py b/test/legacy_test/test_pairwise_distance.py index e89a713282db2..2f9199f48c04a 100644 --- a/test/legacy_test/test_pairwise_distance.py +++ b/test/legacy_test/test_pairwise_distance.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base def np_pairwise_distance(x, y, p=2.0, epsilon=1e-6, keepdim=False): @@ -48,9 +48,9 @@ def test_static( prog = paddle.static.Program() startup_prog = paddle.static.Program() place = ( - fluid.CUDAPlace(0) - if paddle.fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if paddle.base.core.is_compiled_with_cuda() + else base.CPUPlace() ) paddle.enable_static() with 
paddle.static.program_guard(prog, startup_prog):
diff --git a/test/legacy_test/test_parallel_dygraph_dataparallel.py b/test/legacy_test/test_parallel_dygraph_dataparallel.py
index aedf97b168381..de3160e9c6f9c 100644
--- a/test/legacy_test/test_parallel_dygraph_dataparallel.py
+++ b/test/legacy_test/test_parallel_dygraph_dataparallel.py
@@ -18,7 +18,7 @@
 import time
 import unittest

-from paddle import fluid
+from paddle import base
 from paddle.distributed.utils.launch_utils import (
     TrainerProc,
     find_free_ports,
@@ -160,8 +160,8 @@ def run_mnist_2gpu(
         allocator_strategy="auto_growth",
     ):
         if (
-            not fluid.core.is_compiled_with_cuda()
-            or fluid.core.get_cuda_device_count() == 0
+            not base.core.is_compiled_with_cuda()
+            or base.core.get_cuda_device_count() == 0
         ):
             return

diff --git a/test/legacy_test/test_parallel_dygraph_dataparallel_cpuonly.py b/test/legacy_test/test_parallel_dygraph_dataparallel_cpuonly.py
index b2f7831489100..5a944284414bf 100644
--- a/test/legacy_test/test_parallel_dygraph_dataparallel_cpuonly.py
+++ b/test/legacy_test/test_parallel_dygraph_dataparallel_cpuonly.py
@@ -103,8 +103,8 @@ def start_local_trainers(

 class TestMultipleGpus(unittest.TestCase):
     def run_mnist_2gpu(self, target_file_name):
-        # if not fluid.core.is_compiled_with_cuda(
-        # ) or fluid.core.get_cuda_device_count() == 0:
+        # if not base.core.is_compiled_with_cuda(
+        # ) or base.core.get_cuda_device_count() == 0:
         # return

         selected_gpus = get_gpus('0,1')
diff --git a/test/legacy_test/test_parallel_dygraph_transformer_gloo.py b/test/legacy_test/test_parallel_dygraph_transformer_gloo.py
index 7dee57cb29ff0..714da4b720696 100644
--- a/test/legacy_test/test_parallel_dygraph_transformer_gloo.py
+++ b/test/legacy_test/test_parallel_dygraph_transformer_gloo.py
@@ -17,7 +17,7 @@

 from test_dist_base import TestDistBase

-from paddle import fluid
+from paddle import base

 flag_name = os.path.splitext(__file__)[0]
@@ -46,7 +46,7 @@ def _setup_config(self):
         self._find_unused_parameters = False

     def test_transformer(self):
-        if fluid.core.is_compiled_with_cuda():
+        if base.core.is_compiled_with_cuda():
             self.check_with_place(
                 "parallel_dygraph_transformer.py",
                 delta=1e-5,
diff --git a/test/legacy_test/test_parallel_executor_seresnext_with_fuse_all_reduce_cpu.py b/test/legacy_test/test_parallel_executor_seresnext_with_fuse_all_reduce_cpu.py
index 8b5e892364c0c..75d3d85e20e5b 100644
--- a/test/legacy_test/test_parallel_executor_seresnext_with_fuse_all_reduce_cpu.py
+++ b/test/legacy_test/test_parallel_executor_seresnext_with_fuse_all_reduce_cpu.py
@@ -12,10 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from paddle import fluid
+from paddle import base

-fluid.core._set_fuse_parameter_group_size(3)
-fluid.core._set_fuse_parameter_memory_size(131072)
+base.core._set_fuse_parameter_group_size(3)
+base.core._set_fuse_parameter_memory_size(131072)

 import unittest
 from functools import partial
diff --git a/test/legacy_test/test_parallel_executor_seresnext_with_fuse_all_reduce_gpu.py b/test/legacy_test/test_parallel_executor_seresnext_with_fuse_all_reduce_gpu.py
index 0a39002275edc..752538efaa059 100644
--- a/test/legacy_test/test_parallel_executor_seresnext_with_fuse_all_reduce_gpu.py
+++ b/test/legacy_test/test_parallel_executor_seresnext_with_fuse_all_reduce_gpu.py
@@ -12,10 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from paddle import fluid
+from paddle import base

-fluid.core._set_fuse_parameter_group_size(3)
-fluid.core._set_fuse_parameter_memory_size(131072)
+base.core._set_fuse_parameter_group_size(3)
+base.core._set_fuse_parameter_memory_size(131072)

 import unittest
 from functools import partial
diff --git a/test/legacy_test/test_parallel_executor_seresnext_with_reduce_cpu.py b/test/legacy_test/test_parallel_executor_seresnext_with_reduce_cpu.py
index d1e3d9f6d0c9a..9dead36622763 100644
--- a/test/legacy_test/test_parallel_executor_seresnext_with_reduce_cpu.py
+++ b/test/legacy_test/test_parallel_executor_seresnext_with_reduce_cpu.py
@@ -17,7 +17,7 @@
 import seresnext_net
 from parallel_executor_test_base import DeviceType, TestParallelExecutorBase

-from paddle.fluid import core
+from paddle.base import core


 class TestResnetWithReduceBase(TestParallelExecutorBase):
diff --git a/test/legacy_test/test_parallel_executor_transformer.py b/test/legacy_test/test_parallel_executor_transformer.py
index 3b72fde560dd5..cdf24938e2573 100644
--- a/test/legacy_test/test_parallel_executor_transformer.py
+++ b/test/legacy_test/test_parallel_executor_transformer.py
@@ -22,7 +22,7 @@
 import paddle
 from paddle.dataset import wmt16
-from paddle.fluid import core
+from paddle.base import core

 os.environ['CPU_NUM'] = str(4)
diff --git a/test/legacy_test/test_parameter.py b/test/legacy_test/test_parameter.py
index 30bf9dc0d8280..415167e31fe61 100644
--- a/test/legacy_test/test_parameter.py
+++ b/test/legacy_test/test_parameter.py
@@ -18,10 +18,10 @@
 import numpy as np

 import paddle
-from paddle.fluid import core
-from paddle.fluid.dygraph import guard
-from paddle.fluid.executor import Executor
-from paddle.fluid.framework import Variable, default_main_program
+from paddle.base import core
+from paddle.base.dygraph import guard
+from paddle.base.executor import Executor
+from paddle.base.framework import Variable, default_main_program

 paddle.enable_static()
 main_program = default_main_program()
diff --git a/test/legacy_test/test_partial_eager_deletion_transformer.py b/test/legacy_test/test_partial_eager_deletion_transformer.py
index 2d78f886963a9..acfc9d02567d8 100644
--- a/test/legacy_test/test_partial_eager_deletion_transformer.py
+++ b/test/legacy_test/test_partial_eager_deletion_transformer.py
@@ -15,9 +15,9 @@
 import unittest

 import paddle
-from paddle import fluid
+from paddle import base

-fluid.core._set_eager_deletion_mode(0.0, 0.55, True)
+base.core._set_eager_deletion_mode(0.0, 0.55, True)

 if __name__ == '__main__':
     paddle.enable_static()
diff --git a/test/legacy_test/test_pass_builder.py b/test/legacy_test/test_pass_builder.py
index 19912b6df065f..b976d29ca0db3 100644
--- a/test/legacy_test/test_pass_builder.py
+++ b/test/legacy_test/test_pass_builder.py
@@ -22,16 +22,16 @@
 from simple_nets import simple_fc_net

 import paddle
-from paddle import fluid
-from paddle.fluid import compiler, core
+from paddle import base
+from paddle.base import compiler, core


 class TestPassBuilder(unittest.TestCase):
     def check_network_convergence(self, use_cuda, build_strategy=None):
         os.environ['CPU_NUM'] = str(4)
-        main = fluid.Program()
-        startup = fluid.Program()
-        with fluid.program_guard(main, startup):
+        main = base.Program()
+        startup = base.Program()
+        with base.program_guard(main, startup):
             loss = simple_fc_net()

             test_program = main.clone(for_test=True)
@@ -42,8 +42,8 @@ def check_network_convergence(self, use_cuda, build_strategy=None):
         image = np.random.normal(size=(batch_size, 784)).astype('float32')
         label =
np.random.randint(0, 10, (batch_size, 1), dtype="int64") - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() + exe = base.Executor(place) exe.run(startup) feed_dict = {'image': image, 'label': label} @@ -83,7 +83,7 @@ def check_network_convergence(self, use_cuda, build_strategy=None): ) def test_parallel_testing_with_new_strategy(self): - build_strategy = fluid.BuildStrategy() + build_strategy = base.BuildStrategy() self.assertFalse(build_strategy.fuse_elewise_add_act_ops) build_strategy.fuse_elewise_add_act_ops = True # FIXME: currently fuse_elewise_add_act_ops not compatible with below options diff --git a/test/legacy_test/test_pixel_shuffle_op.py b/test/legacy_test/test_pixel_shuffle_op.py index 7c6f18479fd11..7ab983476c5d6 100644 --- a/test/legacy_test/test_pixel_shuffle_op.py +++ b/test/legacy_test/test_pixel_shuffle_op.py @@ -19,8 +19,8 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def pixel_shuffle_np(x, up_factor, data_format="NCHW"): @@ -182,14 +182,14 @@ def test_static_graph_functional(self): exe = paddle.static.Executor(place=place) res_1 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x": self.x_1_np}, fetch_list=out_1, use_prune=True, )[0] res_2 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x2": self.x_2_np}, fetch_list=out_2, use_prune=True, @@ -222,13 +222,13 @@ def test_api_fp16(self): out_2_np = pixel_shuffle_np(self.x_2_np, 3, "NHWC") exe = paddle.static.Executor(place=place) res_1 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x": self.x_1_np}, fetch_list=out_1, use_prune=True, )[0] res_2 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x2": self.x_2_np}, fetch_list=out_2, use_prune=True, @@ -260,14 +260,14 @@ def test_static_graph_layer(self): exe = paddle.static.Executor(place=place) res_1 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x": self.x_1_np}, fetch_list=out_1, use_prune=True, )[0] res_2 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x2": self.x_2_np}, fetch_list=out_2, use_prune=True, @@ -319,21 +319,21 @@ def test_dygraph2(self): class TestPixelShuffleError(unittest.TestCase): def test_error_functional(self): def error_upscale_factor(): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x = np.random.random([2, 9, 4, 4]).astype("float64") pixel_shuffle = F.pixel_shuffle(paddle.to_tensor(x), 3.33) self.assertRaises(TypeError, error_upscale_factor) def error_0_upscale_factor(): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x = paddle.uniform([1, 1, 1, 1], dtype='float64') pixel_shuffle = F.pixel_shuffle(x, 0) self.assertRaises(ValueError, error_0_upscale_factor) def error_data_format(): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x = np.random.random([2, 9, 4, 4]).astype("float64") pixel_shuffle = F.pixel_shuffle(paddle.to_tensor(x), 3, "WOW") @@ -341,14 +341,14 @@ def error_data_format(): def test_error_layer(self): def error_upscale_factor_layer(): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x = np.random.random([2, 9, 4, 4]).astype("float64") ps = paddle.nn.PixelShuffle(3.33) self.assertRaises(TypeError, error_upscale_factor_layer) def 
error_data_format_layer(): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x = np.random.random([2, 9, 4, 4]).astype("float64") ps = paddle.nn.PixelShuffle(3, "MEOW") diff --git a/test/legacy_test/test_pixel_unshuffle.py b/test/legacy_test/test_pixel_unshuffle.py index 2353ca0192c7e..dc982ded5b4ee 100644 --- a/test/legacy_test/test_pixel_unshuffle.py +++ b/test/legacy_test/test_pixel_unshuffle.py @@ -19,8 +19,8 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def pixel_unshuffle_np(x, down_factor, data_format="NCHW"): @@ -221,14 +221,14 @@ def test_static_graph_functional(self): exe = paddle.static.Executor(place=place) res_1 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x": self.x_1_np}, fetch_list=out_1, use_prune=True, )[0] res_2 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x2": self.x_2_np}, fetch_list=out_2, use_prune=True, @@ -263,14 +263,14 @@ def test_static_graph_layer(self): exe = paddle.static.Executor(place=place) res_1 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x": self.x_1_np}, fetch_list=out_1, use_prune=True, )[0] res_2 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x2": self.x_2_np}, fetch_list=out_2, use_prune=True, @@ -337,28 +337,28 @@ def test_error_functional(self): '''test_error_functional''' def error_input(): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x = np.random.random([4, 12, 12]).astype("float64") pixel_unshuffle = F.pixel_unshuffle(paddle.to_tensor(x), 2) self.assertRaises(ValueError, error_input) def error_downscale_factor_1(): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x = np.random.random([2, 1, 12, 12]).astype("float64") pixel_unshuffle = F.pixel_unshuffle(paddle.to_tensor(x), 3.33) self.assertRaises(TypeError, error_downscale_factor_1) def error_downscale_factor_2(): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x = np.random.random([2, 1, 12, 12]).astype("float64") pixel_unshuffle = F.pixel_unshuffle(paddle.to_tensor(x), -1) self.assertRaises(ValueError, error_downscale_factor_2) def error_data_format(): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x = np.random.random([2, 1, 12, 12]).astype("float64") pixel_unshuffle = F.pixel_unshuffle( paddle.to_tensor(x), 3, "WOW" @@ -370,7 +370,7 @@ def test_error_layer(self): '''test_error_layer''' def error_input_layer(): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x = np.random.random([4, 12, 12]).astype("float64") ps = paddle.nn.PixelUnshuffle(2) ps(paddle.to_tensor(x)) @@ -378,21 +378,21 @@ def error_input_layer(): self.assertRaises(ValueError, error_input_layer) def error_downscale_factor_layer_1(): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x = np.random.random([2, 1, 12, 12]).astype("float64") ps = paddle.nn.PixelUnshuffle(3.33) self.assertRaises(TypeError, error_downscale_factor_layer_1) def error_downscale_factor_layer_2(): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x = np.random.random([2, 1, 12, 12]).astype("float64") ps = paddle.nn.PixelUnshuffle(-1) self.assertRaises(ValueError, error_downscale_factor_layer_2) def error_data_format_layer(): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x = 
np.random.random([2, 1, 12, 12]).astype("float64") ps = paddle.nn.PixelUnshuffle(3, "MEOW") diff --git a/test/legacy_test/test_poisson_nll_loss.py b/test/legacy_test/test_poisson_nll_loss.py index 14ad375519914..4cfa517856780 100644 --- a/test/legacy_test/test_poisson_nll_loss.py +++ b/test/legacy_test/test_poisson_nll_loss.py @@ -18,7 +18,7 @@ import paddle import paddle.nn.functional as F -from paddle.fluid import core +from paddle.base import core np.random.seed(100) diff --git a/test/legacy_test/test_poisson_op.py b/test/legacy_test/test_poisson_op.py index 84edf6a322189..f6667407ed92e 100644 --- a/test/legacy_test/test_poisson_op.py +++ b/test/legacy_test/test_poisson_op.py @@ -23,7 +23,7 @@ ) import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() paddle.seed(100) @@ -106,7 +106,7 @@ def test_static(self): self.assertTrue(np.min(y_np) >= 0) def test_dygraph(self): - with paddle.fluid.dygraph.base.guard(): + with paddle.base.dygraph.base.guard(): x = paddle.randn([10, 10], dtype='float32') y = paddle.poisson(x) self.assertTrue(np.min(y.numpy()) >= 0) diff --git a/test/legacy_test/test_polar.py b/test/legacy_test/test_polar.py index 21b72c97d5879..5336fe38037a4 100644 --- a/test/legacy_test/test_polar.py +++ b/test/legacy_test/test_polar.py @@ -18,7 +18,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core np.random.seed(10) diff --git a/test/legacy_test/test_polygamma_op.py b/test/legacy_test/test_polygamma_op.py index 9b5cf3062f3d2..88736f89173e0 100644 --- a/test/legacy_test/test_polygamma_op.py +++ b/test/legacy_test/test_polygamma_op.py @@ -19,7 +19,7 @@ from scipy import special import paddle -from paddle.fluid import core +from paddle.base import core np.random.seed(100) paddle.seed(100) diff --git a/test/legacy_test/test_pool1d_api.py b/test/legacy_test/test_pool1d_api.py index 871c1817202fd..1381470481c53 100644 --- a/test/legacy_test/test_pool1d_api.py +++ b/test/legacy_test/test_pool1d_api.py @@ -18,8 +18,8 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def adaptive_start_index(index, input_size, output_size): @@ -117,12 +117,12 @@ def avg_pool1D_forward_naive( class TestPool1D_API(unittest.TestCase): def setUp(self): np.random.seed(123) - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) def check_avg_static_results(self, place): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input = paddle.static.data( name="input", shape=[2, 3, 32], dtype="float32" ) @@ -133,9 +133,9 @@ def check_avg_static_results(self, place): input_np, ksize=[2], strides=[2], paddings=[0], ceil_mode=False ) - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": input_np}, fetch_list=[result], ) @@ -168,9 +168,9 @@ def check_avg_static_results_fp16(self, place): np.testing.assert_allclose(fetches[0], result_np, rtol=1e-03) def check_avg_dygraph_results(self, place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([2, 3, 32]).astype("float32") - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) result = 
F.avg_pool1d(input, kernel_size=2, stride=2, padding=[0]) result_np = avg_pool1D_forward_naive( @@ -186,9 +186,9 @@ def check_avg_dygraph_results(self, place): np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_avg_dygraph_padding_results(self, place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([2, 3, 32]).astype("float32") - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) result = F.avg_pool1d( input, kernel_size=2, stride=2, padding=[1], exclusive=True ) @@ -207,7 +207,7 @@ def check_avg_dygraph_padding_results(self, place): np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_max_static_results(self, place): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input = paddle.static.data( name="input", shape=[2, 3, 32], dtype="float32" ) @@ -218,18 +218,18 @@ def check_max_static_results(self, place): input_np, ksize=[2], strides=[2], paddings=[0] ) - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": input_np}, fetch_list=[result], ) np.testing.assert_allclose(fetches[0], result_np, rtol=1e-05) def check_max_dygraph_results(self, place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([2, 3, 32]).astype("float32") - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) result = F.max_pool1d(input, kernel_size=2, stride=2, padding=0) result_np = max_pool1D_forward_naive( @@ -245,9 +245,9 @@ def check_max_dygraph_results(self, place): np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_max_dygraph_return_index_results(self, place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([2, 3, 32]).astype("float32") - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) result, index = F.max_pool1d( input, kernel_size=2, stride=2, padding=0, return_mask=True ) @@ -265,9 +265,9 @@ def check_max_dygraph_return_index_results(self, place): np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_max_dygraph_padding_same(self, place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([2, 3, 32]).astype("float32") - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) result = F.max_pool1d( input, kernel_size=2, stride=2, padding="SAME" ) @@ -279,9 +279,9 @@ def check_max_dygraph_padding_same(self, place): np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_avg_dygraph_padding_same(self, place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([2, 3, 32]).astype("float32") - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) result = F.avg_pool1d( input, kernel_size=2, stride=2, padding="SAME" ) @@ -307,11 +307,11 @@ def test_pool1d(self): class TestPool1DError_API(unittest.TestCase): def test_error_api(self): def run1(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) padding = [[2]] res_pd = F.max_pool1d( input_pd, 
kernel_size=2, stride=2, padding=padding @@ -320,11 +320,11 @@ def run1(): self.assertRaises(ValueError, run1) def run2(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) padding = [[2]] res_pd = F.max_pool1d( input_pd, kernel_size=2, stride=2, padding=padding @@ -333,11 +333,11 @@ def run2(): self.assertRaises(ValueError, run2) def run3(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) padding = "padding" res_pd = F.max_pool1d( input_pd, kernel_size=2, stride=2, padding=padding @@ -346,11 +346,11 @@ def run3(): self.assertRaises(ValueError, run3) def run4(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) padding = "VALID" res_pd = F.max_pool1d( input_pd, @@ -363,11 +363,11 @@ def run4(): self.assertRaises(ValueError, run4) def run5(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) padding = "VALID" res_pd = F.max_pool1d( input_pd, @@ -380,11 +380,11 @@ def run5(): self.assertRaises(ValueError, run5) def run6(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) padding = "VALID" res_pd = F.avg_pool1d( input_pd, @@ -397,11 +397,11 @@ def run6(): self.assertRaises(ValueError, run6) def run7(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) padding = "paddle" res_pd = F.avg_pool1d( input_pd, @@ -414,11 +414,11 @@ def run7(): self.assertRaises(ValueError, run7) def run_kernel_out_of_range(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) padding = 0 res_pd = F.avg_pool1d( input_pd, @@ -431,11 +431,11 @@ def run_kernel_out_of_range(): self.assertRaises(ValueError, run_kernel_out_of_range) def run_stride_out_of_range(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) padding = 0 res_pd = F.avg_pool1d( input_pd, @@ -448,7 +448,7 @@ def run_stride_out_of_range(): self.assertRaises(ValueError, run_stride_out_of_range) def run_zero_stride(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): array = np.array([1], dtype=np.float32) x = paddle.to_tensor( np.reshape(array, [1, 1, 1]), dtype='float32' @@ -460,7 +460,7 @@ def run_zero_stride(): self.assertRaises(ValueError, run_zero_stride) def run_zero_tuple_stride(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): array = 
np.array([1], dtype=np.float32) x = paddle.to_tensor( np.reshape(array, [1, 1, 1]), dtype='float32' diff --git a/test/legacy_test/test_pool2d_api.py b/test/legacy_test/test_pool2d_api.py index a4971a399ae64..fcca5381fa4f0 100644 --- a/test/legacy_test/test_pool2d_api.py +++ b/test/legacy_test/test_pool2d_api.py @@ -22,20 +22,20 @@ ) import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.nn.functional import avg_pool2d, max_pool2d class TestPool2D_API(unittest.TestCase): def setUp(self): np.random.seed(123) - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) def check_avg_static_results(self, place): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input = paddle.static.data( name="input", shape=[2, 3, 32, 32], dtype="float32" ) @@ -50,18 +50,18 @@ def check_avg_static_results(self, place): pool_type='avg', ) - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": input_np}, fetch_list=[result], ) np.testing.assert_allclose(fetches[0], result_np, rtol=1e-05) def check_avg_dygraph_results(self, place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32]).astype("float32") - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) result = avg_pool2d(input, kernel_size=2, stride=2, padding=0) result_np = pool2D_forward_naive( @@ -80,9 +80,9 @@ def check_avg_dygraph_results(self, place): np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_avg_dygraph_padding_results(self, place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32]).astype("float32") - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) result = avg_pool2d( input, kernel_size=2, stride=2, padding=1, ceil_mode=False ) @@ -104,9 +104,9 @@ def check_avg_dygraph_padding_results(self, place): np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_avg_dygraph_ceilmode_results(self, place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32]).astype("float32") - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) result = avg_pool2d( input, kernel_size=2, stride=2, padding=0, ceil_mode=True ) @@ -127,7 +127,7 @@ def check_avg_dygraph_ceilmode_results(self, place): np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_max_static_results(self, place): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input = paddle.static.data( name="input", shape=[2, 3, 32, 32], dtype="float32" ) @@ -142,18 +142,18 @@ def check_max_static_results(self, place): pool_type='max', ) - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": input_np}, fetch_list=[result], ) np.testing.assert_allclose(fetches[0], result_np, rtol=1e-05) def check_max_dygraph_results(self, place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = 
np.random.random([2, 3, 32, 32]).astype("float32") - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) result = max_pool2d( input, kernel_size=2, stride=2, padding=0, return_mask=False ) @@ -174,9 +174,9 @@ def check_max_dygraph_results(self, place): np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_max_dygraph_nhwc_results(self, place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32]).astype("float32") - input = fluid.dygraph.to_variable( + input = base.dygraph.to_variable( np.transpose(input_np, [0, 2, 3, 1]) ) result = max_pool2d( @@ -202,9 +202,9 @@ def check_max_dygraph_nhwc_results(self, place): ) def check_max_dygraph_padding_results(self, place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32]).astype("float32") - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) result = max_pool2d( input, kernel_size=2, stride=2, padding=1, ceil_mode=False ) @@ -226,9 +226,9 @@ def check_max_dygraph_padding_results(self, place): np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_max_dygraph_ceilmode_results(self, place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32]).astype("float32") - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) result = max_pool2d( input, kernel_size=2, stride=2, padding=0, ceil_mode=True ) @@ -249,9 +249,9 @@ def check_max_dygraph_ceilmode_results(self, place): np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_max_dygraph_stride_is_none(self, place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32]).astype("float32") - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) result, indices = max_pool2d( input, kernel_size=2, @@ -277,9 +277,9 @@ def check_max_dygraph_stride_is_none(self, place): np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_avg_dygraph_stride_is_none(self, place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32]).astype("float32") - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) result = avg_pool2d( input, kernel_size=2, stride=None, padding="SAME" ) @@ -301,9 +301,9 @@ def check_avg_dygraph_stride_is_none(self, place): np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_max_dygraph_padding(self, place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32]).astype("float32") - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) padding = [[0, 0], [0, 0], [0, 0], [0, 0]] result = max_pool2d( input, @@ -329,9 +329,9 @@ def check_max_dygraph_padding(self, place): np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_avg_divisor(self, place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32]).astype("float32") - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) padding = [[0, 0], [0, 0], [0, 0], [0, 0]] result = avg_pool2d( input, @@ -374,11 +374,11 @@ def test_pool2d(self): class 
TestPool2DError_API(unittest.TestCase): def test_error_api(self): def run1(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) padding = [[0, 1], [0, 0], [0, 0], [0, 0]] res_pd = max_pool2d( input_pd, kernel_size=2, stride=2, padding=padding @@ -387,11 +387,11 @@ def run1(): self.assertRaises(ValueError, run1) def run2(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) padding = [[0, 1], [0, 0], [0, 0], [0, 0]] res_pd = max_pool2d( input_pd, @@ -404,11 +404,11 @@ def run2(): self.assertRaises(ValueError, run2) def run3(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) padding = "padding" res_pd = max_pool2d( input_pd, @@ -421,11 +421,11 @@ def run3(): self.assertRaises(ValueError, run3) def run3_avg(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) padding = "padding" res_pd = avg_pool2d( input_pd, @@ -438,11 +438,11 @@ def run3_avg(): self.assertRaises(ValueError, run3_avg) def run4(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) padding = "VALID" res_pd = max_pool2d( input_pd, @@ -456,11 +456,11 @@ def run4(): self.assertRaises(ValueError, run4) def run4_avg(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) padding = "VALID" res_pd = avg_pool2d( input_pd, @@ -474,11 +474,11 @@ def run4_avg(): self.assertRaises(ValueError, run4_avg) def run5(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) padding = "padding" res_pd = avg_pool2d( input_pd, @@ -491,11 +491,11 @@ def run5(): self.assertRaises(ValueError, run5) def run6(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) padding = "VALID" res_pd = avg_pool2d( input_pd, @@ -509,11 +509,11 @@ def run6(): self.assertRaises(ValueError, run6) def run7(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) padding = "VALID" res_pd = avg_pool2d( input_pd, @@ -527,11 +527,11 @@ def run7(): self.assertRaises(ValueError, run7) def run8(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 
32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) padding = "VALID" res_pd = max_pool2d( input_pd, @@ -545,11 +545,11 @@ def run8(): self.assertRaises(ValueError, run8) def run9(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) res_pd = max_pool2d( input_pd, kernel_size=2, @@ -563,11 +563,11 @@ def run9(): self.assertRaises(ValueError, run9) def run_kernel_out_of_range(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) res_pd = avg_pool2d( input_pd, kernel_size=[-1, 2], @@ -580,11 +580,11 @@ def run_kernel_out_of_range(): self.assertRaises(ValueError, run_kernel_out_of_range) def run_stride_out_of_range(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) res_pd = avg_pool2d( input_pd, kernel_size=3, @@ -597,7 +597,7 @@ def run_stride_out_of_range(): self.assertRaises(ValueError, run_stride_out_of_range) def run_zero_stride(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): array = np.array([1], dtype=np.float32) x = paddle.to_tensor( np.reshape(array, [1, 1, 1, 1]), dtype='float32' @@ -609,7 +609,7 @@ def run_zero_stride(): self.assertRaises(ValueError, run_zero_stride) def run_zero_tuple_stride(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): array = np.array([1], dtype=np.float32) x = paddle.to_tensor( np.reshape(array, [1, 1, 1, 1]), dtype='float32' diff --git a/test/legacy_test/test_pool2d_op.py b/test/legacy_test/test_pool2d_op.py index 73c6050228bc3..f55b1a9a9d02c 100644 --- a/test/legacy_test/test_pool2d_op.py +++ b/test/legacy_test/test_pool2d_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core def adaptive_start_index(index, input_size, output_size): @@ -389,7 +389,7 @@ def setUp(self): self.inputs = {'X': convert_float_to_uint16(input)} else: output = output.astype(self.dtype) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(input)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(input)} self.attrs = { 'strides': self.strides, diff --git a/test/legacy_test/test_pool3d_api.py b/test/legacy_test/test_pool3d_api.py index a9e849fb91d41..a2286740391e4 100644 --- a/test/legacy_test/test_pool3d_api.py +++ b/test/legacy_test/test_pool3d_api.py @@ -22,20 +22,20 @@ ) import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.nn.functional import avg_pool3d, max_pool3d class TestPool3D_API(unittest.TestCase): def setUp(self): np.random.seed(123) - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) def check_avg_static_results(self, place): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input = paddle.static.data( name="input", shape=[2, 3, 32, 32, 32], dtype="float32" ) @@ 
-50,18 +50,18 @@ def check_avg_static_results(self, place): pool_type='avg', ) - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": input_np}, fetch_list=[result], ) np.testing.assert_allclose(fetches[0], result_np, rtol=1e-05) def check_avg_dygraph_results(self, place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32, 32]).astype("float32") - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) result = avg_pool3d(input, kernel_size=2, stride=2, padding="SAME") result_np = pool3D_forward_naive( @@ -82,9 +82,9 @@ def check_avg_dygraph_results(self, place): np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_avg_dygraph_padding_results(self, place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32, 32]).astype("float32") - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) result = avg_pool3d( input, kernel_size=2, @@ -116,9 +116,9 @@ def check_avg_dygraph_padding_results(self, place): np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_avg_dygraph_ceilmode_results(self, place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32, 32]).astype("float32") - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) result = avg_pool3d( input, kernel_size=2, stride=2, padding=0, ceil_mode=True ) @@ -140,7 +140,7 @@ def check_avg_dygraph_ceilmode_results(self, place): np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_max_static_results(self, place): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input = paddle.static.data( name="input", shape=[2, 3, 32, 32, 32], dtype="float32" ) @@ -155,18 +155,18 @@ def check_max_static_results(self, place): pool_type='max', ) - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": input_np}, fetch_list=[result], ) np.testing.assert_allclose(fetches[0], result_np, rtol=1e-05) def check_max_dygraph_results(self, place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32, 32]).astype("float32") - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) result = max_pool3d(input, kernel_size=2, stride=2, padding=0) result_np = pool3D_forward_naive( @@ -185,9 +185,9 @@ def check_max_dygraph_results(self, place): np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_max_dygraph_ndhwc_results(self, place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32, 32]).astype("float32") - input = fluid.dygraph.to_variable( + input = base.dygraph.to_variable( np.transpose(input_np, [0, 2, 3, 4, 1]) ) result = max_pool3d( @@ -214,9 +214,9 @@ def check_max_dygraph_ndhwc_results(self, place): ) def check_max_dygraph_ceilmode_results(self, place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32, 32]).astype("float32") - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) result = 
max_pool3d( input, kernel_size=2, stride=2, padding=0, ceil_mode=True ) @@ -238,9 +238,9 @@ def check_max_dygraph_ceilmode_results(self, place): np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_max_dygraph_padding_results(self, place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32, 32]).astype("float32") - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) result = max_pool3d( input, kernel_size=2, stride=2, padding=1, ceil_mode=False ) @@ -262,9 +262,9 @@ def check_max_dygraph_padding_results(self, place): np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_max_dygraph_stride_is_none(self, place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32, 32]).astype("float32") - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) result, indices = max_pool3d( input, kernel_size=2, @@ -290,9 +290,9 @@ def check_max_dygraph_stride_is_none(self, place): np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_max_dygraph_padding(self, place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32, 32]).astype("float32") - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) padding = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0]] result = max_pool3d(input, kernel_size=2, stride=2, padding=padding) @@ -316,9 +316,9 @@ def check_max_dygraph_padding(self, place): np.testing.assert_allclose(result.numpy(), result_np, rtol=1e-05) def check_avg_divisor(self, place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([2, 3, 32, 32, 32]).astype("float32") - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) padding = 0 result = avg_pool3d( input, @@ -368,7 +368,7 @@ def test_pool3d(self): def test_static_fp16_gpu(self): paddle.enable_static() - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): place = paddle.CUDAPlace(0) with paddle.static.program_guard( paddle.static.Program(), paddle.static.Program() @@ -396,8 +396,8 @@ def test_static_fp16_gpu(self): def test_static_bf16_gpu(self): paddle.enable_static() if ( - paddle.fluid.core.is_compiled_with_cuda() - and paddle.fluid.core.is_bfloat16_supported(core.CUDAPlace(0)) + paddle.base.core.is_compiled_with_cuda() + and paddle.base.core.is_bfloat16_supported(core.CUDAPlace(0)) ): place = paddle.CUDAPlace(0) with paddle.static.program_guard( @@ -427,11 +427,11 @@ def test_static_bf16_gpu(self): class TestPool3DError_API(unittest.TestCase): def test_error_api(self): def run1(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32, 32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) padding = [[0, 1], [0, 0], [0, 0], [0, 0], [0, 0]] res_pd = avg_pool3d( input_pd, kernel_size=2, stride=2, padding=padding @@ -440,11 +440,11 @@ def run1(): self.assertRaises(ValueError, run1) def run2(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32, 32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) padding = [[0, 1], [0, 0], [0, 0], [0, 0], [0, 0]] 
res_pd = avg_pool3d( input_pd, @@ -457,11 +457,11 @@ def run2(): self.assertRaises(ValueError, run2) def run3(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32, 32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) padding = [[0, 1], [0, 0], [0, 0], [0, 0], [0, 0]] res_pd = avg_pool3d( input_pd, @@ -474,11 +474,11 @@ def run3(): self.assertRaises(ValueError, run3) def run4(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32, 32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) res_pd = avg_pool3d( input_pd, kernel_size=2, @@ -490,11 +490,11 @@ def run4(): self.assertRaises(ValueError, run4) def run5(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32, 32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) res_pd = max_pool3d( input_pd, kernel_size=2, @@ -506,11 +506,11 @@ def run5(): self.assertRaises(ValueError, run5) def run6(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32, 32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) res_pd = avg_pool3d( input_pd, kernel_size=2, @@ -522,11 +522,11 @@ def run6(): self.assertRaises(ValueError, run6) def run7(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32, 32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) res_pd = max_pool3d( input_pd, kernel_size=2, @@ -538,11 +538,11 @@ def run7(): self.assertRaises(ValueError, run7) def run8(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32, 32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) res_pd = avg_pool3d( input_pd, kernel_size=2, @@ -555,11 +555,11 @@ def run8(): self.assertRaises(ValueError, run8) def run9(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32, 32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) res_pd = max_pool3d( input_pd, kernel_size=2, @@ -572,11 +572,11 @@ def run9(): self.assertRaises(ValueError, run9) def run10(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32, 32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) res_pd = max_pool3d( input_pd, kernel_size=2, @@ -589,11 +589,11 @@ def run10(): self.assertRaises(ValueError, run10) def run_kernel_out_of_range(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = np.random.uniform(-1, 1, [2, 3, 32, 32, 32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) res_pd = avg_pool3d( input_pd, kernel_size=-1, @@ -605,11 +605,11 @@ def run_kernel_out_of_range(): self.assertRaises(ValueError, run_kernel_out_of_range) def run_size_out_of_range(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_np = 
np.random.uniform(-1, 1, [2, 3, 32, 32, 32]).astype( np.float32 ) - input_pd = fluid.dygraph.to_variable(input_np) + input_pd = base.dygraph.to_variable(input_np) res_pd = avg_pool3d( input_pd, kernel_size=2, @@ -621,7 +621,7 @@ def run_size_out_of_range(): self.assertRaises(ValueError, run_size_out_of_range) def run_zero_stride(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): array = np.array([1], dtype=np.float32) x = paddle.to_tensor( np.reshape(array, [1, 1, 1, 1, 1]), dtype='float32' @@ -633,7 +633,7 @@ def run_zero_stride(): self.assertRaises(ValueError, run_zero_stride) def run_zero_tuple_stride(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): array = np.array([1], dtype=np.float32) x = paddle.to_tensor( np.reshape(array, [1, 1, 1, 1, 1]), dtype='float32' diff --git a/test/legacy_test/test_pool3d_op.py b/test/legacy_test/test_pool3d_op.py index 9f35227bd0684..649e5cd255bd8 100644 --- a/test/legacy_test/test_pool3d_op.py +++ b/test/legacy_test/test_pool3d_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest import paddle -from paddle.fluid import core +from paddle.base import core def adaptive_start_index(index, input_size, output_size): @@ -363,7 +363,7 @@ def setUp(self): self.padding_algorithm, ).astype(self.dtype) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(input)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(input)} self.attrs = { 'strides': self.strides, diff --git a/test/legacy_test/test_pool_max_op.py b/test/legacy_test/test_pool_max_op.py index 70b37da5ebb39..e24a6a5f3bf5c 100644 --- a/test/legacy_test/test_pool_max_op.py +++ b/test/legacy_test/test_pool_max_op.py @@ -24,7 +24,7 @@ from testsuite import create_op import paddle -from paddle.fluid import core +from paddle.base import core def adaptive_start_index(index, input_size, output_size): diff --git a/test/legacy_test/test_pow.py b/test/legacy_test/test_pow.py index 011593b3e874e..659fc32c2423f 100755 --- a/test/legacy_test/test_pow.py +++ b/test/legacy_test/test_pow.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core from paddle.static import Program, program_guard DYNAMIC = 1 diff --git a/test/legacy_test/test_prelu_op.py b/test/legacy_test/test_prelu_op.py index 8588ed498d703..80772fdd671e5 100644 --- a/test/legacy_test/test_prelu_op.py +++ b/test/legacy_test/test_prelu_op.py @@ -19,8 +19,8 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid import Program, core +from paddle import base +from paddle.base import Program, core def ref_prelu(x, weight): @@ -144,14 +144,14 @@ def test_dygraph_api(self): np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05) x = paddle.to_tensor(self.x_np) - m = paddle.nn.PReLU(weight_attr=fluid.ParamAttr(name="weight")) + m = paddle.nn.PReLU(weight_attr=base.ParamAttr(name="weight")) out = m(x) out_ref = ref_prelu_nn(self.x_np, 1, 0.25) np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05) x = paddle.to_tensor(self.x_np) m = paddle.nn.PReLU( - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(0.5) ) ) @@ -471,7 +471,7 @@ def test_check_grad(self): def prelu_t(x, mode, param_attr=None, name=None, data_format='NCHW'): - helper = fluid.layer_helper.LayerHelper('prelu', **locals()) + helper = base.layer_helper.LayerHelper('prelu', **locals()) alpha_shape = [1, x.shape[1], 1, 1] dtype = helper.input_dtype(input_param_name='x') alpha = helper.create_parameter( @@ -503,7 +503,7 @@ 
def setUp(self): def test_mode_error(self): main_program = Program() - with fluid.program_guard(main_program, Program()): + with base.program_guard(main_program, Program()): x = paddle.static.data(name='x', shape=[2, 3, 4, 5]) try: y = prelu_t(x, 'any') @@ -512,7 +512,7 @@ def test_mode_error(self): def test_data_format_error1(self): main_program = Program() - with fluid.program_guard(main_program, Program()): + with base.program_guard(main_program, Program()): x = paddle.static.data(name='x', shape=[2, 3, 4, 5]) try: y = prelu_t(x, 'channel', data_format='N') @@ -521,7 +521,7 @@ def test_data_format_error1(self): def test_data_format_error2(self): main_program = Program() - with fluid.program_guard(main_program, Program()): + with base.program_guard(main_program, Program()): x = paddle.static.data(name='x', shape=[2, 3, 4, 5]) try: y = paddle.static.nn.prelu(x, 'channel', data_format='N') diff --git a/test/legacy_test/test_print_op.py b/test/legacy_test/test_print_op.py index a8c67868117ed..3352d2b23ef93 100755 --- a/test/legacy_test/test_print_op.py +++ b/test/legacy_test/test_print_op.py @@ -18,9 +18,9 @@ from simple_nets import init_data, simple_fc_net import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.framework import switch_main_program +from paddle import base +from paddle.base import core +from paddle.base.framework import switch_main_program from paddle.static import Program, program_guard paddle.enable_static() @@ -29,7 +29,7 @@ class TestPrintOpCPU(unittest.TestCase): def setUp(self): self.place = paddle.CPUPlace() - self.x_tensor = fluid.core.LoDTensor() + self.x_tensor = base.core.LoDTensor() tensor_np = np.random.random(size=(2, 3)).astype('float32') self.x_tensor.set(tensor_np, self.place) self.x_tensor.set_recursive_sequence_lengths([[1, 1]]) @@ -93,7 +93,7 @@ class TestPrintOpError(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): # The input type of Print_op must be Variable. 
- x1 = fluid.create_lod_tensor( + x1 = base.create_lod_tensor( np.array([[-1]]), [[1]], paddle.CPUPlace() ) self.assertRaises(TypeError, paddle.static.Print, x1) @@ -108,7 +108,7 @@ def test_errors(self): class TestPrintOpGPU(TestPrintOpCPU): def setUp(self): self.place = paddle.CUDAPlace(0) - self.x_tensor = fluid.core.LoDTensor() + self.x_tensor = base.core.LoDTensor() tensor_np = np.random.random(size=(2, 3)).astype('float32') self.x_tensor.set(tensor_np, self.place) self.x_tensor.set_recursive_sequence_lengths([[1, 1]]) diff --git a/test/legacy_test/test_prod_op.py b/test/legacy_test/test_prod_op.py index 2146655baf5e0..2a0b06d76f849 100644 --- a/test/legacy_test/test_prod_op.py +++ b/test/legacy_test/test_prod_op.py @@ -138,7 +138,7 @@ def test_cpu(self): self.run_static() def test_gpu(self): - if not paddle.fluid.core.is_compiled_with_cuda(): + if not paddle.base.core.is_compiled_with_cuda(): return paddle.disable_static(place=paddle.CUDAPlace(0)) diff --git a/test/legacy_test/test_program.py b/test/legacy_test/test_program.py index a1b5b51a886e3..16c0351366adb 100644 --- a/test/legacy_test/test_program.py +++ b/test/legacy_test/test_program.py @@ -15,8 +15,8 @@ import unittest import paddle -from paddle import fluid -from paddle.fluid.framework import Program, default_main_program, program_guard +from paddle import base +from paddle.base.framework import Program, default_main_program, program_guard paddle.enable_static() @@ -105,7 +105,7 @@ def test_program_clone_with_parameter(self): self.assertNotEqual(0, len(new_program.blocks[0].all_parameters())) def test_program_all_parameters(self): - program = fluid.default_main_program() + program = base.default_main_program() data = paddle.static.data(name='x', shape=[None, 13], dtype='float32') hidden = paddle.static.nn.fc(x=data, size=10) loss = paddle.mean(hidden) @@ -118,19 +118,19 @@ def test_program_all_parameters(self): self.assertEqual(param_list[1].name, "fc_0.b_0") def test_prune_with_input_type_error(self): - program = fluid.default_main_program() + program = base.default_main_program() feed_var_names = [2, 3, 4] self.assertRaises( ValueError, program._prune_with_input, feed_var_names, [] ) def test_random_seed_error(self): - program = fluid.default_main_program() + program = base.default_main_program() with self.assertRaises(ValueError): program.random_seed = "seed" def test_copy_info_from_error(self): - program = fluid.default_main_program() + program = base.default_main_program() self.assertRaises(TypeError, program._copy_param_info_from, "program") self.assertRaises( TypeError, program._copy_dist_param_info_from, "program" diff --git a/test/legacy_test/test_program_code.py b/test/legacy_test/test_program_code.py index 03661d3b3fbb0..86979038a0a28 100644 --- a/test/legacy_test/test_program_code.py +++ b/test/legacy_test/test_program_code.py @@ -15,12 +15,12 @@ import unittest import paddle -from paddle import fluid +from paddle import base class TestProgramToReadableCode(unittest.TestCase): def setUp(self): - self.program = fluid.Program() + self.program = base.Program() self.block = self.program.current_block() self.var = self.block.create_var( name="X", shape=[-1, 23, 48], dtype='float32' @@ -45,7 +45,7 @@ def false_func(): shape=[3, 2], dtype='int32', value=-1 ) - with fluid.program_guard(program): + with base.program_guard(program): x = paddle.tensor.fill_constant( shape=[1], dtype='float32', value=0.1 ) diff --git a/test/legacy_test/test_program_converter.py b/test/legacy_test/test_program_converter.py index 
9a9c49df01b68..3894ca930ee0f 100644 --- a/test/legacy_test/test_program_converter.py +++ b/test/legacy_test/test_program_converter.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid.proto import framework_pb2 +from paddle.base.proto import framework_pb2 class TestSetValue(unittest.TestCase): diff --git a/test/legacy_test/test_program_prune_backward.py b/test/legacy_test/test_program_prune_backward.py index c304777e64570..237684e3b0bd9 100755 --- a/test/legacy_test/test_program_prune_backward.py +++ b/test/legacy_test/test_program_prune_backward.py @@ -25,8 +25,8 @@ ) import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def simple_fc_net_with_accuracy(use_feed): @@ -39,7 +39,7 @@ def simple_fc_net_with_accuracy(use_feed): hidden, size=200, activation='relu', - bias_attr=fluid.ParamAttr( + bias_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=1.0) ), ) @@ -118,11 +118,11 @@ def loss2(opt, pred, label, with_optimize): class TestProgramPruneBackward(unittest.TestCase): def program_compare(self, program_a, program_b): assert isinstance( - program_a, fluid.framework.Program - ), "The first argument should be fluid.framework.Program." + program_a, base.framework.Program + ), "The first argument should be base.framework.Program." assert isinstance( - program_b, fluid.framework.Program - ), "The second argument should be fluid.framework Program." + program_b, base.framework.Program + ), "The second argument should be base.framework Program." self.assertEqual(len(program_a.blocks), len(program_b.blocks)) for idx in range(len(program_a.blocks)): @@ -140,7 +140,7 @@ def program_compare(self, program_a, program_b): def check_prune_correctness(self, method, feed_dict, optimizer): loss = method(use_feed=False) - main_program = fluid.default_main_program() + main_program = base.default_main_program() test_prog_orig = main_program.clone(for_test=True) optimizer().minimize(loss) test_prog_prune = main_program.clone(for_test=True) @@ -152,8 +152,8 @@ def check_prune_correctness(self, method, feed_dict, optimizer): places.append(core.CUDAPlace(0)) for place in places: - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + exe.run(base.default_startup_program()) (loss_data_prune,) = exe.run( test_prog_prune, feed=feed_dict, fetch_list=[loss.name] @@ -230,7 +230,7 @@ def optimizer(): with self.program_scope_guard(): # the program argument is used to distinguish Program and CompiledProgram feed_dict = get_feed_data_reader().get_next( - fluid.Executor(core.CPUPlace()), fluid.default_main_program() + base.Executor(core.CPUPlace()), base.default_main_program() ) self.check_prune_correctness( method=transformer, feed_dict=feed_dict, optimizer=optimizer @@ -255,23 +255,23 @@ def test_optimization_in_cond(self): feed_dict = {'x': x_in, 'label': label_in} with self.program_scope_guard(): loss = optimization_in_cond_net(False) - main_program = fluid.default_main_program() + main_program = base.default_main_program() test_prog_orig = main_program.clone(for_test=True) place = core.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + exe.run(base.default_startup_program()) (loss_data_orig,) = exe.run( test_prog_orig, feed=feed_dict, fetch_list=[loss.name] ) with self.program_scope_guard(): loss = optimization_in_cond_net(True) - main_program = fluid.default_main_program() + main_program = 
base.default_main_program() test_prog_prune = main_program.clone(for_test=True) place = core.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + exe.run(base.default_startup_program()) (loss_data_prune,) = exe.run( test_prog_prune, feed=feed_dict, fetch_list=[loss.name] ) @@ -281,12 +281,12 @@ def test_optimization_in_cond(self): @contextlib.contextmanager def program_scope_guard(self): - prog = fluid.Program() - startup_prog = fluid.Program() - scope = fluid.core.Scope() - with fluid.scope_guard(scope): - with fluid.program_guard(prog, startup_prog): - with fluid.unique_name.guard(): + prog = base.Program() + startup_prog = base.Program() + scope = base.core.Scope() + with base.scope_guard(scope): + with base.program_guard(prog, startup_prog): + with base.unique_name.guard(): yield diff --git a/test/legacy_test/test_program_to_string.py b/test/legacy_test/test_program_to_string.py index e45e8cc394340..c6524d9cf5d92 100644 --- a/test/legacy_test/test_program_to_string.py +++ b/test/legacy_test/test_program_to_string.py @@ -15,12 +15,12 @@ import unittest import paddle -from paddle import fluid +from paddle import base class TestProgram(unittest.TestCase): def test_program_to_string(self): - prog = fluid.default_main_program() + prog = base.default_main_program() a = paddle.static.data(name="X", shape=[2, 3], dtype="float32") c = paddle.static.nn.fc(a, size=3) prog_string = prog.to_string(throw_on_error=True, with_details=False) diff --git a/test/legacy_test/test_protobuf.py b/test/legacy_test/test_protobuf.py index c76a5fe67f245..ec42e13bd6311 100644 --- a/test/legacy_test/test_protobuf.py +++ b/test/legacy_test/test_protobuf.py @@ -14,7 +14,7 @@ import unittest -from paddle.fluid.proto import framework_pb2 +from paddle.base.proto import framework_pb2 class TestFrameworkProto(unittest.TestCase): diff --git a/test/legacy_test/test_protobuf_descs.py b/test/legacy_test/test_protobuf_descs.py index 9a84d43e44bd7..c26752ba46208 100644 --- a/test/legacy_test/test_protobuf_descs.py +++ b/test/legacy_test/test_protobuf_descs.py @@ -14,8 +14,8 @@ import unittest -from paddle.fluid import core -from paddle.fluid.framework import Program +from paddle.base import core +from paddle.base.framework import Program class TestOpDesc(unittest.TestCase): diff --git a/test/legacy_test/test_prune.py b/test/legacy_test/test_prune.py index 45f40d358901d..00b96074ab5c2 100644 --- a/test/legacy_test/test_prune.py +++ b/test/legacy_test/test_prune.py @@ -18,8 +18,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import framework +from paddle import base +from paddle.base import framework class TestPrune(unittest.TestCase): @@ -39,7 +39,7 @@ def test_prune_with_input(self): program = framework.Program() startup_program = framework.Program() block = program.global_block() - with fluid.program_guard(program, startup_program): + with base.program_guard(program, startup_program): (x, y, label, loss) = self.net() self.assertEqual(len(block.ops), 5) self.assertEqual( @@ -65,7 +65,7 @@ def test_prune(self): program = framework.Program() startup_program = framework.Program() block = program.global_block() - with fluid.program_guard(program, startup_program): + with base.program_guard(program, startup_program): (x, y, label, loss) = self.net() self.assertEqual(len(block.ops), 5) self.assertEqual( @@ -95,7 +95,7 @@ def test_prune_target_not_list(self): program = framework.Program() startup_program = framework.Program() block = 
program.global_block() - with fluid.program_guard(program, startup_program): + with base.program_guard(program, startup_program): (x, y, label, loss) = self.net() self.assertEqual(len(block.ops), 5) self.assertEqual( @@ -125,7 +125,7 @@ def test_prune_target_none(self): program = framework.Program() startup_program = framework.Program() block = program.global_block() - with fluid.program_guard(program, startup_program): + with base.program_guard(program, startup_program): (x, y, label, loss) = self.net() self.assertEqual(len(block.ops), 5) self.assertEqual( @@ -154,10 +154,10 @@ def mock(self, program, feed, fetch, optimize_ops): @contextlib.contextmanager def _mock_guard(mock): - original = fluid.Executor._prune_program - fluid.Executor._prune_program = mock + original = base.Executor._prune_program + base.Executor._prune_program = mock yield - fluid.Executor._prune_program = original + base.Executor._prune_program = original class TestExecutorRunAutoPrune(unittest.TestCase): @@ -166,7 +166,7 @@ def net1(self): x.desc.set_need_check_feed(False) label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64") label.desc.set_need_check_feed(False) - w_param_attrs = fluid.ParamAttr( + w_param_attrs = base.ParamAttr( name="fc_weight", learning_rate=0.5, initializer=paddle.nn.initializer.Constant(1.0), @@ -194,13 +194,13 @@ def net2(self): x2.desc.set_need_check_feed(False) label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64") label.desc.set_need_check_feed(False) - w1_param_attrs = fluid.ParamAttr( + w1_param_attrs = base.ParamAttr( name="fc_weight1", learning_rate=0.5, initializer=paddle.nn.initializer.Constant(1.0), trainable=True, ) - w2_param_attrs = fluid.ParamAttr( + w2_param_attrs = base.ParamAttr( name="fc_weight2", learning_rate=0.5, initializer=paddle.nn.initializer.Constant(1.0), @@ -238,11 +238,11 @@ def test_not_prune(self): """ program = framework.Program() startup_program = framework.Program() - scope = fluid.Scope() - with fluid.scope_guard(scope): - with fluid.program_guard(program, startup_program): + scope = base.Scope() + with base.scope_guard(scope): + with base.program_guard(program, startup_program): (x, y, label, loss1, loss2, w_param_attrs) = self.net1() - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) exe.run(startup_program) x_np = np.random.random(size=(10, 2)).astype('float32') label_np = np.random.randint(1, size=(10, 1)).astype('int64') @@ -261,11 +261,11 @@ def test_prune_fetches_without_optimizer(self): """ program = framework.Program() startup_program = framework.Program() - scope = fluid.Scope() - with fluid.scope_guard(scope): - with fluid.program_guard(program, startup_program): + scope = base.Scope() + with base.scope_guard(scope): + with base.program_guard(program, startup_program): (x, y, label, loss1, loss2, w_param_attrs) = self.net1() - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) exe.run(startup_program) weight_init = np.array( scope.find_var(w_param_attrs.name).get_tensor() @@ -294,13 +294,13 @@ def test_prune_fetches_with_optimizer(self): """ program = framework.Program() startup_program = framework.Program() - scope = fluid.Scope() - with fluid.scope_guard(scope): - with fluid.program_guard(program, startup_program): + scope = base.Scope() + with base.scope_guard(scope): + with base.program_guard(program, startup_program): (x, y, label, loss1, loss2, w_param_attrs) = self.net1() sgd_optimizer = paddle.optimizer.SGD(learning_rate=0.5) 
sgd_optimizer.minimize(loss1) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) exe.run(startup_program) weight_init = np.array( scope.find_var(w_param_attrs.name).get_tensor() @@ -325,15 +325,15 @@ def test_prune_fetches_with_optimizer(self): def test_prune_compiled_program(self): program = framework.Program() startup_program = framework.Program() - scope = fluid.Scope() - with fluid.scope_guard(scope): - with fluid.program_guard(program, startup_program): + scope = base.Scope() + with base.scope_guard(scope): + with base.program_guard(program, startup_program): (x, y, label, loss1, loss2, w_param_attrs) = self.net1() sgd_optimizer = paddle.optimizer.SGD(learning_rate=0.5) sgd_optimizer.minimize(loss1) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) exe.run(startup_program) - compiled_prog = fluid.CompiledProgram(program) + compiled_prog = base.CompiledProgram(program) weight_init = np.array( scope.find_var(w_param_attrs.name).get_tensor() ) @@ -357,11 +357,11 @@ def test_prune_compiled_program(self): def test_prune_feed_without_optimizer(self): program = framework.Program() startup_program = framework.Program() - scope = fluid.Scope() - with fluid.scope_guard(scope): - with fluid.program_guard(program, startup_program): + scope = base.Scope() + with base.scope_guard(scope): + with base.program_guard(program, startup_program): (x, y, label, loss1, loss2, w_param_attrs) = self.net1() - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) exe.run(startup_program) weight_init = np.array( scope.find_var(w_param_attrs.name).get_tensor() @@ -386,13 +386,13 @@ def test_prune_feed_without_optimizer(self): def test_prune_feed_with_optimizer(self): program = framework.Program() startup_program = framework.Program() - scope = fluid.Scope() - with fluid.scope_guard(scope): - with fluid.program_guard(program, startup_program): + scope = base.Scope() + with base.scope_guard(scope): + with base.program_guard(program, startup_program): (x, y, label, loss1, loss2, w_param_attrs) = self.net1() sgd_optimizer = paddle.optimizer.SGD(learning_rate=0.5) sgd_optimizer.minimize(loss1) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) exe.run(startup_program) x_np = np.random.random(size=(10, 2)).astype('float32') label_np = np.random.randint(1, size=(10, 1)).astype('int64') @@ -417,13 +417,13 @@ def test_prune_with_cache_program(self): 10 times with the same input arguments. ''' with _mock_guard(mock): - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) exe.prune_called_times = 0 program = framework.Program() startup_program = framework.Program() - scope = fluid.Scope() - with fluid.scope_guard(scope): - with fluid.program_guard(program, startup_program): + scope = base.Scope() + with base.scope_guard(scope): + with base.program_guard(program, startup_program): (x, y, label, loss1, loss2, w_param_attrs) = self.net1() sgd_optimizer = paddle.optimizer.SGD(learning_rate=0.5) sgd_optimizer.minimize(loss1) @@ -451,13 +451,13 @@ def test_prune_with_cache_program2(self): the cache_keys should be different and get different pruned program. 
''' with _mock_guard(mock): - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) exe.prune_called_times = 0 program = framework.Program() startup_program = framework.Program() - scope = fluid.Scope() - with fluid.scope_guard(scope): - with fluid.program_guard(program, startup_program): + scope = base.Scope() + with base.scope_guard(scope): + with base.program_guard(program, startup_program): ( x1, x2, @@ -519,13 +519,13 @@ def test_prune_with_cache_compiled_program(self): 10 times with the same input arguments. ''' with _mock_guard(mock): - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) exe.prune_called_times = 0 program = framework.Program() startup_program = framework.Program() - scope = fluid.Scope() - with fluid.scope_guard(scope): - with fluid.program_guard(program, startup_program): + scope = base.Scope() + with base.scope_guard(scope): + with base.program_guard(program, startup_program): (x, y, label, loss1, loss2, w_param_attrs) = self.net1() sgd_optimizer = paddle.optimizer.SGD(learning_rate=0.5) sgd_optimizer.minimize(loss1) @@ -534,7 +534,7 @@ def test_prune_with_cache_compiled_program(self): label_np = np.random.randint(1, size=(10, 1)).astype( 'int64' ) - compiled_prog = fluid.CompiledProgram(program) + compiled_prog = base.CompiledProgram(program) for i in range(10): res = exe.run( compiled_prog, @@ -552,13 +552,13 @@ def test_prune_with_multi_optimizers(self): If there are multiple optimizers in the program, we can run specific one by pass the return of optimize.minimize() to fetch_list. ''' - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) program = framework.Program() startup_program = framework.Program() - scope = fluid.Scope() + scope = base.Scope() # do not use_prune - with fluid.scope_guard(scope): - with fluid.program_guard(program, startup_program): + with base.scope_guard(scope): + with base.program_guard(program, startup_program): (x, y, label, loss1, loss2, w_param_attrs) = self.net1() sgd_optimizer = paddle.optimizer.SGD(learning_rate=0.5) train1, _ = sgd_optimizer.minimize(loss1) @@ -577,9 +577,9 @@ def test_prune_with_multi_optimizers(self): scope.find_var(w_param_attrs.name).get_tensor() ) - scope = fluid.Scope() + scope = base.Scope() # use_prune - with fluid.scope_guard(scope): + with base.scope_guard(scope): exe.run(startup_program) res = exe.run( program, @@ -592,8 +592,8 @@ def test_prune_with_multi_optimizers(self): ) # expected - scope = fluid.Scope() - with fluid.scope_guard(scope): + scope = base.Scope() + with base.scope_guard(scope): exe.run(startup_program) exe.run( cloned_program, @@ -613,13 +613,13 @@ def test_prune_program_with_tupe_in_fetch_list(self): If there are multiple optimizers in the program, we can run specific one by pass the return of optimize.minimize() to fetch_list. 
''' - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) program = framework.Program() startup_program = framework.Program() - scope = fluid.Scope() + scope = base.Scope() # do not use_prune - with fluid.scope_guard(scope): - with fluid.program_guard(program, startup_program): + with base.scope_guard(scope): + with base.program_guard(program, startup_program): (x, y, label, loss1, loss2, w_param_attrs) = self.net1() sgd_optimizer = paddle.optimizer.SGD(learning_rate=0.5) train1 = sgd_optimizer.minimize(loss1) @@ -641,9 +641,9 @@ def test_prune_program_with_tupe_in_fetch_list(self): scope.find_var(w_param_attrs.name).get_tensor() ) - scope = fluid.Scope() + scope = base.Scope() # use_prune - with fluid.scope_guard(scope): + with base.scope_guard(scope): exe.run(startup_program) res = exe.run( program, @@ -656,8 +656,8 @@ def test_prune_program_with_tupe_in_fetch_list(self): ) # expected - scope = fluid.Scope() - with fluid.scope_guard(scope): + scope = base.Scope() + with base.scope_guard(scope): exe.run(startup_program) exe.run( cloned_program, @@ -679,9 +679,9 @@ def test_prune_program_partial_parameter_updated(self): """ program = framework.Program() startup_program = framework.Program() - scope = fluid.Scope() - with fluid.scope_guard(scope): - with fluid.program_guard(program, startup_program): + scope = base.Scope() + with base.scope_guard(scope): + with base.program_guard(program, startup_program): ( x1, x2, @@ -699,7 +699,7 @@ def test_prune_program_partial_parameter_updated(self): train1 = sgd_optimizer.minimize(loss1) sgd_optimizer1 = paddle.optimizer.SGD(learning_rate=0.5) train2 = sgd_optimizer1.minimize(loss2) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) exe.run(startup_program) weight1_init = np.array( scope.find_var(w1_param_attrs.name).get_tensor() @@ -737,13 +737,13 @@ def test_prune_override_use_prune(self): ''' If optimize_ops in provided in the fetch_list, the argument use_prune is always override to True. 
''' - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) program = framework.Program() startup_program = framework.Program() - scope = fluid.Scope() + scope = base.Scope() # do not use_prune - with fluid.scope_guard(scope): - with fluid.program_guard(program, startup_program): + with base.scope_guard(scope): + with base.program_guard(program, startup_program): (x, y, label, loss1, loss2, w_param_attrs) = self.net1() sgd_optimizer = paddle.optimizer.SGD(learning_rate=0.5) train1, _ = sgd_optimizer.minimize(loss1) @@ -763,9 +763,9 @@ def test_prune_override_use_prune(self): scope.find_var(w_param_attrs.name).get_tensor() ) - scope = fluid.Scope() + scope = base.Scope() # use_prune - with fluid.scope_guard(scope): + with base.scope_guard(scope): exe.run(startup_program) res = exe.run( program, @@ -777,8 +777,8 @@ def test_prune_override_use_prune(self): ) # expected - scope = fluid.Scope() - with fluid.scope_guard(scope): + scope = base.Scope() + with base.scope_guard(scope): exe.run(startup_program) exe.run( cloned_program, @@ -797,11 +797,11 @@ def test_prune_feed_var_in_fetchlist_1(self): # the variable to be fed is not leaf program = framework.Program() startup_program = framework.Program() - scope = fluid.Scope() - with fluid.scope_guard(scope): - with fluid.program_guard(program, startup_program): + scope = base.Scope() + with base.scope_guard(scope): + with base.program_guard(program, startup_program): (x, y, label, loss1, loss2, w_param_attrs) = self.net1() - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) exe.run(startup_program) weight_init = np.array( scope.find_var(w_param_attrs.name).get_tensor() @@ -828,11 +828,11 @@ def test_prune_feed_var_in_fetchlist_2(self): # the variable to be fed is leaf program = framework.Program() startup_program = framework.Program() - scope = fluid.Scope() - with fluid.scope_guard(scope): - with fluid.program_guard(program, startup_program): + scope = base.Scope() + with base.scope_guard(scope): + with base.program_guard(program, startup_program): (x, y, label, loss1, loss2, w_param_attrs) = self.net1() - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) exe.run(startup_program) weight_init = np.array( scope.find_var(w_param_attrs.name).get_tensor() diff --git a/test/legacy_test/test_prune_gate_by_capacity_op.py b/test/legacy_test/test_prune_gate_by_capacity_op.py index d6103be8d13d5..966651e22ff1c 100644 --- a/test/legacy_test/test_prune_gate_by_capacity_op.py +++ b/test/legacy_test/test_prune_gate_by_capacity_op.py @@ -18,7 +18,7 @@ import paddle from paddle.distributed.models.moe import utils -from paddle.fluid import core +from paddle.base import core def count(x, upper_num): diff --git a/test/legacy_test/test_psroi_pool_op.py b/test/legacy_test/test_psroi_pool_op.py index 03c2424d87844..fc4b513acfe72 100644 --- a/test/legacy_test/test_psroi_pool_op.py +++ b/test/legacy_test/test_psroi_pool_op.py @@ -229,7 +229,7 @@ def test_dytype_is_float64(): np.testing.assert_allclose(out, expect_out, rtol=1e-05) places = ['cpu'] - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): places.append('gpu') for place in places: paddle.set_device(place) @@ -285,7 +285,7 @@ def test_dytype_is_float64(): paddle.disable_static() places = ['cpu'] - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): places.append('gpu') for place in places: paddle.set_device(place) @@ -382,11 +382,11 @@ def 
test_function_in_static(self): self.x, self.boxes, self.boxes_num, 10, 1.0, 7, 7 ) places = [paddle.CPUPlace()] - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): places.append(paddle.CUDAPlace(0)) for place in places: exe = paddle.static.Executor(place) - boxes_lod_data = paddle.fluid.create_lod_tensor( + boxes_lod_data = paddle.base.create_lod_tensor( self.boxes, [[1, 2]], place ) (out_res,) = exe.run( diff --git a/test/legacy_test/test_pull_gpups_sparse_op.py b/test/legacy_test/test_pull_gpups_sparse_op.py index 054cc9f495dbf..03b4c370cbfb6 100644 --- a/test/legacy_test/test_pull_gpups_sparse_op.py +++ b/test/legacy_test/test_pull_gpups_sparse_op.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.incubate.layers import _pull_gpups_sparse paddle.enable_static() @@ -27,10 +27,10 @@ class TestPullGpupsSparse(unittest.TestCase): """Test PullGpupsSparse op.""" def test_static_graph(self): - startup_program = fluid.Program() - train_program = fluid.Program() + startup_program = base.Program() + train_program = base.Program() slots = [] - with fluid.program_guard(train_program, startup_program): + with base.program_guard(train_program, startup_program): l = paddle.static.data( name='input', shape=[-1, 1], dtype="int64", lod_level=1 ) @@ -42,10 +42,10 @@ def test_static_graph(self): sgd_optimizer = paddle.optimizer.SGD(learning_rate=0.001) sgd_optimizer.minimize(cost, train_program) block = train_program.global_block() - place = fluid.CPUPlace() - if fluid.core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) + place = base.CPUPlace() + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) + exe = base.Executor(place) exe.run(startup_program) img = np.array([1]).astype(np.int64) res = exe.run( diff --git a/test/legacy_test/test_py_func_op.py b/test/legacy_test/test_py_func_op.py index 2719fff9235d3..3164feab034b5 100644 --- a/test/legacy_test/test_py_func_op.py +++ b/test/legacy_test/test_py_func_op.py @@ -18,12 +18,12 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import compiler +from paddle import base +from paddle.base import compiler dev_cnt = 2 -if fluid.core.is_compiled_with_cuda(): - dev_cnt = fluid.core.get_cuda_device_count() +if base.core.is_compiled_with_cuda(): + dev_cnt = base.core.get_cuda_device_count() os.environ['CPU_NUM'] = str(dev_cnt) @@ -78,7 +78,7 @@ def simple_fc_net(img, label, use_py_func_op): hidden = paddle.static.nn.fc( hidden, size=200, - bias_attr=fluid.ParamAttr( + bias_attr=base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=1.0) ), ) @@ -86,7 +86,7 @@ def simple_fc_net(img, label, use_py_func_op): hidden = paddle.tanh(hidden) else: new_hidden = ( - fluid.default_main_program() + base.default_main_program() .current_block() .create_var( name=f'hidden_{idx}', @@ -109,7 +109,7 @@ def simple_fc_net(img, label, use_py_func_op): ) else: loss = ( - fluid.default_main_program() + base.default_main_program() .current_block() .create_var(name='loss', dtype='float32', shape=[-1, 1]) ) @@ -122,7 +122,7 @@ def simple_fc_net(img, label, use_py_func_op): ) dummy_var = ( - fluid.default_main_program() + base.default_main_program() .current_block() .create_var(name='test_tmp_var', dtype='float32', shape=[1]) ) @@ -133,12 +133,12 @@ def simple_fc_net(img, label, use_py_func_op): paddle.static.py_func(func=dummy_func_with_no_output, x=loss, out=None) loss_out = ( - 
fluid.default_main_program() + base.default_main_program() .current_block() .create_var(dtype='float32', shape=[-1, 1]) ) dummy_var_out = ( - fluid.default_main_program() + base.default_main_program() .current_block() .create_var(dtype='float32', shape=[1]) ) @@ -172,11 +172,11 @@ def reader(): def test_main(use_cuda, use_py_func_op, use_parallel_executor): - if use_cuda and not fluid.core.is_compiled_with_cuda(): + if use_cuda and not base.core.is_compiled_with_cuda(): return None - with fluid.program_guard(fluid.Program(), fluid.Program()): - with fluid.scope_guard(fluid.core.Scope()): + with base.program_guard(base.Program(), base.Program()): + with base.scope_guard(base.core.Scope()): gen = paddle.seed(1) np.random.seed(1) img = paddle.static.data( @@ -189,18 +189,18 @@ def test_main(use_cuda, use_py_func_op, use_parallel_executor): optimizer = paddle.optimizer.SGD(learning_rate=1e-3) optimizer.minimize(loss) - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - feeder = fluid.DataFeeder(feed_list=[img, label], place=place) + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() + feeder = base.DataFeeder(feed_list=[img, label], place=place) r = paddle.batch(reader, batch_size=10) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + exe.run(base.default_startup_program()) - train_cp = fluid.default_main_program() + train_cp = base.default_main_program() if use_parallel_executor: train_cp = compiler.CompiledProgram( - fluid.default_main_program() + base.default_main_program() ) fetch_list = [loss.name] else: diff --git a/test/legacy_test/test_py_reader_combination.py b/test/legacy_test/test_py_reader_combination.py index cd228c244c212..df62b0b61ccf7 100644 --- a/test/legacy_test/test_py_reader_combination.py +++ b/test/legacy_test/test_py_reader_combination.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base class TestPyReaderCombination(unittest.TestCase): @@ -55,7 +55,7 @@ def _reset_iterable_reader(self, py_reader): py_reader._loader._reset() def main_impl(self, place): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): image = paddle.static.data( name='image', dtype='float32', shape=[-1, 784] ) @@ -63,10 +63,10 @@ def main_impl(self, place): name='label', dtype='int64', shape=[-1, 1] ) - py_reader1 = fluid.io.PyReader( + py_reader1 = base.io.PyReader( feed_list=[image, label], capacity=16, iterable=True ) - py_reader2 = fluid.io.PyReader( + py_reader2 = base.io.PyReader( feed_list=[image, label], capacity=16, iterable=True ) @@ -94,10 +94,10 @@ def main_impl(self, place): self._reset_iterable_reader(py_reader2) def get_places(self): - if fluid.is_compiled_with_cuda(): - return [fluid.CUDAPlace(0), fluid.CPUPlace()] + if base.is_compiled_with_cuda(): + return [base.CUDAPlace(0), base.CPUPlace()] else: - return [fluid.CPUPlace()] + return [base.CPUPlace()] def test_main(self): for p in self.get_places(): diff --git a/test/legacy_test/test_py_reader_return_list.py b/test/legacy_test/test_py_reader_return_list.py index cb3f4ca9b1a28..49c476c926254 100644 --- a/test/legacy_test/test_py_reader_return_list.py +++ b/test/legacy_test/test_py_reader_return_list.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base class TestPyReader(unittest.TestCase): @@ -37,11 +37,11 @@ def reader(): return reader for return_list in [True, False]: - with 
fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): image = paddle.static.data( name='image', shape=[-1, 784, 784], dtype='float32' ) - reader = fluid.io.PyReader( + reader = base.io.PyReader( feed_list=[image], capacity=4, iterable=True, @@ -53,11 +53,11 @@ def reader(): paddle.batch( user_defined_reader, batch_size=self.batch_size ), - fluid.core.CPUPlace(), + base.core.CPUPlace(), ) # definition of network is omitted - executor = fluid.Executor(fluid.core.CPUPlace()) - executor.run(fluid.default_main_program()) + executor = base.Executor(base.core.CPUPlace()) + executor.run(base.default_main_program()) for _ in range(self.epoch_num): for data in reader(): @@ -66,13 +66,13 @@ def reader(): else: executor.run(feed=data) - with fluid.dygraph.guard(): - batch_py_reader = fluid.io.PyReader(capacity=2) + with base.dygraph.guard(): + batch_py_reader = base.io.PyReader(capacity=2) user_defined_reader = reader_creator_random_image(784, 784) batch_py_reader.decorate_sample_generator( user_defined_reader, batch_size=self.batch_size, - places=fluid.core.CPUPlace(), + places=base.core.CPUPlace(), ) for epoch in range(self.epoch_num): diff --git a/test/legacy_test/test_py_reader_sample_generator.py b/test/legacy_test/test_py_reader_sample_generator.py index 2023b96b5ec03..ef520efb2683e 100644 --- a/test/legacy_test/test_py_reader_sample_generator.py +++ b/test/legacy_test/test_py_reader_sample_generator.py @@ -19,7 +19,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base os.environ['CPU_NUM'] = '1' @@ -58,7 +58,7 @@ def run_main(self, reader, use_sample_generator, iterable, drop_last): name='image', dtype='float32', shape=[-1, 784] ) label = paddle.static.data(name='label', dtype='int64', shape=[-1, 1]) - py_reader = fluid.io.PyReader( + py_reader = base.io.PyReader( feed_list=[image, label], capacity=16, iterable=iterable, @@ -70,11 +70,11 @@ def run_main(self, reader, use_sample_generator, iterable, drop_last): if not use_sample_generator: py_reader.decorate_sample_list_generator( - batch_reader, places=fluid.cpu_places() + batch_reader, places=base.cpu_places() ) else: py_reader.decorate_sample_generator( - reader, self.batch_size, drop_last, places=fluid.cpu_places() + reader, self.batch_size, drop_last, places=base.cpu_places() ) if drop_last: @@ -82,8 +82,8 @@ def run_main(self, reader, use_sample_generator, iterable, drop_last): else: batch_num = math.ceil(float(self.sample_num) / self.batch_size) - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) + exe = base.Executor(base.CPUPlace()) + exe.run(base.default_startup_program()) for _ in range(self.epoch_num): if py_reader.iterable: step = 0 @@ -102,7 +102,7 @@ def run_main(self, reader, use_sample_generator, iterable, drop_last): self.assertArrayEqual(img, all_datas[step][0]) self.assertArrayEqual(lbl, all_datas[step][1]) step += 1 - except fluid.core.EOFException: + except base.core.EOFException: py_reader.reset() self.assertEqual(step, len(all_datas)) break @@ -116,7 +116,7 @@ def test_main(self): for use_sample_generator in [False, True]: for iterable in [False, True]: for drop_last in [False, True]: - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): self.run_main( reader, use_sample_generator, iterable, drop_last ) diff --git a/test/legacy_test/test_pybind_interface.py b/test/legacy_test/test_pybind_interface.py index 
3a51c4bd09aa7..89c577e206e44 100644 --- a/test/legacy_test/test_pybind_interface.py +++ b/test/legacy_test/test_pybind_interface.py @@ -14,7 +14,7 @@ import unittest -from paddle.fluid import core +from paddle.base import core class TestPybindInference(unittest.TestCase): diff --git a/test/legacy_test/test_pyramid_hash_op.py b/test/legacy_test/test_pyramid_hash_op.py index 8ef1173ac7572..ac1ce7d7c1db3 100644 --- a/test/legacy_test/test_pyramid_hash_op.py +++ b/test/legacy_test/test_pyramid_hash_op.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.incubate.layers.nn import search_pyramid_hash @@ -42,11 +42,11 @@ def test_api(self): black_list_len=2800, seed=3, lr=0.002, - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( name="PyramidHash_emb_0", learning_rate=0, ), - param_attr_wl=fluid.ParamAttr( + param_attr_wl=base.ParamAttr( name="Filter", learning_rate=0, ), @@ -55,13 +55,13 @@ def test_api(self): name=None, ) - place = fluid.CPUPlace() - x_tensor = fluid.create_lod_tensor( + place = base.CPUPlace() + x_tensor = base.create_lod_tensor( np.random.randint(0, num_voc, x_shape).astype('int32'), x_lod, place ) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + exe.run(base.default_startup_program()) ret = exe.run( feed={'x': x_tensor}, fetch_list=[hash_embd], return_numpy=False ) diff --git a/test/legacy_test/test_python_operator_overriding.py b/test/legacy_test/test_python_operator_overriding.py index 0beaf79a4750a..def870b0aa65b 100644 --- a/test/legacy_test/test_python_operator_overriding.py +++ b/test/legacy_test/test_python_operator_overriding.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import framework +from paddle import base +from paddle.base import framework paddle.enable_static() @@ -39,16 +39,16 @@ def check_result(self, fn, place, dtype): ) out = fn(x_var, y_var) - exe = fluid.Executor(place) + exe = base.Executor(place) - exe.run(fluid.default_startup_program()) - fluid_out = exe.run( - fluid.default_main_program(), + exe.run(base.default_startup_program()) + base_out = exe.run( + base.default_main_program(), feed={'x': x_data, 'y': y_data}, fetch_list=[out], ) - np.testing.assert_array_equal(python_out, fluid_out[0]) + np.testing.assert_array_equal(python_out, base_out[0]) def test_override(self): # compare func to check @@ -62,9 +62,9 @@ def test_override(self): ] # places to check - places = [fluid.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places = [base.CPUPlace()] + if base.core.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) # dtypes to check dtypes = ['int32', 'float32'] diff --git a/test/legacy_test/test_qr_op.py b/test/legacy_test/test_qr_op.py index ee7ae544f59f6..7352e16bd65c3 100644 --- a/test/legacy_test/test_qr_op.py +++ b/test/legacy_test/test_qr_op.py @@ -19,8 +19,8 @@ from eager_op_test import OpTest import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestQrOp(OpTest): @@ -143,9 +143,9 @@ def run_qr_dygraph(shape, mode, dtype): np_q = np.zeros(np_q_shape).astype(np_dtype) np_r = np.zeros(np_r_shape).astype(np_dtype) places = [] - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for place in places: batch_size = a.size // (a.shape[-1] 
* a.shape[-2]) for i in range(batch_size): @@ -209,11 +209,11 @@ def run_qr_static(shape, mode, dtype): np_q = np.zeros(np_q_shape).astype(np_dtype) np_r = np.zeros(np_r_shape).astype(np_dtype) places = [] - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for place in places: - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): batch_size = a.size // (a.shape[-1] * a.shape[-2]) for i in range(batch_size): coord = np.unravel_index(i, a.shape[:-2]) @@ -229,9 +229,9 @@ def run_qr_static(shape, mode, dtype): ) if mode == "r": r = paddle.linalg.qr(x, mode=mode) - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": a}, fetch_list=[r], ) @@ -240,9 +240,9 @@ def run_qr_static(shape, mode, dtype): ) else: q, r = paddle.linalg.qr(x, mode=mode) - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": a}, fetch_list=[q, r], ) diff --git a/test/legacy_test/test_query_op.py b/test/legacy_test/test_query_op.py index edb1848f03baf..8c0f6ad3078f8 100644 --- a/test/legacy_test/test_query_op.py +++ b/test/legacy_test/test_query_op.py @@ -15,7 +15,7 @@ import unittest import paddle -from paddle.fluid import core +from paddle.base import core class TestCudnnVersion(unittest.TestCase): diff --git a/test/legacy_test/test_queue.py b/test/legacy_test/test_queue.py index a594ec56cc3c5..5a1cbd53d43aa 100644 --- a/test/legacy_test/test_queue.py +++ b/test/legacy_test/test_queue.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestQueue(unittest.TestCase): @@ -27,10 +27,10 @@ def test_eq(self): test queue_generator op, enqueue op and dequeue op. 
""" - main_program = fluid.Program() - startup_program = fluid.Program() + main_program = base.Program() + startup_program = base.Program() value = np.random.rand(1) - with fluid.program_guard(main_program, startup_program): + with base.program_guard(main_program, startup_program): data_in = paddle.static.create_global_var( shape=[2, 3], value=value, @@ -66,11 +66,11 @@ def test_eq(self): ) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(startup_program) (ret,) = exe.run(main_program, fetch_list=[data_out.name]) np.testing.assert_allclose( diff --git a/test/legacy_test/test_rad2deg.py b/test/legacy_test/test_rad2deg.py index 710d77f0d9fc1..45841b8ca9d78 100644 --- a/test/legacy_test/test_rad2deg.py +++ b/test/legacy_test/test_rad2deg.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -33,22 +33,22 @@ def setUp(self): self.out_np = np.rad2deg(self.x_np) def test_static_graph(self): - startup_program = fluid.Program() - train_program = fluid.Program() - with fluid.program_guard(startup_program, train_program): + startup_program = base.Program() + train_program = base.Program() + with base.program_guard(startup_program, train_program): x = paddle.static.data( name='input', dtype=self.x_dtype, shape=self.x_shape ) out = paddle.rad2deg(x) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) res = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={'input': self.x_np}, fetch_list=[out], ) diff --git a/test/legacy_test/test_rand_op.py b/test/legacy_test/test_rand_op.py index 76bc134dafde2..c51005d1fa28c 100644 --- a/test/legacy_test/test_rand_op.py +++ b/test/legacy_test/test_rand_op.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid, rand -from paddle.fluid import Program, core, program_guard +from paddle import base, rand +from paddle.base import Program, core, program_guard class TestRandOpError(unittest.TestCase): @@ -32,8 +32,8 @@ def test_errors(self): with program_guard(main_prog, start_prog): def test_Variable(): - x1 = fluid.create_lod_tensor( - np.zeros((4, 784)), [[1, 1, 1, 1]], fluid.CPUPlace() + x1 = base.create_lod_tensor( + np.zeros((4, 784)), [[1, 1, 1, 1]], base.CPUPlace() ) rand(x1) @@ -53,12 +53,12 @@ class TestRandOp(unittest.TestCase): """ def run_net(self, use_cuda=False): - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() + exe = base.Executor(place) - train_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): + train_program = base.Program() + startup_program = base.Program() + with base.program_guard(train_program, startup_program): result_0 = rand([3, 4]) result_1 = rand([3, 4], 'float64') @@ -98,8 +98,8 @@ class TestRandOpForDygraph(unittest.TestCase): """ def run_net(self, use_cuda=False): - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - with fluid.dygraph.guard(place): + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() + with base.dygraph.guard(place): rand([3, 4]) rand([3, 4], 'float64') @@ 
-124,17 +124,17 @@ def test_default_dtype(self): def test_default_fp16(): paddle.framework.set_default_dtype('float16') out = paddle.tensor.random.rand([2, 3]) - self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP16) + self.assertEqual(out.dtype, base.core.VarDesc.VarType.FP16) def test_default_fp32(): paddle.framework.set_default_dtype('float32') out = paddle.tensor.random.rand([2, 3]) - self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP32) + self.assertEqual(out.dtype, base.core.VarDesc.VarType.FP32) def test_default_fp64(): paddle.framework.set_default_dtype('float64') out = paddle.tensor.random.rand([2, 3]) - self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64) + self.assertEqual(out.dtype, base.core.VarDesc.VarType.FP64) if paddle.is_compiled_with_cuda(): paddle.set_device('gpu') diff --git a/test/legacy_test/test_randint_op.py b/test/legacy_test/test_randint_op.py index 073b8255be957..478b980ac1604 100644 --- a/test/legacy_test/test_randint_op.py +++ b/test/legacy_test/test_randint_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.static import Program, program_guard paddle.enable_static() @@ -219,14 +219,14 @@ def test_dygraph(self): paddle.enable_static() def test_static(self): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): x = paddle.randint(-10, 10, []) # Test compile shape self.assertEqual(x.shape, ()) # Test runtime shape - exe = fluid.Executor() + exe = base.Executor() result = exe.run(fetch_list=[x]) self.assertEqual(result[0].shape, ()) diff --git a/test/legacy_test/test_randn_op.py b/test/legacy_test/test_randn_op.py index b298557b0b9f3..6bc3f48bc3faa 100644 --- a/test/legacy_test/test_randn_op.py +++ b/test/legacy_test/test_randn_op.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core from paddle.static import Program, program_guard diff --git a/test/legacy_test/test_random_routing_op.py b/test/legacy_test/test_random_routing_op.py index 8977e30011044..5bc1752ecc993 100644 --- a/test/legacy_test/test_random_routing_op.py +++ b/test/legacy_test/test_random_routing_op.py @@ -18,7 +18,7 @@ import paddle from paddle.distributed.models.moe import utils -from paddle.fluid import core +from paddle.base import core def random_routing(topk_idx, topk_value, prob, topk=2): diff --git a/test/legacy_test/test_random_seed.py b/test/legacy_test/test_random_seed.py index 70583a8cf2660..ead15119a9922 100644 --- a/test/legacy_test/test_random_seed.py +++ b/test/legacy_test/test_random_seed.py @@ -18,8 +18,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.tensor import random @@ -31,7 +31,7 @@ class TestGeneratorSeed(unittest.TestCase): def test_generator_uniform_random_dygraph(self): """Test Generator seed.""" - fluid.enable_dygraph() + base.enable_dygraph() gen = paddle.seed(12312321111) x = paddle.uniform([10], dtype="float32", min=0.0, max=1.0) @@ -56,19 +56,19 @@ def test_generator_uniform_random_dygraph(self): np.testing.assert_allclose(x_np, x3_np, rtol=1e-05) def test_generator_uniform_random_static(self): - fluid.disable_dygraph() + base.disable_dygraph() gen = paddle.seed(123123143) - startup_program = fluid.Program() - train_program = fluid.Program() - with 
fluid.program_guard(train_program, startup_program): + startup_program = base.Program() + train_program = base.Program() + with base.program_guard(train_program, startup_program): # example 1: # attr shape is a list which doesn't contain tensor Variable. result_1 = paddle.uniform(shape=[3, 4]) result_2 = paddle.uniform(shape=[3, 4]) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) exe.run(startup_program) out1 = exe.run( train_program, feed={}, fetch_list=[result_1, result_2] @@ -90,7 +90,7 @@ def test_generator_uniform_random_static(self): self.assertTrue(not np.allclose(out1_res2, out1_res1)) def test_gen_dropout_dygraph(self): - fluid.enable_dygraph() + base.enable_dygraph() gen = paddle.seed(111111111) st = gen.get_state() @@ -109,18 +109,18 @@ def test_gen_dropout_dygraph(self): np.testing.assert_allclose(y_np, y1_np, rtol=1e-05) def test_gen_dropout_static(self): - fluid.disable_dygraph() + base.disable_dygraph() gen = paddle.seed(123123143) - startup_program = fluid.Program() - train_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): + startup_program = base.Program() + train_program = base.Program() + with base.program_guard(train_program, startup_program): # example 1: # attr shape is a list which doesn't contain tensor Variable. x_1 = paddle.uniform(shape=[2, 10]) y_1 = paddle.nn.functional.dropout(x_1, 0.5) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) exe.run(startup_program) out1 = exe.run(train_program, feed={}, fetch_list=[y_1]) # gen.set_state(cur_state) @@ -135,7 +135,7 @@ def test_gen_dropout_static(self): def test_generator_gaussian_random_dygraph(self): """Test Generator seed.""" - fluid.enable_dygraph() + base.enable_dygraph() gen = paddle.seed(12312321111) x = random.gaussian([10], dtype="float32") @@ -156,19 +156,19 @@ def test_generator_gaussian_random_dygraph(self): np.testing.assert_allclose(x_np, x3_np, rtol=1e-05) def test_generator_gaussian_random_static(self): - fluid.disable_dygraph() + base.disable_dygraph() gen = paddle.seed(123123143) - startup_program = fluid.Program() - train_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): + startup_program = base.Program() + train_program = base.Program() + with base.program_guard(train_program, startup_program): # example 1: # attr shape is a list which doesn't contain tensor Variable. result_1 = random.gaussian(shape=[3, 4]) result_2 = random.gaussian(shape=[3, 4]) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) exe.run(startup_program) out1 = exe.run( train_program, feed={}, fetch_list=[result_1, result_2] @@ -192,7 +192,7 @@ def test_generator_gaussian_random_static(self): def test_generator_randint_dygraph(self): """Test Generator seed.""" - fluid.enable_dygraph() + base.enable_dygraph() gen = paddle.seed(12312321111) x = paddle.randint(low=10, shape=[10], dtype="int32") @@ -213,19 +213,19 @@ def test_generator_randint_dygraph(self): np.testing.assert_allclose(x_np, x3_np, rtol=1e-05) def test_generator_uniform_random_static_1(self): - fluid.disable_dygraph() + base.disable_dygraph() gen = paddle.seed(123123143) - startup_program = fluid.Program() - train_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): + startup_program = base.Program() + train_program = base.Program() + with base.program_guard(train_program, startup_program): # example 1: # attr shape is a list which doesn't contain tensor Variable. 
result_1 = paddle.uniform(shape=[3, 4]) result_2 = paddle.uniform(shape=[3, 4]) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) exe.run(startup_program) out1 = exe.run( train_program, feed={}, fetch_list=[result_1, result_2] @@ -248,7 +248,7 @@ def test_generator_uniform_random_static_1(self): def test_generator_randint_dygraph_1(self): """Test Generator seed.""" - fluid.enable_dygraph() + base.enable_dygraph() gen = paddle.seed(12312321111) x = paddle.randint(low=1) @@ -267,19 +267,19 @@ def test_generator_randint_dygraph_1(self): np.testing.assert_allclose(x_np, x3_np, rtol=1e-05) def test_generator_ranint_static(self): - fluid.disable_dygraph() + base.disable_dygraph() gen = paddle.seed(123123143) - startup_program = fluid.Program() - train_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): + startup_program = base.Program() + train_program = base.Program() + with base.program_guard(train_program, startup_program): # example 1: # attr shape is a list which doesn't contain tensor Variable. result_1 = paddle.randint(low=10, shape=[3, 4]) result_2 = paddle.randint(low=10, shape=[3, 4]) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) exe.run(startup_program) out1 = exe.run( train_program, feed={}, fetch_list=[result_1, result_2] @@ -304,7 +304,7 @@ def test_generator_ranint_static(self): def test_generator_randperm_dygraph(self): """Test Generator seed.""" - fluid.enable_dygraph() + base.enable_dygraph() gen = paddle.seed(12312321111) x = paddle.randperm(10) @@ -325,19 +325,19 @@ def test_generator_randperm_dygraph(self): np.testing.assert_allclose(x_np, x3_np, rtol=1e-05) def test_generator_randperm_static(self): - fluid.disable_dygraph() + base.disable_dygraph() paddle.seed(123123143) - startup_program = fluid.Program() - train_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): + startup_program = base.Program() + train_program = base.Program() + with base.program_guard(train_program, startup_program): # example 1: # attr shape is a list which doesn't contain tensor Variable. result_1 = paddle.randperm(10) result_2 = paddle.randperm(10) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) exe.run(startup_program) out1 = exe.run( train_program, feed={}, fetch_list=[result_1, result_2] @@ -360,14 +360,14 @@ def test_generator_randperm_static(self): self.assertTrue(not np.allclose(out1_res2, out1_res1)) def test_gen_TruncatedNormal_initializer(self): - fluid.disable_dygraph() + base.disable_dygraph() gen = paddle.seed(123123143) cur_state = gen.get_state() - startup_program = fluid.Program() - train_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): + startup_program = base.Program() + train_program = base.Program() + with base.program_guard(train_program, startup_program): # example 1: # attr shape is a list which doesn't contain tensor Variable. 
x = paddle.uniform(shape=[2, 10]) @@ -386,14 +386,14 @@ def test_gen_TruncatedNormal_initializer(self): ), ) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) exe.run(startup_program) out1 = exe.run( train_program, feed={}, fetch_list=[result_1, result_2] ) gen.manual_seed(123123143) - with fluid.program_guard(train_program, startup_program): + with base.program_guard(train_program, startup_program): exe.run(startup_program) out2 = exe.run( train_program, feed={}, fetch_list=[result_1, result_2] diff --git a/test/legacy_test/test_randperm_op.py b/test/legacy_test/test_randperm_op.py index 14bf49b387b63..bc14e4cbd4452 100644 --- a/test/legacy_test/test_randperm_op.py +++ b/test/legacy_test/test_randperm_op.py @@ -22,7 +22,7 @@ ) import paddle -from paddle.fluid import core +from paddle.base import core from paddle.static import Program, program_guard diff --git a/test/legacy_test/test_rank_attention_op.py b/test/legacy_test/test_rank_attention_op.py index d699a6311a3f4..f48fa41ba989a 100644 --- a/test/legacy_test/test_rank_attention_op.py +++ b/test/legacy_test/test_rank_attention_op.py @@ -18,7 +18,7 @@ import numpy as np from eager_op_test import OpTest -from paddle.fluid import core +from paddle.base import core def gen_input_help(input, rank_offset, max_rank, max_size): diff --git a/test/legacy_test/test_raw_program_optimizer.py b/test/legacy_test/test_raw_program_optimizer.py index 906b67d61d8c8..362970cdf2d3f 100644 --- a/test/legacy_test/test_raw_program_optimizer.py +++ b/test/legacy_test/test_raw_program_optimizer.py @@ -18,7 +18,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.distributed import fleet @@ -52,8 +52,8 @@ def test_single_gpu(self): sharding_startup_program = paddle.static.Program() strategy = fleet.DistributedStrategy() strategy.without_graph_optimization = True - with fluid.program_guard(sharding_program, sharding_startup_program): - with fluid.unique_name.guard(): + with base.program_guard(sharding_program, sharding_startup_program): + with base.unique_name.guard(): input_x = paddle.static.data( name="x", shape=[None, 32], dtype='float32' ) diff --git a/test/legacy_test/test_reader_reset.py b/test/legacy_test/test_reader_reset.py index 9a756af12ecfd..f91ea8a86be67 100644 --- a/test/legacy_test/test_reader_reset.py +++ b/test/legacy_test/test_reader_reset.py @@ -20,8 +20,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import compiler +from paddle import base +from paddle.base import compiler class TestReaderReset(unittest.TestCase): @@ -33,7 +33,7 @@ def fake_data_generator(): return fake_data_generator def setUp(self): - self.use_cuda = fluid.core.is_compiled_with_cuda() + self.use_cuda = base.core.is_compiled_with_cuda() self.ins_shape = [3] self.batch_size = 5 self.batch_num = 20 @@ -42,17 +42,17 @@ def setUp(self): self.prepare_data() def main(self, with_double_buffer): - main_prog = fluid.Program() - startup_prog = fluid.Program() + main_prog = base.Program() + startup_prog = base.Program() - with fluid.program_guard(main_prog, startup_prog): + with base.program_guard(main_prog, startup_prog): image = paddle.static.data( name='image', shape=[-1] + self.ins_shape, dtype='float32' ) label = paddle.static.data( name='label', shape=[-1, 1], dtype='int64' ) - data_reader_handle = fluid.io.PyReader( + data_reader_handle = base.io.PyReader( feed_list=[image, label], capacity=16, iterable=False, @@ -60,8 +60,8 @@ def main(self, with_double_buffer): ) 
fetch_list = [image.name, label.name] - place = fluid.CUDAPlace(0) if self.use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CUDAPlace(0) if self.use_cuda else base.CPUPlace() + exe = base.Executor(place) exe.run(startup_prog) data_reader_handle.decorate_sample_list_generator( @@ -85,7 +85,7 @@ def main(self, with_double_buffer): ) * label_val.reshape((ins_num, 1)) self.assertEqual(data_val.all(), broadcasted_label.all()) batch_id += 1 - except fluid.core.EOFException: + except base.core.EOFException: data_reader_handle.reset() pass_count += 1 self.assertEqual(pass_count * self.batch_num, batch_id) diff --git a/test/legacy_test/test_real_imag_op.py b/test/legacy_test/test_real_imag_op.py index 4a33add68e97e..3da20edb1b726 100644 --- a/test/legacy_test/test_real_imag_op.py +++ b/test/legacy_test/test_real_imag_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest import paddle -from paddle import fluid, static +from paddle import base, static numpy_apis = { "real": np.real, @@ -125,7 +125,7 @@ def test_in_dynamic_mode(self): np_res = numpy_apis[self.api](input) for place in self.places: # it is more convenient to use `guard` than `enable/disable_**` here - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_t = paddle.to_tensor(input) res = paddle_apis[self.api](input_t).numpy() np.testing.assert_array_equal(np_res, res) @@ -151,7 +151,7 @@ def test_dtype_error(self): # in dynamic mode with self.assertRaises(RuntimeError): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input = np.random.random(self._shape).astype("float32") input_t = paddle.to_tensor(input) res = paddle_apis[self.api](input_t) diff --git a/test/legacy_test/test_reduce_op.py b/test/legacy_test/test_reduce_op.py index 4043cb8ccba26..91bbd7acdbe26 100644 --- a/test/legacy_test/test_reduce_op.py +++ b/test/legacy_test/test_reduce_op.py @@ -18,9 +18,9 @@ from eager_op_test import OpTest, convert_float_to_uint16, skip_check_grad_ci import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard -from paddle.fluid.framework import convert_np_dtype_to_dtype_ +from paddle import base +from paddle.base import Program, core, program_guard +from paddle.base.framework import convert_np_dtype_to_dtype_ class TestSumOp(OpTest): @@ -1578,11 +1578,11 @@ def test_check_grad(self): class TestReduceSumOpError(unittest.TestCase): def test_errors(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with program_guard(Program(), Program()): # The input type of reduce_sum_op must be Variable. - x1 = fluid.create_lod_tensor( - np.array([[-1]]), [[1]], fluid.CPUPlace() + x1 = base.create_lod_tensor( + np.array([[-1]]), [[1]], base.CPUPlace() ) self.assertRaises(TypeError, paddle.sum, x1) # The input dtype of reduce_sum_op must be float32 or float64 or int32 or int64. 
@@ -1597,17 +1597,17 @@ def run_static( if np_axis is None: np_axis = attr_axis - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for place in places: - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): data = paddle.static.data("data", shape=shape, dtype=x_dtype) result_sum = paddle.sum( x=data, axis=attr_axis, dtype=attr_dtype ) - exe = fluid.Executor(place) + exe = base.Executor(place) input_data = np.random.rand(*shape).astype(x_dtype) (res,) = exe.run( feed={"data": input_data}, fetch_list=[result_sum] @@ -1654,8 +1654,8 @@ def test_static(self): def test_dygraph(self): np_x = np.random.random([2, 3, 4]).astype('int32') - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(np_x) + with base.dygraph.guard(): + x = base.dygraph.to_variable(np_x) out0 = paddle.sum(x).numpy() out1 = paddle.sum(x, axis=0).numpy() out2 = paddle.sum(x, axis=(0, 1)).numpy() @@ -1671,49 +1671,49 @@ class TestAllAPI(unittest.TestCase): def setUp(self): np.random.seed(123) paddle.enable_static() - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) def check_static_result(self, place): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input = paddle.static.data(name="input", shape=[4, 4], dtype="bool") result = paddle.all(x=input) input_np = np.random.randint(0, 2, [4, 4]).astype("bool") - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": input_np}, fetch_list=[result], ) self.assertTrue((fetches[0] == np.all(input_np)).all()) def check_static_float_result(self, place): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input = paddle.static.data( name="input", shape=[4, 4], dtype="float" ) result = paddle.all(x=input) input_np = np.random.randint(0, 2, [4, 4]).astype("float") - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": input_np}, fetch_list=[result], ) self.assertTrue((fetches[0] == np.all(input_np)).all()) def check_static_int_result(self, place): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input = paddle.static.data(name="input", shape=[4, 4], dtype="int") result = paddle.all(x=input) input_np = np.random.randint(0, 2, [4, 4]).astype("int") - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": input_np}, fetch_list=[result], ) @@ -1728,7 +1728,7 @@ def test_static(self): def test_dygraph(self): paddle.disable_static() for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_) x = paddle.assign(np_x) x = paddle.cast(x, 'bool') @@ -1772,49 +1772,49 @@ class TestAnyAPI(unittest.TestCase): def setUp(self): np.random.seed(123) paddle.enable_static() - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - 
self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) def check_static_result(self, place): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input = paddle.static.data(name="input", shape=[4, 4], dtype="bool") result = paddle.any(x=input) input_np = np.random.randint(0, 2, [4, 4]).astype("bool") - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": input_np}, fetch_list=[result], ) self.assertTrue((fetches[0] == np.any(input_np)).all()) def check_static_float_result(self, place): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input = paddle.static.data( name="input", shape=[4, 4], dtype="float" ) result = paddle.any(x=input) input_np = np.random.randint(0, 2, [4, 4]).astype("float") - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": input_np}, fetch_list=[result], ) self.assertTrue((fetches[0] == np.any(input_np)).all()) def check_static_int_result(self, place): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input = paddle.static.data(name="input", shape=[4, 4], dtype="int") result = paddle.any(x=input) input_np = np.random.randint(0, 2, [4, 4]).astype("int") - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": input_np}, fetch_list=[result], ) @@ -1829,7 +1829,7 @@ def test_static(self): def test_dygraph(self): paddle.disable_static() for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): np_x = np.random.randint(0, 2, (12, 10)).astype(np.bool_) x = paddle.assign(np_x) x = paddle.cast(x, 'bool') @@ -1874,7 +1874,7 @@ def test_dygraph(self): class TestAllZeroError(unittest.TestCase): def test_errors(self): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): def test_0_size(): array = np.array([], dtype=np.float32) diff --git a/test/legacy_test/test_registered_phi_kernels.py b/test/legacy_test/test_registered_phi_kernels.py index 67fcb2b9a7b0c..143f7999024f7 100644 --- a/test/legacy_test/test_registered_phi_kernels.py +++ b/test/legacy_test/test_registered_phi_kernels.py @@ -18,7 +18,7 @@ import yaml -from paddle.fluid import core +from paddle.base import core def parse_kernels_name(op_item): diff --git a/test/legacy_test/test_registry.py b/test/legacy_test/test_registry.py index 0b3690221c0a8..3c0b34a599cbe 100644 --- a/test/legacy_test/test_registry.py +++ b/test/legacy_test/test_registry.py @@ -18,7 +18,7 @@ from decorator_helper import prog_scope import paddle -from paddle import fluid +from paddle import base class TestRegistry(unittest.TestCase): @@ -27,8 +27,8 @@ def test_registry_layer(self): x = paddle.static.data(name='X', shape=[-1, 10, 10], dtype='float32') output = paddle.mean(x) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) X = np.random.random((10, 10)).astype("float32") mean_out = exe.run(feed={"X": X}, fetch_list=[output]) self.assertAlmostEqual(np.mean(X), mean_out[0], delta=1e-5) diff --git a/test/legacy_test/test_regularizer.py b/test/legacy_test/test_regularizer.py index da0fde3558a5a..a0edf81058b9e 
100644 --- a/test/legacy_test/test_regularizer.py +++ b/test/legacy_test/test_regularizer.py @@ -20,9 +20,9 @@ import numpy as np import paddle -from paddle import fluid, regularizer -from paddle.fluid import core, framework -from paddle.fluid.backward import append_backward +from paddle import base, regularizer +from paddle.base import core, framework +from paddle.base.backward import append_backward class TestL2Decay(unittest.TestCase): @@ -125,7 +125,7 @@ def bow_net( """ BOW net This model is from https://github.com/PaddlePaddle/models: - fluid/PaddleNLP/text_classification/nets.py + base/PaddleNLP/text_classification/nets.py """ emb = paddle.static.nn.embedding( input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim] @@ -161,18 +161,18 @@ def get_places(self): @contextlib.contextmanager def scope_prog_guard(self, main_prog, startup_prog): - scope = fluid.core.Scope() - with fluid.unique_name.guard(): - with fluid.scope_guard(scope): - with fluid.program_guard(main_prog, startup_prog): + scope = base.core.Scope() + with base.unique_name.guard(): + with base.scope_guard(scope): + with base.program_guard(main_prog, startup_prog): yield def run_program(self, place, feed_list): - exe = fluid.Executor(place) - feeder = fluid.DataFeeder(feed_list=feed_list, place=place) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + feeder = base.DataFeeder(feed_list=feed_list, place=place) + exe.run(base.default_startup_program()) - main_prog = fluid.default_main_program() + main_prog = base.default_main_program() param_list = [var.name for var in main_prog.block(0).all_parameters()] param_sum = [] @@ -189,8 +189,8 @@ def run_program(self, place, feed_list): def check_l2decay_regularizer(self, place, model): paddle.seed(1) paddle.framework.random._manual_program_seed(1) - main_prog = fluid.framework.Program() - startup_prog = fluid.framework.Program() + main_prog = base.framework.Program() + startup_prog = base.framework.Program() with self.scope_prog_guard( main_prog=main_prog, startup_prog=startup_prog ): @@ -214,8 +214,8 @@ def check_l2decay_regularizer(self, place, model): def check_l2decay(self, place, model): paddle.seed(1) paddle.framework.random._manual_program_seed(1) - main_prog = fluid.framework.Program() - startup_prog = fluid.framework.Program() + main_prog = base.framework.Program() + startup_prog = base.framework.Program() with self.scope_prog_guard( main_prog=main_prog, startup_prog=startup_prog @@ -229,7 +229,7 @@ def check_l2decay(self, place, model): avg_cost_l2 = model(data, label, self.word_len) - param_list = fluid.default_main_program().block(0).all_parameters() + param_list = base.default_main_program().block(0).all_parameters() para_sum = [] for para in param_list: para_mul = paddle.square(x=para) @@ -267,14 +267,14 @@ def test_repeated_regularization(self): fc_param_attr = paddle.ParamAttr( regularizer=paddle.regularizer.L1Decay() ) - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): x = paddle.uniform([2, 2, 3]) out = paddle.static.nn.fc(x, 5, weight_attr=fc_param_attr) loss = paddle.sum(out) sgd = paddle.optimizer.SGD(learning_rate=0.1, weight_decay=l2) sgd.minimize(loss) - with fluid.dygraph.guard(): - input = fluid.dygraph.to_variable( + with base.dygraph.guard(): + input = base.dygraph.to_variable( np.random.randn(3, 2).astype('float32') ) paddle.seed(1) @@ -289,14 +289,14 @@ def test_repeated_regularization(self): loss1 = linear1(input) loss1.backward() - # set l2 regularizer in 
optimizer, but l1 in fluid.ParamAttr + # set l2 regularizer in optimizer, but l1 in base.ParamAttr paddle.optimizer.SGD( parameters=linear1.parameters(), learning_rate=1e-2, weight_decay=l2, ).minimize(loss1) - # only set l1 in fluid.ParamAttr + # only set l1 in base.ParamAttr loss2 = linear2(input) loss2.backward() paddle.optimizer.SGD( @@ -307,13 +307,13 @@ def test_repeated_regularization(self): linear1.weight.numpy(), linear2.weight.numpy(), rtol=1e-05, - err_msg='weight should use the regularization in fluid.ParamAttr!', + err_msg='weight should use the regularization in base.ParamAttr!', ) np.testing.assert_allclose( linear1.bias.numpy(), linear2.bias.numpy(), rtol=1e-05, - err_msg='bias should use the regularization in fluid.ParamAttr!', + err_msg='bias should use the regularization in base.ParamAttr!', ) diff --git a/test/legacy_test/test_regularizer_api.py b/test/legacy_test/test_regularizer_api.py index 5a30dcb63649e..7eb6ecadefb0d 100644 --- a/test/legacy_test/test_regularizer_api.py +++ b/test/legacy_test/test_regularizer_api.py @@ -20,8 +20,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def bow_net( @@ -37,7 +37,7 @@ def bow_net( """ BOW net This model is from https://github.com/PaddlePaddle/models: - fluid/PaddleNLP/text_classification/nets.py + base/PaddleNLP/text_classification/nets.py """ emb = paddle.static.nn.embedding( input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim] @@ -74,18 +74,18 @@ def get_places(self): @contextlib.contextmanager def scope_prog_guard(self, main_prog, startup_prog): - scope = fluid.core.Scope() - with fluid.unique_name.guard(): - with fluid.scope_guard(scope): - with fluid.program_guard(main_prog, startup_prog): + scope = base.core.Scope() + with base.unique_name.guard(): + with base.scope_guard(scope): + with base.program_guard(main_prog, startup_prog): yield def run_program(self, place, feed_list): - exe = fluid.Executor(place) - feeder = fluid.DataFeeder(feed_list=feed_list, place=place) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + feeder = base.DataFeeder(feed_list=feed_list, place=place) + exe.run(base.default_startup_program()) - main_prog = fluid.default_main_program() + main_prog = base.default_main_program() param_list = [var.name for var in main_prog.block(0).all_parameters()] param_sum = [] @@ -102,8 +102,8 @@ def run_program(self, place, feed_list): def check_l2decay_regularizer(self, place, model): paddle.seed(1) paddle.framework.random._manual_program_seed(1) - main_prog = fluid.framework.Program() - startup_prog = fluid.framework.Program() + main_prog = base.framework.Program() + startup_prog = base.framework.Program() with self.scope_prog_guard( main_prog=main_prog, startup_prog=startup_prog ): @@ -127,8 +127,8 @@ def check_l2decay_regularizer(self, place, model): def check_l2decay(self, place, model): paddle.seed(1) paddle.framework.random._manual_program_seed(1) - main_prog = fluid.framework.Program() - startup_prog = fluid.framework.Program() + main_prog = base.framework.Program() + startup_prog = base.framework.Program() with self.scope_prog_guard( main_prog=main_prog, startup_prog=startup_prog @@ -142,7 +142,7 @@ def check_l2decay(self, place, model): avg_cost_l2 = model(data, label, self.word_len) - param_list = fluid.default_main_program().block(0).all_parameters() + param_list = base.default_main_program().block(0).all_parameters() para_sum = [] for para in param_list: para_mul = 
paddle.square(x=para) @@ -182,14 +182,14 @@ def test_repeated_regularization(self): fc_param_attr = paddle.ParamAttr( regularizer=paddle.regularizer.L1Decay() ) - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): x = paddle.uniform([2, 2, 3]) out = paddle.static.nn.fc(x, 5, weight_attr=fc_param_attr) loss = paddle.sum(out) sgd = paddle.optimizer.SGD(learning_rate=0.1, weight_decay=l2) sgd.minimize(loss) - with fluid.dygraph.guard(): - input = fluid.dygraph.to_variable( + with base.dygraph.guard(): + input = base.dygraph.to_variable( np.random.randn(3, 2).astype('float32') ) paddle.seed(1) @@ -204,14 +204,14 @@ def test_repeated_regularization(self): loss1 = linear1(input) loss1.backward() - # set l2 regularizer in optimizer, but l1 in fluid.ParamAttr + # set l2 regularizer in optimizer, but l1 in base.ParamAttr paddle.optimizer.SGD( parameters=linear1.parameters(), learning_rate=1e-2, weight_decay=l2, ).minimize(loss1) - # only set l1 in fluid.ParamAttr + # only set l1 in base.ParamAttr loss2 = linear2(input) loss2.backward() paddle.optimizer.SGD( @@ -222,13 +222,13 @@ def test_repeated_regularization(self): linear1.weight.numpy(), linear2.weight.numpy(), rtol=1e-05, - err_msg='weight should use the regularization in fluid.ParamAttr!', + err_msg='weight should use the regularization in base.ParamAttr!', ) np.testing.assert_allclose( linear1.bias.numpy(), linear2.bias.numpy(), rtol=1e-05, - err_msg='bias should use the regularization in fluid.ParamAttr!', + err_msg='bias should use the regularization in base.ParamAttr!', ) diff --git a/test/legacy_test/test_renorm_op.py b/test/legacy_test/test_renorm_op.py index ae857a8bfb849..89029b02bd963 100644 --- a/test/legacy_test/test_renorm_op.py +++ b/test/legacy_test/test_renorm_op.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import Program, program_guard +from paddle import base +from paddle.base import Program, program_guard paddle.set_device('cpu') @@ -40,7 +40,7 @@ def test_renorm_api(self): with program_guard(Program(), Program()): x = paddle.static.data(name="x", shape=[-1, 2, 3], dtype='float64') z = paddle.renorm(x, self.p, self.dim, self.max_norm) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) (res,) = exe.run( feed={"x": self.data_x}, fetch_list=[z], return_numpy=False ) @@ -61,7 +61,7 @@ def test_renorm_api(self): def test_dygraph_api(self): self.input_data() # case axis none - with fluid.dygraph.guard(fluid.CPUPlace()): + with base.dygraph.guard(base.CPUPlace()): input = [[[2.0, 2, -2], [3, 0.3, 3]], [[2, -8, 2], [3.1, 3.7, 3]]] x = paddle.to_tensor(input, stop_gradient=False) y = paddle.renorm(x, 1.0, 2, 2.05) @@ -90,7 +90,7 @@ def test_dygraph_api(self): expected_grad, np.array(x.grad), rtol=1e-05 ) # #test exception: - with fluid.dygraph.guard(): + with base.dygraph.guard(): input = [[[2.0, 2, -2], [3, 0.3, 3]], [[2, -8, 2], [3.1, 3.7, 3]]] x = paddle.to_tensor(input, stop_gradient=False) exp = False diff --git a/test/legacy_test/test_repeat_interleave_op.py b/test/legacy_test/test_repeat_interleave_op.py index 3c553fecb1b15..a0c1a3bad03d0 100644 --- a/test/legacy_test/test_repeat_interleave_op.py +++ b/test/legacy_test/test_repeat_interleave_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest import paddle -from paddle import fluid -from paddle.fluid import Program, program_guard +from paddle import base +from paddle.base import Program, program_guard class 
TestRepeatInterleaveOp(OpTest): @@ -129,7 +129,7 @@ def test_repeat_interleave_api(self): ) index.desc.set_need_check_feed(False) z = paddle.repeat_interleave(x, index, axis=1) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) (res,) = exe.run( feed={'x': self.data_x, 'repeats_': self.data_index}, fetch_list=[z.name], @@ -150,7 +150,7 @@ def test_repeat_interleave_api(self): ) index.desc.set_need_check_feed(False) z = paddle.repeat_interleave(x, index, axis=0) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) (res,) = exe.run( feed={ 'x': self.data_x, @@ -167,7 +167,7 @@ def test_repeat_interleave_api(self): x = paddle.static.data(name='x', shape=[-1, 4], dtype='float32') x.desc.set_need_check_feed(False) z = paddle.repeat_interleave(x, repeats, axis=0) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) (res,) = exe.run( feed={'x': self.data_x}, fetch_list=[z.name], return_numpy=False ) @@ -179,7 +179,7 @@ def test_repeat_interleave_api(self): x = paddle.static.data(name='x', shape=[-1], dtype="float32") x.desc.set_need_check_feed(False) z = paddle.repeat_interleave(x, repeats) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) (res,) = exe.run( feed={'x': self.data_zero_dim_x}, fetch_list=[z.name], @@ -199,7 +199,7 @@ def test_repeat_interleave_api(self): ) index.desc.set_need_check_feed(False) z = paddle.repeat_interleave(x, index, axis=-1) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) (res,) = exe.run( feed={'x': self.data_x, 'repeats_': self.data_index}, fetch_list=[z.name], @@ -214,17 +214,17 @@ def test_dygraph_api(self): input_x = np.array([[1, 2, 1], [1, 2, 3]]).astype('int32') index_x = np.array([1, 1, 2, 1, 2, 2]).astype('int32') - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(input_x) - index = fluid.dygraph.to_variable(index_x) + with base.dygraph.guard(): + x = base.dygraph.to_variable(input_x) + index = base.dygraph.to_variable(index_x) z = paddle.repeat_interleave(x, index, None) np_z = z.numpy() expect_out = np.repeat(input_x, index_x, axis=None) np.testing.assert_allclose(expect_out, np_z, rtol=1e-05) # case repeats int - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(input_x) + with base.dygraph.guard(): + x = base.dygraph.to_variable(input_x) index = 2 z = paddle.repeat_interleave(x, index, None) np_z = z.numpy() @@ -232,17 +232,17 @@ def test_dygraph_api(self): np.testing.assert_allclose(expect_out, np_z, rtol=1e-05) # case 1: - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(self.data_x) - index = fluid.dygraph.to_variable(self.data_index) + with base.dygraph.guard(): + x = base.dygraph.to_variable(self.data_x) + index = base.dygraph.to_variable(self.data_index) z = paddle.repeat_interleave(x, index, -1) np_z = z.numpy() expect_out = np.repeat(self.data_x, self.data_index, axis=-1) np.testing.assert_allclose(expect_out, np_z, rtol=1e-05) - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(self.data_x) - index = fluid.dygraph.to_variable(self.data_index) + with base.dygraph.guard(): + x = base.dygraph.to_variable(self.data_x) + index = base.dygraph.to_variable(self.data_index) z = paddle.repeat_interleave(x, index, 1) np_z = z.numpy() expect_out = np.repeat(self.data_x, self.data_index, axis=1) @@ -250,17 +250,17 @@ def test_dygraph_api(self): # case 2: index_x = np.array([1, 2, 1]).astype('int32') - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(self.data_x) 
- index = fluid.dygraph.to_variable(index_x) + with base.dygraph.guard(): + x = base.dygraph.to_variable(self.data_x) + index = base.dygraph.to_variable(index_x) z = paddle.repeat_interleave(x, index, axis=0) np_z = z.numpy() expect_out = np.repeat(self.data_x, index, axis=0) np.testing.assert_allclose(expect_out, np_z, rtol=1e-05) # case 3 zero_dim: - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(self.data_zero_dim_x) + with base.dygraph.guard(): + x = base.dygraph.to_variable(self.data_zero_dim_x) index = 2 z = paddle.repeat_interleave(x, index, None) np_z = z.numpy() diff --git a/test/legacy_test/test_require_version.py b/test/legacy_test/test_require_version.py index b989dd8b0a9a6..2b7f5909d6675 100644 --- a/test/legacy_test/test_require_version.py +++ b/test/legacy_test/test_require_version.py @@ -16,64 +16,64 @@ import warnings import paddle -import paddle.version as fluid_version -from paddle import fluid +import paddle.version as base_version +from paddle import base class VersionTest(unittest.TestCase): def test_check_output(self): warnings.warn( - "paddle.__version__: {}, fluid_version.full_version: {}, fluid_version.major: {}, fluid_version.minor: {}, fluid_version.patch: {}, fluid_version.rc: {}.".format( + "paddle.__version__: {}, base_version.full_version: {}, base_version.major: {}, base_version.minor: {}, base_version.patch: {}, base_version.rc: {}.".format( paddle.__version__, - fluid_version.full_version, - fluid_version.major, - fluid_version.minor, - fluid_version.patch, - fluid_version.rc, + base_version.full_version, + base_version.major, + base_version.minor, + base_version.patch, + base_version.rc, ) ) - ori_full_version = fluid_version.full_version + ori_full_version = base_version.full_version ori_sep_version = [ - fluid_version.major, - fluid_version.minor, - fluid_version.patch, - fluid_version.rc, + base_version.major, + base_version.minor, + base_version.patch, + base_version.rc, ] [ - fluid_version.major, - fluid_version.minor, - fluid_version.patch, - fluid_version.rc, + base_version.major, + base_version.minor, + base_version.patch, + base_version.rc, ] = ['1', '4', '1', '0'] - fluid.require_version('1') - fluid.require_version('1.4') - fluid.require_version('1.4.1.0') + base.require_version('1') + base.require_version('1.4') + base.require_version('1.4.1.0') # any version >= 1.4.1 is acceptable. - fluid.require_version('1.4.1') + base.require_version('1.4.1') # if 1.4.1 <= version <= 1.6.0, it is acceptable. - fluid.require_version(min_version='1.4.1', max_version='1.6.0') + base.require_version(min_version='1.4.1', max_version='1.6.0') # only version 1.4.1 is acceptable. - fluid.require_version(min_version='1.4.1', max_version='1.4.1') + base.require_version(min_version='1.4.1', max_version='1.4.1') # if installed version is 0.0.0.0, throw warning and skip the checking. [ - fluid_version.major, - fluid_version.minor, - fluid_version.patch, - fluid_version.rc, + base_version.major, + base_version.minor, + base_version.patch, + base_version.rc, ] = ['0', '0', '0', '0'] - fluid.require_version('0.0.0') + base.require_version('0.0.0') - fluid_version.full_version = ori_full_version + base_version.full_version = ori_full_version [ - fluid_version.major, - fluid_version.minor, - fluid_version.patch, - fluid_version.rc, + base_version.major, + base_version.minor, + base_version.patch, + base_version.rc, ] = ori_sep_version @@ -82,76 +82,76 @@ class TestErrors(unittest.TestCase): def test_errors(self): # The type of params must be str. 
def test_input_type(): - fluid.require_version(100) + base.require_version(100) self.assertRaises(TypeError, test_input_type) def test_input_type_1(): - fluid.require_version('0', 200) + base.require_version('0', 200) self.assertRaises(TypeError, test_input_type_1) # The value of params must be in format r'\d+(\.\d+){0,3}', like '1.5.2.0', '1.6' ... def test_input_value_1(): - fluid.require_version('string') + base.require_version('string') self.assertRaises(ValueError, test_input_value_1) def test_input_value_1_1(): - fluid.require_version('1.5', 'string') + base.require_version('1.5', 'string') self.assertRaises(ValueError, test_input_value_1_1) def test_input_value_2(): - fluid.require_version('1.5.2.0.0') + base.require_version('1.5.2.0.0') self.assertRaises(ValueError, test_input_value_2) def test_input_value_2_1(): - fluid.require_version('1.5', '1.5.2.0.0') + base.require_version('1.5', '1.5.2.0.0') self.assertRaises(ValueError, test_input_value_2_1) def test_input_value_3(): - fluid.require_version('1.5.2a.0') + base.require_version('1.5.2a.0') self.assertRaises(ValueError, test_input_value_3) # The installed version must be equal or greater than the required version. def test_version(): - fluid.require_version('100') + base.require_version('100') # The installed version must be in [min_version, max_version] def test_version_1(): - fluid.require_version('0.0.0', '1.4') + base.require_version('0.0.0', '1.4') def test_version_2(): - fluid.require_version('1.4.0', '1.2') + base.require_version('1.4.0', '1.2') - ori_full_version = fluid_version.full_version + ori_full_version = base_version.full_version ori_sep_version = [ - fluid_version.major, - fluid_version.minor, - fluid_version.patch, - fluid_version.rc, + base_version.major, + base_version.minor, + base_version.patch, + base_version.rc, ] [ - fluid_version.major, - fluid_version.minor, - fluid_version.patch, - fluid_version.rc, + base_version.major, + base_version.minor, + base_version.patch, + base_version.rc, ] = ['1', '4', '1', '0'] self.assertRaises(Exception, test_version) self.assertRaises(Exception, test_version_1) self.assertRaises(Exception, test_version_2) - fluid_version.full_version = ori_full_version + base_version.full_version = ori_full_version [ - fluid_version.major, - fluid_version.minor, - fluid_version.patch, - fluid_version.rc, + base_version.major, + base_version.minor, + base_version.patch, + base_version.rc, ] = ori_sep_version diff --git a/test/legacy_test/test_reshape_op.py b/test/legacy_test/test_reshape_op.py index dc85f407aceab..5caaaae049b76 100755 --- a/test/legacy_test/test_reshape_op.py +++ b/test/legacy_test/test_reshape_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid +from paddle import base from paddle.static import Program, program_guard @@ -305,7 +305,7 @@ def setUp(self): self.python_api = paddle.tensor.reshape self.python_out_sig = ['Out'] input = np.random.randint(0, 127, self.ori_shape).astype(self.dtype) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(input)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(input)} self.attrs = { 'shape': self.new_shape, 'use_mkldnn': self.use_mkldnn, @@ -325,7 +325,7 @@ def init_data(self): def test_check_output(self): self.check_output_with_place( - fluid.core.CPUPlace(), + base.core.CPUPlace(), atol=1e-5, no_check_set=['XShape'], ) @@ -413,7 +413,7 @@ def test_imperative(self): self._set_paddle_api() input = np.random.random([2, 25]).astype("float32") shape = [2, 5, 5] 
- with fluid.dygraph.guard(): + with base.dygraph.guard(): x = self.to_tensor(input) positive_five = self.fill_constant([1], "int32", 5) @@ -437,7 +437,7 @@ def test_imperative(self): self._set_paddle_api() input = np.random.random([2, 25]).astype("float32") shape = [2, 5, 5] - with fluid.dygraph.guard(): + with base.dygraph.guard(): x = self.to_tensor(input) positive_five = self.fill_constant([1], "int32", 5) @@ -463,7 +463,7 @@ def _test_errors(self): with program_guard(Program(), Program()): # The x type of reshape_op must be Variable. def test_x_type(): - x1 = fluid.create_lod_tensor( + x1 = base.create_lod_tensor( np.array([[-1]]), [[1]], paddle.CPUPlace() ) self.reshape(x1, shape=[1]) @@ -602,18 +602,18 @@ def test_dygraph(self): paddle.enable_static() def test_static(self): - main_prog = fluid.Program() - with fluid.program_guard(main_prog, fluid.Program()): + main_prog = base.Program() + with base.program_guard(main_prog, base.Program()): x = paddle.rand([]) x.stop_gradient = False out = paddle.reshape(x, [-1]) - fluid.backward.append_backward(out) + base.backward.append_backward(out) prog = paddle.static.default_main_program() block = prog.global_block() - x_grad = block.var(fluid.framework.grad_var_name(x.name)) - out_grad = block.var(fluid.framework.grad_var_name(out.name)) + x_grad = block.var(base.framework.grad_var_name(x.name)) + out_grad = block.var(base.framework.grad_var_name(out.name)) # Test compile shape self.assertEqual(x.shape, ()) @@ -621,7 +621,7 @@ def test_static(self): self.assertEqual(x_grad.shape, ()) self.assertEqual(out_grad.shape, (1,)) - exe = fluid.Executor() + exe = base.Executor() result = exe.run(main_prog, fetch_list=[x, out, x_grad, out_grad]) # Test runtime shape diff --git a/test/legacy_test/test_retain_graph.py b/test/legacy_test/test_retain_graph.py index 03d45ec5b8457..873c1ad201249 100644 --- a/test/legacy_test/test_retain_graph.py +++ b/test/legacy_test/test_retain_graph.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base paddle.disable_static() SEED = 2020 @@ -152,7 +152,7 @@ def run_retain(self, need_retain): def test_retain(self): self.run_retain(need_retain=True) - if not fluid.framework.in_dygraph_mode(): + if not base.framework.in_dygraph_mode(): self.assertRaises(RuntimeError, self.run_retain, need_retain=False) diff --git a/test/legacy_test/test_rms_norm_op.py b/test/legacy_test/test_rms_norm_op.py index d5b6530ed51c8..cd9fa001e8362 100644 --- a/test/legacy_test/test_rms_norm_op.py +++ b/test/legacy_test/test_rms_norm_op.py @@ -16,8 +16,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def quant_helper( @@ -331,7 +331,7 @@ def check_rmsnorm(self, x_np, gamma_np, beta_np, dtype): self.epsilon, begin_norm_axis=1, ) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) out_s = exe.run( feed={ "x_static": x_np.astype(dtype), @@ -381,7 +381,7 @@ def check_rmsnorm_int8(self, x_np, gamma_np, beta_np, dtype): quant_max_bound=self.quant_max_bound, quant_min_bound=self.quant_min_bound, ) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) out_s = exe.run( feed={ "x_static": x_np.astype(dtype), @@ -435,7 +435,7 @@ def check_residual_bias_rmsnorm( residual=residual_static, ) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) out_s = exe.run( feed={ "x_static": x_np.astype(dtype), diff --git a/test/legacy_test/test_rmsprop_op.py 
b/test/legacy_test/test_rmsprop_op.py index 8540e7cf3f264..bc7d38a2d77a9 100644 --- a/test/legacy_test/test_rmsprop_op.py +++ b/test/legacy_test/test_rmsprop_op.py @@ -18,8 +18,8 @@ from op import Operator import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def create_selected_rows_and_tensor( @@ -56,7 +56,7 @@ def setup( ): np.random.seed(5) # fix seed - self.scope = fluid.global_scope() + self.scope = base.global_scope() self.place = place self.param_name = "param" @@ -231,12 +231,12 @@ def test_rmsprop(self): size = (128, 320) for place in places: for centered in [False, True]: - with fluid.scope_guard(core.Scope()): + with base.scope_guard(core.Scope()): self.check_with_place( place, is_sparse=False, centered=centered, size=size ) - with fluid.scope_guard(core.Scope()): + with base.scope_guard(core.Scope()): self.check_with_place( place, is_sparse=True, @@ -245,7 +245,7 @@ def test_rmsprop(self): size=size, ) - with fluid.scope_guard(core.Scope()): + with base.scope_guard(core.Scope()): self.check_with_place( place, is_sparse=True, @@ -274,9 +274,9 @@ def test_rmsprop_dygraph(self): def test_rmsprop(self): paddle.enable_static() - place = fluid.CPUPlace() - main = fluid.Program() - with fluid.program_guard(main): + place = base.CPUPlace() + main = base.Program() + with base.program_guard(main): x = paddle.static.data(name='x', shape=[-1, 13], dtype='float32') y = paddle.static.data(name='y', shape=[-1, 1], dtype='float32') y_predict = paddle.static.nn.fc(x, size=1) @@ -292,9 +292,9 @@ def test_rmsprop(self): train_reader = paddle.batch( paddle.dataset.uci_housing.train(), batch_size=1 ) - feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + feeder = base.DataFeeder(place=place, feed_list=[x, y]) + exe = base.Executor(place) + exe.run(base.default_startup_program()) for data in train_reader(): exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list) diff --git a/test/legacy_test/test_rnn_cell_api.py b/test/legacy_test/test_rnn_cell_api.py index f163e62ea1f02..cf83bdb8120ec 100644 --- a/test/legacy_test/test_rnn_cell_api.py +++ b/test/legacy_test/test_rnn_cell_api.py @@ -23,10 +23,10 @@ from rnn_numpy import rnn as numpy_rnn import paddle -from paddle import fluid -from paddle.fluid import core, framework -from paddle.fluid.executor import Executor -from paddle.fluid.framework import Program, program_guard +from paddle import base +from paddle.base import core, framework +from paddle.base.executor import Executor +from paddle.base.framework import Program, program_guard from paddle.nn.layer.rnn import rnn as dynamic_rnn paddle.enable_static() @@ -160,7 +160,7 @@ def test_run(self): 'float64' ) setattr(numpy_cell, k, param) - fluid.global_scope().find_var(v.name).get_tensor().set(param, place) + base.global_scope().find_var(v.name).get_tensor().set(param, place) sequence_length = paddle.static.data( name="sequence_length", shape=[None], dtype='int64' diff --git a/test/legacy_test/test_rnn_decode_api.py b/test/legacy_test/test_rnn_decode_api.py index efb7e30592da3..72bf68db45c44 100644 --- a/test/legacy_test/test_rnn_decode_api.py +++ b/test/legacy_test/test_rnn_decode_api.py @@ -19,9 +19,9 @@ import numpy as np import paddle -from paddle import Model, fluid, nn, set_device -from paddle.fluid import layers -from paddle.fluid.data_feeder import convert_dtype +from paddle import Model, base, nn, set_device +from paddle.base import layers 
+from paddle.base.data_feeder import convert_dtype from paddle.nn import ( RNN, BeamSearchDecoder, @@ -171,10 +171,10 @@ def __init__( seed=None, ): self.main_program = ( - fluid.Program() if main_program is None else main_program + base.Program() if main_program is None else main_program ) self.startup_program = ( - fluid.Program() if startup_program is None else startup_program + base.Program() if startup_program is None else startup_program ) if seed is not None: self.main_program.random_seed = seed @@ -183,7 +183,7 @@ def __init__( self.executor = executor def build_program(self, model_cls, alg_cls, model_hparams, alg_hparams): - with fluid.program_guard(self.main_program, self.startup_program): + with base.program_guard(self.main_program, self.startup_program): source = paddle.static.data( name="src", shape=[None, None], dtype="int64" ) @@ -300,13 +300,13 @@ def setUp(self): def _calc_output(self, place, mode="test", dygraph=True): if dygraph: - fluid.enable_dygraph(place) + base.enable_dygraph(place) else: - fluid.disable_dygraph() + base.disable_dygraph() gen = paddle.seed(self._random_seed) paddle.framework.random._manual_program_seed(self._random_seed) - scope = fluid.core.Scope() - with fluid.scope_guard(scope): + scope = base.core.Scope() + with base.scope_guard(scope): layer = ( self.model_cls(**self.attrs) if isinstance(self.attrs, dict) @@ -331,7 +331,7 @@ def check_output_with_place(self, place, mode="test"): ) def check_output(self): - devices = ["CPU", "GPU"] if fluid.is_compiled_with_cuda() else ["CPU"] + devices = ["CPU", "GPU"] if base.is_compiled_with_cuda() else ["CPU"] for device in devices: place = set_device(device) self.check_output_with_place(place) diff --git a/test/legacy_test/test_rnn_memory_helper_op.py b/test/legacy_test/test_rnn_memory_helper_op.py index 68bea9c94ff39..16a0cccb10d6f 100644 --- a/test/legacy_test/test_rnn_memory_helper_op.py +++ b/test/legacy_test/test_rnn_memory_helper_op.py @@ -16,9 +16,9 @@ import numpy as np -from paddle.fluid import core -from paddle.fluid.executor import Executor -from paddle.fluid.framework import Program +from paddle.base import core +from paddle.base.executor import Executor +from paddle.base.framework import Program class RNNMemoryHelperOpTest(unittest.TestCase): diff --git a/test/legacy_test/test_rnn_op.py b/test/legacy_test/test_rnn_op.py index da9c76b27ffa3..4d7412f949786 100644 --- a/test/legacy_test/test_rnn_op.py +++ b/test/legacy_test/test_rnn_op.py @@ -20,7 +20,7 @@ from eager_op_test import OpTest import paddle -from paddle.fluid import core +from paddle.base import core sys.path.append("../../test/rnn") from convert import get_params_for_net diff --git a/test/legacy_test/test_roll_op.py b/test/legacy_test/test_roll_op.py index f491112b6a482..b8aa8e28529bd 100644 --- a/test/legacy_test/test_roll_op.py +++ b/test/legacy_test/test_roll_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard class TestRollOp(OpTest): @@ -169,7 +169,7 @@ def test_roll_op_api(self): x = paddle.static.data(name='x', shape=[-1, 3], dtype='float32') x.desc.set_need_check_feed(False) z = paddle.roll(x, shifts=1) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) (res,) = exe.run( feed={'x': self.data_x}, fetch_list=[z.name], return_numpy=False ) @@ -183,7 +183,7 @@ def test_roll_op_api(self): x = 
paddle.static.data(name='x', shape=[-1, 3], dtype='float32') x.desc.set_need_check_feed(False) z = paddle.roll(x, shifts=1, axis=0) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) (res,) = exe.run( feed={'x': self.data_x}, fetch_list=[z.name], return_numpy=False ) @@ -195,8 +195,8 @@ def test_roll_op_api(self): def test_dygraph_api(self): self.input_data() # case 1: - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(self.data_x) + with base.dygraph.guard(): + x = base.dygraph.to_variable(self.data_x) z = paddle.roll(x, shifts=1) np_z = z.numpy() expect_out = np.array( @@ -205,8 +205,8 @@ def test_dygraph_api(self): np.testing.assert_allclose(expect_out, np_z, rtol=1e-05) # case 2: - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(self.data_x) + with base.dygraph.guard(): + x = base.dygraph.to_variable(self.data_x) z = paddle.roll(x, shifts=1, axis=0) np_z = z.numpy() expect_out = np.array( @@ -222,7 +222,7 @@ def test_axis_out_range(): x = paddle.static.data(name='x', shape=[-1, 3], dtype='float32') x.desc.set_need_check_feed(False) z = paddle.roll(x, shifts=1, axis=10) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) (res,) = exe.run( feed={'x': self.data_x}, fetch_list=[z.name], @@ -232,7 +232,7 @@ def test_axis_out_range(): self.assertRaises(ValueError, test_axis_out_range) def test_shifts_as_tensor_dygraph(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): x = paddle.arange(9).reshape([3, 3]) shape = paddle.shape(x) shifts = shape // 2 @@ -250,12 +250,12 @@ def test_shifts_as_tensor_static(self): out = paddle.roll(x, shifts=shifts, axis=axes) expected_out = np.array([[8, 6, 7], [2, 0, 1], [5, 3, 4]]) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) [out_np] = exe.run(fetch_list=[out]) np.testing.assert_allclose(out_np, expected_out, rtol=1e-05) if paddle.is_compiled_with_cuda(): - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) [out_np] = exe.run(fetch_list=[out]) np.testing.assert_allclose(out_np, expected_out, rtol=1e-05) diff --git a/test/legacy_test/test_rot90_op.py b/test/legacy_test/test_rot90_op.py index e3fc1a75bd757..795cff3f7b357 100644 --- a/test/legacy_test/test_rot90_op.py +++ b/test/legacy_test/test_rot90_op.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base class TestRot90_API(unittest.TestCase): @@ -25,19 +25,19 @@ class TestRot90_API(unittest.TestCase): def test_static_graph(self): paddle.enable_static() - startup_program = fluid.Program() - train_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): + startup_program = base.Program() + train_program = base.Program() + with base.program_guard(train_program, startup_program): input = paddle.static.data( name='input', dtype='float32', shape=[2, 3] ) output = paddle.rot90(input, k=1, axes=[0, 1]) output = paddle.rot90(output, k=1, axes=[0, 1]) output = output.rot90(k=1, axes=[0, 1]) - place = fluid.CPUPlace() - if fluid.core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) + place = base.CPUPlace() + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) + exe = base.Executor(place) exe.run(startup_program) img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32) @@ -56,17 +56,17 @@ def test_static_graph(self): def test_static_k_0(self): paddle.enable_static() input = paddle.static.data(name='input', dtype='float32', shape=[2, 3]) - 
startup_program = fluid.Program() - train_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): + startup_program = base.Program() + train_program = base.Program() + with base.program_guard(train_program, startup_program): input = paddle.static.data( name='input', dtype='float32', shape=[2, 3] ) output = paddle.rot90(input, k=0, axes=[0, 1]) - place = fluid.CPUPlace() - if fluid.core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) + place = base.CPUPlace() + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) + exe = base.Executor(place) exe.run(startup_program) img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32) @@ -85,17 +85,17 @@ def test_static_k_0(self): def test_static_k_2(self): paddle.enable_static() input = paddle.static.data(name='input', dtype='float32', shape=[2, 3]) - startup_program = fluid.Program() - train_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): + startup_program = base.Program() + train_program = base.Program() + with base.program_guard(train_program, startup_program): input = paddle.static.data( name='input', dtype='float32', shape=[2, 3] ) output = paddle.rot90(input, k=2, axes=[0, 1]) - place = fluid.CPUPlace() - if fluid.core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) + place = base.CPUPlace() + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) + exe = base.Executor(place) exe.run(startup_program) img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32) @@ -114,17 +114,17 @@ def test_static_k_2(self): def test_static_k_3(self): paddle.enable_static() input = paddle.static.data(name='input', dtype='float32', shape=[2, 3]) - startup_program = fluid.Program() - train_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): + startup_program = base.Program() + train_program = base.Program() + with base.program_guard(train_program, startup_program): input = paddle.static.data( name='input', dtype='float32', shape=[2, 3] ) output = paddle.rot90(input, k=3, axes=[0, 1]) - place = fluid.CPUPlace() - if fluid.core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) + place = base.CPUPlace() + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) + exe = base.Executor(place) exe.run(startup_program) img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32) @@ -143,17 +143,17 @@ def test_static_k_3(self): def test_static_neg_k_1(self): paddle.enable_static() input = paddle.static.data(name='input', dtype='float32', shape=[2, 3]) - startup_program = fluid.Program() - train_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): + startup_program = base.Program() + train_program = base.Program() + with base.program_guard(train_program, startup_program): input = paddle.static.data( name='input', dtype='float32', shape=[2, 3] ) output = paddle.rot90(input, k=-1, axes=[0, 1]) - place = fluid.CPUPlace() - if fluid.core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) + place = base.CPUPlace() + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) + exe = base.Executor(place) exe.run(startup_program) img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32) @@ -172,17 +172,17 @@ def test_static_neg_k_1(self): def test_static_neg_k_2(self): paddle.enable_static() input = paddle.static.data(name='input', dtype='float32', shape=[2, 3]) - startup_program 
= fluid.Program() - train_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): + startup_program = base.Program() + train_program = base.Program() + with base.program_guard(train_program, startup_program): input = paddle.static.data( name='input', dtype='float32', shape=[2, 3] ) output = paddle.rot90(input, k=-2, axes=[0, 1]) - place = fluid.CPUPlace() - if fluid.core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) + place = base.CPUPlace() + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) + exe = base.Executor(place) exe.run(startup_program) img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32) @@ -201,17 +201,17 @@ def test_static_neg_k_2(self): def test_static_neg_k_3(self): paddle.enable_static() input = paddle.static.data(name='input', dtype='float32', shape=[2, 3]) - startup_program = fluid.Program() - train_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): + startup_program = base.Program() + train_program = base.Program() + with base.program_guard(train_program, startup_program): input = paddle.static.data( name='input', dtype='float32', shape=[2, 3] ) output = paddle.rot90(input, k=-3, axes=[0, 1]) - place = fluid.CPUPlace() - if fluid.core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) + place = base.CPUPlace() + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) + exe = base.Executor(place) exe.run(startup_program) img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32) @@ -230,17 +230,17 @@ def test_static_neg_k_3(self): def test_static_neg_k_4(self): paddle.enable_static() input = paddle.static.data(name='input', dtype='float32', shape=[2, 3]) - startup_program = fluid.Program() - train_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): + startup_program = base.Program() + train_program = base.Program() + with base.program_guard(train_program, startup_program): input = paddle.static.data( name='input', dtype='float32', shape=[2, 3] ) output = paddle.rot90(input, k=-4, axes=[0, 1]) - place = fluid.CPUPlace() - if fluid.core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) + place = base.CPUPlace() + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) + exe = base.Executor(place) exe.run(startup_program) img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32) @@ -301,8 +301,8 @@ def run5(): def test_dygraph(self): img = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32) - with fluid.dygraph.guard(): - inputs = fluid.dygraph.to_variable(img) + with base.dygraph.guard(): + inputs = base.dygraph.to_variable(img) ret = paddle.rot90(inputs, k=1, axes=[0, 1]) ret = ret.rot90(1, axes=[0, 1]) diff --git a/test/legacy_test/test_row_conv_op.py b/test/legacy_test/test_row_conv_op.py index e01f18a3f63e9..595fb7b8b12aa 100644 --- a/test/legacy_test/test_row_conv_op.py +++ b/test/legacy_test/test_row_conv_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest import paddle -from paddle import fluid +from paddle import base paddle.enable_static() @@ -187,18 +187,18 @@ def setUp(self): self.out = row_conv_foward_Tensor(self.x, self.w) def check_identity(self): - start = fluid.Program() - main = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, start): + start = base.Program() + main = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, start): x = 
paddle.static.data("x", (-1, -1, self.C), "float32") out = paddle.static.nn.row_conv( x, self.context_length, param_attr=paddle.nn.initializer.Assign(self.w), ) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) exe.run(start) (out_np,) = exe.run(main, feed={'x': self.x}, fetch_list=[out]) diff --git a/test/legacy_test/test_rrelu_op.py b/test/legacy_test/test_rrelu_op.py index b86b7808aba93..851b90c44c7ba 100644 --- a/test/legacy_test/test_rrelu_op.py +++ b/test/legacy_test/test_rrelu_op.py @@ -19,8 +19,8 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid import core, dygraph +from paddle import base +from paddle.base import core, dygraph paddle.seed(102) np.random.seed(102) @@ -51,13 +51,13 @@ def setUp(self): self.upper_1 = 0.33 self.places = [ - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ] def check_static_result(self, place): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input = paddle.static.data( name="input", shape=[2, 3, 4, 5], dtype="float32" ) @@ -70,9 +70,9 @@ def check_static_result(self, place): in_np = np.random.uniform(-1.0, 1.0, [2, 3, 4, 5]).astype("float32") res_np1 = ref_rrelu(in_np, self.lower_0, self.upper_0) - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": in_np}, fetch_list=[res1], ) @@ -81,7 +81,7 @@ def check_static_result(self, place): res_np2 = ref_rrelu(in_np, self.lower_1, self.upper_1) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": in_np}, fetch_list=[res2], ) @@ -108,19 +108,19 @@ def test_static_graph_functional(self): exe = paddle.static.Executor(place=place) (res_1,) = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x": self.x_np}, fetch_list=out_1, use_prune=True, ) (res_2,) = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x2": self.x_np}, fetch_list=out_2, use_prune=True, ) (res_3,) = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x2": self.x_np}, fetch_list=out_3, use_prune=True, @@ -153,13 +153,13 @@ def test_static_graph_layer(self): exe = paddle.static.Executor(place=place) res_1 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x": self.x_np}, fetch_list=out_1, use_prune=True, ) res_2 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x2": self.x_np}, fetch_list=out_2, use_prune=True, @@ -267,37 +267,37 @@ def test_error_functional(self): def test_error_layer(self): def error_int_dtype(): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x = np.random.random([2, 3]).astype("float64") rrelu = paddle.nn.RReLU(2, 3) rrelu(paddle.to_tensor(x)) def error_lower_dtype(): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x = np.random.random([2, 3]).astype("float32") rrelu = paddle.nn.RReLU(0, 0.5) rrelu(paddle.to_tensor(x)) def error_upper_dtype(): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x = np.random.random([2, 3]).astype("float32") rrelu = paddle.nn.RReLU(0.5, 1) rrelu(paddle.to_tensor(x)) def error_lower_range(): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x = np.random.random([2, 
3]).astype("float32") rrelu = paddle.nn.RReLU(-1.0, 0.5) rrelu(paddle.to_tensor(x)) def error_upper_range(): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x = np.random.random([2, 3]).astype("float32") rrelu = paddle.nn.RReLU(0.5, 2.0) rrelu(paddle.to_tensor(x)) def error_lower_upper(): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x = np.random.random([2, 3]).astype("float32") rrelu = paddle.nn.RReLU(0.5, 0.2) rrelu(paddle.to_tensor(x)) diff --git a/test/legacy_test/test_run_fluid_by_module_or_command_line.py b/test/legacy_test/test_run_fluid_by_module_or_command_line.py index 473ff182aefc3..55927d1f383d8 100644 --- a/test/legacy_test/test_run_fluid_by_module_or_command_line.py +++ b/test/legacy_test/test_run_fluid_by_module_or_command_line.py @@ -20,13 +20,13 @@ class TestRunFluidByModule(unittest.TestCase): def test_module(self): print(sys.executable) - res = os.system(sys.executable + ' -m "paddle.fluid.reader"') + res = os.system(sys.executable + ' -m "paddle.base.reader"') self.assertEqual(res, 0) # 0 means status OK class TestRunFluidByCommand(unittest.TestCase): def test_command(self): - res = os.system(sys.executable + ' -c "import paddle.fluid"') + res = os.system(sys.executable + ' -c "import paddle.base"') self.assertEqual(res, 0) # 0 means status OK diff --git a/test/legacy_test/test_run_program_op.py b/test/legacy_test/test_run_program_op.py index 4cc056f2237f4..2d223e9474703 100644 --- a/test/legacy_test/test_run_program_op.py +++ b/test/legacy_test/test_run_program_op.py @@ -18,21 +18,21 @@ import numpy as np import paddle -from paddle import _legacy_C_ops, fluid -from paddle.fluid import core, framework -from paddle.fluid.dygraph.base import switch_to_static_graph +from paddle import _legacy_C_ops, base +from paddle.base import core, framework +from paddle.base.dygraph.base import switch_to_static_graph paddle.enable_static() @contextlib.contextmanager def program_scope_guard(): - prog = fluid.Program() - startup_prog = fluid.Program() - scope = fluid.core.Scope() - with fluid.scope_guard(scope): - with fluid.program_guard(prog, startup_prog): - with fluid.unique_name.guard(): + prog = base.Program() + startup_prog = base.Program() + scope = base.core.Scope() + with base.scope_guard(scope): + with base.program_guard(prog, startup_prog): + with base.unique_name.guard(): yield @@ -45,7 +45,7 @@ def _add_build_strategy_for(input_program, start_op_index, end_op_index): compiled_program._compile( core.Scope(), paddle.framework._current_expected_place() ) - ir_graph = paddle.fluid.framework.IrGraph(compiled_program._graph) + ir_graph = paddle.base.framework.IrGraph(compiled_program._graph) builded_program = ir_graph.to_program() return builded_program @@ -75,18 +75,18 @@ def build_model(self): ) def check_output(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for place in places: # TODO: RunProgramOp is not recommended for use in static graph mode now self.expect_outs = self.run_static_model(place, is_test=True) self.check_output_with_place(place) def check_grad(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for place in places: # TODO: RunProgramOp is not recommended for use in static graph mode now self.expect_grads = self.run_static_model(place, is_test=False) @@ -94,12 +94,12 @@ 
def check_grad(self): def run_static_model(self, place, is_test=True): with program_scope_guard(): - startup_program = fluid.default_startup_program() - main_program = fluid.default_main_program() + startup_program = base.default_startup_program() + main_program = base.default_main_program() self.build_model() - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(startup_program) if is_test: @@ -115,7 +115,7 @@ def run_static_model(self, place, is_test=True): def get_program_desc(self): with program_scope_guard(): fwd_op_num = self.build_model() - return fluid.default_main_program().desc, fwd_op_num + return base.default_main_program().desc, fwd_op_num def get_forward_backward_program_desc( self, whole_program_desc, forward_op_num, output_num @@ -215,7 +215,7 @@ def calc_dygraph_output(self, place): self.program_desc, self.fwd_op_num = self.get_program_desc() self.attrs = self.prepare_attrs() - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): inputs = self.prepare_dygraph_input(place) outputs = self.prepare_dygraph_output() @@ -267,7 +267,7 @@ def calc_dygraph_grad(self, place): self.program_desc, self.fwd_op_num = self.get_program_desc() self.attrs = self.prepare_attrs() - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): # Step 1. run forward inputs, input_param_list = self.prepare_dygraph_input(place, True) outputs = self.prepare_dygraph_output() @@ -381,7 +381,7 @@ def build_model(self): shape=[None, 1, 28, 28], dtype='float32', ) - weight_attr = fluid.ParamAttr( + weight_attr = base.ParamAttr( name=self.input_names['Params'][0], learning_rate=0.5, initializer=paddle.nn.initializer.Assign( @@ -389,7 +389,7 @@ def build_model(self): ), trainable=True, ) - bias_attr = fluid.ParamAttr( + bias_attr = base.ParamAttr( name=self.input_names['Params'][1], learning_rate=0.5, initializer=paddle.nn.initializer.Assign( @@ -405,9 +405,9 @@ def build_model(self): activation='relu', ) # 2. get forward op num - fwd_op_num = fluid.default_main_program().global_block().desc.op_size() + fwd_op_num = base.default_main_program().global_block().desc.op_size() # 3. append backward - grads = fluid.backward.gradients(targets=[pred], inputs=[img]) + grads = base.backward.gradients(targets=[pred], inputs=[img]) return fwd_op_num @@ -432,9 +432,9 @@ def test_check_output(self): def test_check_grad(self): # NOTE: fecth not support SelectedRows, catnot compare # sparse gradients with staic mode, only run dygraph - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for place in places: # TODO: RunProgramOp is not recommended for use in static graph mode now self.calc_dygraph_grad(place) @@ -447,7 +447,7 @@ def build_model(self): emb = paddle.static.nn.embedding( input=x, size=[10, 16], - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( name="emb_weight", learning_rate=10, initializer=paddle.nn.initializer.Assign( @@ -458,9 +458,9 @@ def build_model(self): ) y = paddle.sum(emb, axis=-1) # 2. get forward op num - fwd_op_num = fluid.default_main_program().global_block().desc.op_size() + fwd_op_num = base.default_main_program().global_block().desc.op_size() # 3. 
append backward - grads = fluid.backward.gradients(targets=[y], inputs=[x]) + grads = base.backward.gradients(targets=[y], inputs=[x]) return fwd_op_num diff --git a/test/legacy_test/test_save_model_without_var.py b/test/legacy_test/test_save_model_without_var.py index 04437ab783499..2da87c2142a9b 100644 --- a/test/legacy_test/test_save_model_without_var.py +++ b/test/legacy_test/test_save_model_without_var.py @@ -16,7 +16,7 @@ import warnings import paddle -from paddle import fluid +from paddle import base class TestSaveModelWithoutVar(unittest.TestCase): @@ -24,13 +24,13 @@ def test_no_var_save(self): data = paddle.static.data(name='data', shape=[-1, 1], dtype='float32') data_plus = data + 1 - if fluid.core.is_compiled_with_cuda(): - place = fluid.core.CUDAPlace(0) + if base.core.is_compiled_with_cuda(): + place = base.core.CUDAPlace(0) else: - place = fluid.core.CPUPlace() + place = base.core.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + exe.run(base.default_startup_program()) with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") diff --git a/test/legacy_test/test_scalar.py b/test/legacy_test/test_scalar.py index 3a49611c15c34..0081fbaad9196 100644 --- a/test/legacy_test/test_scalar.py +++ b/test/legacy_test/test_scalar.py @@ -17,7 +17,7 @@ import numpy as np import op -from paddle.fluid import framework +from paddle.base import framework class TestWarpAsScalar(unittest.TestCase): diff --git a/test/legacy_test/test_scale_op.py b/test/legacy_test/test_scale_op.py index 7708ce8deaa88..9585357ce1b07 100644 --- a/test/legacy_test/test_scale_op.py +++ b/test/legacy_test/test_scale_op.py @@ -21,8 +21,8 @@ from op import Operator import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.static import Program, program_guard @@ -262,9 +262,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -293,9 +293,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) diff --git a/test/legacy_test/test_scatter_nd_op.py b/test/legacy_test/test_scatter_nd_op.py index ee6a2423e0d20..9d8c7bf876773 100644 --- a/test/legacy_test/test_scatter_nd_op.py +++ b/test/legacy_test/test_scatter_nd_op.py @@ -18,9 +18,9 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.dygraph.base import switch_to_static_graph +from paddle import base +from paddle.base import core +from paddle.base.dygraph.base import switch_to_static_graph def numpy_scatter_nd(ref, index, updates, fun): @@ -324,7 +324,7 @@ class TestScatterNdOpAPI(unittest.TestCase): """ def testcase1(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): ref1 = paddle.static.data( name='ref1', shape=[10, 9, 8, 1, 3], @@ -343,7 +343,7 @@ def testcase1(self): output1 = paddle.scatter_nd_add(ref1, index1, updates1) def testcase2(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): ref2 = 
paddle.static.data( name='ref2', shape=[10, 9, 8, 1, 3], @@ -364,7 +364,7 @@ def testcase2(self): ) def testcase3(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): shape3 = [10, 9, 8, 1, 3] index3 = paddle.static.data( name='index3', @@ -379,7 +379,7 @@ def testcase3(self): output3 = paddle.scatter_nd(index3, updates3, shape3) def testcase4(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): shape4 = [10, 9, 8, 1, 3] index4 = paddle.static.data( name='index4', @@ -396,7 +396,7 @@ def testcase4(self): ) def testcase5(self): - if not fluid.core.is_compiled_with_cuda(): + if not base.core.is_compiled_with_cuda(): return shape = [2, 3, 4] @@ -404,7 +404,7 @@ def testcase5(self): index = np.array([[0, 0, 2], [0, 1, 2]]) val = np.array([-1, -3]) - with fluid.dygraph.guard(): + with base.dygraph.guard(): device = paddle.get_device() paddle.set_device('gpu') gpu_value = paddle.scatter_nd_add( @@ -450,7 +450,7 @@ def test_static_graph(): class TestScatterNdOpRaise(unittest.TestCase): def test_check_raise(self): def check_raise_is_test(): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): try: ref5 = paddle.static.data( name='ref5', shape=[-1, 3, 4, 5], dtype='float32' @@ -471,7 +471,7 @@ def check_raise_is_test(): def test_check_raise2(self): with self.assertRaises(ValueError): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): ref6 = paddle.static.data( name='ref6', shape=[10, 9, 8, 1, 3], @@ -491,7 +491,7 @@ def test_check_raise2(self): def test_check_raise3(self): def check_raise_is_test(): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): try: shape = [3, 4, 5] index7 = paddle.static.data( @@ -513,19 +513,19 @@ def check_raise_is_test(): class TestDygraph(unittest.TestCase): def test_dygraph(self): - with fluid.dygraph.guard(fluid.CPUPlace()): + with base.dygraph.guard(base.CPUPlace()): index_data = np.array([[1, 1], [0, 1], [1, 3]]).astype(np.int64) - index = fluid.dygraph.to_variable(index_data) + index = base.dygraph.to_variable(index_data) updates = paddle.rand(shape=[3, 9, 10], dtype='float32') shape = [3, 5, 9, 10] output = paddle.scatter_nd(index, updates, shape) def test_dygraph_1(self): - with fluid.dygraph.guard(fluid.CPUPlace()): + with base.dygraph.guard(base.CPUPlace()): x = paddle.rand(shape=[3, 5, 9, 10], dtype='float32') updates = paddle.rand(shape=[3, 9, 10], dtype='float32') index_data = np.array([[1, 1], [0, 1], [1, 3]]).astype(np.int64) - index = fluid.dygraph.to_variable(index_data) + index = base.dygraph.to_variable(index_data) output = paddle.scatter_nd_add(x, index, updates) diff --git a/test/legacy_test/test_scatter_op.py b/test/legacy_test/test_scatter_op.py index df264887c6265..5c87f6aed919e 100644 --- a/test/legacy_test/test_scatter_op.py +++ b/test/legacy_test/test_scatter_op.py @@ -19,9 +19,9 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.dygraph.base import switch_to_static_graph +from paddle import base +from paddle.base import core +from paddle.base.dygraph.base import switch_to_static_graph class TestScatterOp(OpTest): @@ -571,16 +571,16 @@ def test_check_grad(self): class TestScatterAPI(unittest.TestCase): def setUp(self): - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - 
self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) self.executed_api() def executed_api(self): self.scatter = paddle.scatter def check_static_result(self, place): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input = paddle.static.data( name="input", shape=[3, 2], dtype="float64" ) @@ -596,9 +596,9 @@ def check_static_result(self, place): np.float64 ) - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={ "input": input_data, "index": index_data, @@ -619,16 +619,16 @@ def test_static(self): def test_dygraph(self): for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): x_data = np.array([[1, 1], [2, 2], [3, 3]]).astype(np.float64) index_data = np.array([2, 1, 0, 1]).astype(np.int64) updates_data = np.array( [[1, 1], [2, 2], [3, 3], [4, 4]] ).astype(np.float64) - x = fluid.dygraph.to_variable(x_data) - index = fluid.dygraph.to_variable(index_data) - updates = fluid.dygraph.to_variable(updates_data) + x = base.dygraph.to_variable(x_data) + index = base.dygraph.to_variable(index_data) + updates = base.dygraph.to_variable(updates_data) output1 = self.scatter(x, index, updates, overwrite=False) self.assertEqual( @@ -648,7 +648,7 @@ def test_large_data(self): updates = np.ones(shape=[10759233, 256], dtype="float32") def test_dygraph(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): gpu_out = paddle.scatter( paddle.to_tensor(x), paddle.to_tensor(index), diff --git a/test/legacy_test/test_scope.py b/test/legacy_test/test_scope.py index f5c53718c5ad0..6d0e2ed77906b 100644 --- a/test/legacy_test/test_scope.py +++ b/test/legacy_test/test_scope.py @@ -14,24 +14,24 @@ import unittest -import paddle.fluid.core +import paddle.base.core class TestScope(unittest.TestCase): def test_create_destroy(self): - paddle_c = paddle.fluid.core + paddle_c = paddle.base.core scope = paddle_c.Scope() self.assertIsNotNone(scope) scope_with_parent = scope.new_scope() self.assertIsNotNone(scope_with_parent) def test_none_variable(self): - paddle_c = paddle.fluid.core + paddle_c = paddle.base.core scope = paddle_c.Scope() self.assertIsNone(scope.find_var("test")) def test_create_var_get_var(self): - paddle_c = paddle.fluid.core + paddle_c = paddle.base.core scope = paddle_c.Scope() var_a = scope.var("var_a") self.assertIsNotNone(var_a) @@ -40,7 +40,7 @@ def test_create_var_get_var(self): self.assertIsNotNone(scope2.find_var('var_a')) def test_var_get_int(self): - paddle_c = paddle.fluid.core + paddle_c = paddle.base.core scope = paddle_c.Scope() var = scope.var("test_int") var.set_int(10) @@ -48,7 +48,7 @@ def test_var_get_int(self): self.assertEqual(10, var.get_int()) def test_scope_pool(self): - paddle_c = paddle.fluid.core + paddle_c = paddle.base.core scope = paddle_c.Scope() # Delete the scope. 
scope._remove_from_pool() @@ -59,7 +59,7 @@ def test_scope_pool(self): scope._remove_from_pool() def test_size(self): - paddle_c = paddle.fluid.core + paddle_c = paddle.base.core scope = paddle_c.Scope() var_a = scope.var("var_a") self.assertEqual(scope.size(), 1) diff --git a/test/legacy_test/test_searchsorted_op.py b/test/legacy_test/test_searchsorted_op.py index 6795c6e16424f..84fd7afac7aa0 100644 --- a/test/legacy_test/test_searchsorted_op.py +++ b/test/legacy_test/test_searchsorted_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/legacy_test/test_segment_ops.py b/test/legacy_test/test_segment_ops.py index d2be362e650cf..00b4f403fc7a0 100644 --- a/test/legacy_test/test_segment_ops.py +++ b/test/legacy_test/test_segment_ops.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core def compute_segment_sum(x, segment_ids): @@ -365,7 +365,7 @@ def test_static(self): def test_dygraph(self): device = paddle.CPUPlace() - with paddle.fluid.dygraph.guard(device): + with paddle.base.dygraph.guard(device): x = paddle.to_tensor( [[1, 2, 3], [3, 2, 1], [4, 5, 6]], dtype='float32' ) @@ -418,7 +418,7 @@ def test_static(self): def test_dygraph(self): device = paddle.CPUPlace() - with paddle.fluid.dygraph.guard(device): + with paddle.base.dygraph.guard(device): x = paddle.to_tensor( [[1, 2, 3], [3, 2, 1], [4, 5, 6]], dtype='float32' ) @@ -442,7 +442,7 @@ def test_dygraph(self): def test_dygraph_cpu_float16(self): device = paddle.CPUPlace() - with paddle.fluid.dygraph.guard(device): + with paddle.base.dygraph.guard(device): x = paddle.to_tensor( [[1, 2, 3], [3, 2, 1], [4, 5, 6]], dtype='float16' ) @@ -466,7 +466,7 @@ def test_dygraph_cpu_float16(self): def test_dygraph_cuda_float16(self): if core.is_compiled_with_cuda(): device = paddle.CUDAPlace(0) - with paddle.fluid.dygraph.guard(device): + with paddle.base.dygraph.guard(device): x = paddle.to_tensor( [[1, 2, 3], [3, 2, 1], [4, 5, 6]], dtype='float16' ) diff --git a/test/legacy_test/test_select_input_output_op.py b/test/legacy_test/test_select_input_output_op.py index 99292cbe2f25e..a06495a13c71a 100644 --- a/test/legacy_test/test_select_input_output_op.py +++ b/test/legacy_test/test_select_input_output_op.py @@ -17,11 +17,11 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.backward import append_backward -from paddle.fluid.executor import Executor -from paddle.fluid.framework import Program, program_guard +from paddle import base +from paddle.base import core +from paddle.base.backward import append_backward +from paddle.base.executor import Executor +from paddle.base.framework import Program, program_guard from paddle.static.nn.control_flow import select_input, select_output paddle.enable_static() @@ -53,9 +53,9 @@ def test_forward_backward_list_output(self): append_backward(mean) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) exe = Executor(place) diff --git a/test/legacy_test/test_selected_rows.py b/test/legacy_test/test_selected_rows.py index aab95d1f22d99..b8b8604d31c63 100644 --- a/test/legacy_test/test_selected_rows.py +++ b/test/legacy_test/test_selected_rows.py @@ -16,7 +16,7 @@ import numpy as np -from paddle.fluid import core +from paddle.base import core class 
TestSelectedRows(unittest.TestCase): diff --git a/test/legacy_test/test_selu_op.py b/test/legacy_test/test_selu_op.py index acc9214220ef3..17e4311615145 100644 --- a/test/legacy_test/test_selu_op.py +++ b/test/legacy_test/test_selu_op.py @@ -19,8 +19,8 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def ref_selu( @@ -144,11 +144,11 @@ def test_dygraph_api(self): np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05) paddle.enable_static() - def test_fluid_api(self): - with fluid.program_guard(fluid.Program()): + def test_base_api(self): + with base.program_guard(base.Program()): x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype) out = F.selu(x, self.scale, self.alpha) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) res = exe.run(feed={'X': self.x_np}, fetch_list=[out]) out_ref = ref_selu(self.x_np, self.scale, self.alpha) np.testing.assert_allclose(out_ref, res[0], rtol=1e-05) diff --git a/test/legacy_test/test_set_bool_attr.py b/test/legacy_test/test_set_bool_attr.py index 66d0d774e7f1c..5f7282e12a1f8 100644 --- a/test/legacy_test/test_set_bool_attr.py +++ b/test/legacy_test/test_set_bool_attr.py @@ -16,7 +16,7 @@ import unittest import paddle -from paddle import fluid +from paddle import base class TestAttrSet(unittest.TestCase): @@ -24,18 +24,18 @@ def test_set_bool_attr(self): x = paddle.static.data( name='x', shape=[-1, 3, 7, 3, 7], dtype='float32' ) - param_attr = fluid.ParamAttr( + param_attr = base.ParamAttr( name='batch_norm_w', initializer=paddle.nn.initializer.Constant(value=1.0), ) - bias_attr = fluid.ParamAttr( + bias_attr = base.ParamAttr( name='batch_norm_b', initializer=paddle.nn.initializer.Constant(value=0.0), ) bn = paddle.static.nn.batch_norm( input=x, param_attr=param_attr, bias_attr=bias_attr ) - block = fluid.default_main_program().desc.block(0) + block = base.default_main_program().desc.block(0) op = block.op(0) before_type = op.attr_type('is_test') op._set_attr('is_test', True) diff --git a/test/legacy_test/test_set_value_op.py b/test/legacy_test/test_set_value_op.py index 9f797e6ab0ac3..db7a8ebbf91fa 100644 --- a/test/legacy_test/test_set_value_op.py +++ b/test/legacy_test/test_set_value_op.py @@ -21,8 +21,8 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle.fluid import core -from paddle.fluid.layer_helper import LayerHelper +from paddle.base import core +from paddle.base.layer_helper import LayerHelper class TestSetValueBase(unittest.TestCase): @@ -1828,9 +1828,9 @@ def set_value(array, i, op): sgd = paddle.optimizer.Adam() sgd.minimize(loss) place = ( - paddle.fluid.CPUPlace() - if not paddle.fluid.core.is_compiled_with_cuda() - else paddle.fluid.CUDAPlace(0) + paddle.base.CPUPlace() + if not paddle.base.core.is_compiled_with_cuda() + else paddle.base.CUDAPlace(0) ) prog = paddle.static.default_main_program() @@ -1874,7 +1874,7 @@ def set_value(array, i, op): class TestSetValueInplace(unittest.TestCase): def test_inplace(self): paddle.disable_static() - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): paddle.seed(100) a = paddle.rand(shape=[1, 4]) a.stop_gradient = False @@ -1894,7 +1894,7 @@ def test_inplace_var_become_leaf_var(self): paddle.disable_static() a_grad_1, b_grad_1, a_grad_2, b_grad_2 = 0, 1, 2, 3 - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): paddle.seed(100) a = paddle.rand(shape=[1, 4]) b = paddle.rand(shape=[1, 
4]) @@ -1905,7 +1905,7 @@ def test_inplace_var_become_leaf_var(self): a_grad_1 = a.grad.numpy() b_grad_1 = b.grad.numpy() - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): paddle.seed(100) a = paddle.rand(shape=[1, 4]) b = paddle.rand(shape=[1, 4]) diff --git a/test/legacy_test/test_sgd_op.py b/test/legacy_test/test_sgd_op.py index e6239c8d8f112..1d9cdb3ffa3f3 100644 --- a/test/legacy_test/test_sgd_op.py +++ b/test/legacy_test/test_sgd_op.py @@ -19,8 +19,8 @@ from op import Operator import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -220,11 +220,11 @@ def runTest(self): sgd_optimizer = paddle.optimizer.SGD(learning_rate=0.001) sgd_optimizer.minimize(avg_cost) - place = fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - compiled_prog = fluid.compiler.CompiledProgram( - fluid.default_main_program() + place = base.CPUPlace() + exe = base.Executor(place) + exe.run(base.default_startup_program()) + compiled_prog = base.compiler.CompiledProgram( + base.default_main_program() ) result = exe.run(compiled_prog, fetch_list=[avg_cost]) diff --git a/test/legacy_test/test_sgd_op_bf16.py b/test/legacy_test/test_sgd_op_bf16.py index 9b58c7b00d2ce..816ab3b6359d8 100644 --- a/test/legacy_test/test_sgd_op_bf16.py +++ b/test/legacy_test/test_sgd_op_bf16.py @@ -25,8 +25,8 @@ from op import Operator import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.static import amp @@ -236,7 +236,7 @@ class TestSGDOpBF16API(unittest.TestCase): @classmethod def setUpClass(cls): np.random.seed(12345) - fluid.set_flags({'FLAGS_use_mkldnn': True}) + base.set_flags({'FLAGS_use_mkldnn': True}) def setUp(self): self.sample_count = 20 @@ -331,9 +331,9 @@ def _data_reader(self): yield data, label def test_sgd(self): - place = fluid.CPUPlace() - main = fluid.Program() - with fluid.program_guard(main): + place = base.CPUPlace() + main = base.Program() + with base.program_guard(main): ids_shape = list(self.ids_shape) x = paddle.static.data( name='X', shape=[-1] + ids_shape, dtype='int64' @@ -345,7 +345,7 @@ def test_sgd(self): emb = paddle.static.nn.embedding( input=x, size=self.w_shape, - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( name="emb_weight", initializer=self.initializer ), is_sparse=False, @@ -368,12 +368,12 @@ def test_sgd(self): use_pure_bf16=True, ) sgd_optimizer.minimize( - avg_cost, startup_program=fluid.default_startup_program() + avg_cost, startup_program=base.default_startup_program() ) train_reader = paddle.batch(self._data_reader, batch_size=1) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + exe.run(base.default_startup_program()) test_prog = main.clone(for_test=True) sgd_optimizer.amp_init( place, test_program=test_prog, use_bf16_test=True diff --git a/test/legacy_test/test_shape_op.py b/test/legacy_test/test_shape_op.py index d9dade1cf99ce..e7e7f9b62110a 100644 --- a/test/legacy_test/test_shape_op.py +++ b/test/legacy_test/test_shape_op.py @@ -19,7 +19,7 @@ from op import Operator import paddle -from paddle.fluid import core +from paddle.base import core class TestShapeOp(OpTest): diff --git a/test/legacy_test/test_share_data_op.py b/test/legacy_test/test_share_data_op.py index b369652e0ee4d..bf158beac1c97 100644 --- a/test/legacy_test/test_share_data_op.py +++ 
b/test/legacy_test/test_share_data_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest from op import Operator -from paddle.fluid import core +from paddle.base import core class TestShareDataOp(OpTest): diff --git a/test/legacy_test/test_shuffle_batch_op.py b/test/legacy_test/test_shuffle_batch_op.py index c20890261294e..2bee79bda306d 100644 --- a/test/legacy_test/test_shuffle_batch_op.py +++ b/test/legacy_test/test_shuffle_batch_op.py @@ -19,7 +19,7 @@ import numpy as np from eager_op_test import OpTest -from paddle import fluid +from paddle import base class TestShuffleBatchOpBase(OpTest): @@ -33,7 +33,7 @@ def get_shape(self): def _get_places(self): # NOTE: shuffle_batch is not supported on Windows if os.name == 'nt': - return [fluid.CPUPlace()] + return [base.CPUPlace()] return super()._get_places() def setUp(self): diff --git a/test/legacy_test/test_sigmoid_cross_entropy_with_logits_op.py b/test/legacy_test/test_sigmoid_cross_entropy_with_logits_op.py index bb02c11f440e1..e0efedffc0043 100644 --- a/test/legacy_test/test_sigmoid_cross_entropy_with_logits_op.py +++ b/test/legacy_test/test_sigmoid_cross_entropy_with_logits_op.py @@ -19,8 +19,8 @@ from scipy.special import expit, logit import paddle -from paddle import fluid -from paddle.fluid import Program, program_guard +from paddle import base +from paddle.base import Program, program_guard def loss_wrapper( @@ -321,15 +321,15 @@ def test_errors(self): def test_Variable(): # the input of sigmoid_cross_entropy_with_logits must be Variable. - x1 = fluid.create_lod_tensor( + x1 = base.create_lod_tensor( np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], - fluid.CPUPlace(), + base.CPUPlace(), ) - lab1 = fluid.create_lod_tensor( + lab1 = base.create_lod_tensor( np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], - fluid.CPUPlace(), + base.CPUPlace(), ) paddle.nn.functional.binary_cross_entropy_with_logits( x1, lab1 diff --git a/test/legacy_test/test_sigmoid_focal_loss.py b/test/legacy_test/test_sigmoid_focal_loss.py index 73df6a4c8d99b..b151d4c56a21e 100644 --- a/test/legacy_test/test_sigmoid_focal_loss.py +++ b/test/legacy_test/test_sigmoid_focal_loss.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base def call_sfl_functional( @@ -130,9 +130,9 @@ def test_SigmoidFocalLoss(self): np.asarray([np.sum(label_np > 0)], dtype=label_np.dtype), None, ] - places = [fluid.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places = [base.CPUPlace()] + if base.core.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) reductions = ['sum', 'mean', 'none'] alphas = [0.25, 0.5] gammas = [3, 0.0] diff --git a/test/legacy_test/test_sign_op.py b/test/legacy_test/test_sign_op.py index 2617c2451f330..516330a58898a 100644 --- a/test/legacy_test/test_sign_op.py +++ b/test/legacy_test/test_sign_op.py @@ -20,8 +20,8 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard class TestSignOp(OpTest): @@ -99,7 +99,7 @@ def test_errors(self): class TestSignAPI(unittest.TestCase): def test_dygraph(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): np_x = np.array([-1.0, 0.0, -0.0, 1.2, 1.5], dtype='float64') x = paddle.to_tensor(np_x) z = paddle.sign(x) @@ -151,9 +151,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] 
if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -182,9 +182,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) diff --git a/test/legacy_test/test_simple_rnn_op.py b/test/legacy_test/test_simple_rnn_op.py index 9bc75230953a4..fb731f87e951a 100644 --- a/test/legacy_test/test_simple_rnn_op.py +++ b/test/legacy_test/test_simple_rnn_op.py @@ -20,7 +20,7 @@ from eager_op_test import OpTest import paddle -from paddle.fluid import core +from paddle.base import core sys.path.append("../../test/rnn") from convert import get_params_for_net diff --git a/test/legacy_test/test_size_op.py b/test/legacy_test/test_size_op.py index 0bb3ac64bce75..68ec73fb57ba4 100644 --- a/test/legacy_test/test_size_op.py +++ b/test/legacy_test/test_size_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest import paddle -from paddle import fluid +from paddle import base def size_wrapper(input): @@ -64,9 +64,9 @@ def config(self): class TestSizeAPI(unittest.TestCase): def test_size_static(self): - main_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(main_program, startup_program): + main_program = base.Program() + startup_program = base.Program() + with base.program_guard(main_program, startup_program): shape1 = [2, 1, 4, 5] shape2 = [1, 4, 5] x_1 = paddle.static.data(shape=shape1, dtype='int32', name='x_1') @@ -103,9 +103,9 @@ def test_size_imperative(self): paddle.enable_static() def test_error(self): - main_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(main_program, startup_program): + main_program = base.Program() + startup_program = base.Program() + with base.program_guard(main_program, startup_program): def test_x_type(): shape = [1, 4, 5] diff --git a/test/legacy_test/test_slice_op.py b/test/legacy_test/test_slice_op.py index 271824bd2fd5e..0aa24c85b87b0 100644 --- a/test/legacy_test/test_slice_op.py +++ b/test/legacy_test/test_slice_op.py @@ -20,8 +20,8 @@ from eager_op_test import OpTest, convert_float_to_uint16, paddle_static_guard import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.tensor.manipulation import tensor_array_to_tensor paddle.enable_static() @@ -620,9 +620,9 @@ def test_1(self): out_6 = x[minus_3:3, 0:100, :, 2:-1] out_7 = x[minus_1, 0:100, :, 2:minus_1] - exe = fluid.Executor(place=fluid.CPUPlace()) + exe = base.Executor(place=base.CPUPlace()) res_1, res_2, res_3, res_4, res_5, res_6, res_7 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={ "x": input, 'starts': np.array([-3, 0, 2]).astype("int32"), @@ -642,7 +642,7 @@ def test_1(self): class TestSliceApiWithTensor(unittest.TestCase): def test_starts_ends_is_tensor(self): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): a = paddle.rand(shape=[4, 5, 6], dtype='float32') axes = [0, 1, 2] starts = [-3, 0, 2] @@ -658,7 +658,7 @@ def test_starts_ends_is_tensor(self): np.testing.assert_array_equal(a_1.numpy(), a_2.numpy()) def test_bool_tensor(self): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): array = (np.arange(60).reshape([3, 4, 5]) % 3).astype('bool') tt = paddle.to_tensor(array) tt.stop_gradient = False 
@@ -676,7 +676,7 @@ def test_bool_tensor(self): class TestSliceApiEager(unittest.TestCase): def test_slice_api(self): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): a = paddle.rand(shape=[4, 5, 6], dtype='float32') a.stop_gradient = False axes = [0, 1, 2] @@ -711,15 +711,15 @@ def setUp(self): self.axis = 1 self.place = ( - fluid.CUDAPlace(0) - if fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.is_compiled_with_cuda() + else base.CPUPlace() ) - self.exe = fluid.Executor(self.place) + self.exe = base.Executor(self.place) def set_program_and_run(self, main_program, case_num): with paddle_static_guard(): - with fluid.program_guard(main_program): + with base.program_guard(main_program): x = [ paddle.static.data( name='x0', shape=self.shape, dtype="float32" @@ -761,7 +761,7 @@ def set_program_and_run(self, main_program, case_num): ) loss = paddle.sum(output) - fluid.backward.append_backward(loss) + base.backward.append_backward(loss) g_vars = list( map( main_program.global_block().var, @@ -775,7 +775,7 @@ def set_program_and_run(self, main_program, case_num): ) def test_case_1(self): - main_program = fluid.Program() + main_program = base.Program() self.set_program_and_run(main_program, 1) self.assertTrue(self.sliced_arr.type == core.VarDesc.VarType.LOD_TENSOR) @@ -787,7 +787,7 @@ def test_case_1(self): def test_case_2(self): with paddle_static_guard(): - main_program = fluid.Program() + main_program = base.Program() self.set_program_and_run(main_program, 2) self.assertTrue( @@ -803,7 +803,7 @@ def test_case_2(self): def test_case_3(self): with paddle_static_guard(): - main_program = fluid.Program() + main_program = base.Program() self.set_program_and_run(main_program, 3) self.assertTrue( @@ -821,9 +821,9 @@ def test_case_3(self): class TestImperativeVarBaseGetItem(unittest.TestCase): def test_getitem_with_long(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): data = np.random.random((2, 80, 16128)).astype('float32') - var = fluid.dygraph.to_variable(data) + var = base.dygraph.to_variable(data) sliced = var[:, 10:, : var.shape[1]] # var.shape[1] is 80L here self.assertEqual(sliced.shape, [2, 70, 80]) @@ -832,17 +832,17 @@ def test_getitem_with_long(self): def test_getitem_with_float(self): def test_float_in_slice_item(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): data = np.random.random((2, 80, 16128)).astype('float32') - var = fluid.dygraph.to_variable(data) + var = base.dygraph.to_variable(data) sliced = var[:, 1.1:, : var.shape[1]] self.assertRaises(Exception, test_float_in_slice_item) def test_float_in_index(): - with fluid.dygraph.guard(): + with base.dygraph.guard(): data = np.random.random((2, 80, 16128)).astype('float32') - var = fluid.dygraph.to_variable(data) + var = base.dygraph.to_variable(data) sliced = var[1.1] self.assertRaises(Exception, test_float_in_index) @@ -860,7 +860,7 @@ def test(self): def test_axis_less_than_zero(self): # Using paddle.disable_static will make other unittests fail. 
- with fluid.dygraph.guard(): + with base.dygraph.guard(): x_arr = np.arange(0, 24, dtype=np.float32).reshape([2, 3, 4]) x = paddle.to_tensor(x_arr) @@ -904,7 +904,7 @@ def test_axis_less_than_zero(self): class TestSliceOpError(unittest.TestCase): def test_dismatch_shape(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): with self.assertRaises(ValueError): array = np.array([], dtype=np.float32) x = paddle.to_tensor(np.reshape(array, [0]), dtype='float32') @@ -928,13 +928,13 @@ def test_dismatch_shape(self): ) class TestImperativeCUDAPinnedInput(unittest.TestCase): def test_input_cuda_pinned_var(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): data = np.random.random((2, 80, 16128)).astype('float32') var = core.eager.Tensor( value=data, name='', persistable=False, - place=fluid.CUDAPinnedPlace(), + place=base.CUDAPinnedPlace(), zero_copy=False, ) sliced = var[:, 10:, : var.shape[1]] @@ -969,9 +969,9 @@ def func(self, place): def test_grad(self): with paddle_static_guard(): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -1004,9 +1004,9 @@ def func(self, place): def test_grad(self): with paddle_static_guard(): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) diff --git a/test/legacy_test/test_slice_var.py b/test/legacy_test/test_slice_var.py index 29ec23bd13e6a..1ed9d4dfa9e27 100644 --- a/test/legacy_test/test_slice_var.py +++ b/test/legacy_test/test_slice_var.py @@ -15,14 +15,14 @@ import random import unittest -from paddle import fluid +from paddle import base from paddle.distributed.transpiler.distribute_transpiler import slice_variable class TestSliceVar(unittest.TestCase): def check_slice_output(self, shapes, expected_sizes, min_size): var_list = [] - program = fluid.Program() + program = base.Program() for shape in shapes: var = program.global_block().create_var( name=str(random.randint(10000, 99999)), diff --git a/test/legacy_test/test_smooth_l1_loss.py b/test/legacy_test/test_smooth_l1_loss.py index 9ad2e0ae718cb..f070b747aeb5e 100644 --- a/test/legacy_test/test_smooth_l1_loss.py +++ b/test/legacy_test/test_smooth_l1_loss.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base def smooth_l1_loss_forward(val, delta): @@ -46,14 +46,14 @@ def setUp(self): def test_smooth_l1_loss_mean(self): input_np = np.random.random([100, 200]).astype(np.float32) label_np = np.random.random([100, 200]).astype(np.float32) - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[100, 200], dtype='float32' ) @@ -63,7 +63,7 @@ def test_smooth_l1_loss_mean(self): smooth_l1_loss = paddle.nn.loss.SmoothL1Loss() ret = smooth_l1_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) (static_ret,) = exe.run( prog, feed={ @@ -73,11 +73,11 @@ def test_smooth_l1_loss_mean(self): fetch_list=[ret], ) self.assertIsNotNone(static_ret) - with 
fluid.dygraph.guard(): + with base.dygraph.guard(): smooth_l1_loss = paddle.nn.loss.SmoothL1Loss() dy_ret = smooth_l1_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np), + base.dygraph.to_variable(input_np), + base.dygraph.to_variable(label_np), ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) @@ -89,14 +89,14 @@ def test_smooth_l1_loss_mean(self): def test_smooth_l1_loss_sum(self): input_np = np.random.random([100, 200]).astype(np.float32) label_np = np.random.random([100, 200]).astype(np.float32) - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[100, 200], dtype='float32' ) @@ -106,7 +106,7 @@ def test_smooth_l1_loss_sum(self): smooth_l1_loss = paddle.nn.loss.SmoothL1Loss(reduction='sum') ret = smooth_l1_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) (static_ret,) = exe.run( prog, feed={ @@ -116,11 +116,11 @@ def test_smooth_l1_loss_sum(self): fetch_list=[ret], ) self.assertIsNotNone(static_ret) - with fluid.dygraph.guard(): + with base.dygraph.guard(): smooth_l1_loss = paddle.nn.loss.SmoothL1Loss(reduction='sum') dy_ret = smooth_l1_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np), + base.dygraph.to_variable(input_np), + base.dygraph.to_variable(label_np), ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) @@ -132,14 +132,14 @@ def test_smooth_l1_loss_sum(self): def test_smooth_l1_loss_none(self): input_np = np.random.random([100, 200]).astype(np.float32) label_np = np.random.random([100, 200]).astype(np.float32) - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[100, 200], dtype='float32' ) @@ -149,7 +149,7 @@ def test_smooth_l1_loss_none(self): smooth_l1_loss = paddle.nn.loss.SmoothL1Loss(reduction='none') ret = smooth_l1_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) (static_ret,) = exe.run( prog, feed={ @@ -159,11 +159,11 @@ def test_smooth_l1_loss_none(self): fetch_list=[ret], ) self.assertIsNotNone(static_ret) - with fluid.dygraph.guard(): + with base.dygraph.guard(): smooth_l1_loss = paddle.nn.loss.SmoothL1Loss(reduction='none') dy_ret = smooth_l1_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np), + base.dygraph.to_variable(input_np), + base.dygraph.to_variable(label_np), ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) @@ -176,14 +176,14 @@ def test_smooth_l1_loss_delta(self): input_np = np.random.random([100, 200]).astype(np.float32) label_np = np.random.random([100, 200]).astype(np.float32) delta = np.random.rand() - prog = fluid.Program() - startup_prog = fluid.Program() + prog = base.Program() + startup_prog = base.Program() place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else 
fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - with fluid.program_guard(prog, startup_prog): + with base.program_guard(prog, startup_prog): input = paddle.static.data( name='input', shape=[100, 200], dtype='float32' ) @@ -193,7 +193,7 @@ def test_smooth_l1_loss_delta(self): smooth_l1_loss = paddle.nn.loss.SmoothL1Loss(delta=delta) ret = smooth_l1_loss(input, label) - exe = fluid.Executor(place) + exe = base.Executor(place) (static_ret,) = exe.run( prog, feed={ @@ -203,11 +203,11 @@ def test_smooth_l1_loss_delta(self): fetch_list=[ret], ) self.assertIsNotNone(static_ret) - with fluid.dygraph.guard(): + with base.dygraph.guard(): smooth_l1_loss = paddle.nn.loss.SmoothL1Loss(delta=delta) dy_ret = smooth_l1_loss( - fluid.dygraph.to_variable(input_np), - fluid.dygraph.to_variable(label_np), + base.dygraph.to_variable(input_np), + base.dygraph.to_variable(label_np), ) dy_ret_value = dy_ret.numpy() self.assertIsNotNone(dy_ret_value) diff --git a/test/legacy_test/test_softmax2d.py b/test/legacy_test/test_softmax2d.py index 26bc47038bae6..4d57793744993 100644 --- a/test/legacy_test/test_softmax2d.py +++ b/test/legacy_test/test_softmax2d.py @@ -18,7 +18,7 @@ from test_softmax_op import ref_softmax import paddle -from paddle.fluid import core +from paddle.base import core class TestSoftmax2DAPI(unittest.TestCase): diff --git a/test/legacy_test/test_softmax_mask_fuse_op.py b/test/legacy_test/test_softmax_mask_fuse_op.py index 56a4ba24a6862..2adb2d4ce7029 100644 --- a/test/legacy_test/test_softmax_mask_fuse_op.py +++ b/test/legacy_test/test_softmax_mask_fuse_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest import paddle -from paddle import fluid, incubate -from paddle.fluid import core +from paddle import base, incubate +from paddle.base import core paddle.enable_static() @@ -104,7 +104,7 @@ def test_check_grad(self): ) class TestDropoutBiasFuseOp3(unittest.TestCase): def test_static_result(self): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input_x = paddle.static.data( name="x", shape=[1, 1, 8, 32], dtype="float32" ) @@ -118,22 +118,22 @@ def test_static_result(self): mask_in_np = np.where(mask == 1, -10000.0, mask) rst_np = _get_softmax(x_in_np, mask_in_np, False) - exe = fluid.Executor(fluid.CUDAPlace(0)) + exe = base.Executor(base.CUDAPlace(0)) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x": x_in_np, "mask": mask_in_np}, fetch_list=[rst], ) np.testing.assert_allclose(fetches[0], rst_np, rtol=1e-05) def test_dygraph(self): - with fluid.dygraph.guard(fluid.CUDAPlace(0)): + with base.dygraph.guard(base.CUDAPlace(0)): x_in_np = np.random.random((1, 1, 8, 32)).astype("float32") mask = np.random.randint(0, 2, (1, 1, 8, 32)).astype("float32") mask_in_np = np.where(mask == 1, -10000.0, mask) rst_np = _get_softmax(x_in_np, mask_in_np, False) - input_x = fluid.dygraph.to_variable(x_in_np) - input_mask = fluid.dygraph.to_variable(mask_in_np) + input_x = base.dygraph.to_variable(x_in_np) + input_mask = base.dygraph.to_variable(mask_in_np) rst = incubate.softmax_mask_fuse(input_x, input_mask) np.testing.assert_allclose(rst, rst_np, rtol=1e-05) diff --git a/test/legacy_test/test_softmax_mask_fuse_upper_triangle_op.py b/test/legacy_test/test_softmax_mask_fuse_upper_triangle_op.py index cf1efa779dc83..67f2676a12885 100644 --- a/test/legacy_test/test_softmax_mask_fuse_upper_triangle_op.py +++ 
b/test/legacy_test/test_softmax_mask_fuse_upper_triangle_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest import paddle -from paddle import fluid, incubate -from paddle.fluid import core +from paddle import base, incubate +from paddle.base import core paddle.enable_static() @@ -92,7 +92,7 @@ def setUp(self): def test_static(self): for dtype in self.dtypes: - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input_x = paddle.static.data( name="x", shape=[1, 4, 32, 32], dtype=dtype ) @@ -101,9 +101,9 @@ def test_static(self): x_in_np = np.random.random((1, 4, 32, 32)).astype(dtype) rst_np = _get_softmax_upper(x_in_np, dtype == 'float16') - exe = fluid.Executor(fluid.CUDAPlace(0)) + exe = base.Executor(base.CUDAPlace(0)) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x": x_in_np}, fetch_list=[rst], ) @@ -111,10 +111,10 @@ def test_static(self): def test_dygraph(self): for dtype in self.dtypes: - with fluid.dygraph.guard(fluid.CUDAPlace(0)): + with base.dygraph.guard(base.CUDAPlace(0)): x_in_np = np.random.random((1, 4, 32, 32)).astype(dtype) rst_np = _get_softmax_upper(x_in_np, dtype == 'float16') - input_x = fluid.dygraph.to_variable(x_in_np) + input_x = base.dygraph.to_variable(x_in_np) rst = incubate.softmax_mask_fuse_upper_triangle(input_x) np.testing.assert_allclose(rst, rst_np, rtol=1e-05) diff --git a/test/legacy_test/test_softmax_op.py b/test/legacy_test/test_softmax_op.py index 4374dede00b66..6eb86b95fa620 100644 --- a/test/legacy_test/test_softmax_op.py +++ b/test/legacy_test/test_softmax_op.py @@ -19,8 +19,8 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core np.random.seed(10) @@ -67,7 +67,7 @@ def setUp(self): x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) out = np.apply_along_axis(stable_softmax, self.axis, x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.attrs = { 'axis': self.axis, @@ -130,7 +130,7 @@ def setUp(self): x = np.random.uniform(0.1, 1, []).astype(self.dtype) out = np.array(1.0).astype(self.dtype) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.attrs = { 'axis': -1, @@ -164,7 +164,7 @@ def setUp(self): x = np.random.uniform(0.1, 1, []).astype(self.dtype) out = np.array(1.0).astype(self.dtype) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.attrs = { 'axis': -1, @@ -412,7 +412,7 @@ def setUp(self): out = np.apply_along_axis(stable_softmax, self.axis, x) self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x)) + 'X': OpTest.np_dtype_to_base_dtype(convert_float_to_uint16(x)) } self.outputs = {'Out': convert_float_to_uint16(out)} self.attrs = { @@ -468,7 +468,7 @@ def executed_api(self): self.softmax = F.softmax def test_static_check(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): x = paddle.static.data('X', self.x_np.shape, 'float32') out1 = self.softmax(x) @@ -512,7 +512,7 @@ def test_dygraph_check(self): paddle.enable_static() def test_error(self): - with paddle.fluid.framework._static_guard(): + with 
paddle.base.framework._static_guard(): with paddle.static.program_guard(paddle.static.Program()): # The input type must be Variable. self.assertRaises(TypeError, self.softmax, 1) @@ -546,19 +546,19 @@ def test_dygraph(self): paddle.enable_static() def test_static(self): - with paddle.fluid.framework._static_guard(): - main_prog = fluid.Program() - with fluid.program_guard(main_prog, fluid.Program()): + with paddle.base.framework._static_guard(): + main_prog = base.Program() + with base.program_guard(main_prog, base.Program()): x = paddle.rand([]) x.stop_gradient = False out = paddle.nn.functional.softmax(x) - fluid.backward.append_backward(out) + base.backward.append_backward(out) # Test compile shape self.assertEqual(x.shape, ()) self.assertEqual(out.shape, ()) - exe = fluid.Executor() + exe = base.Executor() result = exe.run(main_prog, fetch_list=[x, out]) # Test runtime shape diff --git a/test/legacy_test/test_softmax_with_cross_entropy_op.py b/test/legacy_test/test_softmax_with_cross_entropy_op.py index f94c6c4772cb1..73828072e9661 100644 --- a/test/legacy_test/test_softmax_with_cross_entropy_op.py +++ b/test/legacy_test/test_softmax_with_cross_entropy_op.py @@ -19,7 +19,7 @@ from test_softmax_op import stable_softmax import paddle -from paddle.fluid import Program, core, program_guard +from paddle.base import Program, core, program_guard def cross_entropy(softmax, label, soft_label, axis, ignore_index=-1): diff --git a/test/legacy_test/test_solve_op.py b/test/legacy_test/test_solve_op.py index bda4d46bd627b..ecb05b1d18a28 100644 --- a/test/legacy_test/test_solve_op.py +++ b/test/legacy_test/test_solve_op.py @@ -18,13 +18,13 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core sys.path.append("..") from eager_op_test import OpTest -from paddle import fluid -from paddle.fluid import Program, program_guard +from paddle import base +from paddle.base import Program, program_guard # 2D normal case @@ -249,11 +249,11 @@ class TestSolveOpError(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): # The input type of solve_op must be Variable. 
- x1 = fluid.create_lod_tensor( - np.array([[-1]]), [[1]], fluid.CPUPlace() + x1 = base.create_lod_tensor( + np.array([[-1]]), [[1]], base.CPUPlace() ) - y1 = fluid.create_lod_tensor( - np.array([[-1]]), [[1]], fluid.CPUPlace() + y1 = base.create_lod_tensor( + np.array([[-1]]), [[1]], base.CPUPlace() ) self.assertRaises(TypeError, paddle.linalg.solve, x1, y1) @@ -295,7 +295,7 @@ def setUp(self): self.place.append(paddle.CUDAPlace(0)) def check_static_result(self, place): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): paddle_input_x = paddle.static.data( name="input_x", shape=[3, 3], dtype=self.dtype ) @@ -309,9 +309,9 @@ def check_static_result(self, place): np_result = np.linalg.solve(np_input_x, np_input_y) - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input_x": np_input_x, "input_y": np_input_y}, fetch_list=[paddle_result], ) @@ -356,7 +356,7 @@ def setUp(self): def check_static_result(self, place): paddle.enable_static() - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): paddle_input_x = paddle.static.data( name="input_x", shape=[10, 10], dtype=self.dtype ) @@ -370,9 +370,9 @@ def check_static_result(self, place): np_result = np.linalg.solve(np_input_x, np_input_y) - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input_x": np_input_x, "input_y": np_input_y}, fetch_list=[paddle_result], ) @@ -416,7 +416,7 @@ def setUp(self): def check_static_result(self, place): paddle.enable_static() - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): paddle_input_x = paddle.static.data( name="input_x", shape=[10, 10], dtype=self.dtype ) @@ -430,9 +430,9 @@ def check_static_result(self, place): np_result = np.linalg.solve(np_input_x, np_input_y) - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input_x": np_input_x, "input_y": np_input_y}, fetch_list=[paddle_result], ) @@ -476,7 +476,7 @@ def setUp(self): self.place.append(paddle.CUDAPlace(0)) def check_static_result(self, place): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): paddle_input_x = paddle.static.data( name="input_x", shape=[2, 3, 3], dtype=self.dtype ) @@ -490,9 +490,9 @@ def check_static_result(self, place): np_result = np.linalg.solve(np_input_x, np_input_y) - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input_x": np_input_x, "input_y": np_input_y}, fetch_list=[paddle_result], ) @@ -529,13 +529,13 @@ def run(place): class TestSolveOpSingularAPI(unittest.TestCase): # Singular matrix is ​​not invertible def setUp(self): - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] self.dtype = "float64" if core.is_compiled_with_cuda(): - self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) def check_static_result(self, place): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): x = paddle.static.data(name="x", shape=[4, 4], dtype=self.dtype) y = 
paddle.static.data(name="y", shape=[4, 4], dtype=self.dtype) @@ -544,10 +544,10 @@ def check_static_result(self, place): input_x_np = np.ones([4, 4]).astype(self.dtype) input_y_np = np.ones([4, 4]).astype(self.dtype) - exe = fluid.Executor(place) + exe = base.Executor(place) try: fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x": input_x_np, "y": input_y_np}, fetch_list=[result], ) @@ -563,11 +563,11 @@ def test_static(self): def test_dygraph(self): for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_x_np = np.ones([4, 4]).astype(self.dtype) input_y_np = np.ones([4, 4]).astype(self.dtype) - input_x = fluid.dygraph.to_variable(input_x_np) - input_y = fluid.dygraph.to_variable(input_y_np) + input_x = base.dygraph.to_variable(input_x_np) + input_y = base.dygraph.to_variable(input_y_np) try: result = paddle.linalg.solve(input_x, input_y) except RuntimeError as ex: diff --git a/test/legacy_test/test_sort_op.py b/test/legacy_test/test_sort_op.py index c2a8de287e6b9..bbae7e75c833b 100644 --- a/test/legacy_test/test_sort_op.py +++ b/test/legacy_test/test_sort_op.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestSortOnCPU(unittest.TestCase): @@ -26,12 +26,12 @@ def setUp(self): self.place = core.CPUPlace() def test_api_0(self): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): input = paddle.static.data( name="input", shape=[2, 3, 4], dtype="float32" ) output = paddle.sort(x=input) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) data = np.array( [ [[5, 8, 9, 5], [0, 0, 1, 7], [6, 9, 2, 4]], @@ -44,12 +44,12 @@ def test_api_0(self): self.assertEqual((result == np_result).all(), True) def test_api_1(self): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): input = paddle.static.data( name="input", shape=[2, 3, 4], dtype="float32" ) output = paddle.sort(x=input, axis=1) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) data = np.array( [ [[5, 8, 9, 5], [0, 0, 1, 7], [6, 9, 2, 4]], diff --git a/test/legacy_test/test_space_to_depth_op.py b/test/legacy_test/test_space_to_depth_op.py index 4f3af77924270..3e37ccfb2e104 100644 --- a/test/legacy_test/test_space_to_depth_op.py +++ b/test/legacy_test/test_space_to_depth_op.py @@ -17,7 +17,7 @@ import numpy as np from eager_op_test import OpTest -from paddle import fluid +from paddle import base class TestSpaceToDepthOp(OpTest): @@ -75,9 +75,9 @@ def init_data(self): def test_check_output(self): place = ( - fluid.core.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.core.CPUPlace() + base.core.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.core.CPUPlace() ) self.check_output_with_place( place=place, atol=1e-5, no_check_set=None, equal_nan=False @@ -85,9 +85,9 @@ def test_check_output(self): def test_check_grad(self): place = ( - fluid.core.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.core.CPUPlace() + base.core.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.core.CPUPlace() ) self.check_grad_with_place(place, ['X'], 'Out') diff --git a/test/legacy_test/test_sparse_attention_op.py b/test/legacy_test/test_sparse_attention_op.py index 87306b8b8d542..2a80906ad74aa 100644 --- a/test/legacy_test/test_sparse_attention_op.py +++ 
b/test/legacy_test/test_sparse_attention_op.py @@ -22,8 +22,8 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def get_cuda_version(): @@ -372,7 +372,7 @@ def test_static_graph(self): key_padding_mask_np = key_padding_mask_np.astype(self.dtype) attn_mask_np = attn_mask_np.astype(self.dtype) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) if self.use_mask: fetches_result = exe.run( feed={ diff --git a/test/legacy_test/test_sparse_conv_op.py b/test/legacy_test/test_sparse_conv_op.py index 2d2af3c11fcb6..755f792215834 100644 --- a/test/legacy_test/test_sparse_conv_op.py +++ b/test/legacy_test/test_sparse_conv_op.py @@ -19,7 +19,7 @@ import paddle from paddle import sparse -from paddle.fluid import core +from paddle.base import core logging.basicConfig( format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO diff --git a/test/legacy_test/test_sparse_embedding_op.py b/test/legacy_test/test_sparse_embedding_op.py index 0e0beda67971e..b70cb26c012a8 100644 --- a/test/legacy_test/test_sparse_embedding_op.py +++ b/test/legacy_test/test_sparse_embedding_op.py @@ -19,7 +19,7 @@ class TestSparseEmbeddingAPIError(unittest.TestCase): def test_errors(self): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): # The size of input in sparse_embedding should not be 0. def test_0_size(): input = paddle.to_tensor([], dtype='int64') diff --git a/test/legacy_test/test_sparse_fused_attention_op.py b/test/legacy_test/test_sparse_fused_attention_op.py index d2db24218e1d2..68cdd16d4bd12 100644 --- a/test/legacy_test/test_sparse_fused_attention_op.py +++ b/test/legacy_test/test_sparse_fused_attention_op.py @@ -21,7 +21,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core def get_cuda_version(): diff --git a/test/legacy_test/test_sparse_norm_op.py b/test/legacy_test/test_sparse_norm_op.py index 7d745ada2da7f..039c147a3bb4f 100644 --- a/test/legacy_test/test_sparse_norm_op.py +++ b/test/legacy_test/test_sparse_norm_op.py @@ -18,7 +18,7 @@ import numpy as np import paddle -from paddle import fluid, sparse +from paddle import base, sparse from paddle.sparse import nn @@ -157,8 +157,8 @@ def test_convert(self): nn.BatchNorm(5), nn.BatchNorm( 5, - weight_attr=fluid.ParamAttr(name='bn.scale'), - bias_attr=fluid.ParamAttr(name='bn.bias'), + weight_attr=base.ParamAttr(name='bn.scale'), + bias_attr=base.ParamAttr(name='bn.bias'), ), ) model = nn.SyncBatchNorm.convert_sync_batchnorm(model) diff --git a/test/legacy_test/test_sparse_unary_op.py b/test/legacy_test/test_sparse_unary_op.py index 908121ace0118..1f04694747e71 100644 --- a/test/legacy_test/test_sparse_unary_op.py +++ b/test/legacy_test/test_sparse_unary_op.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid.framework import convert_np_dtype_to_dtype_ +from paddle.base.framework import convert_np_dtype_to_dtype_ class TestSparseUnary(unittest.TestCase): diff --git a/test/legacy_test/test_sparse_utils_op.py b/test/legacy_test/test_sparse_utils_op.py index 2b7583db92175..f387b0738dc81 100644 --- a/test/legacy_test/test_sparse_utils_op.py +++ b/test/legacy_test/test_sparse_utils_op.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core devices = ['cpu', 'gpu'] diff --git a/test/legacy_test/test_spawn_and_init_parallel_env.py b/test/legacy_test/test_spawn_and_init_parallel_env.py 
index 290d48d72c911..8c9e3d4dde836 100644 --- a/test/legacy_test/test_spawn_and_init_parallel_env.py +++ b/test/legacy_test/test_spawn_and_init_parallel_env.py @@ -23,7 +23,7 @@ _get_subprocess_env_list, _options_valid_check, ) -from paddle.fluid import core +from paddle.base import core # NOTE(chenweihang): Coverage CI is currently not able to count python3 # unittest, so the unittests here covers some cases that will only be diff --git a/test/legacy_test/test_spectral_norm_op.py b/test/legacy_test/test_spectral_norm_op.py index 74ba596087212..d0b4b712ce087 100644 --- a/test/legacy_test/test_spectral_norm_op.py +++ b/test/legacy_test/test_spectral_norm_op.py @@ -19,7 +19,7 @@ import paddle from paddle import _C_ops -from paddle.fluid.framework import Program, program_guard +from paddle.base.framework import Program, program_guard paddle.enable_static() diff --git a/test/legacy_test/test_split_op.py b/test/legacy_test/test_split_op.py index 8f7781f925cea..77a720ffc8d30 100644 --- a/test/legacy_test/test_split_op.py +++ b/test/legacy_test/test_split_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard class TestSplitOp(OpTest): @@ -315,9 +315,9 @@ def test_api(self): ) paddle.split(x=x_2, num_or_sections=2, axis=2) - exe = fluid.Executor(place=fluid.CPUPlace()) + exe = base.Executor(place=base.CPUPlace()) [res_0, res_1, res_2, res_3, res_4, res_5] = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x_1": input_1, "x_2": input_1}, fetch_list=[out_0, out_1, out_2, out_3, out_4, out_5], ) @@ -380,7 +380,7 @@ def test_axis_type_tensor(): self.assertRaises(TypeError, test_axis_type_tensor) - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): def test_0_num_tensor(): x = paddle.uniform([1, 1, 1], dtype='float32') @@ -391,7 +391,7 @@ def test_0_num_tensor(): class API_TestSplit(unittest.TestCase): def test_out(self): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): data1 = paddle.static.data( 'data1', shape=[-1, 4, 6, 6], dtype='float64' ) @@ -399,8 +399,8 @@ def test_out(self): data2 = paddle.static.data('data2', shape=[-1, 1], dtype='int32') data2.desc.set_need_check_feed(False) x0, x1, x2 = paddle.split(data1, num_or_sections=3, axis=data2) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) input1 = np.random.random([4, 6, 6]).astype('float64') input2 = np.array([2]).astype('int32') ( @@ -418,14 +418,14 @@ def test_out(self): class API_TestSplit2(unittest.TestCase): def test_out(self): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): data1 = paddle.static.data( 'data1', shape=[-1, 4, 6, 6], dtype='float64' ) data1.desc.set_need_check_feed(False) x0, x1, x2 = paddle.split(data1, num_or_sections=3, axis=2) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) input1 = np.random.random([4, 6, 6]).astype('float64') ( r0, @@ -440,11 +440,11 @@ def test_out(self): class API_TestSplit3(unittest.TestCase): def test_out(self): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): data = paddle.static.data('data', shape=[-1, 10], 
dtype='float64') x0, x1 = paddle.split(data, num_or_sections=(3, 7), axis=1) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) input1 = np.random.random([1, 10]).astype('float64') r0, r1 = exe.run(feed={"data": input1}, fetch_list=[x0, x1]) ex_x0, ex_x1 = np.split(input1, (3,), axis=1) @@ -454,12 +454,12 @@ def test_out(self): class API_TestSplit4(unittest.TestCase): def test_out(self): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): data = paddle.static.data('data', shape=[-1, 10], dtype='float64') index = paddle.static.data('index', shape=[1], dtype='int32') x0, x1 = paddle.split(data, num_or_sections=(3, index), axis=1) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) input1 = np.random.random([1, 10]).astype('float64') input2 = np.array([7]).astype('int32') r0, r1 = exe.run( @@ -476,7 +476,7 @@ def test_out(self): [False, True] if core.is_compiled_with_cuda() else [False] ): place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace() - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input_1 = np.random.random([5, 4]).astype("int32") # input is a variable which shape is [5, 4] input = paddle.to_tensor(input_1) @@ -492,11 +492,11 @@ def test_out(self): class API_TestSplit6(unittest.TestCase): def test_out(self): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): data = paddle.static.data('data', shape=[-1, 10], dtype='float64') x0, x1 = paddle.split(data, num_or_sections=[1, 1], axis=0) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) input1 = np.random.random([2, 10]).astype('float64') r0, r1 = exe.run(feed={"data": input1}, fetch_list=[x0, x1]) ex_x0, ex_x1 = np.split(input1, (1,), axis=0) @@ -506,7 +506,7 @@ def test_out(self): class API_TestDygraphFluidSplit(unittest.TestCase): def test_out1(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_1 = np.random.random([4, 6, 6]).astype("int32") # input is a variable which shape is [4, 6, 6] input = paddle.to_tensor(input_1) @@ -536,7 +536,7 @@ def test_out1(self): np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05) def test_out2(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_1 = np.random.random([4, 6, 6]).astype("int32") # input is a variable which shape is [4, 6, 6] input = paddle.to_tensor(input_1) @@ -568,7 +568,7 @@ def test_out2(self): class API_TestDygraphSplit(unittest.TestCase): def test_out1(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_1 = np.random.random([4, 6, 6]).astype("int32") # input is a variable which shape is [4, 6, 6] input = paddle.to_tensor(input_1) @@ -599,7 +599,7 @@ def test_out1(self): np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05) def test_out2(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_1 = np.random.random([4, 6, 6]).astype("bool") # input is a variable which shape is [4, 6, 6] input = paddle.to_tensor(input_1) @@ -613,7 +613,7 @@ def test_out2(self): np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05) def test_out3(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): np.random.seed(2021) input_1 = np.random.random([4, 6, 6]).astype("int32") # input is a variable which shape is [4, 6, 6] @@ 
-631,7 +631,7 @@ def test_out3(self): np.testing.assert_allclose(ex_out, out_dy_np, rtol=1e-05) def test_out_tensor_input(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_1 = np.random.random([4, 6, 6]).astype("int32") # input is a variable which shape is [4, 6, 6] input = paddle.to_tensor(input_1) @@ -648,7 +648,7 @@ def test_out_tensor_input(self): np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05) def test_axis_tensor_input(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_1 = np.random.random([4, 6, 6]).astype("int32") # input is a variable which shape is [4, 6, 6] input = paddle.to_tensor(input_1) @@ -665,7 +665,7 @@ def test_axis_tensor_input(self): np.testing.assert_allclose(ex_x2, x2_out, rtol=1e-05) def test_negative_one_section(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_1 = np.random.random([4, 6, 6]).astype("int32") # input is a variable which shape is [4, 6, 6] input = paddle.to_tensor(input_1) @@ -677,7 +677,7 @@ def test_negative_one_section(self): class API_TestEmptySplit(unittest.TestCase): def test_axis_input_empty_section(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input_1 = np.random.random([8, 6, 6]).astype("float32") # input is a variable which shape is [8, 6, 6] input = paddle.to_tensor(input_1) diff --git a/test/legacy_test/test_splits_api.py b/test/legacy_test/test_splits_api.py index 74722d83632a7..2b562179b8752 100644 --- a/test/legacy_test/test_splits_api.py +++ b/test/legacy_test/test_splits_api.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core def func_ref(func, x, num_or_sections): diff --git a/test/legacy_test/test_square_error_cost.py b/test/legacy_test/test_square_error_cost.py index 05932c67d589f..3dd54557e051e 100644 --- a/test/legacy_test/test_square_error_cost.py +++ b/test/legacy_test/test_square_error_cost.py @@ -17,9 +17,9 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.executor import Executor +from paddle import base +from paddle.base import core +from paddle.base.executor import Executor class TestSquareErrorCost(unittest.TestCase): @@ -39,10 +39,10 @@ def test_square_error_cost(self): for use_cuda in ( [False, True] if core.is_compiled_with_cuda() else [False] ): - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() exe = Executor(place) (result,) = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": input_val, "label": label_val}, fetch_list=[output], ) diff --git a/test/legacy_test/test_squared_l2_norm_op.py b/test/legacy_test/test_squared_l2_norm_op.py index 4067acd29c525..96872b10f1295 100755 --- a/test/legacy_test/test_squared_l2_norm_op.py +++ b/test/legacy_test/test_squared_l2_norm_op.py @@ -96,7 +96,7 @@ def test_check_grad(self): class TestL2LossDeterministic(unittest.TestCase): def check_place(self, place): - with paddle.fluid.dygraph.guard(place): + with paddle.base.dygraph.guard(place): x_np = np.random.rand(5, 11, 13).astype('float32') x = paddle.to_tensor(x_np) y1 = _legacy_C_ops.squared_l2_norm(x) diff --git a/test/legacy_test/test_squeeze2_op.py b/test/legacy_test/test_squeeze2_op.py index f43ccb8ba8120..ec9b96aed46cc 100755 --- a/test/legacy_test/test_squeeze2_op.py +++ b/test/legacy_test/test_squeeze2_op.py @@ -20,8 +20,8 @@ from test_attribute_var import UnittestBase import paddle -from paddle.fluid 
import core -from paddle.fluid.framework import Program, program_guard +from paddle.base import core +from paddle.base.framework import Program, program_guard paddle.enable_static() diff --git a/test/legacy_test/test_squeeze_op.py b/test/legacy_test/test_squeeze_op.py index f0400f24667d5..6cdfa5fec0f02 100755 --- a/test/legacy_test/test_squeeze_op.py +++ b/test/legacy_test/test_squeeze_op.py @@ -20,8 +20,8 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard paddle.enable_static() @@ -140,7 +140,7 @@ def test_errors(self): paddle.enable_static() with program_guard(Program(), Program()): # The input type of softmax_op must be Variable. - x1 = fluid.create_lod_tensor( + x1 = base.create_lod_tensor( np.array([[-1]]), [[1]], paddle.CPUPlace() ) self.assertRaises(TypeError, paddle.squeeze, x1) @@ -265,9 +265,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -296,9 +296,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) diff --git a/test/legacy_test/test_stack_op.py b/test/legacy_test/test_stack_op.py index 5c5e653dbaeb6..5cc2234555853 100644 --- a/test/legacy_test/test_stack_op.py +++ b/test/legacy_test/test_stack_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid.framework import Program, program_guard +from paddle import base +from paddle.base.framework import Program, program_guard paddle.enable_static() @@ -204,15 +204,15 @@ def setUp(self): self.input_shape = [2, 3] self.x = np.random.random(self.input_shape).astype("float32") self.place = ( - fluid.CUDAPlace(0) - if fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.is_compiled_with_cuda() + else base.CPUPlace() ) self.set_program() def set_program(self): - self.program = fluid.Program() - with fluid.program_guard(self.program): + self.program = base.Program() + with base.program_guard(self.program): input = paddle.assign(self.x) tensor_array = paddle.tensor.create_array(dtype='float32') zero = paddle.tensor.fill_constant( @@ -226,7 +226,7 @@ def set_program(self): def test_case(self): self.assertTrue(self.out_var.shape[self.axis] == -1) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) res = exe.run(self.program, fetch_list=self.out_var) np.testing.assert_array_equal( res[0], np.stack([self.x] * self.iter_num, axis=self.axis) @@ -244,15 +244,15 @@ def setUp(self): self.input_shape = [2, 3] self.x = np.random.random(self.input_shape).astype("float32") self.place = ( - fluid.CUDAPlace(0) - if fluid.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.is_compiled_with_cuda() + else base.CPUPlace() ) self.set_program() def set_program(self): - self.program = fluid.Program() - with fluid.program_guard(self.program): + self.program = base.Program() + with base.program_guard(self.program): input = paddle.assign(self.x) tensor_array = paddle.tensor.create_array(dtype='float32') 
zero = paddle.tensor.fill_constant( @@ -266,7 +266,7 @@ def set_program(self): def test_case(self): self.assertTrue(self.out_var.shape[self.axis] == -1) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) res = exe.run(self.program, fetch_list=self.out_var) np.testing.assert_array_equal( res[0], np.stack([self.x] * self.iter_num, axis=self.axis) @@ -275,13 +275,13 @@ def test_case(self): class API_test(unittest.TestCase): def test_out(self): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): data1 = paddle.static.data('data1', shape=[1, 2], dtype='float64') data2 = paddle.static.data('data2', shape=[1, 2], dtype='float64') data3 = paddle.static.data('data3', shape=[1, 2], dtype='float64') result_stack = paddle.stack([data1, data2, data3], axis=0) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) input1 = np.random.random([1, 2]).astype('float64') input2 = np.random.random([1, 2]).astype('float64') input3 = np.random.random([1, 2]).astype('float64') @@ -293,7 +293,7 @@ def test_out(self): np.testing.assert_allclose(expected_result, result, rtol=1e-05) def test_single_tensor_error(self): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): x = paddle.rand([2, 3]) self.assertRaises(TypeError, paddle.stack, x) @@ -303,24 +303,24 @@ def test_out(self): data1 = np.array([[1.0, 2.0]]) data2 = np.array([[3.0, 4.0]]) data3 = np.array([[5.0, 6.0]]) - with fluid.dygraph.guard(): - x1 = fluid.dygraph.to_variable(data1) - x2 = fluid.dygraph.to_variable(data2) - x3 = fluid.dygraph.to_variable(data3) + with base.dygraph.guard(): + x1 = base.dygraph.to_variable(data1) + x2 = base.dygraph.to_variable(data2) + x3 = base.dygraph.to_variable(data3) result = paddle.stack([x1, x2, x3]) result_np = result.numpy() expected_result = np.stack([data1, data2, data3]) np.testing.assert_allclose(expected_result, result_np, rtol=1e-05) - with fluid.dygraph.guard(): - y1 = fluid.dygraph.to_variable(data1) + with base.dygraph.guard(): + y1 = base.dygraph.to_variable(data1) result = paddle.stack([y1], axis=0) result_np_2 = result.numpy() expected_result_2 = np.stack([data1], axis=0) np.testing.assert_allclose(expected_result_2, result_np_2, rtol=1e-05) def test_single_tensor_error(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): x = paddle.to_tensor([1, 2, 3]) self.assertRaises(Exception, paddle.stack, x) @@ -383,7 +383,7 @@ def setUp(self): def test_list_single_tensor(self): expect = paddle.stack(self.x) - paddle.fluid.core._set_prim_all_enabled(True) + paddle.base.core._set_prim_all_enabled(True) st_model = paddle.jit.to_static(paddle.stack) actual = st_model(self.x) np.testing.assert_allclose(expect, actual) diff --git a/test/legacy_test/test_static_model_parallel_fused_attention.py b/test/legacy_test/test_static_model_parallel_fused_attention.py index c00a91bda1f13..e11e1d25f2fa7 100644 --- a/test/legacy_test/test_static_model_parallel_fused_attention.py +++ b/test/legacy_test/test_static_model_parallel_fused_attention.py @@ -32,9 +32,9 @@ def _setup_config(self): self._pipeline_mode = True def test_dist_static_model_parallel_fused_feedforward(self): - from paddle import fluid + from paddle import base - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( "static_model_parallel_fused_attention.py", delta=1e-5, diff --git 
a/test/legacy_test/test_static_model_parallel_fused_feedforward.py b/test/legacy_test/test_static_model_parallel_fused_feedforward.py index 9c8c95bf769ca..61a0d5b4a00b3 100644 --- a/test/legacy_test/test_static_model_parallel_fused_feedforward.py +++ b/test/legacy_test/test_static_model_parallel_fused_feedforward.py @@ -32,9 +32,9 @@ def _setup_config(self): self._pipeline_mode = True def test_dist_static_model_parallel_fused_feedforward(self): - from paddle import fluid + from paddle import base - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): self.check_with_place( "static_model_parallel_fused_feedforward.py", delta=1e-5, diff --git a/test/legacy_test/test_static_model_parallel_fused_multi_transformer.py b/test/legacy_test/test_static_model_parallel_fused_multi_transformer.py index 705680b531b30..729772699d90e 100644 --- a/test/legacy_test/test_static_model_parallel_fused_multi_transformer.py +++ b/test/legacy_test/test_static_model_parallel_fused_multi_transformer.py @@ -32,10 +32,10 @@ def _setup_config(self): self._pipeline_mode = True def test_dist_static_model_parallel_fused_multi_transformer(self): - from paddle import fluid + from paddle import base if ( - fluid.core.is_compiled_with_cuda() + base.core.is_compiled_with_cuda() and not paddle.is_compiled_with_rocm() ): self.check_with_place( diff --git a/test/legacy_test/test_static_save_load.py b/test/legacy_test/test_static_save_load.py index 9078de654d606..09e204e62191e 100644 --- a/test/legacy_test/test_static_save_load.py +++ b/test/legacy_test/test_static_save_load.py @@ -23,8 +23,8 @@ from test_imperative_base import new_program_scope import paddle -from paddle import fluid -from paddle.fluid import core, framework +from paddle import base +from paddle.base import core, framework from paddle.optimizer import Adam paddle.enable_static() @@ -57,7 +57,7 @@ def __init__( for i in range(self._num_layers): weight_1 = self.create_parameter( - attr=fluid.ParamAttr( + attr=base.ParamAttr( initializer=paddle.nn.initializer.Uniform( low=-self._init_scale, high=self._init_scale ) @@ -70,7 +70,7 @@ def __init__( ) self.weight_1_arr.append(self.add_parameter('w_%d' % i, weight_1)) bias_1 = self.create_parameter( - attr=fluid.ParamAttr( + attr=base.ParamAttr( initializer=paddle.nn.initializer.Uniform( low=-self._init_scale, high=self._init_scale ) @@ -181,7 +181,7 @@ def __init__( self.embedding = paddle.nn.Embedding( num_embeddings=vocab_size, embedding_dim=hidden_size, - weight_attr=fluid.ParamAttr( + weight_attr=base.ParamAttr( name='embedding_para', initializer=paddle.nn.initializer.Uniform( low=-init_scale, high=init_scale @@ -189,7 +189,7 @@ def __init__( ), ) self.softmax_weight = self.create_parameter( - attr=fluid.ParamAttr(), + attr=base.ParamAttr(), shape=[self.hidden_size, self.vocab_size], dtype="float32", default_initializer=paddle.nn.initializer.Uniform( @@ -197,7 +197,7 @@ def __init__( ), ) self.softmax_bias = self.create_parameter( - attr=fluid.ParamAttr(), + attr=base.ParamAttr(), shape=[self.vocab_size], dtype="float32", default_initializer=paddle.nn.initializer.Uniform( @@ -249,9 +249,9 @@ def forward(self, input, label, init_hidden, init_cell): class TestSaveLoadBase(unittest.TestCase): def set_place(self): return ( - fluid.CPUPlace() + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) def test_ptb_rnn_cpu_float32(self): @@ -266,8 +266,8 @@ def test_ptb_rnn_cpu_float32(self): temp_dir = tempfile.TemporaryDirectory() with 
new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + base.default_startup_program().random_seed = seed + base.default_main_program().random_seed = seed ptb_model = PtbModel( "ptb_model", hidden_size=hidden_size, @@ -278,7 +278,7 @@ def test_ptb_rnn_cpu_float32(self): ) place = self.set_place() - exe = fluid.Executor(place) + exe = base.Executor(place) sgd = Adam(learning_rate=1e-3) x = paddle.static.data( name="x", shape=[-1, num_steps], dtype='int64' @@ -320,7 +320,7 @@ def test_ptb_rnn_cpu_float32(self): ) fetch_list = [static_loss, static_last_hidden, static_last_cell] out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={ "x": x_data, "y": y_data, @@ -339,7 +339,7 @@ def test_ptb_rnn_cpu_float32(self): for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) # make sure all the paramerter or optimizer var have been update self.assertTrue(np.sum(np.abs(t)) != 0) @@ -352,11 +352,11 @@ def test_ptb_rnn_cpu_float32(self): # set var to zero for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - ten = fluid.global_scope().find_var(var.name).get_tensor() + ten = base.global_scope().find_var(var.name).get_tensor() ten.set(np.zeros_like(np.array(ten)), place) new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) # make sure all the paramerter or optimizer var have been set to zero self.assertTrue(np.sum(np.abs(new_t)) == 0) @@ -370,7 +370,7 @@ def test_ptb_rnn_cpu_float32(self): for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) @@ -380,9 +380,9 @@ def test_ptb_rnn_cpu_float32(self): class TestSaveLoadPartial(unittest.TestCase): def set_place(self): return ( - fluid.CPUPlace() + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) def test_ptb_rnn_cpu_float32(self): @@ -397,8 +397,8 @@ def test_ptb_rnn_cpu_float32(self): temp_dir = tempfile.TemporaryDirectory() with new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + base.default_startup_program().random_seed = seed + base.default_main_program().random_seed = seed ptb_model = PtbModel( "ptb_model", hidden_size=hidden_size, @@ -409,7 +409,7 @@ def test_ptb_rnn_cpu_float32(self): ) place = self.set_place() - exe = fluid.Executor(place) + exe = base.Executor(place) sgd = Adam(learning_rate=1e-3) x = paddle.static.data( name="x", shape=[-1, num_steps], dtype='int64' @@ -430,7 +430,7 @@ def test_ptb_rnn_cpu_float32(self): x, y, init_hidden, init_cell ) - test_program = fluid.default_main_program().clone(for_test=True) + test_program = base.default_main_program().clone(for_test=True) add_1 = paddle.static.nn.fc( static_last_hidden, @@ -461,7 +461,7 @@ def test_ptb_rnn_cpu_float32(self): ) fetch_list = [static_loss, static_last_hidden, static_last_cell] out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={ "x": x_data, "y": y_data, @@ -480,7 +480,7 @@ def 
test_ptb_rnn_cpu_float32(self): for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) # make sure all the paramerter or optimizer var have been update self.assertTrue(np.sum(np.abs(t)) != 0) @@ -493,11 +493,11 @@ def test_ptb_rnn_cpu_float32(self): # set var to zero for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - ten = fluid.global_scope().find_var(var.name).get_tensor() + ten = base.global_scope().find_var(var.name).get_tensor() ten.set(np.zeros_like(np.array(ten)), place) new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) # make sure all the paramerter or optimizer var have been set to zero self.assertTrue(np.sum(np.abs(new_t)) == 0) @@ -509,7 +509,7 @@ def test_ptb_rnn_cpu_float32(self): for var in test_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) @@ -524,9 +524,9 @@ def test_ptb_rnn_cpu_float32(self): class TestSaveLoadSetStateDict(unittest.TestCase): def set_place(self): return ( - fluid.CPUPlace() + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) def test_ptb_rnn_cpu_float32(self): @@ -541,8 +541,8 @@ def test_ptb_rnn_cpu_float32(self): temp_dir = tempfile.TemporaryDirectory() with new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + base.default_startup_program().random_seed = seed + base.default_main_program().random_seed = seed ptb_model = PtbModel( "ptb_model", hidden_size=hidden_size, @@ -553,7 +553,7 @@ def test_ptb_rnn_cpu_float32(self): ) place = self.set_place() - exe = fluid.Executor(place) + exe = base.Executor(place) sgd = Adam(learning_rate=1e-3) x = paddle.static.data( name="x", shape=[-1, num_steps], dtype='int64' @@ -595,7 +595,7 @@ def test_ptb_rnn_cpu_float32(self): ) fetch_list = [static_loss, static_last_hidden, static_last_cell] out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={ "x": x_data, "y": y_data, @@ -614,7 +614,7 @@ def test_ptb_rnn_cpu_float32(self): for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) # make sure all the paramerter or optimizer var have been update self.assertTrue(np.sum(np.abs(t)) != 0) @@ -627,11 +627,11 @@ def test_ptb_rnn_cpu_float32(self): # set var to zero for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - ten = fluid.global_scope().find_var(var.name).get_tensor() + ten = base.global_scope().find_var(var.name).get_tensor() ten.set(np.zeros_like(np.array(ten)), place) new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) # make sure all the paramerter or optimizer var have been set to zero self.assertTrue(np.sum(np.abs(new_t)) == 0) @@ -643,7 +643,7 @@ def test_ptb_rnn_cpu_float32(self): for var in main_program.list_vars(): if isinstance(var, 
framework.Parameter) or var.persistable: new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) @@ -653,9 +653,9 @@ def test_ptb_rnn_cpu_float32(self): class TestProgramStatePartial(unittest.TestCase): def set_place(self): return ( - fluid.CPUPlace() + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) def test_ptb_rnn_cpu_float32(self): @@ -670,8 +670,8 @@ def test_ptb_rnn_cpu_float32(self): temp_dir = tempfile.TemporaryDirectory() with new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + base.default_startup_program().random_seed = seed + base.default_main_program().random_seed = seed ptb_model = PtbModel( "ptb_model", hidden_size=hidden_size, @@ -682,7 +682,7 @@ def test_ptb_rnn_cpu_float32(self): ) place = self.set_place() - exe = fluid.Executor(place) + exe = base.Executor(place) sgd = Adam(learning_rate=1e-3) x = paddle.static.data( name="x", shape=[-1, num_steps], dtype='int64' @@ -703,7 +703,7 @@ def test_ptb_rnn_cpu_float32(self): x, y, init_hidden, init_cell ) - test_program = fluid.default_main_program().clone(for_test=True) + test_program = base.default_main_program().clone(for_test=True) add_1 = paddle.static.nn.fc( static_last_hidden, @@ -734,7 +734,7 @@ def test_ptb_rnn_cpu_float32(self): ) fetch_list = [static_loss, static_last_hidden, static_last_cell] out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={ "x": x_data, "y": y_data, @@ -753,7 +753,7 @@ def test_ptb_rnn_cpu_float32(self): for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) # make sure all the paramerter or optimizer var have been update self.assertTrue(np.sum(np.abs(t)) != 0) @@ -766,16 +766,16 @@ def test_ptb_rnn_cpu_float32(self): # set var to zero for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - ten = fluid.global_scope().find_var(var.name).get_tensor() + ten = base.global_scope().find_var(var.name).get_tensor() ten.set(np.zeros_like(np.array(ten)), place) new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) # make sure all the paramerter or optimizer var have been set to zero self.assertTrue(np.sum(np.abs(new_t)) == 0) - # fluid.load(test_program, "./test_1", None ) + # base.load(test_program, "./test_1", None ) program_state = paddle.static.load_program_state( os.path.join(temp_dir.name, 'test_1') ) @@ -797,7 +797,7 @@ def test_ptb_rnn_cpu_float32(self): for var in test_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) @@ -805,11 +805,11 @@ def test_ptb_rnn_cpu_float32(self): # check 1 for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - ten = fluid.global_scope().find_var(var.name).get_tensor() + ten = base.global_scope().find_var(var.name).get_tensor() ten.set(np.zeros_like(np.array(ten)), place) new_t = np.array( - 
fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) # make sure all the paramerter or optimizer var have been set to zero self.assertTrue(np.sum(np.abs(new_t)) == 0) @@ -819,7 +819,7 @@ def test_ptb_rnn_cpu_float32(self): for var in test_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) @@ -827,11 +827,11 @@ def test_ptb_rnn_cpu_float32(self): # check 2 for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - ten = fluid.global_scope().find_var(var.name).get_tensor() + ten = base.global_scope().find_var(var.name).get_tensor() ten.set(np.zeros_like(np.array(ten)), place) new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) # make sure all the paramerter or optimizer var have been set to zero self.assertTrue(np.sum(np.abs(new_t)) == 0) @@ -841,7 +841,7 @@ def test_ptb_rnn_cpu_float32(self): for var in test_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) @@ -849,11 +849,11 @@ def test_ptb_rnn_cpu_float32(self): # check 3 for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - ten = fluid.global_scope().find_var(var.name).get_tensor() + ten = base.global_scope().find_var(var.name).get_tensor() ten.set(np.zeros_like(np.array(ten)), place) new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) # make sure all the paramerter or optimizer var have been set to zero self.assertTrue(np.sum(np.abs(new_t)) == 0) @@ -863,7 +863,7 @@ def test_ptb_rnn_cpu_float32(self): for var in test_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) @@ -873,9 +873,9 @@ def test_ptb_rnn_cpu_float32(self): class TestVariableInit(unittest.TestCase): def set_place(self): return ( - fluid.CPUPlace() + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) def test_variable_init(self): @@ -884,12 +884,12 @@ def test_variable_init(self): z = paddle.static.nn.fc(y, 10) place = self.set_place() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + exe.run(base.default_startup_program()) temp_dir = tempfile.TemporaryDirectory() paddle.static.save( - fluid.default_main_program(), + base.default_main_program(), os.path.join(temp_dir.name, "test_path"), ) @@ -897,26 +897,26 @@ def set_var(var, ndarray): t = var.get_tensor() p = t._place() if p.is_cpu_place(): - place = paddle.fluid.CPUPlace() + place = paddle.base.CPUPlace() elif p.is_cuda_pinned_place(): - place = paddle.fluid.CUDAPinnedPlace() + place = paddle.base.CUDAPinnedPlace() else: - p = paddle.fluid.core.Place() + p = paddle.base.core.Place() p.set_place(t._place()) - place = 
paddle.fluid.CUDAPlace(p.gpu_device_id()) + place = paddle.base.CUDAPlace(p.gpu_device_id()) t.set(ndarray, place) - program = fluid.default_main_program() - new_scope = fluid.core.Scope() + program = base.default_main_program() + new_scope = base.core.Scope() place = self.set_place() - exe = fluid.Executor(place) + exe = base.Executor(place) parameter_list = list( filter(paddle.framework.is_parameter, program.list_vars()) ) - fluid.core._create_loaded_parameter( + base.core._create_loaded_parameter( parameter_list, new_scope, exe._default_executor ) parameter_file_name = os.path.join(temp_dir.name, "test_path.pdparams") @@ -939,7 +939,7 @@ def set_var(var, ndarray): ) ) - fluid.core._create_loaded_parameter( + base.core._create_loaded_parameter( opt_list, new_scope, exe._default_executor ) opt_file_name = os.path.join(temp_dir.name, "test_path.pdopt") @@ -960,7 +960,7 @@ def set_var(var, ndarray): for var in program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) # make sure all the paramerter or optimizer var have been update base_map[var.name] = t @@ -986,9 +986,9 @@ def setUp(self): def set_place(self): return ( - fluid.CPUPlace() + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) def tearDown(self): @@ -1005,8 +1005,8 @@ def test_load_from_old_interface(self): batch_num = 200 with new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + base.default_startup_program().random_seed = seed + base.default_main_program().random_seed = seed ptb_model = PtbModel( "ptb_model", hidden_size=hidden_size, @@ -1017,7 +1017,7 @@ def test_load_from_old_interface(self): ) place = self.set_place() - exe = fluid.Executor(place) + exe = base.Executor(place) sgd = Adam(learning_rate=1e-3) x = paddle.static.data( name="x", shape=[-1, num_steps], dtype='int64' @@ -1038,7 +1038,7 @@ def test_load_from_old_interface(self): x, y, init_hidden, init_cell ) - test_clone_program = fluid.default_main_program().clone() + test_clone_program = base.default_main_program().clone() sgd.minimize(static_loss) static_param_updated = {} static_param_init = {} @@ -1061,7 +1061,7 @@ def test_load_from_old_interface(self): ) fetch_list = [static_loss, static_last_hidden, static_last_cell] out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={ "x": x_data, "y": y_data, @@ -1080,13 +1080,13 @@ def test_load_from_old_interface(self): for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) # make sure all the paramerter or optimizer var have been update self.assertTrue(np.sum(np.abs(t)) != 0) base_map[var.name] = t - # fluid.save(main_program, "./test_1") + # base.save(main_program, "./test_1") paddle.distributed.io.save_persistables( exe, os.path.join(self.temp_dir.name, "test_path"), main_program ) @@ -1094,11 +1094,11 @@ def test_load_from_old_interface(self): # set var to zero for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - ten = fluid.global_scope().find_var(var.name).get_tensor() + ten = base.global_scope().find_var(var.name).get_tensor() ten.set(np.zeros_like(np.array(ten)), place) new_t = np.array( - 
fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) # make sure all the paramerter or optimizer var have been set to zero self.assertTrue(np.sum(np.abs(new_t)) == 0) @@ -1110,14 +1110,14 @@ def test_load_from_old_interface(self): for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - ten = fluid.global_scope().find_var(var.name).get_tensor() + ten = base.global_scope().find_var(var.name).get_tensor() old_shape = np.array(ten).shape new_shape = [e + 10 for e in old_shape] @@ -1148,8 +1148,8 @@ def test_load_from_old_interface_var_list(self): batch_num = 200 with new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + base.default_startup_program().random_seed = seed + base.default_main_program().random_seed = seed ptb_model = PtbModel( "ptb_model", hidden_size=hidden_size, @@ -1160,7 +1160,7 @@ def test_load_from_old_interface_var_list(self): ) place = self.set_place() - exe = fluid.Executor(place) + exe = base.Executor(place) sgd = Adam(learning_rate=1e-3) x = paddle.static.data( name="x", shape=[-1, num_steps], dtype='int64' @@ -1180,7 +1180,7 @@ def test_load_from_old_interface_var_list(self): x, y, init_hidden, init_cell ) - test_clone_program = fluid.default_main_program().clone() + test_clone_program = base.default_main_program().clone() sgd.minimize(static_loss) static_param_updated = {} static_param_init = {} @@ -1203,7 +1203,7 @@ def test_load_from_old_interface_var_list(self): ) fetch_list = [static_loss, static_last_hidden, static_last_cell] out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={ "x": x_data, "y": y_data, @@ -1222,13 +1222,13 @@ def test_load_from_old_interface_var_list(self): for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) # make sure all the paramerter or optimizer var have been update self.assertTrue(np.sum(np.abs(t)) != 0) base_map[var.name] = t - # fluid.save(main_program, "./test_1") + # base.save(main_program, "./test_1") paddle.distributed.io.save_persistables( exe, os.path.join(self.temp_dir.name, "test_static_load_var_list"), @@ -1241,11 +1241,11 @@ def test_load_from_old_interface_var_list(self): if isinstance(var, framework.Parameter) or var.persistable: if i % 2 == 0: var_list.append(var) - ten = fluid.global_scope().find_var(var.name).get_tensor() + ten = base.global_scope().find_var(var.name).get_tensor() ten.set(np.zeros_like(np.array(ten)), place) new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) # make sure all the paramerter or optimizer var have been set to zero self.assertTrue(np.sum(np.abs(new_t)) == 0) @@ -1260,7 +1260,7 @@ def test_load_from_old_interface_var_list(self): for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) if 
var.name in var_list_names: # loaded vars @@ -1274,9 +1274,9 @@ def test_load_from_old_interface_var_list(self): class TestLoadFromOldInterfaceSingleFile(unittest.TestCase): def set_place(self): return ( - fluid.CPUPlace() + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) def test_load_from_old_interface(self): @@ -1291,8 +1291,8 @@ def test_load_from_old_interface(self): temp_dir = tempfile.TemporaryDirectory() with new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + base.default_startup_program().random_seed = seed + base.default_main_program().random_seed = seed ptb_model = PtbModel( "ptb_model", hidden_size=hidden_size, @@ -1303,7 +1303,7 @@ def test_load_from_old_interface(self): ) place = self.set_place() - exe = fluid.Executor(place) + exe = base.Executor(place) sgd = Adam(learning_rate=1e-3) x = paddle.static.data( name="x", shape=[-1, num_steps], dtype='int64' @@ -1345,7 +1345,7 @@ def test_load_from_old_interface(self): ) fetch_list = [static_loss, static_last_hidden, static_last_cell] out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={ "x": x_data, "y": y_data, @@ -1364,13 +1364,13 @@ def test_load_from_old_interface(self): for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) # make sure all the paramerter or optimizer var have been update self.assertTrue(np.sum(np.abs(t)) != 0) base_map[var.name] = t save_dir = os.path.join(temp_dir.name, "test_path") - # fluid.save(main_program, "./test_1") + # base.save(main_program, "./test_1") paddle.distributed.io.save_persistables( exe, save_dir, main_program, filename="model_single" ) @@ -1378,11 +1378,11 @@ def test_load_from_old_interface(self): # set var to zero for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - ten = fluid.global_scope().find_var(var.name).get_tensor() + ten = base.global_scope().find_var(var.name).get_tensor() ten.set(np.zeros_like(np.array(ten)), place) new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) # make sure all the paramerter or optimizer var have been set to zero self.assertTrue(np.sum(np.abs(new_t)) == 0) @@ -1398,7 +1398,7 @@ def test_load_from_old_interface(self): for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) @@ -1407,7 +1407,7 @@ def test_load_from_old_interface(self): # change shape for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - ten = fluid.global_scope().find_var(var.name).get_tensor() + ten = base.global_scope().find_var(var.name).get_tensor() old_shape = np.array(ten).shape new_shape = [e + 10 for e in old_shape] @@ -1467,9 +1467,9 @@ def tearDown(self): def set_place(self): return ( - fluid.CPUPlace() + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) def test_ptb_rnn_cpu_float32(self): @@ -1483,8 +1483,8 @@ def test_ptb_rnn_cpu_float32(self): batch_num = 200 with new_program_scope(): - 
fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + base.default_startup_program().random_seed = seed + base.default_main_program().random_seed = seed ptb_model = PtbModel( "ptb_model", hidden_size=hidden_size, @@ -1495,7 +1495,7 @@ def test_ptb_rnn_cpu_float32(self): ) place = self.set_place() - exe = fluid.Executor(place) + exe = base.Executor(place) sgd = Adam(learning_rate=1e-3) x = paddle.static.data( name="x", shape=[-1, num_steps], dtype='int64' @@ -1516,7 +1516,7 @@ def test_ptb_rnn_cpu_float32(self): x, y, init_hidden, init_cell ) - test_program = fluid.default_main_program().clone(for_test=True) + test_program = base.default_main_program().clone(for_test=True) add_1 = paddle.static.nn.fc( static_last_hidden, @@ -1547,7 +1547,7 @@ def test_ptb_rnn_cpu_float32(self): ) fetch_list = [static_loss, static_last_hidden, static_last_cell] out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={ "x": x_data, "y": y_data, @@ -1566,7 +1566,7 @@ def test_ptb_rnn_cpu_float32(self): for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) # make sure all the paramerter or optimizer var have been update self.assertTrue(np.sum(np.abs(t)) != 0) @@ -1577,11 +1577,11 @@ def test_ptb_rnn_cpu_float32(self): # set var to zero for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - ten = fluid.global_scope().find_var(var.name).get_tensor() + ten = base.global_scope().find_var(var.name).get_tensor() ten.set(np.zeros_like(np.array(ten)), place) new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) # make sure all the paramerter or optimizer var have been set to zero self.assertTrue(np.sum(np.abs(new_t)) == 0) @@ -1615,7 +1615,7 @@ def symlink_force(target, link_name): if self.test_dygraph: # make sure `load_program_state` can be used in dynamic graph mode - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): load_state = paddle.static.load_program_state(save_dir) for k, v in load_state.items(): np.testing.assert_array_equal(base_map[k], v) @@ -1633,7 +1633,7 @@ def check_in_static(self, main_program, base_map): for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) @@ -1642,9 +1642,9 @@ def check_in_static(self, main_program, base_map): class TestProgramStateOldSaveSingleModel(unittest.TestCase): def set_place(self): return ( - fluid.CPUPlace() + base.CPUPlace() if not core.is_compiled_with_cuda() - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) def test_ptb_rnn_cpu_float32(self): @@ -1659,8 +1659,8 @@ def test_ptb_rnn_cpu_float32(self): temp_dir = tempfile.TemporaryDirectory() with new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + base.default_startup_program().random_seed = seed + base.default_main_program().random_seed = seed ptb_model = PtbModel( "ptb_model", hidden_size=hidden_size, @@ -1671,7 +1671,7 @@ def test_ptb_rnn_cpu_float32(self): ) place = self.set_place() - exe = fluid.Executor(place) + exe = 
base.Executor(place) sgd = Adam(learning_rate=1e-3) x = paddle.static.data( name="x", shape=[-1, num_steps], dtype='int64' @@ -1692,7 +1692,7 @@ def test_ptb_rnn_cpu_float32(self): x, y, init_hidden, init_cell ) - test_program = fluid.default_main_program().clone(for_test=True) + test_program = base.default_main_program().clone(for_test=True) add_1 = paddle.static.nn.fc( static_last_hidden, @@ -1723,7 +1723,7 @@ def test_ptb_rnn_cpu_float32(self): ) fetch_list = [static_loss, static_last_hidden, static_last_cell] out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={ "x": x_data, "y": y_data, @@ -1742,7 +1742,7 @@ def test_ptb_rnn_cpu_float32(self): for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) # make sure all the paramerter or optimizer var have been update self.assertTrue(np.sum(np.abs(t)) != 0) @@ -1756,16 +1756,16 @@ def test_ptb_rnn_cpu_float32(self): # set var to zero for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - ten = fluid.global_scope().find_var(var.name).get_tensor() + ten = base.global_scope().find_var(var.name).get_tensor() ten.set(np.zeros_like(np.array(ten)), place) new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) # make sure all the paramerter or optimizer var have been set to zero self.assertTrue(np.sum(np.abs(new_t)) == 0) - # fluid.load(test_program, "./test_1", None ) + # base.load(test_program, "./test_1", None ) program_state = paddle.static.load_program_state( os.path.join(save_dir, "model_1"), var_list=paddle.static.io.get_program_persistable_vars( @@ -1777,7 +1777,7 @@ def test_ptb_rnn_cpu_float32(self): for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) @@ -1827,7 +1827,7 @@ def test_pickle_protocol(self): for var in prog.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) # make sure all the paramerter or optimizer var have been update self.assertTrue(np.sum(np.abs(t)) != 0) @@ -1854,12 +1854,12 @@ def test_pickle_protocol(self): for var in prog.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: ten = ( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) ten.set(np.zeros_like(np.array(ten)), place) new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) self.assertTrue(np.sum(np.abs(new_t)) == 0) @@ -1868,7 +1868,7 @@ def test_pickle_protocol(self): for var in prog.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) diff --git a/test/legacy_test/test_static_save_load_bf16.py b/test/legacy_test/test_static_save_load_bf16.py index 10b374689d960..b087ac8dadfc2 100644 --- 
a/test/legacy_test/test_static_save_load_bf16.py +++ b/test/legacy_test/test_static_save_load_bf16.py @@ -21,8 +21,8 @@ from test_static_save_load import PtbModel import paddle -from paddle import fluid -from paddle.fluid import core, framework +from paddle import base +from paddle.base import core, framework @unittest.skipIf( @@ -36,7 +36,7 @@ def tearDown(self): self.temp_dir.cleanup() def set_place(self): - return fluid.CPUPlace() + return base.CPUPlace() def test_ptb_rnn_cpu_bfloat16(self): seed = 90 @@ -49,8 +49,8 @@ def test_ptb_rnn_cpu_bfloat16(self): batch_num = 100 with new_program_scope(): - fluid.default_startup_program().random_seed = seed - fluid.default_main_program().random_seed = seed + base.default_startup_program().random_seed = seed + base.default_main_program().random_seed = seed ptb_model = PtbModel( "ptb_model", hidden_size=hidden_size, @@ -61,7 +61,7 @@ def test_ptb_rnn_cpu_bfloat16(self): ) place = self.set_place() - exe = fluid.Executor(place) + exe = base.Executor(place) sgd = paddle.optimizer.SGD(learning_rate=1e-3) x = paddle.static.data( name="x", shape=[-1, num_steps], dtype='int64' @@ -110,7 +110,7 @@ def test_ptb_rnn_cpu_bfloat16(self): fetch_list = [static_loss, static_last_hidden, static_last_cell] out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={ "x": x_data, "y": y_data, @@ -126,7 +126,7 @@ def test_ptb_rnn_cpu_bfloat16(self): for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) # make sure all the paramerter or optimizer var have been update self.assertTrue(np.sum(np.abs(t)) != 0) @@ -137,11 +137,11 @@ def test_ptb_rnn_cpu_bfloat16(self): # set var to zero for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - ten = fluid.global_scope().find_var(var.name).get_tensor() + ten = base.global_scope().find_var(var.name).get_tensor() ten.set(np.zeros_like(np.array(ten)), place) new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) # make sure all the paramerter or optimizer var have been set to zero self.assertTrue(np.sum(np.abs(new_t)) == 0) @@ -155,7 +155,7 @@ def test_ptb_rnn_cpu_bfloat16(self): for var in main_program.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) diff --git a/test/legacy_test/test_static_save_load_large.py b/test/legacy_test/test_static_save_load_large.py index baf4837737744..201f30039c59b 100644 --- a/test/legacy_test/test_static_save_load_large.py +++ b/test/legacy_test/test_static_save_load_large.py @@ -20,8 +20,8 @@ from test_imperative_base import new_program_scope import paddle -from paddle import fluid -from paddle.fluid import framework +from paddle import base +from paddle.base import framework LARGE_PARAM = 2**26 @@ -47,7 +47,7 @@ def test_large_parameters_static_save(self): for var in prog.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) # make sure all the paramerter or optimizer var have been update self.assertTrue(np.sum(np.abs(t)) != 0) 
@@ -62,11 +62,11 @@ def test_large_parameters_static_save(self): # set var to zero for var in prog.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - ten = fluid.global_scope().find_var(var.name).get_tensor() + ten = base.global_scope().find_var(var.name).get_tensor() ten.set(np.zeros_like(np.array(ten)), place) new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) self.assertTrue(np.sum(np.abs(new_t)) == 0) @@ -75,7 +75,7 @@ def test_large_parameters_static_save(self): for var in prog.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) @@ -83,11 +83,11 @@ def test_large_parameters_static_save(self): # set var to zero for var in prog.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: - ten = fluid.global_scope().find_var(var.name).get_tensor() + ten = base.global_scope().find_var(var.name).get_tensor() ten.set(np.zeros_like(np.array(ten)), place) new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) self.assertTrue(np.sum(np.abs(new_t)) == 0) @@ -96,7 +96,7 @@ def test_large_parameters_static_save(self): for var in prog.list_vars(): if isinstance(var, framework.Parameter) or var.persistable: new_t = np.array( - fluid.global_scope().find_var(var.name).get_tensor() + base.global_scope().find_var(var.name).get_tensor() ) base_t = base_map[var.name] np.testing.assert_array_equal(new_t, base_t) diff --git a/test/legacy_test/test_std_layer.py b/test/legacy_test/test_std_layer.py index 7cbf235699bd6..22ef341259142 100644 --- a/test/legacy_test/test_std_layer.py +++ b/test/legacy_test/test_std_layer.py @@ -39,7 +39,7 @@ def setUp(self): self.x = np.random.uniform(-1, 1, self.shape).astype(self.dtype) self.place = ( paddle.CUDAPlace(0) - if paddle.fluid.core.is_compiled_with_cuda() + if paddle.base.core.is_compiled_with_cuda() else paddle.CPUPlace() ) @@ -122,7 +122,7 @@ def test_error(self): class Testfp16Std(unittest.TestCase): def test_fp16_with_gpu(self): paddle.enable_static() - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): place = paddle.CUDAPlace(0) with paddle.static.program_guard( paddle.static.Program(), paddle.static.Program() diff --git a/test/legacy_test/test_stride.py b/test/legacy_test/test_stride.py index 531575a287230..a80451e36fdc4 100644 --- a/test/legacy_test/test_stride.py +++ b/test/legacy_test/test_stride.py @@ -629,7 +629,7 @@ def test_stride_cpu(self): @unittest.skipIf( - not paddle.fluid.core.is_compiled_with_cuda(), + not paddle.base.core.is_compiled_with_cuda(), "core is not compiled with CUDA", ) class TestStrideGPU(TestStride): diff --git a/test/legacy_test/test_strided_slice_op.py b/test/legacy_test/test_strided_slice_op.py index dde0bbf4e5c25..9aca4ab961aa7 100644 --- a/test/legacy_test/test_strided_slice_op.py +++ b/test/legacy_test/test_strided_slice_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid +from paddle import base paddle.enable_static() @@ -594,9 +594,9 @@ def test_1(self): out_6 = x[minus_3:3:1, 0:100:2, :, minus_1:2:minus_1] out_7 = x[minus_1, 0:100:2, :, -1:2:-1] - exe = fluid.Executor(place=fluid.CPUPlace()) + exe = 
base.Executor(place=base.CPUPlace()) res_1, res_2, res_3, res_4, res_5, res_6, res_7 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={ "x": input, 'starts': np.array([-3, 0, 2]).astype("int32"), @@ -629,7 +629,7 @@ def test_dygraph_op(self): "Cannot use CUDAPinnedPlace in CPU only version", ) def test_cuda_pinned_place(self): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x = paddle.to_tensor( np.random.randn(2, 10), place=paddle.CUDAPinnedPlace() ) @@ -760,7 +760,7 @@ def create_case(self, net): def test_strided_slice_tensor_array_cuda_pinned_place(self): if paddle.device.is_compiled_with_cuda(): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): class Simple(paddle.nn.Layer): def __init__(self): @@ -971,7 +971,7 @@ def array_slice(self, tensors): @unittest.skipIf( - not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA" + not base.core.is_compiled_with_cuda(), "core is not compiled with CUDA" ) class TestStridedSliceFloat16(unittest.TestCase): def init_test_case(self): diff --git a/test/legacy_test/test_subtract_op.py b/test/legacy_test/test_subtract_op.py index 10b9e4d0c97bb..a6b85af0f463d 100644 --- a/test/legacy_test/test_subtract_op.py +++ b/test/legacy_test/test_subtract_op.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core class ApiSubtractTest(unittest.TestCase): diff --git a/test/legacy_test/test_sum_op.py b/test/legacy_test/test_sum_op.py index e70ec88176fea..db71738e61e00 100644 --- a/test/legacy_test/test_sum_op.py +++ b/test/legacy_test/test_sum_op.py @@ -29,9 +29,9 @@ import paddle import paddle.inference as paddle_infer -from paddle import enable_static, fluid -from paddle.fluid import core -from paddle.fluid.layer_helper import LayerHelper +from paddle import enable_static, base +from paddle.base import core +from paddle.base.layer_helper import LayerHelper def sum_wrapper(X, use_mkldnn=False): @@ -358,7 +358,7 @@ def test_check_grad(self): class API_Test_Add_n(unittest.TestCase): def test_api(self): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input0 = paddle.tensor.fill_constant( shape=[2, 3], dtype='int64', value=5 ) @@ -368,12 +368,12 @@ def test_api(self): expected_result = np.empty((2, 3)) expected_result.fill(8) sum_value = paddle.add_n([input0, input1]) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) result = exe.run(fetch_list=[sum_value]) self.assertEqual((result == expected_result).all(), True) - with fluid.dygraph.guard(): + with base.dygraph.guard(): input0 = paddle.ones(shape=[2, 3], dtype='float32') expected_result = np.empty((2, 3)) expected_result.fill(2) @@ -382,7 +382,7 @@ def test_api(self): self.assertEqual((sum_value.numpy() == expected_result).all(), True) def test_dygraph_api(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): input0 = paddle.ones(shape=[2, 3], dtype='float32') input1 = paddle.ones(shape=[2, 3], dtype='float32') input0.stop_gradient = False @@ -403,7 +403,7 @@ def test_dygraph_api(self): ) def test_add_n_and_add_and_grad(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): np_x = np.array([[1, 2, 3], [4, 5, 6]]) np_y = [[7, 8, 9], [10, 11, 12]] np_z = [[1, 1, 1], [1, 1, 1]] @@ -495,12 +495,12 @@ def test_out_dtype(): class TestSumOpError(unittest.TestCase): def test_errors(self): def test_empty_list_input(): - with fluid.dygraph.guard(): - 
fluid._legacy_C_ops.sum([]) + with base.dygraph.guard(): + base._legacy_C_ops.sum([]) def test_list_of_none_input(): - with fluid.dygraph.guard(): - fluid._legacy_C_ops.sum([None]) + with base.dygraph.guard(): + base._legacy_C_ops.sum([None]) self.assertRaises(Exception, test_empty_list_input) self.assertRaises(Exception, test_list_of_none_input) @@ -645,9 +645,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -687,9 +687,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -718,9 +718,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -749,9 +749,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) diff --git a/test/legacy_test/test_svd_op.py b/test/legacy_test/test_svd_op.py index 545a7aa8cf7d0..847a2e7e161cb 100644 --- a/test/legacy_test/test_svd_op.py +++ b/test/legacy_test/test_svd_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, skip_check_grad_ci import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestSvdOp(OpTest): @@ -295,27 +295,27 @@ def test_dygraph(self): def test_static(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for place in places: - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): a = np.random.rand(5, 5) x = paddle.static.data( name="input", shape=[5, 5], dtype='float64' ) u, s, vh = paddle.linalg.svd(x) - exe = fluid.Executor(place) + exe = base.Executor(place) gt_u, gt_s, gt_vh = np.linalg.svd(a, full_matrices=False) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": a}, fetch_list=[s], ) np.testing.assert_allclose(fetches[0], gt_s, rtol=1e-05) def test_errors(self): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): # The size of input in svd should not be 0. 
def test_0_size(): array = np.array([], dtype=np.float32) diff --git a/test/legacy_test/test_switch.py b/test/legacy_test/test_switch.py index 428e5537f8ba6..3c90ba5260542 100644 --- a/test/legacy_test/test_switch.py +++ b/test/legacy_test/test_switch.py @@ -15,9 +15,9 @@ import unittest import paddle -from paddle.fluid import core, framework -from paddle.fluid.executor import Executor -from paddle.fluid.framework import default_startup_program +from paddle.base import core, framework +from paddle.base.executor import Executor +from paddle.base.framework import default_startup_program class TestSwitch(unittest.TestCase): diff --git a/test/legacy_test/test_switch_autotune.py b/test/legacy_test/test_switch_autotune.py index 92ca789d8b074..fbd8928c4413c 100644 --- a/test/legacy_test/test_switch_autotune.py +++ b/test/legacy_test/test_switch_autotune.py @@ -85,7 +85,7 @@ def test_autotune(self): self.assertEqual(self.get_flags("FLAGS_use_autotune"), True) def check_status(self, expected_res): - status = paddle.fluid.core.autotune_status() + status = paddle.base.core.autotune_status() for key in status.keys(): v = status[key] if key == "cache_hit_rate": @@ -134,7 +134,7 @@ def run_program(self, enable_autotune): loss = static_program(net, data) place = ( paddle.CUDAPlace(0) - if paddle.fluid.core.is_compiled_with_cuda() + if paddle.base.core.is_compiled_with_cuda() else paddle.CPUPlace() ) exe = paddle.static.Executor(place) @@ -160,7 +160,7 @@ def run_program(self, enable_autotune): for i in range(3): exe.run(program=main_program, feed={'X': x}, fetch_list=[loss]) - status = paddle.fluid.core.autotune_status() + status = paddle.base.core.autotune_status() expected_res = self.get_expected_res(i, enable_autotune) self.check_status(expected_res) paddle.disable_static() diff --git a/test/legacy_test/test_switch_case.py b/test/legacy_test/test_switch_case.py index d16c48540549a..742de7a9a01fa 100644 --- a/test/legacy_test/test_switch_case.py +++ b/test/legacy_test/test_switch_case.py @@ -18,10 +18,10 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.backward import append_backward -from paddle.fluid.framework import Program, program_guard +from paddle import base +from paddle.base import core +from paddle.base.backward import append_backward +from paddle.base.framework import Program, program_guard paddle.enable_static() @@ -85,11 +85,11 @@ def fn_3(): ) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) res = exe.run( main_program, fetch_list=[out_0, out_1, out_2, out_3, out_4] @@ -172,11 +172,11 @@ def fn_3(): ) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) res = exe.run( main_program, fetch_list=[out_0, out_1, out_2, out_3, out_4] @@ -234,11 +234,11 @@ def test_0d_tensor_backward(self): append_backward(out) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) res = exe.run(main_program, fetch_list=[out.name, x.grad_name]) np.testing.assert_allclose( @@ -365,11 +365,11 @@ def fn_3(): ) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe 
= fluid.Executor(place) + exe = base.Executor(place) ret = exe.run(main_program, fetch_list=out) np.testing.assert_allclose( @@ -463,11 +463,11 @@ def fn_3(): ) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) res = exe.run( main_program, @@ -560,11 +560,11 @@ def fn_3(): ) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) res = exe.run( main_program, diff --git a/test/legacy_test/test_sync_batch_norm_op.py b/test/legacy_test/test_sync_batch_norm_op.py index 5fa9bce916b52..d98aed3531689 100644 --- a/test/legacy_test/test_sync_batch_norm_op.py +++ b/test/legacy_test/test_sync_batch_norm_op.py @@ -33,9 +33,9 @@ ) import paddle -from paddle import fluid, nn -from paddle.fluid import Program, core, program_guard -from paddle.fluid.framework import in_dygraph_mode +from paddle import base, nn +from paddle.base import Program, core, program_guard +from paddle.base.framework import in_dygraph_mode _set_use_system_allocator(True) @@ -149,13 +149,13 @@ def _build_program( self, place, layout, seed, sync_bn=False, only_forward=False ): """Build program.""" - main = fluid.Program() - startup = fluid.Program() + main = base.Program() + startup = base.Program() main.random_seed = seed startup.random_seed = seed use_cudnn = (self.dtype == np.float16) or (self.dtype == np.uint16) - with fluid.unique_name.guard(): - with fluid.program_guard(main, startup): + with base.unique_name.guard(): + with base.program_guard(main, startup): data = paddle.static.data( name='input', shape=self.dshape, @@ -166,14 +166,14 @@ def _build_program( input=data, num_filters=32, filter_size=1, - param_attr=fluid.ParamAttr(name='conv2d_weight'), + param_attr=base.ParamAttr(name='conv2d_weight'), bias_attr=False, use_cudnn=use_cudnn, ) bn = paddle.static.nn.batch_norm( conv, - param_attr=fluid.ParamAttr(name='bn_scale'), - bias_attr=fluid.ParamAttr(name='bn_bias'), + param_attr=base.ParamAttr(name='bn_scale'), + bias_attr=base.ParamAttr(name='bn_bias'), moving_mean_name='bn_moving_mean', moving_variance_name='bn_moving_variance', data_layout=layout, @@ -226,14 +226,14 @@ def _compare_impl(self, place, layout, only_forward): ) np.save(filepath, data[id * stride : (id + 1) * stride]) data = create_or_get_tensor( - scope, "input", OpTest.np_dtype_to_fluid_dtype(data), place + scope, "input", OpTest.np_dtype_to_base_dtype(data), place ) # Single-GPU, N = 32 per GPU main, startup, outs = self._build_program( place, layout, seed, False, only_forward ) - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(startup) fetch_names = [v.name for v in outs] + [ 'bn_moving_mean', @@ -374,8 +374,8 @@ def test_errors(self): cleanup = enable_static() with program_guard(Program(), Program()): my_sync_batch_norm = paddle.nn.SyncBatchNorm(10) - x1 = fluid.create_lod_tensor( - np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CUDAPlace(0) + x1 = base.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], base.CUDAPlace(0) ) self.assertRaises(TypeError, my_sync_batch_norm, x1) @@ -405,8 +405,8 @@ def test_convert(self): paddle.nn.BatchNorm2D(5), paddle.nn.BatchNorm2D( 5, - weight_attr=fluid.ParamAttr(name='bn.scale'), - bias_attr=fluid.ParamAttr(name='bn.bias'), + weight_attr=base.ParamAttr(name='bn.scale'), + bias_attr=base.ParamAttr(name='bn.bias'), ), ) 
model = paddle.nn.SyncBatchNorm.convert_sync_batchnorm(model) @@ -451,7 +451,7 @@ def test_errors(self): if not core.is_compiled_with_cuda(): return - with fluid.dygraph.guard(fluid.CUDAPlace(0)): + with base.dygraph.guard(base.CUDAPlace(0)): my_sync_batch_norm = paddle.nn.SyncBatchNorm(10, data_format='CN') data = np.random.random([3, 3, 3]).astype('float32') x = paddle.to_tensor(data) diff --git a/test/legacy_test/test_take.py b/test/legacy_test/test_take.py index 1098dcdbc4f94..2ea39ae96dc90 100644 --- a/test/legacy_test/test_take.py +++ b/test/legacy_test/test_take.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard class TestTakeAPI(unittest.TestCase): @@ -44,9 +44,9 @@ def setUp(self): self.set_dtype() self.set_input() self.place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) def test_static_graph(self): @@ -62,9 +62,9 @@ def test_static_graph(self): ) out = paddle.take(x, index, mode=self.mode) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) st_result = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={'input': self.input_np, 'index': self.index_np}, fetch_list=out, ) @@ -173,9 +173,9 @@ def setUp(self): self.set_dtype() self.set_input() self.place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) def test_static_index_error(self): @@ -225,9 +225,9 @@ def setUp(self): self.set_dtype() self.set_input() self.place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) diff --git a/test/legacy_test/test_tdm_child_op.py b/test/legacy_test/test_tdm_child_op.py index 3aca9f10c8f78..0bb0e38c6cde6 100644 --- a/test/legacy_test/test_tdm_child_op.py +++ b/test/legacy_test/test_tdm_child_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, paddle_static_guard import paddle -from paddle import fluid +from paddle import base from paddle.incubate.layers.nn import tdm_child @@ -152,14 +152,14 @@ def test_shape(self): x=x, node_nums=26, child_nums=2, - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( initializer=paddle.nn.initializer.Assign(tree_info_np) ), ) - place = fluid.CPUPlace() - exe = fluid.Executor(place=place) - exe.run(fluid.default_startup_program()) + place = base.CPUPlace() + exe = base.Executor(place=place) + exe.run(base.default_startup_program()) feed = { 'x': np.array( diff --git a/test/legacy_test/test_tdm_sampler_op.py b/test/legacy_test/test_tdm_sampler_op.py index 9abaca34066d0..84d003ca323b0 100644 --- a/test/legacy_test/test_tdm_sampler_op.py +++ b/test/legacy_test/test_tdm_sampler_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, paddle_static_guard import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.incubate.layers.nn import tdm_sampler @@ -290,10 +290,10 @@ def test_shape(self): neg_samples_num_list, layer_node_num_list, leaf_node_num, - tree_travel_attr=fluid.ParamAttr( + tree_travel_attr=base.ParamAttr( initializer=paddle.nn.initializer.Assign(travel_array) ), - tree_layer_attr=fluid.ParamAttr( + tree_layer_attr=base.ParamAttr( initializer=paddle.nn.initializer.Assign(layer_array) ), output_positive=True, @@ -303,9 +303,9 @@ def 
test_shape(self): dtype='int32', ) - place = fluid.CPUPlace() - exe = fluid.Executor(place=place) - exe.run(fluid.default_startup_program()) + place = base.CPUPlace() + exe = base.Executor(place=place) + exe.run(base.default_startup_program()) feed = { 'x': np.array( diff --git a/test/legacy_test/test_temporal_shift_op.py b/test/legacy_test/test_temporal_shift_op.py index 4dd5b29fc7014..4c51cbefe7317 100644 --- a/test/legacy_test/test_temporal_shift_op.py +++ b/test/legacy_test/test_temporal_shift_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core def temporal_shift(x, seg_num, shift_ratio, data_format): @@ -140,14 +140,14 @@ def test_api(self): ) # dygraph - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): input = paddle.randn([6, 4, 2, 2]) out = paddle.nn.functional.temporal_shift( x=input, seg_num=2, shift_ratio=0.2 ) def test_static_fp16_gpu(self): - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): place = paddle.CUDAPlace(0) with paddle.static.program_guard( paddle.static.Program(), paddle.static.Program() diff --git a/test/legacy_test/test_tensor.py b/test/legacy_test/test_tensor.py index 01e55791cc985..bb30cc98d8fb3 100644 --- a/test/legacy_test/test_tensor.py +++ b/test/legacy_test/test_tensor.py @@ -17,8 +17,8 @@ import numpy as np -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestTensorPtr(unittest.TestCase): @@ -263,7 +263,7 @@ def test_empty_tensor(self): self.assertEqual((0, 1), tensor_array.shape) def run_slice_tensor(self, place, dtype): - tensor = fluid.Tensor() + tensor = base.Tensor() shape = [3, 3, 3] tensor._set_dims(shape) @@ -358,7 +358,7 @@ def test_tensor_poiter(self): self.assertTrue( isinstance(tensor._mutable_data(place, dtype), numbers.Integral) ) - places = fluid.cuda_pinned_places() + places = base.cuda_pinned_places() self.assertTrue( isinstance( tensor._mutable_data(places[0], dtype), numbers.Integral @@ -367,7 +367,7 @@ def test_tensor_poiter(self): def test_tensor_set_fp16(self): array = np.random.random((300, 500)).astype("float16") - tensor = fluid.Tensor() + tensor = base.Tensor() place = core.CPUPlace() tensor.set(array, place) self.assertEqual(tensor._dtype(), core.VarDesc.VarType.FP16) @@ -386,7 +386,7 @@ def test_tensor_set_fp16(self): def test_tensor_set_int16(self): array = np.random.randint(100, size=(300, 500)).astype("int16") - tensor = fluid.Tensor() + tensor = base.Tensor() place = core.CPUPlace() tensor.set(array, place) self.assertEqual(tensor._dtype(), core.VarDesc.VarType.INT16) @@ -406,7 +406,7 @@ def test_tensor_set_int16(self): def test_tensor_set_from_array_list(self): array = np.random.randint(1000, size=(200, 300)) list_array = [array, array] - tensor = fluid.Tensor() + tensor = base.Tensor() place = core.CPUPlace() tensor.set(list_array, place) self.assertEqual([2, 200, 300], tensor.shape()) @@ -443,7 +443,7 @@ def test_tensor_set_item_complex128(self): array = ( np.random.random((100, 100)) + 1j * np.random.random((100, 100)) ).astype(np.complex128) - tensor = fluid.Tensor() + tensor = base.Tensor() place = core.CPUPlace() tensor.set(array, place) @@ -474,7 +474,7 @@ def test_tensor_set_item_complex64(self): array = ( np.random.random((100, 100)) + 1j * np.random.random((100, 100)) ).astype(np.complex64) - tensor = fluid.Tensor() + tensor = base.Tensor() place = core.CPUPlace() 
tensor.set(array, place) diff --git a/test/legacy_test/test_tensor_array_to_tensor.py b/test/legacy_test/test_tensor_array_to_tensor.py index edca59989b7d5..827e2138fb3f6 100644 --- a/test/legacy_test/test_tensor_array_to_tensor.py +++ b/test/legacy_test/test_tensor_array_to_tensor.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard from paddle.tensor.manipulation import tensor_array_to_tensor paddle.enable_static() @@ -52,7 +52,7 @@ def setUp(self): def test_get_set(self): scope = core.Scope() - program = fluid.Program() + program = base.Program() block = program.global_block() input_arr = block.create_var( @@ -122,7 +122,7 @@ def test_get_set(self): fetch_list.append(block.var('Out')) fetch_list.append(block.var('OutIndex')) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) out = exe.run(program, fetch_list=fetch_list, scope=scope) # print ("index: ", np.array(out[1])) @@ -179,8 +179,8 @@ def setUp(self): var.persistable = True def set_program(self): - self.program = fluid.Program() - with fluid.program_guard(self.program): + self.program = base.Program() + with base.program_guard(self.program): self.array = array = paddle.tensor.create_array(dtype='float32') idx = paddle.tensor.fill_constant(shape=[1], dtype="int64", value=0) for i, x in enumerate(self.inputs): @@ -190,7 +190,7 @@ def set_program(self): input=array, **self.attrs ) loss = paddle.sum(output) - fluid.backward.append_backward(loss) + base.backward.append_backward(loss) self.output_vars = [output] def run_check(self, executor, scope): @@ -208,14 +208,14 @@ def run_check(self, executor, scope): def test_cpu(self): scope = core.Scope() place = core.CPUPlace() - executor = fluid.Executor(place) + executor = base.Executor(place) self.run_check(executor, scope) def test_gpu(self): if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) scope = core.Scope() - executor = fluid.Executor(place) + executor = base.Executor(place) self.run_check(executor, scope) @@ -246,18 +246,18 @@ def test_case(self): inp1 = np.random.rand(2, 3, 4).astype("float32") _outs_static = self._test_case(inp0, inp1) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) outs_static = exe.run(fetch_list=list(_outs_static)) - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): outs_dynamic = self._test_case(inp0, inp1) for s, d in zip(outs_static, outs_dynamic): np.testing.assert_array_equal(s, d.numpy()) def test_while_loop_case(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): zero = paddle.tensor.fill_constant( shape=[1], dtype='int64', value=0 ) diff --git a/test/legacy_test/test_tensor_copy_from.py b/test/legacy_test/test_tensor_copy_from.py index 087da70861d42..0258a464043e7 100644 --- a/test/legacy_test/test_tensor_copy_from.py +++ b/test/legacy_test/test_tensor_copy_from.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid.core import LoDTensor as Tensor +from paddle.base.core import LoDTensor as Tensor class TestTensorCopyFrom(unittest.TestCase): diff --git a/test/legacy_test/test_tensor_fill_.py b/test/legacy_test/test_tensor_fill_.py index 5008630952a68..ee663ed246b39 100644 --- a/test/legacy_test/test_tensor_fill_.py +++ b/test/legacy_test/test_tensor_fill_.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle 
import base class TensorFill_Test(unittest.TestCase): @@ -26,10 +26,10 @@ def setUp(self): def test_tensor_fill_true(self): typelist = ['float32', 'float64', 'int32', 'int64', 'float16'] - places = [fluid.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) - places.append(fluid.CUDAPinnedPlace()) + places = [base.CPUPlace()] + if base.core.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) + places.append(base.CUDAPinnedPlace()) for idx, p in enumerate(places): if idx == 0: @@ -50,10 +50,10 @@ def test_tensor_fill_true(self): def test_tensor_fill_backward(self): typelist = ['float32'] - places = [fluid.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) - places.append(fluid.CUDAPinnedPlace()) + places = [base.CPUPlace()] + if base.core.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) + places.append(base.CUDAPinnedPlace()) for idx, p in enumerate(places): if idx == 0: diff --git a/test/legacy_test/test_tensor_fill_diagonal_.py b/test/legacy_test/test_tensor_fill_diagonal_.py index 68ec97331c368..c48337fa06fe9 100644 --- a/test/legacy_test/test_tensor_fill_diagonal_.py +++ b/test/legacy_test/test_tensor_fill_diagonal_.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base class TensorFillDiagonal_Test(unittest.TestCase): @@ -30,9 +30,9 @@ def test_dim2_normal(self): ) typelist = ['float32', 'float64', 'int32', 'int64'] - places = [fluid.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places = [base.CPUPlace()] + if base.core.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) for idx, p in enumerate(places): if idx == 0: @@ -65,9 +65,9 @@ def test_offset(self): ) typelist = ['float32', 'float64', 'int32', 'int64'] - places = [fluid.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places = [base.CPUPlace()] + if base.core.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) for idx, p in enumerate(places): if idx == 0: @@ -97,9 +97,9 @@ def test_bool(self): ) typelist = ['bool'] - places = [fluid.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places = [base.CPUPlace()] + if base.core.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) for idx, p in enumerate(places): if idx == 0: @@ -138,9 +138,9 @@ def test_dim2_unnormal_wrap(self): ).astype('float32') typelist = ['float32', 'float64', 'int32', 'int64'] - places = [fluid.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places = [base.CPUPlace()] + if base.core.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) for idx, p in enumerate(places): if idx == 0: @@ -189,9 +189,9 @@ def test_dim2_unnormal_unwrap(self): ).astype('float32') typelist = ['float32', 'float64', 'int32', 'int64'] - places = [fluid.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places = [base.CPUPlace()] + if base.core.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) for idx, p in enumerate(places): if idx == 0: @@ -232,9 +232,9 @@ def test_dim_larger2_normal(self): ).astype('float32') typelist = ['float32', 'float64', 'int32', 'int64'] - places = [fluid.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places = [base.CPUPlace()] + if base.core.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) for idx, p in enumerate(places): 
if idx == 0: diff --git a/test/legacy_test/test_tensor_fill_diagonal_tensor.py b/test/legacy_test/test_tensor_fill_diagonal_tensor.py index c09666eaa7ce1..7409cdae1f007 100644 --- a/test/legacy_test/test_tensor_fill_diagonal_tensor.py +++ b/test/legacy_test/test_tensor_fill_diagonal_tensor.py @@ -18,15 +18,15 @@ import paddle import paddle.nn.functional as F -from paddle import fluid +from paddle import base class TensorFillDiagTensor_Test(unittest.TestCase): def setUp(self): self.typelist = ['float32', 'float64', 'int32', 'int64'] - self.places = [fluid.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): - self.places.append(fluid.CUDAPlace(0)) + self.places = [base.CPUPlace()] + if base.core.is_compiled_with_cuda(): + self.places.append(base.CUDAPlace(0)) def test_dim2(self): expected_np = np.array( diff --git a/test/legacy_test/test_tensor_fill_diagonal_tensor_.py b/test/legacy_test/test_tensor_fill_diagonal_tensor_.py index 992e09394891a..482f3e542f6fc 100644 --- a/test/legacy_test/test_tensor_fill_diagonal_tensor_.py +++ b/test/legacy_test/test_tensor_fill_diagonal_tensor_.py @@ -18,15 +18,15 @@ import paddle import paddle.nn.functional as F -from paddle import fluid +from paddle import base class TensorFillDiagTensor_Test(unittest.TestCase): def setUp(self): self.typelist = ['float32', 'float64', 'int32', 'int64'] - self.places = [fluid.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): - self.places.append(fluid.CUDAPlace(0)) + self.places = [base.CPUPlace()] + if base.core.is_compiled_with_cuda(): + self.places.append(base.CUDAPlace(0)) def test_dim2(self): expected_np = np.array( diff --git a/test/legacy_test/test_tensor_register_hook.py b/test/legacy_test/test_tensor_register_hook.py index d0fa1fe25974b..29c614713dff3 100644 --- a/test/legacy_test/test_tensor_register_hook.py +++ b/test/legacy_test/test_tensor_register_hook.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid, nn -from paddle.fluid import core +from paddle import base, nn +from paddle.base import core class SimpleNet(nn.Layer): @@ -475,7 +475,7 @@ def double_print_hook(grad): # after changed by hook: 8.0 # TODO(wuweilong): enable this case when DoubleGrad in eager mode is ready - if fluid.in_dygraph_mode(): + if base.in_dygraph_mode(): pass else: z.backward() diff --git a/test/legacy_test/test_tensor_to_list.py b/test/legacy_test/test_tensor_to_list.py index 0bc6162694215..b7c8093ca49d2 100644 --- a/test/legacy_test/test_tensor_to_list.py +++ b/test/legacy_test/test_tensor_to_list.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base class TensorToListTest(unittest.TestCase): @@ -25,10 +25,10 @@ def setUp(self): self.shape = [11, 25, 32, 43] def test_tensor_tolist(self): - places = [fluid.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) - places.append(fluid.CUDAPinnedPlace()) + places = [base.CPUPlace()] + if base.core.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) + places.append(base.CUDAPinnedPlace()) for p in places: np_arr = np.reshape( diff --git a/test/legacy_test/test_tensor_to_numpy.py b/test/legacy_test/test_tensor_to_numpy.py index e9eab862e7042..8aa70a9b26ba2 100644 --- a/test/legacy_test/test_tensor_to_numpy.py +++ b/test/legacy_test/test_tensor_to_numpy.py @@ -16,7 +16,7 @@ import numpy as np -from paddle import fluid +from paddle import base class TensorToNumpyTest(unittest.TestCase): @@ -34,10 +34,10 @@ def test_main(self): 'bool', ] - places = [fluid.CPUPlace()] - 
if fluid.core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) - places.append(fluid.CUDAPinnedPlace()) + places = [base.CPUPlace()] + if base.core.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) + places.append(base.CUDAPinnedPlace()) for p in places: for dtype in dtypes: @@ -46,7 +46,7 @@ def test_main(self): self.shape, ) - t = fluid.LoDTensor() + t = base.LoDTensor() t.set(np_arr, p) ret_np_arr = np.array(t) diff --git a/test/legacy_test/test_tensor_uva.py b/test/legacy_test/test_tensor_uva.py index 7942734473d95..34d7e59609e0b 100644 --- a/test/legacy_test/test_tensor_uva.py +++ b/test/legacy_test/test_tensor_uva.py @@ -17,12 +17,12 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core class TestTensorCopyFrom(unittest.TestCase): def test_main(self): - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): place = paddle.CPUPlace() np_value = np.random.random(size=[10, 30]).astype('float32') tensor = paddle.to_tensor(np_value, place=place) @@ -32,7 +32,7 @@ def test_main(self): class TestUVATensorFromNumpy(unittest.TestCase): def test_uva_tensor_creation(self): - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): dtype_list = [ "int32", "int64", @@ -54,7 +54,7 @@ def test_uva_tensor_creation(self): np.testing.assert_allclose(tensor2.numpy(), data, rtol=1e-05) def test_uva_tensor_corectness(self): - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): a = np.arange(0, 100, dtype="int32") a = a.reshape([10, 10]) slice_a = a[:, 5] diff --git a/test/legacy_test/test_tensor_zero_.py b/test/legacy_test/test_tensor_zero_.py index 6e34de1370908..0cfbfc7f1ae67 100644 --- a/test/legacy_test/test_tensor_zero_.py +++ b/test/legacy_test/test_tensor_zero_.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base class TensorFill_Test(unittest.TestCase): @@ -26,10 +26,10 @@ def setUp(self): def test_tensor_fill_true(self): typelist = ['float32', 'float64', 'int32', 'int64', 'float16'] - places = [fluid.CPUPlace()] - if fluid.core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) - places.append(fluid.CUDAPinnedPlace()) + places = [base.CPUPlace()] + if base.core.is_compiled_with_cuda(): + places.append(base.CUDAPlace(0)) + places.append(base.CUDAPinnedPlace()) for p in places: np_arr = np.reshape( diff --git a/test/legacy_test/test_tensordot.py b/test/legacy_test/test_tensordot.py index 4c647a1df17af..16d2015573d10 100644 --- a/test/legacy_test/test_tensordot.py +++ b/test/legacy_test/test_tensordot.py @@ -17,13 +17,13 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core np.random.seed(2021) def tensordot_np(x, y, axes): - if isinstance(axes, paddle.fluid.framework.Variable): + if isinstance(axes, paddle.base.framework.Variable): axes = axes.tolist() # np.tensordot does not support empty axes @@ -228,7 +228,7 @@ def test_static(self): def test_fp16_with_gpu(self): paddle.enable_static() - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): for axes in self.all_axes: place = paddle.CUDAPlace(0) with paddle.static.program_guard( diff --git a/test/legacy_test/test_tf32_cublas.py b/test/legacy_test/test_tf32_cublas.py index b78695966ab83..c211bf5b5c053 100644 --- a/test/legacy_test/test_tf32_cublas.py +++ b/test/legacy_test/test_tf32_cublas.py @@ -17,14 +17,14 @@ import numpy as 
np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestTF32Switch(unittest.TestCase): def test_on_off(self): if core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) + place = base.CUDAPlace(0) self.assertTrue(core.get_cublas_switch()) # default core.set_cublas_switch(False) self.assertFalse(core.get_cublas_switch()) # turn off @@ -39,9 +39,9 @@ def test_on_off(self): class TestTF32OnMatmul(unittest.TestCase): def test_dygraph_without_out(self): if core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) + place = base.CUDAPlace(0) core.set_cublas_switch(False) # turn off - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_array1 = np.random.rand(4, 12, 64, 88).astype("float32") input_array2 = np.random.rand(4, 12, 88, 512).astype("float32") data1 = paddle.to_tensor(input_array1) diff --git a/test/legacy_test/test_tf32_cudnn.py b/test/legacy_test/test_tf32_cudnn.py index 18f24ab35ad2e..547757c6b9b8b 100644 --- a/test/legacy_test/test_tf32_cudnn.py +++ b/test/legacy_test/test_tf32_cudnn.py @@ -14,7 +14,7 @@ import unittest -from paddle.fluid import core +from paddle.base import core class TestTF32Switch(unittest.TestCase): diff --git a/test/legacy_test/test_tile_op.py b/test/legacy_test/test_tile_op.py index 282a0a52e86e8..e64e7113653ec 100644 --- a/test/legacy_test/test_tile_op.py +++ b/test/legacy_test/test_tile_op.py @@ -20,8 +20,8 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard # Situation 1: repeat_times is a list (without tensor) @@ -346,8 +346,8 @@ def test_check_output(self): class TestTileError(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): - x1 = fluid.create_lod_tensor( - np.array([[-1]]), [[1]], fluid.CPUPlace() + x1 = base.create_lod_tensor( + np.array([[-1]]), [[1]], base.CPUPlace() ) repeat_times = [2, 2] self.assertRaises(TypeError, paddle.tile, x1, repeat_times) @@ -373,7 +373,7 @@ def test_api(self): # Test python API class TestTileAPI(unittest.TestCase): def test_api(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): np_x = np.random.random([12, 14]).astype("float32") x = paddle.to_tensor(np_x) @@ -416,9 +416,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -447,9 +447,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) diff --git a/test/legacy_test/test_top_k_v2_op.py b/test/legacy_test/test_top_k_v2_op.py index b3fa77086941b..df4cd67b70753 100644 --- a/test/legacy_test/test_top_k_v2_op.py +++ b/test/legacy_test/test_top_k_v2_op.py @@ -22,7 +22,7 @@ ) import paddle -from paddle.fluid import core +from paddle.base import core def numpy_topk(x, k=1, axis=-1, largest=True): @@ -288,7 +288,7 @@ def setUp(self): self.large_input_data = np.random.rand(2, 1030) def run_dygraph(self, place): - with paddle.fluid.dygraph.guard(place): + with paddle.base.dygraph.guard(place): input_tensor = 
paddle.to_tensor(self.input_data) large_input_tensor = paddle.to_tensor(self.large_input_data) # test case for basic test case 1 @@ -474,7 +474,7 @@ def test_cases(self): self.run_static(place) def test_errors(self): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x = paddle.to_tensor([1, 2, 3]) with self.assertRaises(BaseException): paddle.topk(x, k=-1) diff --git a/test/legacy_test/test_trace_op.py b/test/legacy_test/test_trace_op.py index f13dd1701affe..7b8f66b4bc35d 100644 --- a/test/legacy_test/test_trace_op.py +++ b/test/legacy_test/test_trace_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid, tensor -from paddle.fluid import core +from paddle import base, tensor +from paddle.base import core class TestTraceOp(OpTest): @@ -154,9 +154,9 @@ def test_case1(self): out2 = tensor.trace(data1, offset=-5, axis1=1, axis2=-1) place = core.CPUPlace() - exe = fluid.Executor(place) + exe = base.Executor(place) results = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"data1": case}, fetch_list=[out1, out2], return_numpy=True, diff --git a/test/legacy_test/test_traced_layer_err_msg.py b/test/legacy_test/test_traced_layer_err_msg.py index 7365f29975cb5..29f010aa209d0 100644 --- a/test/legacy_test/test_traced_layer_err_msg.py +++ b/test/legacy_test/test_traced_layer_err_msg.py @@ -20,7 +20,7 @@ import numpy as np import paddle -from paddle import fluid, nn +from paddle import base, nn class SimpleFCLayer(nn.Layer): @@ -60,17 +60,17 @@ def tearDown(self): self.temp_dir.cleanup() def test_trace_err(self): - if fluid.framework.in_dygraph_mode(): + if base.framework.in_dygraph_mode(): return - with fluid.dygraph.guard(): - in_x = fluid.dygraph.to_variable( + with base.dygraph.guard(): + in_x = base.dygraph.to_variable( np.random.random((self.batch_size, self.feature_size)).astype( 'float32' ) ) with self.assertRaises(AssertionError) as e: - dygraph_out, traced_layer = fluid.dygraph.TracedLayer.trace( + dygraph_out, traced_layer = base.dygraph.TracedLayer.trace( None, [in_x] ) self.assertEqual( @@ -80,77 +80,77 @@ def test_trace_err(self): str(e.exception), ) with self.assertRaises(TypeError) as e: - dygraph_out, traced_layer = fluid.dygraph.TracedLayer.trace( + dygraph_out, traced_layer = base.dygraph.TracedLayer.trace( self.layer, 3 ) self.assertEqual( - "The type of 'each element of inputs' in paddle.jit.TracedLayer.trace must be fluid.Variable, but received <{} 'int'>.".format( + "The type of 'each element of inputs' in paddle.jit.TracedLayer.trace must be base.Variable, but received <{} 'int'>.".format( self.type_str ), str(e.exception), ) with self.assertRaises(TypeError) as e: - dygraph_out, traced_layer = fluid.dygraph.TracedLayer.trace( + dygraph_out, traced_layer = base.dygraph.TracedLayer.trace( self.layer, [True, 1] ) self.assertEqual( - "The type of 'each element of inputs' in paddle.jit.TracedLayer.trace must be fluid.Variable, but received <{} 'bool'>.".format( + "The type of 'each element of inputs' in paddle.jit.TracedLayer.trace must be base.Variable, but received <{} 'bool'>.".format( self.type_str ), str(e.exception), ) - dygraph_out, traced_layer = fluid.dygraph.TracedLayer.trace( + dygraph_out, traced_layer = base.dygraph.TracedLayer.trace( self.layer, [in_x] ) def test_set_strategy_err(self): - if fluid.framework.in_dygraph_mode(): + if base.framework.in_dygraph_mode(): return - with fluid.dygraph.guard(): - in_x = fluid.dygraph.to_variable( + with 
base.dygraph.guard(): + in_x = base.dygraph.to_variable( np.random.random((self.batch_size, self.feature_size)).astype( 'float32' ) ) - dygraph_out, traced_layer = fluid.dygraph.TracedLayer.trace( + dygraph_out, traced_layer = base.dygraph.TracedLayer.trace( self.layer, [in_x] ) with self.assertRaises(AssertionError) as e: - traced_layer.set_strategy(1, fluid.ExecutionStrategy()) + traced_layer.set_strategy(1, base.ExecutionStrategy()) self.assertEqual( - "The type of 'build_strategy' in paddle.jit.TracedLayer.set_strategy must be fluid.BuildStrategy, but received <{} 'int'>.".format( + "The type of 'build_strategy' in paddle.jit.TracedLayer.set_strategy must be base.BuildStrategy, but received <{} 'int'>.".format( self.type_str ), str(e.exception), ) with self.assertRaises(AssertionError) as e: - traced_layer.set_strategy(fluid.BuildStrategy(), False) + traced_layer.set_strategy(base.BuildStrategy(), False) self.assertEqual( - "The type of 'exec_strategy' in paddle.jit.TracedLayer.set_strategy must be fluid.ExecutionStrategy, but received <{} 'bool'>.".format( + "The type of 'exec_strategy' in paddle.jit.TracedLayer.set_strategy must be base.ExecutionStrategy, but received <{} 'bool'>.".format( self.type_str ), str(e.exception), ) - traced_layer.set_strategy(build_strategy=fluid.BuildStrategy()) - traced_layer.set_strategy(exec_strategy=fluid.ExecutionStrategy()) + traced_layer.set_strategy(build_strategy=base.BuildStrategy()) + traced_layer.set_strategy(exec_strategy=base.ExecutionStrategy()) traced_layer.set_strategy( - fluid.BuildStrategy(), fluid.ExecutionStrategy() + base.BuildStrategy(), base.ExecutionStrategy() ) def test_save_inference_model_err(self): - if fluid.framework.in_dygraph_mode(): + if base.framework.in_dygraph_mode(): return - with fluid.dygraph.guard(): - in_x = fluid.dygraph.to_variable( + with base.dygraph.guard(): + in_x = base.dygraph.to_variable( np.random.random((self.batch_size, self.feature_size)).astype( 'float32' ) ) - dygraph_out, traced_layer = fluid.dygraph.TracedLayer.trace( + dygraph_out, traced_layer = base.dygraph.TracedLayer.trace( self.layer, [in_x] ) @@ -207,7 +207,7 @@ def test_save_inference_model_err(self): def _train_simple_net(self): layer = None - with fluid.dygraph.guard(): + with base.dygraph.guard(): layer = SimpleFCLayer( self.feature_size, self.batch_size, self.fc_size ) @@ -216,7 +216,7 @@ def _train_simple_net(self): ) for i in range(5): - in_x = fluid.dygraph.to_variable( + in_x = base.dygraph.to_variable( np.random.random( (self.batch_size, self.feature_size) ).astype('float32') @@ -230,12 +230,12 @@ def _train_simple_net(self): class TestOutVarWithNoneErrMsg(unittest.TestCase): def test_linear_net_with_none(self): - if fluid.framework.in_dygraph_mode(): + if base.framework.in_dygraph_mode(): return model = LinearNetWithNone(100, 16) in_x = paddle.to_tensor(np.random.random((4, 100)).astype('float32')) with self.assertRaises(TypeError): - dygraph_out, traced_layer = fluid.dygraph.TracedLayer.trace( + dygraph_out, traced_layer = base.dygraph.TracedLayer.trace( model, [in_x] ) @@ -255,12 +255,12 @@ def tearDown(self): self.temp_dir.cleanup() def test_mkdir_when_input_path_non_exist(self): - if fluid.framework.in_dygraph_mode(): + if base.framework.in_dygraph_mode(): return fc_layer = SimpleFCLayer(3, 4, 2) input_var = paddle.to_tensor(np.random.random([4, 3]).astype('float32')) - with fluid.dygraph.guard(): - dygraph_out, traced_layer = fluid.dygraph.TracedLayer.trace( + with base.dygraph.guard(): + dygraph_out, traced_layer = 
base.dygraph.TracedLayer.trace( fc_layer, inputs=[input_var] ) self.assertFalse(os.path.exists(os.path.dirname(self.save_path))) diff --git a/test/legacy_test/test_trainable.py b/test/legacy_test/test_trainable.py index 07501923885a9..e6703637212c3 100644 --- a/test/legacy_test/test_trainable.py +++ b/test/legacy_test/test_trainable.py @@ -18,7 +18,7 @@ from simple_nets import init_data import paddle -from paddle import fluid +from paddle import base paddle.enable_static() @@ -27,7 +27,7 @@ def test_trainable(): x = paddle.static.data(name='image', shape=[-1, 784], dtype='float32') label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64') feature = paddle.static.nn.fc( - x, size=10, weight_attr=fluid.ParamAttr(trainable=False) + x, size=10, weight_attr=base.ParamAttr(trainable=False) ) loss = paddle.nn.functional.cross_entropy( input=feature, label=label, reduction='none', use_softmax=False @@ -40,13 +40,13 @@ class TestTrainable(unittest.TestCase): def check_trainable( self, model, feed_dict, op_count, optimizer=paddle.optimizer.Adam() ): - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) - main = fluid.Program() - startup = fluid.Program() + main = base.Program() + startup = base.Program() - with fluid.program_guard(main, startup): + with base.program_guard(main, startup): loss = model() optimizer.minimize(loss) @@ -58,7 +58,7 @@ def check_trainable( else: assert ops[op] == op_count[op] - exe.run(fluid.default_startup_program()) + exe.run(base.default_startup_program()) exe.run(feed=feed_dict) def test_trainable(self): diff --git a/test/legacy_test/test_trainer_desc.py b/test/legacy_test/test_trainer_desc.py index 1e5ebd2977055..787015116744a 100644 --- a/test/legacy_test/test_trainer_desc.py +++ b/test/legacy_test/test_trainer_desc.py @@ -18,7 +18,7 @@ import unittest -from paddle import fluid +from paddle import base class TestTrainerDesc(unittest.TestCase): @@ -28,7 +28,7 @@ def test_config(self): """ Testcase for python config. 
""" - trainer_desc = fluid.trainer_desc.TrainerDesc() + trainer_desc = base.trainer_desc.TrainerDesc() trainer_desc._set_dump_fields(["a", "b"]) trainer_desc._set_mpi_rank(1) trainer_desc._set_dump_fields_path("path") @@ -46,7 +46,7 @@ def test_config_dump_simple(self): """ Testcase for dump_in_simple_mode """ - trainer_desc = fluid.trainer_desc.TrainerDesc() + trainer_desc = base.trainer_desc.TrainerDesc() trainer_desc._set_dump_fields(["a", "b"]) trainer_desc._set_is_dump_in_simple_mode(True) diff --git a/test/legacy_test/test_trans_layout_op.py b/test/legacy_test/test_trans_layout_op.py index e6e1967d75e00..3617cdb013b8a 100644 --- a/test/legacy_test/test_trans_layout_op.py +++ b/test/legacy_test/test_trans_layout_op.py @@ -44,11 +44,11 @@ def test_check_output(self): class LayoutAutoTune(unittest.TestCase): def test_config(self): - paddle.fluid.core.enable_layout_autotune() + paddle.base.core.enable_layout_autotune() if self.use_autoune(): - self.assertEqual(paddle.fluid.core.use_layout_autotune(), True) - paddle.fluid.core.disable_layout_autotune() - self.assertEqual(paddle.fluid.core.use_layout_autotune(), False) + self.assertEqual(paddle.base.core.use_layout_autotune(), True) + paddle.base.core.disable_layout_autotune() + self.assertEqual(paddle.base.core.use_layout_autotune(), False) self.use_autoune() def setUp(self): @@ -60,7 +60,7 @@ def use_autoune(self): paddle.incubate.autotune.set_config( config={"layout": {"enable": True}} ) - return paddle.fluid.core.use_layout_autotune() + return paddle.base.core.use_layout_autotune() else: config = {"layout": {"enable": False}} tfile = tempfile.NamedTemporaryFile(mode="w+", delete=False) @@ -68,7 +68,7 @@ def use_autoune(self): tfile.close() paddle.incubate.autotune.set_config(tfile.name) os.remove(tfile.name) - return paddle.fluid.core.use_layout_autotune() + return paddle.base.core.use_layout_autotune() def test_flatten_op_transposer(self): conv = paddle.nn.Conv2D(3, 8, (3, 3)) diff --git a/test/legacy_test/test_transfer_dtype_op.py b/test/legacy_test/test_transfer_dtype_op.py index d9d4310b0a516..0a087b5d4f205 100644 --- a/test/legacy_test/test_transfer_dtype_op.py +++ b/test/legacy_test/test_transfer_dtype_op.py @@ -22,7 +22,7 @@ ) import paddle -from paddle.fluid import core +from paddle.base import core class TestTransferDtypeOpFp32ToFp64(OpTest): diff --git a/test/legacy_test/test_transfer_layout_op.py b/test/legacy_test/test_transfer_layout_op.py index 6dff0622c1b10..ffcd979b0ba06 100644 --- a/test/legacy_test/test_transfer_layout_op.py +++ b/test/legacy_test/test_transfer_layout_op.py @@ -18,10 +18,10 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.framework import Program, program_guard -from paddle.fluid.layer_helper import LayerHelper +from paddle import base +from paddle.base import core +from paddle.base.framework import Program, program_guard +from paddle.base.layer_helper import LayerHelper def transpose_layout(x, src_layout, dst_layout): @@ -75,11 +75,11 @@ def test_layout_transfer(self): z = softmax_with_data_format(y, data_format='NHWC') place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(startup_program) ret = exe.run( main_program, diff --git a/test/legacy_test/test_transformer_api.py b/test/legacy_test/test_transformer_api.py index d313c513595ba..a4297155de0e9 100644 --- 
a/test/legacy_test/test_transformer_api.py +++ b/test/legacy_test/test_transformer_api.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.nn.layer.transformer import ( MultiHeadAttention, Transformer, @@ -187,7 +187,7 @@ def scaled_dot_product_attention(q, k, v, d_key, attn_mask, multi_head_attn): def cal_qkv(key, value, num_heads, embed_dim, multi_head_attn): - with fluid.dygraph.guard(): + with base.dygraph.guard(): head_dim = embed_dim // num_heads k_weight = multi_head_attn.k_proj.weight.numpy() v_weight = multi_head_attn.v_proj.weight.numpy() @@ -226,8 +226,8 @@ def prepare_qkv( def add(x, y=None): - fluid.enable_dygraph() - with fluid.dygraph.guard(): + base.enable_dygraph() + with base.dygraph.guard(): x = x.numpy() if not isinstance(x, np.ndarray) else x if y is not None: x += y @@ -241,8 +241,8 @@ def relu(x): def layer_norm(x, normalized_shape, norm, epsilon=1e-05, act=None): - fluid.enable_dygraph() - with fluid.dygraph.guard(): + base.enable_dygraph() + with base.dygraph.guard(): # scale: weight = norm.weight.numpy() # shift: @@ -265,8 +265,8 @@ def layer_norm(x, normalized_shape, norm, epsilon=1e-05, act=None): def ffn(src, encoder_layer, ffn_fc1_act="relu"): assert ffn_fc1_act == "relu", "only relu is supported" - fluid.enable_dygraph() - with fluid.dygraph.guard(): + base.enable_dygraph() + with base.dygraph.guard(): src = src.numpy() if not isinstance(src, np.ndarray) else src w1 = encoder_layer.linear1.weight.numpy() w2 = encoder_layer.linear2.weight.numpy() @@ -284,7 +284,7 @@ def multihead_attention_test_helper(self_attention, cache): paddle.seed(2020) paddle.framework.random._manual_program_seed(2020) # self_attention|cross_attention, cache|No cache - with fluid.dygraph.guard(fluid.CPUPlace()): + with base.dygraph.guard(base.CPUPlace()): # generate params for multi_head_attention ( batch_size, @@ -398,7 +398,7 @@ def multihead_attention_test_helper(self_attention, cache): multihead_attention_test_helper(False, False) def test_transformer_encoder_layer(self): - with fluid.dygraph.guard(fluid.CPUPlace()): + with base.dygraph.guard(base.CPUPlace()): paddle.framework.seed(2020) paddle.framework.random._manual_program_seed(2020) @@ -463,7 +463,7 @@ def test_transformer_encoder_layer(self): ) def test_transformer_encoder_layer_attr_1(self): - with fluid.dygraph.guard(fluid.CPUPlace()): + with base.dygraph.guard(base.CPUPlace()): paddle.framework.seed(2020) paddle.framework.random._manual_program_seed(2020) @@ -544,7 +544,7 @@ def test_transformer_encoder_layer_attr_1(self): ) def test_transformer_decoder_layer(self): - with fluid.dygraph.guard(fluid.CPUPlace()): + with base.dygraph.guard(base.CPUPlace()): paddle.framework.seed(2020) activation = "relu" normalize_before = False @@ -679,7 +679,7 @@ def test_encoder(self): (batch_size, n_head, sequence_length, sequence_length) ).astype("float32") src_mask[0][0][0][0] = -np.inf - with fluid.dygraph.guard(fluid.CPUPlace()): + with base.dygraph.guard(base.CPUPlace()): encoder_layer = TransformerEncoderLayer( d_model, n_head, dim_feedforward, dropout ) @@ -710,7 +710,7 @@ def test_encoder_attr_1(self): (batch_size, n_head, sequence_length, sequence_length) ).astype("float32") src_mask[0][0][0][0] = -np.inf - with fluid.dygraph.guard(fluid.CPUPlace()): + with base.dygraph.guard(base.CPUPlace()): for cache in [True, False]: # paddle encoder_layer = TransformerEncoderLayer( @@ -755,7 +755,7 @@ def test_decoder(self): (batch_size, n_head, target_length, source_length) 
).astype("float32") memory_mask[0][0][0][0] = -1e9 - with fluid.dygraph.guard(fluid.CPUPlace()): + with base.dygraph.guard(base.CPUPlace()): decoder_layer = TransformerDecoderLayer( d_model, n_head, dim_feedforward, dropout ) @@ -783,7 +783,7 @@ def test_transformer(self): ) = generate_basic_params(mode="decoder_layer") # batch_size, source_length, target_length, d_model, n_head = 4, 8, 8, 64, 8 - with fluid.dygraph.guard(fluid.CPUPlace()): + with base.dygraph.guard(base.CPUPlace()): transformer = Transformer( d_model, n_head, @@ -834,7 +834,7 @@ def test_transformer_attr_1(self): ) = generate_basic_params(mode="decoder_layer") # batch_size, source_length, target_length, d_model, n_head = 4, 8, 8, 64, 8 - with fluid.dygraph.guard(fluid.CPUPlace()): + with base.dygraph.guard(base.CPUPlace()): transformer = Transformer( d_model, n_head, @@ -887,7 +887,7 @@ def test_transformer_attr_2(self): ) = generate_basic_params(mode="decoder_layer") # batch_size, source_length, target_length, d_model, n_head = 4, 8, 8, 64, 8 - with fluid.dygraph.guard(fluid.CPUPlace()): + with base.dygraph.guard(base.CPUPlace()): transformer = Transformer( d_model, n_head, @@ -940,7 +940,7 @@ def test_transformer_attr_3(self): ) = generate_basic_params(mode="decoder_layer") # batch_size, source_length, target_length, d_model, n_head = 4, 8, 8, 64, 8 - with fluid.dygraph.guard(fluid.CPUPlace()): + with base.dygraph.guard(base.CPUPlace()): transformer = Transformer( d_model, n_head, @@ -993,7 +993,7 @@ def test_transformer_attr_boolean(self): ) = generate_basic_params(mode="decoder_layer") # batch_size, source_length, target_length, d_model, n_head = 4, 8, 8, 64, 8 - with fluid.dygraph.guard(fluid.CPUPlace()): + with base.dygraph.guard(base.CPUPlace()): transformer = Transformer( d_model, n_head, diff --git a/test/legacy_test/test_transpose_op.py b/test/legacy_test/test_transpose_op.py index 5f4ba4fb188de..adb0ca0779f87 100644 --- a/test/legacy_test/test_transpose_op.py +++ b/test/legacy_test/test_transpose_op.py @@ -20,8 +20,8 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard paddle.enable_static() @@ -194,9 +194,9 @@ def setUp(self): } def initTestCase(self): - fluid.core.set_autotune_range(0, 3) - fluid.core.update_autotune_status() - fluid.core.enable_autotune() + base.core.set_autotune_range(0, 3) + base.core.update_autotune_status() + base.core.enable_autotune() self.shape = (1, 12, 256, 1) self.axis = (0, 3, 2, 1) @@ -206,7 +206,7 @@ def init_op_type(self): def test_check_output(self): self.check_output(no_check_set=['XShape']) - fluid.core.disable_autotune() + base.core.disable_autotune() def test_check_grad(self): self.check_grad(['X'], 'Out', check_prim=True) @@ -231,9 +231,9 @@ def setUp(self): } def initTestCase(self): - fluid.core.set_autotune_range(0, 3) - fluid.core.update_autotune_status() - fluid.core.enable_autotune() + base.core.set_autotune_range(0, 3) + base.core.update_autotune_status() + base.core.enable_autotune() self.shape = (1, 12, 256, 1) self.axis = (0, 3, 2, 1) @@ -243,7 +243,7 @@ def init_op_type(self): def test_check_output(self): self.check_output(no_check_set=['XShape']) - fluid.core.disable_autotune() + base.core.disable_autotune() def test_check_grad(self): self.check_grad(['X'], 'Out', check_prim=True) @@ -275,9 +275,9 @@ def if_enable_cinn(self): self.enable_cinn = False def initTestCase(self): - 
fluid.core.set_autotune_range(0, 3) - fluid.core.update_autotune_status() - fluid.core.enable_autotune() + base.core.set_autotune_range(0, 3) + base.core.update_autotune_status() + base.core.enable_autotune() self.shape = (2, 8, 10) self.axis = (0, 2, 1) @@ -287,7 +287,7 @@ def init_op_type(self): def test_check_output(self): self.check_output(no_check_set=['XShape']) - fluid.core.disable_autotune() + base.core.disable_autotune() def test_check_grad(self): self.check_grad(['X'], 'Out', check_prim=True) @@ -549,66 +549,66 @@ def test_dygraph_out(self): class TestTAPI(unittest.TestCase): def test_out(self): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): data = paddle.static.data(shape=[10], dtype="float64", name="data") data_t = paddle.t(data) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) data_np = np.random.random([10]).astype("float64") (result,) = exe.run(feed={"data": data_np}, fetch_list=[data_t]) expected_result = np.transpose(data_np) self.assertEqual((result == expected_result).all(), True) - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): data = paddle.static.data( shape=[10, 5], dtype="float64", name="data" ) data_t = paddle.t(data) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) data_np = np.random.random([10, 5]).astype("float64") (result,) = exe.run(feed={"data": data_np}, fetch_list=[data_t]) expected_result = np.transpose(data_np) self.assertEqual((result == expected_result).all(), True) - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): data = paddle.static.data( shape=[1, 5], dtype="float64", name="data" ) data_t = paddle.t(data) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) data_np = np.random.random([1, 5]).astype("float64") (result,) = exe.run(feed={"data": data_np}, fetch_list=[data_t]) expected_result = np.transpose(data_np) self.assertEqual((result == expected_result).all(), True) - with fluid.dygraph.guard(): + with base.dygraph.guard(): np_x = np.random.random([10]).astype("float64") - data = fluid.dygraph.to_variable(np_x) + data = base.dygraph.to_variable(np_x) z = paddle.t(data) np_z = z.numpy() z_expected = np.array(np.transpose(np_x)) self.assertEqual((np_z == z_expected).all(), True) - with fluid.dygraph.guard(): + with base.dygraph.guard(): np_x = np.random.random([10, 5]).astype("float64") - data = fluid.dygraph.to_variable(np_x) + data = base.dygraph.to_variable(np_x) z = paddle.t(data) np_z = z.numpy() z_expected = np.array(np.transpose(np_x)) self.assertEqual((np_z == z_expected).all(), True) - with fluid.dygraph.guard(): + with base.dygraph.guard(): np_x = np.random.random([1, 5]).astype("float64") - data = fluid.dygraph.to_variable(np_x) + data = base.dygraph.to_variable(np_x) z = paddle.t(data) np_z = z.numpy() z_expected = np.array(np.transpose(np_x)) self.assertEqual((np_z == z_expected).all(), True) def test_errors(self): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): x = paddle.static.data(name='x', shape=[10, 5, 3], dtype='float64') def test_x_dimension_check(): @@ -622,7 +622,7 @@ def test_moveaxis1(self): x_np = np.random.randn(2, 3, 4, 5, 7) expected = np.moveaxis(x_np, [0, 4, 3, 2], [1, 3, 2, 0]) paddle.enable_static() - with paddle.static.program_guard(fluid.Program()): + with 
paddle.static.program_guard(base.Program()): x = paddle.static.data("x", shape=[2, 3, 4, 5, 7], dtype='float64') out = paddle.moveaxis(x, [0, 4, 3, 2], [1, 3, 2, 0]) @@ -642,7 +642,7 @@ def test_moveaxis2(self): x_np = np.random.randn(2, 3, 5) expected = np.moveaxis(x_np, -2, -1) paddle.enable_static() - with paddle.static.program_guard(fluid.Program()): + with paddle.static.program_guard(base.Program()): x = paddle.static.data("x", shape=[2, 3, 5], dtype='float64') out = x.moveaxis(-2, -1) @@ -722,9 +722,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -753,9 +753,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) diff --git a/test/legacy_test/test_trapezoid.py b/test/legacy_test/test_trapezoid.py index de18e75512717..d81590e5a50b3 100644 --- a/test/legacy_test/test_trapezoid.py +++ b/test/legacy_test/test_trapezoid.py @@ -228,7 +228,7 @@ def set_api(self): def test_fp16_with_gpu(self): paddle.enable_static() - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): place = paddle.CUDAPlace(0) with paddle.static.program_guard( paddle.static.Program(), paddle.static.Program() @@ -253,7 +253,7 @@ def test_fp16_with_gpu(self): ) def test_fp16_func_dygraph(self): - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): place = paddle.CUDAPlace(0) paddle.disable_static() input_y = np.random.random([4, 4]) diff --git a/test/legacy_test/test_triangular_solve_op.py b/test/legacy_test/test_triangular_solve_op.py index 23cb650b14bbe..60f699d88822d 100644 --- a/test/legacy_test/test_triangular_solve_op.py +++ b/test/legacy_test/test_triangular_solve_op.py @@ -21,8 +21,8 @@ from eager_op_test import OpTest import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard paddle.enable_static() @@ -257,7 +257,7 @@ def setUp(self): self.place.append(paddle.CUDAPlace(0)) def check_static_result(self, place): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): x = paddle.static.data(name="x", shape=[3, 3], dtype=self.dtype) y = paddle.static.data(name="y", shape=[3, 2], dtype=self.dtype) z = paddle.linalg.triangular_solve(x, y) @@ -266,9 +266,9 @@ def check_static_result(self, place): y_np = np.random.random([3, 2]).astype(self.dtype) z_np = np.linalg.solve(np.triu(x_np), y_np) - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x": x_np, "y": y_np}, fetch_list=[z], ) @@ -301,11 +301,11 @@ class TestTriangularSolveOpError(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): # The input type of solve_op must be Variable. 
- x1 = fluid.create_lod_tensor( - np.array([[-1]]), [[1]], fluid.CPUPlace() + x1 = base.create_lod_tensor( + np.array([[-1]]), [[1]], base.CPUPlace() ) - y1 = fluid.create_lod_tensor( - np.array([[-1]]), [[1]], fluid.CPUPlace() + y1 = base.create_lod_tensor( + np.array([[-1]]), [[1]], base.CPUPlace() ) self.assertRaises(TypeError, paddle.linalg.triangular_solve, x1, y1) diff --git a/test/legacy_test/test_tril_indices_op.py b/test/legacy_test/test_tril_indices_op.py index db29ecb8acfb0..26d19e44c2887 100644 --- a/test/legacy_test/test_tril_indices_op.py +++ b/test/legacy_test/test_tril_indices_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest import paddle -from paddle import fluid +from paddle import base class TestTrilIndicesOp(OpTest): @@ -60,8 +60,8 @@ def init_config(self): class TestTrilIndicesAPICaseStatic(unittest.TestCase): def test_static(self): places = ( - [paddle.CPUPlace(), paddle.fluid.CUDAPlace(0)] - if fluid.core.is_compiled_with_cuda() + [paddle.CPUPlace(), paddle.base.CUDAPlace(0)] + if base.core.is_compiled_with_cuda() else [paddle.CPUPlace()] ) paddle.enable_static() @@ -79,12 +79,12 @@ def test_static(self): class TestTrilIndicesAPICaseDygraph(unittest.TestCase): def test_dygraph(self): places = ( - [paddle.CPUPlace(), paddle.fluid.CUDAPlace(0)] - if fluid.core.is_compiled_with_cuda() + [paddle.CPUPlace(), paddle.base.CUDAPlace(0)] + if base.core.is_compiled_with_cuda() else [paddle.CPUPlace()] ) for place in places: - with fluid.dygraph.base.guard(place=place): + with base.dygraph.base.guard(place=place): out1 = paddle.tril_indices(4, 4, 2) expected_result1 = np.tril_indices(4, 2, 4) self.assertEqual((out1.numpy() == expected_result1).all(), True) @@ -120,7 +120,7 @@ def test_default_CPU(self): expected_result = np.tril_indices(4, 2) np.testing.assert_allclose(result, expected_result, rtol=1e-05) - with fluid.dygraph.base.guard(paddle.CPUPlace()): + with base.dygraph.base.guard(paddle.CPUPlace()): out = paddle.tril_indices(4, None, 2) expected_result = np.tril_indices(4, 2) self.assertEqual((out.numpy() == expected_result).all(), True) diff --git a/test/legacy_test/test_tril_triu_op.py b/test/legacy_test/test_tril_triu_op.py index c2b80f1c4a9cf..c0304354a1bd5 100644 --- a/test/legacy_test/test_tril_triu_op.py +++ b/test/legacy_test/test_tril_triu_op.py @@ -17,9 +17,9 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid, tensor -from paddle.fluid import core -from paddle.fluid.framework import Program, program_guard +from paddle import base, tensor +from paddle.base import core +from paddle.base.framework import Program, program_guard class TrilTriuOpDefaultTest(OpTest): @@ -215,13 +215,13 @@ def test_api(self): tril_out, triu_out = tensor.tril(x), tensor.triu(x) place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) tril_out, triu_out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x": data}, fetch_list=[tril_out, triu_out], ) @@ -233,9 +233,9 @@ def test_api_with_dygraph(self): dtypes = ['float16', 'float32'] for dtype in dtypes: - with fluid.dygraph.guard(): + with base.dygraph.guard(): data = np.random.random([1, 9, 9, 4]).astype(dtype) - x = fluid.dygraph.to_variable(data) + x = base.dygraph.to_variable(data) tril_out, triu_out = ( tensor.tril(x).numpy(), tensor.triu(x).numpy(), @@ -243,7 +243,7 @@ 
def test_api_with_dygraph(self): np.testing.assert_allclose(tril_out, np.tril(data), rtol=1e-05) np.testing.assert_allclose(triu_out, np.triu(data), rtol=1e-05) - def test_fluid_api(self): + def test_base_api(self): paddle.enable_static() dtypes = ['float16', 'float32'] @@ -258,13 +258,13 @@ def test_fluid_api(self): triu_out = paddle.triu(x) place = ( - fluid.CUDAPlace(0) - if fluid.core.is_compiled_with_cuda() - else fluid.CPUPlace() + base.CUDAPlace(0) + if base.core.is_compiled_with_cuda() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) triu_out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x": data}, fetch_list=[triu_out], ) diff --git a/test/legacy_test/test_trilinear_interp_op.py b/test/legacy_test/test_trilinear_interp_op.py index 031639fc080aa..bbdc14e434cfb 100755 --- a/test/legacy_test/test_trilinear_interp_op.py +++ b/test/legacy_test/test_trilinear_interp_op.py @@ -17,7 +17,7 @@ import numpy as np from eager_op_test import OpTest -from paddle.fluid import core +from paddle.base import core def trilinear_interp_np( diff --git a/test/legacy_test/test_trilinear_interp_v2_op.py b/test/legacy_test/test_trilinear_interp_v2_op.py index 43d4a9587f7fd..68101d5c5e0f7 100755 --- a/test/legacy_test/test_trilinear_interp_v2_op.py +++ b/test/legacy_test/test_trilinear_interp_v2_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.nn.functional import interpolate np.random.seed(123) @@ -964,7 +964,7 @@ def init_test_case(self): @unittest.skipIf( - not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA" + not base.core.is_compiled_with_cuda(), "core is not compiled with CUDA" ) class TestTrilinearInterpOpForFloat16(unittest.TestCase): def init_test_case(self): @@ -1007,7 +1007,7 @@ def test_main(self): @unittest.skipIf( - not fluid.core.is_compiled_with_cuda(), "core is not compiled with CUDA" + not base.core.is_compiled_with_cuda(), "core is not compiled with CUDA" ) class TestTrilinearInterpDatalayoutForFloat16(TestTrilinearInterpOpForFloat16): def init_test_case(self): diff --git a/test/legacy_test/test_triu_indices_op.py b/test/legacy_test/test_triu_indices_op.py index b560cec7dbae5..4f5c7610ddd7e 100644 --- a/test/legacy_test/test_triu_indices_op.py +++ b/test/legacy_test/test_triu_indices_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest import paddle -from paddle import fluid +from paddle import base class TestTriuIndicesOp(OpTest): @@ -59,8 +59,8 @@ def init_config(self): class TestTriuIndicesAPICaseStatic(unittest.TestCase): def test_static(self): - if fluid.core.is_compiled_with_cuda(): - place = paddle.fluid.CUDAPlace(0) + if base.core.is_compiled_with_cuda(): + place = paddle.base.CUDAPlace(0) else: place = paddle.CPUPlace() with paddle.static.program_guard( @@ -75,11 +75,11 @@ def test_static(self): class TestTriuIndicesAPICaseDygraph(unittest.TestCase): def test_dygraph(self): - if fluid.core.is_compiled_with_cuda(): - place = paddle.fluid.CUDAPlace(0) + if base.core.is_compiled_with_cuda(): + place = paddle.base.CUDAPlace(0) else: place = paddle.CPUPlace() - with fluid.dygraph.base.guard(place=place): + with base.dygraph.base.guard(place=place): out = paddle.triu_indices(4, 4, 2) expected_result = np.triu_indices(4, 2, 4) np.testing.assert_array_equal(out, expected_result) @@ -115,7 +115,7 @@ def test_default_CPU(self): 
expected_result = np.triu_indices(4, 2) np.testing.assert_array_equal(result[0], expected_result) - with fluid.dygraph.base.guard(paddle.CPUPlace()): + with base.dygraph.base.guard(paddle.CPUPlace()): out = paddle.triu_indices(4, None, 2) expected_result = np.triu_indices(4, 2) np.testing.assert_array_equal(out, expected_result) diff --git a/test/legacy_test/test_trunc_op.py b/test/legacy_test/test_trunc_op.py index 404403fca0453..d1dae7c8f4081 100644 --- a/test/legacy_test/test_trunc_op.py +++ b/test/legacy_test/test_trunc_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/legacy_test/test_truncated_gaussian_random_op.py b/test/legacy_test/test_truncated_gaussian_random_op.py index 9380d4f81ea36..0f56c5f9ef15e 100644 --- a/test/legacy_test/test_truncated_gaussian_random_op.py +++ b/test/legacy_test/test_truncated_gaussian_random_op.py @@ -17,9 +17,9 @@ import numpy import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.executor import Executor +from paddle import base +from paddle.base import core +from paddle.base.executor import Executor class TestTrunctedGaussianRandomOp(unittest.TestCase): @@ -35,16 +35,16 @@ def setUp(self): self.outputs = ["Out"] def test_cpu(self): - self.gaussian_random_test(place=fluid.CPUPlace()) - self.gaussian_random_test_eager(place=fluid.CPUPlace()) + self.gaussian_random_test(place=base.CPUPlace()) + self.gaussian_random_test_eager(place=base.CPUPlace()) def test_gpu(self): if core.is_compiled_with_cuda(): - self.gaussian_random_test(place=fluid.CUDAPlace(0)) - self.gaussian_random_test_eager(place=fluid.CUDAPlace(0)) + self.gaussian_random_test(place=base.CUDAPlace(0)) + self.gaussian_random_test_eager(place=base.CUDAPlace(0)) def gaussian_random_test(self, place): - program = fluid.Program() + program = base.Program() block = program.global_block() vout = block.create_var(name="Out") op = block.append_op( @@ -67,7 +67,7 @@ def gaussian_random_test(self, place): # TruncatedNormal.__call__ has no return value, so here call _C_ops api # directly def gaussian_random_test_eager(self, place): - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): out = paddle._C_ops.truncated_gaussian_random( self.attrs["shape"], self.attrs["mean"], diff --git a/test/legacy_test/test_unbind_op.py b/test/legacy_test/test_unbind_op.py index 670433a84c8b5..45a4d92fef019 100644 --- a/test/legacy_test/test_unbind_op.py +++ b/test/legacy_test/test_unbind_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid, tensor -from paddle.fluid import Program, program_guard +from paddle import base, tensor +from paddle.base import Program, program_guard class TestUnbind(unittest.TestCase): @@ -30,10 +30,10 @@ def test_unbind(self): [out_0, out_1] = tensor.unbind(input=x_1, axis=0) input_1 = np.random.random([2, 3]).astype("float32") axis = paddle.static.data(shape=[], dtype='int32', name='axis') - exe = fluid.Executor(place=fluid.CPUPlace()) + exe = base.Executor(place=base.CPUPlace()) [res_1, res_2] = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x_1": input_1, "axis": 0}, fetch_list=[out_0, out_1], ) @@ -42,7 +42,7 @@ def test_unbind(self): np.testing.assert_array_equal(res_2, input_1[1, 0:100]) def test_unbind_static_fp16_gpu(self): - if paddle.fluid.core.is_compiled_with_cuda(): + if 
paddle.base.core.is_compiled_with_cuda(): place = paddle.CUDAPlace(0) with paddle.static.program_guard( paddle.static.Program(), paddle.static.Program() @@ -65,7 +65,7 @@ def test_unbind_static_fp16_gpu(self): np.testing.assert_array_equal(res[1], input[1, :]) def test_unbind_dygraph(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): np_x = np.random.random([2, 3]).astype("float32") x = paddle.to_tensor(np_x) x.stop_gradient = False @@ -88,10 +88,10 @@ def test_layers_unbind(self): [out_0, out_1] = paddle.unbind(input=x_1, axis=0) input_1 = np.random.random([2, 3]).astype("float32") axis = paddle.static.data(shape=[], dtype='int32', name='axis') - exe = fluid.Executor(place=fluid.CPUPlace()) + exe = base.Executor(place=base.CPUPlace()) [res_1, res_2] = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x_1": input_1, "axis": 0}, fetch_list=[out_0, out_1], ) diff --git a/test/legacy_test/test_unfold_op.py b/test/legacy_test/test_unfold_op.py index 2ed5bf3f82c85..9e9c0806708de 100644 --- a/test/legacy_test/test_unfold_op.py +++ b/test/legacy_test/test_unfold_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestUnfoldOp(OpTest): @@ -203,14 +203,14 @@ def setUp(self): self.op_type = 'unfold' self.python_api = paddle.nn.functional.unfold self.set_data() - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) def test_dygraph(self): for place in self.places: - with fluid.dygraph.guard(place): - input = fluid.dygraph.to_variable(self.inputs['X']) + with base.dygraph.guard(place): + input = base.dygraph.to_variable(self.inputs['X']) m = paddle.nn.Unfold(**self.attrs) m.eval() result = m(input) diff --git a/test/legacy_test/test_uniform_random_bf16_op.py b/test/legacy_test/test_uniform_random_bf16_op.py index 426013daa2013..2e92c2041107b 100644 --- a/test/legacy_test/test_uniform_random_bf16_op.py +++ b/test/legacy_test/test_uniform_random_bf16_op.py @@ -20,8 +20,8 @@ from test_uniform_random_op import output_hist, output_hist_diag import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.tensor import random @@ -163,17 +163,17 @@ class TestUniformRandomOpAPISeed(unittest.TestCase): def test_attr_tensor_API(self): _seed = 10 gen = paddle.seed(_seed) - startup_program = fluid.Program() - train_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): + startup_program = base.Program() + train_program = base.Program() + with base.program_guard(train_program, startup_program): _min = 5 _max = 10 ret = paddle.uniform([2, 3, 2], min=_min, max=_max, seed=_seed) ret_2 = paddle.uniform([2, 3, 2], min=_min, max=_max, seed=_seed) res = paddle.equal(ret, ret_2) - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) exe.run(startup_program) ret_value, cmp_value = exe.run(train_program, fetch_list=[ret, res]) @@ -237,9 +237,9 @@ def test_check_output(self): class TestUniformRandomBatchSizeLikeOpBF16API(unittest.TestCase): def test_attr_tensorlist_int32_API(self): - startup_program = fluid.Program() - train_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): + startup_program = 
base.Program() + train_program = base.Program() + with base.program_guard(train_program, startup_program): input = paddle.static.data( name="input", shape=[1, 3], dtype='uint16' ) @@ -247,8 +247,8 @@ def test_attr_tensorlist_int32_API(self): input, [2, 4], dtype=np.uint16 ) # out_1.shape=[1, 4] - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) exe.run(startup_program) outs = exe.run(train_program, fetch_list=[out_1]) diff --git a/test/legacy_test/test_uniform_random_inplace_op.py b/test/legacy_test/test_uniform_random_inplace_op.py index c4a243f47897b..f022656d1ada6 100644 --- a/test/legacy_test/test_uniform_random_inplace_op.py +++ b/test/legacy_test/test_uniform_random_inplace_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, convert_uint16_to_float import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def output_hist(out): @@ -46,7 +46,7 @@ def test_fp64(): self.assertEqual(tensor_fp64.dtype, paddle.float64) places = ['cpu'] - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): places.append('gpu') for place in places: paddle.set_device(place) @@ -219,7 +219,7 @@ def test_attr_error(): class TestUniformRandomInplaceOpEmptyTensor(unittest.TestCase): def test_uniform_random_inplace_op_empty_tensor(self): places = ['cpu'] - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): places.append('gpu') test_shapes = [(200, 0), (0, 200)] for place in places: @@ -249,7 +249,7 @@ def test_grad(): self.assertTrue((uniform_grad == 0).all()) places = ['cpu'] - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): places.append('gpu') for place in places: paddle.set_device(place) diff --git a/test/legacy_test/test_uniform_random_op.py b/test/legacy_test/test_uniform_random_op.py index ad74c34908bef..4cfafe4c7a84e 100644 --- a/test/legacy_test/test_uniform_random_op.py +++ b/test/legacy_test/test_uniform_random_op.py @@ -21,9 +21,9 @@ from test_attribute_var import UnittestBase import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard -from paddle.fluid.framework import convert_np_dtype_to_dtype_ +from paddle import base +from paddle.base import Program, core, program_guard +from paddle.base.framework import convert_np_dtype_to_dtype_ from paddle.tensor import random @@ -179,7 +179,7 @@ def verify_output(self, outs): def test_check_api(self): places = self._get_places() for place in places: - with fluid.dygraph.base.guard(place=place): + with base.dygraph.base.guard(place=place): out = self.python_api( self.attrs['shape'], self.dtype, @@ -208,8 +208,8 @@ def test_errors(self): with program_guard(main_prog, start_prog): def test_Variable(): - x1 = fluid.create_lod_tensor( - np.zeros((4, 784)), [[1, 1, 1, 1]], fluid.CPUPlace() + x1 = base.create_lod_tensor( + np.zeros((4, 784)), [[1, 1, 1, 1]], base.CPUPlace() ) paddle.uniform(x1) @@ -223,7 +223,7 @@ def test_Variable2(): def test_out_dtype(): out = paddle.uniform(shape=[3, 4], dtype='float64') - self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64) + self.assertEqual(out.dtype, base.core.VarDesc.VarType.FP64) test_out_dtype() @@ -314,60 +314,60 @@ def test_api(self): ), ) - place = fluid.CPUPlace() - x_tensor = fluid.create_lod_tensor( + place = base.CPUPlace() + x_tensor = base.create_lod_tensor( np.random.rand(3, 16).astype("float32"), [[1, 2]], place ) - exe = fluid.Executor(place) - 
exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + exe.run(base.default_startup_program()) ret = exe.run(feed={'x': x_tensor}, fetch_list=[y], return_numpy=False) class TestUniformRandomOp_attr_tensor_API(unittest.TestCase): def test_attr_tensor_API(self): - startup_program = fluid.Program() - train_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): + startup_program = base.Program() + train_program = base.Program() + with base.program_guard(train_program, startup_program): dim_tensor = paddle.tensor.fill_constant([1], "int64", 3) ret = paddle.uniform([1, dim_tensor, 2]) - place = fluid.CPUPlace() - if fluid.core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) + place = base.CPUPlace() + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) + exe = base.Executor(place) exe.run(startup_program) outs = exe.run(train_program, fetch_list=[ret]) def test_attr_tensorlist_int32_API(self): - startup_program = fluid.Program() - train_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): + startup_program = base.Program() + train_program = base.Program() + with base.program_guard(train_program, startup_program): dim_1 = paddle.tensor.fill_constant([1], "int64", 3) dim_2 = paddle.tensor.fill_constant([1], "int32", 2) ret = paddle.uniform([1, dim_1, dim_2]) - place = fluid.CPUPlace() - if fluid.core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) + place = base.CPUPlace() + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) + exe = base.Executor(place) exe.run(startup_program) outs = exe.run(train_program, fetch_list=[ret]) def test_attr_tensor_int32_API(self): - startup_program = fluid.Program() - train_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): + startup_program = base.Program() + train_program = base.Program() + with base.program_guard(train_program, startup_program): shape = paddle.static.data( name='shape_tensor', shape=[2], dtype="int32" ) ret = paddle.uniform(shape) - place = fluid.CPUPlace() - if fluid.core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) + place = base.CPUPlace() + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) + exe = base.Executor(place) Shape = np.array([2, 3]).astype('int32') exe.run(startup_program) outs = exe.run( @@ -379,19 +379,19 @@ class TestUniformRandomOp_API_seed(unittest.TestCase): def test_attr_tensor_API(self): _seed = 10 gen = paddle.seed(_seed) - startup_program = fluid.Program() - train_program = fluid.Program() - with fluid.program_guard(train_program, startup_program): + startup_program = base.Program() + train_program = base.Program() + with base.program_guard(train_program, startup_program): _min = 5 _max = 10 ret = paddle.uniform([2, 3, 2], min=_min, max=_max, seed=_seed) ret_2 = paddle.uniform([2, 3, 2], min=_min, max=_max, seed=_seed) res = paddle.equal(ret, ret_2) - place = fluid.CPUPlace() - if fluid.core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) + place = base.CPUPlace() + if base.core.is_compiled_with_cuda(): + place = base.CUDAPlace(0) + exe = base.Executor(place) exe.run(startup_program) ret_value, cmp_value = exe.run(train_program, fetch_list=[ret, res]) @@ -467,7 +467,7 @@ def check_with_place(self, place): class TestUniformRandomDygraphMode(unittest.TestCase): def test_check_output(self): - with fluid.dygraph.guard(): + 
with base.dygraph.guard(): x = paddle.uniform([10], dtype="float32", min=0.0, max=1.0) x_np = x.numpy() for i in range(10): @@ -481,8 +481,8 @@ def test_errors(self): with program_guard(main_prog, start_prog): def test_Variable(): - x1 = fluid.create_lod_tensor( - np.zeros((100, 784)), [[10, 10, 10, 70]], fluid.CPUPlace() + x1 = base.create_lod_tensor( + np.zeros((100, 784)), [[10, 10, 10, 70]], base.CPUPlace() ) random.uniform_random_batch_size_like(x1) @@ -524,8 +524,8 @@ def test_errors(self): with program_guard(main_prog, start_prog): def test_Variable(): - x1 = fluid.create_lod_tensor( - np.zeros((100, 784)), [[10, 10, 10, 70]], fluid.CPUPlace() + x1 = base.create_lod_tensor( + np.zeros((100, 784)), [[10, 10, 10, 70]], base.CPUPlace() ) paddle.tensor.random.uniform(x1) @@ -549,14 +549,14 @@ def test_out_dtype(): out = paddle.tensor.random.uniform( shape=[3, 4], dtype='float64' ) - self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64) + self.assertEqual(out.dtype, base.core.VarDesc.VarType.FP64) test_out_dtype() class TestUniformDygraphMode(unittest.TestCase): def test_check_output(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): x = paddle.tensor.random.uniform( [10], dtype="float32", min=0.0, max=1.0 ) @@ -572,17 +572,17 @@ def test_default_dtype(self): def test_default_fp16(): paddle.framework.set_default_dtype('float16') out = paddle.tensor.random.uniform([2, 3]) - self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP16) + self.assertEqual(out.dtype, base.core.VarDesc.VarType.FP16) def test_default_fp32(): paddle.framework.set_default_dtype('float32') out = paddle.tensor.random.uniform([2, 3]) - self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP32) + self.assertEqual(out.dtype, base.core.VarDesc.VarType.FP32) def test_default_fp64(): paddle.framework.set_default_dtype('float64') out = paddle.tensor.random.uniform([2, 3]) - self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64) + self.assertEqual(out.dtype, base.core.VarDesc.VarType.FP64) def test_dygraph_fp16(): if not paddle.is_compiled_with_cuda(): @@ -590,7 +590,7 @@ def test_dygraph_fp16(): return paddle.set_device('gpu') out = paddle.uniform([2, 3], dtype=paddle.float16) - self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP16) + self.assertEqual(out.dtype, base.core.VarDesc.VarType.FP16) if paddle.is_compiled_with_cuda(): paddle.set_device('gpu') diff --git a/test/legacy_test/test_unique.py b/test/legacy_test/test_unique.py index 438a50df403cc..cf1746a4fd1d4 100644 --- a/test/legacy_test/test_unique.py +++ b/test/legacy_test/test_unique.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16, paddle_static_guard import paddle -from paddle.fluid import core +from paddle.base import core class TestUniqueOp(OpTest): diff --git a/test/legacy_test/test_unique_consecutive_op.py b/test/legacy_test/test_unique_consecutive_op.py index aa903830b7a43..587ef404b7c39 100644 --- a/test/legacy_test/test_unique_consecutive_op.py +++ b/test/legacy_test/test_unique_consecutive_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def reference_unique_consecutive( @@ -199,12 +199,12 @@ def setUp(self): class TestUniqueConsecutiveAPI(unittest.TestCase): def setUp(self): - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) 
def check_static_result(self, place): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): paddle.enable_static() input_x = paddle.static.data( name="input_x", @@ -215,9 +215,9 @@ def check_static_result(self, place): ) result = paddle.unique_consecutive(input_x) x_np = np.random.randint(20, size=100).astype("float32") - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input_x": x_np}, fetch_list=[result], ) @@ -228,7 +228,7 @@ def test_static(self): def test_dygraph(self): for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_x = np.random.randint(20, size=100).astype("float64") x = paddle.to_tensor(input_x) result = paddle.unique_consecutive(x) @@ -236,12 +236,12 @@ def test_dygraph(self): class TestUniqueConsecutiveCase2API(unittest.TestCase): def setUp(self): - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) def check_static_result(self, place): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): paddle.enable_static() input_x = paddle.static.data( name="input_x", @@ -254,9 +254,9 @@ def check_static_result(self, place): input_x, return_inverse=True, return_counts=True ) x_np = np.random.randint(20, size=100).astype("float32") - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input_x": x_np}, fetch_list=[result], ) @@ -267,7 +267,7 @@ def test_static(self): def test_dygraph(self): for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_x = np.random.randint(20, size=100).astype("float64") x = paddle.to_tensor(input_x) result, inverse, counts = paddle.unique_consecutive( @@ -277,12 +277,12 @@ def test_dygraph(self): class TestUniqueConsecutiveCase3API(unittest.TestCase): def setUp(self): - self.places = [fluid.CPUPlace()] + self.places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - self.places.append(fluid.CUDAPlace(0)) + self.places.append(base.CUDAPlace(0)) def check_static_result(self, place): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): paddle.enable_static() input_x = paddle.static.data( name="input_x", @@ -295,9 +295,9 @@ def check_static_result(self, place): input_x, return_inverse=True, return_counts=True, axis=-1 ) x_np = np.random.randint(20, size=100).astype("float32") - exe = fluid.Executor(place) + exe = base.Executor(place) fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input_x": x_np}, fetch_list=[result], ) @@ -308,7 +308,7 @@ def test_static(self): def test_dygraph(self): for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_x = np.random.randint(20, size=100).astype("float64") x = paddle.to_tensor(input_x) result, inverse, counts = paddle.unique_consecutive( diff --git a/test/legacy_test/test_unique_name.py b/test/legacy_test/test_unique_name.py index a0b6d790200ed..f6b6c18b12682 100644 --- a/test/legacy_test/test_unique_name.py +++ b/test/legacy_test/test_unique_name.py @@ -14,40 +14,40 @@ import unittest -from paddle import fluid +from paddle import 
base class TestUniqueName(unittest.TestCase): def test_guard(self): - with fluid.unique_name.guard(): - name_1 = fluid.unique_name.generate('') + with base.unique_name.guard(): + name_1 = base.unique_name.generate('') - with fluid.unique_name.guard(): - name_2 = fluid.unique_name.generate('') + with base.unique_name.guard(): + name_2 = base.unique_name.generate('') self.assertEqual(name_1, name_2) - with fluid.unique_name.guard("A"): - name_1 = fluid.unique_name.generate('') + with base.unique_name.guard("A"): + name_1 = base.unique_name.generate('') - with fluid.unique_name.guard('B'): - name_2 = fluid.unique_name.generate('') + with base.unique_name.guard('B'): + name_2 = base.unique_name.generate('') self.assertNotEqual(name_1, name_2) def test_generate(self): - with fluid.unique_name.guard(): - name1 = fluid.unique_name.generate('fc') - name2 = fluid.unique_name.generate('fc') - name3 = fluid.unique_name.generate('tmp') + with base.unique_name.guard(): + name1 = base.unique_name.generate('fc') + name2 = base.unique_name.generate('fc') + name3 = base.unique_name.generate('tmp') self.assertNotEqual(name1, name2) self.assertEqual(name1[-2:], name3[-2:]) class TestImperativeUniqueName(unittest.TestCase): def test_name_generator(self): - with fluid.dygraph.guard(): - tracer = fluid.framework._dygraph_tracer() + with base.dygraph.guard(): + tracer = base.framework._dygraph_tracer() tmp_var_0 = tracer._generate_unique_name() self.assertEqual(tmp_var_0, "dygraph_tmp_0") diff --git a/test/legacy_test/test_unique_with_counts.py b/test/legacy_test/test_unique_with_counts.py index 2690d61802476..364276a5e473d 100644 --- a/test/legacy_test/test_unique_with_counts.py +++ b/test/legacy_test/test_unique_with_counts.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, paddle_static_guard import paddle -from paddle.fluid import core +from paddle.base import core class TestUniqueWithCountsOp(OpTest): diff --git a/test/legacy_test/test_unpool1d_op.py b/test/legacy_test/test_unpool1d_op.py index e3cab5fe6d5ee..8d6435cc2a86f 100644 --- a/test/legacy_test/test_unpool1d_op.py +++ b/test/legacy_test/test_unpool1d_op.py @@ -61,7 +61,7 @@ def unpool1dmax_forward_naive( class TestUnpool1DOpAPI_dygraph(unittest.TestCase): def test_case(self): places = [paddle.CPUPlace()] - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): places.append(paddle.CUDAPlace(0)) for place in places: paddle.disable_static() @@ -86,7 +86,7 @@ def test_case(self): class TestUnpool1DOpAPI_dygraph2(unittest.TestCase): def test_case(self): places = [paddle.CPUPlace()] - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): places.append(paddle.CUDAPlace(0)) for place in places: paddle.disable_static() @@ -111,7 +111,7 @@ def test_case(self): class TestUnpool1DOpAPI_dygraph3(unittest.TestCase): def test_case(self): places = [paddle.CPUPlace()] - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): places.append(paddle.CUDAPlace(0)) for place in places: paddle.disable_static() @@ -138,7 +138,7 @@ class TestUnpool1DOpAPI_static(unittest.TestCase): def test_case(self): paddle.enable_static() places = [paddle.CPUPlace()] - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): places.append(paddle.CUDAPlace(0)) for place in places: with paddle.static.program_guard( @@ -157,9 +157,9 @@ def test_case(self): output, indices, kernel_size=2, stride=None ) - exe = paddle.fluid.Executor(place) + 
exe = paddle.base.Executor(place) fetches = exe.run( - paddle.fluid.default_main_program(), + paddle.base.default_main_program(), feed={"x": input_data}, fetch_list=[output_unpool], return_numpy=True, diff --git a/test/legacy_test/test_unpool3d_op.py b/test/legacy_test/test_unpool3d_op.py index bb12198f9910f..f8404d9680985 100644 --- a/test/legacy_test/test_unpool3d_op.py +++ b/test/legacy_test/test_unpool3d_op.py @@ -19,7 +19,7 @@ import paddle import paddle.nn.functional as F -from paddle.fluid import core +from paddle.base import core paddle.enable_static() paddle.seed(2022) @@ -284,7 +284,7 @@ def data_outputsize_error2(): class TestUnpool3DOpAPI_dygraph(unittest.TestCase): def test_case(self): places = [paddle.CPUPlace()] - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): places.append(paddle.CUDAPlace(0)) for place in places: paddle.disable_static() @@ -314,7 +314,7 @@ def test_case(self): class TestUnpool3DOpAPI_dygraph2(unittest.TestCase): def test_case(self): places = [paddle.CPUPlace()] - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): places.append(paddle.CUDAPlace(0)) for place in places: paddle.disable_static() @@ -344,7 +344,7 @@ def test_case(self): class TestUnpool3DOpAPI_dygraph3(unittest.TestCase): def test_case(self): places = [paddle.CPUPlace()] - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): places.append(paddle.CUDAPlace(0)) for place in places: paddle.disable_static() @@ -376,7 +376,7 @@ class TestUnpool3DOpAPI_static(unittest.TestCase): def test_case(self): paddle.enable_static() places = [paddle.CPUPlace()] - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): places.append(paddle.CUDAPlace(0)) for place in places: with paddle.static.program_guard( @@ -412,9 +412,9 @@ def test_case(self): output, indices, kernel_size=2, stride=None ) - exe = paddle.fluid.Executor(place) + exe = paddle.base.Executor(place) fetches = exe.run( - paddle.fluid.default_main_program(), + paddle.base.default_main_program(), feed={"x": input_data}, fetch_list=[output_unpool], return_numpy=True, diff --git a/test/legacy_test/test_unpool_op.py b/test/legacy_test/test_unpool_op.py index ecb7798ff09dd..6020b7b87f17a 100644 --- a/test/legacy_test/test_unpool_op.py +++ b/test/legacy_test/test_unpool_op.py @@ -21,7 +21,7 @@ import paddle import paddle.nn.functional as F -from paddle.fluid import Program, core, program_guard +from paddle.base import Program, core, program_guard def _unpool_output_size(x, kernel_size, stride, padding, output_size): @@ -281,14 +281,14 @@ def test_case(self): import paddle import paddle.nn.functional as F - from paddle import fluid - from paddle.fluid import core + from paddle import base + from paddle.base import core if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) else: place = core.CPUPlace() - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_data = np.array( [ [ @@ -322,14 +322,14 @@ def test_case(self): import paddle import paddle.nn.functional as F - from paddle import fluid - from paddle.fluid import core + from paddle import base + from paddle.base import core if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) else: place = core.CPUPlace() - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_data = np.array( [ [ @@ -362,14 +362,14 @@ def test_case(self): import numpy as np import paddle - from paddle import fluid - from 
paddle.fluid import core + from paddle import base + from paddle.base import core if core.is_compiled_with_cuda(): place = core.CUDAPlace(0) else: place = core.CPUPlace() - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_data = np.array( [ [ @@ -402,8 +402,8 @@ class TestUnpoolOpAPI_st(unittest.TestCase): def test_case(self): import paddle import paddle.nn.functional as F - from paddle import fluid - from paddle.fluid import core + from paddle import base + from paddle.base import core paddle.enable_static() @@ -422,11 +422,11 @@ def test_case(self): place = core.CUDAPlace(0) else: place = core.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + exe.run(base.default_startup_program()) results = exe.run( - paddle.fluid.default_main_program(), + paddle.base.default_main_program(), feed={"x": input_data}, fetch_list=[unpool_out], return_numpy=True, diff --git a/test/legacy_test/test_unsqueeze_op.py b/test/legacy_test/test_unsqueeze_op.py index 85d21f5646472..0fbc44a728eed 100755 --- a/test/legacy_test/test_unsqueeze_op.py +++ b/test/legacy_test/test_unsqueeze_op.py @@ -20,8 +20,8 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -381,9 +381,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) @@ -412,9 +412,9 @@ def func(self, place): def test_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func(p) diff --git a/test/legacy_test/test_unstack_op.py b/test/legacy_test/test_unstack_op.py index 44830b66d4c1d..e07c96ce19019 100755 --- a/test/legacy_test/test_unstack_op.py +++ b/test/legacy_test/test_unstack_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestUnStackOpBase(OpTest): @@ -167,7 +167,7 @@ def test_check_output(self): self.check_output_with_place(place) def test_check_grad(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): x = paddle.to_tensor(self.inputs['X']) x.stop_gradient = False y = paddle.unstack( diff --git a/test/legacy_test/test_unzip_op.py b/test/legacy_test/test_unzip_op.py index a4136b3d12ce2..8125edc15125a 100644 --- a/test/legacy_test/test_unzip_op.py +++ b/test/legacy_test/test_unzip_op.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestUnzipOp(unittest.TestCase): @@ -28,7 +28,7 @@ def test_result(self): """ paddle.enable_static() if core.is_compiled_with_cuda(): - place = fluid.CUDAPlace(0) + place = base.CUDAPlace(0) x = paddle.static.data(name='X', shape=[3, 4], dtype='float64') lod = paddle.static.data(name='lod', shape=[11], dtype='int64') output = paddle.incubate.operators.unzip(x, lod) @@ -45,8 +45,8 @@ def test_result(self): 'lod': np.array(lod).astype("int64"), } - exe = fluid.Executor(place=place) - exe.run(fluid.default_startup_program()) 
+ exe = base.Executor(place=place) + exe.run(base.default_startup_program()) res = exe.run(feed=feed, fetch_list=[output]) out = [ [1.0, 2.0, 3.0, 4.0], diff --git a/test/legacy_test/test_update_loss_scaling_op.py b/test/legacy_test/test_update_loss_scaling_op.py index 56ffc0499699a..32cee8125fc27 100644 --- a/test/legacy_test/test_update_loss_scaling_op.py +++ b/test/legacy_test/test_update_loss_scaling_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, convert_float_to_uint16, paddle_static_guard import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.static.amp import amp_nn @@ -206,7 +206,7 @@ def test_check_output(self): class TestUpdateLossScalingLayer(unittest.TestCase): - def loss_scaling_check(self, use_cuda=True, scope=fluid.Scope()): + def loss_scaling_check(self, use_cuda=True, scope=base.Scope()): with paddle_static_guard(): a = paddle.static.data( name="a", shape=[1024, 1024], dtype='float32' @@ -251,10 +251,10 @@ def loss_scaling_check(self, use_cuda=True, scope=fluid.Scope()): name="update_loss_scaling", ) - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) - with fluid.scope_guard(scope): - exe.run(fluid.default_startup_program()) + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() + exe = base.Executor(place) + with base.scope_guard(scope): + exe.run(base.default_startup_program()) result_v = exe.run( feed={ 'a': a_v, @@ -289,7 +289,7 @@ def loss_scaling_check(self, use_cuda=True, scope=fluid.Scope()): result_v[7], np.zeros_like(num_bad_steps_v) ) - def loss_scaling_check_inf(self, use_cuda=True, scope=fluid.Scope()): + def loss_scaling_check_inf(self, use_cuda=True, scope=base.Scope()): with paddle_static_guard(): a = paddle.static.data( name="a", shape=[1024, 1024], dtype='float32' @@ -337,10 +337,10 @@ def loss_scaling_check_inf(self, use_cuda=True, scope=fluid.Scope()): name="update_loss_scaling", ) - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) - with fluid.scope_guard(scope): - exe.run(fluid.default_startup_program()) + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() + exe = base.Executor(place) + with base.scope_guard(scope): + exe.run(base.default_startup_program()) result_v = exe.run( feed={ 'a': a_v, @@ -376,36 +376,36 @@ def loss_scaling_check_inf(self, use_cuda=True, scope=fluid.Scope()): def test_loss_scaling_cpu(self): with paddle_static_guard(): - main = fluid.Program() - startup = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, startup): + main = base.Program() + startup = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, startup): self.loss_scaling_check(use_cuda=False) def test_loss_scaling_cpu_inf(self): with paddle_static_guard(): - main = fluid.Program() - startup = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, startup): + main = base.Program() + startup = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, startup): self.loss_scaling_check_inf(use_cuda=False) def test_loss_scaling_gpu(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): with paddle_static_guard(): - main = fluid.Program() - startup = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, startup): + main = base.Program() + startup = base.Program() + with base.unique_name.guard(): + with 
base.program_guard(main, startup): self.loss_scaling_check(use_cuda=True) def test_loss_scaling_gpu_inf(self): - if fluid.core.is_compiled_with_cuda(): + if base.core.is_compiled_with_cuda(): with paddle_static_guard(): - main = fluid.Program() - startup = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, startup): + main = base.Program() + startup = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, startup): self.loss_scaling_check_inf(use_cuda=True) diff --git a/test/legacy_test/test_vander.py b/test/legacy_test/test_vander.py index 7ad2d8e120103..2399bd14f55c1 100644 --- a/test/legacy_test/test_vander.py +++ b/test/legacy_test/test_vander.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core np.random.seed(10) diff --git a/test/legacy_test/test_var_base.py b/test/legacy_test/test_var_base.py index 99209ca798cfa..205b2fecae192 100644 --- a/test/legacy_test/test_var_base.py +++ b/test/legacy_test/test_var_base.py @@ -19,8 +19,8 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class TestVarBase(unittest.TestCase): @@ -31,7 +31,7 @@ def setUp(self): def test_to_tensor(self): def check_with_place(place): - with fluid.dygraph.guard(): + with base.dygraph.guard(): paddle.set_default_dtype('float32') # set_default_dtype should not take effect on int x = paddle.to_tensor(1, place=place, stop_gradient=False) @@ -230,8 +230,8 @@ def check_with_place(place): numpy_array = np.random.randn(3, 4) # covert core.LoDTensor to paddle.Tensor - lod_tensor = paddle.fluid.core.LoDTensor() - place = paddle.fluid.framework._current_expected_place() + lod_tensor = paddle.base.core.LoDTensor() + place = paddle.base.framework._current_expected_place() lod_tensor.set(numpy_array, place) x = paddle.to_tensor(lod_tensor) np.testing.assert_array_equal(x.numpy(), numpy_array) @@ -241,7 +241,7 @@ def check_with_place(place): # covert core.Tensor to paddle.Tensor x = paddle.to_tensor(numpy_array) dlpack = x.value().get_tensor()._to_dlpack() - tensor_from_dlpack = paddle.fluid.core.from_dlpack(dlpack) + tensor_from_dlpack = paddle.base.core.from_dlpack(dlpack) x = paddle.to_tensor(tensor_from_dlpack) np.testing.assert_array_equal(x.numpy(), numpy_array) self.assertEqual(x.type, core.VarDesc.VarType.LOD_TENSOR) @@ -318,7 +318,7 @@ def check_with_place(place): check_with_place("gpu:0") def test_to_tensor_not_change_input_stop_gradient(self): - with paddle.fluid.dygraph.guard(core.CPUPlace()): + with paddle.base.dygraph.guard(core.CPUPlace()): a = paddle.zeros([1024]) a.stop_gradient = False b = paddle.to_tensor(a) @@ -328,17 +328,17 @@ def test_to_tensor_not_change_input_stop_gradient(self): def test_to_tensor_change_place(self): if core.is_compiled_with_cuda(): a_np = np.random.rand(1024, 1024) - with paddle.fluid.dygraph.guard(core.CPUPlace()): + with paddle.base.dygraph.guard(core.CPUPlace()): a = paddle.to_tensor(a_np, place=paddle.CUDAPinnedPlace()) a = paddle.to_tensor(a) self.assertEqual(a.place.__repr__(), "Place(cpu)") - with paddle.fluid.dygraph.guard(core.CUDAPlace(0)): + with paddle.base.dygraph.guard(core.CUDAPlace(0)): a = paddle.to_tensor(a_np, place=paddle.CUDAPinnedPlace()) a = paddle.to_tensor(a) self.assertEqual(a.place.__repr__(), "Place(gpu:0)") - with paddle.fluid.dygraph.guard(core.CUDAPlace(0)): + with paddle.base.dygraph.guard(core.CUDAPlace(0)): a = 
paddle.to_tensor(a_np, place=paddle.CPUPlace()) a = paddle.to_tensor(a, place=paddle.CUDAPinnedPlace()) self.assertEqual(a.place.__repr__(), "Place(gpu_pinned)") @@ -346,13 +346,13 @@ def test_to_tensor_change_place(self): def test_to_tensor_with_lodtensor(self): if core.is_compiled_with_cuda(): a_np = np.random.rand(1024, 1024) - with paddle.fluid.dygraph.guard(core.CPUPlace()): + with paddle.base.dygraph.guard(core.CPUPlace()): lod_tensor = core.LoDTensor() lod_tensor.set(a_np, core.CPUPlace()) a = paddle.to_tensor(lod_tensor) np.testing.assert_array_equal(a_np, a.numpy()) - with paddle.fluid.dygraph.guard(core.CUDAPlace(0)): + with paddle.base.dygraph.guard(core.CUDAPlace(0)): lod_tensor = core.LoDTensor() lod_tensor.set(a_np, core.CUDAPlace(0)) a = paddle.to_tensor(lod_tensor, place=core.CPUPlace()) @@ -360,8 +360,8 @@ def test_to_tensor_with_lodtensor(self): self.assertTrue(a.place.__repr__(), "Place(cpu)") def test_to_variable(self): - with fluid.dygraph.guard(): - var = fluid.dygraph.to_variable(self.array, name="abc") + with base.dygraph.guard(): + var = base.dygraph.to_variable(self.array, name="abc") np.testing.assert_array_equal(var.numpy(), self.array) self.assertEqual(var.name, 'abc') # default value @@ -372,39 +372,39 @@ def test_to_variable(self): self.assertEqual(var.type, core.VarDesc.VarType.LOD_TENSOR) # The type of input must be 'ndarray' or 'Variable', it will raise TypeError with self.assertRaises(TypeError): - var = fluid.dygraph.to_variable("test", name="abc") + var = base.dygraph.to_variable("test", name="abc") # test to_variable of LayerObjectHelper(LayerHelperBase) with self.assertRaises(TypeError): linear = paddle.nn.Linear(32, 64) var = linear._helper.to_variable("test", name="abc") def test_list_to_variable(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): array = [[[1, 2], [1, 2], [1.0, 2]], [[1, 2], [1, 2], [1, 2]]] - var = fluid.dygraph.to_variable(array, dtype='int32') + var = base.dygraph.to_variable(array, dtype='int32') np.testing.assert_array_equal(var.numpy(), array) self.assertEqual(var.shape, [2, 3, 2]) self.assertEqual(var.dtype, core.VarDesc.VarType.INT32) self.assertEqual(var.type, core.VarDesc.VarType.LOD_TENSOR) def test_tuple_to_variable(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): array = (((1, 2), (1, 2), (1, 2)), ((1, 2), (1, 2), (1, 2))) - var = fluid.dygraph.to_variable(array, dtype='float32') + var = base.dygraph.to_variable(array, dtype='float32') np.testing.assert_array_equal(var.numpy(), array) self.assertEqual(var.shape, [2, 3, 2]) self.assertEqual(var.dtype, core.VarDesc.VarType.FP32) self.assertEqual(var.type, core.VarDesc.VarType.LOD_TENSOR) def test_tensor_to_variable(self): - with fluid.dygraph.guard(): - t = fluid.Tensor() - t.set(np.random.random((1024, 1024)), fluid.CPUPlace()) - var = fluid.dygraph.to_variable(t) + with base.dygraph.guard(): + t = base.Tensor() + t.set(np.random.random((1024, 1024)), base.CPUPlace()) + var = base.dygraph.to_variable(t) np.testing.assert_array_equal(t, var.numpy()) def test_leaf_tensor(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): x = paddle.to_tensor(np.random.uniform(-1, 1, size=[10, 10])) self.assertTrue(x.is_leaf) y = x + 1 @@ -430,7 +430,7 @@ def test_leaf_tensor(self): self.assertFalse(out.is_leaf) def test_detach(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): x = paddle.to_tensor([1.0], dtype="float64", stop_gradient=False) detach_x = x.detach() self.assertTrue(detach_x.stop_gradient, True) @@ -466,8 +466,8 @@ 
def test_detach(self): y.backward() def test_write_property(self): - with fluid.dygraph.guard(): - var = fluid.dygraph.to_variable(self.array) + with base.dygraph.guard(): + var = base.dygraph.to_variable(self.array) self.assertEqual(var.name, 'generated_tensor_0') var.name = 'test' @@ -482,7 +482,7 @@ def test_write_property(self): self.assertEqual(var.stop_gradient, False) def test_deep_copy(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): empty_var = core.eager.Tensor() empty_var_copy = copy.deepcopy(empty_var) self.assertEqual( @@ -554,8 +554,8 @@ def test_deep_copy(self): # test some patched methods def test_set_value(self): - with fluid.dygraph.guard(): - var = fluid.dygraph.to_variable(self.array) + with base.dygraph.guard(): + var = base.dygraph.to_variable(self.array) tmp1 = np.random.uniform(0.1, 1, [2, 2, 3]).astype(self.dtype) self.assertRaises(AssertionError, var.set_value, tmp1) @@ -564,12 +564,12 @@ def test_set_value(self): np.testing.assert_array_equal(var.numpy(), tmp2) def test_to_string(self): - with fluid.dygraph.guard(): - var = fluid.dygraph.to_variable(self.array) + with base.dygraph.guard(): + var = base.dygraph.to_variable(self.array) self.assertTrue(isinstance(str(var), str)) def test_element_size(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): x = paddle.to_tensor(1, dtype='bool') self.assertEqual(x.element_size(), 1) @@ -604,8 +604,8 @@ def test_element_size(self): self.assertEqual(x.element_size(), 16) def test_backward(self): - with fluid.dygraph.guard(): - var = fluid.dygraph.to_variable(self.array) + with base.dygraph.guard(): + var = base.dygraph.to_variable(self.array) var.stop_gradient = False loss = F.relu(var) loss.backward() @@ -613,8 +613,8 @@ def test_backward(self): self.assertEqual(grad_var.shape, self.shape) def test_gradient(self): - with fluid.dygraph.guard(): - var = fluid.dygraph.to_variable(self.array) + with base.dygraph.guard(): + var = base.dygraph.to_variable(self.array) var.stop_gradient = False loss = F.relu(var) loss.backward() @@ -622,14 +622,14 @@ def test_gradient(self): self.assertEqual(grad_var.shape, self.array.shape) def test_block(self): - with fluid.dygraph.guard(): - var = fluid.dygraph.to_variable(self.array) + with base.dygraph.guard(): + var = base.dygraph.to_variable(self.array) self.assertEqual( - var.block, fluid.default_main_program().global_block() + var.block, base.default_main_program().global_block() ) def _test_slice(self): - w = fluid.dygraph.to_variable( + w = base.dygraph.to_variable( np.random.random((784, 100, 100)).astype('float64') ) @@ -660,7 +660,7 @@ def _test_slice(self): [[19, 20, 21], [22, 23, 24], [25, 26, 27]], ] ).astype('float32') - var = fluid.dygraph.to_variable(tensor_array) + var = base.dygraph.to_variable(tensor_array) var1 = var[0, 1, 1] var2 = var[1:] var3 = var[0:1] @@ -753,7 +753,7 @@ def _test_slice_for_tensor_attr(self): negative_one = paddle.full(shape=[], fill_value=-1, dtype="int32") four = paddle.full(shape=[], fill_value=4, dtype="int32") - var = fluid.dygraph.to_variable(tensor_array) + var = base.dygraph.to_variable(tensor_array) var1 = var[0, one, one] var2 = var[one:] var3 = var[0:one] @@ -964,7 +964,7 @@ def _test_scalar_bool_index(self): def _test_for_var(self): np_value = np.random.random((30, 100, 100)).astype('float32') - w = fluid.dygraph.to_variable(np_value) + w = base.dygraph.to_variable(np_value) for i, e in enumerate(w): np.testing.assert_array_equal(e.numpy(), np_value[i]) @@ -1028,7 +1028,7 @@ def _test_list_index(self): 
np.testing.assert_array_equal(array[row, col], x[row, col].numpy()) def test_slice(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): self._test_slice() self._test_slice_for_tensor_attr() self._test_for_var() @@ -1039,7 +1039,7 @@ def test_slice(self): self._test_numpy_index() self._test_list_index() - var = fluid.dygraph.to_variable(self.array) + var = base.dygraph.to_variable(self.array) np.testing.assert_array_equal(var[1, :].numpy(), self.array[1, :]) np.testing.assert_array_equal(var[::-1].numpy(), self.array[::-1]) @@ -1054,22 +1054,22 @@ def test_slice(self): var[paddle.to_tensor([0, 1]), mask] def test_var_base_to_np(self): - with fluid.dygraph.guard(): - var = fluid.dygraph.to_variable(self.array) + with base.dygraph.guard(): + var = base.dygraph.to_variable(self.array) np.testing.assert_array_equal(var.numpy(), var.numpy(False)) def test_var_base_as_np(self): - with fluid.dygraph.guard(): - var = fluid.dygraph.to_variable(self.array) + with base.dygraph.guard(): + var = base.dygraph.to_variable(self.array) np.testing.assert_array_equal(var.numpy(), np.array(var)) np.testing.assert_array_equal( var.numpy(), np.array(var, dtype=np.float32) ) def test_if(self): - with fluid.dygraph.guard(): - var1 = fluid.dygraph.to_variable(np.array([[[0]]])) - var2 = fluid.dygraph.to_variable(np.array([[[1]]])) + with base.dygraph.guard(): + var1 = base.dygraph.to_variable(np.array([[[0]]])) + var2 = base.dygraph.to_variable(np.array([[[1]]])) var1_bool = False var2_bool = False @@ -1086,13 +1086,13 @@ def test_if(self): assert bool(var2), "bool(var2) is True" def test_to_static_var(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): # Convert Tensor into Variable or Parameter - var_base = fluid.dygraph.to_variable(self.array, name="var_base_1") + var_base = base.dygraph.to_variable(self.array, name="var_base_1") static_var = var_base._to_static_var() self._assert_to_static(var_base, static_var) - var_base = fluid.dygraph.to_variable(self.array, name="var_base_2") + var_base = base.dygraph.to_variable(self.array, name="var_base_2") static_param = var_base._to_static_var(to_parameter=True) self._assert_to_static(var_base, static_param, True) @@ -1112,9 +1112,9 @@ def test_to_static_var(self): def _assert_to_static(self, var_base, static_var, is_param=False): if is_param: - self.assertTrue(isinstance(static_var, fluid.framework.Parameter)) + self.assertTrue(isinstance(static_var, base.framework.Parameter)) self.assertTrue(static_var.persistable, True) - if isinstance(var_base, fluid.framework.EagerParamBase): + if isinstance(var_base, base.framework.EagerParamBase): for attr in ['trainable', 'is_distributed', 'do_model_average']: self.assertEqual( getattr(var_base, attr), getattr(static_var, attr) @@ -1129,7 +1129,7 @@ def _assert_to_static(self, var_base, static_var, is_param=False): ) ) else: - self.assertTrue(isinstance(static_var, fluid.framework.Variable)) + self.assertTrue(isinstance(static_var, base.framework.Variable)) attr_keys = ['block', 'dtype', 'type', 'name'] for attr in attr_keys: @@ -1511,14 +1511,14 @@ def func_setUp(self): def func_test_to_api(self): x_double = self.x._to(dtype='double') - self.assertEqual(x_double.dtype, paddle.fluid.core.VarDesc.VarType.FP64) + self.assertEqual(x_double.dtype, paddle.base.core.VarDesc.VarType.FP64) np.testing.assert_allclose(self.np_x, x_double, rtol=1e-05) x_ = self.x._to() - self.assertEqual(self.x.dtype, paddle.fluid.core.VarDesc.VarType.FP64) + self.assertEqual(self.x.dtype, paddle.base.core.VarDesc.VarType.FP64) 
np.testing.assert_allclose(self.np_x, x_, rtol=1e-05) - if paddle.fluid.is_compiled_with_cuda(): + if paddle.base.is_compiled_with_cuda(): x_gpu = self.x._to(device=paddle.CUDAPlace(0)) self.assertTrue(x_gpu.place.is_gpu_place()) self.assertEqual(x_gpu.place.gpu_device_id(), 0) @@ -1531,14 +1531,14 @@ def func_test_to_api(self): self.assertTrue(x_gpu1.place.is_gpu_place()) self.assertEqual(x_gpu1.place.gpu_device_id(), 0) self.assertEqual( - x_gpu1.dtype, paddle.fluid.core.VarDesc.VarType.FP64 + x_gpu1.dtype, paddle.base.core.VarDesc.VarType.FP64 ) x_gpu2 = self.x._to(device='gpu:0', dtype="float16") self.assertTrue(x_gpu2.place.is_gpu_place()) self.assertEqual(x_gpu2.place.gpu_device_id(), 0) self.assertEqual( - x_gpu2.dtype, paddle.fluid.core.VarDesc.VarType.FP16 + x_gpu2.dtype, paddle.base.core.VarDesc.VarType.FP16 ) x_cpu = self.x._to(device=paddle.CPUPlace()) @@ -1549,11 +1549,11 @@ def func_test_to_api(self): x_cpu1 = self.x._to(device=paddle.CPUPlace(), dtype="float64") self.assertTrue(x_cpu1.place.is_cpu_place()) - self.assertEqual(x_cpu1.dtype, paddle.fluid.core.VarDesc.VarType.FP64) + self.assertEqual(x_cpu1.dtype, paddle.base.core.VarDesc.VarType.FP64) x_cpu2 = self.x._to(device='cpu', dtype="float16") self.assertTrue(x_cpu2.place.is_cpu_place()) - self.assertEqual(x_cpu2.dtype, paddle.fluid.core.VarDesc.VarType.FP16) + self.assertEqual(x_cpu2.dtype, paddle.base.core.VarDesc.VarType.FP16) self.assertRaises(ValueError, self.x._to, device=1) self.assertRaises(AssertionError, self.x._to, blocking=1) @@ -1566,18 +1566,18 @@ def test_to_api(self): class TestVarBaseInitVarBaseFromTensorWithDevice(unittest.TestCase): def test_varbase_init(self): paddle.disable_static() - t = fluid.Tensor() + t = base.Tensor() np_x = np.random.random((3, 8, 8)) - t.set(np_x, fluid.CPUPlace()) + t.set(np_x, base.CPUPlace()) - if paddle.fluid.is_compiled_with_cuda(): + if paddle.base.is_compiled_with_cuda(): device = paddle.CUDAPlace(0) - tmp = fluid.core.eager.Tensor(t, device) + tmp = base.core.eager.Tensor(t, device) self.assertTrue(tmp.place.is_gpu_place()) self.assertEqual(tmp.numpy().all(), np_x.all()) device = paddle.CPUPlace() - tmp = fluid.core.eager.Tensor(t, device) + tmp = base.core.eager.Tensor(t, device) self.assertEqual(tmp.numpy().all(), np_x.all()) diff --git a/test/legacy_test/test_variable.py b/test/legacy_test/test_variable.py index 5774b08a3280b..cbc339fad8026 100644 --- a/test/legacy_test/test_variable.py +++ b/test/legacy_test/test_variable.py @@ -18,9 +18,9 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.framework import ( +from paddle import base +from paddle.base import core +from paddle.base.framework import ( Program, convert_np_dtype_to_dtype_, default_main_program, @@ -70,14 +70,14 @@ def test_var(self): ) w = b.create_var( - dtype=paddle.fluid.core.VarDesc.VarType.STRINGS, + dtype=paddle.base.core.VarDesc.VarType.STRINGS, shape=[1], name="str_var", ) self.assertEqual(None, w.lod_level) def test_element_size(self): - with fluid.program_guard(Program(), Program()): + with base.program_guard(Program(), Program()): x = paddle.static.data(name='x1', shape=[2], dtype='bool') self.assertEqual(x.element_size(), 1) @@ -139,9 +139,9 @@ def _test_slice(self, place): self.assertEqual(0, nw.lod_level) - main = fluid.Program() - with fluid.program_guard(main): - exe = fluid.Executor(place) + main = base.Program() + with base.program_guard(main): + exe = base.Executor(place) tensor_array = np.array( [ [[1, 2, 3], [4, 5, 6], 
[7, 8, 9]], @@ -170,10 +170,10 @@ def _test_slice(self, place): x = paddle.static.data(name='x', shape=[-1, 13], dtype='float32') y = paddle.static.nn.fc(x, size=1, activation=None) y_1 = y[:, 0] - feeder = fluid.DataFeeder(place=place, feed_list=[x]) + feeder = base.DataFeeder(place=place, feed_list=[x]) data = [] data.append(np.random.randint(10, size=[13]).astype('float32')) - exe.run(fluid.default_startup_program()) + exe.run(base.default_startup_program()) local_out = exe.run( main, @@ -387,7 +387,7 @@ def _test_slice_index_scalar_bool(self, place): self.assertTrue((result[0] == expected[0]).all()) def test_slice(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): places.append(core.CUDAPlace(0)) @@ -409,16 +409,16 @@ def _tostring(self): self.assertTrue(isinstance(str(wc), str)) def test_tostring(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): self._tostring() - with fluid.program_guard(default_main_program()): + with base.program_guard(default_main_program()): self._tostring() def test_fake_interface_only_api(self): b = default_main_program().current_block() var = b.create_var(dtype="float64", lod_level=0) - with fluid.dygraph.guard(): + with base.dygraph.guard(): self.assertRaises(AssertionError, var.numpy) self.assertRaises(AssertionError, var.backward) self.assertRaises(AssertionError, var.gradient) @@ -427,7 +427,7 @@ def test_fake_interface_only_api(self): def test_variable_in_dygraph_mode(self): b = default_main_program().current_block() var = b.create_var(dtype="float64", shape=[1, 1]) - with fluid.dygraph.guard(): + with base.dygraph.guard(): self.assertTrue(var.to_string(True).startswith('name:')) self.assertFalse(var.persistable) @@ -440,8 +440,8 @@ def test_variable_in_dygraph_mode(self): self.assertTrue(var.name.startswith('_generated_var_')) self.assertEqual(var.shape, (1, 1)) - self.assertEqual(var.dtype, fluid.core.VarDesc.VarType.FP64) - self.assertEqual(var.type, fluid.core.VarDesc.VarType.LOD_TENSOR) + self.assertEqual(var.dtype, base.core.VarDesc.VarType.FP64) + self.assertEqual(var.type, base.core.VarDesc.VarType.LOD_TENSOR) def test_create_selected_rows(self): b = default_main_program().current_block() @@ -450,7 +450,7 @@ def test_create_selected_rows(self): name="var", shape=[1, 1], dtype="float32", - type=fluid.core.VarDesc.VarType.SELECTED_ROWS, + type=base.core.VarDesc.VarType.SELECTED_ROWS, persistable=True, ) @@ -463,7 +463,7 @@ def test_size(self): prog = paddle.static.Program() with paddle.static.program_guard(prog): x = paddle.assign(np.random.rand(2, 3, 4).astype("float32")) - exe = paddle.static.Executor(fluid.CPUPlace()) + exe = paddle.static.Executor(base.CPUPlace()) exe.run(paddle.static.default_startup_program()) output = exe.run(prog, fetch_list=[x.size()]) @@ -484,7 +484,7 @@ def test_detach(self): startup = paddle.static.Program() main = paddle.static.Program() - scope = fluid.core.Scope() + scope = base.core.Scope() with paddle.static.scope_guard(scope): with paddle.static.program_guard(main, startup): x = paddle.static.data( @@ -575,7 +575,7 @@ def _test_item_none_and_decrease(self, place): self.assertTrue((result[i] == expected[i]).all()) def test_slice(self): - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): places.append(core.CUDAPlace(0)) @@ -628,9 +628,9 @@ def test_static_graph_list_index(self): y = x[index_mod] place = ( - paddle.fluid.CPUPlace() - if not paddle.fluid.core.is_compiled_with_cuda() - else paddle.fluid.CUDAPlace(0) + 
paddle.base.CPUPlace() + if not paddle.base.core.is_compiled_with_cuda() + else paddle.base.CUDAPlace(0) ) prog = paddle.static.default_main_program() @@ -713,9 +713,9 @@ def test_static_graph_list_index_muti_dim(self): y = x[index1, index2] place = ( - paddle.fluid.CPUPlace() - if not paddle.fluid.core.is_compiled_with_cuda() - else paddle.fluid.CUDAPlace(0) + paddle.base.CPUPlace() + if not paddle.base.core.is_compiled_with_cuda() + else paddle.base.CUDAPlace(0) ) prog = paddle.static.default_main_program() @@ -777,7 +777,7 @@ def run_getitem_list_index(self, array, index): x = paddle.static.data(name='x', shape=array.shape, dtype='float32') y = x[index] - place = paddle.fluid.CPUPlace() + place = paddle.base.CPUPlace() prog = paddle.static.default_main_program() exe = paddle.static.Executor(place) @@ -835,7 +835,7 @@ def run_setitem_list_index(self, array, index, value_np): ) y = paddle.static.setitem(x, index, value) - place = paddle.fluid.CPUPlace() + place = paddle.base.CPUPlace() prog = paddle.static.default_main_program() exe = paddle.static.Executor(place) @@ -1034,9 +1034,9 @@ def test_static_graph_tensor_index_setitem_muti_dim(self): x1_out = paddle.static.setitem(x1, (index_1, index_2), value) x2_out = paddle.static.setitem(x2, index_1, value) place = ( - paddle.fluid.CPUPlace() - if not paddle.fluid.core.is_compiled_with_cuda() - else paddle.fluid.CUDAPlace(0) + paddle.base.CPUPlace() + if not paddle.base.core.is_compiled_with_cuda() + else paddle.base.CUDAPlace(0) ) prog = paddle.static.default_main_program() @@ -1117,9 +1117,9 @@ def test_static_graph_array_index_muti_dim(self): y1 = x1_out[index_mod2, index_mod1] y2 = x2_out[index_mod2] place = ( - paddle.fluid.CPUPlace() - if not paddle.fluid.core.is_compiled_with_cuda() - else paddle.fluid.CUDAPlace(0) + paddle.base.CPUPlace() + if not paddle.base.core.is_compiled_with_cuda() + else paddle.base.CUDAPlace(0) ) prog = paddle.static.default_main_program() diff --git a/test/legacy_test/test_variable_length_memory_efficient_attention.py b/test/legacy_test/test_variable_length_memory_efficient_attention.py index 76630a8e6c2f1..a02e942a29697 100644 --- a/test/legacy_test/test_variable_length_memory_efficient_attention.py +++ b/test/legacy_test/test_variable_length_memory_efficient_attention.py @@ -19,7 +19,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.framework import core from paddle.incubate.nn.functional import ( variable_length_memory_efficient_attention, @@ -277,7 +277,7 @@ def test_all(self): out = variable_length_memory_efficient_attention( q, k, v, seq_lens, seq_lens, mask, self.scale ) - exe = fluid.Executor() + exe = base.Executor() res = exe.run( feed={ "query": self.q, diff --git a/test/legacy_test/test_variance_layer.py b/test/legacy_test/test_variance_layer.py index 2613fb91b15c0..d4dc28f8f6cce 100644 --- a/test/legacy_test/test_variance_layer.py +++ b/test/legacy_test/test_variance_layer.py @@ -39,7 +39,7 @@ def setUp(self): self.x = np.random.uniform(-1, 1, self.shape).astype(self.dtype) self.place = ( paddle.CUDAPlace(0) - if paddle.fluid.core.is_compiled_with_cuda() + if paddle.base.core.is_compiled_with_cuda() else paddle.CPUPlace() ) diff --git a/test/legacy_test/test_version.py b/test/legacy_test/test_version.py index 55b5228882bff..2b6d8f599c582 100644 --- a/test/legacy_test/test_version.py +++ b/test/legacy_test/test_version.py @@ -15,7 +15,7 @@ import re import unittest -import paddle.version as fluid_version +import paddle.version as base_version 
class VersionTest(unittest.TestCase): @@ -29,24 +29,24 @@ def setUp(self): def test_check_output(self): # check commit format - self.assertTrue(re.match(self._commit_regex, fluid_version.commit)) - self.assertTrue(isinstance(fluid_version.istaged, bool)) + self.assertTrue(re.match(self._commit_regex, base_version.commit)) + self.assertTrue(isinstance(base_version.istaged, bool)) # check version format - if fluid_version.istaged: - self.assertTrue(re.match(self._major_regex, fluid_version.major)) - self.assertTrue(re.match(self._minor_regex, fluid_version.minor)) - self.assertTrue(re.match(self._patch_regex, fluid_version.patch)) - self.assertTrue(re.match(self._rc_regex, fluid_version.rc)) + if base_version.istaged: + self.assertTrue(re.match(self._major_regex, base_version.major)) + self.assertTrue(re.match(self._minor_regex, base_version.minor)) + self.assertTrue(re.match(self._patch_regex, base_version.patch)) + self.assertTrue(re.match(self._rc_regex, base_version.rc)) self.assertTrue( - re.match(self._version_regex, fluid_version.full_version) + re.match(self._version_regex, base_version.full_version) ) else: - self.assertEqual(fluid_version.major, "0") - self.assertEqual(fluid_version.minor, "0") - self.assertEqual(fluid_version.patch, "0") - self.assertEqual(fluid_version.rc, "0") - self.assertEqual(fluid_version.full_version, "0.0.0") + self.assertEqual(base_version.major, "0") + self.assertEqual(base_version.minor, "0") + self.assertEqual(base_version.patch, "0") + self.assertEqual(base_version.rc, "0") + self.assertEqual(base_version.full_version, "0.0.0") if __name__ == '__main__': diff --git a/test/legacy_test/test_view_op_reuse_allocation.py b/test/legacy_test/test_view_op_reuse_allocation.py index cee8294541089..ea48c9addb5b3 100644 --- a/test/legacy_test/test_view_op_reuse_allocation.py +++ b/test/legacy_test/test_view_op_reuse_allocation.py @@ -66,7 +66,7 @@ def test_forward_version(self): def test_backward_error(self): # It raises an error because the inplace operator will result # in incorrect gradient computation. 
- with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): var_a = paddle.ones(shape=self.input_shape, dtype="float32") var_a.stop_gradient = False diff --git a/test/legacy_test/test_viterbi_decode_op.py b/test/legacy_test/test_viterbi_decode_op.py index c7891ddcd93a6..3b62d035b64fd 100644 --- a/test/legacy_test/test_viterbi_decode_op.py +++ b/test/legacy_test/test_viterbi_decode_op.py @@ -14,8 +14,8 @@ from eager_op_test import OpTest import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -107,9 +107,9 @@ def set_attr(self): self.use_tag = True self.bz, self.len, self.ntags = 4, 8, 10 self.places = ( - [fluid.CPUPlace(), fluid.CUDAPlace(0)] + [base.CPUPlace(), base.CUDAPlace(0)] if core.is_compiled_with_cuda() - else [fluid.CPUPlace()] + else [base.CPUPlace()] ) def setUp(self): @@ -123,7 +123,7 @@ def setUp(self): def check_static_result(self, place): bz, length, ntags = self.bz, self.len, self.ntags - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): Input = paddle.static.data( name="Input", shape=[bz, length, ntags], dtype="float32" ) @@ -135,7 +135,7 @@ def check_static_result(self, place): ) decoder = paddle.text.ViterbiDecoder(Transition, self.use_tag) score, path = decoder(Input, Length) - exe = fluid.Executor(place) + exe = base.Executor(place) feed_list = { "Input": self.input, "Transition": self.transitions, diff --git a/test/legacy_test/test_warpctc_op.py b/test/legacy_test/test_warpctc_op.py index d33584ac9fd8c..e7c1ff28e1b5b 100644 --- a/test/legacy_test/test_warpctc_op.py +++ b/test/legacy_test/test_warpctc_op.py @@ -21,7 +21,7 @@ import paddle import paddle.nn.functional as F -from paddle.fluid import Program, core, program_guard +from paddle.base import Program, core, program_guard CUDA_BLOCK_SIZE = 32 diff --git a/test/legacy_test/test_warprnnt_op.py b/test/legacy_test/test_warprnnt_op.py index 30ee35a2ae3cd..c5c2950cc7c24 100644 --- a/test/legacy_test/test_warprnnt_op.py +++ b/test/legacy_test/test_warprnnt_op.py @@ -19,7 +19,7 @@ import paddle from paddle import _C_ops -from paddle.fluid import Program, core, program_guard +from paddle.base import Program, core, program_guard paddle.enable_static() diff --git a/test/legacy_test/test_weight_decay.py b/test/legacy_test/test_weight_decay.py index 41bea82c4cd03..c237aa75fd8fd 100644 --- a/test/legacy_test/test_weight_decay.py +++ b/test/legacy_test/test_weight_decay.py @@ -19,8 +19,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import compiler, core +from paddle import base +from paddle.base import compiler, core def get_places(): @@ -32,10 +32,10 @@ def get_places(): @contextlib.contextmanager def prog_scope_guard(main_prog, startup_prog): - scope = fluid.core.Scope() - with fluid.unique_name.guard(): - with fluid.scope_guard(scope): - with fluid.program_guard(main_prog, startup_prog): + scope = base.core.Scope() + with base.unique_name.guard(): + with base.scope_guard(scope): + with base.program_guard(main_prog, startup_prog): yield @@ -52,7 +52,7 @@ def bow_net( """ BOW net This model is from https://github.com/PaddlePaddle/models: - fluid/PaddleNLP/text_classification/nets.py + base/PaddleNLP/text_classification/nets.py """ emb = paddle.static.nn.embedding( input=data, is_sparse=is_sparse, size=[dict_dim, emb_dim] @@ -84,10 +84,10 @@ def setUp(self): self.learning_rate = 0.5 def run_executor(self, place, 
feed_list, loss): - exe = fluid.Executor(place) - feeder = fluid.DataFeeder(feed_list=feed_list, place=place) - exe.run(fluid.default_startup_program()) - main_prog = fluid.default_main_program() + exe = base.Executor(place) + feeder = base.DataFeeder(feed_list=feed_list, place=place) + exe.run(base.default_startup_program()) + main_prog = base.default_main_program() loss_set = [] for data in self.train_data: out = exe.run( @@ -108,24 +108,24 @@ def run_parallel_exe( use_fast_executor=False, use_ir_memory_optimize=False, ): - exe = fluid.Executor(place) - feeder = fluid.DataFeeder(feed_list=feed_list, place=place) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + feeder = base.DataFeeder(feed_list=feed_list, place=place) + exe.run(base.default_startup_program()) - exec_strategy = fluid.ExecutionStrategy() + exec_strategy = base.ExecutionStrategy() if use_fast_executor: exec_strategy.use_experimental_executor = True - build_strategy = fluid.BuildStrategy() + build_strategy = base.BuildStrategy() build_strategy.reduce_strategy = ( - fluid.BuildStrategy.ReduceStrategy.Reduce + base.BuildStrategy.ReduceStrategy.Reduce if use_reduce - else fluid.BuildStrategy.ReduceStrategy.AllReduce + else base.BuildStrategy.ReduceStrategy.AllReduce ) build_strategy.memory_optimize = use_ir_memory_optimize train_cp = compiler.CompiledProgram( - fluid.default_main_program(), build_strategy=build_strategy + base.default_main_program(), build_strategy=build_strategy ) loss_set = [] @@ -140,8 +140,8 @@ def run_parallel_exe( def check_weight_decay( self, place, model, use_parallel_exe=False, use_reduce=False ): - main_prog = fluid.framework.Program() - startup_prog = fluid.framework.Program() + main_prog = base.framework.Program() + startup_prog = base.framework.Program() startup_prog.random_seed = 1 with prog_scope_guard(main_prog=main_prog, startup_prog=startup_prog): data = paddle.static.data( diff --git a/test/legacy_test/test_weight_normalization.py b/test/legacy_test/test_weight_normalization.py index e94e61387c061..f8793aef3052e 100644 --- a/test/legacy_test/test_weight_normalization.py +++ b/test/legacy_test/test_weight_normalization.py @@ -18,9 +18,9 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.param_attr import WeightNormParamAttr +from paddle import base +from paddle.base import core +from paddle.base.param_attr import WeightNormParamAttr class TestWeightNormalization(unittest.TestCase): @@ -49,7 +49,7 @@ def set_program(cls): activation=None, ) loss = paddle.sum(out) - fluid.backward.append_backward(loss=loss) + base.backward.append_backward(loss=loss) cls.fetch_list = [ 'weight_norm_param_g', 'weight_norm_param_v', @@ -63,10 +63,10 @@ def run_program(self): places.append(core.CUDAPlace(0)) for place in places: self.set_inputs(place) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + exe.run(base.default_startup_program()) output = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed=self.inputs, fetch_list=self.fetch_list, return_numpy=False, @@ -99,7 +99,7 @@ def set_data(self): def set_inputs(self, place): self.inputs = {} for desc in self.data_desc: - tensor = fluid.Tensor() + tensor = base.Tensor() tensor.set(self.data[desc[0]][0], place) if self.data[desc[0]][1]: tensor.set_recursive_sequence_lengths(self.data[desc[0]][1]) diff --git a/test/legacy_test/test_where_op.py b/test/legacy_test/test_where_op.py index 
aa03f7276c1b3..12fc6ea887829 100644 --- a/test/legacy_test/test_where_op.py +++ b/test/legacy_test/test_where_op.py @@ -18,9 +18,9 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard -from paddle.fluid.backward import append_backward +from paddle import base +from paddle.base import Program, core, program_guard +from paddle.base.backward import append_backward class TestWhereOp(OpTest): @@ -127,7 +127,7 @@ def ref_y_backward(self, dout): def test_api(self, use_cuda=False): for x_stop_gradient in [False, True]: for y_stop_gradient in [False, True]: - with fluid.program_guard(Program(), Program()): + with base.program_guard(Program(), Program()): cond = paddle.static.data( name='cond', shape=[-1] + self.shape, dtype='bool' ) @@ -149,20 +149,20 @@ def test_api(self, use_cuda=False): append_backward(paddle.mean(result)) for use_cuda in [False, True]: if use_cuda and ( - not fluid.core.is_compiled_with_cuda() + not base.core.is_compiled_with_cuda() ): break place = ( - fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + base.CUDAPlace(0) if use_cuda else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) fetch_list = [result, result.grad_name] if x_stop_gradient is False: fetch_list.append(x.grad_name) if y_stop_gradient is False: fetch_list.append(y.grad_name) out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={'cond': self.cond, 'x': self.x, 'y': self.y}, fetch_list=fetch_list, ) @@ -182,7 +182,7 @@ def test_api(self, use_cuda=False): def test_api_broadcast(self, use_cuda=False): main_program = Program() - with fluid.program_guard(main_program): + with base.program_guard(main_program): x = paddle.static.data(name='x', shape=[-1, 4, 1], dtype='float32') x.desc.set_need_check_feed(False) y = paddle.static.data(name='y', shape=[-1, 4, 2], dtype='float32') @@ -193,12 +193,12 @@ def test_api_broadcast(self, use_cuda=False): ) result = paddle.where((x > 1), x=x, y=y) for use_cuda in [False, True]: - if use_cuda and (not fluid.core.is_compiled_with_cuda()): + if use_cuda and (not base.core.is_compiled_with_cuda()): return - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() + exe = base.Executor(place) out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={'x': x_i, 'y': y_i}, fetch_list=[result], ) @@ -209,7 +209,7 @@ def test_api_broadcast(self, use_cuda=False): def test_scalar(self): paddle.enable_static() main_program = Program() - with fluid.program_guard(main_program): + with base.program_guard(main_program): cond_shape = [2, 4] cond = paddle.static.data( name='cond', shape=[-1] + cond_shape, dtype='bool' @@ -220,12 +220,12 @@ def test_scalar(self): cond_data = np.array([False, False, True, True]).astype('bool') result = paddle.where(condition=cond, x=x_data, y=y_data) for use_cuda in [False, True]: - if use_cuda and (not fluid.core.is_compiled_with_cuda()): + if use_cuda and (not base.core.is_compiled_with_cuda()): return - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() + exe = base.Executor(place) out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={'cond': cond_data}, fetch_list=[result], ) @@ -235,7 +235,7 @@ def test_scalar(self): def 
__test_where_with_broadcast_static(self, cond_shape, x_shape, y_shape): paddle.enable_static() main_program = Program() - with fluid.program_guard(main_program): + with base.program_guard(main_program): cond = paddle.static.data( name='cond', shape=[-1] + cond_shape, dtype='bool' ) @@ -254,12 +254,12 @@ def __test_where_with_broadcast_static(self, cond_shape, x_shape, y_shape): y_data = np.random.random(size=y_shape).astype('float32') result = paddle.where(condition=cond, x=x, y=y) for use_cuda in [False, True]: - if use_cuda and (not fluid.core.is_compiled_with_cuda()): + if use_cuda and (not base.core.is_compiled_with_cuda()): return - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CUDAPlace(0) if use_cuda else base.CPUPlace() + exe = base.Executor(place) out = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={'cond': cond_data, 'x': x_data, 'y': y_data}, fetch_list=[result], ) @@ -317,29 +317,29 @@ def test_static_api_broadcast_8(self): class TestWhereDygraphAPI(unittest.TestCase): def test_api(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): x_i = np.array([0.9383, 0.1983, 3.2, 1.2]).astype('float64') y_i = np.array([1.0, 1.0, 1.0, 1.0]).astype('float64') cond_i = np.array([False, False, True, True]).astype('bool') - x = fluid.dygraph.to_variable(x_i) - y = fluid.dygraph.to_variable(y_i) - cond = fluid.dygraph.to_variable(cond_i) + x = base.dygraph.to_variable(x_i) + y = base.dygraph.to_variable(y_i) + cond = base.dygraph.to_variable(cond_i) out = paddle.where(cond, x, y) np.testing.assert_array_equal( out.numpy(), np.where(cond_i, x_i, y_i) ) def test_scalar(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): cond_i = np.array([False, False, True, True]).astype('bool') x = 1.0 y = 2.0 - cond = fluid.dygraph.to_variable(cond_i) + cond = base.dygraph.to_variable(cond_i) out = paddle.where(cond, x, y) np.testing.assert_array_equal(out.numpy(), np.where(cond_i, x, y)) def __test_where_with_broadcast_dygraph(self, cond_shape, a_shape, b_shape): - with fluid.dygraph.guard(): + with base.dygraph.guard(): cond_tmp = paddle.rand(cond_shape) cond = cond_tmp < 0.3 a = paddle.rand(a_shape) @@ -406,7 +406,7 @@ def test_where_condition(self): self.assertEqual(type(y), tuple) self.assertEqual(len(y), 2) z = paddle.concat(list(y), axis=1) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) (res,) = exe.run( feed={'x': data}, fetch_list=[z.name], return_numpy=False ) @@ -420,7 +420,7 @@ def test_where_condition(self): self.assertEqual(type(y), tuple) self.assertEqual(len(y), 1) z = paddle.concat(list(y), axis=1) - exe = fluid.Executor(fluid.CPUPlace()) + exe = base.Executor(base.CPUPlace()) (res,) = exe.run( feed={'x': data}, fetch_list=[z.name], return_numpy=False ) @@ -454,7 +454,7 @@ def test_type(): self.assertRaises(TypeError, test_type) def test_value_error(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): cond_shape = [2, 2, 4] cond_tmp = paddle.rand(cond_shape) cond = cond_tmp < 0.3 diff --git a/test/legacy_test/test_while_loop_op.py b/test/legacy_test/test_while_loop_op.py index 9ba690f5b1d93..c05b62b7f7ac4 100644 --- a/test/legacy_test/test_while_loop_op.py +++ b/test/legacy_test/test_while_loop_op.py @@ -18,10 +18,10 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.backward import append_backward -from paddle.fluid.framework import Program, program_guard 
+from paddle import base +from paddle.base import core +from paddle.base.backward import append_backward +from paddle.base.framework import Program, program_guard paddle.enable_static() @@ -45,11 +45,11 @@ def body(i): out = paddle.static.nn.while_loop(cond, body, (i,)) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) res = exe.run(main_program, fetch_list=out) np.testing.assert_allclose( np.asarray(res[0]), np.full(1, 10, np.int64), rtol=1e-05 @@ -81,11 +81,11 @@ def body(i, mem): data_one = np.ones(10).astype('float32') place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) res = exe.run(main_program, feed={'mem': data}, fetch_list=out) for i in range(10): data = np.add(data, data_one) @@ -144,11 +144,11 @@ def body(i, ten, test_dict, test_list, test_list_dict): cond, body, [i, ten, test_dict, test_list, test_list_dict] ) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) res = exe.run( main_program, fetch_list=[ @@ -228,11 +228,11 @@ def internal_body(j, init, sums): data_sums = np.zeros([3, 3]).astype('float32') place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) res = exe.run( main_program, feed={'init': data, 'sums': data_sums}, fetch_list=out ) @@ -256,7 +256,7 @@ def body(i, x): main_program = Program() startup_program = Program() - with fluid.program_guard(main_program, startup_program): + with base.program_guard(main_program, startup_program): i = paddle.static.data(name='i', shape=[1], dtype='float32') i.stop_gradient = False eleven = paddle.tensor.fill_constant( @@ -273,11 +273,11 @@ def body(i, x): append_backward(mean) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) feed_i = np.ones(1).astype('float32') feed_x = np.ones(1).astype('float32') @@ -303,7 +303,7 @@ def body(i, x): main_program = Program() startup_program = Program() - with fluid.program_guard(main_program, startup_program): + with base.program_guard(main_program, startup_program): i = paddle.static.data(name='i', shape=[1], dtype='float32') i.stop_gradient = False x = paddle.static.data(name='x', shape=[1], dtype='float32') @@ -314,11 +314,11 @@ def body(i, x): append_backward(mean) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) feed_i = np.ones(1).astype('float32') feed_x = np.ones(1).astype('float32') @@ -367,7 +367,7 @@ def internal_body(j, x, mem_array): main_program = Program() startup_program = Program() - with fluid.program_guard(main_program, startup_program): + with base.program_guard(main_program, startup_program): d0 = paddle.static.data(name='d0', shape=[10], dtype='float32') d1 = paddle.static.data(name='d1', shape=[10], dtype='float32') d2 = paddle.static.data(name='d2', shape=[10], dtype='float32') @@ -403,11 +403,11 @@ def internal_body(j, x, mem_array): 
append_backward(mean) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) d = [] for i in range(3): @@ -450,7 +450,7 @@ def fn_add_one(): main_program = Program() startup_program = Program() - with fluid.program_guard(main_program, startup_program): + with base.program_guard(main_program, startup_program): i = paddle.tensor.fill_constant(shape=[1], dtype='int64', value=1) ten = paddle.tensor.fill_constant( shape=[1], dtype='int64', value=10 @@ -462,11 +462,11 @@ def fn_add_one(): out = paddle.static.nn.while_loop(cond, body, [i]) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) res = exe.run(main_program, fetch_list=out) data = np.asarray([25]).astype('int64') @@ -661,11 +661,11 @@ def body(z, i): z, _ = paddle.static.nn.while_loop(cond, body, [z, i]) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) np_x = np.array([1, 2, 3, 4, 5], dtype='int32') res = exe.run(main_program, feed={'x': np_x}, fetch_list=[z]) diff --git a/test/legacy_test/test_while_op.py b/test/legacy_test/test_while_op.py index 25d34f53e1de2..3f12fa397a3a8 100644 --- a/test/legacy_test/test_while_op.py +++ b/test/legacy_test/test_while_op.py @@ -17,10 +17,10 @@ import numpy import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.backward import append_backward -from paddle.fluid.executor import Executor +from paddle import base +from paddle.base import core +from paddle.base.backward import append_backward +from paddle.base.executor import Executor from paddle.incubate.layers.nn import shuffle_batch paddle.enable_static() @@ -78,9 +78,9 @@ def simple_net(self): return loss, sum_result def test_simple_net(self): - main_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(main_program, startup_program): + main_program = base.Program() + startup_program = base.Program() + with base.program_guard(main_program, startup_program): loss, sum_result = self.simple_net() append_backward(loss) @@ -99,11 +99,11 @@ def test_simple_net(self): self.assertAlmostEqual(numpy.sum(d), numpy.sum(outs[0]), delta=0.01) def test_simple_net_forward(self): - main_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(main_program, startup_program): + main_program = base.Program() + startup_program = base.Program() + with base.program_guard(main_program, startup_program): self.simple_net() - binary = fluid.compiler.CompiledProgram(main_program) + binary = base.compiler.CompiledProgram(main_program) cpu = core.CPUPlace() exe = Executor(cpu) @@ -130,7 +130,7 @@ def test_exceptions(self): class BadInputTest(unittest.TestCase): def test_error(self): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): def test_bad_x(): x = [1, 2, 3] @@ -166,8 +166,8 @@ def body_func(i, ten, batch_info, origin_seq): output = shuffle_temp - exe = fluid.Executor(fluid.CPUPlace()) - exe.run(fluid.default_startup_program()) + exe = base.Executor(base.CPUPlace()) + exe.run(base.default_startup_program()) input_x = numpy.array([[1, 2, 3, 4], [4, 5, 6, 7], [7, 8, 9, 10]]) input_x = input_x.reshape(3, 1, 4) @@ -175,7 +175,7 @@ 
def body_func(i, ten, batch_info, origin_seq): input_y = input_y.reshape(3, 1, 1) (res,) = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={'x': input_x, 'y': input_y}, fetch_list=[output], ) @@ -188,9 +188,9 @@ def test_outputs_exists_inputs(self): """ We guarantee that the output tensor must be in the input tensor, so that the output and input can correspond to each other, but the input can be greater than the number of outputs. It's required in paddle2onnx. """ - main_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(main_program, startup_program): + main_program = base.Program() + startup_program = base.Program() + with base.program_guard(main_program, startup_program): def func(x): s = paddle.zeros([]) diff --git a/test/legacy_test/test_yolov3_loss_op.py b/test/legacy_test/test_yolov3_loss_op.py index afe1dbc1c0a1f..c9a9779a2a984 100644 --- a/test/legacy_test/test_yolov3_loss_op.py +++ b/test/legacy_test/test_yolov3_loss_op.py @@ -19,7 +19,7 @@ from scipy.special import expit, logit import paddle -from paddle.fluid import core +from paddle.base import core def l1loss(x, y): diff --git a/test/legacy_test/test_zero_dim_tensor.py b/test/legacy_test/test_zero_dim_tensor.py index 2f02a00209f56..ed6eb3786e077 100644 --- a/test/legacy_test/test_zero_dim_tensor.py +++ b/test/legacy_test/test_zero_dim_tensor.py @@ -4921,7 +4921,7 @@ def test_sequence_pad(self): value = paddle.to_tensor(1000, dtype=paddle.int64).squeeze() out = paddle.static.nn.sequence_pad(x, value) - x_tensor = paddle.fluid.create_lod_tensor( + x_tensor = paddle.base.create_lod_tensor( np.arange(20).astype(np.int64).reshape(-1, 2), [[3, 3, 4]], place=self.exe.place, diff --git a/test/legacy_test/test_zeros_like_op.py b/test/legacy_test/test_zeros_like_op.py index 23ec95877da8c..538556cd4f1fc 100644 --- a/test/legacy_test/test_zeros_like_op.py +++ b/test/legacy_test/test_zeros_like_op.py @@ -17,9 +17,9 @@ import numpy as np import paddle -from paddle import _C_ops, fluid, zeros_like -from paddle.fluid import Program, core, program_guard -from paddle.fluid.framework import convert_np_dtype_to_dtype_ +from paddle import _C_ops, base, zeros_like +from paddle.base import Program, core, program_guard +from paddle.base.framework import convert_np_dtype_to_dtype_ class TestZerosLikeAPIError(unittest.TestCase): @@ -42,11 +42,11 @@ def test_api(self): out4 = zeros_like(x, 'int32') out5 = zeros_like(x, 'int64') place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) outs = exe.run( train_program, feed={'X': np.ones(shape).astype('float32')}, @@ -63,9 +63,9 @@ class TestZerosLikeImpeartive(unittest.TestCase): def test_out(self): shape = [3, 4] place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) paddle.disable_static(place) x = paddle.to_tensor(np.ones(shape)) @@ -85,9 +85,9 @@ class TestZerosAPI(unittest.TestCase): def test_api(self): shape = [3, 4] place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) paddle.disable_static(place) diff --git a/test/legacy_test/test_zeros_op.py b/test/legacy_test/test_zeros_op.py index f8da6e20726c1..ce4449335425c 100644 --- a/test/legacy_test/test_zeros_op.py +++ b/test/legacy_test/test_zeros_op.py @@ -17,8 +17,8 @@ import numpy as np import paddle 
-from paddle import fluid -from paddle.fluid import Program, program_guard +from paddle import base +from paddle.base import Program, program_guard class ApiZerosTest(unittest.TestCase): @@ -52,7 +52,7 @@ def test_out(self): result = exe.run(fetch_list=[out]) self.assertEqual((result == out_np).all(), True) - def test_fluid_out(self): + def test_base_out(self): with program_guard(Program()): zeros = paddle.zeros(shape=[10], dtype='int64') place = paddle.CPUPlace() @@ -65,13 +65,13 @@ def test_fluid_out(self): class ApiZerosError(unittest.TestCase): def test_errors(self): def test_error1(): - with paddle.static.program_guard(fluid.Program()): + with paddle.static.program_guard(base.Program()): ones = paddle.zeros(shape=10, dtype='int64') self.assertRaises(TypeError, test_error1) def test_shape_errors(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): try: shape = [-1, 5] out = paddle.zeros(shape) diff --git a/test/legacy_test/testsuite.py b/test/legacy_test/testsuite.py index 9c591fdaafad0..1c48b90cb77a9 100644 --- a/test/legacy_test/testsuite.py +++ b/test/legacy_test/testsuite.py @@ -15,7 +15,7 @@ import numpy as np from op import Operator -from paddle.fluid import core +from paddle.base import core def create_op(scope, op_type, inputs, outputs, attrs, cache_list=None): diff --git a/test/legacy_test/transformer_model.py b/test/legacy_test/transformer_model.py index 03f926c1fb4c0..d1dffc491ba45 100644 --- a/test/legacy_test/transformer_model.py +++ b/test/legacy_test/transformer_model.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base pos_enc_param_names = ( "src_pos_enc_table", @@ -273,7 +273,7 @@ def prepare_encoder( src_pos, size=[src_max_len, src_emb_dim], padding_idx=pos_pad_idx, - param_attr=fluid.ParamAttr(name=pos_enc_param_name, trainable=False), + param_attr=base.ParamAttr(name=pos_enc_param_name, trainable=False), ) src_pos_enc.stop_gradient = True enc_input = src_word_emb + src_pos_enc diff --git a/test/legacy_test/utils.py b/test/legacy_test/utils.py index 86f467f46047c..71478024cbcdc 100644 --- a/test/legacy_test/utils.py +++ b/test/legacy_test/utils.py @@ -14,8 +14,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid.framework import _dygraph_guard +from paddle import base +from paddle.base.framework import _dygraph_guard __all__ = ['DyGraphProgramDescTracerTestHelper', 'is_equal_program'] @@ -86,7 +86,7 @@ def _is_equal_program(prog1, prog2): def load_dygraph_vars_to_scope(model_path, scope, place): def load_dict_to_scope(scope, dictionary): if scope is None: - scope = fluid.global_scope() + scope = base.global_scope() for k, v in dictionary.items(): dst_t = scope.var(k).get_tensor() diff --git a/test/mkldnn/check_flags_mkldnn_ops_on_off.py b/test/mkldnn/check_flags_mkldnn_ops_on_off.py index f00f3967225ca..cc1b01c7ae2e9 100644 --- a/test/mkldnn/check_flags_mkldnn_ops_on_off.py +++ b/test/mkldnn/check_flags_mkldnn_ops_on_off.py @@ -17,9 +17,9 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid.framework import _global_flags -from paddle.fluid.layer_helper import LayerHelper +from paddle import base +from paddle.base.framework import _global_flags +from paddle.base.layer_helper import LayerHelper def check(): @@ -28,8 +28,8 @@ def check(): _global_flags()["FLAGS_use_mkldnn"], ) print( - "check: fluid.get_flags('FLAGS_use_mkldnn')=", - fluid.get_flags(['FLAGS_use_mkldnn']), + "check: base.get_flags('FLAGS_use_mkldnn')=", + 
base.get_flags(['FLAGS_use_mkldnn']), ) print("check: DNNL_VERBOSE=", os.environ['DNNL_VERBOSE']) print( @@ -42,11 +42,11 @@ def check(): ) a_np = np.random.uniform(-2, 2, (10, 20, 30)).astype(np.float32) b_np = np.random.uniform(-5, 5, (10, 20, 30)).astype(np.float32) - helper = LayerHelper(fluid.unique_name.generate("test"), act="relu") + helper = LayerHelper(base.unique_name.generate("test"), act="relu") func = helper.append_activation - with fluid.dygraph.guard(fluid.core.CPUPlace()): - a = fluid.dygraph.to_variable(a_np) - b = fluid.dygraph.to_variable(b_np) + with base.dygraph.guard(base.core.CPUPlace()): + a = base.dygraph.to_variable(a_np) + b = base.dygraph.to_variable(b_np) y = paddle.add(x=a, y=b) y = paddle.matmul(x=y, y=b, transpose_y=True) res1 = func(y) diff --git a/test/mkldnn/check_flags_use_mkldnn.py b/test/mkldnn/check_flags_use_mkldnn.py index 07b4829743cd6..87b590bcd4617 100644 --- a/test/mkldnn/check_flags_use_mkldnn.py +++ b/test/mkldnn/check_flags_use_mkldnn.py @@ -16,9 +16,9 @@ import numpy as np -from paddle import fluid -from paddle.fluid.framework import _global_flags -from paddle.fluid.layer_helper import LayerHelper +from paddle import base +from paddle.base.framework import _global_flags +from paddle.base.layer_helper import LayerHelper def check(): @@ -27,15 +27,15 @@ def check(): _global_flags()["FLAGS_use_mkldnn"], ) print( - "check: fluid.get_flags('FLAGS_use_mkldnn')=", - fluid.get_flags(['FLAGS_use_mkldnn']), + "check: base.get_flags('FLAGS_use_mkldnn')=", + base.get_flags(['FLAGS_use_mkldnn']), ) print("check: DNNL_VERBOSE=", os.environ['DNNL_VERBOSE']) a_np = np.random.uniform(-2, 2, (10, 20, 30)).astype(np.float32) - helper = LayerHelper(fluid.unique_name.generate("test"), act="relu") + helper = LayerHelper(base.unique_name.generate("test"), act="relu") func = helper.append_activation - with fluid.dygraph.guard(fluid.core.CPUPlace()): - a = fluid.dygraph.to_variable(a_np) + with base.dygraph.guard(base.core.CPUPlace()): + a = base.dygraph.to_variable(a_np) res1 = func(a) res2 = np.maximum(a_np, 0) np.testing.assert_array_equal(res1.numpy(), res2) diff --git a/test/mkldnn/mkldnn_op_test.py b/test/mkldnn/mkldnn_op_test.py index 6e4776ab84c24..0a72ab8800dfb 100644 --- a/test/mkldnn/mkldnn_op_test.py +++ b/test/mkldnn/mkldnn_op_test.py @@ -14,8 +14,8 @@ import numpy as np -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core def __assert_close(test_case, tensor, np_array, msg, atol=1e-4): @@ -33,8 +33,8 @@ def check_if_mkldnn_primitives_exist_in_bwd( var_names = list(var_dict.keys()) ground_truth = {name: var_dict[name] for name in var_names} - program = fluid.Program() - with fluid.program_guard(program): + program = base.Program() + with base.program_guard(program): block = program.global_block() for name in ground_truth: block.create_var( @@ -65,7 +65,7 @@ def check_if_mkldnn_primitives_exist_in_bwd( grad_var = block.desc.find_var(arg.encode('ascii')) grad_var.set_dtype(core.VarDesc.VarType.FP32) - exe = fluid.Executor(place) + exe = base.Executor(place) # Do at least 2 iterations for i in range(2): @@ -92,8 +92,8 @@ def check_if_mkldnn_batchnorm_primitives_exist_in_bwd( 'saved_variance', ] ground_truth = {name: var_dict[name] for name in var_names} - program = fluid.Program() - with fluid.program_guard(program): + program = base.Program() + with base.program_guard(program): block = program.global_block() for name in ground_truth: block.create_var( @@ -145,7 +145,7 @@ def 
check_if_mkldnn_batchnorm_primitives_exist_in_bwd( grad_var.set_dtype(core.VarDesc.VarType.FP32) program._sync_with_cpp() - exe = fluid.Executor(place) + exe = base.Executor(place) # Do at least 2 iterations for i in range(2): out = exe.run( diff --git a/test/mkldnn/test_activation_bf16_mkldnn_op.py b/test/mkldnn/test_activation_bf16_mkldnn_op.py index a3ce4b74fcba0..6e002222ae429 100644 --- a/test/mkldnn/test_activation_bf16_mkldnn_op.py +++ b/test/mkldnn/test_activation_bf16_mkldnn_op.py @@ -21,7 +21,7 @@ from test_activation_op import TestActivation from test_gelu_op import gelu -from paddle.fluid import core +from paddle.base import core @OpTestTool.skip_if_not_cpu_bf16() diff --git a/test/mkldnn/test_activation_mkldnn_op.py b/test/mkldnn/test_activation_mkldnn_op.py index fe147e9228f29..6887bd9059edd 100644 --- a/test/mkldnn/test_activation_mkldnn_op.py +++ b/test/mkldnn/test_activation_mkldnn_op.py @@ -45,7 +45,7 @@ import paddle import paddle.nn.functional as F -from paddle.fluid import core +from paddle.base import core class TestMKLDNNReluDim2(TestRelu): @@ -123,7 +123,7 @@ def setUp(self): x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) out = gelu(x, False) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.attrs = {"use_mkldnn": True} @@ -137,7 +137,7 @@ def setUp(self): x = np.random.uniform(-1, 1, []).astype(self.dtype) out = gelu(x, False) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.attrs = {"use_mkldnn": True} @@ -151,7 +151,7 @@ def setUp(self): x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) out = gelu(x, True) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.attrs = {"use_mkldnn": True, "approximate": True} @@ -268,7 +268,7 @@ def setUp(self): x[np.abs(x) < 0.005] = 0.02 out = np.maximum(x, 0) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.attrs = {"use_mkldnn": True} @@ -285,7 +285,7 @@ def setUp(self): x[np.abs(x) < 0.005] = 0.02 out = np.maximum(x, 0.02 * x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.attrs = {"use_mkldnn": True} @@ -310,7 +310,7 @@ def setUp(self): x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(self.dtype) out = gelu(x, False) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.attrs = {"use_mkldnn": True} @@ -324,7 +324,7 @@ def setUp(self): x = np.random.uniform(-1, 1, [2, 4, 3, 5]).astype(self.dtype) out = gelu(x, True) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.attrs = {"use_mkldnn": True, "approximate": True} @@ -435,7 +435,7 @@ def setUp(self): out = ref_hardswish(x, threshold, scale, offset) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.attrs = {"use_mkldnn": True} @@ -452,7 +452,7 @@ def setUp(self): x = np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype(self.dtype) out = x * np.tanh(np.log(1 + np.exp(x))) - self.inputs = {'X': 
OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.attrs = {"use_mkldnn": True} @@ -466,7 +466,7 @@ def setUp(self): x = np.random.uniform(0.1, 1, []).astype(self.dtype) out = x * np.tanh(np.log(1 + np.exp(x))) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.attrs = {"use_mkldnn": True} @@ -501,7 +501,7 @@ def setUp(self): x = np.random.uniform(0.1, 1, [2, 4, 3, 5]).astype(self.dtype) out = 1 / (1 + np.exp(-x)) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.attrs = {"use_mkldnn": True} diff --git a/test/mkldnn/test_batch_norm_mkldnn_op.py b/test/mkldnn/test_batch_norm_mkldnn_op.py index 49703d0d1a4a4..a274990047f17 100644 --- a/test/mkldnn/test_batch_norm_mkldnn_op.py +++ b/test/mkldnn/test_batch_norm_mkldnn_op.py @@ -24,7 +24,7 @@ _reference_training, ) -from paddle.fluid import core +from paddle.base import core _set_use_system_allocator(True) diff --git a/test/mkldnn/test_cast_mkldnn_op.py b/test/mkldnn/test_cast_mkldnn_op.py index 2bf0bb6d70b1a..edf484b8aa9cd 100644 --- a/test/mkldnn/test_cast_mkldnn_op.py +++ b/test/mkldnn/test_cast_mkldnn_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core @unittest.skipIf( diff --git a/test/mkldnn/test_clip_mkldnn_op.py b/test/mkldnn/test_clip_mkldnn_op.py index d484ddd890efd..b7538cb45b18c 100644 --- a/test/mkldnn/test_clip_mkldnn_op.py +++ b/test/mkldnn/test_clip_mkldnn_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, OpTestTool, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core class TestClipOneDNNOp(OpTest): diff --git a/test/mkldnn/test_concat_bf16_mkldnn_op.py b/test/mkldnn/test_concat_bf16_mkldnn_op.py index d2bf99452a832..f62886aca8951 100644 --- a/test/mkldnn/test_concat_bf16_mkldnn_op.py +++ b/test/mkldnn/test_concat_bf16_mkldnn_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 from paddle import enable_static -from paddle.fluid import core +from paddle.base import core @unittest.skipIf( diff --git a/test/mkldnn/test_concat_mkldnn_op.py b/test/mkldnn/test_concat_mkldnn_op.py index a719a81a07610..906bb4c731305 100644 --- a/test/mkldnn/test_concat_mkldnn_op.py +++ b/test/mkldnn/test_concat_mkldnn_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest from paddle import enable_static -from paddle.fluid import core +from paddle.base import core class TestConcatAxis0OneDNNOp(OpTest): diff --git a/test/mkldnn/test_conv2d_bf16_mkldnn_op.py b/test/mkldnn/test_conv2d_bf16_mkldnn_op.py index b1300028368af..acaaa7406471c 100644 --- a/test/mkldnn/test_conv2d_bf16_mkldnn_op.py +++ b/test/mkldnn/test_conv2d_bf16_mkldnn_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, OpTestTool, convert_float_to_uint16 from test_conv2d_op import TestConv2DOp, conv2d_forward_naive -from paddle.fluid import core +from paddle.base import core def conv2d_residual_naive(out, residual): @@ -91,14 +91,14 @@ def setUp(self): self.inputs = { 'Input': self.input, - 'Filter': OpTest.np_dtype_to_fluid_dtype( + 'Filter': OpTest.np_dtype_to_base_dtype( self.filter.astype(self.weight_type) ), } if self.fuse_residual: self.op_type = "fused_conv2d" - self.inputs['ResidualData'] = OpTest.np_dtype_to_fluid_dtype( + 
self.inputs['ResidualData'] = OpTest.np_dtype_to_base_dtype( convert_float_to_uint16(self.input_residual) ) diff --git a/test/mkldnn/test_conv2d_int8_mkldnn_op.py b/test/mkldnn/test_conv2d_int8_mkldnn_op.py index 21519593c6da2..dc2fe60848148 100644 --- a/test/mkldnn/test_conv2d_int8_mkldnn_op.py +++ b/test/mkldnn/test_conv2d_int8_mkldnn_op.py @@ -19,7 +19,7 @@ from eager_op_test import OpTest from test_conv2d_op import TestConv2DOp, conv2d_forward_naive -from paddle.fluid import core +from paddle.base import core def conv2d_forward_refer(input, filter, group, conv_param): @@ -148,11 +148,11 @@ def residual_helper(init_low, init_high, output_): output = np.round(output).astype(self.dsttype) self.inputs = { - 'Input': OpTest.np_dtype_to_fluid_dtype(input.astype(self.srctype)), - 'Filter': OpTest.np_dtype_to_fluid_dtype(filter), + 'Input': OpTest.np_dtype_to_base_dtype(input.astype(self.srctype)), + 'Filter': OpTest.np_dtype_to_base_dtype(filter), } if self.fuse_residual: - self.inputs['ResidualData'] = OpTest.np_dtype_to_fluid_dtype( + self.inputs['ResidualData'] = OpTest.np_dtype_to_base_dtype( input_residual ) diff --git a/test/mkldnn/test_conv2d_mkldnn_op.py b/test/mkldnn/test_conv2d_mkldnn_op.py index cd4b84f443813..9fbd4ea46471c 100644 --- a/test/mkldnn/test_conv2d_mkldnn_op.py +++ b/test/mkldnn/test_conv2d_mkldnn_op.py @@ -70,7 +70,7 @@ def setUp(self): output = conv2d_bias_naive(output, bias) output = output.astype(self.dtype) self.attrs['fuse_bias'] = self.fuse_bias - self.inputs['Bias'] = OpTest.np_dtype_to_fluid_dtype(bias) + self.inputs['Bias'] = OpTest.np_dtype_to_base_dtype(bias) if ( self.fuse_residual_connection @@ -84,7 +84,7 @@ def setUp(self): self.attrs[ 'fuse_residual_connection' ] = self.fuse_residual_connection - self.inputs['ResidualData'] = OpTest.np_dtype_to_fluid_dtype( + self.inputs['ResidualData'] = OpTest.np_dtype_to_base_dtype( input_residual ) diff --git a/test/mkldnn/test_conv2d_transpose_bf16_mkldnn_op.py b/test/mkldnn/test_conv2d_transpose_bf16_mkldnn_op.py index a807a43ff85af..53fa07cb42628 100644 --- a/test/mkldnn/test_conv2d_transpose_bf16_mkldnn_op.py +++ b/test/mkldnn/test_conv2d_transpose_bf16_mkldnn_op.py @@ -19,7 +19,7 @@ from test_conv2d_transpose_op import conv2dtranspose_forward_naive from paddle import enable_static -from paddle.fluid import core +from paddle.base import core def conv2d_bias_naive(out, bias): @@ -113,7 +113,7 @@ def setUp(self): self.inputs = { 'Input': input.view(self.input_type), - 'Filter': OpTest.np_dtype_to_fluid_dtype(filter), + 'Filter': OpTest.np_dtype_to_base_dtype(filter), } if self.fuse_bias and self.bias_size is not None: @@ -121,7 +121,7 @@ def setUp(self): output = conv2d_bias_naive(output, bias) output = output.astype(np.float32) self.attrs['fuse_bias'] = self.fuse_bias - self.inputs['Bias'] = OpTest.np_dtype_to_fluid_dtype(bias) + self.inputs['Bias'] = OpTest.np_dtype_to_base_dtype(bias) if self.fuse_activation == "relu": output = np.maximum(output, 0).astype(np.float32) diff --git a/test/mkldnn/test_conv2d_transpose_mkldnn_op.py b/test/mkldnn/test_conv2d_transpose_mkldnn_op.py index 507071a090d28..6a1a012f4c8fa 100644 --- a/test/mkldnn/test_conv2d_transpose_mkldnn_op.py +++ b/test/mkldnn/test_conv2d_transpose_mkldnn_op.py @@ -71,7 +71,7 @@ def setUp(self): output = conv2d_bias_naive(output, bias) output = output.astype(self.dtype) self.attrs['fuse_bias'] = self.fuse_bias - self.inputs['Bias'] = OpTest.np_dtype_to_fluid_dtype(bias) + self.inputs['Bias'] = OpTest.np_dtype_to_base_dtype(bias) if self.fuse_activation 
== "relu": output = np.maximum(output, 0).astype(self.dtype) diff --git a/test/mkldnn/test_dequantize_mkldnn_op.py b/test/mkldnn/test_dequantize_mkldnn_op.py index 0406b0a9243b0..e2a49d6d4754a 100644 --- a/test/mkldnn/test_dequantize_mkldnn_op.py +++ b/test/mkldnn/test_dequantize_mkldnn_op.py @@ -40,7 +40,7 @@ def setUp(self): def prepare_input_output_bf16(self): output = np.random.random(self.input_size).astype(np.float32) input = convert_float_to_uint16(output) - self.inputs = {'Input': OpTest.np_dtype_to_fluid_dtype(input)} + self.inputs = {'Input': OpTest.np_dtype_to_base_dtype(input)} self.outputs = {'Output': output} def prepare_input_int8(self): @@ -55,7 +55,7 @@ def prepare_input_int8(self): self.data_type ) - self.inputs = {'Input': OpTest.np_dtype_to_fluid_dtype(self.input)} + self.inputs = {'Input': OpTest.np_dtype_to_base_dtype(self.input)} self.attrs = {'Scale': self.scale, 'Shift': self.shift} def prepare_output_int8(self): diff --git a/test/mkldnn/test_elementwise_add_bf16_mkldnn_op.py b/test/mkldnn/test_elementwise_add_bf16_mkldnn_op.py index a2894d09a2613..d9d3bac981ced 100644 --- a/test/mkldnn/test_elementwise_add_bf16_mkldnn_op.py +++ b/test/mkldnn/test_elementwise_add_bf16_mkldnn_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 from paddle import enable_static -from paddle.fluid import core +from paddle.base import core @unittest.skipIf( diff --git a/test/mkldnn/test_elementwise_div_mkldnn_op.py b/test/mkldnn/test_elementwise_div_mkldnn_op.py index 18cac1574e3bf..d6c545cc017f5 100644 --- a/test/mkldnn/test_elementwise_div_mkldnn_op.py +++ b/test/mkldnn/test_elementwise_div_mkldnn_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, OpTestTool, convert_float_to_uint16 from paddle import enable_static -from paddle.fluid import core -from paddle.fluid.framework import _current_expected_place +from paddle.base import core +from paddle.base.framework import _current_expected_place @OpTestTool.skip_if( @@ -34,8 +34,8 @@ def setUp(self): self.init_kernel_type() self.init_axis() self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), + 'X': OpTest.np_dtype_to_base_dtype(self.x), + 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} self.outputs = {'Out': self.out} diff --git a/test/mkldnn/test_elementwise_mul_bf16_mkldnn_op.py b/test/mkldnn/test_elementwise_mul_bf16_mkldnn_op.py index 45743a5ae7ad1..ac1e47dd868d1 100644 --- a/test/mkldnn/test_elementwise_mul_bf16_mkldnn_op.py +++ b/test/mkldnn/test_elementwise_mul_bf16_mkldnn_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 from paddle import enable_static -from paddle.fluid import core +from paddle.base import core @unittest.skipIf( diff --git a/test/mkldnn/test_elementwise_sub_onednn_op.py b/test/mkldnn/test_elementwise_sub_onednn_op.py index fc9b536899e08..1340eac9c92b7 100644 --- a/test/mkldnn/test_elementwise_sub_onednn_op.py +++ b/test/mkldnn/test_elementwise_sub_onednn_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest, OpTestTool, convert_float_to_uint16 from paddle import enable_static -from paddle.fluid import core -from paddle.fluid.framework import _current_expected_place +from paddle.base import core +from paddle.base.framework import _current_expected_place @OpTestTool.skip_if( @@ -34,8 +34,8 @@ def setUp(self): self.init_kernel_type() self.init_axis() self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': 
OpTest.np_dtype_to_fluid_dtype(self.y), + 'X': OpTest.np_dtype_to_base_dtype(self.x), + 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} self.outputs = {'Out': self.out} diff --git a/test/mkldnn/test_expand_v2_mkldnn_op.py b/test/mkldnn/test_expand_v2_mkldnn_op.py index 17374b922e711..1ef44c8b27cb8 100644 --- a/test/mkldnn/test_expand_v2_mkldnn_op.py +++ b/test/mkldnn/test_expand_v2_mkldnn_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, OpTestTool, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core @OpTestTool.skip_if( diff --git a/test/mkldnn/test_fc_bf16_mkldnn_op.py b/test/mkldnn/test_fc_bf16_mkldnn_op.py index 8db2458be1956..7bc726e677693 100644 --- a/test/mkldnn/test_fc_bf16_mkldnn_op.py +++ b/test/mkldnn/test_fc_bf16_mkldnn_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 from paddle import enable_static -from paddle.fluid import core +from paddle.base import core def fully_connected_naive(input, weights, bias_data): diff --git a/test/mkldnn/test_flatten_mkldnn_op.py b/test/mkldnn/test_flatten_mkldnn_op.py index ba1fb8a252f40..b54a3974614f2 100644 --- a/test/mkldnn/test_flatten_mkldnn_op.py +++ b/test/mkldnn/test_flatten_mkldnn_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, OpTestTool, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core @OpTestTool.skip_if_not_cpu_bf16() diff --git a/test/mkldnn/test_fused_vit_attention.py b/test/mkldnn/test_fused_vit_attention.py index c3718886544ef..1de329d7032b8 100644 --- a/test/mkldnn/test_fused_vit_attention.py +++ b/test/mkldnn/test_fused_vit_attention.py @@ -17,7 +17,7 @@ import paddle import paddle.incubate -from paddle.fluid import core +from paddle.base import core paddle.enable_static() np.random.seed(0) @@ -54,7 +54,7 @@ def test_fuse_resenet_unit(): graph = core.Graph(program.desc) core.get_pass("self_attention_fuse_pass").apply(graph) - after_program = paddle.fluid.framework.IrGraph(graph).to_program() + after_program = paddle.base.framework.IrGraph(graph).to_program() exe = paddle.static.Executor(place) exe.run(startup_program) diff --git a/test/mkldnn/test_fusion_gru_bf16_mkldnn_op.py b/test/mkldnn/test_fusion_gru_bf16_mkldnn_op.py index 611edbb64ed6b..0bac634467b5b 100644 --- a/test/mkldnn/test_fusion_gru_bf16_mkldnn_op.py +++ b/test/mkldnn/test_fusion_gru_bf16_mkldnn_op.py @@ -19,7 +19,7 @@ from test_fusion_gru_op import fusion_gru from test_fusion_lstm_op import ACTIVATION -from paddle.fluid import core +from paddle.base import core @unittest.skipIf( diff --git a/test/mkldnn/test_fusion_lstm_bf16_mkldnn_op.py b/test/mkldnn/test_fusion_lstm_bf16_mkldnn_op.py index bf1e3215fa195..4ccd419d40731 100644 --- a/test/mkldnn/test_fusion_lstm_bf16_mkldnn_op.py +++ b/test/mkldnn/test_fusion_lstm_bf16_mkldnn_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 from test_fusion_lstm_op import ACTIVATION, fusion_lstm -from paddle.fluid import core +from paddle.base import core @unittest.skipIf( diff --git a/test/mkldnn/test_layer_norm_bf16_mkldnn_op.py b/test/mkldnn/test_layer_norm_bf16_mkldnn_op.py index d92bb83ce084f..f3bd7f731b361 100644 --- a/test/mkldnn/test_layer_norm_bf16_mkldnn_op.py +++ b/test/mkldnn/test_layer_norm_bf16_mkldnn_op.py @@ -24,8 +24,8 @@ _reference_layer_norm_naive, ) -from paddle import enable_static, fluid -from paddle.fluid import core +from paddle import enable_static, base +from 
paddle.base import core np.random.random(123) @@ -75,8 +75,8 @@ def check_forward( var_names.append('bias') ground_truth = {name: var_dict[name] for name in var_names} - program = fluid.Program() - with fluid.program_guard(program): + program = base.Program() + with base.program_guard(program): block = program.global_block() # scale and bias are fp32 and other vars are of bf16 @@ -115,7 +115,7 @@ def check_forward( }, ) - exe = fluid.Executor(core.CPUPlace()) + exe = base.Executor(core.CPUPlace()) input_list = ['x_bf16'] if with_scale_bias: diff --git a/test/mkldnn/test_layer_norm_mkldnn_op.py b/test/mkldnn/test_layer_norm_mkldnn_op.py index b58677fa0a731..9db2b1966607d 100644 --- a/test/mkldnn/test_layer_norm_mkldnn_op.py +++ b/test/mkldnn/test_layer_norm_mkldnn_op.py @@ -20,8 +20,8 @@ import numpy as np from eager_op_test import OpTestTool, _set_use_system_allocator -from paddle import enable_static, fluid -from paddle.fluid import core +from paddle import enable_static, base +from paddle.base import core np.random.random(123) @@ -92,8 +92,8 @@ def check_forward( var_names.append('bias') ground_truth = {name: var_dict[name] for name in var_names} - program = fluid.Program() - with fluid.program_guard(program): + program = base.Program() + with base.program_guard(program): block = program.global_block() for name in ground_truth: @@ -122,7 +122,7 @@ def check_forward( }, ) - exe = fluid.Executor(core.CPUPlace()) + exe = base.Executor(core.CPUPlace()) input_list = ['x'] if with_scale_bias: diff --git a/test/mkldnn/test_log_softmax_mkldnn_op.py b/test/mkldnn/test_log_softmax_mkldnn_op.py index 98898a7ae6f23..7bed35162cec6 100644 --- a/test/mkldnn/test_log_softmax_mkldnn_op.py +++ b/test/mkldnn/test_log_softmax_mkldnn_op.py @@ -19,7 +19,7 @@ from test_log_softmax import ref_log_softmax import paddle -from paddle.fluid import core +from paddle.base import core class TestLogSoftmaxOneDNNOp(OpTest): diff --git a/test/mkldnn/test_matmul_bf16_mkldnn_op.py b/test/mkldnn/test_matmul_bf16_mkldnn_op.py index 19578e7eb3ac0..74b55b8db78bb 100644 --- a/test/mkldnn/test_matmul_bf16_mkldnn_op.py +++ b/test/mkldnn/test_matmul_bf16_mkldnn_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 from paddle import enable_static -from paddle.fluid import core +from paddle.base import core @unittest.skipIf( diff --git a/test/mkldnn/test_matmul_v2_mkldnn_op.py b/test/mkldnn/test_matmul_v2_mkldnn_op.py index 51c63970b7fed..97dab61f9dc0a 100644 --- a/test/mkldnn/test_matmul_v2_mkldnn_op.py +++ b/test/mkldnn/test_matmul_v2_mkldnn_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, OpTestTool, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core def reference_matmul(X, Y, transpose_x=False, transpose_y=False): diff --git a/test/mkldnn/test_mul_int8_mkldnn_op.py b/test/mkldnn/test_mul_int8_mkldnn_op.py index 575ed8e7b26a6..4d3fd1afca472 100644 --- a/test/mkldnn/test_mul_int8_mkldnn_op.py +++ b/test/mkldnn/test_mul_int8_mkldnn_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, skip_check_grad_ci import paddle -from paddle.fluid import core +from paddle.base import core ''' test case for s8 * s8 diff --git a/test/mkldnn/test_mul_mkldnn_op.py b/test/mkldnn/test_mul_mkldnn_op.py index f8a09633dc3be..948addfc17751 100644 --- a/test/mkldnn/test_mul_mkldnn_op.py +++ b/test/mkldnn/test_mul_mkldnn_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, OpTestTool, convert_float_to_uint16 import paddle -from paddle.fluid import core +from 
paddle.base import core @OpTestTool.skip_if_not_cpu_bf16() diff --git a/test/mkldnn/test_onnx_format_quantization_mobilenetv1.py b/test/mkldnn/test_onnx_format_quantization_mobilenetv1.py index bf5c55ae93869..71e484f87569b 100644 --- a/test/mkldnn/test_onnx_format_quantization_mobilenetv1.py +++ b/test/mkldnn/test_onnx_format_quantization_mobilenetv1.py @@ -22,7 +22,7 @@ from PIL import Image import paddle -from paddle import fluid +from paddle import base from paddle.dataset.common import download from paddle.static.quantization import PostTrainingQuantization @@ -271,8 +271,8 @@ def generate_quantized_model( is_optimize_model=False, onnx_format=False, ): - place = fluid.CPUPlace() - exe = fluid.Executor(place) + place = base.CPUPlace() + exe = base.Executor(place) val_reader = val() ptq = PostTrainingQuantization( diff --git a/test/mkldnn/test_pool2d_bf16_mkldnn_op.py b/test/mkldnn/test_pool2d_bf16_mkldnn_op.py index 527c868999df2..0a1b04d157a9b 100644 --- a/test/mkldnn/test_pool2d_bf16_mkldnn_op.py +++ b/test/mkldnn/test_pool2d_bf16_mkldnn_op.py @@ -25,7 +25,7 @@ ) from paddle import enable_static -from paddle.fluid import core +from paddle.base import core def pool2d_backward_naive( diff --git a/test/mkldnn/test_pool2d_int8_mkldnn_op.py b/test/mkldnn/test_pool2d_int8_mkldnn_op.py index 4736895f99b49..47ed3728150be 100644 --- a/test/mkldnn/test_pool2d_int8_mkldnn_op.py +++ b/test/mkldnn/test_pool2d_int8_mkldnn_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest from test_pool2d_op import TestPool2D_Op, max_pool2D_forward_naive -from paddle.fluid import core +from paddle.base import core class TestPool2DMKLDNNInt8_Op(TestPool2D_Op): @@ -48,7 +48,7 @@ def setUp(self): self.dtype, ) ).astype(self.dtype) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(input)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(input)} self.outputs = {'Out': output} def test_check_output(self): diff --git a/test/mkldnn/test_prelu_mkldnn_op.py b/test/mkldnn/test_prelu_mkldnn_op.py index c2079f0e4d9ce..a3264c646bc3e 100644 --- a/test/mkldnn/test_prelu_mkldnn_op.py +++ b/test/mkldnn/test_prelu_mkldnn_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, OpTestTool, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core def ref_prelu(x, weight, mode): diff --git a/test/mkldnn/test_quantize_mkldnn_op.py b/test/mkldnn/test_quantize_mkldnn_op.py index c7fedaffa8636..22324e4b035b3 100644 --- a/test/mkldnn/test_quantize_mkldnn_op.py +++ b/test/mkldnn/test_quantize_mkldnn_op.py @@ -48,7 +48,7 @@ def prepare_input(self): 'float32' ) - self.inputs = {'Input': OpTest.np_dtype_to_fluid_dtype(self.input)} + self.inputs = {'Input': OpTest.np_dtype_to_base_dtype(self.input)} self.attrs = { 'Scale': self.scale, 'Shift': self.shift, diff --git a/test/mkldnn/test_reduce_bf16_mkldnn_op.py b/test/mkldnn/test_reduce_bf16_mkldnn_op.py index 5001a03372e75..5407efd05a48d 100644 --- a/test/mkldnn/test_reduce_bf16_mkldnn_op.py +++ b/test/mkldnn/test_reduce_bf16_mkldnn_op.py @@ -23,7 +23,7 @@ ) import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/mkldnn/test_requantize_mkldnn_op.py b/test/mkldnn/test_requantize_mkldnn_op.py index 52f87b94cf9e5..fbe5d075422d1 100644 --- a/test/mkldnn/test_requantize_mkldnn_op.py +++ b/test/mkldnn/test_requantize_mkldnn_op.py @@ -18,8 +18,8 @@ from eager_op_test import OpTest from mkldnn_op_test import format_reorder -from paddle import fluid -from paddle.fluid import core +from paddle 
import base +from paddle.base import core class TestReQuantizeOp(OpTest): @@ -53,7 +53,7 @@ def prepare_input(self): self.input_data_type ) - self.inputs = {'Input': OpTest.np_dtype_to_fluid_dtype(self.input)} + self.inputs = {'Input': OpTest.np_dtype_to_base_dtype(self.input)} self.attrs = { 'Scale_in': self.scale_in, 'Scale_out': self.scale_out, @@ -339,8 +339,8 @@ def test_check_output(self): "input": self.input, "output": self.output, } - program = fluid.Program() - with fluid.program_guard(program): + program = base.Program() + with base.program_guard(program): block = program.global_block() for name in variables: block.create_var( @@ -360,7 +360,7 @@ def test_check_output(self): }, ) place = core.CPUPlace() - exe = fluid.Executor(place) + exe = base.Executor(place) for i in range(2): out = exe.run( program, diff --git a/test/mkldnn/test_reshape_bf16_op.py b/test/mkldnn/test_reshape_bf16_op.py index d2350b9d01386..76b509219b105 100644 --- a/test/mkldnn/test_reshape_bf16_op.py +++ b/test/mkldnn/test_reshape_bf16_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 from paddle import enable_static -from paddle.fluid import core +from paddle.base import core @unittest.skipIf( diff --git a/test/mkldnn/test_reshape_mkldnn_op.py b/test/mkldnn/test_reshape_mkldnn_op.py index b62aef0c1deff..ca06b5fd4c748 100644 --- a/test/mkldnn/test_reshape_mkldnn_op.py +++ b/test/mkldnn/test_reshape_mkldnn_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, OpTestTool, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/mkldnn/test_scale_bf16_mkldnn_op.py b/test/mkldnn/test_scale_bf16_mkldnn_op.py index b1d3aa09510a9..a50e47a712cb4 100644 --- a/test/mkldnn/test_scale_bf16_mkldnn_op.py +++ b/test/mkldnn/test_scale_bf16_mkldnn_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core @unittest.skipIf( diff --git a/test/mkldnn/test_shape_mkldnn_op.py b/test/mkldnn/test_shape_mkldnn_op.py index 14c869c4cb36f..4325062628e95 100644 --- a/test/mkldnn/test_shape_mkldnn_op.py +++ b/test/mkldnn/test_shape_mkldnn_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, OpTestTool import paddle -from paddle.fluid import core +from paddle.base import core class TestShape3DFP32OneDNNOp(OpTest): diff --git a/test/mkldnn/test_shuffle_channel_mkldnn_op.py b/test/mkldnn/test_shuffle_channel_mkldnn_op.py index ea8640ebbc520..d363bef88606b 100644 --- a/test/mkldnn/test_shuffle_channel_mkldnn_op.py +++ b/test/mkldnn/test_shuffle_channel_mkldnn_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, OpTestTool import paddle -from paddle.fluid import core +from paddle.base import core @OpTestTool.skip_if_not_cpu_bf16() diff --git a/test/mkldnn/test_slice_mkldnn_op.py b/test/mkldnn/test_slice_mkldnn_op.py index b18313b941238..b733a11ca912c 100644 --- a/test/mkldnn/test_slice_mkldnn_op.py +++ b/test/mkldnn/test_slice_mkldnn_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, OpTestTool, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core @OpTestTool.skip_if( diff --git a/test/mkldnn/test_softmax_bf16_mkldnn_op.py b/test/mkldnn/test_softmax_bf16_mkldnn_op.py index a7fdc1de413f1..2e04100ec31ad 100644 --- a/test/mkldnn/test_softmax_bf16_mkldnn_op.py +++ b/test/mkldnn/test_softmax_bf16_mkldnn_op.py @@ -26,7 +26,7 @@ ) from paddle import enable_static -from 
paddle.fluid import core +from paddle.base import core def stable_softmax(x): diff --git a/test/mkldnn/test_softmax_mkldnn_op.py b/test/mkldnn/test_softmax_mkldnn_op.py index 2237161deba8b..777dfb0cb02dc 100644 --- a/test/mkldnn/test_softmax_mkldnn_op.py +++ b/test/mkldnn/test_softmax_mkldnn_op.py @@ -27,7 +27,7 @@ TestSoftmaxOp_ZeroDim1, ) -from paddle.fluid import core +from paddle.base import core def stable_softmax(x): @@ -56,7 +56,7 @@ def setUp(self): x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) out = np.apply_along_axis(stable_softmax, self.axis, x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.attrs = { 'axis': self.axis, diff --git a/test/mkldnn/test_split_bf16_mkldnn_op.py b/test/mkldnn/test_split_bf16_mkldnn_op.py index f1f79192c01a3..81e25ce43b9a9 100644 --- a/test/mkldnn/test_split_bf16_mkldnn_op.py +++ b/test/mkldnn/test_split_bf16_mkldnn_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest import paddle -from paddle.fluid import core +from paddle.base import core @unittest.skipIf( diff --git a/test/mkldnn/test_squeeze2_mkldnn_op.py b/test/mkldnn/test_squeeze2_mkldnn_op.py index fd02578e8a1a2..b1efad9690bb6 100644 --- a/test/mkldnn/test_squeeze2_mkldnn_op.py +++ b/test/mkldnn/test_squeeze2_mkldnn_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, OpTestTool, convert_float_to_uint16 import paddle -from paddle.fluid import core +from paddle.base import core @OpTestTool.skip_if( diff --git a/test/mkldnn/test_stack_mkldnn_op.py b/test/mkldnn/test_stack_mkldnn_op.py index 4cc93e502b405..7a04ea38be9a5 100644 --- a/test/mkldnn/test_stack_mkldnn_op.py +++ b/test/mkldnn/test_stack_mkldnn_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, OpTestTool import paddle -from paddle.fluid import core +from paddle.base import core @OpTestTool.skip_if_not_cpu() diff --git a/test/mkldnn/test_sum_bf16_mkldnn_op.py b/test/mkldnn/test_sum_bf16_mkldnn_op.py index dc570c7f4f93c..af622b8cff5d4 100644 --- a/test/mkldnn/test_sum_bf16_mkldnn_op.py +++ b/test/mkldnn/test_sum_bf16_mkldnn_op.py @@ -19,7 +19,7 @@ from test_sum_op import TestSumOp from paddle import enable_static -from paddle.fluid import core +from paddle.base import core @unittest.skipIf( diff --git a/test/mkldnn/test_sum_mkldnn_op.py b/test/mkldnn/test_sum_mkldnn_op.py index 030158882a2b1..6750f1a79c7ce 100644 --- a/test/mkldnn/test_sum_mkldnn_op.py +++ b/test/mkldnn/test_sum_mkldnn_op.py @@ -18,7 +18,7 @@ from op import Operator from test_sum_op import TestSumOp -from paddle.fluid import core +from paddle.base import core class TestSumMKLDNN(TestSumOp): diff --git a/test/mkldnn/test_transpose_bf16_mkldnn_op.py b/test/mkldnn/test_transpose_bf16_mkldnn_op.py index d8a9519bf788d..d0817ab103db6 100644 --- a/test/mkldnn/test_transpose_bf16_mkldnn_op.py +++ b/test/mkldnn/test_transpose_bf16_mkldnn_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest, convert_float_to_uint16 from paddle import enable_static -from paddle.fluid import core +from paddle.base import core @unittest.skipIf( diff --git a/test/mkldnn/test_transpose_int8_mkldnn_op.py b/test/mkldnn/test_transpose_int8_mkldnn_op.py index 30df0eafb0415..282c6dd94acea 100644 --- a/test/mkldnn/test_transpose_int8_mkldnn_op.py +++ b/test/mkldnn/test_transpose_int8_mkldnn_op.py @@ -18,7 +18,7 @@ from eager_op_test import OpTest from mkldnn_op_test import format_reorder -from paddle.fluid import core +from paddle.base import core class TestTransposeOp(OpTest): 
diff --git a/test/prim/composite_ops/test_composite_batch_norm.py b/test/prim/composite_ops/test_composite_batch_norm.py index 9a25c6dc1d6d4..c74097b6e2313 100644 --- a/test/prim/composite_ops/test_composite_batch_norm.py +++ b/test/prim/composite_ops/test_composite_batch_norm.py @@ -20,7 +20,7 @@ import paddle import paddle.nn.functional as F from paddle import nn -from paddle.fluid import core, framework +from paddle.base import core, framework from paddle.incubate.autograd import primapi from paddle.nn import BatchNorm from paddle.tensor import ones # noqa: F401 diff --git a/test/prim/composite_ops/test_composite_batch_norm_grad.py b/test/prim/composite_ops/test_composite_batch_norm_grad.py index f8699f6c84c31..f23fe76456994 100644 --- a/test/prim/composite_ops/test_composite_batch_norm_grad.py +++ b/test/prim/composite_ops/test_composite_batch_norm_grad.py @@ -19,7 +19,7 @@ import paddle import paddle.nn.functional as F -from paddle.fluid import core +from paddle.base import core from paddle.incubate.autograd import primapi np.random.seed(2023) diff --git a/test/prim/composite_ops/test_composite_dropout.py b/test/prim/composite_ops/test_composite_dropout.py index d1dabef0d04ff..1d835f78b2037 100644 --- a/test/prim/composite_ops/test_composite_dropout.py +++ b/test/prim/composite_ops/test_composite_dropout.py @@ -18,7 +18,7 @@ import parameterized as param import paddle -from paddle.fluid import core +from paddle.base import core from paddle.incubate.autograd import primapi np.random.seed(2023) diff --git a/test/prim/composite_ops/test_composite_gelu.py b/test/prim/composite_ops/test_composite_gelu.py index ef214c9a81f7f..89c721be8a504 100644 --- a/test/prim/composite_ops/test_composite_gelu.py +++ b/test/prim/composite_ops/test_composite_gelu.py @@ -21,7 +21,7 @@ import paddle import paddle.nn.functional as F -from paddle.fluid import core +from paddle.base import core from paddle.incubate.autograd import primapi diff --git a/test/prim/composite_ops/test_composite_gelu_grad.py b/test/prim/composite_ops/test_composite_gelu_grad.py index ca195fffce2a1..c87e985ebe8ff 100644 --- a/test/prim/composite_ops/test_composite_gelu_grad.py +++ b/test/prim/composite_ops/test_composite_gelu_grad.py @@ -21,7 +21,7 @@ import paddle import paddle.nn.functional as F -from paddle.fluid import core +from paddle.base import core from paddle.incubate.autograd import primapi diff --git a/test/prim/composite_ops/test_composite_layer_norm.py b/test/prim/composite_ops/test_composite_layer_norm.py index 6bfefc608eaf8..1f3fc2ccf31cd 100644 --- a/test/prim/composite_ops/test_composite_layer_norm.py +++ b/test/prim/composite_ops/test_composite_layer_norm.py @@ -19,8 +19,8 @@ import paddle from paddle import _C_ops -from paddle.fluid import core, framework -from paddle.fluid.layer_helper import LayerHelper +from paddle.base import core, framework +from paddle.base.layer_helper import LayerHelper from paddle.framework import in_dynamic_mode from paddle.incubate.autograd import primapi from paddle.nn import LayerNorm @@ -70,7 +70,7 @@ def layer_norm_wrapper( # create output helper = LayerHelper('layer_norm', **locals()) - from paddle.fluid.data_feeder import convert_dtype + from paddle.base.data_feeder import convert_dtype param_dtype = ( x.dtype if convert_dtype(x.dtype) != 'float16' else 'float32' diff --git a/test/prim/composite_ops/test_composite_layer_norm_grad.py b/test/prim/composite_ops/test_composite_layer_norm_grad.py index 61f34b4a311fd..06940b82fc704 100644 --- 
a/test/prim/composite_ops/test_composite_layer_norm_grad.py +++ b/test/prim/composite_ops/test_composite_layer_norm_grad.py @@ -21,7 +21,7 @@ import paddle import paddle.nn.functional as F -from paddle.fluid import core +from paddle.base import core from paddle.incubate.autograd import primapi TOLERANCE_NUMPY = { diff --git a/test/prim/composite_ops/test_composite_mean.py b/test/prim/composite_ops/test_composite_mean.py index a1ad278b9e536..01d8feb72ba90 100644 --- a/test/prim/composite_ops/test_composite_mean.py +++ b/test/prim/composite_ops/test_composite_mean.py @@ -19,7 +19,7 @@ import paddle from paddle import tensor -from paddle.fluid import core +from paddle.base import core from paddle.incubate.autograd import primapi diff --git a/test/prim/composite_ops/test_composite_mean_grad.py b/test/prim/composite_ops/test_composite_mean_grad.py index ae1bfbc56af08..ba952ff3258bd 100644 --- a/test/prim/composite_ops/test_composite_mean_grad.py +++ b/test/prim/composite_ops/test_composite_mean_grad.py @@ -19,7 +19,7 @@ import paddle from paddle import tensor -from paddle.fluid import core +from paddle.base import core from paddle.incubate.autograd import primapi diff --git a/test/prim/composite_ops/test_composite_relu_custom_vjp.py b/test/prim/composite_ops/test_composite_relu_custom_vjp.py index 644d4956573de..af99b78d36d1e 100644 --- a/test/prim/composite_ops/test_composite_relu_custom_vjp.py +++ b/test/prim/composite_ops/test_composite_relu_custom_vjp.py @@ -19,7 +19,7 @@ import paddle import paddle.nn.functional as F -from paddle.fluid import core +from paddle.base import core def generate_data(shape, dtype="float32"): diff --git a/test/prim/composite_ops/test_composite_softmax.py b/test/prim/composite_ops/test_composite_softmax.py index a9cc804a1a4ec..ccd918f045b57 100644 --- a/test/prim/composite_ops/test_composite_softmax.py +++ b/test/prim/composite_ops/test_composite_softmax.py @@ -19,7 +19,7 @@ import paddle import paddle.nn.functional as F -from paddle.fluid import core, framework +from paddle.base import core, framework from paddle.incubate.autograd import primapi diff --git a/test/prim/composite_ops/test_composite_softmax_custom_vjp.py b/test/prim/composite_ops/test_composite_softmax_custom_vjp.py index 97bf177f7e296..d89574f2d521f 100644 --- a/test/prim/composite_ops/test_composite_softmax_custom_vjp.py +++ b/test/prim/composite_ops/test_composite_softmax_custom_vjp.py @@ -19,7 +19,7 @@ import paddle import paddle.nn.functional as F -from paddle.fluid import core +from paddle.base import core def generate_data(shape, dtype="float32"): diff --git a/test/prim/composite_ops/test_composite_softmax_grad.py b/test/prim/composite_ops/test_composite_softmax_grad.py index 3e491984955f0..bf228b569a9c3 100644 --- a/test/prim/composite_ops/test_composite_softmax_grad.py +++ b/test/prim/composite_ops/test_composite_softmax_grad.py @@ -19,7 +19,7 @@ import paddle import paddle.nn.functional as F -from paddle.fluid import core +from paddle.base import core from paddle.incubate.autograd import primapi diff --git a/test/prim/model/bert.py b/test/prim/model/bert.py index 227d0cdcaf504..2b0805c6123e2 100644 --- a/test/prim/model/bert.py +++ b/test/prim/model/bert.py @@ -22,7 +22,7 @@ import paddle.nn.functional as F from paddle import Tensor, nn from paddle.distributed.fleet.utils import recompute -from paddle.fluid.data_feeder import convert_dtype +from paddle.base.data_feeder import convert_dtype from paddle.io import DataLoader, Dataset from paddle.nn import MultiHeadAttention diff --git 
a/test/prim/model/test_bert_cinn.py b/test/prim/model/test_bert_cinn.py index 9651859a48b49..c1bb108f58fee 100644 --- a/test/prim/model/test_bert_cinn.py +++ b/test/prim/model/test_bert_cinn.py @@ -20,9 +20,9 @@ from bert import Bert, BertPretrainingCriterion, create_pretraining_dataset import paddle -from paddle import fluid +from paddle import base from paddle.dataset.common import DATA_HOME, download -from paddle.fluid import core +from paddle.base import core SEED = 2023 BATCH_SIZE = 2 @@ -56,7 +56,7 @@ def train(to_static, enable_prim, enable_cinn): paddle.set_device('gpu') else: paddle.set_device('cpu') - fluid.core._set_prim_all_enabled(enable_prim) + base.core._set_prim_all_enabled(enable_prim) np.random.seed(SEED) paddle.seed(SEED) diff --git a/test/prim/model/test_bert_prim.py b/test/prim/model/test_bert_prim.py index 2c55eeba8c9ff..22e3cf856ae85 100644 --- a/test/prim/model/test_bert_prim.py +++ b/test/prim/model/test_bert_prim.py @@ -20,9 +20,9 @@ from bert import Bert, BertPretrainingCriterion, create_pretraining_dataset import paddle -from paddle import fluid +from paddle import base from paddle.dataset.common import DATA_HOME, download -from paddle.fluid import core +from paddle.base import core SEED = 2023 BATCH_SIZE = 2 @@ -55,7 +55,7 @@ def train(to_static, enable_prim, enable_cinn): paddle.set_device('gpu') else: paddle.set_device('cpu') - fluid.core._set_prim_all_enabled(enable_prim) + base.core._set_prim_all_enabled(enable_prim) np.random.seed(SEED) paddle.seed(SEED) diff --git a/test/prim/model/test_bert_prim_cinn.py b/test/prim/model/test_bert_prim_cinn.py index 3a8634fbdc099..21dd6c202baac 100644 --- a/test/prim/model/test_bert_prim_cinn.py +++ b/test/prim/model/test_bert_prim_cinn.py @@ -20,9 +20,9 @@ from bert import Bert, BertPretrainingCriterion, create_pretraining_dataset import paddle -from paddle import fluid +from paddle import base from paddle.dataset.common import DATA_HOME, download -from paddle.fluid import core +from paddle.base import core SEED = 2023 BATCH_SIZE = 2 @@ -56,7 +56,7 @@ def train(to_static, enable_prim, enable_cinn): paddle.set_device('gpu') else: paddle.set_device('cpu') - fluid.core._set_prim_all_enabled(enable_prim) + base.core._set_prim_all_enabled(enable_prim) np.random.seed(SEED) paddle.seed(SEED) diff --git a/test/prim/model/test_comp_model_simple_net.py b/test/prim/model/test_comp_model_simple_net.py index 27b300e9afd44..e9b5dcf2f7fcb 100644 --- a/test/prim/model/test_comp_model_simple_net.py +++ b/test/prim/model/test_comp_model_simple_net.py @@ -18,7 +18,7 @@ import parameterized as param import paddle -from paddle.fluid import core, framework +from paddle.base import core, framework @param.parameterized_class( diff --git a/test/prim/model/test_prim_simplenet_cinn.py b/test/prim/model/test_prim_simplenet_cinn.py index 8a4a0861b0d86..6482e849560e0 100644 --- a/test/prim/model/test_prim_simplenet_cinn.py +++ b/test/prim/model/test_prim_simplenet_cinn.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core from paddle.nn import BatchNorm np.random.seed(2023) diff --git a/test/prim/model/test_resnet_cinn.py b/test/prim/model/test_resnet_cinn.py index b2d6004aa2f7b..ef932603f8a58 100644 --- a/test/prim/model/test_resnet_cinn.py +++ b/test/prim/model/test_resnet_cinn.py @@ -18,8 +18,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.vision.models import resnet50 SEED 
= 2020 @@ -42,7 +42,7 @@ # 8.438933372497559, # 10.305074691772461, -# note: Version 2.0 momentum is fused to OP when L2Decay is available, and the results are different from the fluid version. +# note: Version 2.0 momentum is fused to OP when L2Decay is available, and the results are different from the base version. # The results in ci as as follows: DY2ST_CINN_GT = [ 5.847333908081055, @@ -170,7 +170,7 @@ def train(to_static, enable_prim, enable_cinn): np.random.seed(SEED) paddle.seed(SEED) paddle.framework.random._manual_program_seed(SEED) - fluid.core._set_prim_all_enabled(enable_prim) + base.core._set_prim_all_enabled(enable_prim) dataset = TransedFlowerDataSet( reader_decorator(paddle.dataset.flowers.train(use_xmap=False)), diff --git a/test/prim/model/test_resnet_prim.py b/test/prim/model/test_resnet_prim.py index c423702a31db0..de81f2b78b650 100644 --- a/test/prim/model/test_resnet_prim.py +++ b/test/prim/model/test_resnet_prim.py @@ -18,8 +18,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.vision.models import resnet50 SEED = 2020 @@ -43,7 +43,7 @@ # 10.256929397583008, # ] -# note: Version 2.0 momentum is fused to OP when L2Decay is available, and the results are different from the fluid version. +# note: Version 2.0 momentum is fused to OP when L2Decay is available, and the results are different from the base version. # The results in ci as as follows: DY2ST_PRIM_GT = [ 5.847333908081055, @@ -171,7 +171,7 @@ def train(to_static, enable_prim, enable_cinn): np.random.seed(SEED) paddle.seed(SEED) paddle.framework.random._manual_program_seed(SEED) - fluid.core._set_prim_all_enabled(enable_prim) + base.core._set_prim_all_enabled(enable_prim) dataset = TransedFlowerDataSet( reader_decorator(paddle.dataset.flowers.train(use_xmap=False)), diff --git a/test/prim/model/test_resnet_prim_cinn.py b/test/prim/model/test_resnet_prim_cinn.py index d3ad08e80c51e..933da8fcf105c 100644 --- a/test/prim/model/test_resnet_prim_cinn.py +++ b/test/prim/model/test_resnet_prim_cinn.py @@ -18,8 +18,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.vision.models import resnet50 SEED = 2020 @@ -43,7 +43,7 @@ # 9.919631958007812, # ] -# note: Version 2.0 momentum is fused to OP when L2Decay is available, and the results are different from the fluid version. +# note: Version 2.0 momentum is fused to OP when L2Decay is available, and the results are different from the base version. 
# The results in ci as as follows: DY2ST_PRIM_CINN_GT = [ 5.847333908081055, @@ -171,7 +171,7 @@ def train(to_static, enable_prim, enable_cinn): np.random.seed(SEED) paddle.seed(SEED) paddle.framework.random._manual_program_seed(SEED) - fluid.core._set_prim_all_enabled(enable_prim) + base.core._set_prim_all_enabled(enable_prim) dataset = TransedFlowerDataSet( reader_decorator(paddle.dataset.flowers.train(use_xmap=False)), diff --git a/test/prim/prim/flags/test_prim_flags.py b/test/prim/prim/flags/test_prim_flags.py index db736e95a65ac..610d8acdd6315 100644 --- a/test/prim/prim/flags/test_prim_flags.py +++ b/test/prim/prim/flags/test_prim_flags.py @@ -19,7 +19,7 @@ import paddle import paddle.nn.functional as F -from paddle.framework import core +from paddle.base import core from paddle.incubate.autograd import primapi diff --git a/test/prim/prim/flags/test_prim_flags_case.py b/test/prim/prim/flags/test_prim_flags_case.py index 30fe2af5621f9..d244aca1faca2 100644 --- a/test/prim/prim/flags/test_prim_flags_case.py +++ b/test/prim/prim/flags/test_prim_flags_case.py @@ -17,7 +17,7 @@ import paddle import paddle.nn.functional as F -from paddle.fluid import core +from paddle.base import core def apply_to_static(net, use_cinn): diff --git a/test/prim/prim/vjp/eager/test_comp_eager_add_grad.py b/test/prim/prim/vjp/eager/test_comp_eager_add_grad.py index b50e49a3e4443..6348514109801 100644 --- a/test/prim/prim/vjp/eager/test_comp_eager_add_grad.py +++ b/test/prim/prim/vjp/eager/test_comp_eager_add_grad.py @@ -18,7 +18,7 @@ import parameterized as param import paddle -from paddle.fluid import core +from paddle.base import core core.set_prim_eager_enabled(True) diff --git a/test/prim/prim/vjp/eager/test_comp_eager_batch_norm_grad.py b/test/prim/prim/vjp/eager/test_comp_eager_batch_norm_grad.py index 5fa9599097398..720653b3d0240 100644 --- a/test/prim/prim/vjp/eager/test_comp_eager_batch_norm_grad.py +++ b/test/prim/prim/vjp/eager/test_comp_eager_batch_norm_grad.py @@ -18,7 +18,7 @@ import paddle import paddle.nn.functional as F -from paddle.fluid import core +from paddle.base import core np.random.seed(2023) diff --git a/test/prim/prim/vjp/eager/test_comp_eager_cast_grad.py b/test/prim/prim/vjp/eager/test_comp_eager_cast_grad.py index b8c47466e8584..6547352b2b001 100644 --- a/test/prim/prim/vjp/eager/test_comp_eager_cast_grad.py +++ b/test/prim/prim/vjp/eager/test_comp_eager_cast_grad.py @@ -19,7 +19,7 @@ import parameterized as param import paddle -from paddle.fluid import core +from paddle.base import core @param.parameterized_class( @@ -67,7 +67,7 @@ def desired(primal, cotangent): actual = actual(self.primal, self.cotangent) desired = desired(self.primal, self.cotangent) - from paddle.fluid.data_feeder import _PADDLE_DTYPE_2_NUMPY_DTYPE + from paddle.base.data_feeder import _PADDLE_DTYPE_2_NUMPY_DTYPE self.assertEqual( _PADDLE_DTYPE_2_NUMPY_DTYPE[actual[0].dtype], desired.dtype diff --git a/test/prim/prim/vjp/eager/test_comp_eager_div_grad.py b/test/prim/prim/vjp/eager/test_comp_eager_div_grad.py index 6546776a20774..e838bf2a5f466 100644 --- a/test/prim/prim/vjp/eager/test_comp_eager_div_grad.py +++ b/test/prim/prim/vjp/eager/test_comp_eager_div_grad.py @@ -18,7 +18,7 @@ import parameterized as param import paddle -from paddle.fluid import core +from paddle.base import core core.set_prim_eager_enabled(True) diff --git a/test/prim/prim/vjp/eager/test_comp_eager_exp_grad.py b/test/prim/prim/vjp/eager/test_comp_eager_exp_grad.py index 172a37956c883..e7cb5f9fd002d 100644 --- 
a/test/prim/prim/vjp/eager/test_comp_eager_exp_grad.py +++ b/test/prim/prim/vjp/eager/test_comp_eager_exp_grad.py @@ -20,7 +20,7 @@ import parameterized as param import paddle -from paddle.fluid import core +from paddle.base import core @param.parameterized_class( diff --git a/test/prim/prim/vjp/eager/test_comp_eager_expand_grad.py b/test/prim/prim/vjp/eager/test_comp_eager_expand_grad.py index 1991bd06f0cf4..e0dc1736b21fa 100644 --- a/test/prim/prim/vjp/eager/test_comp_eager_expand_grad.py +++ b/test/prim/prim/vjp/eager/test_comp_eager_expand_grad.py @@ -18,7 +18,7 @@ import parameterized as param import paddle -from paddle.fluid import core +from paddle.base import core @param.parameterized_class( diff --git a/test/prim/prim/vjp/eager/test_comp_eager_gather_grad.py b/test/prim/prim/vjp/eager/test_comp_eager_gather_grad.py index 7d71e5187f2c3..8ca9b3294c018 100644 --- a/test/prim/prim/vjp/eager/test_comp_eager_gather_grad.py +++ b/test/prim/prim/vjp/eager/test_comp_eager_gather_grad.py @@ -18,7 +18,7 @@ import parameterized as param import paddle -from paddle.fluid import core +from paddle.base import core @param.parameterized_class( diff --git a/test/prim/prim/vjp/eager/test_comp_eager_matmul_double_grad.py b/test/prim/prim/vjp/eager/test_comp_eager_matmul_double_grad.py index 3d24604419dee..ce94cc1eb2a50 100644 --- a/test/prim/prim/vjp/eager/test_comp_eager_matmul_double_grad.py +++ b/test/prim/prim/vjp/eager/test_comp_eager_matmul_double_grad.py @@ -18,7 +18,7 @@ import parameterized as param import paddle -from paddle.fluid import core +from paddle.base import core core.set_prim_eager_enabled(True) diff --git a/test/prim/prim/vjp/eager/test_comp_eager_multiply_grad.py b/test/prim/prim/vjp/eager/test_comp_eager_multiply_grad.py index 207e3f414f256..86bf36eb6a72b 100644 --- a/test/prim/prim/vjp/eager/test_comp_eager_multiply_grad.py +++ b/test/prim/prim/vjp/eager/test_comp_eager_multiply_grad.py @@ -18,7 +18,7 @@ import parameterized as param import paddle -from paddle.fluid import core +from paddle.base import core @param.parameterized_class( diff --git a/test/prim/prim/vjp/eager/test_comp_eager_reshape_grad.py b/test/prim/prim/vjp/eager/test_comp_eager_reshape_grad.py index e98f8ba58c3cd..3395aecf68ea2 100644 --- a/test/prim/prim/vjp/eager/test_comp_eager_reshape_grad.py +++ b/test/prim/prim/vjp/eager/test_comp_eager_reshape_grad.py @@ -18,7 +18,7 @@ import parameterized as param import paddle -from paddle.fluid import core +from paddle.base import core core.set_prim_eager_enabled(True) diff --git a/test/prim/prim/vjp/eager/test_comp_eager_sigmoid_grad.py b/test/prim/prim/vjp/eager/test_comp_eager_sigmoid_grad.py index 6640a7e8d3ddd..7693c6417eeec 100644 --- a/test/prim/prim/vjp/eager/test_comp_eager_sigmoid_grad.py +++ b/test/prim/prim/vjp/eager/test_comp_eager_sigmoid_grad.py @@ -19,7 +19,7 @@ import paddle import paddle.nn.functional as F -from paddle.fluid import core +from paddle.base import core @param.parameterized_class( diff --git a/test/prim/prim/vjp/eager/test_comp_eager_sqrt_grad.py b/test/prim/prim/vjp/eager/test_comp_eager_sqrt_grad.py index 4f6dc8b2ada55..33dd31e7c9ea1 100644 --- a/test/prim/prim/vjp/eager/test_comp_eager_sqrt_grad.py +++ b/test/prim/prim/vjp/eager/test_comp_eager_sqrt_grad.py @@ -20,7 +20,7 @@ import parameterized as param import paddle -from paddle.fluid import core +from paddle.base import core core.set_prim_eager_enabled(True) diff --git a/test/prim/prim/vjp/eager/test_comp_eager_sub_grad.py b/test/prim/prim/vjp/eager/test_comp_eager_sub_grad.py 
index 62aa0e936f758..89eb8f5186f7a 100644 --- a/test/prim/prim/vjp/eager/test_comp_eager_sub_grad.py +++ b/test/prim/prim/vjp/eager/test_comp_eager_sub_grad.py @@ -18,7 +18,7 @@ import parameterized as param import paddle -from paddle.fluid import core +from paddle.base import core core.set_prim_eager_enabled(True) diff --git a/test/prim/prim/vjp/eager/test_comp_eager_sum_grad.py b/test/prim/prim/vjp/eager/test_comp_eager_sum_grad.py index 9cade52686fe1..9b522cde3b729 100644 --- a/test/prim/prim/vjp/eager/test_comp_eager_sum_grad.py +++ b/test/prim/prim/vjp/eager/test_comp_eager_sum_grad.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core def actual(primal, cotangent, axis, keep_dim): diff --git a/test/prim/prim/vjp/eager/test_comp_eager_tanh_grad.py b/test/prim/prim/vjp/eager/test_comp_eager_tanh_grad.py index b919ea3e95271..687ce527469ff 100644 --- a/test/prim/prim/vjp/eager/test_comp_eager_tanh_grad.py +++ b/test/prim/prim/vjp/eager/test_comp_eager_tanh_grad.py @@ -18,7 +18,7 @@ import parameterized as param import paddle -from paddle.fluid import core +from paddle.base import core core.set_prim_eager_enabled(True) diff --git a/test/prim/prim/vjp/eager/test_comp_eager_transpose_grad.py b/test/prim/prim/vjp/eager/test_comp_eager_transpose_grad.py index b4a68059a5334..79996d0373b6b 100644 --- a/test/prim/prim/vjp/eager/test_comp_eager_transpose_grad.py +++ b/test/prim/prim/vjp/eager/test_comp_eager_transpose_grad.py @@ -18,7 +18,7 @@ import parameterized as param import paddle -from paddle.fluid import core +from paddle.base import core core.set_prim_eager_enabled(True) diff --git a/test/prim/prim/vjp/static/test_comp_add_grad.py b/test/prim/prim/vjp/static/test_comp_add_grad.py index 9da9e7131d1a9..211564f2cd7b0 100644 --- a/test/prim/prim/vjp/static/test_comp_add_grad.py +++ b/test/prim/prim/vjp/static/test_comp_add_grad.py @@ -18,7 +18,7 @@ import parameterized as param import paddle -from paddle.fluid import core +from paddle.base import core def apply_to_static(net, use_cinn): diff --git a/test/prim/prim/vjp/static/test_comp_add_tanh_grad.py b/test/prim/prim/vjp/static/test_comp_add_tanh_grad.py index 520aef634b6fa..6320395e9ee4e 100644 --- a/test/prim/prim/vjp/static/test_comp_add_tanh_grad.py +++ b/test/prim/prim/vjp/static/test_comp_add_tanh_grad.py @@ -18,7 +18,7 @@ import parameterized as param import paddle -from paddle.fluid import core +from paddle.base import core def apply_to_static(net, use_cinn): diff --git a/test/prim/prim/vjp/static/test_comp_batch_norm_grad.py b/test/prim/prim/vjp/static/test_comp_batch_norm_grad.py index 5f084bab8617c..b8f25b5131203 100644 --- a/test/prim/prim/vjp/static/test_comp_batch_norm_grad.py +++ b/test/prim/prim/vjp/static/test_comp_batch_norm_grad.py @@ -18,7 +18,7 @@ import paddle import paddle.nn.functional as F -from paddle.fluid import core, framework +from paddle.base import core, framework np.random.seed(2023) diff --git a/test/prim/prim/vjp/static/test_comp_cast_grad.py b/test/prim/prim/vjp/static/test_comp_cast_grad.py index 850ad82220165..ce0bcbd7895f2 100644 --- a/test/prim/prim/vjp/static/test_comp_cast_grad.py +++ b/test/prim/prim/vjp/static/test_comp_cast_grad.py @@ -19,7 +19,7 @@ import parameterized as param import paddle -from paddle.fluid import core, framework +from paddle.base import core, framework def apply_to_static(net, use_cinn): diff --git a/test/prim/prim/vjp/static/test_comp_div_grad.py b/test/prim/prim/vjp/static/test_comp_div_grad.py index 
844d30894de18..f0f73d2002438 100644 --- a/test/prim/prim/vjp/static/test_comp_div_grad.py +++ b/test/prim/prim/vjp/static/test_comp_div_grad.py @@ -18,7 +18,7 @@ import parameterized as param import paddle -from paddle.fluid import core +from paddle.base import core def apply_to_static(net, use_cinn): diff --git a/test/prim/prim/vjp/static/test_comp_exp_grad.py b/test/prim/prim/vjp/static/test_comp_exp_grad.py index 2e720f6934f5c..c90c63b006769 100644 --- a/test/prim/prim/vjp/static/test_comp_exp_grad.py +++ b/test/prim/prim/vjp/static/test_comp_exp_grad.py @@ -20,7 +20,7 @@ import parameterized as param import paddle -from paddle.fluid import core +from paddle.base import core @param.parameterized_class( diff --git a/test/prim/prim/vjp/static/test_comp_expand_grad.py b/test/prim/prim/vjp/static/test_comp_expand_grad.py index 2772719a81820..4d12c4a77c968 100644 --- a/test/prim/prim/vjp/static/test_comp_expand_grad.py +++ b/test/prim/prim/vjp/static/test_comp_expand_grad.py @@ -18,7 +18,7 @@ import parameterized as param import paddle -from paddle.fluid import core +from paddle.base import core @param.parameterized_class( diff --git a/test/prim/prim/vjp/static/test_comp_gather_grad.py b/test/prim/prim/vjp/static/test_comp_gather_grad.py index 77693dd1071da..c4550dd13cc02 100644 --- a/test/prim/prim/vjp/static/test_comp_gather_grad.py +++ b/test/prim/prim/vjp/static/test_comp_gather_grad.py @@ -18,7 +18,7 @@ import parameterized as param import paddle -from paddle.fluid import core, framework +from paddle.base import core, framework np.random.seed(2023) diff --git a/test/prim/prim/vjp/static/test_comp_matmul_double_grad.py b/test/prim/prim/vjp/static/test_comp_matmul_double_grad.py index 809a4ed64d71a..2bebdcb10772d 100644 --- a/test/prim/prim/vjp/static/test_comp_matmul_double_grad.py +++ b/test/prim/prim/vjp/static/test_comp_matmul_double_grad.py @@ -18,7 +18,7 @@ import parameterized as param import paddle -from paddle.fluid import core +from paddle.base import core core._set_prim_backward_enabled(True) diff --git a/test/prim/prim/vjp/static/test_comp_multiply_grad.py b/test/prim/prim/vjp/static/test_comp_multiply_grad.py index 4befaf48ca90e..d824bcf9ff4f1 100644 --- a/test/prim/prim/vjp/static/test_comp_multiply_grad.py +++ b/test/prim/prim/vjp/static/test_comp_multiply_grad.py @@ -18,7 +18,7 @@ import parameterized as param import paddle -from paddle.fluid import core, framework +from paddle.base import core, framework @param.parameterized_class( diff --git a/test/prim/prim/vjp/static/test_comp_reshape_grad.py b/test/prim/prim/vjp/static/test_comp_reshape_grad.py index 4523e4af4bf44..7577c29b251cd 100644 --- a/test/prim/prim/vjp/static/test_comp_reshape_grad.py +++ b/test/prim/prim/vjp/static/test_comp_reshape_grad.py @@ -18,7 +18,7 @@ import parameterized as param import paddle -from paddle.fluid import core, framework +from paddle.base import core, framework def apply_to_static(net, use_cinn): diff --git a/test/prim/prim/vjp/static/test_comp_sigmoid_grad.py b/test/prim/prim/vjp/static/test_comp_sigmoid_grad.py index 43a1e0b14619b..cca9702dd03bc 100644 --- a/test/prim/prim/vjp/static/test_comp_sigmoid_grad.py +++ b/test/prim/prim/vjp/static/test_comp_sigmoid_grad.py @@ -19,7 +19,7 @@ import paddle import paddle.nn.functional as F -from paddle.fluid import core +from paddle.base import core @param.parameterized_class( diff --git a/test/prim/prim/vjp/static/test_comp_sqrt_grad.py b/test/prim/prim/vjp/static/test_comp_sqrt_grad.py index a3b854fcc296b..09789ef6602ca 100644 --- 
a/test/prim/prim/vjp/static/test_comp_sqrt_grad.py +++ b/test/prim/prim/vjp/static/test_comp_sqrt_grad.py @@ -14,7 +14,7 @@ import unittest -from paddle.fluid import core +from paddle.base import core core._set_prim_backward_enabled(True) diff --git a/test/prim/prim/vjp/static/test_comp_sub_grad.py b/test/prim/prim/vjp/static/test_comp_sub_grad.py index 87bd6ff0b3602..49b1e33e3c049 100644 --- a/test/prim/prim/vjp/static/test_comp_sub_grad.py +++ b/test/prim/prim/vjp/static/test_comp_sub_grad.py @@ -18,7 +18,7 @@ import parameterized as param import paddle -from paddle.fluid import core +from paddle.base import core def apply_to_static(net, use_cinn): diff --git a/test/prim/prim/vjp/static/test_comp_sum_grad.py b/test/prim/prim/vjp/static/test_comp_sum_grad.py index 4301ff7b07b9a..d2fd37362b289 100644 --- a/test/prim/prim/vjp/static/test_comp_sum_grad.py +++ b/test/prim/prim/vjp/static/test_comp_sum_grad.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core def actual(primal, cotangent, axis, keep_dim): diff --git a/test/prim/prim/vjp/static/test_comp_tanh_grad.py b/test/prim/prim/vjp/static/test_comp_tanh_grad.py index 869a36e806695..43edf96c3aff0 100644 --- a/test/prim/prim/vjp/static/test_comp_tanh_grad.py +++ b/test/prim/prim/vjp/static/test_comp_tanh_grad.py @@ -14,7 +14,7 @@ import unittest -from paddle.fluid import core +from paddle.base import core core._set_prim_backward_enabled(True) diff --git a/test/prim/prim/vjp/static/test_comp_transpose_grad.py b/test/prim/prim/vjp/static/test_comp_transpose_grad.py index 7be0be582fd3b..2f7cb85e3145d 100644 --- a/test/prim/prim/vjp/static/test_comp_transpose_grad.py +++ b/test/prim/prim/vjp/static/test_comp_transpose_grad.py @@ -18,7 +18,7 @@ import parameterized as param import paddle -from paddle.fluid import core, framework +from paddle.base import core, framework def apply_to_static(net, use_cinn): diff --git a/test/prim/prim/vjp/test_comp_high_grad.py b/test/prim/prim/vjp/test_comp_high_grad.py index c080d4d7231a1..96762679df519 100644 --- a/test/prim/prim/vjp/test_comp_high_grad.py +++ b/test/prim/prim/vjp/test_comp_high_grad.py @@ -22,8 +22,8 @@ from decorator_helper import prog_scope import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core @param.parameterized_class( @@ -75,7 +75,7 @@ def func_double(self, place): y_arr = np.random.uniform(-2, 2, shape2).astype(dtype) x_arr[np.abs(x_arr) < 0.005] = 0.002 y_arr[np.abs(y_arr) < 0.005] = 0.002 - from paddle.fluid import core + from paddle.base import core core._set_prim_backward_enabled(True) gradient_checker.double_grad_check( @@ -101,7 +101,7 @@ def func_triple(self, place): y_arr = np.random.uniform(-1, 1, shape2).astype(dtype) x_arr[np.abs(x_arr) < 0.005] = 0.002 y_arr[np.abs(y_arr) < 0.005] = 0.002 - from paddle.fluid import core + from paddle.base import core core._set_prim_backward_enabled(True) gradient_checker.triple_grad_check( @@ -114,9 +114,9 @@ def func_triple(self, place): def test_high_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func_double(p) self.func_triple(p) @@ -171,7 +171,7 @@ def func_double(self, place): y_arr = np.random.uniform(-2, 2, shape2).astype(dtype) x_arr[np.abs(x_arr) < 0.005] = 0.002 y_arr[np.abs(y_arr) < 0.005] = 0.002 - from paddle.fluid import core + 
from paddle.base import core core._set_prim_backward_enabled(True) gradient_checker.double_grad_check( @@ -201,7 +201,7 @@ def func_triple(self, place): y_arr = np.random.uniform(-2, 2, shape2).astype(dtype) x_arr[np.abs(x_arr) < 0.005] = 0.002 y_arr[np.abs(y_arr) < 0.005] = 0.002 - from paddle.fluid import core + from paddle.base import core core._set_prim_backward_enabled(True) gradient_checker.triple_grad_check( @@ -218,9 +218,9 @@ def func_triple(self, place): def test_high_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func_double(p) self.func_triple(p) @@ -275,7 +275,7 @@ def func_double(self, place): y_arr = np.random.uniform(-2, 2, shape2).astype(dtype) x_arr[np.abs(x_arr) < 0.005] = 0.002 y_arr[np.abs(y_arr) < 0.005] = 0.002 - from paddle.fluid import core + from paddle.base import core core._set_prim_backward_enabled(True) gradient_checker.double_grad_check( @@ -305,7 +305,7 @@ def func_triple(self, place): y_arr = np.random.uniform(-1, 1, shape2).astype(dtype) x_arr[np.abs(x_arr) < 0.005] = 0.002 y_arr[np.abs(y_arr) < 0.005] = 0.002 - from paddle.fluid import core + from paddle.base import core core._set_prim_backward_enabled(True) gradient_checker.triple_grad_check( @@ -322,9 +322,9 @@ def func_triple(self, place): def test_high_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func_double(p) self.func_triple(p) @@ -360,7 +360,7 @@ def func_double(self, place): x_arr[np.abs(x_arr) < 0.005] = 0.002 # silu double grad only has CompositeOpMaker,don't need set prim_flag - from paddle.fluid import core + from paddle.base import core core._set_prim_backward_enabled(True) gradient_checker.double_grad_check( @@ -386,7 +386,7 @@ def func_triple(self, place): out = paddle.nn.functional.silu(x) x_arr = np.random.uniform(-1, 1, shape1).astype(dtype) x_arr[np.abs(x_arr) < 0.005] = 0.002 - from paddle.fluid import core + from paddle.base import core core._set_prim_backward_enabled(True) gradient_checker.triple_grad_check( @@ -403,9 +403,9 @@ def func_triple(self, place): def test_high_grad(self): paddle.enable_static() - places = [fluid.CPUPlace()] + places = [base.CPUPlace()] if core.is_compiled_with_cuda(): - places.append(fluid.CUDAPlace(0)) + places.append(base.CUDAPlace(0)) for p in places: self.func_double(p) self.func_triple(p) diff --git a/test/prim/process/test_check_inputs.py b/test/prim/process/test_check_inputs.py index bf1d3e7a5064f..631da96cc8b23 100644 --- a/test/prim/process/test_check_inputs.py +++ b/test/prim/process/test_check_inputs.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core def fn(x, shape): diff --git a/test/prim/process/test_copy_op.py b/test/prim/process/test_copy_op.py index de208f1163262..e91597c01abc1 100644 --- a/test/prim/process/test_copy_op.py +++ b/test/prim/process/test_copy_op.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core from paddle.incubate.autograd import primapi paddle.framework.random._manual_program_seed(2023) diff --git a/test/prim/process/test_prim_amp.py b/test/prim/process/test_prim_amp.py index 8c81f938679e1..8a632c13a4e07 100644 --- a/test/prim/process/test_prim_amp.py 
+++ b/test/prim/process/test_prim_amp.py
@@ -19,7 +19,7 @@
 import paddle
 import paddle.nn.functional as F
 from paddle import nn
-from paddle.fluid import core, framework
+from paddle.base import core, framework
 from paddle.nn import BatchNorm
 
 np.random.seed(2023)
diff --git a/test/prim/test_comp_custom_vjp.py b/test/prim/test_comp_custom_vjp.py
index ef4418f6934f5..fb62fe80202a4 100644
--- a/test/prim/test_comp_custom_vjp.py
+++ b/test/prim/test_comp_custom_vjp.py
@@ -14,7 +14,7 @@
 import unittest
 
 import paddle
-from paddle.fluid import core
+from paddle.base import core
 
 
 class TestCustomVJP(unittest.TestCase):
diff --git a/test/prim/test_comp_dispensable.py b/test/prim/test_comp_dispensable.py
index a4f4df5fdd1c5..be76ce92ce7f0 100644
--- a/test/prim/test_comp_dispensable.py
+++ b/test/prim/test_comp_dispensable.py
@@ -19,10 +19,10 @@ class TestDispensable(unittest.TestCase):
     def setUp(self):
-        paddle.fluid.core._set_prim_all_enabled(True)
+        paddle.base.core._set_prim_all_enabled(True)
 
     def tearDown(self):
-        paddle.fluid.core._set_prim_all_enabled(False)
+        paddle.base.core._set_prim_all_enabled(False)
 
     def test_dispensable(self):
         @paddle.jit.to_static
@@ -36,7 +36,7 @@ def f(x):
         op = f.get_concrete_program(x)[1].backward_program.block(0).ops[-1]
         self.assertEqual(
             op.attr('op_role'),
-            int(paddle.fluid.core.op_proto_and_checker_maker.OpRole.Backward),
+            int(paddle.base.core.op_proto_and_checker_maker.OpRole.Backward),
         )
         self.assertIn('AxisTensor', op.input_names)
diff --git a/test/prim/test_comp_get_grad_op_desc_prim_disabled.py b/test/prim/test_comp_get_grad_op_desc_prim_disabled.py
index 25b3340f7b591..710fdc44a85d5 100644
--- a/test/prim/test_comp_get_grad_op_desc_prim_disabled.py
+++ b/test/prim/test_comp_get_grad_op_desc_prim_disabled.py
@@ -15,14 +15,14 @@
 import unittest
 
-from paddle.fluid import core
+from paddle.base import core
 
 core._set_prim_backward_enabled(False)
 
 import parameterized as param
 
 import paddle
-from paddle.fluid import core, framework
+from paddle.base import core, framework
 
 
 @param.parameterized_class(
diff --git a/test/prim/test_comp_get_grad_op_desc_prim_enabled.py b/test/prim/test_comp_get_grad_op_desc_prim_enabled.py
index 9c89ab72bf18e..274abc2bcb1a5 100644
--- a/test/prim/test_comp_get_grad_op_desc_prim_enabled.py
+++ b/test/prim/test_comp_get_grad_op_desc_prim_enabled.py
@@ -15,14 +15,14 @@
 import unittest
 
-from paddle.fluid import core
+from paddle.base import core
 
 core._set_prim_backward_enabled(True)
 
 import parameterized as param
 
 import paddle
-from paddle.fluid import core, framework
+from paddle.base import core, framework
 
 
 @param.parameterized_class(
diff --git a/test/prim/test_comp_skip_op_set.py b/test/prim/test_comp_skip_op_set.py
index bca3c97ec4d40..8c3e446a62692 100644
--- a/test/prim/test_comp_skip_op_set.py
+++ b/test/prim/test_comp_skip_op_set.py
@@ -16,7 +16,7 @@
 import unittest
 
 import paddle
-from paddle.fluid import core, framework
+from paddle.base import core, framework
 
 
 class TestGetGradOpDescPrimEnabled(unittest.TestCase):
diff --git a/test/ps/fl_ps_trainer.py b/test/ps/fl_ps_trainer.py
index 7eacd5a0e33ca..ad59a68b0a35e 100755
--- a/test/ps/fl_ps_trainer.py
+++ b/test/ps/fl_ps_trainer.py
@@ -16,12 +16,12 @@
 import time
 
 import paddle
-from paddle import fluid
+from paddle import base
 from paddle.distributed import fleet
 
 
 def get_dataset(inputs, config, pipe_cmd, role="worker"):
-    dataset = fluid.DatasetFactory().create_dataset()
+    dataset = base.DatasetFactory().create_dataset()
     dataset.set_use_var(inputs)
     dataset.set_pipe_command(pipe_cmd)
dataset.set_batch_size(config.get('runner.batch_size')) @@ -102,9 +102,9 @@ def fl_ps_train(): _runtime_handle._run_server() # 4.2 run worker elif role_maker._is_worker(): - place = fluid.CPUPlace() - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + place = base.CPUPlace() + exe = base.Executor(place) + exe.run(base.default_startup_program()) _runtime_handle._init_worker() print('trainer get dataset') inputs = feeds_list[1:-1] @@ -112,8 +112,8 @@ def fl_ps_train(): inputs, config, "python dataset_generator_A.py" ) print( - "fluid.default_main_program: {}".format( - fluid.default_main_program()._heter_pipeline_opt + "base.default_main_program: {}".format( + base.default_main_program()._heter_pipeline_opt ) ) for epoch in range(epoch_num): @@ -121,7 +121,7 @@ def fl_ps_train(): dataset.set_filelist(file_list) start_time = time.time() exe.train_from_dataset( - program=fluid.default_main_program(), + program=base.default_main_program(), dataset=dataset, print_period=2, debug=False, @@ -135,8 +135,8 @@ def fl_ps_train(): _runtime_handle._stop_worker() print("Fl partyA Trainer Success!") else: - exe = fluid.Executor() - exe.run(fluid.default_startup_program()) + exe = base.Executor() + exe.run(base.default_startup_program()) _runtime_handle._init_worker() inputs = [ feeds_list[0], @@ -146,14 +146,14 @@ def fl_ps_train(): inputs, config, "python dataset_generator_B.py", "heter_worker" ) print( - "fluid.default_main_program: {}".format( - fluid.default_main_program()._heter_pipeline_opt + "base.default_main_program: {}".format( + base.default_main_program()._heter_pipeline_opt ) ) for epoch in range(epoch_num): dataset.set_filelist(file_list) exe.train_from_dataset( - program=fluid.default_main_program(), + program=base.default_main_program(), dataset=dataset, print_period=2, debug=False, diff --git a/test/ps/ps_dnn_model.py b/test/ps/ps_dnn_model.py index 7fc74c03c2cca..625d106c1f3e8 100755 --- a/test/ps/ps_dnn_model.py +++ b/test/ps/ps_dnn_model.py @@ -88,7 +88,7 @@ def forward(self, sparse_inputs, dense_inputs): y_dnn = paddle.concat(x=sparse_embs + [dense_inputs], axis=1) if self.sync_mode == 'heter': - with paddle.fluid.device_guard('gpu'): + with paddle.base.device_guard('gpu'): for n_layer in self._mlp_layers: y_dnn = n_layer(y_dnn) else: @@ -196,7 +196,7 @@ def __init__( self._mlp_layers_top.append(act) def bottom_a_layer(self, sparse_inputs): - with paddle.fluid.device_guard(self.PART_A_DEVICE_FlAG): + with paddle.base.device_guard(self.PART_A_DEVICE_FlAG): sparse_embs = [] for s_input in sparse_inputs: emb = self.embedding(s_input) @@ -208,7 +208,7 @@ def bottom_a_layer(self, sparse_inputs): y = self._mlp_layers_a[1](y) y = self._mlp_layers_a[2](y) - with paddle.fluid.device_guard( + with paddle.base.device_guard( self.PART_A_JOINT_OP_DEVICE_FlAG ): # joint point bottom_a = self._mlp_layers_a[3](y) @@ -216,7 +216,7 @@ def bottom_a_layer(self, sparse_inputs): return bottom_a def bottom_b_layer(self, dense_inputs): - with paddle.fluid.device_guard(self.PART_B_DEVICE_FlAG): + with paddle.base.device_guard(self.PART_B_DEVICE_FlAG): y = self._mlp_layers_b[0](dense_inputs) y = self._mlp_layers_b[1](y) @@ -226,14 +226,14 @@ def bottom_b_layer(self, dense_inputs): return bottom_b def interactive_layer(self, bottom_a, bottom_b): - with paddle.fluid.device_guard( + with paddle.base.device_guard( self.PART_B_JOINT_OP_DEVICE_FlAG ): # joint point interactive = paddle.add(bottom_a, bottom_b) return interactive def top_layer(self, interactive, label_input): - with 
paddle.fluid.device_guard(self.PART_B_DEVICE_FlAG): + with paddle.base.device_guard(self.PART_B_DEVICE_FlAG): y = self._mlp_layers_top[0](interactive) y_top = self._mlp_layers_top[1](y) predict_2d = paddle.nn.functional.softmax(y_top) diff --git a/test/quantization/README.md b/test/quantization/README.md index ce049b45f27f3..fd05146628ded 100644 --- a/test/quantization/README.md +++ b/test/quantization/README.md @@ -94,7 +94,7 @@ The code snipped shows how the `Quant2Int8MkldnnPass` can be applied to a model import paddle import paddle.static as static from paddle.static.quantization import Quant2Int8MkldnnPass - from paddle.fluid.framework import IrGraph + from paddle.base.framework import IrGraph from paddle.framework import core # Create the IrGraph by Program diff --git a/test/quantization/convert_model2dot.py b/test/quantization/convert_model2dot.py index d4f298ddcd5b9..8e7a4bed5033d 100644 --- a/test/quantization/convert_model2dot.py +++ b/test/quantization/convert_model2dot.py @@ -18,7 +18,7 @@ import unittest import paddle -from paddle.fluid.framework import IrGraph +from paddle.base.framework import IrGraph from paddle.framework import core paddle.enable_static() diff --git a/test/quantization/quant2_int8_image_classification_comparison.py b/test/quantization/quant2_int8_image_classification_comparison.py index c50a926562d27..5c9954df91118 100644 --- a/test/quantization/quant2_int8_image_classification_comparison.py +++ b/test/quantization/quant2_int8_image_classification_comparison.py @@ -23,7 +23,7 @@ import numpy as np import paddle -from paddle.fluid.framework import IrGraph +from paddle.base.framework import IrGraph from paddle.framework import core from paddle.static.quantization import Quant2Int8MkldnnPass diff --git a/test/quantization/quant2_int8_lstm_model.py b/test/quantization/quant2_int8_lstm_model.py index ffb2c755433d5..73659a8ac019c 100644 --- a/test/quantization/quant2_int8_lstm_model.py +++ b/test/quantization/quant2_int8_lstm_model.py @@ -81,7 +81,7 @@ def get_warmup_tensor(self, data_path, place): [len(feat) // 4 // 8, 8] ) lod_feat = [feat.shape[0]] - minputs = paddle.fluid.create_lod_tensor( + minputs = paddle.base.create_lod_tensor( feat, [lod_feat], place ) diff --git a/test/quantization/quant2_int8_nlp_comparison.py b/test/quantization/quant2_int8_nlp_comparison.py index e0ee817e92c79..985fb62f1d11e 100644 --- a/test/quantization/quant2_int8_nlp_comparison.py +++ b/test/quantization/quant2_int8_nlp_comparison.py @@ -21,7 +21,7 @@ import numpy as np import paddle -from paddle import fluid +from paddle import base from paddle.inference import Config, create_predictor paddle.enable_static() @@ -281,7 +281,7 @@ def _ints_from_csv(self, string): return set(map(int, string.split(','))) def test_graph_transformation(self): - if not fluid.core.is_compiled_with_mkldnn(): + if not base.core.is_compiled_with_mkldnn(): return quant_model_path = test_case_args.quant_model diff --git a/test/quantization/quant_int8_image_classification_comparison.py b/test/quantization/quant_int8_image_classification_comparison.py index 74b2b0827d870..cc0a6ad32ffc2 100644 --- a/test/quantization/quant_int8_image_classification_comparison.py +++ b/test/quantization/quant_int8_image_classification_comparison.py @@ -23,7 +23,7 @@ import numpy as np import paddle -from paddle.fluid.framework import IrGraph +from paddle.base.framework import IrGraph from paddle.framework import core from paddle.static.quantization import QuantInt8MkldnnPass diff --git a/test/quantization/test_graph.py 
b/test/quantization/test_graph.py index 189ca7cfcae02..3fdb087fcf00b 100644 --- a/test/quantization/test_graph.py +++ b/test/quantization/test_graph.py @@ -16,7 +16,7 @@ import unittest import paddle -from paddle.fluid.framework import IrGraph +from paddle.base.framework import IrGraph from paddle.framework import core paddle.enable_static() @@ -87,7 +87,7 @@ def graph_apis(self, use_cuda=False, for_ci=True): train_reader = paddle.batch( paddle.dataset.mnist.train(), batch_size=batch_size ) - feeder = paddle.fluid.DataFeeder(feed_list=feeds, place=place) + feeder = paddle.base.DataFeeder(feed_list=feeds, place=place) def _train(binary): for _ in range(iters): diff --git a/test/quantization/test_imperative_out_scale.py b/test/quantization/test_imperative_out_scale.py index 1288281cab2c1..a8317addd6edd 100644 --- a/test/quantization/test_imperative_out_scale.py +++ b/test/quantization/test_imperative_out_scale.py @@ -20,7 +20,7 @@ from imperative_test_utils import fix_model_dict, train_lenet import paddle -from paddle import fluid +from paddle import base from paddle.framework import core, set_flags from paddle.nn import ( BatchNorm2D, @@ -143,7 +143,7 @@ def test_out_scale_acc(self): activation_quantize_type=activation_quantize_type, ) - with fluid.dygraph.guard(): + with base.dygraph.guard(): np.random.seed(seed) paddle.static.default_main_program().random_seed = seed paddle.static.default_startup_program().random_seed = seed @@ -170,7 +170,7 @@ def test_out_scale_acc(self): msg='Failed to do the imperative qat.', ) - with fluid.dygraph.guard(): + with base.dygraph.guard(): lenet = ImperativeLenet() load_dict = paddle.load(self.param_save_path) imperative_out_scale.quantize(lenet) diff --git a/test/quantization/test_imperative_qat.py b/test/quantization/test_imperative_qat.py index 1f95de8e8d1b3..f1cadf1dbe2a3 100644 --- a/test/quantization/test_imperative_qat.py +++ b/test/quantization/test_imperative_qat.py @@ -21,7 +21,7 @@ from imperative_test_utils import ImperativeLenet, fix_model_dict import paddle -from paddle import fluid +from paddle import base from paddle.framework import core, set_flags from paddle.nn import Conv2D, Conv2DTranspose from paddle.nn.quant.quant_layers import ( @@ -72,7 +72,7 @@ def test_qat(self): onnx_format=self.onnx_format, ) - with fluid.dygraph.guard(): + with base.dygraph.guard(): # For CI coverage conv1 = Conv2D( in_channels=3, diff --git a/test/quantization/test_imperative_qat_amp.py b/test/quantization/test_imperative_qat_amp.py index 5b88ef49f9774..4dba9c5421df4 100644 --- a/test/quantization/test_imperative_qat_amp.py +++ b/test/quantization/test_imperative_qat_amp.py @@ -22,7 +22,7 @@ from imperative_test_utils import ImperativeLenet import paddle -from paddle import fluid +from paddle import base from paddle.dataset.common import download from paddle.framework import set_flags from paddle.quantization import ImperativeQuantAware @@ -197,7 +197,7 @@ def test_ptq(self): ) params_path += "/lenet_pretrained/lenet.pdparams" - with fluid.dygraph.guard(): + with base.dygraph.guard(): model = ImperativeLenet() model_state_dict = paddle.load(params_path) model.set_state_dict(model_state_dict) diff --git a/test/quantization/test_post_training_quantization_lstm_model.py b/test/quantization/test_post_training_quantization_lstm_model.py index d365fda19d4ed..81f68fd2b3986 100644 --- a/test/quantization/test_post_training_quantization_lstm_model.py +++ b/test/quantization/test_post_training_quantization_lstm_model.py @@ -22,7 +22,7 @@ import numpy as np import 
paddle -from paddle import fluid +from paddle import base from paddle.dataset.common import download from paddle.static.quantization import PostTrainingQuantization @@ -97,7 +97,7 @@ def reader(): ) lod_feat = [feat.shape[0]] - minputs = fluid.create_lod_tensor(feat, [lod_feat], place) + minputs = base.create_lod_tensor(feat, [lod_feat], place) yield [minputs] return reader @@ -127,7 +127,7 @@ def reader(): ) lod_feat = [feat.shape[0]] - minputs = fluid.create_lod_tensor(feat, [lod_feat], place) + minputs = base.create_lod_tensor(feat, [lod_feat], place) yield minputs, label return reader diff --git a/test/quantization/test_quant2_int8_mkldnn_pass.py b/test/quantization/test_quant2_int8_mkldnn_pass.py index e51da1db81ba8..3b8e01c713023 100644 --- a/test/quantization/test_quant2_int8_mkldnn_pass.py +++ b/test/quantization/test_quant2_int8_mkldnn_pass.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid.framework import IrGraph +from paddle.base.framework import IrGraph from paddle.framework import core from paddle.static.quantization import Quant2Int8MkldnnPass diff --git a/test/quantization/test_quant_aware.py b/test/quantization/test_quant_aware.py index e401dda82346f..4a07ad69bae9d 100644 --- a/test/quantization/test_quant_aware.py +++ b/test/quantization/test_quant_aware.py @@ -385,7 +385,7 @@ def test(program): # self.assertEqual(convert_quant_op_nums_1, convert_quant_op_nums_2) def get_op_number(self, prog): - graph = paddle.fluid.framework.IrGraph( + graph = paddle.base.framework.IrGraph( paddle.framework.core.Graph(prog.desc), for_test=False ) quant_op_nums = 0 @@ -398,7 +398,7 @@ def get_op_number(self, prog): return op_nums, quant_op_nums def get_convert_op_number(self, prog): - graph = paddle.fluid.framework.IrGraph( + graph = paddle.base.framework.IrGraph( paddle.framework.core.Graph(prog.desc), for_test=True ) quant_op_nums = 0 diff --git a/test/quantization/test_quant_aware_user_defined.py b/test/quantization/test_quant_aware_user_defined.py index 5169f39bc8584..4352145511f53 100644 --- a/test/quantization/test_quant_aware_user_defined.py +++ b/test/quantization/test_quant_aware_user_defined.py @@ -25,7 +25,7 @@ def pact(x): - helper = paddle.fluid.layer_helper.LayerHelper("pact", **locals()) + helper = paddle.base.layer_helper.LayerHelper("pact", **locals()) dtype = 'float32' init_thres = 20 u_param_attr = paddle.ParamAttr( diff --git a/test/quantization/test_quantization_mkldnn_pass.py b/test/quantization/test_quantization_mkldnn_pass.py index cf5ad87bb1c00..a7c66e456e648 100644 --- a/test/quantization/test_quantization_mkldnn_pass.py +++ b/test/quantization/test_quantization_mkldnn_pass.py @@ -19,7 +19,7 @@ import numpy as np import paddle -from paddle.fluid.framework import IrGraph +from paddle.base.framework import IrGraph from paddle.framework import core from paddle.static.quantization import ( QuantInt8MkldnnPass, @@ -156,7 +156,7 @@ def mkldnn_based_freeze_graph( test_reader = paddle.batch( paddle.dataset.mnist.test(), batch_size=batch_size ) - feeder = paddle.fluid.DataFeeder(feed_list=feeds, place=place) + feeder = paddle.base.DataFeeder(feed_list=feeds, place=place) # Training the model to get the weights value with paddle.static.scope_guard(scope): diff --git a/test/quantization/test_quantization_pass.py b/test/quantization/test_quantization_pass.py index 1d1147a0e29de..b91f204c293b2 100644 --- a/test/quantization/test_quantization_pass.py +++ b/test/quantization/test_quantization_pass.py @@ -19,8 +19,8 @@ import numpy as np import paddle -from 
paddle import fluid -from paddle.fluid.framework import IrGraph +from paddle import base +from paddle.base.framework import IrGraph from paddle.framework import core from paddle.static.quantization import ( AddQuantDequantPass, @@ -386,7 +386,7 @@ def build_program(main, startup, is_test): test_reader = paddle.batch( paddle.dataset.mnist.test(), batch_size=batch_size ) - feeder = paddle.fluid.DataFeeder(feed_list=feeds, place=place) + feeder = paddle.base.DataFeeder(feed_list=feeds, place=place) with paddle.static.scope_guard(scope): for _ in range(iters): data = next(train_reader()) @@ -407,7 +407,7 @@ def build_program(main, startup, is_test): test_data = next(test_reader()) with paddle.static.program_guard(quantized_test_program): - w_var = fluid.framework._get_var( + w_var = base.framework._get_var( 'conv2d_1.w_0.quantized', quantized_test_program ) # Testing diff --git a/test/quantization/test_quantization_scale_pass.py b/test/quantization/test_quantization_scale_pass.py index 644d853da3d93..48a7a71a73f64 100644 --- a/test/quantization/test_quantization_scale_pass.py +++ b/test/quantization/test_quantization_scale_pass.py @@ -20,7 +20,7 @@ import numpy as np import paddle -from paddle.fluid.framework import IrGraph +from paddle.base.framework import IrGraph from paddle.framework import core from paddle.static.quantization import ( AddQuantDequantPass, @@ -152,7 +152,7 @@ def build_program(main, startup, is_test): paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=500), batch_size=batch_size, ) - feeder = paddle.fluid.DataFeeder(feed_list=feeds, place=place) + feeder = paddle.base.DataFeeder(feed_list=feeds, place=place) with paddle.static.scope_guard(scope): for _ in range(iters): data = next(train_reader()) diff --git a/test/quantization/test_user_defined_quantization.py b/test/quantization/test_user_defined_quantization.py index 80d4e6e564cc2..9a26cc0b4a8fe 100644 --- a/test/quantization/test_user_defined_quantization.py +++ b/test/quantization/test_user_defined_quantization.py @@ -21,7 +21,7 @@ import numpy as np import paddle -from paddle.fluid.framework import IrGraph +from paddle.base.framework import IrGraph from paddle.framework import LayerHelper, core from paddle.static.quantization import ( AddQuantDequantPass, @@ -200,7 +200,7 @@ def save_dict(Dict, mapping_table_path): paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=500), batch_size=batch_size, ) - feeder = paddle.fluid.DataFeeder(feed_list=feeds, place=place) + feeder = paddle.base.DataFeeder(feed_list=feeds, place=place) with paddle.static.scope_guard(scope): for _ in range(iters): data = next(train_reader()) diff --git a/test/rnn/test_rnn_api.py b/test/rnn/test_rnn_api.py index 0ed42e27f9025..6e607083d6605 100644 --- a/test/rnn/test_rnn_api.py +++ b/test/rnn/test_rnn_api.py @@ -21,8 +21,8 @@ import numpy as np -from paddle import fluid -from paddle.fluid import framework +from paddle import base +from paddle.base import framework bidirectional_list = ["bidirectional", "bidirect"] @@ -53,7 +53,7 @@ def setUp(self): paddle.enable_static() - with paddle.fluid.unique_name.guard(): + with paddle.base.unique_name.guard(): main_program = paddle.static.Program() startup_program = paddle.static.Program() with paddle.static.program_guard( @@ -62,10 +62,10 @@ def setUp(self): paddle.seed(self.seed) paddle.framework.random._manual_program_seed(self.seed) - self.exe = fluid.Executor( - fluid.CPUPlace() + self.exe = base.Executor( + base.CPUPlace() if self.place == "cpu" - else fluid.CUDAPlace(0) + else 
base.CUDAPlace(0) ) rnn_in_data = paddle.static.data( @@ -158,7 +158,7 @@ def setUp(self): paddle.enable_static() - with paddle.fluid.unique_name.guard(): + with paddle.base.unique_name.guard(): main_program = paddle.static.Program() startup_program = paddle.static.Program() with paddle.static.program_guard( @@ -167,10 +167,10 @@ def setUp(self): paddle.seed(self.seed) paddle.framework.random._manual_program_seed(self.seed) - self.exe = fluid.Executor( - fluid.CPUPlace() + self.exe = base.Executor( + base.CPUPlace() if self.place == "cpu" - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) rnn_in_data = paddle.static.data( @@ -261,7 +261,7 @@ def setUp(self): paddle.enable_static() - with paddle.fluid.unique_name.guard(): + with paddle.base.unique_name.guard(): main_program = paddle.static.Program() startup_program = paddle.static.Program() with paddle.static.program_guard( @@ -270,10 +270,10 @@ def setUp(self): paddle.seed(self.seed) paddle.framework.random._manual_program_seed(self.seed) - self.exe = paddle.fluid.Executor( - fluid.CPUPlace() + self.exe = paddle.base.Executor( + base.CPUPlace() if self.place == "cpu" - else fluid.CUDAPlace(0) + else base.CUDAPlace(0) ) rnn_in_data = paddle.static.data( diff --git a/test/rnn/test_rnn_cells.py b/test/rnn/test_rnn_cells.py index 9c126fcb08dcc..4a0792b812d54 100644 --- a/test/rnn/test_rnn_cells.py +++ b/test/rnn/test_rnn_cells.py @@ -187,7 +187,7 @@ def runTest(self): def load_tests(loader, tests, pattern): suite = unittest.TestSuite() devices = ( - ["cpu", "gpu"] if paddle.fluid.is_compiled_with_cuda() else ["cpu"] + ["cpu", "gpu"] if paddle.base.is_compiled_with_cuda() else ["cpu"] ) for bias in [True, False]: for device in devices: diff --git a/test/rnn/test_rnn_cells_static.py b/test/rnn/test_rnn_cells_static.py index a7edbc6749619..21a513e0c4e0b 100644 --- a/test/rnn/test_rnn_cells_static.py +++ b/test/rnn/test_rnn_cells_static.py @@ -37,7 +37,7 @@ def setUp(self): mp = paddle.static.Program() sp = paddle.static.Program() - with paddle.fluid.unique_name.guard(): + with paddle.base.unique_name.guard(): with paddle.static.program_guard(mp, sp): rnn2 = paddle.nn.SimpleRNNCell( 16, 32, bias_ih_attr=self.bias, bias_hh_attr=self.bias @@ -45,7 +45,7 @@ def setUp(self): place = self.place exe = paddle.static.Executor(place) - scope = paddle.fluid.Scope() + scope = paddle.base.Scope() with paddle.static.scope_guard(scope): exe.run(sp) convert_params_for_cell_static(rnn1, rnn2, place) @@ -71,7 +71,7 @@ def test_with_initial_state(self): y1, h1 = rnn1(x, prev_h) - with paddle.fluid.unique_name.guard(): + with paddle.base.unique_name.guard(): with paddle.static.program_guard(mp, sp): x_data = paddle.static.data( "input", @@ -103,7 +103,7 @@ def test_with_zero_state(self): y1, h1 = rnn1(x) - with paddle.fluid.unique_name.guard(): + with paddle.base.unique_name.guard(): with paddle.static.program_guard(mp, sp): x_data = paddle.static.data( "input", @@ -139,7 +139,7 @@ def setUp(self): mp = paddle.static.Program() sp = paddle.static.Program() - with paddle.fluid.unique_name.guard(): + with paddle.base.unique_name.guard(): with paddle.static.program_guard(mp, sp): rnn2 = paddle.nn.GRUCell( 16, 32, bias_ih_attr=self.bias, bias_hh_attr=self.bias @@ -147,7 +147,7 @@ def setUp(self): place = self.place exe = paddle.static.Executor(place) - scope = paddle.fluid.Scope() + scope = paddle.base.Scope() with paddle.static.scope_guard(scope): exe.run(sp) convert_params_for_cell_static(rnn1, rnn2, place) @@ -174,7 +174,7 @@ def test_with_initial_state(self): y1, 
h1 = rnn1(x, prev_h) - with paddle.fluid.unique_name.guard(): + with paddle.base.unique_name.guard(): with paddle.static.program_guard(mp, sp): x_data = paddle.static.data( "input", @@ -206,7 +206,7 @@ def test_with_zero_state(self): y1, h1 = rnn1(x) - with paddle.fluid.unique_name.guard(): + with paddle.base.unique_name.guard(): with paddle.static.program_guard(mp, sp): x_data = paddle.static.data( "input", @@ -242,7 +242,7 @@ def setUp(self): mp = paddle.static.Program() sp = paddle.static.Program() - with paddle.fluid.unique_name.guard(): + with paddle.base.unique_name.guard(): with paddle.static.program_guard(mp, sp): rnn2 = paddle.nn.LSTMCell( 16, 32, bias_ih_attr=self.bias, bias_hh_attr=self.bias @@ -250,7 +250,7 @@ def setUp(self): place = self.place exe = paddle.static.Executor(place) - scope = paddle.fluid.Scope() + scope = paddle.base.Scope() with paddle.static.scope_guard(scope): exe.run(sp) convert_params_for_cell_static(rnn1, rnn2, place) @@ -278,7 +278,7 @@ def test_with_initial_state(self): y1, (h1, c1) = rnn1(x, (prev_h, prev_c)) - with paddle.fluid.unique_name.guard(): + with paddle.base.unique_name.guard(): with paddle.static.program_guard(mp, sp): x_data = paddle.static.data( "input", @@ -316,7 +316,7 @@ def test_with_zero_state(self): y1, (h1, c1) = rnn1(x) - with paddle.fluid.unique_name.guard(): + with paddle.base.unique_name.guard(): with paddle.static.program_guard(mp, sp): x_data = paddle.static.data( "input", @@ -343,7 +343,7 @@ def runTest(self): def load_tests(loader, tests, pattern): suite = unittest.TestSuite() devices = ( - ["cpu", "gpu"] if paddle.fluid.is_compiled_with_cuda() else ["cpu"] + ["cpu", "gpu"] if paddle.base.is_compiled_with_cuda() else ["cpu"] ) for bias in [True, False]: for device in devices: diff --git a/test/rnn/test_rnn_cudnn_params_packing.py b/test/rnn/test_rnn_cudnn_params_packing.py index cd5b8b4bfd217..473ad18bfc2e6 100644 --- a/test/rnn/test_rnn_cudnn_params_packing.py +++ b/test/rnn/test_rnn_cudnn_params_packing.py @@ -47,9 +47,9 @@ def test_rnn_with_cudnn_clone(self): # flattening rnn's parameters for the need to call cudnn kernel is such # a case. 
with paddle.static.program_guard(train_program, startup_prog): - with paddle.fluid.unique_name.guard(): + with paddle.base.unique_name.guard(): bilstm = create_model() - with paddle.fluid.program_guard(test_program, startup_prog): - with paddle.fluid.unique_name.guard(): + with paddle.base.program_guard(test_program, startup_prog): + with paddle.base.unique_name.guard(): bilstm = create_model() diff --git a/test/rnn/test_rnn_nets.py b/test/rnn/test_rnn_nets.py index 0ac68bdbf30d6..e75c83007b6a3 100644 --- a/test/rnn/test_rnn_nets.py +++ b/test/rnn/test_rnn_nets.py @@ -360,7 +360,7 @@ def forward(self, input): def load_tests(loader, tests, pattern): suite = unittest.TestSuite() devices = ( - ["cpu", "gpu"] if paddle.fluid.is_compiled_with_cuda() else ["cpu"] + ["cpu", "gpu"] if paddle.base.is_compiled_with_cuda() else ["cpu"] ) for direction in ["forward", "bidirectional", "bidirect"]: for time_major in [True, False]: diff --git a/test/rnn/test_rnn_nets_static.py b/test/rnn/test_rnn_nets_static.py index da04ca9e1bdb0..b2b91685f3c5c 100644 --- a/test/rnn/test_rnn_nets_static.py +++ b/test/rnn/test_rnn_nets_static.py @@ -46,7 +46,7 @@ def setUp(self): mp = paddle.static.Program() sp = paddle.static.Program() - with paddle.fluid.unique_name.guard(): + with paddle.base.unique_name.guard(): with paddle.static.program_guard(mp, sp): rnn2 = paddle.nn.SimpleRNN( 16, @@ -57,7 +57,7 @@ def setUp(self): ) exe = paddle.static.Executor(place) - scope = paddle.fluid.Scope() + scope = paddle.base.Scope() with paddle.static.scope_guard(scope): exe.run(sp) convert_params_for_net_static(rnn1, rnn2, place) @@ -86,7 +86,7 @@ def test_with_initial_state(self): y1, h1 = rnn1(x, prev_h) - with paddle.fluid.unique_name.guard(): + with paddle.base.unique_name.guard(): with paddle.static.program_guard(mp, sp): x_data = paddle.static.data( "input", @@ -121,7 +121,7 @@ def test_with_zero_state(self): y1, h1 = rnn1(x) - with paddle.fluid.unique_name.guard(): + with paddle.base.unique_name.guard(): with paddle.static.program_guard(mp, sp): x_data = paddle.static.data( "input", @@ -153,7 +153,7 @@ def test_with_input_lengths(self): y1, h1 = rnn1(x, sequence_length=sequence_length) - with paddle.fluid.unique_name.guard(): + with paddle.base.unique_name.guard(): with paddle.static.program_guard(mp, sp): x_data = paddle.static.data( "input", @@ -202,7 +202,7 @@ def setUp(self): mp = paddle.static.Program() sp = paddle.static.Program() - with paddle.fluid.unique_name.guard(): + with paddle.base.unique_name.guard(): with paddle.static.program_guard(mp, sp): rnn2 = paddle.nn.GRU( 16, @@ -213,7 +213,7 @@ def setUp(self): ) exe = paddle.static.Executor(place) - scope = paddle.fluid.Scope() + scope = paddle.base.Scope() with paddle.static.scope_guard(scope): exe.run(sp) convert_params_for_net_static(rnn1, rnn2, place) @@ -243,7 +243,7 @@ def test_with_initial_state(self): y1, h1 = rnn1(x, prev_h) - with paddle.fluid.unique_name.guard(): + with paddle.base.unique_name.guard(): with paddle.static.program_guard(mp, sp): x_data = paddle.static.data( "input", @@ -278,7 +278,7 @@ def test_with_zero_state(self): y1, h1 = rnn1(x) - with paddle.fluid.unique_name.guard(): + with paddle.base.unique_name.guard(): with paddle.static.program_guard(mp, sp): x_data = paddle.static.data( "input", @@ -310,7 +310,7 @@ def test_with_input_lengths(self): y1, h1 = rnn1(x, sequence_length=sequence_length) - with paddle.fluid.unique_name.guard(): + with paddle.base.unique_name.guard(): with paddle.static.program_guard(mp, sp): x_data = 
paddle.static.data( "input", @@ -358,7 +358,7 @@ def setUp(self): mp = paddle.static.Program() sp = paddle.static.Program() - with paddle.fluid.unique_name.guard(): + with paddle.base.unique_name.guard(): with paddle.static.program_guard(mp, sp): rnn2 = paddle.nn.LSTM( 16, @@ -369,7 +369,7 @@ def setUp(self): ) exe = paddle.static.Executor(place) - scope = paddle.fluid.Scope() + scope = paddle.base.Scope() with paddle.static.scope_guard(scope): exe.run(sp) convert_params_for_net_static(rnn1, rnn2, place) @@ -399,7 +399,7 @@ def test_with_initial_state(self): y1, (h1, c1) = rnn1(x, (prev_h, prev_c)) - with paddle.fluid.unique_name.guard(): + with paddle.base.unique_name.guard(): with paddle.static.program_guard(mp, sp): x_data = paddle.static.data( "input", @@ -440,7 +440,7 @@ def test_with_zero_state(self): y1, (h1, c1) = rnn1(x) - with paddle.fluid.unique_name.guard(): + with paddle.base.unique_name.guard(): with paddle.static.program_guard(mp, sp): x_data = paddle.static.data( "input", @@ -473,7 +473,7 @@ def test_with_input_lengths(self): y1, (h1, c1) = rnn1(x, sequence_length=sequence_length) - with paddle.fluid.unique_name.guard(): + with paddle.base.unique_name.guard(): with paddle.static.program_guard(mp, sp): x_data = paddle.static.data( "input", @@ -508,7 +508,7 @@ def runTest(self): def load_tests(loader, tests, pattern): suite = unittest.TestSuite() devices = ( - ["cpu", "gpu"] if paddle.fluid.is_compiled_with_cuda() else ["cpu"] + ["cpu", "gpu"] if paddle.base.is_compiled_with_cuda() else ["cpu"] ) for direction in ["forward", "bidirectional", "bidirect"]: for time_major in [True, False]: diff --git a/test/rnn/test_wrappers.py b/test/rnn/test_wrappers.py index aa926808f397c..c6dd9692798de 100644 --- a/test/rnn/test_wrappers.py +++ b/test/rnn/test_wrappers.py @@ -196,7 +196,7 @@ def runTest(self): def load_tests(loader, tests, pattern): suite = unittest.TestSuite() devices = ( - ["cpu", "gpu"] if paddle.fluid.is_compiled_with_cuda() else ["cpu"] + ["cpu", "gpu"] if paddle.base.is_compiled_with_cuda() else ["cpu"] ) for direction in ["forward", "backward"]: for device in devices: diff --git a/test/sequence/test_sequence_conv.py b/test/sequence/test_sequence_conv.py index 6f71298a0fc02..62ac526d61e0f 100644 --- a/test/sequence/test_sequence_conv.py +++ b/test/sequence/test_sequence_conv.py @@ -292,19 +292,19 @@ def init_test_case(self): class TestSeqConvApi(unittest.TestCase): def test_api(self): - from paddle import fluid + from paddle import base x = paddle.static.data('x', shape=[-1, 32], lod_level=1) y = paddle.static.nn.sequence_lod.sequence_conv( input=x, num_filters=2, filter_size=3, padding_start=None ) - place = fluid.CPUPlace() - x_tensor = fluid.create_lod_tensor( + place = base.CPUPlace() + x_tensor = base.create_lod_tensor( np.random.rand(10, 32).astype("float32"), [[2, 3, 1, 4]], place ) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + exe.run(base.default_startup_program()) ret = exe.run(feed={'x': x_tensor}, fetch_list=[y], return_numpy=False) diff --git a/test/sequence/test_sequence_expand_as.py b/test/sequence/test_sequence_expand_as.py index ba141e20e1924..090a6242c003a 100644 --- a/test/sequence/test_sequence_expand_as.py +++ b/test/sequence/test_sequence_expand_as.py @@ -19,7 +19,7 @@ from eager_op_test import OpTest import paddle -from paddle.fluid import Program, program_guard +from paddle.base import Program, program_guard class TestSequenceExpandAs(OpTest): diff --git 
a/test/sequence/test_sequence_first_step.py b/test/sequence/test_sequence_first_step.py index 2967832b69115..d808c20ff2633 100644 --- a/test/sequence/test_sequence_first_step.py +++ b/test/sequence/test_sequence_first_step.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid.framework import Program, program_guard +from paddle.base.framework import Program, program_guard sys.path.append("../") diff --git a/test/sequence/test_sequence_last_step.py b/test/sequence/test_sequence_last_step.py index 771b49379110a..8799f20d15bd0 100644 --- a/test/sequence/test_sequence_last_step.py +++ b/test/sequence/test_sequence_last_step.py @@ -17,7 +17,7 @@ import numpy as np import paddle -from paddle.fluid.framework import Program, program_guard +from paddle.base.framework import Program, program_guard sys.path.append("../") diff --git a/test/sequence/test_sequence_mask.py b/test/sequence/test_sequence_mask.py index 152092eb4424d..54d7a3c87dcad 100644 --- a/test/sequence/test_sequence_mask.py +++ b/test/sequence/test_sequence_mask.py @@ -19,7 +19,7 @@ from eager_op_test import OpTest import paddle -from paddle.fluid.framework import ( +from paddle.base.framework import ( Program, convert_np_dtype_to_dtype_, program_guard, diff --git a/test/sequence/test_sequence_pad_op.py b/test/sequence/test_sequence_pad_op.py index b657ca8640aab..b78ddf12eef9a 100644 --- a/test/sequence/test_sequence_pad_op.py +++ b/test/sequence/test_sequence_pad_op.py @@ -19,7 +19,7 @@ from eager_op_test import OpTest import paddle -from paddle.fluid import core +from paddle.base import core class TestSequencePadOp(OpTest): diff --git a/test/sequence/test_sequence_softmax_op.py b/test/sequence/test_sequence_softmax_op.py index 6bc81cb80ed22..de6258bbc2612 100644 --- a/test/sequence/test_sequence_softmax_op.py +++ b/test/sequence/test_sequence_softmax_op.py @@ -19,7 +19,7 @@ from eager_op_test import OpTest from test_softmax_op import stable_softmax -from paddle.fluid import core +from paddle.base import core class TestSequenceSoftmaxOp(OpTest): diff --git a/test/standalone_executor/test_standalone_controlflow.py b/test/standalone_executor/test_standalone_controlflow.py index fcdf7f8158708..ecd0e517f89c7 100644 --- a/test/standalone_executor/test_standalone_controlflow.py +++ b/test/standalone_executor/test_standalone_controlflow.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle.fluid import core -from paddle.fluid.framework import Program, program_guard +from paddle.base import core +from paddle.base.framework import Program, program_guard paddle.enable_static() diff --git a/test/standalone_executor/test_standalone_cross_step_overlap.py b/test/standalone_executor/test_standalone_cross_step_overlap.py index a4fe9f9d25849..170ed47e2bd84 100644 --- a/test/standalone_executor/test_standalone_cross_step_overlap.py +++ b/test/standalone_executor/test_standalone_cross_step_overlap.py @@ -31,7 +31,7 @@ def setUp(self): self.step_num = 3 def test_cross_step_overlap(self): - if not paddle.fluid.core.is_compiled_with_cuda(): + if not paddle.base.core.is_compiled_with_cuda(): return # In this test case, z=x+y is calculated in the default stream, diff --git a/test/standalone_executor/test_standalone_custom_stream.py b/test/standalone_executor/test_standalone_custom_stream.py index 4126f84ed1e8c..f9635a918068c 100644 --- a/test/standalone_executor/test_standalone_custom_stream.py +++ b/test/standalone_executor/test_standalone_custom_stream.py @@ -17,7 +17,7 @@ from test_standalone_executor import build_program 
import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/standalone_executor/test_standalone_executor.py b/test/standalone_executor/test_standalone_executor.py index 5724ff42667cb..65a9bf06ab563 100644 --- a/test/standalone_executor/test_standalone_executor.py +++ b/test/standalone_executor/test_standalone_executor.py @@ -22,7 +22,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core from paddle.profiler import profiler paddle.enable_static() @@ -89,7 +89,7 @@ def run_with_statistics(self, executor=None): enable = True if executor == 'ParallelExecutor': - main_program = paddle.fluid.compiler.CompiledProgram(main_program) + main_program = paddle.base.compiler.CompiledProgram(main_program) enable = False elif executor == 'Executor': enable = False @@ -236,9 +236,9 @@ def test_with_feed(self): data = np.ones([2, 2], dtype="float32") feed = {"a": data, 'fake_input': data} - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): res = self.run_new_executor(feed) - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): gt = self.run_dygraph(feed) for x, y in zip(gt, res): np.testing.assert_array_equal(x, y) @@ -311,7 +311,7 @@ def test_exception(self): def test_nan(self): flags = {'FLAGS_check_nan_inf': True, 'FLAGS_benchmark': True} - paddle.fluid.set_flags(flags) + paddle.base.set_flags(flags) feed = [ { 'id': np.array([1, 2, 3, 4, 5]).astype(np.int64), @@ -345,7 +345,7 @@ def test_scope_find_temp_var(self): class TestFetchEmptyTensor(unittest.TestCase): def test_fetch(self): places = [paddle.CPUPlace()] - if paddle.fluid.core.is_compiled_with_cuda(): + if paddle.base.core.is_compiled_with_cuda(): places.append(paddle.CUDAPlace(0)) for place in places: with paddle.static.program_guard(paddle.static.Program()): @@ -357,10 +357,10 @@ def test_fetch(self): class TestInplaceApiWithDataTransform(unittest.TestCase): def test_increment(self): - if paddle.fluid.core.is_compiled_with_cuda(): - with paddle.fluid.device_guard("gpu:0"): + if paddle.base.core.is_compiled_with_cuda(): + with paddle.base.device_guard("gpu:0"): x = paddle.tensor.fill_constant([1], "float32", 0) - with paddle.fluid.device_guard("cpu"): + with paddle.base.device_guard("cpu"): x = paddle.increment(x) exe = paddle.static.Executor(paddle.CUDAPlace(0)) for i in range(10): diff --git a/test/standalone_executor/test_standalone_executor_aot_choose_kernel.py b/test/standalone_executor/test_standalone_executor_aot_choose_kernel.py index 334ac8ffc1a9f..f950e165132d4 100644 --- a/test/standalone_executor/test_standalone_executor_aot_choose_kernel.py +++ b/test/standalone_executor/test_standalone_executor_aot_choose_kernel.py @@ -99,14 +99,14 @@ def run_resnet50(aot_choose_kernel=False, use_amp=False): class TestAOTChooseKernel(unittest.TestCase): def test_resnet50_aot_choose_kernel(self): - if not paddle.fluid.core.is_compiled_with_cuda(): + if not paddle.base.core.is_compiled_with_cuda(): return loss1 = run_resnet50(aot_choose_kernel=True) loss2 = run_resnet50(aot_choose_kernel=False) self.assertEqual(loss1, loss2) def test_resnet50_amp_aot_choose_kernel(self): - if not paddle.fluid.core.is_compiled_with_cuda(): + if not paddle.base.core.is_compiled_with_cuda(): return loss1 = run_resnet50(aot_choose_kernel=True, use_amp=True) loss2 = run_resnet50(aot_choose_kernel=False, use_amp=True) diff --git a/test/standalone_executor/test_standalone_executor_multi_micro_batch.py 
b/test/standalone_executor/test_standalone_executor_multi_micro_batch.py index 61b76559c0098..f814a18643ede 100644 --- a/test/standalone_executor/test_standalone_executor_multi_micro_batch.py +++ b/test/standalone_executor/test_standalone_executor_multi_micro_batch.py @@ -20,9 +20,9 @@ import paddle from paddle.distributed.passes.pass_utils import get_skip_gc_vars, split_program -from paddle.fluid import core -from paddle.fluid.core import Job, Plan -from paddle.fluid.executor import _add_feed_fetch_ops, _StandaloneExecutor +from paddle.base import core +from paddle.base.core import Job, Plan +from paddle.base.executor import _add_feed_fetch_ops, _StandaloneExecutor from paddle.nn import TransformerEncoderLayer paddle.enable_static() @@ -94,7 +94,7 @@ def build_program(self, micro_batch_size, src_len, d_model, n_head): dtype="float32", ) - loader = paddle.fluid.io.DataLoader.from_generator( + loader = paddle.base.io.DataLoader.from_generator( feed_list=[enc_input, attn_mask], use_double_buffer=False, capacity=16, diff --git a/test/standalone_executor/test_standalone_executor_plan.py b/test/standalone_executor/test_standalone_executor_plan.py index 72fc4ab9bb731..82bb89855ef89 100644 --- a/test/standalone_executor/test_standalone_executor_plan.py +++ b/test/standalone_executor/test_standalone_executor_plan.py @@ -15,7 +15,7 @@ import unittest from paddle import static -from paddle.fluid import core +from paddle.base import core class TestStandaloneExecutorPlan(unittest.TestCase): diff --git a/test/standalone_executor/test_standalone_multiply_write.py b/test/standalone_executor/test_standalone_multiply_write.py index 96e35a336951c..63a13f489c0ef 100644 --- a/test/standalone_executor/test_standalone_multiply_write.py +++ b/test/standalone_executor/test_standalone_multiply_write.py @@ -17,7 +17,7 @@ from test_standalone_controlflow import TestCompatibility import paddle -from paddle.fluid.framework import Program +from paddle.base.framework import Program paddle.enable_static() diff --git a/test/tokenizer/test_faster_tokenizer_op.py b/test/tokenizer/test_faster_tokenizer_op.py index 3aea8b4155c2a..7d67c09226288 100755 --- a/test/tokenizer/test_faster_tokenizer_op.py +++ b/test/tokenizer/test_faster_tokenizer_op.py @@ -21,8 +21,8 @@ import paddle from paddle import _legacy_C_ops, nn -from paddle.fluid.framework import core -from paddle.fluid.layer_helper import LayerHelper +from paddle.base.framework import core +from paddle.base.layer_helper import LayerHelper from paddle.framework import in_dynamic_mode diff --git a/test/xpu/collective_allgather_op_xpu.py b/test/xpu/collective_allgather_op_xpu.py index 6e2ab46dc9975..0c88f00f1a10f 100644 --- a/test/xpu/collective_allgather_op_xpu.py +++ b/test/xpu/collective_allgather_op_xpu.py @@ -15,8 +15,8 @@ from test_collective_base_xpu import TestCollectiveRunnerBase, runtime_main import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -28,7 +28,7 @@ def __init__(self): def get_model(self, main_prog, startup_program): ring_id = 0 nranks = 2 - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = paddle.static.data( name="tindata", shape=[10, 1000], dtype='float32' ) diff --git a/test/xpu/collective_allreduce_op_xpu.py b/test/xpu/collective_allreduce_op_xpu.py index 6ab13b2be13e4..f85502580b712 100644 --- a/test/xpu/collective_allreduce_op_xpu.py +++ b/test/xpu/collective_allreduce_op_xpu.py @@ 
-17,8 +17,8 @@ from test_collective_base_xpu import TestCollectiveRunnerBase, runtime_main import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -29,7 +29,7 @@ def __init__(self): def get_model(self, main_prog, startup_program): ring_id = 0 - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = paddle.static.data( name="tindata", shape=[10, 1000], dtype='float32' ) diff --git a/test/xpu/collective_broadcast_op_xpu.py b/test/xpu/collective_broadcast_op_xpu.py index a5d51e714115a..fee4deaf6caff 100755 --- a/test/xpu/collective_broadcast_op_xpu.py +++ b/test/xpu/collective_broadcast_op_xpu.py @@ -17,8 +17,8 @@ from test_collective_base_xpu import TestCollectiveRunnerBase, runtime_main import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -30,7 +30,7 @@ def __init__(self): def get_model(self, main_prog, startup_program): ring_id = 0 rootid = 1 - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = paddle.static.data( name="tindata", shape=[10, 1000], dtype='float32' ) diff --git a/test/xpu/collective_concat_op.py b/test/xpu/collective_concat_op.py index 1fd946dff091d..ebda6c3883f6a 100644 --- a/test/xpu/collective_concat_op.py +++ b/test/xpu/collective_concat_op.py @@ -15,8 +15,8 @@ from test_collective_base_xpu import TestCollectiveRunnerBase, runtime_main import paddle -from paddle import fluid -from paddle.fluid import core, layers +from paddle import base +from paddle.base import core, layers paddle.enable_static() @@ -28,7 +28,7 @@ def __init__(self): def get_model(self, main_prog, startup_program): ring_id = 0 nranks = 2 - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = layers.data( name="tindata", shape=[10, 1000], dtype='float32' ) diff --git a/test/xpu/collective_identity_op_xpu.py b/test/xpu/collective_identity_op_xpu.py index 840e0236371c9..1c5cac6716388 100644 --- a/test/xpu/collective_identity_op_xpu.py +++ b/test/xpu/collective_identity_op_xpu.py @@ -15,8 +15,8 @@ from test_collective_base_xpu import TestCollectiveRunnerBase, runtime_main import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -28,7 +28,7 @@ def __init__(self): def get_model(self, main_prog, startup_program): ring_id = 0 nranks = 2 - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = paddle.static.data( name="tindata", shape=[10, 1000], dtype='float32' ) diff --git a/test/xpu/collective_split_op.py b/test/xpu/collective_split_op.py index 56ca1a6d71751..82f8db770e524 100644 --- a/test/xpu/collective_split_op.py +++ b/test/xpu/collective_split_op.py @@ -15,8 +15,8 @@ from test_collective_base_xpu import TestCollectiveRunnerBase, runtime_main import paddle -from paddle import fluid -from paddle.fluid import core, layers +from paddle import base +from paddle.base import core, layers paddle.enable_static() @@ -28,7 +28,7 @@ def __init__(self): def get_model(self, main_prog, startup_program): ring_id = 0 nranks = 2 - with fluid.program_guard(main_prog, startup_program): + with base.program_guard(main_prog, startup_program): tindata = layers.data( name="tindata", 
shape=[10, 1000], dtype='float32' ) diff --git a/test/xpu/get_test_cover_info.py b/test/xpu/get_test_cover_info.py index 4ef72587a1e20..806847f451c12 100644 --- a/test/xpu/get_test_cover_info.py +++ b/test/xpu/get_test_cover_info.py @@ -19,7 +19,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core type_dict_paddle_to_str = { paddle.bool: 'bool', diff --git a/test/xpu/op_test_xpu.py b/test/xpu/op_test_xpu.py index 076859c8a1708..b17561758ad2d 100644 --- a/test/xpu/op_test_xpu.py +++ b/test/xpu/op_test_xpu.py @@ -24,10 +24,10 @@ from white_list import no_grad_set_white_list, op_threshold_white_list import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.backward import append_backward -from paddle.fluid.framework import Program, convert_np_dtype_to_dtype_ +from paddle import base +from paddle.base import core +from paddle.base.backward import append_backward +from paddle.base.framework import Program, convert_np_dtype_to_dtype_ class XPUOpTest(OpTest): @@ -359,7 +359,7 @@ def get_grad_with_place( ) fetch_list = [g for p, g in param_grad_list] - executor = fluid.Executor(place) + executor = base.Executor(place) return list( map( np.array, diff --git a/test/xpu/test_activation_op_xpu.py b/test/xpu/test_activation_op_xpu.py index d6ece4edf8107..b2a546542b91a 100644 --- a/test/xpu/test_activation_op_xpu.py +++ b/test/xpu/test_activation_op_xpu.py @@ -46,7 +46,7 @@ def set_case(self): x = np.random.uniform(-1, 1, self.shape).astype(self.dtype) out = np.exp(x) self.attrs = {'use_xpu': True} - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} def init_dtype(self): @@ -72,7 +72,7 @@ def set_case(self): x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) out = np.exp(x) self.attrs = {'use_xpu': True} - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} class XPUTestExp_ZeroDIm(TestActivationOPBase): @@ -201,7 +201,7 @@ def set_case(self): out = 1 / (1 + np.exp(-self.x)) self.attrs = {'use_xpu': True} - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(self.x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(self.x)} self.outputs = {'Out': out} def init_config(self): @@ -248,7 +248,7 @@ def set_case(self): x = np.random.uniform(-1, 1, [11, 17]).astype(self.dtype) out = np.tanh(x) self.attrs = {'use_xpu': True} - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} @@ -271,7 +271,7 @@ def set_case(self): out = np.sqrt(x) self.attrs = {'use_xpu': True} - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} @@ -294,7 +294,7 @@ def set_case(self): out = np.floor(x) self.attrs = {'use_xpu': True} - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} def test_check_grad(self): @@ -325,7 +325,7 @@ def set_case(self): out = np.abs(x) self.attrs = {'use_xpu': True} - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} @@ -476,7 +476,7 @@ def set_case(self): out = np.log(x) self.attrs = {'use_xpu': True} - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': 
OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} class TestLogCase_ZeroDim(XPUTestLog): @@ -518,7 +518,7 @@ def set_case(self): out = np.square(self.x) self.attrs = {'use_xpu': True} - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(self.x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(self.x)} self.outputs = {'Out': out} def init_config(self): @@ -563,7 +563,7 @@ def set_case(self): self.init_config() out = np.power(self.x, self.factor) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(self.x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(self.x)} self.attrs = {'factor': self.factor, 'use_xpu': True} self.outputs = {'Out': out} @@ -662,7 +662,7 @@ def set_case(self): x = np.random.uniform(1, 2, [1111, 1117]).astype(self.dtype) out = np.reciprocal(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.attrs = {'use_xpu': True} @@ -775,7 +775,7 @@ def set_case(self): x = np.random.uniform(-1, 1, [10, 12]).astype(self.dtype) out = np.ceil(x) - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} self.attrs = {'use_xpu': True} diff --git a/test/xpu/test_adadelta_op_xpu.py b/test/xpu/test_adadelta_op_xpu.py index b6ef0fbdf8ec8..7e30557d2be78 100644 --- a/test/xpu/test_adadelta_op_xpu.py +++ b/test/xpu/test_adadelta_op_xpu.py @@ -23,7 +23,7 @@ from op_test_xpu import XPUOpTest import paddle -from paddle import fluid +from paddle import base paddle.enable_static() @@ -165,9 +165,9 @@ def test_adadelta_dygraph(self): def test_adadelta(self): self.dtype = self.in_type paddle.enable_static() - place = fluid.XPUPlace(0) - main = fluid.Program() - with fluid.program_guard(main): + place = base.XPUPlace(0) + main = base.Program() + with base.program_guard(main): x = paddle.static.data( name='x', shape=[-1, 13], dtype=self.dtype ) @@ -187,9 +187,9 @@ def test_adadelta(self): train_reader = paddle.batch( paddle.dataset.uci_housing.train(), batch_size=1 ) - feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + feeder = base.DataFeeder(place=place, feed_list=[x, y]) + exe = base.Executor(place) + exe.run(base.default_startup_program()) for data in train_reader(): exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list) diff --git a/test/xpu/test_adam_op_xpu.py b/test/xpu/test_adam_op_xpu.py index 02a84c7ae57b1..823cf1543e0a5 100644 --- a/test/xpu/test_adam_op_xpu.py +++ b/test/xpu/test_adam_op_xpu.py @@ -24,7 +24,7 @@ from op_test_xpu import XPUOpTest import paddle -from paddle.fluid import core +from paddle.base import core class XPUTestAdamOp(XPUOpTestWrapper): diff --git a/test/xpu/test_adamw_op_xpu.py b/test/xpu/test_adamw_op_xpu.py index 768cbe8151da3..1a777f2d23578 100644 --- a/test/xpu/test_adamw_op_xpu.py +++ b/test/xpu/test_adamw_op_xpu.py @@ -24,7 +24,7 @@ from op_test_xpu import XPUOpTest import paddle -from paddle import fluid +from paddle import base def adamw_step(inputs, attributes): @@ -185,13 +185,13 @@ def test_adamw_op_coverage(self): def test_adamw_op(self): paddle.enable_static() - place = fluid.XPUPlace(0) + place = base.XPUPlace(0) shape = [2, 3, 8, 8] - exe = fluid.Executor(place) - train_prog = fluid.Program() - startup = fluid.Program() - with fluid.program_guard(train_prog, startup): - with fluid.unique_name.guard(): + exe = base.Executor(place) + train_prog = base.Program() + startup = 
base.Program() + with base.program_guard(train_prog, startup): + with base.unique_name.guard(): data = paddle.static.data(name="data", shape=shape) conv = paddle.static.nn.conv2d(data, 8, 3) loss = paddle.mean(conv) @@ -437,7 +437,7 @@ def get_numpy_output(param, grad, moment1, moment2, lr_ratio, t): def test_adamw_op(self): paddle.enable_static() - place = fluid.XPUPlace(0) + place = base.XPUPlace(0) learning_rate = 0.0001 beta1 = 0.85 @@ -445,10 +445,10 @@ def test_adamw_op(self): weight_decay = 0.01 epsilon = 1e-8 - train_prog = fluid.Program() - startup = fluid.Program() - with fluid.program_guard(train_prog, startup): - with fluid.unique_name.guard(): + train_prog = base.Program() + startup = base.Program() + with base.program_guard(train_prog, startup): + with base.unique_name.guard(): x = paddle.static.data( name='x', shape=[None, 10], dtype='float32' ) @@ -557,7 +557,7 @@ def get_numpy_output(param, grad, moment1, moment2, lr_ratio, t): "linear_1.b_0@GRAD", ] - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(startup) test_prog = train_prog.clone(for_test=True) diff --git a/test/xpu/test_affine_channel_op_xpu.py b/test/xpu/test_affine_channel_op_xpu.py index c200235ff879c..8d404b35dd3b2 100644 --- a/test/xpu/test_affine_channel_op_xpu.py +++ b/test/xpu/test_affine_channel_op_xpu.py @@ -21,7 +21,7 @@ from op_test_xpu import XPUOpTest import paddle -from paddle.fluid import core +from paddle.base import core def affine_channel(x, scale, bias, layout): diff --git a/test/xpu/test_assign_value_op_xpu.py b/test/xpu/test_assign_value_op_xpu.py index a0e3a57dc8ac5..f6d2d2ec96ae3 100644 --- a/test/xpu/test_assign_value_op_xpu.py +++ b/test/xpu/test_assign_value_op_xpu.py @@ -23,8 +23,8 @@ from op_test_xpu import XPUOpTest import paddle -from paddle import fluid -from paddle.fluid import framework +from paddle import base +from paddle.base import framework paddle.enable_static() @@ -82,18 +82,18 @@ def setUp(self): self.value = (-100 + 200 * np.random.random(size=(2, 5))).astype( self.dtype ) - self.place = fluid.XPUPlace(0) + self.place = base.XPUPlace(0) def init_dtype(self): self.dtype = "float32" def test_assign(self): - main_program = fluid.Program() - with fluid.program_guard(main_program): + main_program = base.Program() + with base.program_guard(main_program): x = paddle.tensor.create_tensor(dtype=self.dtype) paddle.assign(self.value, output=x) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) [fetched_x] = exe.run(main_program, feed={}, fetch_list=[x]) np.testing.assert_allclose(fetched_x, self.value) self.assertEqual(fetched_x.dtype, self.value.dtype) @@ -115,7 +115,7 @@ def setUp(self): self.value = np.random.choice(a=[False, True], size=(2, 5)).astype( np.bool_ ) - self.place = fluid.XPUPlace(0) + self.place = base.XPUPlace(0) def init_dtype(self): self.dtype = "bool" diff --git a/test/xpu/test_batch_norm_op_xpu.py b/test/xpu/test_batch_norm_op_xpu.py index e9ca4da20e778..3432442930cc9 100644 --- a/test/xpu/test_batch_norm_op_xpu.py +++ b/test/xpu/test_batch_norm_op_xpu.py @@ -23,8 +23,8 @@ import paddle import paddle.nn.functional as F -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -260,11 +260,11 @@ def init_test(self): def test_global_stats(self): for p in self.places: - with fluid.dygraph.guard(p): + with base.dygraph.guard(p): x = paddle.randn([2, 6, 6, 4]) net1 = paddle.nn.BatchNorm( 6, - param_attr=fluid.ParamAttr( + param_attr=base.ParamAttr( 
initializer=paddle.nn.initializer.Constant(1.0) ), use_global_stats=self.use_global_stats, diff --git a/test/xpu/test_bilinear_interp_op_xpu.py b/test/xpu/test_bilinear_interp_op_xpu.py index a5a849f080e6a..6c08731d3b01d 100755 --- a/test/xpu/test_bilinear_interp_op_xpu.py +++ b/test/xpu/test_bilinear_interp_op_xpu.py @@ -471,12 +471,12 @@ def test_case(self): scale_tensor = paddle.static.data( name="scale_tensor", shape=[1], dtype="float32") - out1 = fluid.layers.resize_bilinear(x, out_shape=[12, 12]) - out2 = fluid.layers.resize_bilinear(x, out_shape=[12, dim]) - out3 = fluid.layers.resize_bilinear(x, out_shape=shape_tensor) - out4 = fluid.layers.resize_bilinear( + out1 = base.layers.resize_bilinear(x, out_shape=[12, 12]) + out2 = base.layers.resize_bilinear(x, out_shape=[12, dim]) + out3 = base.layers.resize_bilinear(x, out_shape=shape_tensor) + out4 = base.layers.resize_bilinear( x, out_shape=[4, 4], actual_shape=actual_size) - out5 = fluid.layers.resize_bilinear(x, scale=scale_tensor) + out5 = base.layers.resize_bilinear(x, scale=scale_tensor) x_data = np.random.random((2, 3, 6, 6)).astype("float32") dim_data = np.array([12]).astype("int32") @@ -485,9 +485,9 @@ def test_case(self): scale_data = np.array([2.0]).astype("float32") place = core.XPUPlace(0) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - results = exe.run(fluid.default_main_program(), + exe = base.Executor(place) + exe.run(base.default_startup_program()) + results = exe.run(base.default_main_program(), feed={ "x": x_data, "dim": dim_data, diff --git a/test/xpu/test_bitwise_op_xpu.py b/test/xpu/test_bitwise_op_xpu.py index 0466ad39d6702..1d261c95bb49a 100644 --- a/test/xpu/test_bitwise_op_xpu.py +++ b/test/xpu/test_bitwise_op_xpu.py @@ -59,8 +59,8 @@ def set_case(self): self.attrs = {'use_xpu': True} self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(x), - 'Y': OpTest.np_dtype_to_fluid_dtype(y), + 'X': OpTest.np_dtype_to_base_dtype(x), + 'Y': OpTest.np_dtype_to_base_dtype(y), } self.outputs = {'Out': out} @@ -133,8 +133,8 @@ def set_case(self): self.attrs = {'use_xpu': True} self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(x), - 'Y': OpTest.np_dtype_to_fluid_dtype(y), + 'X': OpTest.np_dtype_to_base_dtype(x), + 'Y': OpTest.np_dtype_to_base_dtype(y), } self.outputs = {'Out': out} @@ -206,8 +206,8 @@ def set_case(self): self.attrs = {'use_xpu': True} self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(x), - 'Y': OpTest.np_dtype_to_fluid_dtype(y), + 'X': OpTest.np_dtype_to_base_dtype(x), + 'Y': OpTest.np_dtype_to_base_dtype(y), } self.outputs = {'Out': out} @@ -270,7 +270,7 @@ def set_case(self): out = np.bitwise_not(x) self.attrs = {'use_xpu': True} - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} def init_case(self): diff --git a/test/xpu/test_c_concat.py b/test/xpu/test_c_concat.py index d2490aa3772dc..c3731a4e04c1a 100644 --- a/test/xpu/test_c_concat.py +++ b/test/xpu/test_c_concat.py @@ -22,7 +22,7 @@ from test_collective_base_xpu import TestDistBase import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/xpu/test_c_split.py b/test/xpu/test_c_split.py index 67e2f1a6cc5f6..c4533eaeb0bf6 100644 --- a/test/xpu/test_c_split.py +++ b/test/xpu/test_c_split.py @@ -22,7 +22,7 @@ from test_collective_base_xpu import TestDistBase import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git 
a/test/xpu/test_cast_op_xpu.py b/test/xpu/test_cast_op_xpu.py index fbfe6e979e7f4..0a8043d523f5f 100644 --- a/test/xpu/test_cast_op_xpu.py +++ b/test/xpu/test_cast_op_xpu.py @@ -23,8 +23,8 @@ from op_test_xpu import XPUOpTest import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard typeid_dict = { 'int32': int(core.VarDesc.VarType.INT32), @@ -93,8 +93,8 @@ class TestCastOpError(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): # The input type of cast_op must be Variable. - x1 = fluid.create_lod_tensor( - np.array([[-1]]), [[1]], fluid.XPUPlace(0) + x1 = base.create_lod_tensor( + np.array([[-1]]), [[1]], base.XPUPlace(0) ) self.assertRaises(TypeError, paddle.cast, x1, 'int32') diff --git a/test/xpu/test_clip_op_xpu.py b/test/xpu/test_clip_op_xpu.py index 79d4e3e779869..a85482a2cce67 100644 --- a/test/xpu/test_clip_op_xpu.py +++ b/test/xpu/test_clip_op_xpu.py @@ -23,8 +23,8 @@ from op_test_xpu import XPUOpTest import paddle -from paddle import fluid -from paddle.fluid import Program, core, program_guard +from paddle import base +from paddle.base import Program, core, program_guard class XPUTestClipOp(XPUOpTestWrapper): @@ -159,11 +159,11 @@ def test_clip(self): max = paddle.static.data(name='max', shape=[1], dtype='float32') place = ( - fluid.XPUPlace(0) - if fluid.core.is_compiled_with_xpu() - else fluid.CPUPlace() + base.XPUPlace(0) + if base.core.is_compiled_with_xpu() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) out_1 = self._executed_api(images, min=min, max=max) out_2 = self._executed_api(images, min=0.2, max=0.9) @@ -175,7 +175,7 @@ def test_clip(self): out_8 = self._executed_api(images) res1, res2, res3, res4, res5, res6, res7, res8 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={ "image": data, "min": np.array([0.2]).astype('float32'), @@ -197,9 +197,9 @@ def test_clip(self): def test_clip_dygraph(self): paddle.disable_static() place = ( - fluid.XPUPlace(0) - if fluid.core.is_compiled_with_xpu() - else fluid.CPUPlace() + base.XPUPlace(0) + if base.core.is_compiled_with_xpu() + else base.CPUPlace() ) paddle.disable_static(place) data_shape = [1, 9, 9, 4] diff --git a/test/xpu/test_coalesce_tensor_op_xpu.py b/test/xpu/test_coalesce_tensor_op_xpu.py index f0f053137949f..614274e72fda0 100644 --- a/test/xpu/test_coalesce_tensor_op_xpu.py +++ b/test/xpu/test_coalesce_tensor_op_xpu.py @@ -16,7 +16,7 @@ import numpy as np -from paddle.fluid import core +from paddle.base import core alignment = 256 from get_test_cover_info import ( @@ -40,7 +40,7 @@ class TestAllocContinuousSpace(XPUOpTest): def setUp(self): self.op_type = "coalesce_tensor" self.use_xpu = True - self.dtype, self.fluid_dtype = self.init_dtype() + self.dtype, self.base_dtype = self.init_dtype() attrs = self.init_attr() self.copy_data = attrs["copy_data"] self.constant = attrs["constant"] @@ -76,7 +76,7 @@ def init_attr(self): "copy_data": True, "set_constant": False, "constant": 0.0, - "dtype": self.fluid_dtype, + "dtype": self.base_dtype, } def init_output(self, input_list, set_constant, constant): @@ -115,7 +115,7 @@ def init_attr(self): "copy_data": False, "set_constant": True, "constant": 0.5, - "dtype": self.fluid_dtype, + "dtype": self.base_dtype, "user_defined_size_of_dtype": 2, } diff --git a/test/xpu/test_collective_allgather_xpu.py b/test/xpu/test_collective_allgather_xpu.py index 
3651ed2062957..fc30a2449e112 100644 --- a/test/xpu/test_collective_allgather_xpu.py +++ b/test/xpu/test_collective_allgather_xpu.py @@ -22,7 +22,7 @@ from test_collective_base_xpu import TestDistBase import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/xpu/test_collective_allreduce_xpu.py b/test/xpu/test_collective_allreduce_xpu.py index 05539aeaae432..c8626c72e1a17 100644 --- a/test/xpu/test_collective_allreduce_xpu.py +++ b/test/xpu/test_collective_allreduce_xpu.py @@ -22,7 +22,7 @@ from test_collective_base_xpu import TestDistBase import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/xpu/test_collective_base_xpu.py b/test/xpu/test_collective_base_xpu.py index 022ef400164d1..3cc33125f9233 100644 --- a/test/xpu/test_collective_base_xpu.py +++ b/test/xpu/test_collective_base_xpu.py @@ -24,9 +24,9 @@ import numpy as np -import paddle.fluid.unique_name as nameGen -from paddle import fluid -from paddle.fluid import core +import paddle.base.unique_name as nameGen +from paddle import base +from paddle.base import core def DataTypeCast(date_type): @@ -127,8 +127,8 @@ def initCommunicator( ) def run_trainer(self, args): - train_prog = fluid.Program() - startup_prog = fluid.Program() + train_prog = base.Program() + startup_prog = base.Program() endpoints = args["endpoints"].split(",") rank = args["trainerid"] current_endpoint = args["currentendpoint"] @@ -139,8 +139,8 @@ def run_trainer(self, args): self.rank = rank result = self.get_model(train_prog, startup_prog) device_id = int(os.getenv("FLAGS_selected_xpus", "0")) - place = fluid.XPUPlace(device_id) - exe = fluid.Executor(place) + place = base.XPUPlace(device_id) + exe = base.Executor(place) exe.run(startup_prog) np.random.seed(os.getpid()) np_data_type = DataTypeCast(args["data_type"]) diff --git a/test/xpu/test_collective_broadcast_xpu.py b/test/xpu/test_collective_broadcast_xpu.py index 5ddb451e7e4fa..061c0a76a041f 100644 --- a/test/xpu/test_collective_broadcast_xpu.py +++ b/test/xpu/test_collective_broadcast_xpu.py @@ -18,7 +18,7 @@ from test_collective_base_xpu import TestDistBase import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/xpu/test_collective_identity_xpu.py b/test/xpu/test_collective_identity_xpu.py index 421f9168a28d3..830a5657e7b43 100644 --- a/test/xpu/test_collective_identity_xpu.py +++ b/test/xpu/test_collective_identity_xpu.py @@ -22,7 +22,7 @@ from test_collective_base_xpu import TestDistBase import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/xpu/test_conv2d_op_xpu.py b/test/xpu/test_conv2d_op_xpu.py index d09402f934c69..6a97a2067727e 100644 --- a/test/xpu/test_conv2d_op_xpu.py +++ b/test/xpu/test_conv2d_op_xpu.py @@ -23,7 +23,7 @@ from op_test_xpu import XPUOpTest import paddle -from paddle.fluid import core +from paddle.base import core def conv2d_forward_naive( @@ -232,8 +232,8 @@ def setUp(self): output = output.astype(self.dtype) self.inputs = { - 'Input': XPUOpTest.np_dtype_to_fluid_dtype(input), - 'Filter': XPUOpTest.np_dtype_to_fluid_dtype(filter), + 'Input': XPUOpTest.np_dtype_to_base_dtype(input), + 'Filter': XPUOpTest.np_dtype_to_base_dtype(filter), } self.attrs = { 'strides': self.stride, @@ -392,8 +392,8 @@ def setUp(self): output = output.astype(self.dtype) self.inputs = { - 'Input': XPUOpTest.np_dtype_to_fluid_dtype(input), - 'Filter': 
XPUOpTest.np_dtype_to_fluid_dtype(filter), + 'Input': XPUOpTest.np_dtype_to_base_dtype(input), + 'Filter': XPUOpTest.np_dtype_to_base_dtype(filter), } self.attrs = { 'strides': self.stride, diff --git a/test/xpu/test_conv3d_op_xpu.py b/test/xpu/test_conv3d_op_xpu.py index 566c1960f8263..4ecdeb8c7cd6c 100644 --- a/test/xpu/test_conv3d_op_xpu.py +++ b/test/xpu/test_conv3d_op_xpu.py @@ -235,8 +235,8 @@ def setUp(self): ).astype(self.dtype) self.inputs = { - 'Input': XPUOpTest.np_dtype_to_fluid_dtype(input), - 'Filter': XPUOpTest.np_dtype_to_fluid_dtype(filter), + 'Input': XPUOpTest.np_dtype_to_base_dtype(input), + 'Filter': XPUOpTest.np_dtype_to_base_dtype(filter), } self.attrs = { 'strides': self.stride, @@ -409,8 +409,8 @@ def setUp(self): ).astype(self.dtype) self.inputs = { - 'Input': XPUOpTest.np_dtype_to_fluid_dtype(input), - 'Filter': XPUOpTest.np_dtype_to_fluid_dtype(filter), + 'Input': XPUOpTest.np_dtype_to_base_dtype(input), + 'Filter': XPUOpTest.np_dtype_to_base_dtype(filter), } self.attrs = { 'strides': self.stride, diff --git a/test/xpu/test_deformable_conv_op_xpu.py b/test/xpu/test_deformable_conv_op_xpu.py index 62d274fbc74f0..da8d1b86dcf7f 100644 --- a/test/xpu/test_deformable_conv_op_xpu.py +++ b/test/xpu/test_deformable_conv_op_xpu.py @@ -23,7 +23,7 @@ from op_test_xpu import OpTest, XPUOpTest import paddle -from paddle.fluid import core +from paddle.base import core def dmc_bilinear(data_im, height, width, h, w): @@ -162,10 +162,10 @@ def setUp(self): output = output.astype(self.dtype) self.inputs = { - 'Input': OpTest.np_dtype_to_fluid_dtype(input), - 'Offset': OpTest.np_dtype_to_fluid_dtype(offset), - 'Mask': OpTest.np_dtype_to_fluid_dtype(mask), - 'Filter': OpTest.np_dtype_to_fluid_dtype(filter), + 'Input': OpTest.np_dtype_to_base_dtype(input), + 'Offset': OpTest.np_dtype_to_base_dtype(offset), + 'Mask': OpTest.np_dtype_to_base_dtype(mask), + 'Filter': OpTest.np_dtype_to_base_dtype(filter), } self.attrs = { 'strides': self.stride, diff --git a/test/xpu/test_device_guard_xpu.py b/test/xpu/test_device_guard_xpu.py index cc9fb142279ac..ce85946aee74e 100644 --- a/test/xpu/test_device_guard_xpu.py +++ b/test/xpu/test_device_guard_xpu.py @@ -16,7 +16,7 @@ import warnings import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/xpu/test_diag_v2_op_xpu.py b/test/xpu/test_diag_v2_op_xpu.py index 51f42d00507fe..3c688337cb57a 100644 --- a/test/xpu/test_diag_v2_op_xpu.py +++ b/test/xpu/test_diag_v2_op_xpu.py @@ -23,8 +23,8 @@ from op_test_xpu import XPUOpTest import paddle -from paddle import fluid -from paddle.fluid import Program, program_guard +from paddle import base +from paddle.base import Program, program_guard paddle.enable_static() @@ -240,9 +240,9 @@ def run_static(self, use_gpu=False): result12 = paddle.diag(x5, offset=-1) result13 = paddle.diag(x6, offset=-1) - place = fluid.XPUPlace(0) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + place = base.XPUPlace(0) + exe = base.Executor(place) + exe.run(base.default_startup_program()) ( res0, res1, @@ -299,12 +299,12 @@ def run_static(self, use_gpu=False): np.testing.assert_allclose(res13, self.expected12, rtol=1e-05) def test_xpu(self): - paddle.disable_static(place=paddle.fluid.XPUPlace(0)) + paddle.disable_static(place=paddle.base.XPUPlace(0)) self.run_imperative() paddle.enable_static() - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): self.run_static() diff --git a/test/xpu/test_dropout_op_xpu.py 
b/test/xpu/test_dropout_op_xpu.py index 931089d7a5656..345eeda034789 100644 --- a/test/xpu/test_dropout_op_xpu.py +++ b/test/xpu/test_dropout_op_xpu.py @@ -18,8 +18,8 @@ from op_test_xpu import XPUOpTest import paddle -from paddle import _legacy_C_ops, fluid -from paddle.fluid import Program, program_guard +from paddle import _legacy_C_ops, base +from paddle.base import Program, program_guard paddle.enable_static() @@ -125,10 +125,10 @@ def test_errors(self): def test_Variable(): # the input of dropout must be Variable. - x1 = fluid.create_lod_tensor( + x1 = base.create_lod_tensor( np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], - fluid.CPUPlace(), + base.CPUPlace(), ) paddle.nn.functional.dropout(x1, p=0.5) @@ -147,15 +147,15 @@ def test_dtype(): class TestDropoutCAPI(unittest.TestCase): def setUp(self): np.random.seed(123) - self.places = [fluid.CPUPlace()] - self.places.append(fluid.XPUPlace(0)) + self.places = [base.CPUPlace()] + self.places.append(base.XPUPlace(0)) def test_dygraph(self): for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.random.random([40, 40]).astype(self.in_type) result_np = input_np - input = fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) m = paddle.nn.Dropout(p=0.0) m.eval() result = m(input) @@ -164,8 +164,8 @@ def test_dygraph(self): class TestDropoutBackward(unittest.TestCase): def setUp(self): np.random.seed(123) - self.places = [fluid.CPUPlace()] - self.places.append(fluid.XPUPlace(0)) + self.places = [base.CPUPlace()] + self.places.append(base.XPUPlace(0)) def cal_grad_upscale_train(self, mask, prob): return mask.astype(self.in_type) / (1 - prob) @@ -175,7 +175,7 @@ def cal_grad_downscale_in_infer(self, mask): def test_backward_downscale_in_infer(self): for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input = paddle.uniform([40, 40], dtype=self.in_type) input.stop_gradient = False out, mask = _legacy_C_ops.dropout( @@ -190,7 +190,7 @@ def test_backward_downscale_in_infer(self): def test_backward_upscale_train(self): for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): prob = 0.5 input = paddle.uniform([40, 40], dtype=self.in_type) input.stop_gradient = False @@ -210,7 +210,7 @@ def test_backward_upscale_train(self): def test_backward_upscale_train_2(self): for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): prob = 0.3 input = paddle.uniform([40, 40], dtype=self.in_type) input.stop_gradient = False diff --git a/test/xpu/test_elementwise_add_op_xpu.py b/test/xpu/test_elementwise_add_op_xpu.py index e77b840064a87..796f936c16f87 100644 --- a/test/xpu/test_elementwise_add_op_xpu.py +++ b/test/xpu/test_elementwise_add_op_xpu.py @@ -25,7 +25,7 @@ from op_test_xpu import XPUOpTest import paddle -from paddle import fluid +from paddle import base paddle.enable_static() @@ -43,8 +43,8 @@ def setUp(self): self.init_axis() self.init_max_relative_error() self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), + 'X': OpTest.np_dtype_to_base_dtype(self.x), + 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} self.outputs = {'Out': self.out} @@ -261,7 +261,7 @@ def init_axis(self): class TestAddOp(unittest.TestCase): def test_name(self): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): x = 
paddle.static.data(name="x", shape=[2, 3], dtype="float32") y = paddle.static.data(name='y', shape=[2, 3], dtype='float32') @@ -269,7 +269,7 @@ def test_name(self): self.assertEqual(('add_res' in y_1.name), True) def test_declarative(self): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): def gen_data(): return { @@ -281,18 +281,18 @@ def gen_data(): y = paddle.static.data(name="y", shape=[3], dtype='float32') z = paddle.add(x, y) - place = fluid.XPUPlace(0) - exe = fluid.Executor(place) + place = base.XPUPlace(0) + exe = base.Executor(place) z_value = exe.run(feed=gen_data(), fetch_list=[z.name]) z_expected = np.array([3.0, 8.0, 6.0]) self.assertEqual((z_value == z_expected).all(), True) def test_dygraph(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): np_x = np.array([2, 3, 4]).astype('float32') np_y = np.array([1, 5, 2]).astype('float32') - x = fluid.dygraph.to_variable(np_x) - y = fluid.dygraph.to_variable(np_y) + x = base.dygraph.to_variable(np_x) + y = base.dygraph.to_variable(np_y) z = paddle.add(x, y) np_z = z.numpy() z_expected = np.array([3.0, 8.0, 6.0]) diff --git a/test/xpu/test_elementwise_add_op_xpu_kp.py b/test/xpu/test_elementwise_add_op_xpu_kp.py index 76daa814c4fc7..ff5c1afeaa902 100644 --- a/test/xpu/test_elementwise_add_op_xpu_kp.py +++ b/test/xpu/test_elementwise_add_op_xpu_kp.py @@ -20,7 +20,7 @@ from op_test_xpu import XPUOpTest import paddle -from paddle import fluid +from paddle import base paddle.enable_static() @@ -36,8 +36,8 @@ def setUp(self): self.init_axis() self.init_max_relative_error() self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), + 'X': OpTest.np_dtype_to_base_dtype(self.x), + 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} self.outputs = {'Out': self.out} @@ -308,7 +308,7 @@ def init_axis(self): ) class TestAddOp(unittest.TestCase): def test_name(self): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): x = paddle.static.data(name="x", shape=[2, 3], dtype="float32") y = paddle.static.data(name='y', shape=[2, 3], dtype='float32') @@ -316,7 +316,7 @@ def test_name(self): self.assertEqual(('add_res' in y_1.name), True) def test_declarative(self): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): def gen_data(): return { @@ -328,18 +328,18 @@ def gen_data(): y = paddle.static.data(name="y", shape=[3], dtype='float32') z = paddle.add(x, y) - place = fluid.XPUPlace(0) - exe = fluid.Executor(place) + place = base.XPUPlace(0) + exe = base.Executor(place) z_value = exe.run(feed=gen_data(), fetch_list=[z.name]) z_expected = np.array([3.0, 8.0, 6.0]) self.assertEqual((z_value == z_expected).all(), True) def test_dygraph(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): np_x = np.array([2, 3, 4]).astype('float32') np_y = np.array([1, 5, 2]).astype('float32') - x = fluid.dygraph.to_variable(np_x) - y = fluid.dygraph.to_variable(np_y) + x = base.dygraph.to_variable(np_x) + y = base.dygraph.to_variable(np_y) z = paddle.add(x, y) np_z = z.numpy() z_expected = np.array([3.0, 8.0, 6.0]) diff --git a/test/xpu/test_elementwise_div_op_xpu.py b/test/xpu/test_elementwise_div_op_xpu.py index 7ec429354204c..481bb6c9c0d7c 100644 --- a/test/xpu/test_elementwise_div_op_xpu.py +++ b/test/xpu/test_elementwise_div_op_xpu.py @@ -24,7 +24,7 @@ from op_test_xpu import XPUOpTest import paddle -from paddle import fluid 
+from paddle import base paddle.enable_static() @@ -404,13 +404,13 @@ def init_input_output(self): class TestElementwiseDivBroadcast(unittest.TestCase): def test_shape_with_batch_sizes(self): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): x_var = paddle.static.data( name='x', dtype='float32', shape=[None, 3, None, None] ) one = 2.0 out = one / x_var - exe = fluid.Executor(fluid.XPUPlace(0)) + exe = base.Executor(base.XPUPlace(0)) x = np.random.uniform(0.1, 0.6, (1, 3, 32, 32)).astype( 'float32' ) diff --git a/test/xpu/test_elementwise_floordiv_op_xpu.py b/test/xpu/test_elementwise_floordiv_op_xpu.py index c626f63bbd68a..1fbe2b72f70b1 100644 --- a/test/xpu/test_elementwise_floordiv_op_xpu.py +++ b/test/xpu/test_elementwise_floordiv_op_xpu.py @@ -47,8 +47,8 @@ def setUp(self): self.init_axis() self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), + 'X': OpTest.np_dtype_to_base_dtype(self.x), + 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} self.outputs = {'Out': self.out} diff --git a/test/xpu/test_elementwise_mod_op_xpu.py b/test/xpu/test_elementwise_mod_op_xpu.py index 518df93ade8f6..06ed903b47201 100644 --- a/test/xpu/test_elementwise_mod_op_xpu.py +++ b/test/xpu/test_elementwise_mod_op_xpu.py @@ -24,7 +24,7 @@ from op_test_xpu import XPUOpTest import paddle -from paddle import fluid +from paddle import base paddle.enable_static() @@ -43,8 +43,8 @@ def init_input_output(self): self.y = np.random.uniform(0, 1000, [10, 10]).astype(self.dtype) self.out = np.mod(self.x, self.y) self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), + 'X': OpTest.np_dtype_to_base_dtype(self.x), + 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.outputs = {'Out': self.out} self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} @@ -72,7 +72,7 @@ def test_check_output(self): class TestRemainderOp(unittest.TestCase): def test_dygraph(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): np_x = np.random.rand(22, 128, 3).astype('int64') np_y = np.random.rand(22, 128, 3).astype('int64') x = paddle.to_tensor(np_x) diff --git a/test/xpu/test_elementwise_mul_op_xpu.py b/test/xpu/test_elementwise_mul_op_xpu.py index 9dc75c96647a3..fdb9673cf1c72 100644 --- a/test/xpu/test_elementwise_mul_op_xpu.py +++ b/test/xpu/test_elementwise_mul_op_xpu.py @@ -89,8 +89,8 @@ def init_input_output(self): self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) self.out = np.multiply(self.x, self.y) self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(self.x), - 'Y': OpTest.np_dtype_to_fluid_dtype(self.y), + 'X': OpTest.np_dtype_to_base_dtype(self.x), + 'Y': OpTest.np_dtype_to_base_dtype(self.y), } self.outputs = {'Out': self.out} self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} diff --git a/test/xpu/test_empty_op_xpu.py b/test/xpu/test_empty_op_xpu.py index 71c25f335b1ba..6f54f4ea01cbf 100644 --- a/test/xpu/test_empty_op_xpu.py +++ b/test/xpu/test_empty_op_xpu.py @@ -23,7 +23,7 @@ from op_test_xpu import XPUOpTest import paddle -from paddle.fluid.framework import convert_np_dtype_to_dtype_ +from paddle.base.framework import convert_np_dtype_to_dtype_ paddle.enable_static() diff --git a/test/xpu/test_expand_as_v2_op_xpu.py b/test/xpu/test_expand_as_v2_op_xpu.py index 41f345091054c..7ad70874c481d 100644 --- a/test/xpu/test_expand_as_v2_op_xpu.py +++ 
b/test/xpu/test_expand_as_v2_op_xpu.py @@ -23,7 +23,7 @@ from op_test_xpu import XPUOpTest import paddle -from paddle import fluid +from paddle import base paddle.enable_static() np.random.seed(10) @@ -141,9 +141,9 @@ def test_api(self): out_1 = paddle.expand_as(x, y=y) - exe = fluid.Executor(place=fluid.XPUPlace(0)) + exe = base.Executor(place=base.XPUPlace(0)) res_1 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x": x_np, "target_tensor": y_np}, fetch_list=[out_1], ) diff --git a/test/xpu/test_expand_v2_op_xpu.py b/test/xpu/test_expand_v2_op_xpu.py index ad5397ff3bcb2..4d0948a1d246f 100644 --- a/test/xpu/test_expand_v2_op_xpu.py +++ b/test/xpu/test_expand_v2_op_xpu.py @@ -23,7 +23,7 @@ from op_test_xpu import XPUOpTest import paddle -from paddle import fluid +from paddle import base paddle.enable_static() np.random.seed(10) @@ -195,7 +195,7 @@ def test_check_grad(self): # Test python API class TestExpandV2API(unittest.TestCase): def test_static(self): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input = np.random.random([12, 14]).astype("float32") x = paddle.static.data( name='x', @@ -214,11 +214,11 @@ def test_static(self): out_2 = paddle.expand(x, shape=[positive_2, 14]) out_3 = paddle.expand(x, shape=expand_shape) - g0 = fluid.backward.calc_gradient(out_2, x) + g0 = base.backward.calc_gradient(out_2, x) - exe = fluid.Executor(place=paddle.XPUPlace(0)) + exe = base.Executor(place=paddle.XPUPlace(0)) res_1, res_2, res_3 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={ "x": input, "expand_shape": np.array([12, 14]).astype("int32"), diff --git a/test/xpu/test_fill_any_op_xpu.py b/test/xpu/test_fill_any_op_xpu.py index 824accc91297a..2d71f78e05c34 100644 --- a/test/xpu/test_fill_any_op_xpu.py +++ b/test/xpu/test_fill_any_op_xpu.py @@ -75,7 +75,7 @@ def init(self): class TestFillAnyInplace(unittest.TestCase): def test_fill_any_version(self): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): var = paddle.to_tensor(np.ones((4, 2, 3)).astype(np.float32)) self.assertEqual(var.inplace_version, 0) @@ -89,7 +89,7 @@ def test_fill_any_version(self): self.assertEqual(var.inplace_version, 3) def test_fill_any_eqaul(self): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): tensor = paddle.to_tensor( np.random.random((20, 30)).astype(np.float32) ) @@ -100,7 +100,7 @@ def test_fill_any_eqaul(self): self.assertEqual((tensor.numpy() == target).all().item(), True) def test_backward(self): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x = paddle.full([10, 10], -1.0, dtype='float32') x.stop_gradient = False y = 2 * x diff --git a/test/xpu/test_fill_op_xpu.py b/test/xpu/test_fill_op_xpu.py index ce2c33709b9f3..8adb6fc08b998 100644 --- a/test/xpu/test_fill_op_xpu.py +++ b/test/xpu/test_fill_op_xpu.py @@ -24,7 +24,7 @@ from op_test_xpu import XPUOpTest import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/xpu/test_fleet_exe_dist_model_run_xpu.py b/test/xpu/test_fleet_exe_dist_model_run_xpu.py index a55f82426bfa8..d78f16fb97bbc 100644 --- a/test/xpu/test_fleet_exe_dist_model_run_xpu.py +++ b/test/xpu/test_fleet_exe_dist_model_run_xpu.py @@ -19,7 +19,7 @@ import numpy as np import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/xpu/test_fused_attention_op_xpu.py 
b/test/xpu/test_fused_attention_op_xpu.py index 9db584f278e7f..063580457eeee 100644 --- a/test/xpu/test_fused_attention_op_xpu.py +++ b/test/xpu/test_fused_attention_op_xpu.py @@ -26,7 +26,7 @@ import paddle.incubate.nn.functional as incubate_f import paddle.nn.functional as F from paddle import tensor -from paddle.fluid.framework import default_main_program +from paddle.base.framework import default_main_program from paddle.nn.layer.common import Dropout, Linear from paddle.nn.layer.norm import LayerNorm from paddle.nn.layer.transformer import _convert_attention_mask diff --git a/test/xpu/test_fused_feedforward_op_xpu.py b/test/xpu/test_fused_feedforward_op_xpu.py index 11f7148e188d0..755c48a8b6d9b 100644 --- a/test/xpu/test_fused_feedforward_op_xpu.py +++ b/test/xpu/test_fused_feedforward_op_xpu.py @@ -20,7 +20,7 @@ import paddle import paddle.incubate.nn.functional as incubate_f import paddle.nn.functional as F -from paddle.fluid.framework import default_main_program +from paddle.base.framework import default_main_program from paddle.nn.layer import transformer from paddle.nn.layer.common import Dropout, Linear from paddle.nn.layer.norm import LayerNorm diff --git a/test/xpu/test_fused_gemm_epilogue_grad_op_xpu.py b/test/xpu/test_fused_gemm_epilogue_grad_op_xpu.py index 394fe515554f3..f60aed378afcd 100644 --- a/test/xpu/test_fused_gemm_epilogue_grad_op_xpu.py +++ b/test/xpu/test_fused_gemm_epilogue_grad_op_xpu.py @@ -24,7 +24,7 @@ from op_test_xpu import XPUOpTest import paddle -from paddle.fluid import core +from paddle.base import core def get_outputs(DOut, X, Y): diff --git a/test/xpu/test_fused_gemm_epilogue_op_xpu.py b/test/xpu/test_fused_gemm_epilogue_op_xpu.py index 37b1271963faf..fe1d08e36bc39 100644 --- a/test/xpu/test_fused_gemm_epilogue_op_xpu.py +++ b/test/xpu/test_fused_gemm_epilogue_op_xpu.py @@ -25,7 +25,7 @@ import paddle from paddle import _legacy_C_ops -from paddle.fluid import core +from paddle.base import core def gelu(x): diff --git a/test/xpu/test_fused_resnet_basic_block_op_xpu.py b/test/xpu/test_fused_resnet_basic_block_op_xpu.py index 073d128150da9..709222496fcde 100644 --- a/test/xpu/test_fused_resnet_basic_block_op_xpu.py +++ b/test/xpu/test_fused_resnet_basic_block_op_xpu.py @@ -24,9 +24,9 @@ ) import paddle -from paddle import fluid, nn -from paddle.fluid import core -from paddle.fluid.framework import default_main_program +from paddle import base, nn +from paddle.base import core +from paddle.base.framework import default_main_program from paddle.incubate.xpu.resnet_block import ResNetBasicBlock paddle.enable_static() @@ -69,34 +69,34 @@ def getShortcut(self): def Base(self): paddle.disable_static() - conv1_weight = fluid.ParamAttr( + conv1_weight = base.ParamAttr( initializer=paddle.nn.initializer.XavierNormal(), learning_rate=0.001, ) - conv2_weight = fluid.ParamAttr( + conv2_weight = base.ParamAttr( initializer=paddle.nn.initializer.XavierNormal(), learning_rate=0.001, ) - conv3_weight = fluid.ParamAttr( + conv3_weight = base.ParamAttr( initializer=paddle.nn.initializer.XavierNormal(), learning_rate=0.001, ) - bn1_weight = fluid.ParamAttr( + bn1_weight = base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=1.0) ) - bn1_bias = fluid.ParamAttr( + bn1_bias = base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.0) ) - bn2_weight = fluid.ParamAttr( + bn2_weight = base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=1.0) ) - bn2_bias = fluid.ParamAttr( + bn2_bias = base.ParamAttr( 
initializer=paddle.nn.initializer.Constant(value=0.0) ) - bn3_weight = fluid.ParamAttr( + bn3_weight = base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=1.0) ) - bn3_bias = fluid.ParamAttr( + bn3_bias = base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.0) ) @@ -169,34 +169,34 @@ def Base(self): def FusedResNetBasicBlock(self): paddle.disable_static() - fused_conv1_weight = fluid.ParamAttr( + fused_conv1_weight = base.ParamAttr( initializer=paddle.nn.initializer.XavierNormal(), learning_rate=0.001, ) - fused_conv2_weight = fluid.ParamAttr( + fused_conv2_weight = base.ParamAttr( initializer=paddle.nn.initializer.XavierNormal(), learning_rate=0.001, ) - fused_conv3_weight = fluid.ParamAttr( + fused_conv3_weight = base.ParamAttr( initializer=paddle.nn.initializer.XavierNormal(), learning_rate=0.001, ) - fused_bn1_weight = fluid.ParamAttr( + fused_bn1_weight = base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=1.0) ) - fused_bn1_bias = fluid.ParamAttr( + fused_bn1_bias = base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.0) ) - fused_bn2_weight = fluid.ParamAttr( + fused_bn2_weight = base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=1.0) ) - fused_bn2_bias = fluid.ParamAttr( + fused_bn2_bias = base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.0) ) - fused_bn3_weight = fluid.ParamAttr( + fused_bn3_weight = base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=1.0) ) - fused_bn3_bias = fluid.ParamAttr( + fused_bn3_bias = base.ParamAttr( initializer=paddle.nn.initializer.Constant(value=0.0) ) diff --git a/test/xpu/test_gaussian_random_op_xpu.py b/test/xpu/test_gaussian_random_op_xpu.py index f30b994dcd18b..abdec498f0a62 100644 --- a/test/xpu/test_gaussian_random_op_xpu.py +++ b/test/xpu/test_gaussian_random_op_xpu.py @@ -23,7 +23,7 @@ from op_test_xpu import XPUOpTest import paddle -from paddle import fluid +from paddle import base paddle.enable_static() from paddle.tensor import random @@ -234,9 +234,9 @@ def test_api(self): seed=10, ) - exe = fluid.Executor(place=fluid.XPUPlace(0)) + exe = base.Executor(place=base.XPUPlace(0)) res_1, res_2, res_3, res_4, res_5, res_6 = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={ "shape_tensor_int32": np.array([2000, 500]).astype("int32"), "shape_tensor_int64": np.array([2000, 500]).astype("int64"), @@ -263,17 +263,17 @@ def test_default_dtype(self): def test_default_fp16(): paddle.framework.set_default_dtype('float16') out = paddle.tensor.random.gaussian([2, 3]) - self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP16) + self.assertEqual(out.dtype, base.core.VarDesc.VarType.FP16) def test_default_fp32(): paddle.framework.set_default_dtype('float32') out = paddle.tensor.random.gaussian([2, 3]) - self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP32) + self.assertEqual(out.dtype, base.core.VarDesc.VarType.FP32) def test_default_fp64(): paddle.framework.set_default_dtype('float64') out = paddle.tensor.random.gaussian([2, 3]) - self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64) + self.assertEqual(out.dtype, base.core.VarDesc.VarType.FP64) test_default_fp64() test_default_fp32() @@ -289,17 +289,17 @@ def test_default_dtype(self): def test_default_fp16(): paddle.framework.set_default_dtype('float16') out = paddle.tensor.random.standard_normal([2, 3]) - self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP16) + self.assertEqual(out.dtype, base.core.VarDesc.VarType.FP16) def test_default_fp32(): 
paddle.framework.set_default_dtype('float32') out = paddle.tensor.random.standard_normal([2, 3]) - self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP32) + self.assertEqual(out.dtype, base.core.VarDesc.VarType.FP32) def test_default_fp64(): paddle.framework.set_default_dtype('float64') out = paddle.tensor.random.standard_normal([2, 3]) - self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64) + self.assertEqual(out.dtype, base.core.VarDesc.VarType.FP64) test_default_fp64() test_default_fp32() diff --git a/test/xpu/test_gen_bkcl_id_op.py b/test/xpu/test_gen_bkcl_id_op.py index 7c7ae3511a252..267d1bd581026 100644 --- a/test/xpu/test_gen_bkcl_id_op.py +++ b/test/xpu/test_gen_bkcl_id_op.py @@ -21,7 +21,7 @@ os.environ['GLOG_vmodule'] = "gen_bkcl_id_op*=10,gen_comm_id*=10" import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/xpu/test_generate_proposals_v2_op_xpu.py b/test/xpu/test_generate_proposals_v2_op_xpu.py index b7e22032f5f0c..024d09603b7d9 100644 --- a/test/xpu/test_generate_proposals_v2_op_xpu.py +++ b/test/xpu/test_generate_proposals_v2_op_xpu.py @@ -25,7 +25,7 @@ from op_test_xpu import XPUOpTest import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/xpu/test_group_norm_op_xpu.py b/test/xpu/test_group_norm_op_xpu.py index 3607fffdbfd15..bf0b528b6e8a7 100644 --- a/test/xpu/test_group_norm_op_xpu.py +++ b/test/xpu/test_group_norm_op_xpu.py @@ -78,9 +78,9 @@ def setUp(self): ) self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(input), - 'Scale': OpTest.np_dtype_to_fluid_dtype(scale), - 'Bias': OpTest.np_dtype_to_fluid_dtype(bias), + 'X': OpTest.np_dtype_to_base_dtype(input), + 'Scale': OpTest.np_dtype_to_base_dtype(scale), + 'Bias': OpTest.np_dtype_to_base_dtype(bias), } self.outputs = {'Y': output, 'Mean': mean, 'Variance': var} self.attrs['data_layout'] = self.data_format diff --git a/test/xpu/test_huber_loss_op_xpu.py b/test/xpu/test_huber_loss_op_xpu.py index bedfac1a20c61..a41d4a250327c 100644 --- a/test/xpu/test_huber_loss_op_xpu.py +++ b/test/xpu/test_huber_loss_op_xpu.py @@ -58,8 +58,8 @@ def set_inputs(self): x = np.random.uniform(0, 1.0, shape).astype(self.dtype) y = np.random.uniform(0, 1.0, shape).astype(self.dtype) self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(x), - 'Y': OpTest.np_dtype_to_fluid_dtype(y), + 'X': OpTest.np_dtype_to_base_dtype(x), + 'Y': OpTest.np_dtype_to_base_dtype(y), } def set_attrs(self): diff --git a/test/xpu/test_index_sample_op_xpu.py b/test/xpu/test_index_sample_op_xpu.py index e5204a1247f46..f07c81fa42d23 100644 --- a/test/xpu/test_index_sample_op_xpu.py +++ b/test/xpu/test_index_sample_op_xpu.py @@ -23,7 +23,7 @@ from op_test_xpu import XPUOpTest import paddle -from paddle import fluid +from paddle import base paddle.enable_static() @@ -125,9 +125,9 @@ def test_shape(self): index = paddle.static.data(name='index', shape=[-1, 3], dtype='int32') output = paddle.index_sample(x=x, index=index) - place = fluid.XPUPlace(0) - exe = fluid.Executor(place=place) - exe.run(fluid.default_startup_program()) + place = base.XPUPlace(0) + exe = base.Executor(place=place) + exe.run(base.default_startup_program()) feed = {'x': x_np, 'index': index_np} res = exe.run(feed=feed, fetch_list=[output]) @@ -135,7 +135,7 @@ def test_shape(self): class TestIndexSampleDynamic(unittest.TestCase): def test_result(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): x = paddle.to_tensor( [ [1.0, 2.0, 3.0, 4.0], diff --git 
a/test/xpu/test_index_select_op_xpu.py b/test/xpu/test_index_select_op_xpu.py index 62b9dd54c2e8b..4a4f0d3e47786 100644 --- a/test/xpu/test_index_select_op_xpu.py +++ b/test/xpu/test_index_select_op_xpu.py @@ -23,8 +23,8 @@ from op_test_xpu import XPUOpTest import paddle -from paddle import fluid -from paddle.fluid import Program, program_guard +from paddle import base +from paddle.base import Program, program_guard paddle.enable_static() @@ -101,7 +101,7 @@ def test_index_select_api(self): x = paddle.static.data(name='x', shape=[-1, 4], dtype='float32') index = paddle.static.data(name='index', shape=[3], dtype='int32') z = paddle.index_select(x, index, axis=1) - exe = fluid.Executor(fluid.XPUPlace(0)) + exe = base.Executor(base.XPUPlace(0)) (res,) = exe.run( feed={'x': self.data_x, 'index': self.data_index}, fetch_list=[z.name], @@ -117,7 +117,7 @@ def test_index_select_api(self): x = paddle.static.data(name='x', shape=[-1, 4], dtype='float32') index = paddle.static.data(name='index', shape=[3], dtype='int32') z = paddle.index_select(x, index) - exe = fluid.Executor(fluid.XPUPlace(0)) + exe = base.Executor(base.XPUPlace(0)) (res,) = exe.run( feed={'x': self.data_x, 'index': self.data_index}, fetch_list=[z.name], @@ -131,9 +131,9 @@ def test_index_select_api(self): def test_dygraph_api(self): self.input_data() # case 1: - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(self.data_x) - index = fluid.dygraph.to_variable(self.data_index) + with base.dygraph.guard(): + x = base.dygraph.to_variable(self.data_x) + index = base.dygraph.to_variable(self.data_index) z = paddle.index_select(x, index) np_z = z.numpy() expect_out = np.array( @@ -142,9 +142,9 @@ def test_dygraph_api(self): np.testing.assert_allclose(expect_out, np_z, rtol=1e-05) # case 2: - with fluid.dygraph.guard(): - x = fluid.dygraph.to_variable(self.data_x) - index = fluid.dygraph.to_variable(self.data_index) + with base.dygraph.guard(): + x = base.dygraph.to_variable(self.data_x) + index = base.dygraph.to_variable(self.data_index) z = paddle.index_select(x, index, axis=1) np_z = z.numpy() expect_out = np.array( diff --git a/test/xpu/test_instance_norm_op_xpu.py b/test/xpu/test_instance_norm_op_xpu.py index 06714984a1b6d..27ee3c0813a44 100644 --- a/test/xpu/test_instance_norm_op_xpu.py +++ b/test/xpu/test_instance_norm_op_xpu.py @@ -23,8 +23,8 @@ from op_test_xpu import XPUOpTest import paddle -from paddle import fluid -from paddle.fluid import Program, program_guard +from paddle import base +from paddle.base import Program, program_guard paddle.enable_static() @@ -176,8 +176,8 @@ def setUp(self): def test_errors(self): with program_guard(Program(), Program()): # the input of instance_norm must be Variable. 
- x1 = fluid.create_lod_tensor( - np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.XPUPlace(0) + x1 = base.create_lod_tensor( + np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], base.XPUPlace(0) ) self.assertRaises(TypeError, paddle.static.nn.instance_norm, x1) diff --git a/test/xpu/test_kldiv_loss_op_xpu.py b/test/xpu/test_kldiv_loss_op_xpu.py index e7ebe5e3f1f80..2879990916287 100644 --- a/test/xpu/test_kldiv_loss_op_xpu.py +++ b/test/xpu/test_kldiv_loss_op_xpu.py @@ -105,7 +105,7 @@ def run_kl_loss(self, reduction, shape=(5, 20)): target = np.random.uniform(-10, 10, shape).astype('float32') gt_loss = kldiv_loss(x, target, reduction) - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): kldiv_criterion = paddle.nn.KLDivLoss(reduction) pred_loss = kldiv_criterion( paddle.to_tensor(x), paddle.to_tensor(target) @@ -125,7 +125,7 @@ def test_kl_loss_static_api(self): class TestKLDivLossTypePromotion(unittest.TestCase): def test_kl_div_promotion(self): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): x1 = paddle.rand([5, 20], dtype='float32') target1 = paddle.rand([5, 20], dtype='float32') diff --git a/test/xpu/test_logical_op_xpu.py b/test/xpu/test_logical_op_xpu.py index 175e1fe569bad..90d6ba1c9335c 100755 --- a/test/xpu/test_logical_op_xpu.py +++ b/test/xpu/test_logical_op_xpu.py @@ -58,8 +58,8 @@ def set_case(self): self.attrs = {'use_xpu': True} self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(x), - 'Y': OpTest.np_dtype_to_fluid_dtype(y), + 'X': OpTest.np_dtype_to_base_dtype(x), + 'Y': OpTest.np_dtype_to_base_dtype(y), } self.outputs = {'Out': out} @@ -119,8 +119,8 @@ def set_case(self): self.attrs = {'use_xpu': True} self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(x), - 'Y': OpTest.np_dtype_to_fluid_dtype(y), + 'X': OpTest.np_dtype_to_base_dtype(x), + 'Y': OpTest.np_dtype_to_base_dtype(y), } self.outputs = {'Out': out} @@ -180,8 +180,8 @@ def set_case(self): self.attrs = {'use_xpu': True} self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(x), - 'Y': OpTest.np_dtype_to_fluid_dtype(y), + 'X': OpTest.np_dtype_to_base_dtype(x), + 'Y': OpTest.np_dtype_to_base_dtype(y), } self.outputs = {'Out': out} @@ -237,7 +237,7 @@ def set_case(self): out = np.logical_not(x) self.attrs = {'use_xpu': True} - self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.inputs = {'X': OpTest.np_dtype_to_base_dtype(x)} self.outputs = {'Out': out} def init_case(self): diff --git a/test/xpu/test_matmul_op_xpu.py b/test/xpu/test_matmul_op_xpu.py index fb30c2ecbe879..24369b79f8cab 100644 --- a/test/xpu/test_matmul_op_xpu.py +++ b/test/xpu/test_matmul_op_xpu.py @@ -23,7 +23,7 @@ from op_test_xpu import XPUOpTest import paddle -from paddle import fluid +from paddle import base def reference_matmul(X, Y, transpose_X=False, transpose_Y=False): @@ -132,11 +132,11 @@ def __init__(self): class API_TestMm(unittest.TestCase): def test_out(self): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): x = paddle.static.data(name="x", shape=[2], dtype=self.in_type) y = paddle.static.data(name='y', shape=[2], dtype=self.in_type) result = paddle.mm(x, y) - exe = fluid.Executor(fluid.XPUPlace(0)) + exe = base.Executor(base.XPUPlace(0)) data1 = np.random.rand(2).astype(self.in_type) data2 = np.random.rand(2).astype(self.in_type) np_res = exe.run( @@ -147,12 +147,12 @@ def test_out(self): np.testing.assert_allclose(np_res, expected_result, atol=1e-3) def test_dygraph_without_out(self): - device = fluid.XPUPlace(0) - with fluid.dygraph.guard(device): + 
device = base.XPUPlace(0) + with base.dygraph.guard(device): input_array1 = np.random.rand(3, 4).astype(self.in_type) input_array2 = np.random.rand(4, 3).astype(self.in_type) - data1 = fluid.dygraph.to_variable(input_array1) - data2 = fluid.dygraph.to_variable(input_array2) + data1 = base.dygraph.to_variable(input_array1) + data2 = base.dygraph.to_variable(input_array2) out = paddle.mm(data1, data2) expected_result = np.matmul(input_array1, input_array2) np.testing.assert_allclose( @@ -161,14 +161,14 @@ def test_dygraph_without_out(self): class Test_API_Matmul(unittest.TestCase): def test_dygraph_without_out(self): - device = fluid.XPUPlace(0) - with fluid.dygraph.guard(device): + device = base.XPUPlace(0) + with base.dygraph.guard(device): input_array1 = np.random.rand(3, 4).astype(self.in_type) input_array2 = np.random.rand(4, 3).astype(self.in_type) - data1 = fluid.dygraph.to_variable(input_array1).astype( + data1 = base.dygraph.to_variable(input_array1).astype( self.in_type ) - data2 = fluid.dygraph.to_variable(input_array2).astype( + data2 = base.dygraph.to_variable(input_array2).astype( self.in_type ) out = paddle.matmul(data1, data2) @@ -180,7 +180,7 @@ def test_dygraph_without_out(self): class API_TestMmError(unittest.TestCase): def test_errors(self): def test_error1(): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): data1 = paddle.static.data( name="data1", shape=[10, 2], dtype="float32" ) @@ -192,7 +192,7 @@ def test_error1(): self.assertRaises(ValueError, test_error1) def test_error2(): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): data1 = paddle.static.data( name="data1", shape=[-1, 10, 2], dtype="float32" ) @@ -204,7 +204,7 @@ def test_error2(): test_error2() def test_error3(): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): data1 = paddle.static.data( name="data1", shape=[10, 10, 2], dtype="float32" ) diff --git a/test/xpu/test_mean_op_xpu.py b/test/xpu/test_mean_op_xpu.py index 66ed8d7edbce3..b55270ac274c8 100644 --- a/test/xpu/test_mean_op_xpu.py +++ b/test/xpu/test_mean_op_xpu.py @@ -18,7 +18,7 @@ from op_test_xpu import XPUOpTest import paddle -from paddle.fluid import Program, program_guard +from paddle.base import Program, program_guard np.random.seed(10) diff --git a/test/xpu/test_merged_momentum_op_xpu.py b/test/xpu/test_merged_momentum_op_xpu.py index 8f3afc5a32697..e7b6008e43644 100644 --- a/test/xpu/test_merged_momentum_op_xpu.py +++ b/test/xpu/test_merged_momentum_op_xpu.py @@ -38,7 +38,7 @@ def setUp(self): def set_case(self): self.shapes = [[3, 4], [2, 7], [5, 6, 8]] - self.place = paddle.fluid.XPUPlace(0) + self.place = paddle.base.XPUPlace(0) self.seed = 1 def testalltype(self): diff --git a/test/xpu/test_merged_momentum_op_xpu_base.py b/test/xpu/test_merged_momentum_op_xpu_base.py index 62f534a1b41a6..e41c7fd4feeab 100644 --- a/test/xpu/test_merged_momentum_op_xpu_base.py +++ b/test/xpu/test_merged_momentum_op_xpu_base.py @@ -21,7 +21,7 @@ import numpy as np import paddle -from paddle.fluid.layer_helper import LayerHelper +from paddle.base.layer_helper import LayerHelper def run_momentum_op( @@ -181,7 +181,7 @@ def setUp(self): paddle.enable_static() self.shapes = [[3, 4], [2, 7], [5, 6], [7, 8]] self.seed = 10 - self.place = paddle.fluid.XPUPlace(0) + self.place = paddle.base.XPUPlace(0) self.__class__.use_xpu = True def gen_rand_data(self, 
shapes, dtype): diff --git a/test/xpu/test_momentum_op_xpu.py b/test/xpu/test_momentum_op_xpu.py index 50854cdeb9fae..117a7ee34d2e9 100644 --- a/test/xpu/test_momentum_op_xpu.py +++ b/test/xpu/test_momentum_op_xpu.py @@ -23,7 +23,7 @@ from op_test_xpu import XPUOpTest import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/xpu/test_nearest_interp_op_xpu.py b/test/xpu/test_nearest_interp_op_xpu.py index 13958a331ddce..1165521339da1 100644 --- a/test/xpu/test_nearest_interp_op_xpu.py +++ b/test/xpu/test_nearest_interp_op_xpu.py @@ -423,14 +423,14 @@ def test_exception(self): def attr_data_format(): # for 4-D input, data_format can only be NCHW or NHWC - out = fluid.layers.resize_nearest( + out = base.layers.resize_nearest( input, out_shape=[4, 8], data_format="NDHWC") def attr_scale_type(): - out = fluid.layers.resize_nearest(input, scale="scale") + out = base.layers.resize_nearest(input, scale="scale") def attr_scale_value(): - out = fluid.layers.resize_nearest(input, scale=-0.3) + out = base.layers.resize_nearest(input, scale=-0.3) self.assertRaises(ValueError, attr_data_format) self.assertRaises(TypeError, attr_scale_type) diff --git a/test/xpu/test_one_hot_op_xpu.py b/test/xpu/test_one_hot_op_xpu.py index 941387b3eb1fb..322f5f29d2707 100644 --- a/test/xpu/test_one_hot_op_xpu.py +++ b/test/xpu/test_one_hot_op_xpu.py @@ -23,7 +23,7 @@ from op_test_xpu import XPUOpTest import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/xpu/test_one_hot_v2_op_xpu.py b/test/xpu/test_one_hot_v2_op_xpu.py index 80a60eed539c0..f32c3960c6dfc 100644 --- a/test/xpu/test_one_hot_v2_op_xpu.py +++ b/test/xpu/test_one_hot_v2_op_xpu.py @@ -23,8 +23,8 @@ from op_test_xpu import XPUOpTest import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -158,22 +158,22 @@ def test_api_with_dygraph(self): label = np.array( [np.random.randint(0, depth - 1) for i in range(6)] ).reshape([6, 1]) - with fluid.dygraph.guard(): + with base.dygraph.guard(): one_hot_label = paddle.nn.functional.one_hot( - x=fluid.dygraph.to_variable(label), num_classes=depth + x=base.dygraph.to_variable(label), num_classes=depth ) def _run(self, depth): label = paddle.static.data(name="label", shape=[-1, 1], dtype="int64") one_hot_label = paddle.nn.functional.one_hot(x=label, num_classes=depth) - place = fluid.XPUPlace(0) + place = base.XPUPlace(0) label_data = np.array( [np.random.randint(0, 10 - 1) for i in range(6)] ).reshape([6, 1]) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + exe.run(base.default_startup_program()) ret = exe.run( feed={ 'label': label_data, @@ -185,7 +185,7 @@ def _run(self, depth): class BadInputTestOnehotV2(unittest.TestCase): def test_error(self): - with fluid.program_guard(fluid.Program()): + with base.program_guard(base.Program()): def test_bad_x(): label = paddle.static.data( diff --git a/test/xpu/test_pad3d_op_xpu.py b/test/xpu/test_pad3d_op_xpu.py index 2757ed1e3e70a..233986ef909b2 100644 --- a/test/xpu/test_pad3d_op_xpu.py +++ b/test/xpu/test_pad3d_op_xpu.py @@ -25,7 +25,7 @@ import paddle import paddle.nn.functional as F from paddle import nn -from paddle.fluid import Executor, Program, default_main_program, program_guard +from paddle.base import Executor, Program, default_main_program, program_guard paddle.enable_static() diff --git 
a/test/xpu/test_pad_op_xpu.py b/test/xpu/test_pad_op_xpu.py index 4f4d68ab73d0e..cd5b9ffb5f3d5 100644 --- a/test/xpu/test_pad_op_xpu.py +++ b/test/xpu/test_pad_op_xpu.py @@ -25,7 +25,7 @@ from test_attribute_var import UnittestBase import paddle -from paddle.fluid import Program, program_guard +from paddle.base import Program, program_guard def pad_wrapper(x, paddings, pad_value): @@ -101,7 +101,7 @@ def init_test_case(self): class TestPadOpError(unittest.TestCase): def test_errors(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): with program_guard(Program(), Program()): input_data = np.random.random((2, 2)).astype("float32") @@ -123,7 +123,7 @@ def init_info(self): ) def test_static(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): main_prog = Program() starup_prog = Program() with program_guard(main_prog, starup_prog): @@ -182,7 +182,7 @@ def call_func(self, x): class TestPaddingValueTensor3(unittest.TestCase): def test_static(self): - with paddle.fluid.framework._static_guard(): + with paddle.base.framework._static_guard(): np_x = np.random.random((16, 16)).astype('float32') main_prog = Program() starup_prog = Program() diff --git a/test/xpu/test_parallel_dygraph_dataparallel.py b/test/xpu/test_parallel_dygraph_dataparallel.py index 11fa8405317dc..0070f8ade9802 100644 --- a/test/xpu/test_parallel_dygraph_dataparallel.py +++ b/test/xpu/test_parallel_dygraph_dataparallel.py @@ -19,7 +19,7 @@ import unittest import paddle -from paddle import fluid +from paddle import base from paddle.distributed.utils.launch_utils import ( TrainerProc, find_free_ports, @@ -109,8 +109,8 @@ def start_local_trainers( class TestMultipleXpus(unittest.TestCase): def run_mnist_2xpu(self, target_file_name, eager_mode=True): if ( - not fluid.core.is_compiled_with_xpu() - or fluid.core.get_xpu_device_count() == 0 + not base.core.is_compiled_with_xpu() + or base.core.get_xpu_device_count() == 0 ): return diff --git a/test/xpu/test_pool2d_op_xpu.py b/test/xpu/test_pool2d_op_xpu.py index d40c29a6683b8..5081fb31cb39b 100644 --- a/test/xpu/test_pool2d_op_xpu.py +++ b/test/xpu/test_pool2d_op_xpu.py @@ -319,7 +319,7 @@ def setUp(self): self.pool_type, self.padding_algorithm, ).astype(self.dtype) - self.inputs = {'X': XPUOpTest.np_dtype_to_fluid_dtype(input)} + self.inputs = {'X': XPUOpTest.np_dtype_to_base_dtype(input)} self.attrs = { 'strides': self.strides, diff --git a/test/xpu/test_pool3d_op_xpu.py b/test/xpu/test_pool3d_op_xpu.py index c9441de646428..865029ad0d07d 100644 --- a/test/xpu/test_pool3d_op_xpu.py +++ b/test/xpu/test_pool3d_op_xpu.py @@ -313,7 +313,7 @@ def setUp(self): self.padding_algorithm, ).astype(self.dtype) - self.inputs = {'X': XPUOpTest.np_dtype_to_fluid_dtype(input)} + self.inputs = {'X': XPUOpTest.np_dtype_to_base_dtype(input)} self.attrs = { 'strides': self.strides, diff --git a/test/xpu/test_prelu_op_xpu.py b/test/xpu/test_prelu_op_xpu.py index 0a0ea28269722..d84ce2de8f9cb 100644 --- a/test/xpu/test_prelu_op_xpu.py +++ b/test/xpu/test_prelu_op_xpu.py @@ -23,8 +23,8 @@ from op_test_xpu import XPUOpTest import paddle -from paddle import fluid -from paddle.fluid import Program +from paddle import base +from paddle.base import Program paddle.enable_static() @@ -148,7 +148,7 @@ def init_attr(self): def prelu_t(x, mode, param_attr=None, name=None, data_format='NCHW'): - helper = fluid.layer_helper.LayerHelper('prelu', **locals()) + helper = base.layer_helper.LayerHelper('prelu', **locals()) alpha_shape 
= [1, x.shape[1], 1, 1] dtype = helper.input_dtype(input_param_name='x') alpha = helper.create_parameter( @@ -176,7 +176,7 @@ def setUp(self): def test_mode_error(self): main_program = Program() - with fluid.program_guard(main_program, Program()): + with base.program_guard(main_program, Program()): x = paddle.static.data(name='x', shape=[2, 3, 4, 5]) try: y = prelu_t(x, 'any') @@ -185,7 +185,7 @@ def test_mode_error(self): def test_data_format_error1(self): main_program = Program() - with fluid.program_guard(main_program, Program()): + with base.program_guard(main_program, Program()): x = paddle.static.data(name='x', shape=[2, 3, 4, 5]) try: y = prelu_t(x, 'channel', data_format='N') @@ -194,7 +194,7 @@ def test_data_format_error1(self): def test_data_format_error2(self): main_program = Program() - with fluid.program_guard(main_program, Program()): + with base.program_guard(main_program, Program()): x = paddle.static.data(name='x', shape=[2, 3, 4, 5]) try: y = paddle.static.nn.prelu(x, 'channel', data_format='N') diff --git a/test/xpu/test_randperm_op_xpu.py b/test/xpu/test_randperm_op_xpu.py index f28944e0009a2..ba6b3ae4ac412 100644 --- a/test/xpu/test_randperm_op_xpu.py +++ b/test/xpu/test_randperm_op_xpu.py @@ -23,7 +23,7 @@ from op_test_xpu import XPUOpTest import paddle -from paddle.fluid import core +from paddle.base import core from paddle.static import Program, program_guard paddle.enable_static() diff --git a/test/xpu/test_refactor_op_xpu.py b/test/xpu/test_refactor_op_xpu.py index 9403a515097c7..51bad8cec2e82 100644 --- a/test/xpu/test_refactor_op_xpu.py +++ b/test/xpu/test_refactor_op_xpu.py @@ -25,7 +25,7 @@ from op_test_xpu import XPUOpTest import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() @@ -231,8 +231,8 @@ def set_inputs(self): x = np.random.uniform(0, 1.0, shape).astype(self.dtype) y = np.random.uniform(0, 1.0, shape).astype(self.dtype) self.inputs = { - 'X': OpTest.np_dtype_to_fluid_dtype(x), - 'Y': OpTest.np_dtype_to_fluid_dtype(y), + 'X': OpTest.np_dtype_to_base_dtype(x), + 'Y': OpTest.np_dtype_to_base_dtype(y), } def set_attrs(self): diff --git a/test/xpu/test_rmsprop_op_xpu.py b/test/xpu/test_rmsprop_op_xpu.py index f2897bde7399a..0e90de302af99 100644 --- a/test/xpu/test_rmsprop_op_xpu.py +++ b/test/xpu/test_rmsprop_op_xpu.py @@ -24,8 +24,8 @@ from op_test_xpu import XPUOpTest import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -165,7 +165,7 @@ def setup( ): np.random.seed(5) # fix seed - self.scope = fluid.global_scope() + self.scope = base.global_scope() self.place = place self.param_name = "param" @@ -332,7 +332,7 @@ def test_rmsprop(self): size = (128, 320) for place in places: for centered in [False, True]: - with fluid.scope_guard(core.Scope()): + with base.scope_guard(core.Scope()): self.check_with_place( place, is_sparse=False, centered=centered, size=size ) diff --git a/test/xpu/test_rnn_op_xpu.py b/test/xpu/test_rnn_op_xpu.py index eaeafc5e01c58..1029698d1d79f 100755 --- a/test/xpu/test_rnn_op_xpu.py +++ b/test/xpu/test_rnn_op_xpu.py @@ -28,7 +28,7 @@ from rnn_numpy import LSTM import paddle -from paddle.fluid import core +from paddle.base import core random.seed(2) np.set_printoptions(threshold=np.inf) diff --git a/test/xpu/test_roi_align_op_xpu.py b/test/xpu/test_roi_align_op_xpu.py index d65f78be1a488..78712e1211f9c 100644 --- a/test/xpu/test_roi_align_op_xpu.py +++ b/test/xpu/test_roi_align_op_xpu.py @@ -24,7 
+24,7 @@ from op_test_xpu import XPUOpTest import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() diff --git a/test/xpu/test_scale_op_xpu.py b/test/xpu/test_scale_op_xpu.py index 8ad7800fe2311..df7f59be8f7a9 100644 --- a/test/xpu/test_scale_op_xpu.py +++ b/test/xpu/test_scale_op_xpu.py @@ -23,7 +23,7 @@ from op_test_xpu import XPUOpTest import paddle -from paddle.fluid import Program, program_guard +from paddle.base import Program, program_guard class XPUTestScaleOp(XPUOpTestWrapper): diff --git a/test/xpu/test_sequence_conv_op_xpu.py b/test/xpu/test_sequence_conv_op_xpu.py index 4a52ea54f4aff..1b0816938a248 100644 --- a/test/xpu/test_sequence_conv_op_xpu.py +++ b/test/xpu/test_sequence_conv_op_xpu.py @@ -429,18 +429,18 @@ def init_test_case(self): class TestSeqConvApi(unittest.TestCase): def test_api(self): - from paddle import fluid + from paddle import base x = paddle.static.data('x', shape=[-1, 32], lod_level=1) y = paddle.static.nn.sequence_lod.sequence_conv( input=x, num_filters=2, filter_size=3, padding_start=None ) - place = fluid.CPUPlace() - x_tensor = fluid.create_lod_tensor( + place = base.CPUPlace() + x_tensor = base.create_lod_tensor( np.random.rand(10, 32).astype("float32"), [[2, 3, 1, 4]], place ) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + exe = base.Executor(place) + exe.run(base.default_startup_program()) ret = exe.run(feed={'x': x_tensor}, fetch_list=[y], return_numpy=False) diff --git a/test/xpu/test_sequence_unpad_op_xpu.py b/test/xpu/test_sequence_unpad_op_xpu.py index 15215fcb0c614..0a61d8b22ec96 100644 --- a/test/xpu/test_sequence_unpad_op_xpu.py +++ b/test/xpu/test_sequence_unpad_op_xpu.py @@ -100,7 +100,7 @@ def initTestCase(self): class TestSequenceUnpadOpError(unittest.TestCase): def test_error(self): """ - The type of 'x' in paddle.static.nn.sequence_unpad must be , but received . + The type of 'x' in paddle.static.nn.sequence_unpad must be , but received . """ def test_x_variable(): @@ -110,7 +110,7 @@ def test_x_variable(): self.assertRaises(TypeError, test_x_variable) """ - The type of 'length' in fluid.layers.sequence_unpad must be , but received . + The type of 'length' in base.layers.sequence_unpad must be , but received . 
""" def test_length_variable(): @@ -120,7 +120,7 @@ def test_length_variable(): self.assertRaises(TypeError, test_length_variable) """ - The data type of 'x' in fluid.layers.sequence_unpad must be ['float32', 'float64', 'int32', 'int64'], but received float16 + The data type of 'x' in base.layers.sequence_unpad must be ['float32', 'float64', 'int32', 'int64'], but received float16 """ def test_x_dtype(): @@ -130,7 +130,7 @@ def test_x_dtype(): self.assertRaises(TypeError, test_x_dtype) """ - The data type of 'length' in fluid.layers.sequence_unpad must be ['int64'], but received int32 + The data type of 'length' in base.layers.sequence_unpad must be ['int64'], but received int32 """ def test_length_dtype(): diff --git a/test/xpu/test_set_value_op_xpu.py b/test/xpu/test_set_value_op_xpu.py index 1cc4bb4fcd4f4..ba69ff178b738 100644 --- a/test/xpu/test_set_value_op_xpu.py +++ b/test/xpu/test_set_value_op_xpu.py @@ -29,7 +29,7 @@ from op_test_xpu import XPUOpTest import paddle -from paddle.fluid.layer_helper import LayerHelper +from paddle.base.layer_helper import LayerHelper class XPUTestSetValueOp(XPUOpTestWrapper): @@ -1638,7 +1638,7 @@ def setUp(self): def test_inplace(self): paddle.disable_static() - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): paddle.seed(100) a = paddle.rand(shape=[1, 4]) a.stop_gradient = False @@ -1662,7 +1662,7 @@ def test_inplace_var_become_leaf_var(self): paddle.disable_static() a_grad_1, b_grad_1, a_grad_2, b_grad_2 = 0, 1, 2, 3 - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): paddle.seed(100) a = paddle.rand(shape=[1, 4]) b = paddle.rand(shape=[1, 4]) @@ -1673,7 +1673,7 @@ def test_inplace_var_become_leaf_var(self): a_grad_1 = a.grad.numpy() b_grad_1 = b.grad.numpy() - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): paddle.seed(100) a = paddle.rand(shape=[1, 4]) b = paddle.rand(shape=[1, 4]) diff --git a/test/xpu/test_sgd_op_xpu.py b/test/xpu/test_sgd_op_xpu.py index 7720b992060bc..130bd76c897b4 100644 --- a/test/xpu/test_sgd_op_xpu.py +++ b/test/xpu/test_sgd_op_xpu.py @@ -24,8 +24,8 @@ from op_test_xpu import XPUOpTest import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core class XPUTestSgdOp(XPUOpTestWrapper): @@ -80,9 +80,9 @@ def runTest(self): sgd_optimizer.minimize(avg_cost) place = paddle.XPUPlace(0) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - result = exe.run(fluid.default_main_program(), fetch_list=[avg_cost]) + exe = base.Executor(place) + exe.run(base.default_startup_program()) + result = exe.run(base.default_main_program(), fetch_list=[avg_cost]) class TestSparseSGDOp(unittest.TestCase): diff --git a/test/xpu/test_shape_op_xpu.py b/test/xpu/test_shape_op_xpu.py index d90b53a5c7a2b..f2a9d439a6311 100644 --- a/test/xpu/test_shape_op_xpu.py +++ b/test/xpu/test_shape_op_xpu.py @@ -24,7 +24,7 @@ from op_test_xpu import XPUOpTest import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() @@ -103,7 +103,7 @@ def check_with_place(self, place): def test_check_output(self): for place in self.get_places(): if ( - type(place) is paddle.fluid.libpaddle.CPUPlace + type(place) is paddle.base.libpaddle.CPUPlace and self.dtype == np.float16 ): # fp16 not available on cpu diff --git a/test/xpu/test_squeeze_op_xpu.py b/test/xpu/test_squeeze_op_xpu.py index 5aae366c85635..c5b9efce7a770 100644 --- a/test/xpu/test_squeeze_op_xpu.py +++ 
b/test/xpu/test_squeeze_op_xpu.py @@ -23,8 +23,8 @@ from op_test_xpu import XPUOpTest import paddle -from paddle import fluid -from paddle.fluid import Program, program_guard +from paddle import base +from paddle.base import Program, program_guard paddle.enable_static() @@ -106,7 +106,7 @@ def test_errors(self): paddle.enable_static() with program_guard(Program(), Program()): # The input type of softmax_op must be Variable. - x1 = fluid.create_lod_tensor( + x1 = base.create_lod_tensor( np.array([[-1]]), [[1]], paddle.XPUPlace(0) ) self.assertRaises(TypeError, paddle.squeeze, x1) diff --git a/test/xpu/test_sum_op_xpu.py b/test/xpu/test_sum_op_xpu.py index 3b51b0adb76d0..f582d6322362b 100644 --- a/test/xpu/test_sum_op_xpu.py +++ b/test/xpu/test_sum_op_xpu.py @@ -23,8 +23,8 @@ from op_test_xpu import XPUOpTest import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core paddle.enable_static() @@ -100,7 +100,7 @@ def test_w_is_selected_rows(self): class API_Test_Add_n(unittest.TestCase): def test_api(self): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input0 = paddle.tensor.fill_constant( shape=[2, 3], dtype='int64', value=5 ) @@ -110,12 +110,12 @@ def test_api(self): expected_result = np.empty((2, 3)) expected_result.fill(8) sum_value = paddle.add_n([input0, input1]) - exe = fluid.Executor(fluid.XPUPlace(0)) + exe = base.Executor(base.XPUPlace(0)) result = exe.run(fetch_list=[sum_value]) self.assertEqual((result == expected_result).all(), True) - with fluid.dygraph.guard(): + with base.dygraph.guard(): input0 = paddle.ones(shape=[2, 3], dtype='float32') expected_result = np.empty((2, 3)) expected_result.fill(2) @@ -193,12 +193,12 @@ def test_out_dtype(): class TestSumOpError(unittest.TestCase): def test_errors(self): def test_empty_list_input(): - with fluid.dygraph.guard(): - fluid._legacy_C_ops.sum([]) + with base.dygraph.guard(): + base._legacy_C_ops.sum([]) def test_list_of_none_input(): - with fluid.dygraph.guard(): - fluid._legacy_C_ops.sum([None]) + with base.dygraph.guard(): + base._legacy_C_ops.sum([None]) self.assertRaises(Exception, test_empty_list_input) self.assertRaises(Exception, test_list_of_none_input) diff --git a/test/xpu/test_tile_op_xpu.py b/test/xpu/test_tile_op_xpu.py index ae2e1b2f0d7dd..eee06d0e7c3bd 100644 --- a/test/xpu/test_tile_op_xpu.py +++ b/test/xpu/test_tile_op_xpu.py @@ -23,7 +23,7 @@ from op_test_xpu import XPUOpTest import paddle -from paddle import fluid +from paddle import base paddle.enable_static() np.random.seed(10) @@ -205,7 +205,7 @@ def init_data(self): # Test python API class TestTileAPI(unittest.TestCase): def test_api(self): - with fluid.dygraph.guard(paddle.XPUPlace(0)): + with base.dygraph.guard(paddle.XPUPlace(0)): np_x = np.random.random([12, 14]).astype("float32") x = paddle.to_tensor(np_x) diff --git a/test/xpu/test_truncated_gaussian_random_op_xpu.py b/test/xpu/test_truncated_gaussian_random_op_xpu.py index cad48d44d92bc..d412c24cb3cce 100644 --- a/test/xpu/test_truncated_gaussian_random_op_xpu.py +++ b/test/xpu/test_truncated_gaussian_random_op_xpu.py @@ -23,8 +23,8 @@ from op_test_xpu import XPUOpTest import paddle -from paddle import fluid -from paddle.fluid.executor import Executor +from paddle import base +from paddle.base.executor import Executor paddle.enable_static() @@ -58,10 +58,10 @@ def set_attrs(self): self.std = 1.0 def test_check_output(self): - 
self.gaussian_random_test(place=fluid.XPUPlace(0)) + self.gaussian_random_test(place=base.XPUPlace(0)) def gaussian_random_test(self, place): - program = fluid.Program() + program = base.Program() block = program.global_block() vout = block.create_var(name="Out") op = block.append_op( diff --git a/test/xpu/test_unbind_op_xpu.py b/test/xpu/test_unbind_op_xpu.py index 3ec10511a7e90..bb3d3a027b2f1 100644 --- a/test/xpu/test_unbind_op_xpu.py +++ b/test/xpu/test_unbind_op_xpu.py @@ -23,8 +23,8 @@ from op_test_xpu import XPUOpTest import paddle -from paddle import fluid, tensor -from paddle.fluid import Program, program_guard +from paddle import base, tensor +from paddle.base import Program, program_guard paddle.enable_static() @@ -42,10 +42,10 @@ def test_unbind(self): [out_0, out_1] = tensor.unbind(input=x_1, axis=0) input_1 = np.random.random([2, 3]).astype(self.dtype) axis = paddle.static.data(shape=[], dtype='int32', name='axis') - exe = fluid.Executor(place=self.place) + exe = base.Executor(place=self.place) [res_1, res_2] = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x_1": input_1, "axis": 0}, fetch_list=[out_0, out_1], ) @@ -54,7 +54,7 @@ def test_unbind(self): np.testing.assert_array_equal(res_2, input_1[1, 0:100]) def test_unbind_dygraph(self): - with fluid.dygraph.guard(): + with base.dygraph.guard(): self.dtype = self.in_type self.place = paddle.XPUPlace(0) np_x = np.random.random([2, 3]).astype(self.dtype) @@ -81,10 +81,10 @@ def test_layers_unbind(self): [out_0, out_1] = paddle.unbind(input=x_1, axis=0) input_1 = np.random.random([2, 3]).astype(self.dtype) axis = paddle.static.data(shape=[], dtype='int32', name='axis') - exe = fluid.Executor(place=self.place) + exe = base.Executor(place=self.place) [res_1, res_2] = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"x_1": input_1, "axis": 0}, fetch_list=[out_0, out_1], ) diff --git a/test/xpu/test_unfold_op_xpu.py b/test/xpu/test_unfold_op_xpu.py index c6e80469f7d0d..82faf188d7ae4 100644 --- a/test/xpu/test_unfold_op_xpu.py +++ b/test/xpu/test_unfold_op_xpu.py @@ -23,7 +23,7 @@ from op_test_xpu import XPUOpTest import paddle -from paddle import fluid +from paddle import base paddle.enable_static() @@ -158,8 +158,8 @@ def setUp(self): def test_dygraph(self): for place in self.places: - with fluid.dygraph.guard(place): - input = fluid.dygraph.to_variable(self.inputs['X']) + with base.dygraph.guard(place): + input = base.dygraph.to_variable(self.inputs['X']) m = paddle.nn.Unfold(**self.attrs) m.eval() result = m(input) diff --git a/test/xpu/test_unique_op_xpu.py b/test/xpu/test_unique_op_xpu.py index 691c4bea4b0b3..65d1333b37078 100644 --- a/test/xpu/test_unique_op_xpu.py +++ b/test/xpu/test_unique_op_xpu.py @@ -23,7 +23,7 @@ from op_test_xpu import XPUOpTest import paddle -from paddle.fluid import core +from paddle.base import core paddle.enable_static() @@ -50,7 +50,7 @@ def init_config(self): 'dtype': int(core.VarDesc.VarType.INT32), 'return_index': True, 'return_inverse': True, - 'is_sorted': True, # is_sorted must be set to true to call paddle.unique rather than fluid.layers.unique + 'is_sorted': True, # is_sorted must be set to true to call paddle.unique rather than base.layers.unique } self.outputs = { 'Out': np.array([1, 2, 3, 5], dtype=self.dtype), diff --git a/test/xpu/test_update_loss_scaling_op_xpu.py b/test/xpu/test_update_loss_scaling_op_xpu.py index c8e398a3d7782..c8ef942dc0c0b 100644 --- a/test/xpu/test_update_loss_scaling_op_xpu.py +++ 
b/test/xpu/test_update_loss_scaling_op_xpu.py @@ -23,7 +23,7 @@ from op_test_xpu import XPUOpTest import paddle -from paddle import fluid +from paddle import base from paddle.static.amp import amp_nn paddle.enable_static() @@ -108,7 +108,7 @@ def test_check_output(self): # self.check_output() class TestUpdateLossScalingLayer(unittest.TestCase): - def loss_scaling_check(self, scope=fluid.Scope()): + def loss_scaling_check(self, scope=base.Scope()): a = paddle.static.data( name="a", shape=[1024, 1024], dtype='float32' ) @@ -152,10 +152,10 @@ def loss_scaling_check(self, scope=fluid.Scope()): name="update_loss_scaling", ) - place = fluid.XPUPlace(0) - exe = fluid.Executor(place) - with fluid.scope_guard(scope): - exe.run(fluid.default_startup_program()) + place = base.XPUPlace(0) + exe = base.Executor(place) + with base.scope_guard(scope): + exe.run(base.default_startup_program()) result_v = exe.run( feed={ 'a': a_v, @@ -189,7 +189,7 @@ def loss_scaling_check(self, scope=fluid.Scope()): result_v[7], np.zeros_like(num_bad_steps_v) ) - def loss_scaling_check_inf(self, use_cuda=True, scope=fluid.Scope()): + def loss_scaling_check_inf(self, use_cuda=True, scope=base.Scope()): a = paddle.static.data( name="a", shape=[1024, 1024], dtype='float32' ) @@ -236,10 +236,10 @@ def loss_scaling_check_inf(self, use_cuda=True, scope=fluid.Scope()): name="update_loss_scaling", ) - place = fluid.XPUPlace(0) - exe = fluid.Executor(place) - with fluid.scope_guard(scope): - exe.run(fluid.default_startup_program()) + place = base.XPUPlace(0) + exe = base.Executor(place) + with base.scope_guard(scope): + exe.run(base.default_startup_program()) result_v = exe.run( feed={ 'a': a_v, @@ -274,17 +274,17 @@ def loss_scaling_check_inf(self, use_cuda=True, scope=fluid.Scope()): ) def test_loss_scaling(self): - main = fluid.Program() - startup = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, startup): + main = base.Program() + startup = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, startup): self.loss_scaling_check() def test_loss_scaling_inf(self): - main = fluid.Program() - startup = fluid.Program() - with fluid.unique_name.guard(): - with fluid.program_guard(main, startup): + main = base.Program() + startup = base.Program() + with base.unique_name.guard(): + with base.program_guard(main, startup): self.loss_scaling_check_inf() diff --git a/test/xpu/test_warpctc_op_xpu.py b/test/xpu/test_warpctc_op_xpu.py index d486ec009dc72..50a9ed961c86a 100644 --- a/test/xpu/test_warpctc_op_xpu.py +++ b/test/xpu/test_warpctc_op_xpu.py @@ -26,7 +26,7 @@ import paddle import paddle.nn.functional as F -from paddle.fluid import Program, program_guard +from paddle.base import Program, program_guard paddle.enable_static() diff --git a/test/xpu/test_where_index_xpu.py b/test/xpu/test_where_index_xpu.py index 70c4a3454e8a4..6d0cf79032ef6 100644 --- a/test/xpu/test_where_index_xpu.py +++ b/test/xpu/test_where_index_xpu.py @@ -23,8 +23,8 @@ from op_test_xpu import XPUOpTest import paddle -from paddle import fluid -from paddle.fluid import Program, program_guard +from paddle import base +from paddle.base import Program, program_guard paddle.enable_static() @@ -100,10 +100,10 @@ def test_api(self): cond = paddle.static.data(name='cond', shape=[-1, 4], dtype='bool') result = paddle.nonzero(cond) - exe = fluid.Executor(paddle.XPUPlace(0)) - exe.run(fluid.default_startup_program()) + exe = base.Executor(paddle.XPUPlace(0)) + exe.run(base.default_startup_program()) cond_i = 
np.array([True, False, False, False]).astype("bool") - out = exe.run(fluid.default_main_program(), feed={'cond': cond_i}) + out = exe.run(base.default_main_program(), feed={'cond': cond_i}) class TestWhereRaiseError(unittest.TestCase): diff --git a/test/xpu/test_where_op_xpu.py b/test/xpu/test_where_op_xpu.py index 13ec8c8c446a7..5a740f8dee5e9 100644 --- a/test/xpu/test_where_op_xpu.py +++ b/test/xpu/test_where_op_xpu.py @@ -23,8 +23,8 @@ from op_test_xpu import XPUOpTest import paddle -from paddle import fluid -from paddle.fluid.backward import append_backward +from paddle import base +from paddle.base.backward import append_backward paddle.enable_static() @@ -99,9 +99,9 @@ def ref_y_backward(self, dout): def test_api(self): for x_stop_gradient in [False, True]: for y_stop_gradient in [False, True]: - train_prog = fluid.Program() - startup = fluid.Program() - with fluid.program_guard(train_prog, startup): + train_prog = base.Program() + startup = base.Program() + with base.program_guard(train_prog, startup): cond = paddle.static.data( name='cond', shape=self.shape, dtype='bool' ) @@ -119,7 +119,7 @@ def test_api(self): result.stop_gradient = False append_backward(paddle.mean(result)) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) exe.run(startup) fetch_list = [result, result.grad_name] @@ -148,9 +148,9 @@ def test_api(self): ) def test_api_broadcast(self, use_cuda=False): - train_prog = fluid.Program() - startup = fluid.Program() - with fluid.program_guard(train_prog, startup): + train_prog = base.Program() + startup = base.Program() + with base.program_guard(train_prog, startup): x = paddle.static.data(name='x', shape=[-1, 4, 1], dtype='float32') y = paddle.static.data(name='y', shape=[-1, 4, 2], dtype='float32') x_i = np.array([[0.9383, 0.1983, 3.2, 1.2]]).astype("float32") @@ -159,7 +159,7 @@ def test_api_broadcast(self, use_cuda=False): ) result = paddle.where(x > 1, x=x, y=y) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) exe.run(startup) out = exe.run( @@ -170,13 +170,13 @@ def test_api_broadcast(self, use_cuda=False): class TestWhereDygraphAPI(unittest.TestCase): def test_api(self): - with fluid.dygraph.guard(paddle.XPUPlace(0)): + with base.dygraph.guard(paddle.XPUPlace(0)): x_i = np.array([0.9383, 0.1983, 3.2, 1.2]).astype("float32") y_i = np.array([1.0, 1.0, 1.0, 1.0]).astype("float32") cond_i = np.array([False, False, True, True]).astype("bool") - x = fluid.dygraph.to_variable(x_i) - y = fluid.dygraph.to_variable(y_i) - cond = fluid.dygraph.to_variable(cond_i) + x = base.dygraph.to_variable(x_i) + y = base.dygraph.to_variable(y_i) + cond = base.dygraph.to_variable(cond_i) out = paddle.where(cond, x, y) np.testing.assert_array_equal( out.numpy(), np.where(cond_i, x_i, y_i) diff --git a/test/xpu/test_while_op_xpu.py b/test/xpu/test_while_op_xpu.py index e151298ca4f43..e8fa27315021e 100644 --- a/test/xpu/test_while_op_xpu.py +++ b/test/xpu/test_while_op_xpu.py @@ -17,9 +17,9 @@ import numpy import paddle -from paddle import fluid -from paddle.fluid.backward import append_backward -from paddle.fluid.executor import Executor +from paddle import base +from paddle.base.backward import append_backward +from paddle.base.executor import Executor paddle.enable_static() @@ -76,9 +76,9 @@ def simple_net(self): return loss, sum_result def test_simple_net(self): - main_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(main_program, startup_program): + main_program = base.Program() + startup_program = 
base.Program() + with base.program_guard(main_program, startup_program): loss, sum_result = self.simple_net() append_backward(loss) @@ -97,11 +97,11 @@ def test_simple_net(self): self.assertAlmostEqual(numpy.sum(d), numpy.sum(outs[0]), delta=0.01) def test_simple_net_forward(self): - main_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(main_program, startup_program): + main_program = base.Program() + startup_program = base.Program() + with base.program_guard(main_program, startup_program): self.simple_net() - binary = fluid.compiler.CompiledProgram(main_program) + binary = base.compiler.CompiledProgram(main_program) xpu_place = paddle.XPUPlace(0) exe = Executor(xpu_place) diff --git a/test/xpu/test_xpu_place.py b/test/xpu/test_xpu_place.py index f401d721c0805..08fc3ff627aa0 100644 --- a/test/xpu/test_xpu_place.py +++ b/test/xpu/test_xpu_place.py @@ -16,8 +16,8 @@ import unittest import paddle -from paddle import fluid, static -from paddle.fluid import core +from paddle import base, static +from paddle.base import core class Test_XPU_Places(unittest.TestCase): @@ -31,12 +31,12 @@ def test_check_preset_envs(self): if core.is_compiled_with_xpu(): os.environ["FLAGS_selected_xpus"] = "0" place_list = static.xpu_places() - self.assert_places_equal([fluid.XPUPlace(0)], place_list) + self.assert_places_equal([base.XPUPlace(0)], place_list) def test_check_no_preset_envs(self): if core.is_compiled_with_xpu(): place_list = static.xpu_places(0) - self.assert_places_equal([fluid.XPUPlace(0)], place_list) + self.assert_places_equal([base.XPUPlace(0)], place_list) if __name__ == '__main__': diff --git a/tools/CrossStackProfiler/ProfileFileReader.py b/tools/CrossStackProfiler/ProfileFileReader.py index ad6e44c6ac003..af955bd6652c4 100755 --- a/tools/CrossStackProfiler/ProfileFileReader.py +++ b/tools/CrossStackProfiler/ProfileFileReader.py @@ -26,7 +26,7 @@ getLogger, ) -from paddle.fluid.proto.profiler import profiler_pb2 +from paddle.base.proto.profiler import profiler_pb2 class profileFileReader(FileReader): diff --git a/tools/check_file_diff_approvals.sh b/tools/check_file_diff_approvals.sh index 6395c2d5ef27e..405e55a76005b 100644 --- a/tools/check_file_diff_approvals.sh +++ b/tools/check_file_diff_approvals.sh @@ -38,28 +38,28 @@ API_FILES=("CMakeLists.txt" "python/paddle/distributed/fleet/__init__.py" "python/paddle/distributed/fleet/launch.py" "python/requirements.txt" - "python/paddle/fluid/__init__.py" - "python/paddle/fluid/compiler.py" - "python/paddle/fluid/parallel_executor.py" - "python/paddle/fluid/framework.py" - "python/paddle/fluid/backward.py" + "python/paddle/base/__init__.py" + "python/paddle/base/compiler.py" + "python/paddle/base/parallel_executor.py" + "python/paddle/base/framework.py" + "python/paddle/base/backward.py" "paddle/fluid/operators/distributed/send_recv.proto.in" "paddle/fluid/framework/unused_var_check.cc" - "python/paddle/fluid/tests/unittests/white_list/check_shape_white_list.py" - "python/paddle/fluid/tests/unittests/white_list/op_accuracy_white_list.py" - "python/paddle/fluid/tests/unittests/white_list/compile_vs_runtime_white_list.py" - "python/paddle/fluid/tests/unittests/white_list/no_check_set_white_list.py" - "python/paddle/fluid/tests/unittests/white_list/check_op_sequence_instance_0_input_white_list.py" - "python/paddle/fluid/tests/unittests/white_list/op_threshold_white_list.py" - "python/paddle/fluid/tests/unittests/white_list/check_op_sequence_batch_1_input_white_list.py" - 
"python/paddle/fluid/tests/unittests/white_list/no_grad_set_white_list.py" + "python/paddle/base/tests/unittests/white_list/check_shape_white_list.py" + "python/paddle/base/tests/unittests/white_list/op_accuracy_white_list.py" + "python/paddle/base/tests/unittests/white_list/compile_vs_runtime_white_list.py" + "python/paddle/base/tests/unittests/white_list/no_check_set_white_list.py" + "python/paddle/base/tests/unittests/white_list/check_op_sequence_instance_0_input_white_list.py" + "python/paddle/base/tests/unittests/white_list/op_threshold_white_list.py" + "python/paddle/base/tests/unittests/white_list/check_op_sequence_batch_1_input_white_list.py" + "python/paddle/base/tests/unittests/white_list/no_grad_set_white_list.py" "tools/print_signatures.py" "tools/sampcd_processor.py" "tools/check_pr_approval.py" "paddle/scripts/paddle_build.bat" "tools/windows/run_unittests.sh" "tools/parallel_UT_rule.py" - "python/paddle/fluid/dygraph/layers.py" + "python/paddle/base/dygraph/layers.py" "paddle/fluid/eager/grad_node_info.h" "paddle/fluid/eager/grad_node_info.cc" "paddle/fluid/eager/grad_tensor_holder.h" @@ -142,7 +142,7 @@ for API_FILE in ${API_FILES[*]}; do elif [ "${API_FILE}" == "third_party" ];then echo_line="You must have one RD (risemeup1 or tianshuo78520a) approval for ${API_FILE}.\n" check_approval 1 risemeup1 tianshuo78520a - elif [ "${API_FILE}" == "python/paddle/fluid/__init__.py" ];then + elif [ "${API_FILE}" == "python/paddle/base/__init__.py" ];then echo_line="You must have one RD (lanxianghit (Recommend), phlrain, luotao1, Aurelius84 or qili93) approval for the python/paddle/fluid/init.py, which manages the environment variables.\n" check_approval 1 lanxianghit phlrain luotao1 Aurelius84 qili93 elif [ "${API_FILE}" == "python/requirements.txt" ];then @@ -341,9 +341,9 @@ if [ "${HAS_MODIFIED_STATIC_BUILD}" != "" ] && [ "${GIT_PR_ID}" != ""]; then check_approval 1 From00 zhiqiu fi -HAS_MODIFIED_PY_FLUID=`git diff --name-only upstream/$BRANCH | grep "python/paddle/fluid" || true` +HAS_MODIFIED_PY_FLUID=`git diff --name-only upstream/$BRANCH | grep "python/paddle/base" || true` if [ "${HAS_MODIFIED_PY_FLUID}" != "" ] && [ "${GIT_PR_ID}" != "" ]; then - echo_line="You must have one RD (zoooo0820(Recommend), or jeff41404) approval for file changes in python/paddle/fluid, because fluid API is going to be removed.\n" + echo_line="You must have one RD (zoooo0820(Recommend), or jeff41404) approval for file changes in python/paddle/base, because fluid API is going to be removed.\n" check_approval 1 zoooo0820 jeff41404 fi diff --git a/tools/check_op_desc.py b/tools/check_op_desc.py index ce234589564a2..89a5e87af0b45 100644 --- a/tools/check_op_desc.py +++ b/tools/check_op_desc.py @@ -15,7 +15,7 @@ import json import sys -from paddle.fluid.core import OpUpdateType +from paddle.base.core import OpUpdateType from paddle.utils import OpLastCheckpointChecker INPUTS = "Inputs" diff --git a/tools/check_op_register_type.py b/tools/check_op_register_type.py index 349c8f3616589..5c3a72622ec38 100644 --- a/tools/check_op_register_type.py +++ b/tools/check_op_register_type.py @@ -23,14 +23,14 @@ import re import sys -from paddle import fluid +from paddle import base INTS = {'int', 'int64_t'} FLOATS = {'float', 'double'} def get_all_kernels(): - all_kernels_info = fluid.core._get_all_register_op_kernels() + all_kernels_info = base.core._get_all_register_op_kernels() # [u'data_type[double]:data_layout[ANY_LAYOUT]:place[CPUPlace]:library_type[PLAIN]' op_kernel_types = collections.defaultdict(list) for 
op_type, op_infos in all_kernels_info.items(): diff --git a/tools/cinn/paddle_benchmark/paddle_test_benchmark.py b/tools/cinn/paddle_benchmark/paddle_test_benchmark.py index ff7f4d5bd72c8..56099e4749a70 100755 --- a/tools/cinn/paddle_benchmark/paddle_test_benchmark.py +++ b/tools/cinn/paddle_benchmark/paddle_test_benchmark.py @@ -18,7 +18,7 @@ import numpy as np import paddle.inference as paddle_infer -from paddle.fluid.core import AnalysisConfig, create_paddle_predictor +from paddle.base.core import AnalysisConfig, create_paddle_predictor def main(): diff --git a/tools/cinn/paddle_benchmark/test_paddle_ops.py b/tools/cinn/paddle_benchmark/test_paddle_ops.py index 6cb9e806d3096..f830eb9394655 100755 --- a/tools/cinn/paddle_benchmark/test_paddle_ops.py +++ b/tools/cinn/paddle_benchmark/test_paddle_ops.py @@ -18,7 +18,7 @@ import paddle from paddle import static -from paddle.fluid.core import AnalysisConfig, create_paddle_predictor +from paddle.base.core import AnalysisConfig, create_paddle_predictor def set_config(op_name, input_shapes, enable_gpu=False): diff --git a/tools/count_api_without_core_ops.py b/tools/count_api_without_core_ops.py index 0a5da2a8fa5d4..c1ef80cd9c9fe 100644 --- a/tools/count_api_without_core_ops.py +++ b/tools/count_api_without_core_ops.py @@ -24,9 +24,9 @@ # APIs that should not be printed into API.spec omitted_list = [ - "paddle.fluid.LoDTensor.set", # Do not know why it should be omitted - "paddle.fluid.io.ComposeNotAligned", - "paddle.fluid.io.ComposeNotAligned.__init__", + "paddle.base.LoDTensor.set", # Do not know why it should be omitted + "paddle.base.io.ComposeNotAligned", + "paddle.base.io.ComposeNotAligned.__init__", ] diff --git a/tools/diff_use_default_grad_op_maker.py b/tools/diff_use_default_grad_op_maker.py index c79d758b3e781..a8e82fe657fd1 100644 --- a/tools/diff_use_default_grad_op_maker.py +++ b/tools/diff_use_default_grad_op_maker.py @@ -18,12 +18,12 @@ import sys -from paddle import fluid +from paddle import base def generate_spec(filename): with open(filename, 'w') as f: - ops = fluid.core._get_use_default_grad_op_desc_maker_ops() + ops = base.core._get_use_default_grad_op_desc_maker_ops() for op in ops: f.write(op + '\n') diff --git a/tools/parse_kernel_info.py b/tools/parse_kernel_info.py index ad5a696bc626c..8a2edf06f7d89 100644 --- a/tools/parse_kernel_info.py +++ b/tools/parse_kernel_info.py @@ -94,13 +94,13 @@ def parse_paddle_kernels(lib="phi", kernel_type="function", print_detail=False): if lib == "phi": assert kernel_type in ["function", "structure", "all"] # phi kernel type can be: function, structure, all - kernel_infos = paddle.fluid.core._get_registered_phi_kernels( + kernel_infos = paddle.base.core._get_registered_phi_kernels( kernel_type ) else: # fluid, phi, all assert kernel_type in ["fluid", "phi", "all"] - kernel_infos = paddle.fluid.core._get_all_register_op_kernels( + kernel_infos = paddle.base.core._get_all_register_op_kernels( kernel_type ) diff --git a/tools/print_op_desc.py b/tools/print_op_desc.py index b078ad96e33ab..4bfaddeb953fa 100644 --- a/tools/print_op_desc.py +++ b/tools/print_op_desc.py @@ -44,7 +44,7 @@ import json -from paddle.fluid import core, framework +from paddle.base import core, framework INPUTS = "Inputs" OUTPUTS = "Outputs" diff --git a/tools/print_signatures.py b/tools/print_signatures.py index 3af095fff2e10..76dc85821d47c 100644 --- a/tools/print_signatures.py +++ b/tools/print_signatures.py @@ -88,7 +88,7 @@ def visit_all_module(mod): if mod_name != 'paddle' and not 
mod_name.startswith('paddle.'): return - if mod_name.startswith('paddle.fluid.core'): + if mod_name.startswith('paddle.base.core'): return if mod in visited_modules: @@ -353,7 +353,7 @@ def parse_args(): dest='skipped', type=str, help='Skip Checking submodules', - default='paddle.fluid.libpaddle.eager.ops', + default='paddle.base.libpaddle.eager.ops', ) if len(sys.argv) == 1: diff --git a/tools/summary_env.py b/tools/summary_env.py index 42019c280d666..10d82cd8746f9 100644 --- a/tools/summary_env.py +++ b/tools/summary_env.py @@ -42,7 +42,7 @@ def get_paddle_info(): import paddle envs['paddle_version'] = paddle.__version__ - envs['paddle_with_cuda'] = paddle.fluid.core.is_compiled_with_cuda() + envs['paddle_with_cuda'] = paddle.base.core.is_compiled_with_cuda() except: envs['paddle_version'] = 'N/A' envs['paddle_with_cuda'] = 'N/A' diff --git a/tools/test_check_api_compatible.py b/tools/test_check_api_compatible.py index dd134334ed9fd..0ec61521cb2da 100644 --- a/tools/test_check_api_compatible.py +++ b/tools/test_check_api_compatible.py @@ -74,7 +74,7 @@ def test_args_reduced(self): class Test_check_compatible_str(unittest.TestCase): def setUp(self) -> None: self.fullargspec_prefix = 'inspect.Full' - # paddle.fluid.layer_helper_base.LayerHelperBase.create_parameter + # paddle.base.layer_helper_base.LayerHelperBase.create_parameter self.argspec_str_o = ( self.fullargspec_prefix + """ArgSpec(args=['self', 'attr', 'shape', 'dtype', 'is_bias', 'default_initializer', 'stop_gradient', 'type'], varargs=None, varkw=None, defaults=(None, False, None, False, VarType.LOD_TENSOR), kwonlyargs=[], kwonlydefaults=None, annotations={})""" diff --git a/tools/test_runner.py b/tools/test_runner.py index f7e73be2c920b..37d132fbc1535 100644 --- a/tools/test_runner.py +++ b/tools/test_runner.py @@ -19,8 +19,8 @@ from io import StringIO import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core sys.path.append(os.path.abspath(os.path.dirname(__file__))) sys.path.append( @@ -52,12 +52,12 @@ def main(): flag_need_static_mode = True paddle.enable_static() buffer = StringIO() - main = fluid.Program() - startup = fluid.Program() - scope = fluid.core.Scope() - with fluid.program_guard(main, startup): - with fluid.scope_guard(scope): - with fluid.unique_name.guard(): + main = base.Program() + startup = base.Program() + scope = base.core.Scope() + with base.program_guard(main, startup): + with base.scope_guard(scope): + with base.unique_name.guard(): test_loader = unittest.TestLoader() module = importlib.import_module(module_name) tests = test_loader.loadTestsFromModule(module) diff --git a/tools/timeline.py b/tools/timeline.py index c5521bdfb107f..ff8d0946378d7 100644 --- a/tools/timeline.py +++ b/tools/timeline.py @@ -15,7 +15,7 @@ import argparse import json -from paddle.fluid.proto.profiler import profiler_pb2 +from paddle.base.proto.profiler import profiler_pb2 parser = argparse.ArgumentParser(description=__doc__) parser.add_argument( From 6e363999325e6c74297ae8adbd5821d4eada3beb Mon Sep 17 00:00:00 2001 From: Difers <707065510@qq.com> Date: Wed, 16 Aug 2023 16:35:32 +0800 Subject: [PATCH 02/10] fix setup and rename paddle.fluid --- paddle/fluid/pybind/imperative.cc | 6 +++--- paddle/fluid/pybind/place.cc | 2 +- paddle/fluid/pybind/pybind.cc | 8 ++++---- paddle/fluid/pybind/tensor.cc | 18 ++++++++--------- r/README.md | 2 +- r/README_cn.md | 2 +- r/example/mobilenet.py | 2 +- r/example/mobilenet.r | 2 +- security/advisory/pdsa-2022-001.md | 2 +- 
security/advisory/pdsa-2022-001_cn.md | 2 +- setup.py | 28 +++++++++++++-------------- test/amp/amp_base_models.py | 2 +- test/amp/test_amp_api.py | 4 ++-- tools/count_api_without_core_ops.py | 2 +- tools/diff_api.py | 2 +- tools/print_signatures.py | 2 +- 16 files changed, 43 insertions(+), 43 deletions(-) diff --git a/paddle/fluid/pybind/imperative.cc b/paddle/fluid/pybind/imperative.cc index 111e8ebdfdf86..295dffd53af42 100644 --- a/paddle/fluid/pybind/imperative.cc +++ b/paddle/fluid/pybind/imperative.cc @@ -1034,7 +1034,7 @@ void BindImperative(py::module *m_ptr) { import paddle data = np.random.randint(10, size=(3, 4)) - tensor = paddle.fluid.core.to_uva_tensor(data) + tensor = paddle.base.core.to_uva_tensor(data) print(tensor) )DOC"); @@ -1165,7 +1165,7 @@ void BindImperative(py::module *m_ptr) { import numpy as np import paddle - from paddle.fluid import core + from paddle.base import core from paddle.device import cuda if core.is_compiled_with_cuda(): @@ -1397,7 +1397,7 @@ void BindImperative(py::module *m_ptr) { import numpy as np import paddle - from paddle.fluid import core + from paddle.base import core from paddle.device import cuda if core.is_compiled_with_cuda(): diff --git a/paddle/fluid/pybind/place.cc b/paddle/fluid/pybind/place.cc index 1f78b0994ae98..57196c592c5c7 100644 --- a/paddle/fluid/pybind/place.cc +++ b/paddle/fluid/pybind/place.cc @@ -394,7 +394,7 @@ void BindPlace(pybind11::module &m) { // NOLINT **Note**: Examples: .. code-block:: python - import paddle.fluid as fluid + import paddle.base as fluid xpu_place = fluid.XPUPlace(0) )DOC"); g_xpuplace_pytype = reinterpret_cast(xpuplace.ptr()); diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index a3eab23650845..42ea19183909d 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -1103,7 +1103,7 @@ PYBIND11_MODULE(libpaddle, m) { }); // NOTE(zjl): ctest would load environment variables at the beginning even - // though we have not `import paddle.fluid as fluid`. So we add this API + // though we have not `import paddle.base as fluid`. So we add this API // to enable eager deletion mode in unittest. m.def("_set_eager_deletion_mode", &paddle::framework::SetEagerDeletionMode); @@ -1239,7 +1239,7 @@ All parameter, weight, gradient are variables in Paddle. Examples: .. code-block:: python - import paddle.fluid as fluid + import paddle.base as fluid # create tensor from a scope and set value to it. param = scope.var('Param').get_tensor() param_array = np.full((height, row_numel), 5.0).astype("float32") @@ -2149,7 +2149,7 @@ All parameter, weight, gradient are variables in Paddle. Examples: .. code-block:: python - import paddle.fluid as fluid + import paddle.base as fluid arr = fluid.LoDTensorArray() )DOC"); @@ -2193,7 +2193,7 @@ All parameter, weight, gradient are variables in Paddle. Examples: .. code-block:: python - import paddle.fluid as fluid + import paddle.base as fluid import numpy as np arr = fluid.LoDTensorArray() diff --git a/paddle/fluid/pybind/tensor.cc b/paddle/fluid/pybind/tensor.cc index 0bc22b373668f..78169ec22fbfe 100644 --- a/paddle/fluid/pybind/tensor.cc +++ b/paddle/fluid/pybind/tensor.cc @@ -393,7 +393,7 @@ void BindTensor(pybind11::module &m) { // NOLINT Examples: .. code-block:: python - import paddle.fluid as fluid + import paddle.base as fluid import numpy as np t = fluid.Tensor() @@ -413,7 +413,7 @@ void BindTensor(pybind11::module &m) { // NOLINT Examples: .. 
code-block:: python - import paddle.fluid as fluid + import paddle.base as fluid import numpy as np t = fluid.Tensor() @@ -517,7 +517,7 @@ void BindTensor(pybind11::module &m) { // NOLINT Examples: .. code-block:: python - import paddle.fluid as fluid + import paddle.base as fluid import numpy as np t = fluid.Tensor() @@ -566,7 +566,7 @@ void BindTensor(pybind11::module &m) { // NOLINT Examples: .. code-block:: python - import paddle.fluid as fluid + import paddle.base as fluid import numpy as np t = fluid.Tensor() @@ -594,7 +594,7 @@ void BindTensor(pybind11::module &m) { // NOLINT Examples: .. code-block:: python - import paddle.fluid as fluid + import paddle.base as fluid import numpy as np t = fluid.Tensor() @@ -623,7 +623,7 @@ void BindTensor(pybind11::module &m) { // NOLINT Examples: .. code-block:: python - import paddle.fluid as fluid + import paddle.base as fluid import numpy as np t = fluid.Tensor() @@ -647,7 +647,7 @@ void BindTensor(pybind11::module &m) { // NOLINT Examples: .. code-block:: python - import paddle.fluid as fluid + import paddle.base as fluid import numpy as np t = fluid.Tensor() @@ -824,7 +824,7 @@ void BindTensor(pybind11::module &m) { // NOLINT import paddle tensor = paddle.ones([3,3]) metainfo = tensor.value().get_tensor()._share_cuda() - tensor_from_shared = paddle.to_tensor(paddle.fluid.core.LoDTensor._new_shared_cuda(metainfo)) + tensor_from_shared = paddle.to_tensor(paddle.base.core.LoDTensor._new_shared_cuda(metainfo)) )DOC") #endif @@ -945,7 +945,7 @@ void BindTensor(pybind11::module &m) { // NOLINT import paddle tensor = paddle.ones([3,3]) metainfo = tensor.value().get_tensor()._share_filename() - tensor_from_shared = paddle.to_tensor(paddle.fluid.core.LoDTensor._new_shared_filename(metainfo)) + tensor_from_shared = paddle.to_tensor(paddle.base.core.LoDTensor._new_shared_filename(metainfo)) )DOC") .def("_shared_incref", diff --git a/r/README.md b/r/README.md index 33f1807cd6afc..15ffddcbc1205 100644 --- a/r/README.md +++ b/r/README.md @@ -32,7 +32,7 @@ library(reticulate) library(RcppCNPy) use_python("/opt/python3.7/bin/python3.7") -paddle <- import("paddle.fluid.core") +paddle <- import("paddle.base.core") ``` Create an `AnalysisConfig`, which is the configuration of the paddle inference engine. 
diff --git a/r/README_cn.md b/r/README_cn.md index b844e0ad0849b..9dcb49d638722 100644 --- a/r/README_cn.md +++ b/r/README_cn.md @@ -22,7 +22,7 @@ install.packages("reticulate") # 调用Paddle library(reticulate) use_python("/opt/python3.7/bin/python") -paddle <- import("paddle.fluid.core") +paddle <- import("paddle.base.core") ``` 创建一个AnalysisConfig,用于设置预测引擎的各选项 diff --git a/r/example/mobilenet.py b/r/example/mobilenet.py index 806c5ba27ad86..d5f18260dac02 100755 --- a/r/example/mobilenet.py +++ b/r/example/mobilenet.py @@ -18,7 +18,7 @@ import numpy as np -from paddle.fluid.core import AnalysisConfig, create_paddle_predictor +from paddle.base.core import AnalysisConfig, create_paddle_predictor def main(): diff --git a/r/example/mobilenet.r b/r/example/mobilenet.r index 3da8965eb5742..d3978c9f5d096 100755 --- a/r/example/mobilenet.r +++ b/r/example/mobilenet.r @@ -5,7 +5,7 @@ library(reticulate) # call Python library use_python("/opt/python3.7/bin/python") np <- import("numpy") -paddle <- import("paddle.fluid.core") +paddle <- import("paddle.base.core") set_config <- function() { config <- paddle$AnalysisConfig("") diff --git a/security/advisory/pdsa-2022-001.md b/security/advisory/pdsa-2022-001.md index 596b314e749cf..2f4250131233c 100644 --- a/security/advisory/pdsa-2022-001.md +++ b/security/advisory/pdsa-2022-001.md @@ -10,7 +10,7 @@ The PoC is as follows: ```python import paddle -import paddle.fluid as fluid +import paddle.base as fluid import numpy as np ids = paddle.to_tensor([[2,2],[6,1]]) diff --git a/security/advisory/pdsa-2022-001_cn.md b/security/advisory/pdsa-2022-001_cn.md index 60e428e2adddf..ae270510d7e83 100644 --- a/security/advisory/pdsa-2022-001_cn.md +++ b/security/advisory/pdsa-2022-001_cn.md @@ -10,7 +10,7 @@ PoC如下: ```python import paddle -import paddle.fluid as fluid +import paddle.base as fluid import numpy as np ids = paddle.to_tensor([[2,2],[6,1]]) diff --git a/setup.py b/setup.py index 58d2a6ac93553..70a770991619a 100644 --- a/setup.py +++ b/setup.py @@ -891,16 +891,16 @@ def get_setup_requires(): def get_package_data_and_package_dir(): if os.name != 'nt': package_data = { - 'paddle.fluid': [env_dict.get("FLUID_CORE_NAME") + '.so'] + 'paddle.base': [env_dict.get("FLUID_CORE_NAME") + '.so'] } else: package_data = { - 'paddle.fluid': [ + 'paddle.base': [ env_dict.get("FLUID_CORE_NAME") + '.pyd', env_dict.get("FLUID_CORE_NAME") + '.lib', ] } - package_data['paddle.fluid'] += [ + package_data['paddle.base'] += [ paddle_binary_dir + '/python/paddle/cost_model/static_op_benchmark.json' ] if 'develop' in sys.argv: @@ -908,11 +908,11 @@ def get_package_data_and_package_dir(): else: package_dir = { '': env_dict.get("PADDLE_BINARY_DIR") + '/python', - 'paddle.fluid.proto.profiler': env_dict.get("PADDLE_BINARY_DIR") + 'paddle.base.proto.profiler': env_dict.get("PADDLE_BINARY_DIR") + '/paddle/fluid/platform', - 'paddle.fluid.proto': env_dict.get("PADDLE_BINARY_DIR") + 'paddle.base.proto': env_dict.get("PADDLE_BINARY_DIR") + '/paddle/fluid/framework', - 'paddle.fluid': env_dict.get("PADDLE_BINARY_DIR") + 'paddle.base': env_dict.get("PADDLE_BINARY_DIR") + '/python/paddle/base', } # put all thirdparty libraries in paddle.libs @@ -1150,7 +1150,7 @@ def get_package_data_and_package_dir(): # change rpath of ${FLUID_CORE_NAME}.ext, add $ORIGIN/../libs/ to it. # The reason is that libwarpctc.ext, libwarprnnt.ext, libiomp5.ext etc are in paddle.libs, and - # ${FLUID_CORE_NAME}.ext is in paddle.fluid, thus paddle/fluid/../libs will pointer to above libraries. 
+ # ${FLUID_CORE_NAME}.ext is in paddle.base, thus paddle/fluid/../libs will pointer to above libraries. # This operation will fix https://github.com/PaddlePaddle/Paddle/issues/3213 if env_dict.get("CMAKE_BUILD_TYPE") == 'Release': if os.name != 'nt': @@ -1419,14 +1419,14 @@ def get_setup_parameters(): 'paddle.inference', 'paddle.inference.contrib', 'paddle.inference.contrib.utils', - 'paddle.fluid', - 'paddle.fluid.dygraph', - 'paddle.fluid.proto', - 'paddle.fluid.proto.profiler', - 'paddle.fluid.layers', - 'paddle.fluid.incubate', + 'paddle.base', + 'paddle.base.dygraph', + 'paddle.base.proto', + 'paddle.base.proto.profiler', + 'paddle.base.layers', + 'paddle.base.incubate', 'paddle.incubate.distributed.fleet', - 'paddle.fluid.incubate.checkpoint', + 'paddle.base.incubate.checkpoint', 'paddle.amp', 'paddle.cost_model', 'paddle.hapi', diff --git a/test/amp/amp_base_models.py b/test/amp/amp_base_models.py index 8487b1a7d83be..8d4bb8a1106bd 100644 --- a/test/amp/amp_base_models.py +++ b/test/amp/amp_base_models.py @@ -20,7 +20,7 @@ import paddle from paddle import nn -from paddle.fluid import core +from paddle.base import core from paddle.framework import in_dynamic_mode diff --git a/test/amp/test_amp_api.py b/test/amp/test_amp_api.py index 607117c84aa04..9d48d06fb6f90 100644 --- a/test/amp/test_amp_api.py +++ b/test/amp/test_amp_api.py @@ -20,7 +20,7 @@ import paddle import paddle.nn.functional as F from paddle import nn -from paddle.fluid import core +from paddle.base import core from paddle.static import amp @@ -162,7 +162,7 @@ def test_amp_grad_scaler(self): scaler.minimize(optimizer, scaled) optimizer.clear_grad() paddle.amp.debugging.disable_operator_stats_collection() - op_list = paddle.fluid.core.get_low_precision_op_list() + op_list = paddle.base.core.get_low_precision_op_list() self.assertEqual(scaler._enable, False) self.assertEqual(scaler._use_dynamic_loss_scaling, False) diff --git a/tools/count_api_without_core_ops.py b/tools/count_api_without_core_ops.py index c1ef80cd9c9fe..90d5e48f5167c 100644 --- a/tools/count_api_without_core_ops.py +++ b/tools/count_api_without_core_ops.py @@ -143,7 +143,7 @@ def visit_all_module(mod, func): if mod_name != 'paddle' and not mod_name.startswith('paddle.'): return - if mod_name.startswith('paddle.fluid.core'): + if mod_name.startswith('paddle.base.core'): return if mod in visited_modules: diff --git a/tools/diff_api.py b/tools/diff_api.py index b52ec91badb86..107892395c92f 100644 --- a/tools/diff_api.py +++ b/tools/diff_api.py @@ -44,7 +44,7 @@ 1. cd ${paddle_path}, compile paddle; 2. pip install build/python/dist/(build whl package); - 3. run "python tools/print_signatures.py paddle.fluid> paddle/fluid/API.spec" + 3. run "python tools/print_signatures.py paddle.base> paddle/fluid/API.spec" ''' if error: print('API Difference is: ') diff --git a/tools/print_signatures.py b/tools/print_signatures.py index 76dc85821d47c..cdae91ece7023 100644 --- a/tools/print_signatures.py +++ b/tools/print_signatures.py @@ -15,7 +15,7 @@ Print all signature of a python module in alphabet order. 
Usage: - ./print_signature "paddle.fluid" > signature.txt + ./print_signature "paddle.base" > signature.txt """ import argparse From 6779c4b45fb155e2dd9a06fa86788c96a1c67cae Mon Sep 17 00:00:00 2001 From: Difers <707065510@qq.com> Date: Sat, 19 Aug 2023 14:35:17 +0800 Subject: [PATCH 03/10] fix protobuf generated paddle.fluid --- cmake/generic.cmake | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/cmake/generic.cmake b/cmake/generic.cmake index 077db75fde206..6de7c7d792f63 100644 --- a/cmake/generic.cmake +++ b/cmake/generic.cmake @@ -1088,7 +1088,20 @@ function(py_proto_compile TARGET_NAME) "${multiValueArgs}" ${ARGN}) set(py_srcs) protobuf_generate_python(py_srcs ${py_proto_compile_SRCS}) - add_custom_target(${TARGET_NAME} ALL DEPENDS ${py_srcs} protobuf) + + add_custom_target(${TARGET_NAME}_replace DEPENDS ${py_srcs}) + + foreach(py_src ${py_srcs}) + add_custom_command( + TARGET ${TARGET_NAME}_replace + COMMAND sed -i 's/\\/paddle.base/g' ${py_src} + COMMENT + "Replacing 'paddle.fluid' with 'paddle.base' generated by protobuf" + COMMENT "Replace ${py_src}") + endforeach() + + add_custom_target(${TARGET_NAME} ALL DEPENDS ${py_srcs} protobuf + ${TARGET_NAME}_replace) endfunction() function(py_test TARGET_NAME) From 2527ae8066a5a5205f3390ab984efb47734a8298 Mon Sep 17 00:00:00 2001 From: Difers <707065510@qq.com> Date: Tue, 22 Aug 2023 16:43:33 +0800 Subject: [PATCH 04/10] fix some errors --- cmake/generic.cmake | 3 +- cmake/replace_string.py | 31 +++++ paddle/fluid/API.spec | 6 +- python/paddle/jit/dy2static/function_spec.py | 4 +- .../jit/dy2static/program_translator.py | 4 +- python/paddle/static/nn/control_flow.py | 8 +- python/paddle/static/nn/static_pylayer.py | 8 +- test/auto_parallel/reshard_r_to_s.py | 2 +- test/custom_runtime/test_custom_op_setup.py | 2 +- test/legacy_test/test_detection.py | 16 +-- test/legacy_test/test_fused_layernorm_op.py | 2 +- .../test_get_all_op_or_kernel_names.py | 9 +- .../test_imperative_hook_for_layer.py | 12 +- test/legacy_test/test_inference_api.py | 10 +- test/legacy_test/test_layers.py | 116 ++++++++++-------- test/legacy_test/test_lu_unpack_op.py | 6 +- test/prim/new_ir_prim/test_vjp_prim.py | 2 +- 17 files changed, 140 insertions(+), 101 deletions(-) create mode 100644 cmake/replace_string.py diff --git a/cmake/generic.cmake b/cmake/generic.cmake index 6de7c7d792f63..4b97d639eca4d 100644 --- a/cmake/generic.cmake +++ b/cmake/generic.cmake @@ -1094,7 +1094,8 @@ function(py_proto_compile TARGET_NAME) foreach(py_src ${py_srcs}) add_custom_command( TARGET ${TARGET_NAME}_replace - COMMAND sed -i 's/\\/paddle.base/g' ${py_src} + COMMAND ${PYTHON_EXECUTABLE} ${PADDLE_SOURCE_DIR}/cmake/replace_string.py + ${py_src} COMMENT "Replacing 'paddle.fluid' with 'paddle.base' generated by protobuf" COMMENT "Replace ${py_src}") diff --git a/cmake/replace_string.py b/cmake/replace_string.py new file mode 100644 index 0000000000000..b7e10b5c9f946 --- /dev/null +++ b/cmake/replace_string.py @@ -0,0 +1,31 @@ +# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import sys + + +def main(): + src = sys.argv[1] + + with open(src, 'r') as file: + content = file.read() + + new_content = content.replace('paddle.fluid', 'paddle.base') + + with open(src, 'w') as file: + file.write(new_content) + + +if __name__ == "__main__": + main() diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec index 5771a0abd75b6..51eda469b773a 100644 --- a/paddle/fluid/API.spec +++ b/paddle/fluid/API.spec @@ -1,6 +1,6 @@ -paddle.fluid.optimizer.PipelineOptimizer (paddle.fluid.optimizer.PipelineOptimizer, ('document', '2e55a29dbeb874934f7a1a1af3a22b8c')) -paddle.fluid.optimizer.PipelineOptimizer.__init__ (ArgSpec(args=['self', 'optimizer', 'num_microbatches', 'start_cpu_core_id'], varargs=None, keywords=None, defaults=(1, 0)), ('document', '6adf97f83acf6453d4a6a4b1070f3754')) -paddle.fluid.optimizer.PipelineOptimizer.minimize (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754')) +paddle.base.optimizer.PipelineOptimizer (paddle.base.optimizer.PipelineOptimizer, ('document', '2e55a29dbeb874934f7a1a1af3a22b8c')) +paddle.base.optimizer.PipelineOptimizer.__init__ (ArgSpec(args=['self', 'optimizer', 'num_microbatches', 'start_cpu_core_id'], varargs=None, keywords=None, defaults=(1, 0)), ('document', '6adf97f83acf6453d4a6a4b1070f3754')) +paddle.base.optimizer.PipelineOptimizer.minimize (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754')) paddle.audio.features (ArgSpec(), ('document', 'd41d8cd98f00b204e9800998ecf8427e')) paddle.audio.features.layers.LogMelSpectrogram (ArgSpec(), ('document', 'c38b53606aa89215c4f00d3833e158b8')) paddle.audio.features.layers.LogMelSpectrogram.forward (ArgSpec(args=['self', 'x'], varargs=None, varkw=None, defaults=None, kwonlyargs=[], kwonlydefaults=None, annotations={'return': , 'x': }), ('document', '6c14f6f78dc697a6981cf90412e2f1ea')) diff --git a/python/paddle/jit/dy2static/function_spec.py b/python/paddle/jit/dy2static/function_spec.py index a38cca8684f45..d34a5dc6288f6 100644 --- a/python/paddle/jit/dy2static/function_spec.py +++ b/python/paddle/jit/dy2static/function_spec.py @@ -325,7 +325,7 @@ def _replace_to_input_spec_with_new_name(args, arg_names): stop_gradient = origin_input.stop_gradient input_var = paddle.static.InputSpec.from_tensor(origin_input) input_var.stop_gradient = stop_gradient - elif isinstance(origin_input, paddle.fluid.framework.Variable): + elif isinstance(origin_input, paddle.base.framework.Variable): stop_gradient = origin_input.stop_gradient input_var = paddle.static.InputSpec( origin_input.shape, origin_input.dtype, origin_input.name @@ -339,7 +339,7 @@ def _replace_to_input_spec_with_new_name(args, arg_names): ( np.ndarray, core.eager.Tensor, - paddle.fluid.framework.Variable, + paddle.base.framework.Variable, ), ): input_var.name = f"_jst.{str(order).zfill(order_digit)}.{name_prefix}.{str(index)}" diff --git a/python/paddle/jit/dy2static/program_translator.py b/python/paddle/jit/dy2static/program_translator.py index 7fba47f400435..48c9889cd1507 100644 --- a/python/paddle/jit/dy2static/program_translator.py +++ b/python/paddle/jit/dy2static/program_translator.py @@ -27,8 +27,8 @@ param_guard, switch_to_static_graph, ) -from 
paddle.fluid.unique_name import UniqueNameGenerator -from paddle.fluid.unique_name import guard as UniqueNameGuard +from paddle.base.unique_name import UniqueNameGenerator +from paddle.base.unique_name import guard as UniqueNameGuard from paddle.framework import in_dynamic_mode from paddle.nn.layer import layers from paddle.utils import flatten, gast diff --git a/python/paddle/static/nn/control_flow.py b/python/paddle/static/nn/control_flow.py index 6226270e597e4..87f9ae321d6f8 100644 --- a/python/paddle/static/nn/control_flow.py +++ b/python/paddle/static/nn/control_flow.py @@ -16,6 +16,9 @@ from functools import partial, reduce import paddle +from paddle.base import core +from paddle.base.backward import _infer_var_data_type_shape_ +from paddle.base.framework import Operator, Program, Variable, static_only from paddle.common_ops_import import ( LayerHelper, check_type, @@ -23,9 +26,6 @@ convert_dtype, in_dygraph_mode, ) -from paddle.base import core -from paddle.base.backward import _infer_var_data_type_shape_ -from paddle.base.framework import Operator, Program, Variable, static_only from paddle.utils import ( assert_same_structure, copy_mutable_vars, @@ -595,7 +595,7 @@ def has_shape_diff(x_var, y_var): # input is not generated in While sub block and modified by in-place and only # belong to inplace ops in constructing program process, because in-place pass # is only available in Graph level. - with paddle.fluid.framework._stride_in_no_check_dy2st_diff(): + with paddle.base.framework._stride_in_no_check_dy2st_diff(): paddle.assign(input, output) diff --git a/python/paddle/static/nn/static_pylayer.py b/python/paddle/static/nn/static_pylayer.py index e5fd171d32663..bf6900906b523 100644 --- a/python/paddle/static/nn/static_pylayer.py +++ b/python/paddle/static/nn/static_pylayer.py @@ -13,10 +13,10 @@ # limitations under the License. +from paddle.base import core +from paddle.base.backward import _append_grad_suffix_ +from paddle.base.framework import Variable from paddle.common_ops_import import LayerHelper, check_type, in_dygraph_mode -from paddle.fluid import core -from paddle.fluid.backward import _append_grad_suffix_ -from paddle.fluid.framework import Variable from paddle.utils import flatten, map_structure # NOTE(MarioLulab): Borrowed from `python/paddle/static/nn/control_flow.py` @@ -238,7 +238,7 @@ def backward_fn(dy): ) ) - check_type(name, "name", (str, type(None)), "fluid.layers.static_pylayer") + check_type(name, "name", (str, type(None)), "base.layers.static_pylayer") helper = LayerHelper('static_pylayer', **locals()) copy_to_parent_func = lambda var: copy_var_to_parent_block(var, helper) diff --git a/test/auto_parallel/reshard_r_to_s.py b/test/auto_parallel/reshard_r_to_s.py index 690c42fa49283..6d69b24a8c97f 100644 --- a/test/auto_parallel/reshard_r_to_s.py +++ b/test/auto_parallel/reshard_r_to_s.py @@ -18,7 +18,7 @@ import paddle import paddle.distributed as dist -from paddle.fluid import core +from paddle.base import core class TestReshardRToS: diff --git a/test/custom_runtime/test_custom_op_setup.py b/test/custom_runtime/test_custom_op_setup.py index 5686b87afbbd8..f0f5d1cb505a6 100644 --- a/test/custom_runtime/test_custom_op_setup.py +++ b/test/custom_runtime/test_custom_op_setup.py @@ -133,7 +133,7 @@ def setUp(self): # [Why specific paddle_includes directory?] 
# Add paddle_includes to pass CI, for more details, - # please refer to the comments in `paddle/base/tests/custom_op/utils.py`` + # please refer to the comments in `paddle/tests/custom_op/utils.py`` paddle_includes = [] for site_packages_path in getsitepackages(): paddle_includes.append( diff --git a/test/legacy_test/test_detection.py b/test/legacy_test/test_detection.py index 8aec394e21803..99e0836c4c1f5 100644 --- a/test/legacy_test/test_detection.py +++ b/test/legacy_test/test_detection.py @@ -20,7 +20,7 @@ import paddle from paddle import base from paddle.base import core -from paddle.base.dygraph import base +from paddle.base.dygraph import base as imperative_base from paddle.base.framework import Program, program_guard paddle.enable_static() @@ -137,11 +137,11 @@ def test_generate_proposals(self): ) with self.dynamic_graph(): - scores_dy = base.to_variable(scores_np) - bbox_deltas_dy = base.to_variable(bbox_deltas_np) - im_info_dy = base.to_variable(im_info_np) - anchors_dy = base.to_variable(anchors_np) - variances_dy = base.to_variable(variances_np) + scores_dy = imperative_base.to_variable(scores_np) + bbox_deltas_dy = imperative_base.to_variable(bbox_deltas_np) + im_info_dy = imperative_base.to_variable(im_info_np) + anchors_dy = imperative_base.to_variable(anchors_np) + variances_dy = imperative_base.to_variable(variances_np) rois, roi_probs, rois_num = paddle.vision.ops.generate_proposals( scores_dy, bbox_deltas_dy, @@ -218,8 +218,8 @@ def test_distribute_fpn_proposals(self): output_stat_np.append(output_np) with self.dynamic_graph(): - rois_dy = base.to_variable(rois_np) - rois_num_dy = base.to_variable(rois_num_np) + rois_dy = imperative_base.to_variable(rois_np) + rois_num_dy = imperative_base.to_variable(rois_num_np) ( multi_rois_dy, restore_ind_dy, diff --git a/test/legacy_test/test_fused_layernorm_op.py b/test/legacy_test/test_fused_layernorm_op.py index 35c897483550a..cf8b73c57475a 100644 --- a/test/legacy_test/test_fused_layernorm_op.py +++ b/test/legacy_test/test_fused_layernorm_op.py @@ -532,7 +532,7 @@ def check_residual_bias_add(self, x_np, residual_np, bias_np, dtype): quant_min_bound=self.quant_min_bound, ) - exe = fluid.Executor(self.place) + exe = base.Executor(self.place) out_s = exe.run( feed={ "x_static": x_np.astype(dtype), diff --git a/test/legacy_test/test_get_all_op_or_kernel_names.py b/test/legacy_test/test_get_all_op_or_kernel_names.py index 275cdbf8c81da..55f0162ec041b 100644 --- a/test/legacy_test/test_get_all_op_or_kernel_names.py +++ b/test/legacy_test/test_get_all_op_or_kernel_names.py @@ -26,9 +26,9 @@ def test_phi_kernels(self): # sign kernel is removed from base and added into phi def test_base_kernels(self): - self.assertTrue(core._get_all_register_op_kernels('base')['reshape']) + self.assertTrue(core._get_all_register_op_kernels('fluid')['reshape']) with self.assertRaises(KeyError): - core._get_all_register_op_kernels('base')['sign'] + core._get_all_register_op_kernels('fluid')['sign'] def test_all_kernels(self): self.assertTrue(core._get_all_register_op_kernels('all')['reshape']) @@ -42,14 +42,13 @@ class TestGetAllOpNames(unittest.TestCase): def test_get_all_op_names(self): all_op_names = core.get_all_op_names() all_op_with_phi_kernels = core.get_all_op_names("phi") - all_op_with_base_kernels = core.get_all_op_names("base") + all_op_with_fluid_kernels = core.get_all_op_names("fluid") self.assertTrue( len(all_op_names) - > len(set(all_op_with_phi_kernels) | set(all_op_with_base_kernels)) + > len(set(all_op_with_phi_kernels) | 
set(all_op_with_fluid_kernels)) ) self.assertTrue("scale" in all_op_with_phi_kernels) - self.assertTrue("scale" in all_op_with_phi_kernels) if __name__ == '__main__': diff --git a/test/legacy_test/test_imperative_hook_for_layer.py b/test/legacy_test/test_imperative_hook_for_layer.py index 22935f97afc68..d5ce15c58233a 100644 --- a/test/legacy_test/test_imperative_hook_for_layer.py +++ b/test/legacy_test/test_imperative_hook_for_layer.py @@ -19,7 +19,7 @@ from paddle import base from paddle.base import core -from paddle.base.dygraph import base +from paddle.base.dygraph import base as imperative_base call_forward_post_hook = False call_forward_pre_hook = False @@ -78,9 +78,9 @@ def test_forward_hook_return_value(self): ) y_data = y_data.reshape((-1, 1)) - input = base.to_variable(input_word) - input1 = base.to_variable(input_word1) - y = base.to_variable(y_data) + input = imperative_base.to_variable(input_word) + input1 = imperative_base.to_variable(input_word1) + y = imperative_base.to_variable(y_data) simplenet = SimpleNet( hidden_size=20, @@ -161,8 +161,8 @@ def test_forward_hook(self): ) y_data = y_data.reshape((-1, 1)) - input = base.to_variable(input_word) - y = base.to_variable(y_data) + input = imperative_base.to_variable(input_word) + y = imperative_base.to_variable(y_data) simplenet = SimpleNet( hidden_size=20, diff --git a/test/legacy_test/test_inference_api.py b/test/legacy_test/test_inference_api.py index 86e3f5d43d4db..b6f5456ee4796 100644 --- a/test/legacy_test/test_inference_api.py +++ b/test/legacy_test/test_inference_api.py @@ -103,12 +103,12 @@ def get_sample_model(): def get_sample_model_cuda(data_type): - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) + place = base.CUDAPlace(0) + exe = base.Executor(place) - main_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(main_program, startup_program): + main_program = base.Program() + startup_program = base.Program() + with base.program_guard(main_program, startup_program): data = paddle.static.data( name="data", shape=[-1, 6, 64, 64], dtype=data_type ) diff --git a/test/legacy_test/test_layers.py b/test/legacy_test/test_layers.py index a4d7d22ab677a..33f9af2b1d163 100644 --- a/test/legacy_test/test_layers.py +++ b/test/legacy_test/test_layers.py @@ -24,8 +24,8 @@ import paddle import paddle.nn.functional as F from paddle import base -from paddle.base import core -from paddle.base.dygraph import base, to_variable +from paddle.base import core, dygraph +from paddle.base.dygraph import to_variable from paddle.base.framework import Program, default_main_program, program_guard from paddle.incubate.layers.nn import ( batch_fc, @@ -104,7 +104,7 @@ def forward(self, x, do_linear2=False): with self.dynamic_graph(): inp = np.ones([3, 3], dtype='float32') - x = base.to_variable(inp) + x = to_variable(inp) custom = CustomLayer(input_size=3, linear1_size=2) ret = custom(x, do_linear2=False) np.testing.assert_array_equal(ret.numpy().shape, [3, 2]) @@ -126,7 +126,7 @@ def test_dropout(self): feed={'data': inp}, fetch_list=[ret, ret2] ) with self.dynamic_graph(): - t = base.to_variable(inp) + t = to_variable(inp) dropout = paddle.nn.Dropout(p=0.35) dy_ret = dropout(t) dy_ret2 = paddle.nn.functional.dropout(t, p=0.35) @@ -153,7 +153,7 @@ def test_linear(self): feed={'data': inp}, fetch_list=[ret] )[0] with self.dynamic_graph(): - t = base.to_variable(inp) + t = to_variable(inp) linear = paddle.nn.Linear( 32, 4, @@ -242,7 +242,7 @@ def test_Flatten(self): feed={'data': inp}, fetch_list=[ret] )[0] 
with self.dynamic_graph(): - t = base.to_variable(inp) + t = to_variable(inp) flatten = paddle.nn.Flatten() dy_ret = flatten(t) dy_ret_value = dy_ret.numpy() @@ -291,7 +291,7 @@ def test_SyncBatchNorm(self): with self.dynamic_graph(): t = np.ones([3, 3, 5, 5], dtype='float32') my_syncbn = paddle.nn.SyncBatchNorm(3) - dy_ret = my_syncbn(base.to_variable(t)) + dy_ret = my_syncbn(to_variable(t)) dy_ret_value = dy_ret.numpy() np.testing.assert_array_equal(static_ret, dy_ret_value) @@ -305,7 +305,7 @@ def test_relu(self): with self.dynamic_graph(): t = np.ones([3, 3], dtype='float32') - dy_ret = F.relu(base.to_variable(t)) + dy_ret = F.relu(to_variable(t)) dy_ret_value = dy_ret.numpy() np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05) @@ -328,7 +328,7 @@ def test_matmul(self): with self.dynamic_graph(): t = np.ones([3, 3], dtype='float32') t2 = np.ones([3, 3], dtype='float32') - dy_ret = paddle.matmul(base.to_variable(t), base.to_variable(t2)) + dy_ret = paddle.matmul(to_variable(t), to_variable(t2)) dy_ret_value = dy_ret.numpy() np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05) @@ -431,7 +431,7 @@ def test_conv2d_transpose(self): 27, bias_attr=paddle.nn.initializer.Constant(value=1), ) - dy_rlt = conv2d_transpose(base.to_variable(inp_np)) + dy_rlt = conv2d_transpose(to_variable(inp_np)) dy_rlt = paddle.nn.functional.sigmoid(dy_rlt) dy_rlt_value = dy_rlt.numpy() np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05) @@ -450,8 +450,8 @@ def test_conv2d_transpose(self): [2, 2], weight_attr=weight_attr, ) - dy_ret1 = conv2d1(base.to_variable(images)) - dy_ret2 = conv2d2(base.to_variable(images)) + dy_ret1 = conv2d1(to_variable(images)) + dy_ret2 = conv2d2(to_variable(images)) self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())) conv2d1_weight_np = conv2d1.weight.numpy() @@ -464,8 +464,8 @@ def test_conv2d_transpose(self): conv2d1_weight_np, conv2d2.weight.numpy() ) conv2d2.bias.set_value(conv2d1_bias) - dy_ret1 = conv2d1(base.to_variable(images)) - dy_ret2 = conv2d2(base.to_variable(images)) + dy_ret1 = conv2d1(to_variable(images)) + dy_ret2 = conv2d2(to_variable(images)) np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy()) conv2d2.weight = conv2d1.weight @@ -537,14 +537,18 @@ def test_bilinear_tensor_product(self): 6, bias_attr=paddle.nn.initializer.Constant(value=1), ) - dy_rlt = btp(base.to_variable(inp_np_x), base.to_variable(inp_np_y)) + dy_rlt = btp( + to_variable(inp_np_x), + to_variable(inp_np_y), + ) dy_rlt = paddle.nn.functional.sigmoid(dy_rlt) dy_rlt_value = dy_rlt.numpy() with self.dynamic_graph(): btp2 = paddle.nn.Bilinear(3, 3, 6) dy_rlt2 = btp2( - base.to_variable(inp_np_x), base.to_variable(inp_np_y) + to_variable(inp_np_x), + to_variable(inp_np_y), ) dy_rlt2 = paddle.nn.functional.sigmoid(dy_rlt2) dy_rlt2_value = dy_rlt2.numpy() @@ -576,21 +580,25 @@ def test_bilinear_tensor_product(self): btp1 = paddle.nn.Bilinear(3, 3, 6) btp2 = paddle.nn.Bilinear(3, 3, 6, weight_attr=weight_attr) dy_rlt1 = btp1( - base.to_variable(inp_np_x), base.to_variable(inp_np_y) + to_variable(inp_np_x), + to_variable(inp_np_y), ) dy_rlt1 = paddle.nn.functional.sigmoid(dy_rlt1) dy_rlt2 = btp2( - base.to_variable(inp_np_x), base.to_variable(inp_np_y) + to_variable(inp_np_x), + to_variable(inp_np_y), ) dy_rlt2 = paddle.nn.functional.sigmoid(dy_rlt2) self.assertFalse(np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())) btp2.weight.set_value(btp1.weight.numpy()) btp2.bias.set_value(btp1.bias) dy_rlt1 = btp1( - base.to_variable(inp_np_x), 
base.to_variable(inp_np_y) + to_variable(inp_np_x), + to_variable(inp_np_y), ) dy_rlt2 = btp2( - base.to_variable(inp_np_x), base.to_variable(inp_np_y) + to_variable(inp_np_x), + to_variable(inp_np_y), ) np.testing.assert_array_equal(dy_rlt1.numpy(), dy_rlt2.numpy()) @@ -634,7 +642,7 @@ def test_embeding(self): emb2 = paddle.nn.Embedding( dict_size, 32, weight_attr='emb.w', sparse=False ) - dy_rlt = emb2(base.to_variable(inp_word)) + dy_rlt = emb2(to_variable(inp_word)) dy_rlt_value = dy_rlt.numpy() np.testing.assert_allclose(static_rlt2[0], static_rlt) @@ -649,13 +657,13 @@ def test_embeding(self): emb2 = paddle.nn.Embedding( dict_size, 32, weight_attr=weight_attr, sparse=False ) - rep1 = emb1(base.to_variable(inp_word)) - rep2 = emb2(base.to_variable(inp_word)) + rep1 = emb1(to_variable(inp_word)) + rep2 = emb2(to_variable(inp_word)) self.assertFalse(np.array_equal(emb1.weight.numpy(), custom_weight)) np.testing.assert_array_equal(emb2.weight.numpy(), custom_weight) self.assertFalse(np.array_equal(rep1.numpy(), rep2.numpy())) emb2.weight.set_value(emb1.weight.numpy()) - rep2 = emb2(base.to_variable(inp_word)) + rep2 = emb2(to_variable(inp_word)) np.testing.assert_array_equal(rep1.numpy(), rep2.numpy()) emb2.weight = emb1.weight @@ -731,7 +739,7 @@ def test_conv3d(self): conv3d = paddle.nn.Conv3D( in_channels=3, out_channels=3, kernel_size=2 ) - dy_ret = conv3d(base.to_variable(images)) + dy_ret = conv3d(to_variable(images)) dy_rlt_value = dy_ret.numpy() np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05) @@ -752,8 +760,8 @@ def test_conv3d(self): kernel_size=2, weight_attr=weight_attr, ) - dy_ret1 = conv3d1(base.to_variable(images)) - dy_ret2 = conv3d2(base.to_variable(images)) + dy_ret1 = conv3d1(to_variable(images)) + dy_ret2 = conv3d2(to_variable(images)) self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())) conv3d1_weight_np = conv3d1.weight.numpy() @@ -766,8 +774,8 @@ def test_conv3d(self): conv3d1_weight_np, conv3d2.weight.numpy() ) conv3d1.bias.set_value(conv3d1_bias) - dy_ret1 = conv3d1(base.to_variable(images)) - dy_ret2 = conv3d2(base.to_variable(images)) + dy_ret1 = conv3d1(to_variable(images)) + dy_ret2 = conv3d2(to_variable(images)) np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy()) conv3d2.weight = conv3d1.weight @@ -837,7 +845,7 @@ def test_group_norm(self): weight_attr=paddle.nn.initializer.Uniform(low=-0.5, high=0.5), bias_attr=paddle.nn.initializer.Constant(value=1), ) - dy_ret = groupNorm(base.to_variable(input)) + dy_ret = groupNorm(to_variable(input)) dy_rlt_value = dy_ret.numpy() np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05) @@ -870,12 +878,12 @@ def test_instance_norm(self): with self.dynamic_graph(): instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1]) - dy_ret = instanceNorm(base.to_variable(input)) + dy_ret = instanceNorm(to_variable(input)) dy_rlt_value = dy_ret.numpy() with self.dynamic_graph(): instanceNorm = paddle.nn.InstanceNorm2D(num_features=shape[1]) - dy_ret = instanceNorm(base.to_variable(input)) + dy_ret = instanceNorm(to_variable(input)) dy_rlt_value2 = dy_ret.numpy() np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05) @@ -943,7 +951,7 @@ def test_spectral_norm(self): with self.dynamic_graph(): spectralNorm = paddle.nn.SpectralNorm(shape, dim=1, power_iters=2) - dy_ret = spectralNorm(base.to_variable(input)) + dy_ret = spectralNorm(to_variable(input)) dy_rlt_value = dy_ret.numpy() np.testing.assert_allclose(static_ret, dy_rlt_value, rtol=1e-05) @@ -979,7 +987,7 @@ def 
test_conv3d_transpose(self): conv3d_transpose = paddle.nn.Conv3DTranspose( in_channels=3, out_channels=12, kernel_size=12 ) - dy_rlt = conv3d_transpose(base.to_variable(input_array)) + dy_rlt = conv3d_transpose(to_variable(input_array)) dy_rlt_value = dy_rlt.numpy() np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05) np.testing.assert_allclose(dy_rlt_value, static_rlt, rtol=1e-05) @@ -1003,8 +1011,8 @@ def test_conv3d_transpose(self): weight_attr=weight_attr, bias_attr='conv3d2_b', ) - dy_ret1 = conv3d1(base.to_variable(images)) - dy_ret2 = conv3d2(base.to_variable(images)) + dy_ret1 = conv3d1(to_variable(images)) + dy_ret2 = conv3d2(to_variable(images)) self.assertFalse(np.array_equal(dy_ret1.numpy(), dy_ret2.numpy())) conv3d1_weight_np = conv3d1.weight.numpy() @@ -1017,8 +1025,8 @@ def test_conv3d_transpose(self): conv3d1_weight_np, conv3d2.weight.numpy() ) conv3d1.bias.set_value(conv3d1_bias) - dy_ret1 = conv3d1(base.to_variable(images)) - dy_ret2 = conv3d2(base.to_variable(images)) + dy_ret1 = conv3d1(to_variable(images)) + dy_ret2 = conv3d2(to_variable(images)) np.testing.assert_array_equal(dy_ret1.numpy(), dy_ret2.numpy()) conv3d2.weight = conv3d1.weight @@ -1083,8 +1091,8 @@ def test_compare(self): feed={"a": value_a, "b": value_b}, fetch_list=[cond] )[0] with self.dynamic_graph(): - da = base.to_variable(value_a) - db = base.to_variable(value_b) + da = to_variable(value_a) + db = to_variable(value_b) dcond = paddle.less_than(x=da, y=db) for i in range(len(static_ret)): @@ -1099,8 +1107,8 @@ def test_compare(self): feed={"a1": value_a, "b1": value_b}, fetch_list=[cond1] )[0] with self.dynamic_graph(): - da1 = base.to_variable(value_a) - db1 = base.to_variable(value_b) + da1 = to_variable(value_a) + db1 = to_variable(value_b) dcond1 = paddle.less_equal(x=da1, y=db1) for i in range(len(static_ret1)): @@ -1115,8 +1123,8 @@ def test_compare(self): feed={"a2": value_a, "b2": value_b}, fetch_list=[cond2] )[0] with self.dynamic_graph(): - da2 = base.to_variable(value_a) - db2 = base.to_variable(value_b) + da2 = to_variable(value_a) + db2 = to_variable(value_b) dcond2 = paddle.greater_than(x=da2, y=db2) for i in range(len(static_ret2)): @@ -1131,8 +1139,8 @@ def test_compare(self): feed={"a3": value_a, "b3": value_b}, fetch_list=[cond3] )[0] with self.dynamic_graph(): - da3 = base.to_variable(value_a) - db3 = base.to_variable(value_b) + da3 = to_variable(value_a) + db3 = to_variable(value_b) dcond3 = paddle.greater_equal(x=da3, y=db3) for i in range(len(static_ret3)): @@ -1147,8 +1155,8 @@ def test_compare(self): feed={"a4": value_a, "b4": value_b}, fetch_list=[cond4] )[0] with self.dynamic_graph(): - da4 = base.to_variable(value_a) - db4 = base.to_variable(value_b) + da4 = to_variable(value_a) + db4 = to_variable(value_b) dcond4 = paddle.equal(x=da4, y=db4) for i in range(len(static_ret4)): @@ -1163,8 +1171,8 @@ def test_compare(self): feed={"a5": value_a, "b5": value_b}, fetch_list=[cond5] )[0] with self.dynamic_graph(): - da5 = base.to_variable(value_a) - db5 = base.to_variable(value_b) + da5 = to_variable(value_a) + db5 = to_variable(value_b) dcond5 = paddle.equal(x=da5, y=db5) for i in range(len(static_ret5)): @@ -1436,8 +1444,8 @@ def test_accuracy(self): ) with self.dynamic_graph(force_to_use_cpu=True): - data = base.to_variable(x) - label = base.to_variable(y) + data = to_variable(x) + label = to_variable(y) data_new = paddle.reshape(data, [3, 32 * 32]) fc_out = paddle.nn.Linear(32 * 32, 10)(data_new) predict = paddle.nn.functional.softmax(fc_out) @@ -1536,8 +1544,8 @@ 
def _get_np_data(self, shape, dtype, append_batch_size=True): def _get_data( self, name, shape, dtype, set_feed_dict=True, append_batch_size=True ): - if base.enabled(): - return base.to_variable( + if dygraph.base.enabled(): + return to_variable( value=self._get_np_data(shape, dtype, append_batch_size), name=name, zero_copy=False, diff --git a/test/legacy_test/test_lu_unpack_op.py b/test/legacy_test/test_lu_unpack_op.py index 9c6486f29cd6e..bfac8781076bf 100644 --- a/test/legacy_test/test_lu_unpack_op.py +++ b/test/legacy_test/test_lu_unpack_op.py @@ -317,7 +317,7 @@ def run_lu_static(shape, dtype): class TestLU_UnpackAPIError(unittest.TestCase): def test_errors_1(self): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): # The size of input in lu should not be 0. def test_x_size(): x = paddle.to_tensor( @@ -337,7 +337,7 @@ def test_x_size(): self.assertRaises(ValueError, test_x_size) def test_errors_2(self): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): # The size of input in lu should not be 0. def test_y_size(): x = paddle.to_tensor( @@ -357,7 +357,7 @@ def test_y_size(): self.assertRaises(ValueError, test_y_size) def test_errors_3(self): - with paddle.fluid.dygraph.guard(): + with paddle.base.dygraph.guard(): # The size of input in lu should not be 0. def test_y_data(): x = paddle.to_tensor( diff --git a/test/prim/new_ir_prim/test_vjp_prim.py b/test/prim/new_ir_prim/test_vjp_prim.py index 22309a08823ec..c6244892bfa5a 100644 --- a/test/prim/new_ir_prim/test_vjp_prim.py +++ b/test/prim/new_ir_prim/test_vjp_prim.py @@ -16,7 +16,7 @@ import paddle from paddle import ir -from paddle.fluid.core import call_vjp +from paddle.base.core import call_vjp paddle.enable_static() From dd79a4a151b19a715ad389cdb0eec386547f557a Mon Sep 17 00:00:00 2001 From: Difers <707065510@qq.com> Date: Tue, 22 Aug 2023 21:43:26 +0800 Subject: [PATCH 05/10] fix cmake depends --- cmake/generic.cmake | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cmake/generic.cmake b/cmake/generic.cmake index 4b97d639eca4d..0beaa74107547 100644 --- a/cmake/generic.cmake +++ b/cmake/generic.cmake @@ -1101,8 +1101,7 @@ function(py_proto_compile TARGET_NAME) COMMENT "Replace ${py_src}") endforeach() - add_custom_target(${TARGET_NAME} ALL DEPENDS ${py_srcs} protobuf - ${TARGET_NAME}_replace) + add_custom_target(${TARGET_NAME} ALL DEPENDS protobuf ${TARGET_NAME}_replace) endfunction() function(py_test TARGET_NAME) From 87e1a3c81cafbbeedb9dc889497ac499cf5569bc Mon Sep 17 00:00:00 2001 From: Difers <707065510@qq.com> Date: Thu, 24 Aug 2023 17:59:08 +0800 Subject: [PATCH 06/10] fix some error --- cmake/generic.cmake | 1 + paddle/scripts/paddle_build.sh | 2 + python/paddle/base/executor.py | 2 +- python/paddle/base/framework.py | 16 +++---- python/paddle/new_ir_utils.py | 18 ++++---- test/legacy_test/test_static_pylayer.py | 44 +++++++++---------- test/legacy_test/test_static_pylayer_block.py | 10 ++--- test/quantization/test_llm_int8_linear.py | 16 +++---- test/quantization/test_weight_only_linear.py | 16 +++---- test/xpu/test_inverse_op_xpu.py | 14 +++--- 10 files changed, 71 insertions(+), 68 deletions(-) diff --git a/cmake/generic.cmake b/cmake/generic.cmake index 0beaa74107547..32840a68330c2 100644 --- a/cmake/generic.cmake +++ b/cmake/generic.cmake @@ -1094,6 +1094,7 @@ function(py_proto_compile TARGET_NAME) foreach(py_src ${py_srcs}) add_custom_command( TARGET ${TARGET_NAME}_replace + POST_BUILD COMMAND ${PYTHON_EXECUTABLE} ${PADDLE_SOURCE_DIR}/cmake/replace_string.py 
${py_src} COMMENT diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index a788171bdad1e..230f8e64f1b72 100644 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -3285,6 +3285,8 @@ function build_pr_and_develop() { mv ${PADDLE_ROOT}/dist/*.whl ${PADDLE_ROOT}/build/python/dist/ mkdir ${PADDLE_ROOT}/build/dev_whl && cp ${PADDLE_ROOT}/build/python/dist/*.whl ${PADDLE_ROOT}/build/dev_whl fi + # may be remove it later + rm -rf ${PADDLE_ROOT}/build/third_party generate_api_spec "$1" "DEV" } diff --git a/python/paddle/base/executor.py b/python/paddle/base/executor.py index e3c20efa706d6..830590d3394c7 100755 --- a/python/paddle/base/executor.py +++ b/python/paddle/base/executor.py @@ -1915,7 +1915,7 @@ def _run_new_ir_impl( else: error_info = ( "There are no operators in the program to be executed. " - "If you pass Program manually, please use fluid.program_guard " + "If you pass Program manually, please use base.program_guard " "to ensure the current Program is being used." ) warnings.warn(error_info) diff --git a/python/paddle/base/framework.py b/python/paddle/base/framework.py index 476658aa37c75..2fe4b5ce4a104 100644 --- a/python/paddle/base/framework.py +++ b/python/paddle/base/framework.py @@ -445,10 +445,10 @@ def require_version(min_version, max_version=None): ) version_installed = [ - base_version.major, - base_version.minor, - base_version.patch, - base_version.rc, + fluid_version.major, + fluid_version.minor, + fluid_version.patch, + fluid_version.rc, ] zero_version = ['0', '0', '0', '0'] @@ -466,14 +466,14 @@ def version_cmp(ver_a, ver_b): "PaddlePaddle version in [%s, %s] required, but %s installed. " "Maybe you are using a develop version, " "please make sure the version is good with your code." - % (min_version, max_version, base_version.full_version) + % (min_version, max_version, fluid_version.full_version) ) else: warnings.warn( "PaddlePaddle version %s or higher is required, but %s installed, " "Maybe you are using a develop version, " "please make sure the version is good with your code." - % (min_version, base_version.full_version) + % (min_version, fluid_version.full_version) ) return @@ -494,14 +494,14 @@ def version_cmp(ver_a, ver_b): ): raise Exception( "VersionError: PaddlePaddle version in [%s, %s] required, but %s installed." - % (min_version, max_version, base_version.full_version) + % (min_version, max_version, fluid_version.full_version) ) else: if version_cmp(version_installed, min_version_to_check) < 0: raise Exception( "VersionError: PaddlePaddle version %s or higher is required, but %s installed, " "please upgrade your PaddlePaddle to %s or other higher version." 
- % (min_version, base_version.full_version, min_version) + % (min_version, fluid_version.full_version, min_version) ) diff --git a/python/paddle/new_ir_utils.py b/python/paddle/new_ir_utils.py index 83c9b5f826d8d..0a72274a0bf02 100644 --- a/python/paddle/new_ir_utils.py +++ b/python/paddle/new_ir_utils.py @@ -15,23 +15,23 @@ import paddle -from .fluid.wrapped_decorator import signature_safe_contextmanager +from .base.wrapped_decorator import signature_safe_contextmanager class IrChange: def __init__(self): - old_flag = paddle.fluid.framework.get_flags("FLAGS_enable_new_ir_api") - paddle.fluid.framework.set_flags({"FLAGS_enable_new_ir_api": False}) + old_flag = paddle.base.framework.get_flags("FLAGS_enable_new_ir_api") + paddle.base.framework.set_flags({"FLAGS_enable_new_ir_api": False}) if not paddle.ir.core._use_new_ir_api(): self.old_Program = paddle.static.Program - self.old_program_guard = paddle.fluid.program_guard + self.old_program_guard = paddle.base.program_guard self.old_default_main_program = paddle.static.default_main_program else: raise RuntimeError( "IrChange only init when paddle.ir.core._use_new_ir_api() is false, \ please set FLAGS_enable_new_ir_api = false" ) - paddle.fluid.framework.set_flags(old_flag) + paddle.base.framework.set_flags(old_flag) def _switch_to_new_ir(self): if paddle.ir.core._use_new_ir_api(): @@ -40,8 +40,8 @@ def _switch_to_new_ir(self): ) paddle.ir.register_paddle_dialect() paddle.static.Program = paddle.ir.Program - paddle.fluid.Program = paddle.ir.Program - paddle.fluid.program_guard = paddle.ir.core.program_guard + paddle.base.Program = paddle.ir.Program + paddle.base.program_guard = paddle.ir.core.program_guard paddle.static.program_guard = paddle.ir.core.program_guard paddle.framework.default_main_program = ( paddle.ir.core.default_main_program @@ -53,8 +53,8 @@ def _switch_to_old_ir(self): {"FLAGS_enable_new_ir_in_executor": False} ) paddle.static.Program = self.old_Program - paddle.fluid.Program = self.old_Program - paddle.fluid.program_guard = self.old_program_guard + paddle.base.Program = self.old_Program + paddle.base.program_guard = self.old_program_guard paddle.static.program_guard = self.old_program_guard paddle.framework.default_main_program = ( self.old_default_main_program diff --git a/test/legacy_test/test_static_pylayer.py b/test/legacy_test/test_static_pylayer.py index 2cd61de9517df..d62bfb2fd2afa 100644 --- a/test/legacy_test/test_static_pylayer.py +++ b/test/legacy_test/test_static_pylayer.py @@ -17,10 +17,10 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.backward import append_backward -from paddle.fluid.framework import Program, program_guard +from paddle import base +from paddle.base import core +from paddle.base.backward import append_backward +from paddle.base.framework import Program, program_guard np.random.seed(123) @@ -45,11 +45,11 @@ def forward_fn(x): out = paddle.static.nn.static_pylayer(forward_fn, [data]) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) x = np.array([2.0], dtype=np.float32) (ret,) = exe.run(main_program, feed={"X": x}, fetch_list=[out.name]) np.testing.assert_allclose( @@ -76,11 +76,11 @@ def forward_fn(x): out = paddle.static.nn.static_pylayer(forward_fn, [data]) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - 
exe = fluid.Executor(place) + exe = base.Executor(place) (ret,) = exe.run(main_program, fetch_list=[out.name]) np.testing.assert_allclose( np.asarray(ret), np.array(6.0, np.float32), rtol=1e-05 @@ -114,11 +114,11 @@ def backward_fn(dy): append_backward(out) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) ret, x_grad = exe.run( main_program, fetch_list=[out.name, data.grad_name] ) @@ -146,11 +146,11 @@ def forward_fn(a, b): ) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) ret_1, ret_2 = exe.run( main_program, fetch_list=[out_1.name, out_2.name] ) @@ -184,11 +184,11 @@ def forward_fn(x): out = paddle.static.nn.static_pylayer(forward_fn, [data]) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) exe.run(main_program) self.assertIsNone(out) @@ -284,11 +284,11 @@ def backward_fn(diout, daout): append_backward(loss) place = ( - fluid.CUDAPlace(0) + base.CUDAPlace(0) if core.is_compiled_with_cuda() - else fluid.CPUPlace() + else base.CPUPlace() ) - exe = fluid.Executor(place) + exe = base.Executor(place) for feed_i in range(0, 10): print(feed_i) expected_a = 2.0 * feed_i diff --git a/test/legacy_test/test_static_pylayer_block.py b/test/legacy_test/test_static_pylayer_block.py index 060ea4d22d05c..b91125d47bffa 100644 --- a/test/legacy_test/test_static_pylayer_block.py +++ b/test/legacy_test/test_static_pylayer_block.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle import fluid -from paddle.fluid import core +from paddle import base +from paddle.base import core from paddle.static import Executor, append_backward from paddle.static.nn.static_pylayer import StaticPyLayerBlock @@ -26,9 +26,9 @@ class StaticPyLayerBlockTest(unittest.TestCase): def test_forward_and_backward(self): paddle.enable_static() - main_program = fluid.Program() - startup_program = fluid.Program() - with fluid.program_guard(main_program, startup_program): + main_program = base.Program() + startup_program = base.Program() + with base.program_guard(main_program, startup_program): data = paddle.static.data(name='X', shape=[10, 1], dtype='float32') data.stop_gradient = False static_pylayer_manager = StaticPyLayerBlock(inputs=[data]) diff --git a/test/quantization/test_llm_int8_linear.py b/test/quantization/test_llm_int8_linear.py index b26285c3049f0..e79b802d37243 100644 --- a/test/quantization/test_llm_int8_linear.py +++ b/test/quantization/test_llm_int8_linear.py @@ -19,9 +19,9 @@ import paddle import paddle.nn.quant as Q -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.framework import default_main_program +from paddle import base +from paddle.base import core +from paddle.base.framework import default_main_program from paddle.framework import set_default_dtype np.random.seed(123) @@ -53,7 +53,7 @@ def setUp(self): x = np.random.random((self.batch, self.token, self.in_features)) self.x = paddle.to_tensor(x, dtype=self.dtype) if self.bias: - bias_attr = fluid.ParamAttr( + bias_attr = base.ParamAttr( trainable=False, regularizer=None, initializer=paddle.nn.initializer.Constant(value=1.0), @@ -88,9 +88,9 @@ def get_llm_int8_linear_out(self): def 
get_llm_int8_linear_out_static(self): paddle.enable_static() - main = fluid.Program() - start = fluid.Program() - with fluid.program_guard(main, start): + main = base.Program() + start = base.Program() + with base.program_guard(main, start): x = paddle.static.data("x", self.x.shape, dtype=self.x.dtype) weight = paddle.static.data( @@ -126,7 +126,7 @@ def get_llm_int8_linear_out_static(self): 'bias': bias_np, "weight_scale": weight_scale_np, } - exe = fluid.Executor(paddle.CUDAPlace(0)) + exe = base.Executor(paddle.CUDAPlace(0)) exe.run(start) (out,) = exe.run(main, feed=feed_dict, fetch_list=[out]) paddle.disable_static() diff --git a/test/quantization/test_weight_only_linear.py b/test/quantization/test_weight_only_linear.py index 6c30c13ec21d1..3e40f4d64d36a 100644 --- a/test/quantization/test_weight_only_linear.py +++ b/test/quantization/test_weight_only_linear.py @@ -21,9 +21,9 @@ import paddle import paddle.nn.quant as Q -from paddle import fluid -from paddle.fluid import core -from paddle.fluid.framework import default_main_program +from paddle import base +from paddle.base import core +from paddle.base.framework import default_main_program from paddle.framework import set_default_dtype np.random.seed(123) @@ -80,7 +80,7 @@ def setUp(self): x = np.random.random((self.batch, self.token, self.in_features)) self.x = paddle.to_tensor(x, dtype=self.dtype) if self.bias: - bias_attr = fluid.ParamAttr( + bias_attr = base.ParamAttr( trainable=False, regularizer=None, initializer=paddle.nn.initializer.Constant(value=1.0), @@ -118,9 +118,9 @@ def get_weight_only_linear_out(self): def get_weight_only_linear_out_static(self): paddle.enable_static() - main = fluid.Program() - start = fluid.Program() - with fluid.program_guard(main, start): + main = base.Program() + start = base.Program() + with base.program_guard(main, start): x = paddle.static.data("x", self.x.shape, dtype=self.x.dtype) weight = paddle.static.data( @@ -156,7 +156,7 @@ def get_weight_only_linear_out_static(self): 'bias': bias_np, "weight_scale": weight_scale_np, } - exe = fluid.Executor(paddle.CUDAPlace(0)) + exe = base.Executor(paddle.CUDAPlace(0)) exe.run(start) (out,) = exe.run(main, feed=feed_dict, fetch_list=[out]) paddle.disable_static() diff --git a/test/xpu/test_inverse_op_xpu.py b/test/xpu/test_inverse_op_xpu.py index 8ddd4636372a8..911d1974bf5d6 100644 --- a/test/xpu/test_inverse_op_xpu.py +++ b/test/xpu/test_inverse_op_xpu.py @@ -23,7 +23,7 @@ from op_test_xpu import XPUOpTest import paddle -from paddle import fluid +from paddle import base paddle.enable_static() @@ -76,10 +76,10 @@ def set_shape(self): class TestInverseSingularAPI(unittest.TestCase): def setUp(self): - self.places = [fluid.XPUPlace(0)] + self.places = [base.XPUPlace(0)] def check_static_result(self, place): - with fluid.program_guard(fluid.Program(), fluid.Program()): + with base.program_guard(base.Program(), base.Program()): input = paddle.static.data( name="input", shape=[4, 4], dtype="float32" ) @@ -87,10 +87,10 @@ def check_static_result(self, place): input_np = np.ones([4, 4]).astype("float32") - exe = fluid.Executor(place) + exe = base.Executor(place) with self.assertRaises(OSError): fetches = exe.run( - fluid.default_main_program(), + base.default_main_program(), feed={"input": input_np}, fetch_list=[result], ) @@ -101,9 +101,9 @@ def test_static(self): def test_dygraph(self): for place in self.places: - with fluid.dygraph.guard(place): + with base.dygraph.guard(place): input_np = np.ones([4, 4]).astype("float32") - input = 
fluid.dygraph.to_variable(input_np) + input = base.dygraph.to_variable(input_np) with self.assertRaises(OSError): result = paddle.inverse(input) From 0c402d7b6a882810047cc6433d1d75edc8a18d69 Mon Sep 17 00:00:00 2001 From: Difers <707065510@qq.com> Date: Thu, 24 Aug 2023 23:06:43 +0800 Subject: [PATCH 07/10] try to fix cache error --- paddle/scripts/paddle_build.sh | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index 230f8e64f1b72..1df1b2a87763d 100644 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -3275,6 +3275,11 @@ function build_pr_and_develop() { rm -rf ${PADDLE_ROOT}/build/Makefile ${PADDLE_ROOT}/build/CMakeCache.txt ${PADDLE_ROOT}/build/build.ninja rm -rf ${PADDLE_ROOT}/build/third_party fi + # may be remove it later + mkdir -p ${PADDLE_ROOT}/pr && cp -r ${PADDLE_ROOT}/build/pr_whl ${PADDLE_ROOT}/pr + rm -rf ${PADDLE_ROOT}/build && mkdir -p ${PADDLE_ROOT}/build + mv ${PADDLE_ROOT}/pr/pr_whl ${PADDLE_ROOT}/build + cd ${PADDLE_ROOT}/build git checkout -b develop_base_pr upstream/$BRANCH git submodule update --init @@ -3285,8 +3290,7 @@ function build_pr_and_develop() { mv ${PADDLE_ROOT}/dist/*.whl ${PADDLE_ROOT}/build/python/dist/ mkdir ${PADDLE_ROOT}/build/dev_whl && cp ${PADDLE_ROOT}/build/python/dist/*.whl ${PADDLE_ROOT}/build/dev_whl fi - # may be remove it later - rm -rf ${PADDLE_ROOT}/build/third_party + generate_api_spec "$1" "DEV" } From b7db312cabf28f5ce0c14854b214b16f13523784 Mon Sep 17 00:00:00 2001 From: Difers <707065510@qq.com> Date: Sat, 26 Aug 2023 00:39:38 +0800 Subject: [PATCH 08/10] fix codestyle && some doc error --- python/paddle/dataset/uci_housing.py | 4 +- .../auto_parallel/static/auto_align_tool.py | 4 +- .../auto_parallel/static/completion.py | 2 +- .../auto_parallel/static/engine.py | 2 +- .../static/tuner/rule_based_tuner.py | 6 +-- .../communication/stream/all_gather.py | 2 +- .../communication/stream/all_reduce.py | 2 +- .../communication/stream/all_to_all.py | 2 +- .../communication/stream/broadcast.py | 2 +- .../distributed/communication/stream/recv.py | 2 +- .../communication/stream/reduce.py | 2 +- .../communication/stream/reduce_scatter.py | 2 +- .../communication/stream/scatter.py | 2 +- .../distributed/communication/stream/send.py | 2 +- .../fleet/base/distributed_strategy.py | 4 +- .../distributed/fleet/base/role_maker.py | 2 +- .../distributed/fleet/layers/mpu/mp_layers.py | 2 +- .../distributed/fleet/layers/mpu/mp_ops.py | 2 +- .../distributed/fleet/layers/mpu/random.py | 2 +- .../fleet/meta_optimizers/dgc_optimizer.py | 2 +- .../sharding/group_sharded_stage3.py | 2 +- .../sharding/group_sharded_utils.py | 2 +- .../fleet/recompute/recompute_hybrid.py | 2 +- python/paddle/distributed/fleet/scaler.py | 2 +- .../fleet/utils/hybrid_parallel_inference.py | 3 +- .../fleet/utils/hybrid_parallel_util.py | 6 +-- .../fleet/utils/mix_precision_utils.py | 6 +-- .../fleet/utils/sequence_parallel_utils.py | 2 +- .../fleet/utils/tensor_parallel_utils.py | 2 +- .../distributed/launch/context/device.py | 3 +- .../paddle/distributed/parallel_with_gloo.py | 7 ++-- .../distributed/passes/auto_parallel_amp.py | 2 +- .../passes/auto_parallel_pipeline.py | 4 +- .../passes/auto_parallel_recompute.py | 2 +- .../paddle/distributed/passes/pass_utils.py | 4 +- .../distributed/passes/pipeline_pass_base.py | 2 +- .../distributed/passes/ps_trainer_pass.py | 2 +- .../ps/utils/collective_transpiler.py | 2 +- .../ps/utils/ps_program_builder.py | 12 ++---- 
python/paddle/distributed/ps/utils/public.py | 2 +- python/paddle/distributed/rpc/rpc.py | 2 +- python/paddle/distributed/spawn.py | 7 ++-- .../distributed/transpiler/collective.py | 2 +- python/paddle/distribution/bernoulli.py | 2 +- python/paddle/distribution/categorical.py | 2 +- python/paddle/distribution/cauchy.py | 2 +- python/paddle/distribution/dirichlet.py | 2 +- python/paddle/distribution/geometric.py | 2 +- python/paddle/distribution/gumbel.py | 2 +- python/paddle/distribution/laplace.py | 2 +- python/paddle/distribution/normal.py | 2 +- python/paddle/distribution/uniform.py | 2 +- python/paddle/hapi/model.py | 8 ++-- python/paddle/hapi/model_summary.py | 4 +- python/paddle/incubate/autograd/primreg.py | 8 ++-- .../paddle/incubate/distributed/fleet/base.py | 2 +- .../incubate/distributed/fleet/collective.py | 2 +- .../incubate/distributed/fleet/fleet_util.py | 2 +- .../incubate/distributed/fleet/utils.py | 2 +- .../distributed/utils/io/dist_load.py | 2 +- .../distributed/utils/io/dist_save.py | 2 +- .../distributed/utils/io/save_for_auto.py | 2 +- .../nn/functional/fused_dropout_add.py | 2 +- .../paddle/jit/dy2static/convert_operators.py | 5 +-- python/paddle/nn/clip.py | 2 +- python/paddle/nn/functional/common.py | 2 +- python/paddle/nn/functional/conv.py | 6 +-- python/paddle/nn/functional/extension.py | 2 +- python/paddle/nn/functional/input.py | 2 +- python/paddle/nn/functional/loss.py | 2 +- python/paddle/nn/functional/vision.py | 4 +- python/paddle/nn/layer/layers.py | 4 +- python/paddle/nn/layer/rnn.py | 2 +- python/paddle/signal.py | 2 +- python/paddle/sparse/unary.py | 8 +--- python/paddle/static/nn/common.py | 10 ++--- python/paddle/tensor/array.py | 2 +- python/paddle/tensor/attribute.py | 2 +- .../paddle/tensor/layer_function_generator.py | 2 +- python/paddle/tensor/linalg.py | 6 +-- python/paddle/tensor/logic.py | 2 +- python/paddle/tensor/math.py | 2 +- python/paddle/tensor/random.py | 2 +- python/paddle/tensor/stat.py | 2 +- test/asp/test_fleet_with_asp_dynamic.py | 2 +- test/auto_parallel/test_dist_attr_v2.py | 2 +- test/auto_parallel/test_dist_matmul.py | 2 +- test/auto_parallel/test_dist_op_cost.py | 4 +- test/auto_parallel/test_dist_pnorm.py | 2 +- test/auto_parallel/test_pass_bf16.py | 2 +- test/auto_parallel/test_prim_dist_op.py | 2 +- test/auto_parallel/test_serialization.py | 4 +- test/auto_parallel/test_shard_tensor_api.py | 2 +- .../fleet/auto_parallel_parallelizer.py | 2 +- .../fleet/dygraph_group_sharded_stage3.py | 2 +- .../dygraph_group_sharded_stage3_offload.py | 2 +- test/collective/fleet/test_auto_checkpoint.py | 2 +- .../collective/fleet/test_fleet_checkpoint.py | 2 +- ...est_fleet_gradient_merge_meta_optimizer.py | 4 +- ...perative_auto_mixed_precision_for_eager.py | 4 +- test/collective/process_group_mpi.py | 4 +- .../test_distribution_bernoulli.py | 2 +- test/distribution/test_distribution_cauchy.py | 2 +- test/dygraph_to_static/test_bert.py | 4 +- test/dygraph_to_static/test_cache_program.py | 5 +-- test/dygraph_to_static/test_cycle_gan.py | 4 +- test/dygraph_to_static/test_dict.py | 4 +- test/dygraph_to_static/test_ifelse.py | 2 +- test/dygraph_to_static/test_mobile_net.py | 4 +- test/dygraph_to_static/test_resnet.py | 4 +- test/dygraph_to_static/test_resnet_amp.py | 4 +- .../test_save_inference_model.py | 4 +- test/dygraph_to_static/test_save_load.py | 4 +- test/dygraph_to_static/test_se_resnet.py | 4 +- test/dygraph_to_static/test_sentiment.py | 4 +- test/dygraph_to_static/test_seq2seq.py | 4 +- test/dygraph_to_static/test_transformer.py | 
4 +- test/dygraph_to_static/test_word2vec.py | 4 +- test/ir/inference/quant_dequant_test.py | 4 +- .../test_trt_c_allreduce_infer_script.py | 2 +- test/ir/new_ir/test_pass_manager.py | 2 +- test/legacy_test/detected_gpu.py | 5 +-- .../legacy_test/dist_fleet_sync_batch_norm.py | 2 +- .../distributed_fused_lamb_test_base.py | 2 +- test/legacy_test/test_assign_pos_op.py | 2 +- test/legacy_test/test_async_read_write.py | 2 +- .../test_auto_parallel_cost_model.py | 2 +- test/legacy_test/test_auto_parallel_mapper.py | 2 +- test/legacy_test/test_auto_search_dist_op.py | 2 +- test/legacy_test/test_base_layer.py | 12 ++---- test/legacy_test/test_boxps.py | 2 +- .../test_buffer_shared_memory_reuse_pass.py | 4 +- test/legacy_test/test_checkpoint_saver.py | 2 +- test/legacy_test/test_cost_model.py | 2 +- .../test_cuda_graph_static_mode.py | 2 +- .../test_cuda_graph_static_mode_error.py | 2 +- .../test_cuda_max_memory_allocated.py | 2 +- .../test_cuda_max_memory_reserved.py | 2 +- .../legacy_test/test_cuda_memory_allocated.py | 2 +- test/legacy_test/test_cuda_memory_reserved.py | 2 +- test/legacy_test/test_dataset.py | 20 +++------- test/legacy_test/test_debugger.py | 2 +- test/legacy_test/test_dist_base.py | 2 +- test/legacy_test/test_einsum.py | 8 +--- test/legacy_test/test_einsum_v2.py | 8 +--- test/legacy_test/test_elementwise_add_op.py | 8 +--- test/legacy_test/test_elementwise_mod_op.py | 8 +--- test/legacy_test/test_elementwise_mul_op.py | 8 +--- test/legacy_test/test_fetch_handler.py | 4 +- .../test_fleet_executor_cond_interceptor.py | 2 +- .../test_fleet_executor_task_node.py | 2 +- test/legacy_test/test_fused_attention_pass.py | 2 +- .../test_fused_feedforward_pass.py | 2 +- .../legacy_test/test_get_device_properties.py | 2 +- test/legacy_test/test_imperative_optimizer.py | 2 +- .../test_imperative_optimizer_v2.py | 2 +- test/legacy_test/test_inference_model_io.py | 6 +-- test/legacy_test/test_jit_save_load.py | 40 +++++-------------- test/legacy_test/test_limit_by_capacity_op.py | 2 +- test/legacy_test/test_lookup_table_bf16_op.py | 2 +- test/legacy_test/test_number_count_op.py | 2 +- test/legacy_test/test_paddle_save_load.py | 4 +- .../test_parallel_executor_transformer.py | 2 +- .../test_prune_gate_by_capacity_op.py | 2 +- test/legacy_test/test_py_func_op.py | 4 +- test/legacy_test/test_random_routing_op.py | 2 +- .../test_spawn_and_init_parallel_env.py | 2 +- test/legacy_test/test_sum_op.py | 2 +- test/legacy_test/test_where_op.py | 4 +- test/mkldnn/test_layer_norm_bf16_mkldnn_op.py | 2 +- test/mkldnn/test_layer_norm_mkldnn_op.py | 2 +- test/prim/model/bert.py | 2 +- test/prim/model/test_bert_cinn.py | 2 +- test/prim/model/test_bert_prim.py | 2 +- test/prim/model/test_bert_prim_cinn.py | 2 +- test/quantization/quant2_int8_lstm_model.py | 4 +- test/rnn/test_rnn_cells.py | 4 +- test/rnn/test_rnn_cells_static.py | 4 +- test/rnn/test_rnn_nets.py | 4 +- test/rnn/test_rnn_nets_static.py | 4 +- test/rnn/test_wrappers.py | 4 +- ...t_standalone_executor_multi_micro_batch.py | 2 +- tools/parse_kernel_info.py | 4 +- 183 files changed, 238 insertions(+), 383 deletions(-) diff --git a/python/paddle/dataset/uci_housing.py b/python/paddle/dataset/uci_housing.py index 744f9104c51f5..e834e2526318a 100644 --- a/python/paddle/dataset/uci_housing.py +++ b/python/paddle/dataset/uci_housing.py @@ -51,7 +51,7 @@ UCI_TRAIN_DATA = None UCI_TEST_DATA = None -FLUID_URL_MODEL = 'https://github.com/PaddlePaddle/book/raw/develop/01.fit_a_line/base/fit_a_line.base.tar' +FLUID_URL_MODEL = 
'https://github.com/PaddlePaddle/book/raw/develop/01.fit_a_line/base/fit_a_line.fluid.tar' FLUID_MD5_MODEL = '6e6dd637ccd5993961f68bfbde46090b' @@ -152,7 +152,7 @@ def reader(): def base_model(): parameter_tar = paddle.dataset.common.download( - FLUID_URL_MODEL, 'uci_housing', FLUID_MD5_MODEL, 'fit_a_line.base.tar' + FLUID_URL_MODEL, 'uci_housing', FLUID_MD5_MODEL, 'fit_a_line.fluid.tar' ) tar = tarfile.TarFile(parameter_tar, mode='r') diff --git a/python/paddle/distributed/auto_parallel/static/auto_align_tool.py b/python/paddle/distributed/auto_parallel/static/auto_align_tool.py index 1d25a264bf621..d7d98f75d80f1 100644 --- a/python/paddle/distributed/auto_parallel/static/auto_align_tool.py +++ b/python/paddle/distributed/auto_parallel/static/auto_align_tool.py @@ -21,6 +21,8 @@ import paddle import paddle.distributed as dist +from paddle.base import core +from paddle.base.framework import Program from paddle.distributed.auto_parallel.static.converter import Converter from paddle.distributed.auto_parallel.static.dist_context import ( get_default_distributed_context, @@ -30,8 +32,6 @@ is_forward_op, is_loss_op, ) -from paddle.base import core -from paddle.base.framework import Program from paddle.static.io import deserialize_program _valid_types = [ diff --git a/python/paddle/distributed/auto_parallel/static/completion.py b/python/paddle/distributed/auto_parallel/static/completion.py index cb64025d7db0f..2e7396dd596af 100644 --- a/python/paddle/distributed/auto_parallel/static/completion.py +++ b/python/paddle/distributed/auto_parallel/static/completion.py @@ -15,8 +15,8 @@ import copy import logging -from paddle.distributed.fleet.meta_optimizers.common import OpRole from paddle.base.core import get_spmd_rule # noqa: F401 +from paddle.distributed.fleet.meta_optimizers.common import OpRole from paddle.framework import core from ..process_mesh import ProcessMesh, compute_compatible_process_mesh diff --git a/python/paddle/distributed/auto_parallel/static/engine.py b/python/paddle/distributed/auto_parallel/static/engine.py index 16b452944f932..c8f8310f92a1a 100644 --- a/python/paddle/distributed/auto_parallel/static/engine.py +++ b/python/paddle/distributed/auto_parallel/static/engine.py @@ -24,8 +24,8 @@ import paddle import paddle.distributed.auto_parallel.static.utils as auto_utils from paddle import static, utils -from paddle.distributed import fleet from paddle.base.executor import _to_name_str +from paddle.distributed import fleet from paddle.framework import IrGraph from paddle.framework import _current_expected_place as _get_device from paddle.framework import core, in_dynamic_mode diff --git a/python/paddle/distributed/auto_parallel/static/tuner/rule_based_tuner.py b/python/paddle/distributed/auto_parallel/static/tuner/rule_based_tuner.py index bb5bb4bbc2ac8..8e0c3855e477e 100644 --- a/python/paddle/distributed/auto_parallel/static/tuner/rule_based_tuner.py +++ b/python/paddle/distributed/auto_parallel/static/tuner/rule_based_tuner.py @@ -26,6 +26,9 @@ import numpy as np import paddle +from paddle.base import program_guard +from paddle.base.backward import append_backward +from paddle.base.framework import Parameter, unique_name from paddle.distributed.auto_parallel.process_mesh import ProcessMesh from paddle.distributed.auto_parallel.static.cluster_v2 import DeviceMesh from paddle.distributed.auto_parallel.static.completion import Completer @@ -48,9 +51,6 @@ print_program_with_dist_attr, ) from paddle.distributed.fleet.meta_optimizers.common import OpRole -from paddle.base import 
program_guard -from paddle.base.backward import append_backward -from paddle.base.framework import Parameter, unique_name from ....utils.log_utils import get_logger from ..graph import Graph diff --git a/python/paddle/distributed/communication/stream/all_gather.py b/python/paddle/distributed/communication/stream/all_gather.py index 04e0a7d5361d1..165bf9690b6f2 100644 --- a/python/paddle/distributed/communication/stream/all_gather.py +++ b/python/paddle/distributed/communication/stream/all_gather.py @@ -15,8 +15,8 @@ import paddle import paddle.distributed as dist from paddle import framework -from paddle.distributed.communication.group import _get_global_group from paddle.base import data_feeder +from paddle.distributed.communication.group import _get_global_group def _all_gather_into_tensor_in_dygraph( diff --git a/python/paddle/distributed/communication/stream/all_reduce.py b/python/paddle/distributed/communication/stream/all_reduce.py index b10773006a344..61ea16b769a7a 100644 --- a/python/paddle/distributed/communication/stream/all_reduce.py +++ b/python/paddle/distributed/communication/stream/all_reduce.py @@ -13,12 +13,12 @@ # limitations under the License. from paddle import framework +from paddle.base import data_feeder from paddle.distributed.communication.group import ( _get_global_group, _warn_cur_rank_not_in_group, ) from paddle.distributed.communication.reduce import ReduceOp, _get_reduce_op -from paddle.base import data_feeder def _all_reduce_in_dygraph(tensor, op, group, sync_op, use_calc_stream): diff --git a/python/paddle/distributed/communication/stream/all_to_all.py b/python/paddle/distributed/communication/stream/all_to_all.py index 7089ec70fc55d..656986270805c 100644 --- a/python/paddle/distributed/communication/stream/all_to_all.py +++ b/python/paddle/distributed/communication/stream/all_to_all.py @@ -15,11 +15,11 @@ import paddle import paddle.distributed as dist from paddle import framework +from paddle.base import data_feeder from paddle.distributed.communication.group import ( _get_global_group, _warn_cur_rank_not_in_group, ) -from paddle.base import data_feeder def _all_to_all_tensor_in_dygraph( diff --git a/python/paddle/distributed/communication/stream/broadcast.py b/python/paddle/distributed/communication/stream/broadcast.py index 2a671ac7a8700..2f02012098ec9 100644 --- a/python/paddle/distributed/communication/stream/broadcast.py +++ b/python/paddle/distributed/communication/stream/broadcast.py @@ -13,12 +13,12 @@ # limitations under the License. from paddle import framework +from paddle.base import data_feeder from paddle.distributed.communication.group import ( _get_global_group, _get_or_throw_group_rank, _warn_cur_rank_not_in_group, ) -from paddle.base import data_feeder def _broadcast_in_dygraph( diff --git a/python/paddle/distributed/communication/stream/recv.py b/python/paddle/distributed/communication/stream/recv.py index d77010e79e061..cb93ce8a47155 100644 --- a/python/paddle/distributed/communication/stream/recv.py +++ b/python/paddle/distributed/communication/stream/recv.py @@ -13,12 +13,12 @@ # limitations under the License. 
from paddle import framework +from paddle.base import data_feeder from paddle.distributed.communication.group import ( _get_global_group, _get_or_throw_group_rank, _warn_cur_rank_not_in_group, ) -from paddle.base import data_feeder def _recv_in_dygraph( diff --git a/python/paddle/distributed/communication/stream/reduce.py b/python/paddle/distributed/communication/stream/reduce.py index feb5aff33d006..93c69a701879e 100644 --- a/python/paddle/distributed/communication/stream/reduce.py +++ b/python/paddle/distributed/communication/stream/reduce.py @@ -13,13 +13,13 @@ # limitations under the License. from paddle import framework +from paddle.base import data_feeder from paddle.distributed.communication.group import ( _get_global_group, _get_or_throw_group_rank, _warn_cur_rank_not_in_group, ) from paddle.distributed.communication.reduce import ReduceOp, _get_reduce_op -from paddle.base import data_feeder def _reduce_in_dygraph( diff --git a/python/paddle/distributed/communication/stream/reduce_scatter.py b/python/paddle/distributed/communication/stream/reduce_scatter.py index d071db82b50ec..6b9d669469e8e 100644 --- a/python/paddle/distributed/communication/stream/reduce_scatter.py +++ b/python/paddle/distributed/communication/stream/reduce_scatter.py @@ -15,12 +15,12 @@ import paddle import paddle.distributed as dist from paddle import framework +from paddle.base import data_feeder from paddle.distributed.communication.group import ( _get_global_group, _warn_cur_rank_not_in_group, ) from paddle.distributed.communication.reduce import ReduceOp, _get_reduce_op -from paddle.base import data_feeder def _reduce_scatter_tensor_in_dygraph( diff --git a/python/paddle/distributed/communication/stream/scatter.py b/python/paddle/distributed/communication/stream/scatter.py index fc3a115ff63e4..84a03f44bdf3e 100644 --- a/python/paddle/distributed/communication/stream/scatter.py +++ b/python/paddle/distributed/communication/stream/scatter.py @@ -17,12 +17,12 @@ import paddle import paddle.distributed as dist from paddle import framework +from paddle.base import data_feeder from paddle.distributed.communication.group import ( _get_global_group, _get_or_throw_group_rank, _warn_cur_rank_not_in_group, ) -from paddle.base import data_feeder def _scatter_tensor_in_dygraph( diff --git a/python/paddle/distributed/communication/stream/send.py b/python/paddle/distributed/communication/stream/send.py index 16705f05bbfd4..d90d180783c85 100644 --- a/python/paddle/distributed/communication/stream/send.py +++ b/python/paddle/distributed/communication/stream/send.py @@ -13,12 +13,12 @@ # limitations under the License. 
from paddle import framework +from paddle.base import data_feeder from paddle.distributed.communication.group import ( _get_global_group, _get_or_throw_group_rank, _warn_cur_rank_not_in_group, ) -from paddle.base import data_feeder def _send_in_dygraph( diff --git a/python/paddle/distributed/fleet/base/distributed_strategy.py b/python/paddle/distributed/fleet/base/distributed_strategy.py index 4fb06eaf16e5d..09e2ef82162bc 100755 --- a/python/paddle/distributed/fleet/base/distributed_strategy.py +++ b/python/paddle/distributed/fleet/base/distributed_strategy.py @@ -19,10 +19,10 @@ import google.protobuf.text_format import paddle -from paddle.distributed.fleet.proto import distributed_strategy_pb2 -from paddle.distributed.fleet.utils.log_util import logger from paddle.base.framework import _global_flags from paddle.base.wrapped_decorator import wrap_decorator +from paddle.distributed.fleet.proto import distributed_strategy_pb2 +from paddle.distributed.fleet.utils.log_util import logger __all__ = [] diff --git a/python/paddle/distributed/fleet/base/role_maker.py b/python/paddle/distributed/fleet/base/role_maker.py index 7300791dbcb7a..870f818d177a9 100755 --- a/python/paddle/distributed/fleet/base/role_maker.py +++ b/python/paddle/distributed/fleet/base/role_maker.py @@ -20,10 +20,10 @@ import numpy as np import paddle +from paddle.base import core from paddle.distributed.fleet.base.private_helper_function import ( wait_server_ready, ) -from paddle.base import core from ...backup_env import getenv_or_backup diff --git a/python/paddle/distributed/fleet/layers/mpu/mp_layers.py b/python/paddle/distributed/fleet/layers/mpu/mp_layers.py index 6a3becc314974..c24062c1f392b 100644 --- a/python/paddle/distributed/fleet/layers/mpu/mp_layers.py +++ b/python/paddle/distributed/fleet/layers/mpu/mp_layers.py @@ -14,8 +14,8 @@ import paddle from paddle.autograd import PyLayer -from paddle.distributed import fleet from paddle.base import core +from paddle.distributed import fleet from paddle.nn import functional as F from ....communication.reduce import ReduceOp, _get_reduce_op diff --git a/python/paddle/distributed/fleet/layers/mpu/mp_ops.py b/python/paddle/distributed/fleet/layers/mpu/mp_ops.py index 9a82864f719f4..27d8a32f0693f 100644 --- a/python/paddle/distributed/fleet/layers/mpu/mp_ops.py +++ b/python/paddle/distributed/fleet/layers/mpu/mp_ops.py @@ -15,8 +15,8 @@ import paddle from paddle import _legacy_C_ops from paddle.autograd import PyLayer -from paddle.distributed import collective from paddle.base.data_feeder import check_dtype, check_variable_and_dtype +from paddle.distributed import collective from paddle.framework import LayerHelper, _create_tensor, in_dynamic_mode from paddle.nn import Layer from paddle.nn.utils import dygraph_utils diff --git a/python/paddle/distributed/fleet/layers/mpu/random.py b/python/paddle/distributed/fleet/layers/mpu/random.py index 22063aa24fa79..5b43ef951cfff 100644 --- a/python/paddle/distributed/fleet/layers/mpu/random.py +++ b/python/paddle/distributed/fleet/layers/mpu/random.py @@ -18,9 +18,9 @@ import paddle from paddle import _legacy_C_ops -from paddle.common_ops_import import Variable from paddle.base import core from paddle.base.data_feeder import check_variable_and_dtype +from paddle.common_ops_import import Variable from paddle.framework import LayerHelper, in_dynamic_mode __all__ = [] diff --git a/python/paddle/distributed/fleet/meta_optimizers/dgc_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/dgc_optimizer.py index 
4f74b62a418a5..cee43657c7527 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/dgc_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/dgc_optimizer.py @@ -19,9 +19,9 @@ __all__ = [] import paddle -from paddle.common_ops_import import LayerHelper from paddle.base import framework from paddle.base.dygraph import base as imperative_base +from paddle.common_ops_import import LayerHelper from paddle.framework import core, in_dynamic_mode from paddle.nn.clip import ClipGradByNorm, append_gradient_clip_ops from paddle.optimizer import Momentum, Optimizer diff --git a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage3.py b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage3.py index 3ef5dbb91dd13..3b342778442c2 100644 --- a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage3.py +++ b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage3.py @@ -22,8 +22,8 @@ import paddle.distributed as dist from paddle import framework, nn from paddle.autograd import PyLayer -from paddle.distributed import collective from paddle.base.framework import EagerParamBase +from paddle.distributed import collective from paddle.framework import core from paddle.nn import ClipGradByGlobalNorm diff --git a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_utils.py b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_utils.py index 3e4ba5026b1fa..adfad3ec77e31 100644 --- a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_utils.py +++ b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_utils.py @@ -20,9 +20,9 @@ import paddle from paddle import _C_ops, _legacy_C_ops -from paddle.common_ops_import import dygraph_only from paddle.base import core from paddle.base.dygraph import to_variable +from paddle.common_ops_import import dygraph_only from paddle.nn import clip diff --git a/python/paddle/distributed/fleet/recompute/recompute_hybrid.py b/python/paddle/distributed/fleet/recompute/recompute_hybrid.py index 5a8ca327ffc51..bdbdc797b2b7b 100644 --- a/python/paddle/distributed/fleet/recompute/recompute_hybrid.py +++ b/python/paddle/distributed/fleet/recompute/recompute_hybrid.py @@ -158,7 +158,7 @@ def forward( # Note: # If not marked non_differentiable, all output tensors' attr `stop gradient` # will be reset to `False` in c++ backend. 
- # See https://github.com/PaddlePaddle/Paddle/blob/9d62efb0e6e5373823039d9eda96cd5905426c0a/paddle/base/pybind/eager_py_layer.cc#L388 + # See https://github.com/PaddlePaddle/Paddle/blob/9d62efb0e6e5373823039d9eda96cd5905426c0a/paddle/fluid/pybind/eager_py_layer.cc#L388 if framework.in_dynamic_mode() and state: ctx.mark_non_differentiable(arg) else: diff --git a/python/paddle/distributed/fleet/scaler.py b/python/paddle/distributed/fleet/scaler.py index a9528210c6223..bf0d7363b0525 100755 --- a/python/paddle/distributed/fleet/scaler.py +++ b/python/paddle/distributed/fleet/scaler.py @@ -18,8 +18,8 @@ import paddle from paddle import _C_ops, _legacy_C_ops -from paddle.distributed import fleet from paddle.base.dygraph import to_variable +from paddle.distributed import fleet from paddle.framework import core from .base.topology import ParallelMode diff --git a/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py b/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py index ae9fccb1be7a6..a5723f856e661 100644 --- a/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py +++ b/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py @@ -16,10 +16,9 @@ import numpy as np -from paddle.distributed import fleet - # (TODO: GhostScreaming) It will be removed later. from paddle.base import core +from paddle.distributed import fleet from paddle.framework import Block, Program, in_dynamic_mode diff --git a/python/paddle/distributed/fleet/utils/hybrid_parallel_util.py b/python/paddle/distributed/fleet/utils/hybrid_parallel_util.py index f44ba3f41ba05..edeb585384b03 100644 --- a/python/paddle/distributed/fleet/utils/hybrid_parallel_util.py +++ b/python/paddle/distributed/fleet/utils/hybrid_parallel_util.py @@ -14,6 +14,9 @@ import paddle from paddle import framework + +# (TODO: GhostScreaming) It will be removed later. +from paddle.base import core from paddle.distributed.parallel import ( _split_tensors, build_groups, @@ -21,9 +24,6 @@ sync_params_buffers, ) -# (TODO: GhostScreaming) It will be removed later. 
-from paddle.base import core - from .log_util import logger __all__ = [] diff --git a/python/paddle/distributed/fleet/utils/mix_precision_utils.py b/python/paddle/distributed/fleet/utils/mix_precision_utils.py index 37e6a4d32a42e..ead12379bb5a8 100644 --- a/python/paddle/distributed/fleet/utils/mix_precision_utils.py +++ b/python/paddle/distributed/fleet/utils/mix_precision_utils.py @@ -20,13 +20,13 @@ import paddle from paddle import _legacy_C_ops, nn +from paddle.base import framework +from paddle.base.dygraph import base as imperative_base +from paddle.base.dygraph import to_variable from paddle.distributed import fleet from paddle.distributed.fleet.utils.hybrid_parallel_util import ( obtain_optimizer_parameters_list, ) -from paddle.base import framework -from paddle.base.dygraph import base as imperative_base -from paddle.base.dygraph import to_variable from paddle.framework import core diff --git a/python/paddle/distributed/fleet/utils/sequence_parallel_utils.py b/python/paddle/distributed/fleet/utils/sequence_parallel_utils.py index c43a339db9f3f..ae5dec21b4e8f 100644 --- a/python/paddle/distributed/fleet/utils/sequence_parallel_utils.py +++ b/python/paddle/distributed/fleet/utils/sequence_parallel_utils.py @@ -17,12 +17,12 @@ import paddle from paddle import distributed as dist from paddle.autograd import PyLayer +from paddle.base import core from paddle.distributed import fleet from paddle.distributed.fleet.meta_parallel import get_rng_state_tracker from paddle.distributed.fleet.utils.hybrid_parallel_util import ( fused_allreduce_gradients_with_group, ) -from paddle.base import core from paddle.nn import Layer from paddle.nn import functional as F diff --git a/python/paddle/distributed/fleet/utils/tensor_parallel_utils.py b/python/paddle/distributed/fleet/utils/tensor_parallel_utils.py index 0ad0c5024015e..e370042927434 100644 --- a/python/paddle/distributed/fleet/utils/tensor_parallel_utils.py +++ b/python/paddle/distributed/fleet/utils/tensor_parallel_utils.py @@ -22,8 +22,8 @@ ch.setFormatter(formatter) logger.addHandler(ch) -from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY from paddle.base import core +from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY from paddle.static import Parameter _supported_optimizer_type = [ diff --git a/python/paddle/distributed/launch/context/device.py b/python/paddle/distributed/launch/context/device.py index dfba7b72149fb..5126f687ccb0a 100644 --- a/python/paddle/distributed/launch/context/device.py +++ b/python/paddle/distributed/launch/context/device.py @@ -14,10 +14,9 @@ import os -from paddle.device import get_available_custom_device - # (TODO: GhostScreaming) It will be removed later. from paddle.base import core +from paddle.device import get_available_custom_device class DeviceType: diff --git a/python/paddle/distributed/parallel_with_gloo.py b/python/paddle/distributed/parallel_with_gloo.py index 1a4bf4f8fbe6e..9183139becd95 100755 --- a/python/paddle/distributed/parallel_with_gloo.py +++ b/python/paddle/distributed/parallel_with_gloo.py @@ -15,13 +15,12 @@ import time from multiprocessing import Manager, Process -from paddle.distributed.fleet.base.private_helper_function import ( - wait_server_ready, -) - # deprecated module import # (TODO: GhostScreaming) It will be removed later. 
from paddle.base import core +from paddle.distributed.fleet.base.private_helper_function import ( + wait_server_ready, +) __all__ = [] diff --git a/python/paddle/distributed/passes/auto_parallel_amp.py b/python/paddle/distributed/passes/auto_parallel_amp.py index 76a33d748d86e..322adfb5da310 100644 --- a/python/paddle/distributed/passes/auto_parallel_amp.py +++ b/python/paddle/distributed/passes/auto_parallel_amp.py @@ -13,6 +13,7 @@ # limitations under the License. import paddle +from paddle.base.data_feeder import check_type, check_variable_and_dtype from paddle.distributed.auto_parallel.static.dist_attribute import ( OperatorDistAttr, ) @@ -24,7 +25,6 @@ set_var_dist_attr, ) from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY, OpRole -from paddle.base.data_feeder import check_type, check_variable_and_dtype from paddle.framework import core from paddle.static.amp.bf16.amp_utils import ( AutoMixedPrecisionListsBF16, diff --git a/python/paddle/distributed/passes/auto_parallel_pipeline.py b/python/paddle/distributed/passes/auto_parallel_pipeline.py index de7e819635a49..9e2a06778854a 100644 --- a/python/paddle/distributed/passes/auto_parallel_pipeline.py +++ b/python/paddle/distributed/passes/auto_parallel_pipeline.py @@ -14,6 +14,8 @@ import os +from paddle.base import core +from paddle.base.framework import Program from paddle.distributed.auto_parallel.static.process_group import ( remove_process_group, ) @@ -24,8 +26,6 @@ is_optimize_op, ) from paddle.distributed.fleet.fleet_executor_utils import TaskNode -from paddle.base import core -from paddle.base.framework import Program from .pass_base import PassBase, register_pass from .pass_utils import _create_program, _insert_sync_for_fthenb_1f1b diff --git a/python/paddle/distributed/passes/auto_parallel_recompute.py b/python/paddle/distributed/passes/auto_parallel_recompute.py index 194aeefb6029b..0fd008ff5a701 100644 --- a/python/paddle/distributed/passes/auto_parallel_recompute.py +++ b/python/paddle/distributed/passes/auto_parallel_recompute.py @@ -15,7 +15,6 @@ import logging import paddle -from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY, OpRole from paddle.base.backward import ( ProgramStats, _append_grad_suffix_, @@ -23,6 +22,7 @@ _get_no_grad_set_name, _rename_arg_, ) +from paddle.distributed.fleet.meta_optimizers.common import OP_ROLE_KEY, OpRole from paddle.framework import core from paddle.utils import unique_name diff --git a/python/paddle/distributed/passes/pass_utils.py b/python/paddle/distributed/passes/pass_utils.py index 2db43237c8290..7c34023ebd6cc 100644 --- a/python/paddle/distributed/passes/pass_utils.py +++ b/python/paddle/distributed/passes/pass_utils.py @@ -15,6 +15,8 @@ from collections import OrderedDict from typing import List +from paddle.base import core +from paddle.base.framework import Parameter, Program from paddle.distributed.auto_parallel.static.utils import ( is_backward_op, is_forward_op, @@ -22,8 +24,6 @@ is_optimize_op, ) from paddle.distributed.fleet.meta_optimizers.common import OpRole -from paddle.base import core -from paddle.base.framework import Parameter, Program __not_shape_var_type__ = [ core.VarDesc.VarType.READER, diff --git a/python/paddle/distributed/passes/pipeline_pass_base.py b/python/paddle/distributed/passes/pipeline_pass_base.py index 65e09a73327d3..07235ecc05a0f 100644 --- a/python/paddle/distributed/passes/pipeline_pass_base.py +++ b/python/paddle/distributed/passes/pipeline_pass_base.py @@ -14,8 +14,8 @@ import logging -from 
paddle.distributed.auto_parallel.static.utils import get_logger from paddle.base import core +from paddle.distributed.auto_parallel.static.utils import get_logger from .pass_base import PassBase from .pass_utils import get_skip_gc_vars diff --git a/python/paddle/distributed/passes/ps_trainer_pass.py b/python/paddle/distributed/passes/ps_trainer_pass.py index 85dc873b7112f..434d32891d1d0 100755 --- a/python/paddle/distributed/passes/ps_trainer_pass.py +++ b/python/paddle/distributed/passes/ps_trainer_pass.py @@ -17,8 +17,8 @@ from _collections import defaultdict import paddle -from paddle.distributed.passes.pass_base import PassBase, register_pass from paddle.base import framework +from paddle.distributed.passes.pass_base import PassBase, register_pass from paddle.framework import core from paddle.static import Parameter, Program diff --git a/python/paddle/distributed/ps/utils/collective_transpiler.py b/python/paddle/distributed/ps/utils/collective_transpiler.py index 989a97d1d8426..99bb76a3b315b 100644 --- a/python/paddle/distributed/ps/utils/collective_transpiler.py +++ b/python/paddle/distributed/ps/utils/collective_transpiler.py @@ -15,10 +15,10 @@ import os import paddle +from paddle.base import unique_name from paddle.distributed.fleet.base.private_helper_function import ( wait_server_ready, ) -from paddle.base import unique_name from paddle.framework import core from paddle.static import default_main_program, default_startup_program diff --git a/python/paddle/distributed/ps/utils/ps_program_builder.py b/python/paddle/distributed/ps/utils/ps_program_builder.py index 428875fff6f72..8ba98a3707d99 100755 --- a/python/paddle/distributed/ps/utils/ps_program_builder.py +++ b/python/paddle/distributed/ps/utils/ps_program_builder.py @@ -97,9 +97,7 @@ def _build_programs(self): elif self.attrs['is_server']: self._build_pserver_programs() self.loss.block.program = self.attrs['_main_server'] - base.framework.switch_startup_program( - self.attrs['_startup_server'] - ) + base.framework.switch_startup_program(self.attrs['_startup_server']) class GeoPsProgramBuilder(PsProgramBuilder): # 仅 CPU 模式 @@ -372,9 +370,7 @@ def _build_programs(self): elif self.attrs['is_server']: self._build_pserver_programs() self.loss.block.program = self.attrs['_main_server'] - base.framework.switch_startup_program( - self.attrs['_startup_server'] - ) + base.framework.switch_startup_program(self.attrs['_startup_server']) class FlPsProgramBuilder(HeterAsyncPsProgramBuilder): @@ -479,7 +475,5 @@ def _build_programs(self): ) else: self._build_pserver_programs() - base.framework.switch_startup_program( - self.attrs['_startup_server'] - ) + base.framework.switch_startup_program(self.attrs['_startup_server']) paddle.framework.switch_main_program(self.attrs['_main_server']) diff --git a/python/paddle/distributed/ps/utils/public.py b/python/paddle/distributed/ps/utils/public.py index 82d6eccee2e10..865de4c828308 100755 --- a/python/paddle/distributed/ps/utils/public.py +++ b/python/paddle/distributed/ps/utils/public.py @@ -18,8 +18,8 @@ import warnings from functools import reduce -from paddle.distributed.io import is_persistable from paddle.base.framework import generate_control_dev_var_name +from paddle.distributed.io import is_persistable from paddle.framework import core # logging.basicConfig( diff --git a/python/paddle/distributed/rpc/rpc.py b/python/paddle/distributed/rpc/rpc.py index ae4c1459d8717..ebe6bc54623d6 100644 --- a/python/paddle/distributed/rpc/rpc.py +++ b/python/paddle/distributed/rpc/rpc.py @@ -18,10 +18,10 @@ 
import time from collections import namedtuple +from paddle.base import core from paddle.distributed.launch.context import Node from paddle.distributed.rpc.internal import PythonFunc, _serialize from paddle.distributed.utils.launch_utils import logger -from paddle.base import core WorkerInfo = namedtuple("WorkerInfo", ["name", "rank", "ip", "port"]) diff --git a/python/paddle/distributed/spawn.py b/python/paddle/distributed/spawn.py index c3351c5c9436b..91039b3b3bac3 100644 --- a/python/paddle/distributed/spawn.py +++ b/python/paddle/distributed/spawn.py @@ -18,6 +18,9 @@ import sys import warnings +# deprecated module import +# (TODO: GhostScreaming) It will be removed later. +from paddle.base import core from paddle.device import get_device from paddle.distributed.cloud_utils import ( _get_trainers_num, @@ -35,10 +38,6 @@ _print_arguments, get_host_name_ip, ) - -# deprecated module import -# (TODO: GhostScreaming) It will be removed later. -from paddle.base import core from paddle.framework import set_flags __all__ = [] diff --git a/python/paddle/distributed/transpiler/collective.py b/python/paddle/distributed/transpiler/collective.py index 03b3be36e69b0..e76238e02af43 100644 --- a/python/paddle/distributed/transpiler/collective.py +++ b/python/paddle/distributed/transpiler/collective.py @@ -15,10 +15,10 @@ import os import paddle +from paddle.base import unique_name from paddle.distributed.fleet.base.private_helper_function import ( wait_server_ready, ) -from paddle.base import unique_name from paddle.framework import core from paddle.static import default_main_program, default_startup_program diff --git a/python/paddle/distribution/bernoulli.py b/python/paddle/distribution/bernoulli.py index c86bed5a0d78f..7d4849fab48e7 100644 --- a/python/paddle/distribution/bernoulli.py +++ b/python/paddle/distribution/bernoulli.py @@ -16,9 +16,9 @@ import numpy as np import paddle -from paddle.distribution import exponential_family from paddle.base.data_feeder import check_type, convert_dtype from paddle.base.framework import Variable +from paddle.distribution import exponential_family from paddle.framework import in_dynamic_mode from paddle.nn.functional import ( binary_cross_entropy_with_logits, diff --git a/python/paddle/distribution/categorical.py b/python/paddle/distribution/categorical.py index 110f324db77c7..b6484e3f21d56 100644 --- a/python/paddle/distribution/categorical.py +++ b/python/paddle/distribution/categorical.py @@ -15,9 +15,9 @@ import numpy as np import paddle -from paddle.distribution import distribution from paddle.base.data_feeder import check_type, convert_dtype from paddle.base.framework import Variable +from paddle.distribution import distribution from paddle.framework import in_dynamic_mode from paddle.tensor import multinomial diff --git a/python/paddle/distribution/cauchy.py b/python/paddle/distribution/cauchy.py index cad5c88753421..63e0a5f252638 100644 --- a/python/paddle/distribution/cauchy.py +++ b/python/paddle/distribution/cauchy.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle.distribution import distribution from paddle.base import framework +from paddle.distribution import distribution class Cauchy(distribution.Distribution): diff --git a/python/paddle/distribution/dirichlet.py b/python/paddle/distribution/dirichlet.py index a1695f2b36b48..cf578c9d0dd5c 100644 --- a/python/paddle/distribution/dirichlet.py +++ b/python/paddle/distribution/dirichlet.py @@ -13,9 +13,9 @@ # limitations under the License. 
import paddle -from paddle.distribution import exponential_family from paddle.base.data_feeder import check_variable_and_dtype from paddle.base.layer_helper import LayerHelper +from paddle.distribution import exponential_family from paddle.framework import in_dynamic_mode diff --git a/python/paddle/distribution/geometric.py b/python/paddle/distribution/geometric.py index e4f2795b31f16..bfcd734bb1785 100644 --- a/python/paddle/distribution/geometric.py +++ b/python/paddle/distribution/geometric.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle.distribution import distribution, uniform from paddle.base import framework +from paddle.distribution import distribution, uniform class Geometric(distribution.Distribution): diff --git a/python/paddle/distribution/gumbel.py b/python/paddle/distribution/gumbel.py index 6c21b8d601a3f..005801ae6b7cc 100644 --- a/python/paddle/distribution/gumbel.py +++ b/python/paddle/distribution/gumbel.py @@ -18,8 +18,8 @@ import numpy as np import paddle -from paddle.distribution.transformed_distribution import TransformedDistribution from paddle.base import framework +from paddle.distribution.transformed_distribution import TransformedDistribution class Gumbel(TransformedDistribution): diff --git a/python/paddle/distribution/laplace.py b/python/paddle/distribution/laplace.py index 3d5ef1bbf245a..fc4b57eeba79c 100644 --- a/python/paddle/distribution/laplace.py +++ b/python/paddle/distribution/laplace.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle.distribution import distribution from paddle.base import framework +from paddle.distribution import distribution class Laplace(distribution.Distribution): diff --git a/python/paddle/distribution/normal.py b/python/paddle/distribution/normal.py index f522ac65c8a6d..0564bb2b5397b 100644 --- a/python/paddle/distribution/normal.py +++ b/python/paddle/distribution/normal.py @@ -18,9 +18,9 @@ import numpy as np import paddle -from paddle.distribution import distribution from paddle.base.data_feeder import check_type, convert_dtype from paddle.base.framework import Variable +from paddle.distribution import distribution from paddle.framework import in_dynamic_mode from paddle.tensor import random diff --git a/python/paddle/distribution/uniform.py b/python/paddle/distribution/uniform.py index 833195491e038..4d8a0f97d910d 100644 --- a/python/paddle/distribution/uniform.py +++ b/python/paddle/distribution/uniform.py @@ -16,9 +16,9 @@ import paddle from paddle import _C_ops -from paddle.distribution import distribution from paddle.base.data_feeder import check_type, convert_dtype from paddle.base.framework import Variable +from paddle.distribution import distribution from paddle.framework import in_dynamic_mode from paddle.tensor import random diff --git a/python/paddle/hapi/model.py b/python/paddle/hapi/model.py index 9002de1ee1698..8ff1a557673af 100644 --- a/python/paddle/hapi/model.py +++ b/python/paddle/hapi/model.py @@ -26,14 +26,14 @@ import paddle.distributed as dist from paddle import base from paddle.autograd import no_grad -from paddle.distributed import fleet -from paddle.distributed.fleet.base import role_maker from paddle.base import core from paddle.base.dygraph.base import to_variable from paddle.base.executor import global_scope from paddle.base.framework import Variable from paddle.base.framework import _current_expected_place as _get_device from paddle.base.framework import _get_paddle_place +from paddle.distributed import fleet +from paddle.distributed.fleet.base import role_maker 
from paddle.framework import in_dynamic_mode from paddle.framework.io_utils import is_belong_to_optimizer from paddle.io import DataLoader, Dataset, DistributedBatchSampler @@ -58,9 +58,7 @@ def to_list(value): def to_numpy(var): - assert isinstance( - var, (Variable, base.core.eager.Tensor) - ), "not a variable" + assert isinstance(var, (Variable, base.core.eager.Tensor)), "not a variable" if isinstance(var, base.core.eager.Tensor): return np.array(var) t = global_scope().find_var(var.name).get_tensor() diff --git a/python/paddle/hapi/model_summary.py b/python/paddle/hapi/model_summary.py index 4a204f64b3d63..267e938a6e298 100644 --- a/python/paddle/hapi/model_summary.py +++ b/python/paddle/hapi/model_summary.py @@ -353,9 +353,7 @@ def _build_dtypes(input_size, dtype): depth = len(list(model.sublayers())) def _get_shape_from_tensor(x): - if isinstance( - x, (paddle.base.Variable, paddle.base.core.eager.Tensor) - ): + if isinstance(x, (paddle.base.Variable, paddle.base.core.eager.Tensor)): return list(x.shape) elif isinstance(x, (list, tuple)): return [_get_shape_from_tensor(xx) for xx in x] diff --git a/python/paddle/incubate/autograd/primreg.py b/python/paddle/incubate/autograd/primreg.py index 5e94c220acb47..5cedac46320dd 100644 --- a/python/paddle/incubate/autograd/primreg.py +++ b/python/paddle/incubate/autograd/primreg.py @@ -79,7 +79,7 @@ def op_position_inputs(op): .. code-block:: python >>> from paddle.incubate.autograd.primops import _simple_binop - >>> from paddle.fluid.layer_helper import LayerHelper + >>> from paddle.base.layer_helper import LayerHelper >>> from paddle.incubate.autograd.primreg import REGISTER_FN >>> # doctest: +SKIP('Depends on external code.') @@ -126,7 +126,7 @@ def op_position_output(op): >>> # doctest: +SKIP('Depends on external code.') >>> from paddle.incubate.autograd.primops import _simple_binop - >>> from paddle.fluid.layer_helper import LayerHelper + >>> from paddle.base.layer_helper import LayerHelper >>> from paddle.incubate.autograd.primreg import REGISTER_FN >>> @REGISTER_FN('div_p', 'X', 'Y', 'Z') @@ -169,7 +169,7 @@ def REGISTER_FN(op_type, *position_argnames): >>> # doctest: +SKIP('Depends on external code.') >>> from paddle.incubate.autograd.primops import _simple_binop - >>> from paddle.fluid.layer_helper import LayerHelper + >>> from paddle.base.layer_helper import LayerHelper >>> from paddle.incubate.autograd.primreg import REGISTER_FN >>> @REGISTER_FN('tanh_p', 'X', 'Y') @@ -204,7 +204,7 @@ def REGISTER_ORIG2PRIM(op_type): .. 
code-block:: python >>> # doctest: +SKIP('Depends on external code.') - >>> from paddle.fluid.layer_helper import LayerHelper + >>> from paddle.base.layer_helper import LayerHelper >>> from paddle.incubate.autograd.utils import get_input_var_list >>> from paddle.incubate.autograd import primops >>> from paddle.incubate.autograd.primreg import REGISTER_ORIG2PRIM diff --git a/python/paddle/incubate/distributed/fleet/base.py b/python/paddle/incubate/distributed/fleet/base.py index 1bce9decb5290..87af4c7fd4e82 100644 --- a/python/paddle/incubate/distributed/fleet/base.py +++ b/python/paddle/incubate/distributed/fleet/base.py @@ -15,8 +15,8 @@ import abc from paddle import base -from paddle.distributed.fleet.base.role_maker import RoleMakerBase from paddle.base.executor import Executor +from paddle.distributed.fleet.base.role_maker import RoleMakerBase from paddle.optimizer import SGD from paddle.static.amp.decorator import OptimizerWithMixedPrecision diff --git a/python/paddle/incubate/distributed/fleet/collective.py b/python/paddle/incubate/distributed/fleet/collective.py index 8ac688107e97b..0a63ddb71dffb 100644 --- a/python/paddle/incubate/distributed/fleet/collective.py +++ b/python/paddle/incubate/distributed/fleet/collective.py @@ -17,7 +17,6 @@ import paddle import paddle.distributed.transpiler.distribute_transpiler as dist_transpiler from paddle import base -from paddle.distributed.fleet.meta_optimizers import RawProgramOptimizer from paddle.base.compiler import CompiledProgram from paddle.base.executor import Executor from paddle.base.framework import Program @@ -25,6 +24,7 @@ CheckpointSaver, PaddleModel, ) +from paddle.distributed.fleet.meta_optimizers import RawProgramOptimizer from paddle.incubate.distributed.fleet.base import ( DistributedOptimizer, Fleet, diff --git a/python/paddle/incubate/distributed/fleet/fleet_util.py b/python/paddle/incubate/distributed/fleet/fleet_util.py index 860c37705f4be..687c1066f0c09 100644 --- a/python/paddle/incubate/distributed/fleet/fleet_util.py +++ b/python/paddle/incubate/distributed/fleet/fleet_util.py @@ -25,8 +25,8 @@ import paddle from paddle import base -from paddle.distributed.fleet.utils.fs import HDFSClient from paddle.base.log_helper import get_logger +from paddle.distributed.fleet.utils.fs import HDFSClient from . 
import utils diff --git a/python/paddle/incubate/distributed/fleet/utils.py b/python/paddle/incubate/distributed/fleet/utils.py index 4a60900f75435..2be2ac7161071 100644 --- a/python/paddle/incubate/distributed/fleet/utils.py +++ b/python/paddle/incubate/distributed/fleet/utils.py @@ -22,10 +22,10 @@ import paddle from paddle import base -from paddle.distributed.fleet.base.util_factory import draw_block_graphviz from paddle.base import core from paddle.base.framework import Program from paddle.base.proto import framework_pb2 +from paddle.distributed.fleet.base.util_factory import draw_block_graphviz from paddle.framework import io_utils __all__ = [ diff --git a/python/paddle/incubate/distributed/utils/io/dist_load.py b/python/paddle/incubate/distributed/utils/io/dist_load.py index be37471ec0a60..4d1ce2a40ff6e 100644 --- a/python/paddle/incubate/distributed/utils/io/dist_load.py +++ b/python/paddle/incubate/distributed/utils/io/dist_load.py @@ -17,8 +17,8 @@ import paddle import paddle.distributed as dist -from paddle.distributed import fleet from paddle.base.framework import dygraph_only +from paddle.distributed import fleet @dygraph_only diff --git a/python/paddle/incubate/distributed/utils/io/dist_save.py b/python/paddle/incubate/distributed/utils/io/dist_save.py index f05828ea92bd0..94b07ed728cd2 100644 --- a/python/paddle/incubate/distributed/utils/io/dist_save.py +++ b/python/paddle/incubate/distributed/utils/io/dist_save.py @@ -18,9 +18,9 @@ import paddle import paddle.distributed as dist +from paddle.base.framework import dygraph_only from paddle.distributed import fleet from paddle.distributed.fleet.utils.log_util import logger -from paddle.base.framework import dygraph_only from .save_for_auto import save_for_auto_inference diff --git a/python/paddle/incubate/distributed/utils/io/save_for_auto.py b/python/paddle/incubate/distributed/utils/io/save_for_auto.py index da127303ad1b7..5dd12d41218a6 100644 --- a/python/paddle/incubate/distributed/utils/io/save_for_auto.py +++ b/python/paddle/incubate/distributed/utils/io/save_for_auto.py @@ -21,12 +21,12 @@ import paddle import paddle.distributed as dist +from paddle.base.framework import dygraph_only from paddle.distributed import fleet from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage3 import ( GroupShardedStage3, ) from paddle.distributed.fleet.utils.log_util import logger -from paddle.base.framework import dygraph_only __all__ = ["save_for_auto_inference"] diff --git a/python/paddle/incubate/nn/functional/fused_dropout_add.py b/python/paddle/incubate/nn/functional/fused_dropout_add.py index c8995bb7334a8..d191f1682fdda 100644 --- a/python/paddle/incubate/nn/functional/fused_dropout_add.py +++ b/python/paddle/incubate/nn/functional/fused_dropout_add.py @@ -14,8 +14,8 @@ from paddle import _C_ops -from paddle.common_ops_import import default_main_program from paddle.base import core +from paddle.common_ops_import import default_main_program from paddle.framework import LayerHelper, in_dynamic_mode diff --git a/python/paddle/jit/dy2static/convert_operators.py b/python/paddle/jit/dy2static/convert_operators.py index 700c23189b079..535c3713100e2 100644 --- a/python/paddle/jit/dy2static/convert_operators.py +++ b/python/paddle/jit/dy2static/convert_operators.py @@ -16,10 +16,7 @@ import paddle from paddle.base.data_feeder import convert_dtype -from paddle.base.dygraph.base import ( - _convert_into_variable, - in_declarative_mode, -) +from paddle.base.dygraph.base import _convert_into_variable, in_declarative_mode 
from paddle.base.framework import Variable, core, default_main_program from .utils import ( diff --git a/python/paddle/nn/clip.py b/python/paddle/nn/clip.py index 2c27abbdc667f..c00851e3019e6 100644 --- a/python/paddle/nn/clip.py +++ b/python/paddle/nn/clip.py @@ -19,9 +19,9 @@ import paddle import paddle.autograd as imperative_base from paddle import _C_ops -from paddle.common_ops_import import Variable, check_type, default_main_program from paddle.base import core, framework, unique_name from paddle.base.data_feeder import check_variable_and_dtype +from paddle.common_ops_import import Variable, check_type, default_main_program from paddle.framework import LayerHelper, in_dynamic_mode from paddle.tensor.layer_function_generator import templatedoc diff --git a/python/paddle/nn/functional/common.py b/python/paddle/nn/functional/common.py index e231ea252f3f5..d2bb5523f810d 100644 --- a/python/paddle/nn/functional/common.py +++ b/python/paddle/nn/functional/common.py @@ -16,8 +16,8 @@ import paddle from paddle import _C_ops -from paddle.common_ops_import import Variable, default_main_program from paddle.base.layer_helper import LayerHelper +from paddle.common_ops_import import Variable, default_main_program from paddle.framework import core, in_dynamic_mode from paddle.tensor.creation import full diff --git a/python/paddle/nn/functional/conv.py b/python/paddle/nn/functional/conv.py index 5765a8d36b74c..00e45f789ae5e 100644 --- a/python/paddle/nn/functional/conv.py +++ b/python/paddle/nn/functional/conv.py @@ -13,19 +13,19 @@ # limitations under the License. from paddle import _C_ops, _legacy_C_ops, get_flags, in_dynamic_mode +from paddle.base.framework import _global_flags from paddle.device import ( get_all_custom_device_type, is_compiled_with_cuda, is_compiled_with_rocm, ) -from paddle.base.framework import _global_flags from paddle.tensor.manipulation import reshape from paddle.tensor.math import _add_with_axis -from ...common_ops_import import Variable -from ...device import get_cudnn_version from ...base.data_feeder import check_dtype, check_variable_and_dtype from ...base.layer_helper import LayerHelper +from ...common_ops_import import Variable +from ...device import get_cudnn_version from ...framework import no_grad from ...tensor.manipulation import squeeze, unsqueeze from ...utils import ( diff --git a/python/paddle/nn/functional/extension.py b/python/paddle/nn/functional/extension.py index 9963c454720d6..252afc268bf7c 100644 --- a/python/paddle/nn/functional/extension.py +++ b/python/paddle/nn/functional/extension.py @@ -18,13 +18,13 @@ from paddle import _C_ops, _legacy_C_ops, in_dynamic_mode -from ...common_ops_import import Variable from ...base.data_feeder import ( check_dtype, check_type, check_variable_and_dtype, ) from ...base.layer_helper import LayerHelper +from ...common_ops_import import Variable from ...framework import convert_np_dtype_to_dtype_, core from ...tensor.creation import assign diff --git a/python/paddle/nn/functional/input.py b/python/paddle/nn/functional/input.py index 57175ae79434d..b76c2e5ea9b80 100644 --- a/python/paddle/nn/functional/input.py +++ b/python/paddle/nn/functional/input.py @@ -14,9 +14,9 @@ from paddle import _C_ops -from ...common_ops_import import Variable from ...base.data_feeder import check_variable_and_dtype from ...base.layer_helper import LayerHelper +from ...common_ops_import import Variable from ...framework import in_dynamic_mode __all__ = [] diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py 
index d66381996a3cb..539224de32ef2 100644 --- a/python/paddle/nn/functional/loss.py +++ b/python/paddle/nn/functional/loss.py @@ -21,10 +21,10 @@ from paddle.static.nn.control_flow import Assert from paddle.utils import deprecated -from ...common_ops_import import Variable from ...base.data_feeder import check_variable_and_dtype from ...base.framework import _current_expected_place from ...base.layer_helper import LayerHelper +from ...common_ops_import import Variable from ...tensor.manipulation import reshape __all__ = [] diff --git a/python/paddle/nn/functional/vision.py b/python/paddle/nn/functional/vision.py index c382349d4622e..d49ca156f77ea 100644 --- a/python/paddle/nn/functional/vision.py +++ b/python/paddle/nn/functional/vision.py @@ -15,10 +15,10 @@ from paddle import _C_ops, _legacy_C_ops, in_dynamic_mode from paddle.base.framework import in_dygraph_mode -from ...common_ops_import import Variable -from ...device import get_cudnn_version, is_compiled_with_rocm from ...base.data_feeder import check_variable_and_dtype from ...base.layer_helper import LayerHelper +from ...common_ops_import import Variable +from ...device import get_cudnn_version, is_compiled_with_rocm __all__ = [] diff --git a/python/paddle/nn/layer/layers.py b/python/paddle/nn/layer/layers.py index 8908b38defec4..8e8c06ea2df49 100644 --- a/python/paddle/nn/layer/layers.py +++ b/python/paddle/nn/layer/layers.py @@ -2164,9 +2164,7 @@ def _transform(self, t, device, dtype, blocking): # 2. cast param / Tensor to dtype if dtype is not None and dtype != t_used.dtype: - with paddle.base.framework._dygraph_place_guard( - place=t_used.place - ): + with paddle.base.framework._dygraph_place_guard(place=t_used.place): t_casted = t_used.cast(dtype=dtype) else: t_casted = t_used diff --git a/python/paddle/nn/layer/rnn.py b/python/paddle/nn/layer/rnn.py index df8800555992e..3f5adf54d3968 100644 --- a/python/paddle/nn/layer/rnn.py +++ b/python/paddle/nn/layer/rnn.py @@ -20,7 +20,6 @@ import paddle from paddle import _C_ops, _legacy_C_ops, framework, in_dynamic_mode -from paddle.common_ops_import import Variable from paddle.base.data_feeder import check_type, check_variable_and_dtype from paddle.base.dygraph.base import NON_PERSISTABLE_VAR_NAME_SUFFIX from paddle.base.framework import ( @@ -28,6 +27,7 @@ in_dygraph_mode, program_guard, ) +from paddle.common_ops_import import Variable from paddle.framework import core from paddle.nn import functional as F from paddle.nn import initializer as I diff --git a/python/paddle/signal.py b/python/paddle/signal.py index 4529f86fbea7a..a6aa6f112d3dd 100644 --- a/python/paddle/signal.py +++ b/python/paddle/signal.py @@ -16,9 +16,9 @@ from paddle import _C_ops from paddle.framework import in_dynamic_mode -from .fft import fft_c2c, fft_c2r, fft_r2c from .base.data_feeder import check_variable_and_dtype from .base.layer_helper import LayerHelper +from .fft import fft_c2c, fft_c2r, fft_r2c from .tensor.attribute import is_complex __all__ = [ diff --git a/python/paddle/sparse/unary.py b/python/paddle/sparse/unary.py index 4fb2a8d32e1a0..b97a588af1bbe 100644 --- a/python/paddle/sparse/unary.py +++ b/python/paddle/sparse/unary.py @@ -16,13 +16,9 @@ import paddle from paddle import _C_ops, in_dynamic_mode -from paddle.common_ops_import import Variable from paddle.base.data_feeder import check_type, check_variable_and_dtype -from paddle.base.framework import ( - convert_np_dtype_to_dtype_, - core, - dygraph_only, -) +from paddle.base.framework import convert_np_dtype_to_dtype_, core, dygraph_only +from 
paddle.common_ops_import import Variable from paddle.framework import LayerHelper __all__ = [] diff --git a/python/paddle/static/nn/common.py b/python/paddle/static/nn/common.py index c424066c554ae..a5743cc2bce52 100644 --- a/python/paddle/static/nn/common.py +++ b/python/paddle/static/nn/common.py @@ -19,11 +19,6 @@ import numpy as np import paddle -from paddle.common_ops_import import ( - LayerHelper, - check_type, - check_variable_and_dtype, -) from paddle.base import core, unique_name from paddle.base.data_feeder import check_dtype from paddle.base.framework import ( @@ -38,6 +33,11 @@ from paddle.base.layers.layer_function_generator import templatedoc from paddle.base.param_attr import ParamAttr from paddle.base.wrapped_decorator import signature_safe_contextmanager +from paddle.common_ops_import import ( + LayerHelper, + check_type, + check_variable_and_dtype, +) from paddle.nn.initializer import Constant, Normal __all__ = [] diff --git a/python/paddle/tensor/array.py b/python/paddle/tensor/array.py index 752eb848aae74..f241071913341 100644 --- a/python/paddle/tensor/array.py +++ b/python/paddle/tensor/array.py @@ -14,8 +14,8 @@ # Define functions about array. -from ..common_ops_import import Variable from ..base.data_feeder import check_type, check_variable_and_dtype +from ..common_ops_import import Variable from ..framework import LayerHelper, core, in_dynamic_mode __all__ = [] diff --git a/python/paddle/tensor/attribute.py b/python/paddle/tensor/attribute.py index 561681416f5c3..60f142186809c 100644 --- a/python/paddle/tensor/attribute.py +++ b/python/paddle/tensor/attribute.py @@ -19,9 +19,9 @@ import paddle from paddle import _C_ops -from ..common_ops_import import Variable from ..base.data_feeder import check_type, check_variable_and_dtype from ..base.framework import in_dygraph_mode +from ..common_ops_import import Variable from ..framework import LayerHelper, core from .creation import _complex_to_real_dtype, assign diff --git a/python/paddle/tensor/layer_function_generator.py b/python/paddle/tensor/layer_function_generator.py index a1856cea7c4ed..02ab66eb1da2a 100644 --- a/python/paddle/tensor/layer_function_generator.py +++ b/python/paddle/tensor/layer_function_generator.py @@ -18,9 +18,9 @@ from paddle import _C_ops, _legacy_C_ops -from ..common_ops_import import Variable from ..base.data_feeder import check_variable_and_dtype from ..base.proto import framework_pb2 +from ..common_ops_import import Variable from ..framework import ( LayerHelper, OpProtoHolder, diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py index 7b916a398a67e..60c5afb99fc7b 100644 --- a/python/paddle/tensor/linalg.py +++ b/python/paddle/tensor/linalg.py @@ -18,12 +18,8 @@ from paddle import _C_ops from paddle.common_ops_import import VarDesc +from ..base.data_feeder import check_dtype, check_type, check_variable_and_dtype from ..common_ops_import import Variable -from ..base.data_feeder import ( - check_dtype, - check_type, - check_variable_and_dtype, -) from ..framework import LayerHelper, in_dynamic_mode from .creation import full from .manipulation import cast diff --git a/python/paddle/tensor/logic.py b/python/paddle/tensor/logic.py index 8723517255052..8322b66e59a06 100755 --- a/python/paddle/tensor/logic.py +++ b/python/paddle/tensor/logic.py @@ -16,8 +16,8 @@ import paddle -from ..common_ops_import import Variable from ..base.data_feeder import check_type, check_variable_and_dtype +from ..common_ops_import import Variable from .layer_function_generator import templatedoc 
Tensor = paddle.base.framework.core.eager.Tensor diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py index 63e21bc98fca0..0f9a3e4ebeb13 100644 --- a/python/paddle/tensor/math.py +++ b/python/paddle/tensor/math.py @@ -22,13 +22,13 @@ from paddle.common_ops_import import VarDesc, dygraph_only, dygraph_utils from paddle.utils.inplace_utils import inplace_apis_in_dygraph_only -from ..common_ops_import import Variable from ..base.data_feeder import ( check_dtype, check_type, check_variable_and_dtype, convert_dtype, ) +from ..common_ops_import import Variable from ..framework import ( LayerHelper, convert_np_dtype_to_dtype_, diff --git a/python/paddle/tensor/random.py b/python/paddle/tensor/random.py index e2b71457bbdde..bbfc533c5fc92 100644 --- a/python/paddle/tensor/random.py +++ b/python/paddle/tensor/random.py @@ -16,8 +16,8 @@ import paddle from paddle import _C_ops, _legacy_C_ops -from paddle.common_ops_import import Variable from paddle.base.framework import _current_expected_place +from paddle.common_ops_import import Variable from paddle.framework import in_dynamic_mode from ..base.data_feeder import ( diff --git a/python/paddle/tensor/stat.py b/python/paddle/tensor/stat.py index 6b1b54a7d0bdf..7c5a6693e8c80 100644 --- a/python/paddle/tensor/stat.py +++ b/python/paddle/tensor/stat.py @@ -18,8 +18,8 @@ from paddle import _C_ops from paddle.framework import in_dynamic_mode, in_dynamic_or_new_ir_mode -from ..common_ops_import import Variable from ..base.data_feeder import check_type, check_variable_and_dtype +from ..common_ops_import import Variable from ..framework import LayerHelper, core from .math import _get_reduce_axis_with_tensor from .search import where diff --git a/test/asp/test_fleet_with_asp_dynamic.py b/test/asp/test_fleet_with_asp_dynamic.py index 6e46785ae2b3c..03e8bbdcb8dd3 100644 --- a/test/asp/test_fleet_with_asp_dynamic.py +++ b/test/asp/test_fleet_with_asp_dynamic.py @@ -19,8 +19,8 @@ import numpy as np import paddle -from paddle.distributed import fleet from paddle.base import core +from paddle.distributed import fleet from paddle.incubate.asp import ASPHelper cuda_visible_devices = os.getenv('CUDA_VISIBLE_DEVICES') diff --git a/test/auto_parallel/test_dist_attr_v2.py b/test/auto_parallel/test_dist_attr_v2.py index 7e0fc4a3be59e..563c8a19019ce 100644 --- a/test/auto_parallel/test_dist_attr_v2.py +++ b/test/auto_parallel/test_dist_attr_v2.py @@ -20,6 +20,7 @@ import paddle import paddle.nn.functional as F from paddle import nn, static +from paddle.base.core import OperatorDistAttr, TensorDistAttr from paddle.distributed import fleet from paddle.distributed.auto_parallel.process_mesh import ProcessMesh from paddle.distributed.auto_parallel.static.dist_context import ( @@ -33,7 +34,6 @@ _copy_dist_attr_to_cpp_for_graph, ) from paddle.distributed.fleet import auto -from paddle.base.core import OperatorDistAttr, TensorDistAttr paddle.enable_static() diff --git a/test/auto_parallel/test_dist_matmul.py b/test/auto_parallel/test_dist_matmul.py index 892ed45ce92a0..a60f942db104d 100644 --- a/test/auto_parallel/test_dist_matmul.py +++ b/test/auto_parallel/test_dist_matmul.py @@ -15,9 +15,9 @@ import unittest import paddle -from paddle.distributed.fleet import auto from paddle.base import program_guard from paddle.base.backward import append_backward +from paddle.distributed.fleet import auto paddle.enable_static() diff --git a/test/auto_parallel/test_dist_op_cost.py b/test/auto_parallel/test_dist_op_cost.py index b025eceb817c0..6477d8646bca6 100644 --- 
a/test/auto_parallel/test_dist_op_cost.py +++ b/test/auto_parallel/test_dist_op_cost.py @@ -16,14 +16,14 @@ import unittest import paddle +from paddle.base import program_guard +from paddle.base.backward import append_backward from paddle.distributed.auto_parallel.static.cluster import Cluster from paddle.distributed.auto_parallel.static.operators.common import ( get_distributed_operator_impl_container, is_elementwise_op, ) from paddle.distributed.fleet import auto -from paddle.base import program_guard -from paddle.base.backward import append_backward paddle.enable_static() diff --git a/test/auto_parallel/test_dist_pnorm.py b/test/auto_parallel/test_dist_pnorm.py index eab9e7fabef0a..004f05a56168b 100644 --- a/test/auto_parallel/test_dist_pnorm.py +++ b/test/auto_parallel/test_dist_pnorm.py @@ -15,9 +15,9 @@ import unittest import paddle -from paddle.distributed.fleet import auto from paddle.base import program_guard from paddle.base.backward import append_backward +from paddle.distributed.fleet import auto paddle.enable_static() diff --git a/test/auto_parallel/test_pass_bf16.py b/test/auto_parallel/test_pass_bf16.py index ce9841ced88c7..fd6f6daf41cd1 100644 --- a/test/auto_parallel/test_pass_bf16.py +++ b/test/auto_parallel/test_pass_bf16.py @@ -19,8 +19,8 @@ import paddle from paddle import nn -from paddle.distributed.fleet import auto from paddle.base import core +from paddle.distributed.fleet import auto from paddle.static import InputSpec from paddle.static.amp.bf16.amp_utils import _valid_types from paddle.static.amp.fp16_utils import find_true_prev_op diff --git a/test/auto_parallel/test_prim_dist_op.py b/test/auto_parallel/test_prim_dist_op.py index a01e1fb90441f..99e12b2099874 100644 --- a/test/auto_parallel/test_prim_dist_op.py +++ b/test/auto_parallel/test_prim_dist_op.py @@ -15,6 +15,7 @@ import unittest import paddle +from paddle.base.layer_helper import LayerHelper from paddle.distributed.auto_parallel.static.completion import Completer from paddle.distributed.auto_parallel.static.dist_context import ( DistributedContext, @@ -23,7 +24,6 @@ from paddle.distributed.auto_parallel.static.partitioner import Partitioner from paddle.distributed.auto_parallel.static.utils import set_var_dist_attr from paddle.distributed.fleet import auto -from paddle.base.layer_helper import LayerHelper from paddle.incubate.autograd import enable_prim paddle.enable_static() diff --git a/test/auto_parallel/test_serialization.py b/test/auto_parallel/test_serialization.py index 01c17b3733570..d246db2ed8884 100644 --- a/test/auto_parallel/test_serialization.py +++ b/test/auto_parallel/test_serialization.py @@ -19,6 +19,8 @@ import paddle import paddle.nn.functional as F from paddle import nn, static +from paddle.base.core import TensorDistAttr +from paddle.base.framework import Program from paddle.distributed import fleet from paddle.distributed.auto_parallel.static.dist_context import ( DistributedContext, @@ -26,8 +28,6 @@ ) from paddle.distributed.auto_parallel.static.process_mesh_v2 import ProcessMesh from paddle.distributed.fleet import auto -from paddle.base.core import TensorDistAttr -from paddle.base.framework import Program paddle.enable_static() diff --git a/test/auto_parallel/test_shard_tensor_api.py b/test/auto_parallel/test_shard_tensor_api.py index af96c715131c6..86be83b41031a 100644 --- a/test/auto_parallel/test_shard_tensor_api.py +++ b/test/auto_parallel/test_shard_tensor_api.py @@ -16,10 +16,10 @@ import paddle import paddle.distributed as dist +from paddle.base.dygraph.base import 
switch_to_static_graph from paddle.distributed.auto_parallel.static.dist_context import ( get_default_distributed_context, ) -from paddle.fluid.dygraph.base import switch_to_static_graph class TestDistAttrBasic(unittest.TestCase): diff --git a/test/collective/fleet/auto_parallel_parallelizer.py b/test/collective/fleet/auto_parallel_parallelizer.py index a5bfe090d6904..473da21fc52d7 100755 --- a/test/collective/fleet/auto_parallel_parallelizer.py +++ b/test/collective/fleet/auto_parallel_parallelizer.py @@ -17,9 +17,9 @@ import paddle import paddle.nn.functional as F from paddle import nn, static, utils +from paddle.base import core from paddle.distributed import fleet from paddle.distributed.fleet import auto -from paddle.base import core paddle.enable_static() _global_parallel_strategy = None diff --git a/test/collective/fleet/dygraph_group_sharded_stage3.py b/test/collective/fleet/dygraph_group_sharded_stage3.py index fb2c2fec6bd3b..5587f51e1928b 100644 --- a/test/collective/fleet/dygraph_group_sharded_stage3.py +++ b/test/collective/fleet/dygraph_group_sharded_stage3.py @@ -21,6 +21,7 @@ import numpy as np import paddle +from paddle.base import core from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_optimizer_stage2 import ( GroupShardedOptimizerStage2, ) @@ -33,7 +34,6 @@ from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_utils import ( GroupShardedScaler, ) -from paddle.base import core from paddle.nn import Linear epoch = 10 diff --git a/test/collective/fleet/dygraph_group_sharded_stage3_offload.py b/test/collective/fleet/dygraph_group_sharded_stage3_offload.py index b680f343e3f25..fc7791fd09aaa 100644 --- a/test/collective/fleet/dygraph_group_sharded_stage3_offload.py +++ b/test/collective/fleet/dygraph_group_sharded_stage3_offload.py @@ -18,13 +18,13 @@ import numpy as np import paddle +from paddle.base import core from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_stage3 import ( GroupShardedStage3, ) from paddle.distributed.fleet.meta_parallel.sharding.group_sharded_utils import ( GroupShardedScaler, ) -from paddle.base import core from paddle.nn import Linear epoch = 10 diff --git a/test/collective/fleet/test_auto_checkpoint.py b/test/collective/fleet/test_auto_checkpoint.py index ccb2bfb41a90d..5382e522612d7 100644 --- a/test/collective/fleet/test_auto_checkpoint.py +++ b/test/collective/fleet/test_auto_checkpoint.py @@ -19,8 +19,8 @@ import paddle import paddle.base.incubate.checkpoint.auto_checkpoint as acp -from paddle.distributed.fleet.utils.fs import HDFSClient, LocalFS from paddle.base.incubate.checkpoint.checkpoint_saver import PaddleModel +from paddle.distributed.fleet.utils.fs import HDFSClient, LocalFS paddle.enable_static() logger = get_logger() diff --git a/test/collective/fleet/test_fleet_checkpoint.py b/test/collective/fleet/test_fleet_checkpoint.py index 79c221fd152ac..82f2f416e189c 100644 --- a/test/collective/fleet/test_fleet_checkpoint.py +++ b/test/collective/fleet/test_fleet_checkpoint.py @@ -17,9 +17,9 @@ import paddle from paddle import base -from paddle.distributed.fleet.utils.fs import HDFSClient, LocalFS from paddle.base.incubate.checkpoint.auto_checkpoint import ExeTrainStatus from paddle.base.incubate.checkpoint.checkpoint_saver import CheckpointSaver +from paddle.distributed.fleet.utils.fs import HDFSClient, LocalFS from paddle.incubate.distributed.fleet import role_maker from paddle.incubate.distributed.fleet.collective import fleet diff --git 
a/test/collective/fleet/test_fleet_gradient_merge_meta_optimizer.py b/test/collective/fleet/test_fleet_gradient_merge_meta_optimizer.py index 16e30f974e947..fc8e116f1e9e0 100644 --- a/test/collective/fleet/test_fleet_gradient_merge_meta_optimizer.py +++ b/test/collective/fleet/test_fleet_gradient_merge_meta_optimizer.py @@ -75,9 +75,7 @@ def test_gm_pure_fp16_optimizer(self): params = train_prog.all_parameters() for param in train_prog.all_parameters(): - self.assertEqual( - param.dtype, paddle.base.core.VarDesc.VarType.FP16 - ) + self.assertEqual(param.dtype, paddle.base.core.VarDesc.VarType.FP16) vars = [x.name for x in train_prog.list_vars()] self.assertIn('@GradientMerge', ''.join(vars)) diff --git a/test/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py b/test/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py index 3d85c6cd01edd..7bfdcf5723f2a 100644 --- a/test/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py +++ b/test/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py @@ -1357,9 +1357,7 @@ def func_isinstance(): with paddle.amp.auto_cast(custom_white_list=['layer_norm']): out = layer_norm(x) - self.assertTrue( - out.dtype == base.core.VarDesc.VarType.FP16 - ) + self.assertTrue(out.dtype == base.core.VarDesc.VarType.FP16) func_isinstance() diff --git a/test/collective/process_group_mpi.py b/test/collective/process_group_mpi.py index 5c0873c6234ce..3f363e90df24b 100644 --- a/test/collective/process_group_mpi.py +++ b/test/collective/process_group_mpi.py @@ -20,6 +20,8 @@ import paddle import paddle.distributed as dist +from paddle.base import core +from paddle.base.framework import _set_expected_place from paddle.distributed.collective import ( Group, _default_group_name, @@ -27,8 +29,6 @@ _set_group_map_backend, _set_group_map_by_name, ) -from paddle.base import core -from paddle.base.framework import _set_expected_place ctypes.CDLL("libmpi.so", mode=ctypes.RTLD_GLOBAL) diff --git a/test/distribution/test_distribution_bernoulli.py b/test/distribution/test_distribution_bernoulli.py index 0419268674cca..4845efcd6a29d 100644 --- a/test/distribution/test_distribution_bernoulli.py +++ b/test/distribution/test_distribution_bernoulli.py @@ -27,9 +27,9 @@ from test_distribution import DistributionNumpy import paddle +from paddle.base.data_feeder import convert_dtype from paddle.distribution import Bernoulli from paddle.distribution.kl import kl_divergence -from paddle.base.data_feeder import convert_dtype np.random.seed(2023) paddle.seed(2023) diff --git a/test/distribution/test_distribution_cauchy.py b/test/distribution/test_distribution_cauchy.py index a2ab7ca3d9b2d..d0d570a37df07 100644 --- a/test/distribution/test_distribution_cauchy.py +++ b/test/distribution/test_distribution_cauchy.py @@ -27,9 +27,9 @@ from test_distribution import DistributionNumpy import paddle +from paddle.base.data_feeder import convert_dtype from paddle.distribution import Cauchy from paddle.distribution.kl import kl_divergence -from paddle.base.data_feeder import convert_dtype np.random.seed(2023) paddle.seed(2023) diff --git a/test/dygraph_to_static/test_bert.py b/test/dygraph_to_static/test_bert.py index 2e0feffa24b58..c7b5272ff4765 100644 --- a/test/dygraph_to_static/test_bert.py +++ b/test/dygraph_to_static/test_bert.py @@ -28,9 +28,7 @@ from paddle.base import core from paddle.jit.translated_layer import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX -place = ( - base.CUDAPlace(0) if base.is_compiled_with_cuda() else base.CPUPlace() -) +place = 
base.CUDAPlace(0) if base.is_compiled_with_cuda() else base.CPUPlace() SEED = 2020 STEP_NUM = 10 PRINT_STEP = 2 diff --git a/test/dygraph_to_static/test_cache_program.py b/test/dygraph_to_static/test_cache_program.py index 0958cb0ff5716..2b8a88245de87 100644 --- a/test/dygraph_to_static/test_cache_program.py +++ b/test/dygraph_to_static/test_cache_program.py @@ -45,10 +45,7 @@ def test_cache(self): # Check forward ops prev_ops = cur_ops cur_ops = Counter( - [ - op.type - for op in base.default_main_program().block(0).ops - ] + [op.type for op in base.default_main_program().block(0).ops] ) if batch_id > 0: prev_out_numpy = ( diff --git a/test/dygraph_to_static/test_cycle_gan.py b/test/dygraph_to_static/test_cycle_gan.py index 9b8001d9f68b4..3484b27d5fac5 100644 --- a/test/dygraph_to_static/test_cycle_gan.py +++ b/test/dygraph_to_static/test_cycle_gan.py @@ -550,9 +550,7 @@ def optimizer_setting(parameters): def train(args, to_static): place = ( - base.CUDAPlace(0) - if base.is_compiled_with_cuda() - else base.CPUPlace() + base.CUDAPlace(0) if base.is_compiled_with_cuda() else base.CPUPlace() ) paddle.jit.enable_to_static(to_static) diff --git a/test/dygraph_to_static/test_dict.py b/test/dygraph_to_static/test_dict.py index 576bbffda4c05..650d7c58b0ba1 100644 --- a/test/dygraph_to_static/test_dict.py +++ b/test/dygraph_to_static/test_dict.py @@ -20,9 +20,7 @@ from paddle import base from paddle.jit import to_static -PLACE = ( - base.CUDAPlace(0) if base.is_compiled_with_cuda() else base.CPUPlace() -) +PLACE = base.CUDAPlace(0) if base.is_compiled_with_cuda() else base.CPUPlace() class SubNetWithDict(paddle.nn.Layer): diff --git a/test/dygraph_to_static/test_ifelse.py b/test/dygraph_to_static/test_ifelse.py index 253cab46f6fcc..381ba5b8daef4 100644 --- a/test/dygraph_to_static/test_ifelse.py +++ b/test/dygraph_to_static/test_ifelse.py @@ -19,6 +19,7 @@ from ifelse_simple_func import ( NetWithControlFlowIf, add_fn, + base, dyfunc_empty_nonlocal, dyfunc_ifelse_ret_int1, dyfunc_ifelse_ret_int2, @@ -28,7 +29,6 @@ dyfunc_with_if_else2, dyfunc_with_if_else3, dyfunc_with_if_else_with_list_generator, - base, if_tensor_case, if_with_and_or, if_with_and_or_1, diff --git a/test/dygraph_to_static/test_mobile_net.py b/test/dygraph_to_static/test_mobile_net.py index 0478be61f6428..607d7644763de 100644 --- a/test/dygraph_to_static/test_mobile_net.py +++ b/test/dygraph_to_static/test_mobile_net.py @@ -496,9 +496,7 @@ class Args: print_step = 1 train_step = 10 place = ( - base.CUDAPlace(0) - if base.is_compiled_with_cuda() - else base.CPUPlace() + base.CUDAPlace(0) if base.is_compiled_with_cuda() else base.CPUPlace() ) model_save_dir = None model_save_prefix = None diff --git a/test/dygraph_to_static/test_resnet.py b/test/dygraph_to_static/test_resnet.py index 4dc8170da9de9..3d4585117c977 100644 --- a/test/dygraph_to_static/test_resnet.py +++ b/test/dygraph_to_static/test_resnet.py @@ -36,9 +36,7 @@ # NOTE: Reduce batch_size from 8 to 2 to avoid unittest timeout. batch_size = 2 epoch_num = 1 -place = ( - base.CUDAPlace(0) if base.is_compiled_with_cuda() else base.CPUPlace() -) +place = base.CUDAPlace(0) if base.is_compiled_with_cuda() else base.CPUPlace() if base.is_compiled_with_cuda(): diff --git a/test/dygraph_to_static/test_resnet_amp.py b/test/dygraph_to_static/test_resnet_amp.py index afb85d18b5921..40c60520fbcad 100644 --- a/test/dygraph_to_static/test_resnet_amp.py +++ b/test/dygraph_to_static/test_resnet_amp.py @@ -26,9 +26,7 @@ # NOTE: Reduce batch_size from 8 to 2 to avoid unittest timeout. 
batch_size = 2 epoch_num = 1 -place = ( - base.CUDAPlace(0) if base.is_compiled_with_cuda() else base.CPUPlace() -) +place = base.CUDAPlace(0) if base.is_compiled_with_cuda() else base.CPUPlace() if base.is_compiled_with_cuda(): diff --git a/test/dygraph_to_static/test_save_inference_model.py b/test/dygraph_to_static/test_save_inference_model.py index 23f8633a4e681..8842b18b37a8f 100644 --- a/test/dygraph_to_static/test_save_inference_model.py +++ b/test/dygraph_to_static/test_save_inference_model.py @@ -29,9 +29,7 @@ np.random.seed(SEED) -place = ( - base.CUDAPlace(0) if base.is_compiled_with_cuda() else base.CPUPlace() -) +place = base.CUDAPlace(0) if base.is_compiled_with_cuda() else base.CPUPlace() class SimpleFcLayer(paddle.nn.Layer): diff --git a/test/dygraph_to_static/test_save_load.py b/test/dygraph_to_static/test_save_load.py index 7e1eae4858e83..c963b627d8b8f 100644 --- a/test/dygraph_to_static/test_save_load.py +++ b/test/dygraph_to_static/test_save_load.py @@ -29,9 +29,7 @@ np.random.seed(2020) -place = ( - base.CUDAPlace(0) if base.is_compiled_with_cuda() else base.CPUPlace() -) +place = base.CUDAPlace(0) if base.is_compiled_with_cuda() else base.CPUPlace() class PrimeNet(paddle.nn.Layer): diff --git a/test/dygraph_to_static/test_se_resnet.py b/test/dygraph_to_static/test_se_resnet.py index 80e7257962e62..aef9b3a2f0b6a 100644 --- a/test/dygraph_to_static/test_se_resnet.py +++ b/test/dygraph_to_static/test_se_resnet.py @@ -38,9 +38,7 @@ PRINT_STEP = 2 STEP_NUM = 10 -place = ( - base.CUDAPlace(0) if base.is_compiled_with_cuda() else base.CPUPlace() -) +place = base.CUDAPlace(0) if base.is_compiled_with_cuda() else base.CPUPlace() # Note: Set True to eliminate randomness. # 1. For one operation, cuDNN has several algorithms, diff --git a/test/dygraph_to_static/test_sentiment.py b/test/dygraph_to_static/test_sentiment.py index 8e701e48b5b81..f2692671729a8 100644 --- a/test/dygraph_to_static/test_sentiment.py +++ b/test/dygraph_to_static/test_sentiment.py @@ -304,9 +304,7 @@ class Args: def train(args, to_static): paddle.jit.enable_to_static(to_static) place = ( - base.CUDAPlace(0) - if base.is_compiled_with_cuda() - else base.CPUPlace() + base.CUDAPlace(0) if base.is_compiled_with_cuda() else base.CPUPlace() ) with base.dygraph.guard(place): diff --git a/test/dygraph_to_static/test_seq2seq.py b/test/dygraph_to_static/test_seq2seq.py index e39a4da16ba13..fee69b74bfdfe 100644 --- a/test/dygraph_to_static/test_seq2seq.py +++ b/test/dygraph_to_static/test_seq2seq.py @@ -25,9 +25,7 @@ from paddle import base from paddle.nn import ClipGradByGlobalNorm -place = ( - base.CUDAPlace(0) if base.is_compiled_with_cuda() else base.CPUPlace() -) +place = base.CUDAPlace(0) if base.is_compiled_with_cuda() else base.CPUPlace() STEP_NUM = 10 PRINT_STEP = 2 diff --git a/test/dygraph_to_static/test_transformer.py b/test/dygraph_to_static/test_transformer.py index da5a3a7edacc6..af1918d982cc0 100644 --- a/test/dygraph_to_static/test_transformer.py +++ b/test/dygraph_to_static/test_transformer.py @@ -30,9 +30,7 @@ from paddle import base trainer_count = 1 -place = ( - base.CUDAPlace(0) if base.is_compiled_with_cuda() else base.CPUPlace() -) +place = base.CUDAPlace(0) if base.is_compiled_with_cuda() else base.CPUPlace() SEED = 10 STEP_NUM = 10 diff --git a/test/dygraph_to_static/test_word2vec.py b/test/dygraph_to_static/test_word2vec.py index 82fe2e1c0d516..85edea2093d82 100644 --- a/test/dygraph_to_static/test_word2vec.py +++ b/test/dygraph_to_static/test_word2vec.py @@ -281,9 +281,7 @@ def 
train(to_static): np.random.seed(0) place = ( - base.CUDAPlace(0) - if base.is_compiled_with_cuda() - else base.CPUPlace() + base.CUDAPlace(0) if base.is_compiled_with_cuda() else base.CPUPlace() ) with base.dygraph.guard(place): base.default_startup_program().random_seed = 1000 diff --git a/test/ir/inference/quant_dequant_test.py b/test/ir/inference/quant_dequant_test.py index 2dfef926933ff..4f1e2335f5d49 100644 --- a/test/ir/inference/quant_dequant_test.py +++ b/test/ir/inference/quant_dequant_test.py @@ -285,9 +285,7 @@ def check_output_with_option( paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=500), batch_size=batch_size, ) - feeder = base.DataFeeder( - feed_list=[self.data, self.label], place=place - ) + feeder = base.DataFeeder(feed_list=[self.data, self.label], place=place) with base.scope_guard(scope): for _ in range(iters): data = next(train_reader()) diff --git a/test/ir/inference/test_trt_c_allreduce_infer_script.py b/test/ir/inference/test_trt_c_allreduce_infer_script.py index b82300ac9fb6a..98b7e38e00c2c 100644 --- a/test/ir/inference/test_trt_c_allreduce_infer_script.py +++ b/test/ir/inference/test_trt_c_allreduce_infer_script.py @@ -19,8 +19,8 @@ import numpy as np import paddle -from paddle.distributed import fleet from paddle.base import core +from paddle.distributed import fleet from paddle.inference import Config, PrecisionType, create_predictor diff --git a/test/ir/new_ir/test_pass_manager.py b/test/ir/new_ir/test_pass_manager.py index 761baaea13be8..81184eb08e8a8 100644 --- a/test/ir/new_ir/test_pass_manager.py +++ b/test/ir/new_ir/test_pass_manager.py @@ -16,7 +16,7 @@ import paddle from paddle import ir -from paddle.fluid import core +from paddle.base import core from paddle.framework import LayerHelper paddle.enable_static() diff --git a/test/legacy_test/detected_gpu.py b/test/legacy_test/detected_gpu.py index a33916714ee45..ff7d625805f6b 100644 --- a/test/legacy_test/detected_gpu.py +++ b/test/legacy_test/detected_gpu.py @@ -19,10 +19,7 @@ print("compile with cuda:", base.core.is_compiled_with_cuda()) print("get_cuda_device_count:", base.core.get_cuda_device_count()) -if ( - base.core.is_compiled_with_cuda() - and base.core.get_cuda_device_count() > 0 -): +if base.core.is_compiled_with_cuda() and base.core.get_cuda_device_count() > 0: sys.exit(0) else: sys.exit(1) diff --git a/test/legacy_test/dist_fleet_sync_batch_norm.py b/test/legacy_test/dist_fleet_sync_batch_norm.py index 56a001178b4cf..86e8e921adf10 100644 --- a/test/legacy_test/dist_fleet_sync_batch_norm.py +++ b/test/legacy_test/dist_fleet_sync_batch_norm.py @@ -21,8 +21,8 @@ import paddle from paddle import base -from paddle.distributed import fleet from paddle.base import core +from paddle.distributed import fleet from paddle.static import Executor, Program, program_guard diff --git a/test/legacy_test/distributed_fused_lamb_test_base.py b/test/legacy_test/distributed_fused_lamb_test_base.py index ec96fee570cf9..baffc7dd5e546 100644 --- a/test/legacy_test/distributed_fused_lamb_test_base.py +++ b/test/legacy_test/distributed_fused_lamb_test_base.py @@ -19,9 +19,9 @@ import numpy as np import paddle +from paddle.base import core from paddle.distributed import fleet from paddle.distributed.fleet.meta_optimizers.common import CollectiveHelper -from paddle.base import core from paddle.incubate import DistributedFusedLamb from paddle.nn.clip import ClipGradBase, _clip_by_global_norm_using_mp_type from paddle.vision.models import resnet18 as resnet diff --git 
a/test/legacy_test/test_assign_pos_op.py b/test/legacy_test/test_assign_pos_op.py index 57a4a75fb1bf3..4b039eb1bd91d 100644 --- a/test/legacy_test/test_assign_pos_op.py +++ b/test/legacy_test/test_assign_pos_op.py @@ -18,8 +18,8 @@ import numpy as np import paddle -from paddle.distributed.models.moe import utils from paddle.base import core +from paddle.distributed.models.moe import utils def assign_pos(x, _cum_count): diff --git a/test/legacy_test/test_async_read_write.py b/test/legacy_test/test_async_read_write.py index 39374fe1cdcf0..1af4e21c5c9a3 100644 --- a/test/legacy_test/test_async_read_write.py +++ b/test/legacy_test/test_async_read_write.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle.device import cuda from paddle.base import core +from paddle.device import cuda class TestAsyncRead(unittest.TestCase): diff --git a/test/legacy_test/test_auto_parallel_cost_model.py b/test/legacy_test/test_auto_parallel_cost_model.py index d3b298309656b..8725abad91ba8 100644 --- a/test/legacy_test/test_auto_parallel_cost_model.py +++ b/test/legacy_test/test_auto_parallel_cost_model.py @@ -17,6 +17,7 @@ import paddle import paddle.nn.functional as F from paddle import nn, static, utils +from paddle.base import core from paddle.distributed import fleet from paddle.distributed.auto_parallel.static.completion import Completer from paddle.distributed.auto_parallel.static.cost_model import estimate_cost @@ -29,7 +30,6 @@ from paddle.distributed.auto_parallel.static.partitioner import Partitioner from paddle.distributed.auto_parallel.static.reshard import Resharder from paddle.distributed.fleet import auto -from paddle.base import core paddle.enable_static() _global_parallel_strategy = "dp_mp_pp" diff --git a/test/legacy_test/test_auto_parallel_mapper.py b/test/legacy_test/test_auto_parallel_mapper.py index f80b637cc520c..a1159f823f605 100644 --- a/test/legacy_test/test_auto_parallel_mapper.py +++ b/test/legacy_test/test_auto_parallel_mapper.py @@ -22,6 +22,7 @@ import paddle import paddle.nn.functional as F from paddle import base, nn, static, utils +from paddle.base import core from paddle.distributed import fleet from paddle.distributed.auto_parallel.static.cluster import Cluster from paddle.distributed.auto_parallel.static.completion import Completer @@ -39,7 +40,6 @@ from paddle.distributed.auto_parallel.static.partitioner import Partitioner from paddle.distributed.auto_parallel.static.reshard import Resharder from paddle.distributed.fleet import auto -from paddle.base import core if os.getenv("CUDA_VISIBLE_DEVICES") is not None: os.environ["CUDA_VISIBLE_DEVICES"] = "" diff --git a/test/legacy_test/test_auto_search_dist_op.py b/test/legacy_test/test_auto_search_dist_op.py index 4567aafab5949..2fbf3c9d70293 100644 --- a/test/legacy_test/test_auto_search_dist_op.py +++ b/test/legacy_test/test_auto_search_dist_op.py @@ -16,6 +16,7 @@ import paddle import paddle.nn.functional as F from paddle import nn, static, utils +from paddle.base import core from paddle.distributed.auto_parallel.static.dist_attribute import ( OperatorDistAttr, ) @@ -23,7 +24,6 @@ from paddle.distributed.auto_parallel.static.operators.common import ( get_distributed_operator_impl_container, ) -from paddle.base import core paddle.enable_static() device = "gpu" if core.is_compiled_with_cuda() else "cpu" diff --git a/test/legacy_test/test_base_layer.py b/test/legacy_test/test_base_layer.py index 0ad517e00cdb1..28b8d0cac762f 100644 --- a/test/legacy_test/test_base_layer.py +++ b/test/legacy_test/test_base_layer.py @@ 
-392,9 +392,7 @@ def func_test_to_api(self): paddle.base.core.VarDesc.VarType.FP64, ) for p in self.linear.parameters(): - self.assertTrue( - isinstance(p, paddle.base.framework.EagerParamBase) - ) + self.assertTrue(isinstance(p, paddle.base.framework.EagerParamBase)) if paddle.base.is_compiled_with_cuda(): self.linear.to(device=paddle.CUDAPlace(0)) @@ -470,9 +468,7 @@ def func_test_to_api_paddle_dtype(self): paddle.base.core.VarDesc.VarType.FP64, ) for p in self.linear.parameters(): - self.assertTrue( - isinstance(p, paddle.base.framework.EagerParamBase) - ) + self.assertTrue(isinstance(p, paddle.base.framework.EagerParamBase)) def func_test_to_api_numpy_dtype(self): self.linear.to(dtype=np.float64) @@ -505,9 +501,7 @@ def func_test_to_api_numpy_dtype(self): paddle.base.core.VarDesc.VarType.FP64, ) for p in self.linear.parameters(): - self.assertTrue( - isinstance(p, paddle.base.framework.EagerParamBase) - ) + self.assertTrue(isinstance(p, paddle.base.framework.EagerParamBase)) def func_test_to_api_none_buffer(self): model = paddle.nn.Linear(2, 4) diff --git a/test/legacy_test/test_boxps.py b/test/legacy_test/test_boxps.py index 66582739c5dfa..78c7473d4fcfd 100644 --- a/test/legacy_test/test_boxps.py +++ b/test/legacy_test/test_boxps.py @@ -16,8 +16,8 @@ import paddle from paddle import base -from paddle.distributed.transpiler import collective from paddle.base import core +from paddle.distributed.transpiler import collective from paddle.incubate.layers.nn import _pull_box_sparse diff --git a/test/legacy_test/test_buffer_shared_memory_reuse_pass.py b/test/legacy_test/test_buffer_shared_memory_reuse_pass.py index ab99363f751a0..4eaa5387216f0 100644 --- a/test/legacy_test/test_buffer_shared_memory_reuse_pass.py +++ b/test/legacy_test/test_buffer_shared_memory_reuse_pass.py @@ -61,9 +61,7 @@ def build_program_and_scope(self): with base.scope_guard(scope): exe = base.Executor( - base.CUDAPlace(0) - if self.use_cuda - else base.CPUPlace() + base.CUDAPlace(0) if self.use_cuda else base.CPUPlace() ) exe.run(startup_program) diff --git a/test/legacy_test/test_checkpoint_saver.py b/test/legacy_test/test_checkpoint_saver.py index 2fe34a3385822..643ea78816579 100644 --- a/test/legacy_test/test_checkpoint_saver.py +++ b/test/legacy_test/test_checkpoint_saver.py @@ -14,8 +14,8 @@ import unittest -from paddle.distributed.fleet.utils.fs import HDFSClient from paddle.base.incubate.checkpoint.checkpoint_saver import CheckpointSaver +from paddle.distributed.fleet.utils.fs import HDFSClient class CheckpointerSaverTest(unittest.TestCase): diff --git a/test/legacy_test/test_cost_model.py b/test/legacy_test/test_cost_model.py index 1529d3432cd09..997a5c0c6c47b 100644 --- a/test/legacy_test/test_cost_model.py +++ b/test/legacy_test/test_cost_model.py @@ -15,8 +15,8 @@ import unittest import paddle -from paddle.cost_model import CostModel from paddle.base import core +from paddle.cost_model import CostModel paddle.enable_static() diff --git a/test/legacy_test/test_cuda_graph_static_mode.py b/test/legacy_test/test_cuda_graph_static_mode.py index 15df4acea3de9..746a3db02c222 100644 --- a/test/legacy_test/test_cuda_graph_static_mode.py +++ b/test/legacy_test/test_cuda_graph_static_mode.py @@ -18,8 +18,8 @@ from simple_nets import simple_fc_net_with_inputs import paddle -from paddle.device.cuda.graphs import CUDAGraph from paddle.base.dygraph.base import switch_to_static_graph +from paddle.device.cuda.graphs import CUDAGraph def can_use_cuda_graph(): diff --git a/test/legacy_test/test_cuda_graph_static_mode_error.py 
b/test/legacy_test/test_cuda_graph_static_mode_error.py index f2ef98eab5f90..a718f1b7009bd 100644 --- a/test/legacy_test/test_cuda_graph_static_mode_error.py +++ b/test/legacy_test/test_cuda_graph_static_mode_error.py @@ -18,8 +18,8 @@ from test_cuda_graph_static_mode import build_program, can_use_cuda_graph import paddle -from paddle.device.cuda.graphs import CUDAGraph from paddle.base.dygraph.base import switch_to_static_graph +from paddle.device.cuda.graphs import CUDAGraph class TestCUDAGraphInFirstBatch(unittest.TestCase): diff --git a/test/legacy_test/test_cuda_max_memory_allocated.py b/test/legacy_test/test_cuda_max_memory_allocated.py index b9f76325728b0..90e016921f8a2 100644 --- a/test/legacy_test/test_cuda_max_memory_allocated.py +++ b/test/legacy_test/test_cuda_max_memory_allocated.py @@ -15,12 +15,12 @@ import unittest import paddle +from paddle.base import core from paddle.device.cuda import ( device_count, max_memory_allocated, memory_allocated, ) -from paddle.base import core class TestMaxMemoryAllocated(unittest.TestCase): diff --git a/test/legacy_test/test_cuda_max_memory_reserved.py b/test/legacy_test/test_cuda_max_memory_reserved.py index e6ccaddbfb985..ac3b2b712e2ff 100644 --- a/test/legacy_test/test_cuda_max_memory_reserved.py +++ b/test/legacy_test/test_cuda_max_memory_reserved.py @@ -15,12 +15,12 @@ import unittest import paddle +from paddle.base import core from paddle.device.cuda import ( device_count, max_memory_reserved, memory_reserved, ) -from paddle.base import core class TestMaxMemoryreserved(unittest.TestCase): diff --git a/test/legacy_test/test_cuda_memory_allocated.py b/test/legacy_test/test_cuda_memory_allocated.py index bd5b294e735f8..3e4c258940659 100644 --- a/test/legacy_test/test_cuda_memory_allocated.py +++ b/test/legacy_test/test_cuda_memory_allocated.py @@ -15,8 +15,8 @@ import unittest import paddle -from paddle.device.cuda import device_count, memory_allocated from paddle.base import core +from paddle.device.cuda import device_count, memory_allocated class TestMemoryAllocated(unittest.TestCase): diff --git a/test/legacy_test/test_cuda_memory_reserved.py b/test/legacy_test/test_cuda_memory_reserved.py index 0a119aa3c30a0..d639eab054ff5 100644 --- a/test/legacy_test/test_cuda_memory_reserved.py +++ b/test/legacy_test/test_cuda_memory_reserved.py @@ -15,8 +15,8 @@ import unittest import paddle -from paddle.device.cuda import device_count, memory_reserved from paddle.base import core +from paddle.device.cuda import device_count, memory_reserved class TestMemoryreserved(unittest.TestCase): diff --git a/test/legacy_test/test_dataset.py b/test/legacy_test/test_dataset.py index b9a94767334e2..fd4141c1c8b64 100644 --- a/test/legacy_test/test_dataset.py +++ b/test/legacy_test/test_dataset.py @@ -285,9 +285,7 @@ def test_in_memory_dataset_run(self): else: for i in range(self.epoch_num): try: - exe.train_from_dataset( - base.default_main_program(), dataset - ) + exe.train_from_dataset(base.default_main_program(), dataset) except Exception as e: self.assertTrue(False) @@ -552,9 +550,7 @@ def test_in_memory_dataset_run_2(self): else: for i in range(self.epoch_num): try: - exe.train_from_dataset( - base.default_main_program(), dataset - ) + exe.train_from_dataset(base.default_main_program(), dataset) except Exception as e: self.assertTrue(False) @@ -638,9 +634,7 @@ def test_queue_dataset_run(self): else: for i in range(self.epoch_num): try: - exe.train_from_dataset( - base.default_main_program(), dataset - ) + exe.train_from_dataset(base.default_main_program(), 
dataset) except Exception as e: self.assertTrue(False) @@ -710,9 +704,7 @@ def test_queue_dataset_run_2(self): else: for i in range(self.epoch_num): try: - exe.train_from_dataset( - base.default_main_program(), dataset - ) + exe.train_from_dataset(base.default_main_program(), dataset) except Exception as e: self.assertTrue(False) @@ -776,9 +768,7 @@ def test_queue_dataset_run_3(self): else: for i in range(self.epoch_num): try: - exe.train_from_dataset( - base.default_main_program(), dataset - ) + exe.train_from_dataset(base.default_main_program(), dataset) except Exception as e: self.assertTrue(False) diff --git a/test/legacy_test/test_debugger.py b/test/legacy_test/test_debugger.py index bddd9ad540b1d..2acda1972fcc6 100644 --- a/test/legacy_test/test_debugger.py +++ b/test/legacy_test/test_debugger.py @@ -14,9 +14,9 @@ import unittest -from paddle.distributed.fleet.base.util_factory import draw_block_graphviz from paddle.base import core from paddle.base.framework import Program +from paddle.distributed.fleet.base.util_factory import draw_block_graphviz class TestDrawBlockGraphviz(unittest.TestCase): diff --git a/test/legacy_test/test_dist_base.py b/test/legacy_test/test_dist_base.py index 8e6a1c3c9c9ee..2e6895b717579 100755 --- a/test/legacy_test/test_dist_base.py +++ b/test/legacy_test/test_dist_base.py @@ -29,10 +29,10 @@ import paddle from paddle import base +from paddle.base import compiler from paddle.distributed.fleet.meta_optimizers import ( RawProgramOptimizer as RawProgram, ) -from paddle.base import compiler from paddle.incubate.distributed.fleet import role_maker from paddle.incubate.distributed.fleet.collective import ( DistributedStrategy, diff --git a/test/legacy_test/test_einsum.py b/test/legacy_test/test_einsum.py index 551c7948191f8..830474de90f31 100644 --- a/test/legacy_test/test_einsum.py +++ b/test/legacy_test/test_einsum.py @@ -179,9 +179,7 @@ def test_forward(self): expected_result = np.einsum(self.sample["paradigm"], *operands) equation = self.sample["paradigm"] - with paddle.base.dygraph.guard( - self._get_place(force_to_use_cpu=False) - ): + with paddle.base.dygraph.guard(self._get_place(force_to_use_cpu=False)): pd_operands = [paddle.to_tensor(operand) for operand in operands] result = paddle.einsum(equation, *pd_operands) self.check_output_equal(result.numpy(), expected_result) @@ -348,9 +346,7 @@ def check_output_equal(self, actual, expect, rtol=1.0e-5, atol=1.0e-8): def check_output(self, eqn, *ops): expect = np.einsum(eqn, *ops) - with paddle.base.dygraph.guard( - self._get_place(force_to_use_cpu=False) - ): + with paddle.base.dygraph.guard(self._get_place(force_to_use_cpu=False)): pd_operands = [paddle.to_tensor(op) for op in ops] actual = paddle.einsum(eqn, *pd_operands) self.check_output_equal(actual.numpy(), expect) diff --git a/test/legacy_test/test_einsum_v2.py b/test/legacy_test/test_einsum_v2.py index cb93963b7dd81..4ce8b7519bc8a 100644 --- a/test/legacy_test/test_einsum_v2.py +++ b/test/legacy_test/test_einsum_v2.py @@ -181,9 +181,7 @@ def test_forward(self): expected_result = np.einsum(self.sample["paradigm"], *operands) equation = self.sample["paradigm"] - with paddle.base.dygraph.guard( - self._get_place(force_to_use_cpu=False) - ): + with paddle.base.dygraph.guard(self._get_place(force_to_use_cpu=False)): pd_operands = [paddle.to_tensor(operand) for operand in operands] result = paddle.einsum(equation, *pd_operands) self.check_output_equal(result.numpy(), expected_result) @@ -390,9 +388,7 @@ def check_output_equal(self, actual, expect, 
rtol=1.0e-5, atol=1.0e-8): def check_output(self, eqn, *ops): expect = np.einsum(eqn, *ops) - with paddle.base.dygraph.guard( - self._get_place(force_to_use_cpu=False) - ): + with paddle.base.dygraph.guard(self._get_place(force_to_use_cpu=False)): pd_operands = [paddle.to_tensor(op) for op in ops] actual = paddle.einsum(eqn, *pd_operands) self.check_output_equal(actual.numpy(), expect) diff --git a/test/legacy_test/test_elementwise_add_op.py b/test/legacy_test/test_elementwise_add_op.py index 4a9ecc51aa73a..8630ef593be64 100644 --- a/test/legacy_test/test_elementwise_add_op.py +++ b/test/legacy_test/test_elementwise_add_op.py @@ -187,12 +187,8 @@ def setUp(self): self.axis = -1 self.inputs = { - 'X': OpTest.np_dtype_to_base_dtype( - convert_float_to_uint16(self.x) - ), - 'Y': OpTest.np_dtype_to_base_dtype( - convert_float_to_uint16(self.y) - ), + 'X': OpTest.np_dtype_to_base_dtype(convert_float_to_uint16(self.x)), + 'Y': OpTest.np_dtype_to_base_dtype(convert_float_to_uint16(self.y)), } self.attrs = {'axis': self.axis, 'use_mkldnn': False} self.outputs = {'Out': convert_float_to_uint16(self.out)} diff --git a/test/legacy_test/test_elementwise_mod_op.py b/test/legacy_test/test_elementwise_mod_op.py index d75ae9e0a181e..eb3ba740ea5e2 100644 --- a/test/legacy_test/test_elementwise_mod_op.py +++ b/test/legacy_test/test_elementwise_mod_op.py @@ -177,12 +177,8 @@ def setUp(self): self.init_kernel_type() self.init_axis() self.inputs = { - 'X': convert_float_to_uint16( - OpTest.np_dtype_to_base_dtype(self.x) - ), - 'Y': convert_float_to_uint16( - OpTest.np_dtype_to_base_dtype(self.y) - ), + 'X': convert_float_to_uint16(OpTest.np_dtype_to_base_dtype(self.x)), + 'Y': convert_float_to_uint16(OpTest.np_dtype_to_base_dtype(self.y)), } self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} self.outputs = {'Out': convert_float_to_uint16(self.out)} diff --git a/test/legacy_test/test_elementwise_mul_op.py b/test/legacy_test/test_elementwise_mul_op.py index 281087ce26420..6da50f02c163d 100644 --- a/test/legacy_test/test_elementwise_mul_op.py +++ b/test/legacy_test/test_elementwise_mul_op.py @@ -182,12 +182,8 @@ def setUp(self): self.axis = -1 self.inputs = { - 'X': OpTest.np_dtype_to_base_dtype( - convert_float_to_uint16(self.x) - ), - 'Y': OpTest.np_dtype_to_base_dtype( - convert_float_to_uint16(self.y) - ), + 'X': OpTest.np_dtype_to_base_dtype(convert_float_to_uint16(self.x)), + 'Y': OpTest.np_dtype_to_base_dtype(convert_float_to_uint16(self.y)), } self.outputs = {'Out': convert_float_to_uint16(self.out)} self.attrs = {'axis': self.axis, 'use_mkldnn': False} diff --git a/test/legacy_test/test_fetch_handler.py b/test/legacy_test/test_fetch_handler.py index 5a41a6ea33b97..1d588a5d0339b 100644 --- a/test/legacy_test/test_fetch_handler.py +++ b/test/legacy_test/test_fetch_handler.py @@ -52,9 +52,7 @@ def handler(self, fetch_dict): var_dict={'emb': var_emb, 'emb2': None, 'emb3': var_emb3}, period_secs=1, ) - default_fm = base.trainer_factory.FetchHandlerMonitor( - scope, default_fh - ) + default_fm = base.trainer_factory.FetchHandlerMonitor(scope, default_fh) default_fm.start() time.sleep(5) default_fm.stop() diff --git a/test/legacy_test/test_fleet_executor_cond_interceptor.py b/test/legacy_test/test_fleet_executor_cond_interceptor.py index f6c02b5d815b0..463a7f2983879 100644 --- a/test/legacy_test/test_fleet_executor_cond_interceptor.py +++ b/test/legacy_test/test_fleet_executor_cond_interceptor.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle.distributed.fleet.fleet_executor_utils 
import TaskNode from paddle.base import core +from paddle.distributed.fleet.fleet_executor_utils import TaskNode paddle.enable_static() diff --git a/test/legacy_test/test_fleet_executor_task_node.py b/test/legacy_test/test_fleet_executor_task_node.py index a15547f4f4551..1a2fa9be9bb46 100644 --- a/test/legacy_test/test_fleet_executor_task_node.py +++ b/test/legacy_test/test_fleet_executor_task_node.py @@ -15,8 +15,8 @@ import unittest import paddle -from paddle.distributed.fleet.fleet_executor_utils import TaskNode from paddle.base import core +from paddle.distributed.fleet.fleet_executor_utils import TaskNode paddle.enable_static() diff --git a/test/legacy_test/test_fused_attention_pass.py b/test/legacy_test/test_fused_attention_pass.py index db938ab39974e..37a356ea64b70 100644 --- a/test/legacy_test/test_fused_attention_pass.py +++ b/test/legacy_test/test_fused_attention_pass.py @@ -18,8 +18,8 @@ import paddle import paddle.nn.functional as F -from paddle.distributed.passes import PassManager, new_pass from paddle.base import core +from paddle.distributed.passes import PassManager, new_pass paddle.enable_static() diff --git a/test/legacy_test/test_fused_feedforward_pass.py b/test/legacy_test/test_fused_feedforward_pass.py index 9fe700f6a683e..03240d88d2d8b 100644 --- a/test/legacy_test/test_fused_feedforward_pass.py +++ b/test/legacy_test/test_fused_feedforward_pass.py @@ -18,8 +18,8 @@ import paddle from paddle import nn -from paddle.distributed.passes import PassManager, new_pass from paddle.base import core +from paddle.distributed.passes import PassManager, new_pass paddle.enable_static() diff --git a/test/legacy_test/test_get_device_properties.py b/test/legacy_test/test_get_device_properties.py index 0cbfc4d16b0f4..41b7f94ad764c 100644 --- a/test/legacy_test/test_get_device_properties.py +++ b/test/legacy_test/test_get_device_properties.py @@ -14,8 +14,8 @@ import unittest -from paddle.device.cuda import device_count, get_device_properties from paddle.base import core +from paddle.device.cuda import device_count, get_device_properties class TestGetDeviceProperties(unittest.TestCase): diff --git a/test/legacy_test/test_imperative_optimizer.py b/test/legacy_test/test_imperative_optimizer.py index 8f6e37fef04d0..a9af79fca6afa 100644 --- a/test/legacy_test/test_imperative_optimizer.py +++ b/test/legacy_test/test_imperative_optimizer.py @@ -19,8 +19,8 @@ import paddle from paddle import base -from paddle.distributed.fleet.meta_optimizers import DGCMomentumOptimizer from paddle.base import core +from paddle.distributed.fleet.meta_optimizers import DGCMomentumOptimizer # Note(wangzhongpu) # In dygraph, don't support ModelAverage, DGCMomentumOptimizer, ExponentialMovingAverage, PipelineOptimizer, LookaheadOptimizer, RecomputeOptimizer. diff --git a/test/legacy_test/test_imperative_optimizer_v2.py b/test/legacy_test/test_imperative_optimizer_v2.py index 3262c5a05dd8b..0000bd49ccb08 100644 --- a/test/legacy_test/test_imperative_optimizer_v2.py +++ b/test/legacy_test/test_imperative_optimizer_v2.py @@ -20,8 +20,8 @@ import paddle from paddle import base -from paddle.distributed.fleet.meta_optimizers import DGCMomentumOptimizer from paddle.base import core +from paddle.distributed.fleet.meta_optimizers import DGCMomentumOptimizer # Note(wangzhongpu) # In dygraph, don't support ModelAverage, DGCMomentumOptimizer, ExponentialMovingAverage, PipelineOptimizer, LookaheadOptimizer, RecomputeOptimizer. 
diff --git a/test/legacy_test/test_inference_model_io.py b/test/legacy_test/test_inference_model_io.py index 2f54e934818f3..2e179cf90276e 100644 --- a/test/legacy_test/test_inference_model_io.py +++ b/test/legacy_test/test_inference_model_io.py @@ -22,13 +22,13 @@ import paddle from paddle import base +from paddle.base import core, executor +from paddle.base.compiler import CompiledProgram +from paddle.base.framework import Program, program_guard from paddle.distributed.io import ( load_inference_model_distributed, save_persistables, ) -from paddle.base import core, executor -from paddle.base.compiler import CompiledProgram -from paddle.base.framework import Program, program_guard from paddle.static.io import load_inference_model, save_inference_model paddle.enable_static() diff --git a/test/legacy_test/test_jit_save_load.py b/test/legacy_test/test_jit_save_load.py index 1edb99e1ea77f..e2df76f475194 100644 --- a/test/legacy_test/test_jit_save_load.py +++ b/test/legacy_test/test_jit_save_load.py @@ -449,9 +449,7 @@ def tearDown(self): self.temp_dir.cleanup() def test_nest_output(self): - x = base.dygraph.to_variable( - np.random.random((4, 8)).astype('float32') - ) + x = base.dygraph.to_variable(np.random.random((4, 8)).astype('float32')) net = LinearNetWithNestOut(8, 8) dy_outs = paddle.utils.flatten(net(x)) @@ -567,9 +565,7 @@ def test_with_input_spec(self): # 2. load to infer infer_layer = paddle.jit.load(model_path) - x = base.dygraph.to_variable( - np.random.random((4, 8)).astype('float32') - ) + x = base.dygraph.to_variable(np.random.random((4, 8)).astype('float32')) pred = infer_layer(x) def test_multi_in_out(self): @@ -591,12 +587,8 @@ def test_multi_in_out(self): # 3. load to infer infer_layer = paddle.jit.load(model_path) - x = base.dygraph.to_variable( - np.random.random((4, 8)).astype('float32') - ) - y = base.dygraph.to_variable( - np.random.random((4, 8)).astype('float32') - ) + x = base.dygraph.to_variable(np.random.random((4, 8)).astype('float32')) + y = base.dygraph.to_variable(np.random.random((4, 8)).astype('float32')) # 4. predict pred_x, pred_y = infer_layer(x, y) @@ -633,12 +625,8 @@ def test_multi_in_out1(self): # 3. load to infer infer_layer = paddle.jit.load(model_path) - x = base.dygraph.to_variable( - np.random.random((4, 8)).astype('float32') - ) - y = base.dygraph.to_variable( - np.random.random((4, 8)).astype('float32') - ) + x = base.dygraph.to_variable(np.random.random((4, 8)).astype('float32')) + y = base.dygraph.to_variable(np.random.random((4, 8)).astype('float32')) # 4. 
predict pred_x, pred_y = infer_layer(x, y) @@ -680,9 +668,7 @@ def test_output_spec(self): adam = paddle.optimizer.Adam( learning_rate=0.1, parameters=train_layer.parameters() ) - x = base.dygraph.to_variable( - np.random.random((4, 8)).astype('float32') - ) + x = base.dygraph.to_variable(np.random.random((4, 8)).astype('float32')) for i in range(10): out, loss = train_layer(x) loss.backward() @@ -702,9 +688,7 @@ def test_output_spec(self): train_layer.eval() infer_layer = paddle.jit.load(model_path) - x = base.dygraph.to_variable( - np.random.random((4, 8)).astype('float32') - ) + x = base.dygraph.to_variable(np.random.random((4, 8)).astype('float32')) np.testing.assert_array_equal( train_layer(x)[0].numpy(), infer_layer(x).numpy() ) @@ -788,9 +772,7 @@ def train_and_save(self): adam = paddle.optimizer.Adam( learning_rate=0.1, parameters=train_layer.parameters() ) - x = base.dygraph.to_variable( - np.random.random((4, 8)).astype('float32') - ) + x = base.dygraph.to_variable(np.random.random((4, 8)).astype('float32')) for i in range(10): hidden, loss = train_layer(x) loss.backward() @@ -813,9 +795,7 @@ def test_load_pruned_model(self): infer_layer = paddle.jit.load(self.model_path) - x = base.dygraph.to_variable( - np.random.random((4, 8)).astype('float32') - ) + x = base.dygraph.to_variable(np.random.random((4, 8)).astype('float32')) np.testing.assert_array_equal( train_layer(x)[0].numpy(), infer_layer(x).numpy() ) diff --git a/test/legacy_test/test_limit_by_capacity_op.py b/test/legacy_test/test_limit_by_capacity_op.py index a897f35578a5b..066345d584824 100644 --- a/test/legacy_test/test_limit_by_capacity_op.py +++ b/test/legacy_test/test_limit_by_capacity_op.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle.distributed.models.moe import utils from paddle.base import core +from paddle.distributed.models.moe import utils def limit_by_capacity(expert_count, _capacity, n_worker): diff --git a/test/legacy_test/test_lookup_table_bf16_op.py b/test/legacy_test/test_lookup_table_bf16_op.py index 204992f0c1c42..942a9cc3ccd74 100644 --- a/test/legacy_test/test_lookup_table_bf16_op.py +++ b/test/legacy_test/test_lookup_table_bf16_op.py @@ -24,7 +24,7 @@ from op import Operator import paddle -from paddle import enable_static, base +from paddle import base, enable_static from paddle.base import core diff --git a/test/legacy_test/test_number_count_op.py b/test/legacy_test/test_number_count_op.py index b877a40026dc2..2ea68fd82d7ec 100644 --- a/test/legacy_test/test_number_count_op.py +++ b/test/legacy_test/test_number_count_op.py @@ -18,8 +18,8 @@ import numpy as np import paddle -from paddle.distributed.models.moe import utils from paddle.base import core +from paddle.distributed.models.moe import utils def count(x, upper_num): diff --git a/test/legacy_test/test_paddle_save_load.py b/test/legacy_test/test_paddle_save_load.py index f5d87ab70ea0f..cb2e7e47afe86 100644 --- a/test/legacy_test/test_paddle_save_load.py +++ b/test/legacy_test/test_paddle_save_load.py @@ -570,9 +570,7 @@ def test_save_load_complex_object_dygraph_save(self): ) self.assertTrue(load_tensor2['epoch'] == 123) - self.assertTrue( - isinstance(load_tensor3[0], paddle.base.core.LoDTensor) - ) + self.assertTrue(isinstance(load_tensor3[0], paddle.base.core.LoDTensor)) np.testing.assert_array_equal( np.array(load_tensor3[0]), obj3[0].numpy() ) diff --git a/test/legacy_test/test_parallel_executor_transformer.py b/test/legacy_test/test_parallel_executor_transformer.py index cdf24938e2573..2cca066c2a9c7 100644 --- 
a/test/legacy_test/test_parallel_executor_transformer.py +++ b/test/legacy_test/test_parallel_executor_transformer.py @@ -21,8 +21,8 @@ from parallel_executor_test_base import DeviceType, TestParallelExecutorBase import paddle -from paddle.dataset import wmt16 from paddle.base import core +from paddle.dataset import wmt16 os.environ['CPU_NUM'] = str(4) diff --git a/test/legacy_test/test_prune_gate_by_capacity_op.py b/test/legacy_test/test_prune_gate_by_capacity_op.py index 966651e22ff1c..602c2ad787b60 100644 --- a/test/legacy_test/test_prune_gate_by_capacity_op.py +++ b/test/legacy_test/test_prune_gate_by_capacity_op.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle.distributed.models.moe import utils from paddle.base import core +from paddle.distributed.models.moe import utils def count(x, upper_num): diff --git a/test/legacy_test/test_py_func_op.py b/test/legacy_test/test_py_func_op.py index 3164feab034b5..1706ad14d644d 100644 --- a/test/legacy_test/test_py_func_op.py +++ b/test/legacy_test/test_py_func_op.py @@ -199,9 +199,7 @@ def test_main(use_cuda, use_py_func_op, use_parallel_executor): train_cp = base.default_main_program() if use_parallel_executor: - train_cp = compiler.CompiledProgram( - base.default_main_program() - ) + train_cp = compiler.CompiledProgram(base.default_main_program()) fetch_list = [loss.name] else: fetch_list = [loss] diff --git a/test/legacy_test/test_random_routing_op.py b/test/legacy_test/test_random_routing_op.py index 5bc1752ecc993..21a1746dd057f 100644 --- a/test/legacy_test/test_random_routing_op.py +++ b/test/legacy_test/test_random_routing_op.py @@ -17,8 +17,8 @@ import numpy as np import paddle -from paddle.distributed.models.moe import utils from paddle.base import core +from paddle.distributed.models.moe import utils def random_routing(topk_idx, topk_value, prob, topk=2): diff --git a/test/legacy_test/test_spawn_and_init_parallel_env.py b/test/legacy_test/test_spawn_and_init_parallel_env.py index 8c9e3d4dde836..69a35448b707d 100644 --- a/test/legacy_test/test_spawn_and_init_parallel_env.py +++ b/test/legacy_test/test_spawn_and_init_parallel_env.py @@ -18,12 +18,12 @@ import paddle import paddle.distributed as dist +from paddle.base import core from paddle.distributed.spawn import ( _get_default_nprocs, _get_subprocess_env_list, _options_valid_check, ) -from paddle.base import core # NOTE(chenweihang): Coverage CI is currently not able to count python3 # unittest, so the unittests here covers some cases that will only be diff --git a/test/legacy_test/test_sum_op.py b/test/legacy_test/test_sum_op.py index db71738e61e00..fc37437409063 100644 --- a/test/legacy_test/test_sum_op.py +++ b/test/legacy_test/test_sum_op.py @@ -29,7 +29,7 @@ import paddle import paddle.inference as paddle_infer -from paddle import enable_static, base +from paddle import base, enable_static from paddle.base import core from paddle.base.layer_helper import LayerHelper diff --git a/test/legacy_test/test_where_op.py b/test/legacy_test/test_where_op.py index 12fc6ea887829..3daa2fbbb6898 100644 --- a/test/legacy_test/test_where_op.py +++ b/test/legacy_test/test_where_op.py @@ -148,9 +148,7 @@ def test_api(self, use_cuda=False): result.stop_gradient = False append_backward(paddle.mean(result)) for use_cuda in [False, True]: - if use_cuda and ( - not base.core.is_compiled_with_cuda() - ): + if use_cuda and (not base.core.is_compiled_with_cuda()): break place = ( base.CUDAPlace(0) if use_cuda else base.CPUPlace() diff --git a/test/mkldnn/test_layer_norm_bf16_mkldnn_op.py 
b/test/mkldnn/test_layer_norm_bf16_mkldnn_op.py index f3bd7f731b361..6e28472d5e9e0 100644 --- a/test/mkldnn/test_layer_norm_bf16_mkldnn_op.py +++ b/test/mkldnn/test_layer_norm_bf16_mkldnn_op.py @@ -24,7 +24,7 @@ _reference_layer_norm_naive, ) -from paddle import enable_static, base +from paddle import base, enable_static from paddle.base import core np.random.random(123) diff --git a/test/mkldnn/test_layer_norm_mkldnn_op.py b/test/mkldnn/test_layer_norm_mkldnn_op.py index 9db2b1966607d..4533ccd05179d 100644 --- a/test/mkldnn/test_layer_norm_mkldnn_op.py +++ b/test/mkldnn/test_layer_norm_mkldnn_op.py @@ -20,7 +20,7 @@ import numpy as np from eager_op_test import OpTestTool, _set_use_system_allocator -from paddle import enable_static, base +from paddle import base, enable_static from paddle.base import core np.random.random(123) diff --git a/test/prim/model/bert.py b/test/prim/model/bert.py index 2b0805c6123e2..f7cf05f7ca243 100644 --- a/test/prim/model/bert.py +++ b/test/prim/model/bert.py @@ -21,8 +21,8 @@ import paddle import paddle.nn.functional as F from paddle import Tensor, nn -from paddle.distributed.fleet.utils import recompute from paddle.base.data_feeder import convert_dtype +from paddle.distributed.fleet.utils import recompute from paddle.io import DataLoader, Dataset from paddle.nn import MultiHeadAttention diff --git a/test/prim/model/test_bert_cinn.py b/test/prim/model/test_bert_cinn.py index c1bb108f58fee..3ae1bcb27aeea 100644 --- a/test/prim/model/test_bert_cinn.py +++ b/test/prim/model/test_bert_cinn.py @@ -21,8 +21,8 @@ import paddle from paddle import base -from paddle.dataset.common import DATA_HOME, download from paddle.base import core +from paddle.dataset.common import DATA_HOME, download SEED = 2023 BATCH_SIZE = 2 diff --git a/test/prim/model/test_bert_prim.py b/test/prim/model/test_bert_prim.py index 22e3cf856ae85..74a65e2f0761c 100644 --- a/test/prim/model/test_bert_prim.py +++ b/test/prim/model/test_bert_prim.py @@ -21,8 +21,8 @@ import paddle from paddle import base -from paddle.dataset.common import DATA_HOME, download from paddle.base import core +from paddle.dataset.common import DATA_HOME, download SEED = 2023 BATCH_SIZE = 2 diff --git a/test/prim/model/test_bert_prim_cinn.py b/test/prim/model/test_bert_prim_cinn.py index 21dd6c202baac..46e8d63b7a572 100644 --- a/test/prim/model/test_bert_prim_cinn.py +++ b/test/prim/model/test_bert_prim_cinn.py @@ -21,8 +21,8 @@ import paddle from paddle import base -from paddle.dataset.common import DATA_HOME, download from paddle.base import core +from paddle.dataset.common import DATA_HOME, download SEED = 2023 BATCH_SIZE = 2 diff --git a/test/quantization/quant2_int8_lstm_model.py b/test/quantization/quant2_int8_lstm_model.py index 73659a8ac019c..5bbb378e9c35e 100644 --- a/test/quantization/quant2_int8_lstm_model.py +++ b/test/quantization/quant2_int8_lstm_model.py @@ -81,9 +81,7 @@ def get_warmup_tensor(self, data_path, place): [len(feat) // 4 // 8, 8] ) lod_feat = [feat.shape[0]] - minputs = paddle.base.create_lod_tensor( - feat, [lod_feat], place - ) + minputs = paddle.base.create_lod_tensor(feat, [lod_feat], place) infer_data = core.PaddleTensor() infer_data.lod = minputs.lod() diff --git a/test/rnn/test_rnn_cells.py b/test/rnn/test_rnn_cells.py index 4a0792b812d54..4bb6f49963f84 100644 --- a/test/rnn/test_rnn_cells.py +++ b/test/rnn/test_rnn_cells.py @@ -186,9 +186,7 @@ def runTest(self): def load_tests(loader, tests, pattern): suite = unittest.TestSuite() - devices = ( - ["cpu", "gpu"] if 
paddle.base.is_compiled_with_cuda() else ["cpu"] - ) + devices = ["cpu", "gpu"] if paddle.base.is_compiled_with_cuda() else ["cpu"] for bias in [True, False]: for device in devices: for test_class in [TestSimpleRNNCell, TestGRUCell, TestLSTMCell]: diff --git a/test/rnn/test_rnn_cells_static.py b/test/rnn/test_rnn_cells_static.py index 21a513e0c4e0b..df0dbb11bbb51 100644 --- a/test/rnn/test_rnn_cells_static.py +++ b/test/rnn/test_rnn_cells_static.py @@ -342,9 +342,7 @@ def runTest(self): def load_tests(loader, tests, pattern): suite = unittest.TestSuite() - devices = ( - ["cpu", "gpu"] if paddle.base.is_compiled_with_cuda() else ["cpu"] - ) + devices = ["cpu", "gpu"] if paddle.base.is_compiled_with_cuda() else ["cpu"] for bias in [True, False]: for device in devices: for test_class in [TestSimpleRNNCell, TestGRUCell, TestLSTMCell]: diff --git a/test/rnn/test_rnn_nets.py b/test/rnn/test_rnn_nets.py index e75c83007b6a3..734dcae0fde56 100644 --- a/test/rnn/test_rnn_nets.py +++ b/test/rnn/test_rnn_nets.py @@ -359,9 +359,7 @@ def forward(self, input): def load_tests(loader, tests, pattern): suite = unittest.TestSuite() - devices = ( - ["cpu", "gpu"] if paddle.base.is_compiled_with_cuda() else ["cpu"] - ) + devices = ["cpu", "gpu"] if paddle.base.is_compiled_with_cuda() else ["cpu"] for direction in ["forward", "bidirectional", "bidirect"]: for time_major in [True, False]: for device in devices: diff --git a/test/rnn/test_rnn_nets_static.py b/test/rnn/test_rnn_nets_static.py index b2b91685f3c5c..20b8a7975e8c2 100644 --- a/test/rnn/test_rnn_nets_static.py +++ b/test/rnn/test_rnn_nets_static.py @@ -507,9 +507,7 @@ def runTest(self): def load_tests(loader, tests, pattern): suite = unittest.TestSuite() - devices = ( - ["cpu", "gpu"] if paddle.base.is_compiled_with_cuda() else ["cpu"] - ) + devices = ["cpu", "gpu"] if paddle.base.is_compiled_with_cuda() else ["cpu"] for direction in ["forward", "bidirectional", "bidirect"]: for time_major in [True, False]: for device in devices: diff --git a/test/rnn/test_wrappers.py b/test/rnn/test_wrappers.py index c6dd9692798de..957d5894b9885 100644 --- a/test/rnn/test_wrappers.py +++ b/test/rnn/test_wrappers.py @@ -195,9 +195,7 @@ def runTest(self): def load_tests(loader, tests, pattern): suite = unittest.TestSuite() - devices = ( - ["cpu", "gpu"] if paddle.base.is_compiled_with_cuda() else ["cpu"] - ) + devices = ["cpu", "gpu"] if paddle.base.is_compiled_with_cuda() else ["cpu"] for direction in ["forward", "backward"]: for device in devices: for time_major in [False]: diff --git a/test/standalone_executor/test_standalone_executor_multi_micro_batch.py b/test/standalone_executor/test_standalone_executor_multi_micro_batch.py index f814a18643ede..f44e23ddf8dc2 100644 --- a/test/standalone_executor/test_standalone_executor_multi_micro_batch.py +++ b/test/standalone_executor/test_standalone_executor_multi_micro_batch.py @@ -19,10 +19,10 @@ import numpy as np import paddle -from paddle.distributed.passes.pass_utils import get_skip_gc_vars, split_program from paddle.base import core from paddle.base.core import Job, Plan from paddle.base.executor import _add_feed_fetch_ops, _StandaloneExecutor +from paddle.distributed.passes.pass_utils import get_skip_gc_vars, split_program from paddle.nn import TransformerEncoderLayer paddle.enable_static() diff --git a/tools/parse_kernel_info.py b/tools/parse_kernel_info.py index 8a2edf06f7d89..23106ab0d2ebb 100644 --- a/tools/parse_kernel_info.py +++ b/tools/parse_kernel_info.py @@ -94,9 +94,7 @@ def parse_paddle_kernels(lib="phi", 
kernel_type="function", print_detail=False): if lib == "phi": assert kernel_type in ["function", "structure", "all"] # phi kernel type can be: function, structure, all - kernel_infos = paddle.base.core._get_registered_phi_kernels( - kernel_type - ) + kernel_infos = paddle.base.core._get_registered_phi_kernels(kernel_type) else: # fluid, phi, all assert kernel_type in ["fluid", "phi", "all"] From 6fa4c80d575a945dc35247f16ac9050e5e5b0aaf Mon Sep 17 00:00:00 2001 From: Difers <707065510@qq.com> Date: Tue, 29 Aug 2023 19:49:20 +0800 Subject: [PATCH 09/10] remove build change --- paddle/scripts/paddle_build.sh | 7 +------ tools/check_file_diff_approvals.sh | 32 +++++++++++++++--------------- 2 files changed, 17 insertions(+), 22 deletions(-) diff --git a/paddle/scripts/paddle_build.sh b/paddle/scripts/paddle_build.sh index 1df1b2a87763d..6a6208334e0e4 100644 --- a/paddle/scripts/paddle_build.sh +++ b/paddle/scripts/paddle_build.sh @@ -3275,12 +3275,7 @@ function build_pr_and_develop() { rm -rf ${PADDLE_ROOT}/build/Makefile ${PADDLE_ROOT}/build/CMakeCache.txt ${PADDLE_ROOT}/build/build.ninja rm -rf ${PADDLE_ROOT}/build/third_party fi - # may be remove it later - mkdir -p ${PADDLE_ROOT}/pr && cp -r ${PADDLE_ROOT}/build/pr_whl ${PADDLE_ROOT}/pr - rm -rf ${PADDLE_ROOT}/build && mkdir -p ${PADDLE_ROOT}/build - mv ${PADDLE_ROOT}/pr/pr_whl ${PADDLE_ROOT}/build - cd ${PADDLE_ROOT}/build - + git checkout -b develop_base_pr upstream/$BRANCH git submodule update --init run_setup ${PYTHON_ABI:-""} "rerun-cmake bdist_wheel" ${parallel_number} diff --git a/tools/check_file_diff_approvals.sh b/tools/check_file_diff_approvals.sh index 405e55a76005b..d467ae1cc4671 100644 --- a/tools/check_file_diff_approvals.sh +++ b/tools/check_file_diff_approvals.sh @@ -143,7 +143,7 @@ for API_FILE in ${API_FILES[*]}; do echo_line="You must have one RD (risemeup1 or tianshuo78520a) approval for ${API_FILE}.\n" check_approval 1 risemeup1 tianshuo78520a elif [ "${API_FILE}" == "python/paddle/base/__init__.py" ];then - echo_line="You must have one RD (lanxianghit (Recommend), phlrain, luotao1, Aurelius84 or qili93) approval for the python/paddle/fluid/init.py, which manages the environment variables.\n" + echo_line="You must have one RD (lanxianghit (Recommend), phlrain, luotao1, Aurelius84 or qili93) approval for the python/paddle/base/init.py, which manages the environment variables.\n" check_approval 1 lanxianghit phlrain luotao1 Aurelius84 qili93 elif [ "${API_FILE}" == "python/requirements.txt" ];then echo_line="You must have one RD (phlrain) and one TPM (dingjiaweiww) and one QA (kolinwei) approval for python/requirements.txt, which manages the third-party python package.\n" @@ -154,29 +154,29 @@ for API_FILE in ${API_FILES[*]}; do elif [ "${API_FILE}" == "paddle/fluid/framework/unused_var_check.cc" ];then echo_line="You must have one RD (zhiqiu (Recommend) or chenwhql) approval for the changes of paddle/fluid/framework/unused_var_check.cc, which manages the allow list of operators that have unused input variables. Before change the allow list, please read the specification [https://github.com/PaddlePaddle/Paddle/wiki/OP-Should-Not-Have-Unused-Input] and try to refine code first. \n" check_approval 1 zhiqiu chenwhql - elif [ "${API_FILE}" == "python/paddle/fluid/tests/unittests/white_list/check_shape_white_list.py" ];then + elif [ "${API_FILE}" == "python/paddle/base/tests/unittests/white_list/check_shape_white_list.py" ];then echo_line="It is an Op accuracy problem, please take care of it. 
You must have one RD (hong19860320 (Recommend), luotao1, Aurelisu84, phlrain) approval for the changes of check_shape_white_list.py, which manages the white list of operators with limited input size. Inputs size of all cases in the op test must be greater than or equal to 100. For more information, please refer to: https://github.com/PaddlePaddle/Paddle/wiki/OP-Test-Input-Shape-Requirements. \n" check_approval 1 hong19860320 luotao1 Aurelisu84 phlrain - elif [ "${API_FILE}" == "python/paddle/fluid/tests/unittests/white_list/op_accuracy_white_list.py" ];then - echo_line="It is an Op accuracy problem, please take care of it. You must have one RD (juncaipeng (Recommend), zhangting2020 (Recommend) or luotao1 or Aurelius84) approval for the python/paddle/fluid/tests/unittests/white_list/op_accuracy_white_list.py, which manages the white list of upgrading the precision of op test to float64. For more information, please refer to: https://github.com/PaddlePaddle/Paddle/wiki/Upgrade-OP-Precision-to-Float64. \n" + elif [ "${API_FILE}" == "python/paddle/base/tests/unittests/white_list/op_accuracy_white_list.py" ];then + echo_line="It is an Op accuracy problem, please take care of it. You must have one RD (juncaipeng (Recommend), zhangting2020 (Recommend) or luotao1 or Aurelius84) approval for the python/paddle/base/tests/unittests/white_list/op_accuracy_white_list.py, which manages the white list of upgrading the precision of op test to float64. For more information, please refer to: https://github.com/PaddlePaddle/Paddle/wiki/Upgrade-OP-Precision-to-Float64. \n" check_approval 1 juncaipeng zhangting2020 luotao1 Aurelius84 - elif [ "${API_FILE}" == "python/paddle/fluid/tests/unittests/white_list/compile_vs_runtime_white_list.py" ];then - echo_line="You must have one RD (DannyIsFunny (Recommend), luotao1, Aurelius84, phlrain) approval for the python/paddle/fluid/tests/unittests/white_list/compile_vs_runtime_white_list.py, which manages the white list of compile&runtime lod-level check. For more information, please refer to: https://github.com/PaddlePaddle/Paddle/wiki/Compile_vs_Runtime-Check-Specification. \n" + elif [ "${API_FILE}" == "python/paddle/base/tests/unittests/white_list/compile_vs_runtime_white_list.py" ];then + echo_line="You must have one RD (DannyIsFunny (Recommend), luotao1, Aurelius84, phlrain) approval for the python/paddle/base/tests/unittests/white_list/compile_vs_runtime_white_list.py, which manages the white list of compile&runtime lod-level check. For more information, please refer to: https://github.com/PaddlePaddle/Paddle/wiki/Compile_vs_Runtime-Check-Specification. \n" check_approval 1 DannyIsFunny luotao1 Aurelius84 phlrain - elif [ "${API_FILE}" == "python/paddle/fluid/tests/unittests/white_list/no_check_set_white_list.py" ];then - echo_line="You must have one RD (cryoco (Recommend), luotao1, Aurelius84 or phlrain) approval for the python/paddle/fluid/tests/unittests/white_list/no_check_set_white_list.py, which manages the white list of setting no_check_set of check_output. \n" + elif [ "${API_FILE}" == "python/paddle/base/tests/unittests/white_list/no_check_set_white_list.py" ];then + echo_line="You must have one RD (cryoco (Recommend), luotao1, Aurelius84 or phlrain) approval for the python/paddle/base/tests/unittests/white_list/no_check_set_white_list.py, which manages the white list of setting no_check_set of check_output. 
\n" check_approval 1 cryoco luotao1 Aurelius84 phlrain - elif [ "${API_FILE}" == "python/paddle/fluid/tests/unittests/white_list/check_op_sequence_instance_0_input_white_list.py" ]; then + elif [ "${API_FILE}" == "python/paddle/base/tests/unittests/white_list/check_op_sequence_instance_0_input_white_list.py" ]; then echo_line="You must have one RD (luotao1, Aurelisu84, lanxianghit, phlrain) approval for the ${API_FILE}, which manages the white list of instance size 0 input for sequence op test. For more information, please refer to [https://github.com/PaddlePaddle/Paddle/wiki/It-is-required-to-include-LoDTensor-input-with-instance_size=0-in-sequence-OP-test]. \n" check_approval 1 luotao1 Aurelisu84 lanxianghit phlrain - elif [ "${API_FILE}" == "python/paddle/fluid/tests/unittests/white_list/op_threshold_white_list.py" ];then - echo_line="It is an Op accuracy problem, please take care of it. You must have one RD (juncaipeng (Recommend), zhangting2020 or luotao1, Aurelius84) approval for the python/paddle/fluid/tests/unittests/white_list/op_threshold_white_list.py, which manages the white list of error threshold for op test with float64 precision. For more information, please refer to: https://github.com/PaddlePaddle/Paddle/wiki/Upgrade-OP-Precision-to-Float64. \n" + elif [ "${API_FILE}" == "python/paddle/base/tests/unittests/white_list/op_threshold_white_list.py" ];then + echo_line="It is an Op accuracy problem, please take care of it. You must have one RD (juncaipeng (Recommend), zhangting2020 or luotao1, Aurelius84) approval for the python/paddle/base/tests/unittests/white_list/op_threshold_white_list.py, which manages the white list of error threshold for op test with float64 precision. For more information, please refer to: https://github.com/PaddlePaddle/Paddle/wiki/Upgrade-OP-Precision-to-Float64. \n" check_approval 1 juncaipeng zhangting2020 luotao1 Aurelius84 - elif [ "${API_FILE}" == "python/paddle/fluid/tests/unittests/white_list/check_op_sequence_batch_1_input_white_list.py" ];then + elif [ "${API_FILE}" == "python/paddle/base/tests/unittests/white_list/check_op_sequence_batch_1_input_white_list.py" ];then echo_line="You must have one RD (luotao1, Aurelius84, lanxianghit or phlrain) approval for ${API_FILE}, which manages the white list of batch size 1 input for sequence op test. For more information, please refer to [https://github.com/PaddlePaddle/Paddle/wiki/It-is-required-to-include-LoDTensor-input-with-batch_size=1-in-sequence-OP-test]. \n" check_approval 1 luotao1 Aurelius84 lanxianghit phlrain - elif [ "${API_FILE}" == "python/paddle/fluid/tests/unittests/white_list/no_grad_set_white_list.py" ];then - echo_line="You must have one RD (Shixiaowei02 (Recommend), luotao1, Aurelius84 or phlrain) approval for the python/paddle/fluid/tests/unittests/white_list/no_grad_set_white_list.py, which manages the white list of no_grad_set without value in operators. For more information, please refer to[https://github.com/PaddlePaddle/Paddle/wiki/It's-recommend-to-set-no_grad_set-to-be-None].\n" + elif [ "${API_FILE}" == "python/paddle/base/tests/unittests/white_list/no_grad_set_white_list.py" ];then + echo_line="You must have one RD (Shixiaowei02 (Recommend), luotao1, Aurelius84 or phlrain) approval for the python/paddle/base/tests/unittests/white_list/no_grad_set_white_list.py, which manages the white list of no_grad_set without value in operators. 
For more information, please refer to[https://github.com/PaddlePaddle/Paddle/wiki/It's-recommend-to-set-no_grad_set-to-be-None].\n" check_approval 1 Shixiaowei02 luotao1 Aurelius84 phlrain elif [ "${API_FILE}" == "tools/sampcd_processor.py" ];then echo_line="test_sampcd_processor.py will be executed for changed sampcd_processor.py.\n" @@ -205,10 +205,10 @@ for API_FILE in ${API_FILES[*]}; do elif [ "${API_FILE}" == "tools/parallel_UT_rule.py" ]; then echo_line="You must have one RD (zhwesky2010 (Recommend), wanghuancoder, luotao1 or Aurelius84) approval for ${API_FILE} changes, which manages the rule of running unittest with a same GPU. If the unittest failed due to Insufficient GPU memory or CUBLAS_STATUS_ALLOC_FAILED, you can remove it from ${API_FILE}.\n" check_approval 1 zhwesky2010 wanghuancoder luotao1 Aurelius84 - elif [ "${API_FILE}" == "python/paddle/fluid/parallel_executor.py" ]; then + elif [ "${API_FILE}" == "python/paddle/base/parallel_executor.py" ]; then echo_line="You must have one RD (Xreki, luotao1, zhhsplendid or Aurelius84) approval for ${API_FILE}, which manages the underlying code for PaddlePaddle.\n" check_approval 1 Xreki luotao1 zhhsplendid Aurelius84 - elif [ "${API_FILE}" == "python/paddle/fluid/dygraph/layers.py" ] || [ "${API_FILE}" == "paddle/fluid/eager/grad_node_info.h" ] || [ "${API_FILE}" == "paddle/fluid/eager/grad_node_info.cc" ] || [ "${API_FILE}" == "paddle/fluid/eager/grad_tensor_holder.h" ] || [ "${API_FILE}" == "paddle/fluid/eager/grad_tensor_holder.cc" ] || [ "${API_FILE}" == "paddle/fluid/eager/tensor_wrapper.h" ] || [ "${API_FILE}" == "paddle/fluid/eager/autograd_meta.cc"] || [ "${API_FILE}" == "paddle/fluid/eager/autograd_meta.h"] || [ "${API_FILE}" == "paddle/fluid/eager/backward.cc"] || [ "${API_FILE}" == "paddle/fluid/eager/backward.h"]; then + elif [ "${API_FILE}" == "python/paddle/base/dygraph/layers.py" ] || [ "${API_FILE}" == "paddle/fluid/eager/grad_node_info.h" ] || [ "${API_FILE}" == "paddle/fluid/eager/grad_node_info.cc" ] || [ "${API_FILE}" == "paddle/fluid/eager/grad_tensor_holder.h" ] || [ "${API_FILE}" == "paddle/fluid/eager/grad_tensor_holder.cc" ] || [ "${API_FILE}" == "paddle/fluid/eager/tensor_wrapper.h" ] || [ "${API_FILE}" == "paddle/fluid/eager/autograd_meta.cc"] || [ "${API_FILE}" == "paddle/fluid/eager/autograd_meta.h"] || [ "${API_FILE}" == "paddle/fluid/eager/backward.cc"] || [ "${API_FILE}" == "paddle/fluid/eager/backward.h"]; then echo_line="You must have one RD (JiabinYang,chenwhql,phlrain) approval for ${API_FILE}, which manages the underlying code for PaddlePaddle.\n" check_approval JiabinYang chenwhql phlrain elif [ "${API_FILE}" == "paddle/phi/api/include/tensor.h" ] || [ "${API_FILE}" == "paddle/phi/core/tensor_base.h" ] || [ "${API_FILE}" == "paddle/phi/core/dense_tensor.h" ] || [ "${API_FILE}" == "paddle/phi/core/meta_tensor.h" ] || [ "${API_FILE}" == "paddle/phi/core/tensor_meta.h" ] || [ "${API_FILE}" == "paddle/phi/core/attribute.h" ] || [ "${API_FILE}" == "paddle/phi/core/device_context.h" ] || [ "${API_FILE}" == "paddle/phi/core/kernel_utils.h" ] || [ "${API_FILE}" == "paddle/phi/core/kernel_registry.h" ] || [ "${API_FILE}" == "paddle/phi/core/kernel_factory.h" ] || [ "${API_FILE}" == "paddle/phi/core/kernel_context.h" ] || [ "${API_FILE}" == "paddle/phi/core/infermeta_utils.h" ]; then From 6dbbf91935acb9c242401521782610bbd8e05c99 Mon Sep 17 00:00:00 2001 From: Difers <707065510@qq.com> Date: Wed, 6 Sep 2023 14:46:55 +0800 Subject: [PATCH 10/10] fix some error --- paddle/fluid/API.spec | 6 +-- 
paddle/fluid/pybind/place.cc | 4 +- paddle/fluid/pybind/pybind.cc | 15 ++++--- paddle/fluid/pybind/tensor.cc | 42 +++++++++---------- python/paddle/dataset/uci_housing.py | 4 +- .../auto_parallel/static/cost/base_cost.py | 4 +- .../paddle/jit/dy2static/partial_program.py | 4 +- test/ir/new_ir/test_pd_inplace_pass.py | 2 +- ...t_fused_scale_bias_relu_conv_bnstats_op.py | 2 +- test/legacy_test/test_inplace.py | 22 +++++----- .../test_standalone_custom_event.py | 4 +- 11 files changed, 53 insertions(+), 56 deletions(-) diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec index 51eda469b773a..d10ff999f6eb2 100644 --- a/paddle/fluid/API.spec +++ b/paddle/fluid/API.spec @@ -1,6 +1,6 @@ -paddle.base.optimizer.PipelineOptimizer (paddle.base.optimizer.PipelineOptimizer, ('document', '2e55a29dbeb874934f7a1a1af3a22b8c')) -paddle.base.optimizer.PipelineOptimizer.__init__ (ArgSpec(args=['self', 'optimizer', 'num_microbatches', 'start_cpu_core_id'], varargs=None, keywords=None, defaults=(1, 0)), ('document', '6adf97f83acf6453d4a6a4b1070f3754')) -paddle.base.optimizer.PipelineOptimizer.minimize (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754')) +paddle.incubate.optimizer.PipelineOptimizer (paddle.incubate.optimizer.PipelineOptimizer, ('document', '2e55a29dbeb874934f7a1a1af3a22b8c')) +paddle.incubate.optimizer.PipelineOptimizer.__init__ (ArgSpec(args=['self', 'optimizer', 'num_microbatches', 'start_cpu_core_id'], varargs=None, keywords=None, defaults=(1, 0)), ('document', '6adf97f83acf6453d4a6a4b1070f3754')) +paddle.incubate.optimizer.PipelineOptimizer.minimize (ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '6adf97f83acf6453d4a6a4b1070f3754')) paddle.audio.features (ArgSpec(), ('document', 'd41d8cd98f00b204e9800998ecf8427e')) paddle.audio.features.layers.LogMelSpectrogram (ArgSpec(), ('document', 'c38b53606aa89215c4f00d3833e158b8')) paddle.audio.features.layers.LogMelSpectrogram.forward (ArgSpec(args=['self', 'x'], varargs=None, varkw=None, defaults=None, kwonlyargs=[], kwonlydefaults=None, annotations={'return': , 'x': }), ('document', '6c14f6f78dc697a6981cf90412e2f1ea')) diff --git a/paddle/fluid/pybind/place.cc b/paddle/fluid/pybind/place.cc index 57196c592c5c7..55a73cac1b665 100644 --- a/paddle/fluid/pybind/place.cc +++ b/paddle/fluid/pybind/place.cc @@ -394,8 +394,8 @@ void BindPlace(pybind11::module &m) { // NOLINT **Note**: Examples: .. code-block:: python - import paddle.base as fluid - xpu_place = fluid.XPUPlace(0) + import paddle.base as base + xpu_place = base.XPUPlace(0) )DOC"); g_xpuplace_pytype = reinterpret_cast(xpuplace.ptr()); xpuplace diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index 42ea19183909d..02fb5b7b4b00b 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -1103,7 +1103,7 @@ PYBIND11_MODULE(libpaddle, m) { }); // NOTE(zjl): ctest would load environment variables at the beginning even - // though we have not `import paddle.base as fluid`. So we add this API + // though we have not `import paddle.base as base`. So we add this API // to enable eager deletion mode in unittest. m.def("_set_eager_deletion_mode", &paddle::framework::SetEagerDeletionMode); @@ -1239,7 +1239,6 @@ All parameter, weight, gradient are variables in Paddle. Examples: .. 
code-block:: python - import paddle.base as fluid # create tensor from a scope and set value to it. param = scope.var('Param').get_tensor() param_array = np.full((height, row_numel), 5.0).astype("float32") @@ -2149,9 +2148,9 @@ All parameter, weight, gradient are variables in Paddle. Examples: .. code-block:: python - import paddle.base as fluid + import paddle.base as base - arr = fluid.LoDTensorArray() + arr = base.LoDTensorArray() )DOC"); g_framework_lodtensorarray_pytype = reinterpret_cast(pylodtensorarray.ptr()); @@ -2193,12 +2192,12 @@ All parameter, weight, gradient are variables in Paddle. Examples: .. code-block:: python - import paddle.base as fluid + import paddle.base as base import numpy as np - arr = fluid.LoDTensorArray() - t = fluid.LoDTensor() - t.set(np.ndarray([5, 30]), fluid.CPUPlace()) + arr = base.LoDTensorArray() + t = base.LoDTensor() + t.set(np.ndarray([5, 30]), base.CPUPlace()) arr.append(t) )DOC") .def( diff --git a/paddle/fluid/pybind/tensor.cc b/paddle/fluid/pybind/tensor.cc index 78169ec22fbfe..b3edc9575223d 100644 --- a/paddle/fluid/pybind/tensor.cc +++ b/paddle/fluid/pybind/tensor.cc @@ -393,11 +393,11 @@ void BindTensor(pybind11::module &m) { // NOLINT Examples: .. code-block:: python - import paddle.base as fluid + import paddle.base as base import numpy as np - t = fluid.Tensor() - t.set(np.ndarray([5, 30]), fluid.CPUPlace()) + t = base.Tensor() + t.set(np.ndarray([5, 30]), base.CPUPlace()) )DOC") .def( @@ -413,11 +413,11 @@ void BindTensor(pybind11::module &m) { // NOLINT Examples: .. code-block:: python - import paddle.base as fluid + import paddle.base as base import numpy as np - t = fluid.Tensor() - t.set(np.ndarray([5, 30]), fluid.CPUPlace()) + t = base.Tensor() + t.set(np.ndarray([5, 30]), base.CPUPlace()) print(t.shape()) # [5, 30] )DOC") .def("_to_dlpack", @@ -517,11 +517,11 @@ void BindTensor(pybind11::module &m) { // NOLINT Examples: .. code-block:: python - import paddle.base as fluid + import paddle.base as base import numpy as np - t = fluid.Tensor() - t.set(np.ndarray([5, 30]), fluid.CPUPlace()) + t = base.Tensor() + t.set(np.ndarray([5, 30]), base.CPUPlace()) t.set_lod([[0, 2, 5]]) print(t.lod()) # [[0, 2, 5]] )DOC") @@ -566,11 +566,11 @@ void BindTensor(pybind11::module &m) { // NOLINT Examples: .. code-block:: python - import paddle.base as fluid + import paddle.base as base import numpy as np - t = fluid.Tensor() - t.set(np.ndarray([5, 30]), fluid.CPUPlace()) + t = base.Tensor() + t.set(np.ndarray([5, 30]), base.CPUPlace()) t.set_recursive_sequence_lengths([[2, 3]]) print(t.recursive_sequence_lengths()) # [[2, 3]] print(t.lod()) # [[0, 2, 5]] @@ -594,11 +594,11 @@ void BindTensor(pybind11::module &m) { // NOLINT Examples: .. code-block:: python - import paddle.base as fluid + import paddle.base as base import numpy as np - t = fluid.Tensor() - t.set(np.ndarray([5, 30]), fluid.CPUPlace()) + t = base.Tensor() + t.set(np.ndarray([5, 30]), base.CPUPlace()) t.set_lod([[0, 2, 5]]) print(t.lod()) # [[0, 2, 5]] )DOC") @@ -623,11 +623,11 @@ void BindTensor(pybind11::module &m) { // NOLINT Examples: .. code-block:: python - import paddle.base as fluid + import paddle.base as base import numpy as np - t = fluid.Tensor() - t.set(np.ndarray([5, 30]), fluid.CPUPlace()) + t = base.Tensor() + t.set(np.ndarray([5, 30]), base.CPUPlace()) t.set_recursive_sequence_lengths([[2, 3]]) print(t.recursive_sequence_lengths()) # [[2, 3]] )DOC") @@ -647,11 +647,11 @@ void BindTensor(pybind11::module &m) { // NOLINT Examples: .. 
            .. code-block:: python
 
-                import paddle.base as fluid
+                import paddle.base as base
                 import numpy as np
 
-                t = fluid.Tensor()
-                t.set(np.ndarray([5, 30]), fluid.CPUPlace())
+                t = base.Tensor()
+                t.set(np.ndarray([5, 30]), base.CPUPlace())
                 t.set_recursive_sequence_lengths([[2, 3]])
                 print(t.has_valid_recursive_sequence_lengths())  # True
        )DOC")
diff --git a/python/paddle/dataset/uci_housing.py b/python/paddle/dataset/uci_housing.py
index e834e2526318a..d5e68db9d6954 100644
--- a/python/paddle/dataset/uci_housing.py
+++ b/python/paddle/dataset/uci_housing.py
@@ -51,7 +51,7 @@
 UCI_TRAIN_DATA = None
 UCI_TEST_DATA = None
 
-FLUID_URL_MODEL = 'https://github.com/PaddlePaddle/book/raw/develop/01.fit_a_line/base/fit_a_line.fluid.tar'
+FLUID_URL_MODEL = 'https://github.com/PaddlePaddle/book/raw/develop/01.fit_a_line/fluid/fit_a_line.fluid.tar'
 FLUID_MD5_MODEL = '6e6dd637ccd5993961f68bfbde46090b'
 
 
@@ -150,7 +150,7 @@ def reader():
         return reader
 
 
-def base_model():
+def fluid_model():
     parameter_tar = paddle.dataset.common.download(
         FLUID_URL_MODEL, 'uci_housing', FLUID_MD5_MODEL, 'fit_a_line.fluid.tar'
     )
diff --git a/python/paddle/distributed/auto_parallel/static/cost/base_cost.py b/python/paddle/distributed/auto_parallel/static/cost/base_cost.py
index 197656d6ea845..58ab301ad99f8 100644
--- a/python/paddle/distributed/auto_parallel/static/cost/base_cost.py
+++ b/python/paddle/distributed/auto_parallel/static/cost/base_cost.py
@@ -928,9 +928,9 @@ def calc_time_by_modeling(op=None, desc=None, cluster=None):
 
 def calc_time_by_cost_model(op, cluster=None):
     """Calc op time by cost model and the unit is microsecond."""
-    if not isinstance(op, paddle.fluid.framework.Operator):
+    if not isinstance(op, paddle.base.framework.Operator):
         raise TypeError(
-            "OP must be paddle.fluid.framework.Operator, but got {}.".format(
+            "OP must be paddle.base.framework.Operator, but got {}.".format(
                 type(op)
             )
         )
diff --git a/python/paddle/jit/dy2static/partial_program.py b/python/paddle/jit/dy2static/partial_program.py
index 3bf82cbd68c9c..7679a6d4ed513 100644
--- a/python/paddle/jit/dy2static/partial_program.py
+++ b/python/paddle/jit/dy2static/partial_program.py
@@ -190,9 +190,7 @@ def __init__(
         assert isinstance(self._build_strategy, BuildStrategy)
 
         self._origin_main_program = self._verify_program(main_program)
-        with paddle.fluid.framework._dygraph_guard(
-            paddle.fluid.dygraph.Tracer()
-        ):
+        with paddle.base.framework._dygraph_guard(paddle.base.dygraph.Tracer()):
             self._cuda_graph_vec = self._create_cuda_graph_vec()
         self._cuda_graph_capture_mode = ""
         self._cuda_graph_pool_id = 0
diff --git a/test/ir/new_ir/test_pd_inplace_pass.py b/test/ir/new_ir/test_pd_inplace_pass.py
index e00f34389d3b4..9e56eb6c5cc80 100644
--- a/test/ir/new_ir/test_pd_inplace_pass.py
+++ b/test/ir/new_ir/test_pd_inplace_pass.py
@@ -17,7 +17,7 @@
 import numpy as np
 
 import paddle
-from paddle.fluid import core
+from paddle.base import core
 
 paddle.enable_static()
 
diff --git a/test/legacy_test/test_fused_scale_bias_relu_conv_bnstats_op.py b/test/legacy_test/test_fused_scale_bias_relu_conv_bnstats_op.py
index cbed4e5b33fcf..f8cbc8c387dc7 100644
--- a/test/legacy_test/test_fused_scale_bias_relu_conv_bnstats_op.py
+++ b/test/legacy_test/test_fused_scale_bias_relu_conv_bnstats_op.py
@@ -20,7 +20,7 @@
 
 import paddle
 from paddle import nn
-from paddle.fluid import core
+from paddle.base import core
 
 
 def skip_unit_test():
diff --git a/test/legacy_test/test_inplace.py b/test/legacy_test/test_inplace.py
index 422bcd35d1612..3dd1213d38d9e 100644
--- a/test/legacy_test/test_inplace.py
+++ b/test/legacy_test/test_inplace.py
@@ -861,7 +861,7 @@ def set_np_compare_func(self):
         self.np_compare = np_array_equal_with_nan
 
     def test_forward_version(self):
-        with paddle.fluid.dygraph.guard():
+        with paddle.base.dygraph.guard():
             var = paddle.to_tensor(self.input_var_numpy).astype(self.dtype)
             self.assertEqual(var.inplace_version, 0)
 
@@ -877,7 +877,7 @@ def test_forward_version(self):
     def test_backward_error(self):
         # It raises an error because the inplace operator will result
         # in incorrect gradient computation.
-        with paddle.fluid.dygraph.guard():
+        with paddle.base.dygraph.guard():
             var_a = paddle.to_tensor(self.input_var_numpy).astype(self.dtype)
             var_a.stop_gradient = False
 
@@ -920,7 +920,7 @@ def non_inplace_api_processing(self, var):
         return paddle.lcm(var, self.y)
 
     def test_forward_version(self):
-        with paddle.fluid.dygraph.guard():
+        with paddle.base.dygraph.guard():
             var = paddle.to_tensor(self.input_var_numpy).astype(self.dtype)
             self.assertEqual(var.inplace_version, 0)
 
@@ -961,7 +961,7 @@ def test_leaf_inplace_var_error(self):
         pass
 
     def test_forward_version(self):
-        with paddle.fluid.dygraph.guard():
+        with paddle.base.dygraph.guard():
             var = paddle.to_tensor(self.input_var_numpy).astype(self.dtype)
             self.assertEqual(var.inplace_version, 0)
 
@@ -977,7 +977,7 @@ def test_forward_version(self):
     def test_backward_error(self):
         # It raises an error because the inplace operator will result
         # in incorrect gradient computation.
-        with paddle.fluid.dygraph.guard():
+        with paddle.base.dygraph.guard():
             var_a = paddle.to_tensor(self.input_var_numpy).astype("float64")
             var_a.stop_gradient = False
 
@@ -1039,7 +1039,7 @@ def non_inplace_api_processing(self, var):
         return paddle.where(var > self.y, var, self.y)
 
     def test_forward_version(self):
-        with paddle.fluid.dygraph.guard():
+        with paddle.base.dygraph.guard():
             var = paddle.to_tensor(self.input_var_numpy).astype(self.dtype)
             self.assertEqual(var.inplace_version, 0)
 
@@ -1055,7 +1055,7 @@ def test_forward_version(self):
     def test_backward_error(self):
         # It raises an error because the inplace operator will result
         # in incorrect gradient computation.
-        with paddle.fluid.dygraph.guard():
+        with paddle.base.dygraph.guard():
             var_a = paddle.to_tensor(self.input_var_numpy).astype(self.dtype)
             var_a.stop_gradient = False
 
@@ -1344,7 +1344,7 @@ def non_inplace_api_processing(self, var):
     def test_backward_error(self):
         # It raises an error because the inplace operator will result
         # in incorrect gradient computation.
-        with paddle.fluid.dygraph.guard():
+        with paddle.base.dygraph.guard():
             var_a = paddle.to_tensor(self.input_var_numpy).astype("float64")
             var_a.stop_gradient = False
 
@@ -1364,7 +1364,7 @@ def test_backward_error(self):
             loss.backward()
 
     def test_forward_version(self):
-        with paddle.fluid.dygraph.guard():
+        with paddle.base.dygraph.guard():
             var = paddle.to_tensor(self.input_var_numpy).astype(self.dtype)
             self.assertEqual(var.inplace_version, 0)
 
@@ -1388,7 +1388,7 @@ def non_inplace_api_processing(self, var):
     def test_backward_error(self):
         # It raises an error because the inplace operator will result
         # in incorrect gradient computation.
-        with paddle.fluid.dygraph.guard():
+        with paddle.base.dygraph.guard():
             var_a = paddle.to_tensor(self.input_var_numpy).astype(self.dtype)
             var_a.stop_gradient = False
 
@@ -1408,7 +1408,7 @@ def test_backward_error(self):
             loss.backward()
 
     def test_forward_version(self):
-        with paddle.fluid.dygraph.guard():
+        with paddle.base.dygraph.guard():
             var = paddle.to_tensor(self.input_var_numpy).astype(self.dtype)
             self.assertEqual(var.inplace_version, 0)
 
diff --git a/test/standalone_executor/test_standalone_custom_event.py b/test/standalone_executor/test_standalone_custom_event.py
index 3b9fe7a3197b6..e65ed021e7972 100644
--- a/test/standalone_executor/test_standalone_custom_event.py
+++ b/test/standalone_executor/test_standalone_custom_event.py
@@ -15,13 +15,13 @@
 import unittest
 
 import paddle
+from paddle.base import core
+from paddle.base.executor import _add_feed_fetch_ops, _StandaloneExecutor
 from paddle.distributed.passes.pass_utils import (
     _add_event_dependency,
     get_skip_gc_vars,
     split_program,
 )
-from paddle.fluid import core
-from paddle.fluid.executor import _add_feed_fetch_ops, _StandaloneExecutor
 
 paddle.enable_static()
 
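A minimal sketch of the import style the tests above migrate to (illustrative only, not part of the diff; it assumes a build in which the renamed `paddle.base` package is importable):

    import paddle
    # Previously: from paddle.fluid import core
    from paddle.base import core

    paddle.enable_static()
    # `core` is the same compiled libpaddle binding; only its Python package path changed.
    print(core.is_compiled_with_cuda())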