rem npu in test (#53469)
* rem npu in test

* restore some code
KimBioInfoStudio authored May 6, 2023
1 parent 13e2e10 commit a499731
Showing 6 changed files with 9 additions and 66 deletions.
26 changes: 8 additions & 18 deletions test/CMakeLists.txt
@@ -186,24 +186,14 @@ if(${len} GREATER_EQUAL 1)
   if(WITH_XPU)
     target_link_libraries(${test_name} xpulib)
   endif()
-  if(NOT
-     ("${test_name}" STREQUAL "c_broadcast_op_npu_test"
-      OR "${test_name}" STREQUAL "c_allreduce_sum_op_npu_test"
-      OR "${test_name}" STREQUAL "c_allreduce_max_op_npu_test"
-      OR "${test_name}" STREQUAL "c_reducescatter_op_npu_test"
-      OR "${test_name}" STREQUAL "c_allgather_op_npu_test"
-      OR "${test_name}" STREQUAL "send_v2_op_npu_test"
-      OR "${test_name}" STREQUAL "c_reduce_sum_op_npu_test"
-      OR "${test_name}" STREQUAL "recv_v2_op_npu_test"))
-    cc_test_run(
-      ${test_name}
-      COMMAND
-      ${test_name}
-      ARGS
-      ${test_arg}
-      DIR
-      ${CC_TESTS_DIR})
-  endif()
+  cc_test_run(
+    ${test_name}
+    COMMAND
+    ${test_name}
+    ARGS
+    ${test_arg}
+    DIR
+    ${CC_TESTS_DIR})
 elseif(WITH_TESTING AND NOT TEST ${test_name})
   add_test(NAME ${test_name} COMMAND ${CMAKE_COMMAND} -E echo CI skip
            ${test_name}.)
4 changes: 0 additions & 4 deletions test/cpp/inference/api/api_impl_tester.cc
@@ -72,7 +72,6 @@ void MainWord2Vec(const paddle::PaddlePlace& place) {
   auto predictor = CreatePaddlePredictor<NativeConfig>(config);
   config.use_gpu = paddle::gpu_place_used(place);
   config.use_xpu = paddle::xpu_place_used(place);
-  config.use_npu = paddle::npu_place_used(place);
 
   phi::DenseTensor first_word, second_word, third_word, fourth_word;
   framework::LoD lod{{0, 1}};
@@ -125,7 +124,6 @@ void MainImageClassification(const paddle::PaddlePlace& place) {
   NativeConfig config = GetConfig();
   config.use_gpu = paddle::gpu_place_used(place);
   config.use_xpu = paddle::xpu_place_used(place);
-  config.use_npu = paddle::npu_place_used(place);
   config.model_dir =
       FLAGS_book_dirname + "/image_classification_resnet.inference.model";
 
@@ -169,7 +167,6 @@ void MainThreadsWord2Vec(const paddle::PaddlePlace& place) {
   NativeConfig config = GetConfig();
   config.use_gpu = paddle::gpu_place_used(place);
   config.use_xpu = paddle::xpu_place_used(place);
-  config.use_npu = paddle::npu_place_used(place);
   auto main_predictor = CreatePaddlePredictor<NativeConfig>(config);
 
   // prepare inputs data and reference results
@@ -234,7 +231,6 @@ void MainThreadsImageClassification(const paddle::PaddlePlace& place) {
   NativeConfig config = GetConfig();
   config.use_gpu = paddle::gpu_place_used(place);
   config.use_xpu = paddle::xpu_place_used(place);
-  config.use_npu = paddle::npu_place_used(place);
   config.model_dir =
       FLAGS_book_dirname + "/image_classification_resnet.inference.model";
 
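After this change the inference tests select a device by toggling only the GPU and XPU flags. A minimal sketch of the resulting pattern, for illustration only: the helper name RunInferenceOn is hypothetical, and GetConfig(), NativeConfig, and CreatePaddlePredictor are assumed from the test file's existing includes.

// Sketch only, not part of this commit: mirrors the post-change pattern above.
void RunInferenceOn(const paddle::PaddlePlace& place) {  // hypothetical helper
  NativeConfig config = GetConfig();
  config.use_gpu = paddle::gpu_place_used(place);  // true only for a GPU place
  config.use_xpu = paddle::xpu_place_used(place);  // true only for an XPU place
  // No NPU flag is set here any more; the npu_place_used helper is removed below.
  auto predictor = CreatePaddlePredictor<NativeConfig>(config);
  // ... feed inputs and run the predictor as the existing tests do.
}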
3 changes: 0 additions & 3 deletions test/cpp/inference/test_helper.h
@@ -35,9 +35,6 @@ bool gpu_place_used(const paddle::PaddlePlace& place) {
 bool xpu_place_used(const paddle::PaddlePlace& place) {
   return place == paddle::PaddlePlace::kXPU;
 }
-bool npu_place_used(const paddle::PaddlePlace& place) {
-  return place == paddle::PaddlePlace::kNPU;
-}
 bool cpu_place_used(const paddle::PaddlePlace& place) {
   return place == paddle::PaddlePlace::kCPU;
 }
4 changes: 0 additions & 4 deletions test/cpp/phi/common/test_backend.cc
@@ -36,9 +36,6 @@ TEST(Backend, OStream) {
   oss << phi::Backend::XPU;
   EXPECT_EQ(oss.str(), "XPU");
   oss.str("");
-  oss << phi::Backend::NPU;
-  EXPECT_EQ(oss.str(), "NPU");
-  oss.str("");
   oss << phi::Backend::ONEDNN;
   EXPECT_EQ(oss.str(), "ONEDNN");
   oss.str("");
@@ -62,7 +59,6 @@ TEST(Backend, StringToBackend) {
   EXPECT_EQ(phi::Backend::CPU, pexp::StringToBackend("CPU"));
   EXPECT_EQ(phi::Backend::GPU, pexp::StringToBackend("GPU"));
   EXPECT_EQ(phi::Backend::XPU, pexp::StringToBackend("XPU"));
-  EXPECT_EQ(phi::Backend::NPU, pexp::StringToBackend("NPU"));
   EXPECT_EQ(phi::Backend::ONEDNN, pexp::StringToBackend("OneDNN"));
   EXPECT_EQ(phi::Backend::GPUDNN, pexp::StringToBackend("GPUDNN"));
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
37 changes: 1 addition & 36 deletions test/cpp/phi/core/test_dense_tensor.cc
@@ -129,13 +129,6 @@ TEST(dense_tensor, shallow_copy) {
   CHECK(tensor_0.meta() == tensor_1.meta());
 }
 
-struct TestStorageProperties
-    : public StorageProperties,
-      public TypeInfoTraits<StorageProperties, NPUStorageProperties> {
-  virtual ~TestStorageProperties() = default;
-  static const char* name() { return "TestStorageProperties"; }
-};
-
 TEST(dense_tensor, storage_properties) {
   const DataType dtype{DataType::FLOAT32};
   const DDim dims({1, 2});
@@ -144,44 +137,16 @@ TEST(dense_tensor, storage_properties) {
   auto fancy_allocator = std::unique_ptr<Allocator>(new FancyAllocator);
   DenseTensor tensor(fancy_allocator.get(), meta);
 
-  // test no storage properties
-  bool caught_exception = false;
-  try {
-    tensor.storage_properties<NPUStorageProperties>();
-  } catch (phi::enforce::EnforceNotMet& error) {
-    caught_exception = true;
-  }
-  EXPECT_TRUE(caught_exception);
-
-  // test custom device storage properties
-  EXPECT_FALSE(tensor.storage_properties_initialized());
-  auto npu_properties = std::make_unique<NPUStorageProperties>();
-  npu_properties->storage_format = 3;
-  npu_properties->storage_dims = {1, 1, 1, 1, 16};
-  tensor.set_storage_properties(std::move(npu_properties));
-  EXPECT_TRUE(tensor.storage_properties_initialized());
-  auto get_npu_properties = tensor.storage_properties<NPUStorageProperties>();
-  CHECK_EQ(get_npu_properties.storage_format, 3);
-  CHECK_EQ(get_npu_properties.storage_dims.size(), 5);
-
   // test error type storage properties
 #ifdef PADDLE_WITH_MKLDNN
-  caught_exception = false;
+  bool caught_exception = false;
   try {
     tensor.storage_properties<OneDNNStorageProperties>();
   } catch (phi::enforce::EnforceNotMet& error) {
     caught_exception = true;
   }
   EXPECT_TRUE(caught_exception);
 #endif
-
-  // test copy storage properties
-  auto cp_tensor = tensor;
-  auto get_cp_npu_properties =
-      cp_tensor.storage_properties<NPUStorageProperties>();
-  CHECK_EQ(get_cp_npu_properties.storage_format, 3);
-  CHECK_EQ(get_cp_npu_properties.storage_dims.size(), 5);
 }
 
 }  // namespace tests
 }  // namespace phi
1 change: 0 additions & 1 deletion test/xpu/test_merged_momentum_op_xpu_base.py
@@ -211,7 +211,6 @@ def check_with_place(self, place, dtype, multi_precision=False):
         )
 
         def run_op(use_nesterov, use_merged):
-            # NPU Momentum Op does not support rescale_grad
            rescale_grad = 1.0
            return run_momentum_op(
                params,
