diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 567d9b1e6c32fa1aac57e309dafaf34a5b1cce56..258ea9025dd55d32366f81416c80e04210854e60 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -186,24 +186,14 @@ if(${len} GREATER_EQUAL 1)
       if(WITH_XPU)
         target_link_libraries(${test_name} xpulib)
       endif()
-      if(NOT
-         ("${test_name}" STREQUAL "c_broadcast_op_npu_test"
-          OR "${test_name}" STREQUAL "c_allreduce_sum_op_npu_test"
-          OR "${test_name}" STREQUAL "c_allreduce_max_op_npu_test"
-          OR "${test_name}" STREQUAL "c_reducescatter_op_npu_test"
-          OR "${test_name}" STREQUAL "c_allgather_op_npu_test"
-          OR "${test_name}" STREQUAL "send_v2_op_npu_test"
-          OR "${test_name}" STREQUAL "c_reduce_sum_op_npu_test"
-          OR "${test_name}" STREQUAL "recv_v2_op_npu_test"))
-        cc_test_run(
-          ${test_name}
-          COMMAND
-          ${test_name}
-          ARGS
-          ${test_arg}
-          DIR
-          ${CC_TESTS_DIR})
-      endif()
+      cc_test_run(
+        ${test_name}
+        COMMAND
+        ${test_name}
+        ARGS
+        ${test_arg}
+        DIR
+        ${CC_TESTS_DIR})
     elseif(WITH_TESTING AND NOT TEST ${test_name})
       add_test(NAME ${test_name} COMMAND ${CMAKE_COMMAND} -E echo CI skip
                                          ${test_name}.)
diff --git a/test/cpp/inference/api/api_impl_tester.cc b/test/cpp/inference/api/api_impl_tester.cc
index 3270e216586e17b6ffb20f6017b4944b17b91b59..d3d289371359d4eb967eaadd685ec18936944e78 100644
--- a/test/cpp/inference/api/api_impl_tester.cc
+++ b/test/cpp/inference/api/api_impl_tester.cc
@@ -72,7 +72,6 @@ void MainWord2Vec(const paddle::PaddlePlace& place) {
   auto predictor = CreatePaddlePredictor(config);
   config.use_gpu = paddle::gpu_place_used(place);
   config.use_xpu = paddle::xpu_place_used(place);
-  config.use_npu = paddle::npu_place_used(place);
 
   phi::DenseTensor first_word, second_word, third_word, fourth_word;
   framework::LoD lod{{0, 1}};
@@ -125,7 +124,6 @@ void MainImageClassification(const paddle::PaddlePlace& place) {
   NativeConfig config = GetConfig();
   config.use_gpu = paddle::gpu_place_used(place);
   config.use_xpu = paddle::xpu_place_used(place);
-  config.use_npu = paddle::npu_place_used(place);
   config.model_dir =
       FLAGS_book_dirname + "/image_classification_resnet.inference.model";
 
@@ -169,7 +167,6 @@ void MainThreadsWord2Vec(const paddle::PaddlePlace& place) {
   NativeConfig config = GetConfig();
   config.use_gpu = paddle::gpu_place_used(place);
   config.use_xpu = paddle::xpu_place_used(place);
-  config.use_npu = paddle::npu_place_used(place);
   auto main_predictor = CreatePaddlePredictor(config);
 
   // prepare inputs data and reference results
@@ -234,7 +231,6 @@ void MainThreadsImageClassification(const paddle::PaddlePlace& place) {
   NativeConfig config = GetConfig();
   config.use_gpu = paddle::gpu_place_used(place);
   config.use_xpu = paddle::xpu_place_used(place);
-  config.use_npu = paddle::npu_place_used(place);
   config.model_dir =
       FLAGS_book_dirname + "/image_classification_resnet.inference.model";
 
diff --git a/test/cpp/inference/test_helper.h b/test/cpp/inference/test_helper.h
index 0685b90de1fa225901d06d2ae9a938e445755709..f66712401858ab5424ee629a70fbe9bf60afd2e0 100644
--- a/test/cpp/inference/test_helper.h
+++ b/test/cpp/inference/test_helper.h
@@ -35,9 +35,6 @@ bool gpu_place_used(const paddle::PaddlePlace& place) {
 bool xpu_place_used(const paddle::PaddlePlace& place) {
   return place == paddle::PaddlePlace::kXPU;
 }
-bool npu_place_used(const paddle::PaddlePlace& place) {
-  return place == paddle::PaddlePlace::kNPU;
-}
 bool cpu_place_used(const paddle::PaddlePlace& place) {
   return place == paddle::PaddlePlace::kCPU;
 }
diff --git a/test/cpp/phi/common/test_backend.cc b/test/cpp/phi/common/test_backend.cc
index 791167ffe62a6449d7eafa75eb5fd976c909874c..516deeee34af20e2b95d8f48471e472333717ba0 100644
--- a/test/cpp/phi/common/test_backend.cc
+++ b/test/cpp/phi/common/test_backend.cc
@@ -36,9 +36,6 @@ TEST(Backend, OStream) {
   oss << phi::Backend::XPU;
   EXPECT_EQ(oss.str(), "XPU");
   oss.str("");
-  oss << phi::Backend::NPU;
-  EXPECT_EQ(oss.str(), "NPU");
-  oss.str("");
   oss << phi::Backend::ONEDNN;
   EXPECT_EQ(oss.str(), "ONEDNN");
   oss.str("");
@@ -62,7 +59,6 @@ TEST(Backend, StringToBackend) {
   EXPECT_EQ(phi::Backend::CPU, pexp::StringToBackend("CPU"));
   EXPECT_EQ(phi::Backend::GPU, pexp::StringToBackend("GPU"));
   EXPECT_EQ(phi::Backend::XPU, pexp::StringToBackend("XPU"));
-  EXPECT_EQ(phi::Backend::NPU, pexp::StringToBackend("NPU"));
   EXPECT_EQ(phi::Backend::ONEDNN, pexp::StringToBackend("OneDNN"));
   EXPECT_EQ(phi::Backend::GPUDNN, pexp::StringToBackend("GPUDNN"));
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
diff --git a/test/cpp/phi/core/test_dense_tensor.cc b/test/cpp/phi/core/test_dense_tensor.cc
index b0c02441ef1e241582fa6298d861f3eb2c653c06..424c4bca75f404ec87dc7d550cf4d4ddbd95cb7b 100644
--- a/test/cpp/phi/core/test_dense_tensor.cc
+++ b/test/cpp/phi/core/test_dense_tensor.cc
@@ -129,13 +129,6 @@ TEST(dense_tensor, shallow_copy) {
   CHECK(tensor_0.meta() == tensor_1.meta());
 }
 
-struct TestStorageProperties
-    : public StorageProperties,
-      public TypeInfoTraits<StorageProperties, TestStorageProperties> {
-  virtual ~TestStorageProperties() = default;
-  static const char* name() { return "TestStorageProperties"; }
-};
-
 TEST(dense_tensor, storage_properties) {
   const DataType dtype{DataType::FLOAT32};
   const DDim dims({1, 2});
@@ -144,29 +137,9 @@
   auto fancy_allocator = std::unique_ptr<Allocator>(new FancyAllocator);
   DenseTensor tensor(fancy_allocator.get(), meta);
 
-  // test no storage properties
-  bool caught_exception = false;
-  try {
-    tensor.storage_properties();
-  } catch (phi::enforce::EnforceNotMet& error) {
-    caught_exception = true;
-  }
-  EXPECT_TRUE(caught_exception);
-
-  // test custom device storage properties
-  EXPECT_FALSE(tensor.storage_properties_initialized());
-  auto npu_properties = std::make_unique<NPUStorageProperties>();
-  npu_properties->storage_format = 3;
-  npu_properties->storage_dims = {1, 1, 1, 1, 16};
-  tensor.set_storage_properties(std::move(npu_properties));
-  EXPECT_TRUE(tensor.storage_properties_initialized());
-  auto get_npu_properties = tensor.storage_properties<NPUStorageProperties>();
-  CHECK_EQ(get_npu_properties.storage_format, 3);
-  CHECK_EQ(get_npu_properties.storage_dims.size(), 5);
-
   // test error type storage properties
 #ifdef PADDLE_WITH_MKLDNN
-  caught_exception = false;
+  bool caught_exception = false;
   try {
     tensor.storage_properties();
   } catch (phi::enforce::EnforceNotMet& error) {
@@ -174,14 +147,6 @@
   }
   EXPECT_TRUE(caught_exception);
 #endif
-
-  // test copy storage properties
-  auto cp_tensor = tensor;
-  auto get_cp_npu_properties =
-      cp_tensor.storage_properties<NPUStorageProperties>();
-  CHECK_EQ(get_cp_npu_properties.storage_format, 3);
-  CHECK_EQ(get_cp_npu_properties.storage_dims.size(), 5);
 }
-
 }  // namespace tests
 }  // namespace phi
diff --git a/test/xpu/test_merged_momentum_op_xpu_base.py b/test/xpu/test_merged_momentum_op_xpu_base.py
index 10868585a903f05bf2488e174b850863abc11d25..62f534a1b41a64c2d7f94c2db03f49c5a10b465d 100644
--- a/test/xpu/test_merged_momentum_op_xpu_base.py
+++ b/test/xpu/test_merged_momentum_op_xpu_base.py
@@ -211,7 +211,6 @@ class TestMergedMomentumBase(unittest.TestCase):
         )
 
         def run_op(use_nesterov, use_merged):
-            # NPU Momentum Op does not support rescale_grad
             rescale_grad = 1.0
             return run_momentum_op(
                 params,