Unverified commit a4997311, authored by Kim Yann, committed by GitHub

rem npu in test (#53469)

* rem npu in test

* restore some code
Parent commit: 13e2e10c
......@@ -186,24 +186,14 @@ if(${len} GREATER_EQUAL 1)
if(WITH_XPU)
target_link_libraries(${test_name} xpulib)
endif()
# Skip cc_test_run for the NPU collective-communication tests; these binaries
# require NPU hardware/runtime support and cannot run in a standard CI pool.
if(NOT
("${test_name}" STREQUAL "c_broadcast_op_npu_test"
OR "${test_name}" STREQUAL "c_allreduce_sum_op_npu_test"
OR "${test_name}" STREQUAL "c_allreduce_max_op_npu_test"
OR "${test_name}" STREQUAL "c_reducescatter_op_npu_test"
OR "${test_name}" STREQUAL "c_allgather_op_npu_test"
OR "${test_name}" STREQUAL "send_v2_op_npu_test"
OR "${test_name}" STREQUAL "c_reduce_sum_op_npu_test"
OR "${test_name}" STREQUAL "recv_v2_op_npu_test"))
# Register the test with the given args, executed from the C++ tests dir.
cc_test_run(
${test_name}
COMMAND
${test_name}
ARGS
${test_arg}
DIR
${CC_TESTS_DIR})
endif()
# Unconditionally register the test with its args, executed from the C++
# tests directory (no NPU exclusion list — NPU tests were removed).
cc_test_run(
${test_name}
COMMAND
${test_name}
ARGS
${test_arg}
DIR
${CC_TESTS_DIR})
elseif(WITH_TESTING AND NOT TEST ${test_name})
add_test(NAME ${test_name} COMMAND ${CMAKE_COMMAND} -E echo CI skip
${test_name}.)
......
......@@ -72,7 +72,6 @@ void MainWord2Vec(const paddle::PaddlePlace& place) {
auto predictor = CreatePaddlePredictor<NativeConfig>(config);
config.use_gpu = paddle::gpu_place_used(place);
config.use_xpu = paddle::xpu_place_used(place);
config.use_npu = paddle::npu_place_used(place);
phi::DenseTensor first_word, second_word, third_word, fourth_word;
framework::LoD lod{{0, 1}};
......@@ -125,7 +124,6 @@ void MainImageClassification(const paddle::PaddlePlace& place) {
NativeConfig config = GetConfig();
config.use_gpu = paddle::gpu_place_used(place);
config.use_xpu = paddle::xpu_place_used(place);
config.use_npu = paddle::npu_place_used(place);
config.model_dir =
FLAGS_book_dirname + "/image_classification_resnet.inference.model";
......@@ -169,7 +167,6 @@ void MainThreadsWord2Vec(const paddle::PaddlePlace& place) {
NativeConfig config = GetConfig();
config.use_gpu = paddle::gpu_place_used(place);
config.use_xpu = paddle::xpu_place_used(place);
config.use_npu = paddle::npu_place_used(place);
auto main_predictor = CreatePaddlePredictor<NativeConfig>(config);
// prepare inputs data and reference results
......@@ -234,7 +231,6 @@ void MainThreadsImageClassification(const paddle::PaddlePlace& place) {
NativeConfig config = GetConfig();
config.use_gpu = paddle::gpu_place_used(place);
config.use_xpu = paddle::xpu_place_used(place);
config.use_npu = paddle::npu_place_used(place);
config.model_dir =
FLAGS_book_dirname + "/image_classification_resnet.inference.model";
......
......@@ -35,9 +35,6 @@ bool gpu_place_used(const paddle::PaddlePlace& place) {
/// Reports whether the requested execution place selects the XPU backend.
bool xpu_place_used(const paddle::PaddlePlace& place) {
  const bool on_xpu = (paddle::PaddlePlace::kXPU == place);
  return on_xpu;
}
/// Reports whether the requested execution place selects the NPU backend.
bool npu_place_used(const paddle::PaddlePlace& place) {
  const bool on_npu = (paddle::PaddlePlace::kNPU == place);
  return on_npu;
}
/// Reports whether the requested execution place selects the CPU backend.
bool cpu_place_used(const paddle::PaddlePlace& place) {
  const bool on_cpu = (paddle::PaddlePlace::kCPU == place);
  return on_cpu;
}
......
......@@ -36,9 +36,6 @@ TEST(Backend, OStream) {
oss << phi::Backend::XPU;
EXPECT_EQ(oss.str(), "XPU");
oss.str("");
oss << phi::Backend::NPU;
EXPECT_EQ(oss.str(), "NPU");
oss.str("");
oss << phi::Backend::ONEDNN;
EXPECT_EQ(oss.str(), "ONEDNN");
oss.str("");
......@@ -62,7 +59,6 @@ TEST(Backend, StringToBackend) {
EXPECT_EQ(phi::Backend::CPU, pexp::StringToBackend("CPU"));
EXPECT_EQ(phi::Backend::GPU, pexp::StringToBackend("GPU"));
EXPECT_EQ(phi::Backend::XPU, pexp::StringToBackend("XPU"));
EXPECT_EQ(phi::Backend::NPU, pexp::StringToBackend("NPU"));
EXPECT_EQ(phi::Backend::ONEDNN, pexp::StringToBackend("OneDNN"));
EXPECT_EQ(phi::Backend::GPUDNN, pexp::StringToBackend("GPUDNN"));
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
......
......@@ -129,13 +129,6 @@ TEST(dense_tensor, shallow_copy) {
CHECK(tensor_0.meta() == tensor_1.meta());
}
// Minimal StorageProperties subclass used to exercise the TypeInfoTraits
// type-registration machinery in the storage-properties tests.
// NOTE(review): the TypeInfoTraits template argument is NPUStorageProperties,
// not TestStorageProperties — presumably intentional so this test type reuses
// the already-registered NPU type info; confirm against TypeInfoTraits'
// contract before changing.
struct TestStorageProperties
: public StorageProperties,
public TypeInfoTraits<StorageProperties, NPUStorageProperties> {
virtual ~TestStorageProperties() = default;
// Human-readable type name reported to the type-info registry.
static const char* name() { return "TestStorageProperties"; }
};
TEST(dense_tensor, storage_properties) {
const DataType dtype{DataType::FLOAT32};
const DDim dims({1, 2});
......@@ -144,29 +137,9 @@ TEST(dense_tensor, storage_properties) {
auto fancy_allocator = std::unique_ptr<Allocator>(new FancyAllocator);
DenseTensor tensor(fancy_allocator.get(), meta);
// test no storage properties
bool caught_exception = false;
try {
tensor.storage_properties<NPUStorageProperties>();
} catch (phi::enforce::EnforceNotMet& error) {
caught_exception = true;
}
EXPECT_TRUE(caught_exception);
// test custom device storage properties
EXPECT_FALSE(tensor.storage_properties_initialized());
auto npu_properties = std::make_unique<NPUStorageProperties>();
npu_properties->storage_format = 3;
npu_properties->storage_dims = {1, 1, 1, 1, 16};
tensor.set_storage_properties(std::move(npu_properties));
EXPECT_TRUE(tensor.storage_properties_initialized());
auto get_npu_properties = tensor.storage_properties<NPUStorageProperties>();
CHECK_EQ(get_npu_properties.storage_format, 3);
CHECK_EQ(get_npu_properties.storage_dims.size(), 5);
// test error type storage properties
#ifdef PADDLE_WITH_MKLDNN
caught_exception = false;
bool caught_exception = false;
try {
tensor.storage_properties<OneDNNStorageProperties>();
} catch (phi::enforce::EnforceNotMet& error) {
......@@ -174,14 +147,6 @@ TEST(dense_tensor, storage_properties) {
}
EXPECT_TRUE(caught_exception);
#endif
// test copy storage properties
auto cp_tensor = tensor;
auto get_cp_npu_properties =
cp_tensor.storage_properties<NPUStorageProperties>();
CHECK_EQ(get_cp_npu_properties.storage_format, 3);
CHECK_EQ(get_cp_npu_properties.storage_dims.size(), 5);
}
} // namespace tests
} // namespace phi
......@@ -211,7 +211,6 @@ class TestMergedMomentumBase(unittest.TestCase):
)
def run_op(use_nesterov, use_merged):
# NPU Momentum Op does not support rescale_grad
rescale_grad = 1.0
return run_momentum_op(
params,
......
Markdown is supported.
0%
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register.