From c376a9408d76bfa58869d4c35a54bf4b25c28923 Mon Sep 17 00:00:00 2001
From: gouzil <66515297+gouzil@users.noreply.github.com>
Date: Wed, 12 Apr 2023 16:53:35 +0800
Subject: [PATCH] [phi] mv sequence_pool to phi - Step 1 : sequence_pooling_test (#52782)

* [phi] mv sequence_pooling_test

* [test] fix include
---
 paddle/fluid/operators/math/CMakeLists.txt    |  4 --
 test/cpp/phi/kernels/CMakeLists.txt           |  5 +++
 .../cpp/phi/kernels}/sequence_pooling_test.cc | 43 ++++++++++---------
 3 files changed, 28 insertions(+), 24 deletions(-)
 rename {paddle/fluid/operators/math => test/cpp/phi/kernels}/sequence_pooling_test.cc (81%)

diff --git a/paddle/fluid/operators/math/CMakeLists.txt b/paddle/fluid/operators/math/CMakeLists.txt
index 6975873b137..42cb92db862 100644
--- a/paddle/fluid/operators/math/CMakeLists.txt
+++ b/paddle/fluid/operators/math/CMakeLists.txt
@@ -39,10 +39,6 @@ cc_test(
   vol2col_test
   SRCS vol2col_test.cc
   DEPS vol2col)
-cc_test(
-  sequence_pooling_test
-  SRCS sequence_pooling_test.cc
-  DEPS sequence_pooling)
 cc_test(
   beam_search_test
   SRCS beam_search_test.cc
diff --git a/test/cpp/phi/kernels/CMakeLists.txt b/test/cpp/phi/kernels/CMakeLists.txt
index a9e897eb614..3e7f394f186 100644
--- a/test/cpp/phi/kernels/CMakeLists.txt
+++ b/test/cpp/phi/kernels/CMakeLists.txt
@@ -105,3 +105,8 @@ cc_test(
   sequence_padding_test
   SRCS sequence_padding_test.cc
   DEPS sequence_padding)
+
+cc_test(
+  sequence_pooling_test
+  SRCS sequence_pooling_test.cc
+  DEPS sequence_pooling)
diff --git a/paddle/fluid/operators/math/sequence_pooling_test.cc b/test/cpp/phi/kernels/sequence_pooling_test.cc
similarity index 81%
rename from paddle/fluid/operators/math/sequence_pooling_test.cc
rename to test/cpp/phi/kernels/sequence_pooling_test.cc
index dac5eb63bfc..3c12d55ed36 100644
--- a/paddle/fluid/operators/math/sequence_pooling_test.cc
+++ b/test/cpp/phi/kernels/sequence_pooling_test.cc
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+/* Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -12,13 +12,17 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
+#include <gtest/gtest.h>
+
 #include "paddle/fluid/operators/math/sequence_pooling.h"
 
-#include <gtest/gtest.h>
+#include "paddle/phi/backends/context_pool.h"
+#include "paddle/phi/common/place.h"
+#include "paddle/phi/core/tensor_utils.h"
 
 template <typename DeviceContext, typename T>
 void TestSequencePoolingSum(const DeviceContext &context,
-                            const paddle::framework::LoD &lod,
+                            const phi::LoD &lod,
                             const int64_t second_dim) {
   phi::DenseTensor cpu_out_grad;
   phi::DenseTensor cpu_in_grad;
@@ -30,17 +34,17 @@ void TestSequencePoolingSum(const DeviceContext &context,
   auto out_dims =
       phi::make_ddim({static_cast<int64_t>(out_first_dim), second_dim});
 
-  cpu_out_grad.mutable_data<T>(out_dims, paddle::platform::CPUPlace());
+  cpu_out_grad.mutable_data<T>(out_dims, phi::CPUPlace());
   for (int64_t i = 0; i < cpu_out_grad.numel(); ++i) {
     cpu_out_grad.data<T>()[i] = static_cast<T>(i);
   }
 
   // copy to dst out_grad
   auto place = context.GetPlace();
-  if (paddle::platform::is_cpu_place(place)) {
+  if (place == phi::CPUPlace()) {
     out_grad = cpu_out_grad;
   } else {
-    paddle::framework::TensorCopySync(cpu_out_grad, place, &out_grad);
+    phi::Copy(context, cpu_out_grad, place, true, &out_grad);
   }
 
   // construct in_grad
@@ -53,7 +57,7 @@ void TestSequencePoolingSum(const DeviceContext &context,
   PADDLE_ENFORCE_EQ(
       in_grad.dims().size(),
       out_grad.dims().size(),
-      paddle::platform::errors::InvalidArgument(
+      phi::errors::InvalidArgument(
           "The dimension of input and output shall be same. Expected %ld == "
          "%ld, but got %ld != %ld. Please check the input value.",
          in_grad.dims().size(),
@@ -64,7 +68,7 @@ void TestSequencePoolingSum(const DeviceContext &context,
     PADDLE_ENFORCE_EQ(
         in_grad.dims()[i],
         out_grad.dims()[i],
-        paddle::platform::errors::InvalidArgument(
+        phi::errors::InvalidArgument(
            "The dimension of input and output shall be same. Expected %ld == "
            "%ld, but got %ld != %ld. Please check the input value.",
            in_grad.dims()[i],
@@ -77,18 +81,17 @@ void TestSequencePoolingSum(const DeviceContext &context,
   paddle::operators::math::SequencePoolGradFunctor<DeviceContext, T>()(
       context, "SUM", out_grad, &in_grad);
 
-  if (paddle::platform::is_cpu_place(place)) {
+  if (place == phi::CPUPlace()) {
     cpu_in_grad = in_grad;
   } else {
-    paddle::framework::TensorCopySync(
-        in_grad, paddle::platform::CPUPlace(), &cpu_in_grad);
+    phi::Copy(context, in_grad, phi::CPUPlace(), true, &cpu_in_grad);
     cpu_in_grad.set_lod(in_grad.lod());
   }
 
   EXPECT_EQ(in_grad.numel(), static_cast<int64_t>(lod[0].back() * second_dim));
   EXPECT_EQ(in_grad.lod(), lod);
 
-  if (paddle::platform::is_cpu_place(place)) {
+  if (place == phi::CPUPlace()) {
     for (size_t i = 0; i < in_grad.lod()[0].size() - 1; ++i) {
       int64_t begin = in_grad.lod()[0][i];
       int64_t end = in_grad.lod()[0][i + 1];
@@ -116,30 +119,30 @@ void TestSequencePoolingSum(const DeviceContext &context,
 }
 
 TEST(SequencePoolingGrad, CPU_SUM) {
-  auto place = paddle::platform::CPUPlace();
+  auto place = phi::CPUPlace();
   auto *context = static_cast<phi::CPUContext *>(
-      paddle::platform::DeviceContextPool::Instance().Get(place));
+      phi::DeviceContextPool::Instance().Get(place));
 
-  paddle::framework::LoD lod1;
+  phi::LoD lod1;
   lod1.push_back(std::vector<size_t>{0, 10});
   TestSequencePoolingSum<phi::CPUContext, float>(*context, lod1, 128);
 
-  paddle::framework::LoD lod2;
+  phi::LoD lod2;
   lod2.push_back(std::vector<size_t>{0, 2, 7, 10});
   TestSequencePoolingSum<phi::CPUContext, float>(*context, lod2, 128);
 }
 
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
 TEST(SequencePoolingGrad, CUDA_SUM) {
-  auto place = paddle::platform::CUDAPlace(0);
+  auto place = phi::GPUPlace(0);
   auto *context = static_cast<phi::GPUContext *>(
-      paddle::platform::DeviceContextPool::Instance().Get(place));
+      phi::DeviceContextPool::Instance().Get(place));
 
-  paddle::framework::LoD lod1;
+  phi::LoD lod1;
   lod1.push_back(std::vector<size_t>{0, 10});
   TestSequencePoolingSum<phi::GPUContext, float>(*context, lod1, 128);
 
-  paddle::framework::LoD lod2;
+  phi::LoD lod2;
   lod2.push_back(std::vector<size_t>{0, 2, 7, 10});
   TestSequencePoolingSum<phi::GPUContext, float>(*context, lod2, 128);
 }
-- 
GitLab