未验证 提交 c376a940 编写于 作者: G gouzil 提交者: GitHub

[phi] mv sequence_pool to phi - Step 1 : sequence_pooling_test (#52782)

* [phi] mv sequence_pooling_test

* [test] fix include
上级 8e7c3789
...@@ -39,10 +39,6 @@ cc_test( ...@@ -39,10 +39,6 @@ cc_test(
vol2col_test vol2col_test
SRCS vol2col_test.cc SRCS vol2col_test.cc
DEPS vol2col) DEPS vol2col)
cc_test(
sequence_pooling_test
SRCS sequence_pooling_test.cc
DEPS sequence_pooling)
cc_test( cc_test(
beam_search_test beam_search_test
SRCS beam_search_test.cc SRCS beam_search_test.cc
......
...@@ -105,3 +105,8 @@ cc_test( ...@@ -105,3 +105,8 @@ cc_test(
sequence_padding_test sequence_padding_test
SRCS sequence_padding_test.cc SRCS sequence_padding_test.cc
DEPS sequence_padding) DEPS sequence_padding)
cc_test(
sequence_pooling_test
SRCS sequence_pooling_test.cc
DEPS sequence_pooling)
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. /* Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
...@@ -12,13 +12,17 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ...@@ -12,13 +12,17 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include <gtest/gtest.h>

#include "paddle/fluid/operators/math/sequence_pooling.h"
#include "paddle/phi/backends/context_pool.h"
#include "paddle/phi/common/place.h"
#include "paddle/phi/core/tensor_utils.h"
template <typename DeviceContext, typename T> template <typename DeviceContext, typename T>
void TestSequencePoolingSum(const DeviceContext &context, void TestSequencePoolingSum(const DeviceContext &context,
const paddle::framework::LoD &lod, const phi::LoD &lod,
const int64_t second_dim) { const int64_t second_dim) {
phi::DenseTensor cpu_out_grad; phi::DenseTensor cpu_out_grad;
phi::DenseTensor cpu_in_grad; phi::DenseTensor cpu_in_grad;
...@@ -30,17 +34,17 @@ void TestSequencePoolingSum(const DeviceContext &context, ...@@ -30,17 +34,17 @@ void TestSequencePoolingSum(const DeviceContext &context,
auto out_dims = auto out_dims =
phi::make_ddim({static_cast<int64_t>(out_first_dim), second_dim}); phi::make_ddim({static_cast<int64_t>(out_first_dim), second_dim});
cpu_out_grad.mutable_data<T>(out_dims, paddle::platform::CPUPlace()); cpu_out_grad.mutable_data<T>(out_dims, phi::CPUPlace());
for (int64_t i = 0; i < cpu_out_grad.numel(); ++i) { for (int64_t i = 0; i < cpu_out_grad.numel(); ++i) {
cpu_out_grad.data<T>()[i] = static_cast<T>(i); cpu_out_grad.data<T>()[i] = static_cast<T>(i);
} }
// copy to dst out_grad // copy to dst out_grad
auto place = context.GetPlace(); auto place = context.GetPlace();
if (paddle::platform::is_cpu_place(place)) { if (place == phi::CPUPlace()) {
out_grad = cpu_out_grad; out_grad = cpu_out_grad;
} else { } else {
paddle::framework::TensorCopySync(cpu_out_grad, place, &out_grad); phi::Copy(context, cpu_out_grad, place, true, &out_grad);
} }
// construct in_grad // construct in_grad
...@@ -53,7 +57,7 @@ void TestSequencePoolingSum(const DeviceContext &context, ...@@ -53,7 +57,7 @@ void TestSequencePoolingSum(const DeviceContext &context,
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
in_grad.dims().size(), in_grad.dims().size(),
out_grad.dims().size(), out_grad.dims().size(),
paddle::platform::errors::InvalidArgument( phi::errors::InvalidArgument(
"The dimension of input and output shall be same. Expected %ld == " "The dimension of input and output shall be same. Expected %ld == "
"%ld, but got %ld != %ld. Please check the input value.", "%ld, but got %ld != %ld. Please check the input value.",
in_grad.dims().size(), in_grad.dims().size(),
...@@ -64,7 +68,7 @@ void TestSequencePoolingSum(const DeviceContext &context, ...@@ -64,7 +68,7 @@ void TestSequencePoolingSum(const DeviceContext &context,
PADDLE_ENFORCE_EQ( PADDLE_ENFORCE_EQ(
in_grad.dims()[i], in_grad.dims()[i],
out_grad.dims()[i], out_grad.dims()[i],
paddle::platform::errors::InvalidArgument( phi::errors::InvalidArgument(
"The dimension of input and output shall be same. Expected %ld == " "The dimension of input and output shall be same. Expected %ld == "
"%ld, but got %ld != %ld. Please check the input value.", "%ld, but got %ld != %ld. Please check the input value.",
in_grad.dims()[i], in_grad.dims()[i],
...@@ -77,18 +81,17 @@ void TestSequencePoolingSum(const DeviceContext &context, ...@@ -77,18 +81,17 @@ void TestSequencePoolingSum(const DeviceContext &context,
paddle::operators::math::SequencePoolGradFunctor<DeviceContext, T>()( paddle::operators::math::SequencePoolGradFunctor<DeviceContext, T>()(
context, "SUM", out_grad, &in_grad); context, "SUM", out_grad, &in_grad);
if (paddle::platform::is_cpu_place(place)) { if (place == phi::CPUPlace()) {
cpu_in_grad = in_grad; cpu_in_grad = in_grad;
} else { } else {
paddle::framework::TensorCopySync( phi::Copy(context, in_grad, phi::CPUPlace(), true, &cpu_in_grad);
in_grad, paddle::platform::CPUPlace(), &cpu_in_grad);
cpu_in_grad.set_lod(in_grad.lod()); cpu_in_grad.set_lod(in_grad.lod());
} }
EXPECT_EQ(in_grad.numel(), static_cast<int64_t>(lod[0].back() * second_dim)); EXPECT_EQ(in_grad.numel(), static_cast<int64_t>(lod[0].back() * second_dim));
EXPECT_EQ(in_grad.lod(), lod); EXPECT_EQ(in_grad.lod(), lod);
if (paddle::platform::is_cpu_place(place)) { if (place == phi::CPUPlace()) {
for (size_t i = 0; i < in_grad.lod()[0].size() - 1; ++i) { for (size_t i = 0; i < in_grad.lod()[0].size() - 1; ++i) {
int64_t begin = in_grad.lod()[0][i]; int64_t begin = in_grad.lod()[0][i];
int64_t end = in_grad.lod()[0][i + 1]; int64_t end = in_grad.lod()[0][i + 1];
...@@ -116,30 +119,30 @@ void TestSequencePoolingSum(const DeviceContext &context, ...@@ -116,30 +119,30 @@ void TestSequencePoolingSum(const DeviceContext &context,
} }
// Exercises the SUM sequence-pooling gradient on CPU with two LoD layouts:
// a single sequence and three sequences of unequal length.
TEST(SequencePoolingGrad, CPU_SUM) {
  const phi::CPUPlace place;
  auto *dev_ctx = static_cast<phi::CPUContext *>(
      phi::DeviceContextPool::Instance().Get(place));

  // One sequence covering rows [0, 10).
  phi::LoD single_seq;
  single_seq.push_back(std::vector<size_t>{0, 10});
  TestSequencePoolingSum<phi::CPUContext, float>(*dev_ctx, single_seq, 128);

  // Three sequences: rows [0, 2), [2, 7), [7, 10).
  phi::LoD multi_seq;
  multi_seq.push_back(std::vector<size_t>{0, 2, 7, 10});
  TestSequencePoolingSum<phi::CPUContext, float>(*dev_ctx, multi_seq, 128);
}
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
// GPU counterpart of CPU_SUM: runs the same two LoD layouts through the
// SUM sequence-pooling gradient on device 0.
TEST(SequencePoolingGrad, CUDA_SUM) {
  const phi::GPUPlace place(0);
  auto *dev_ctx = static_cast<phi::GPUContext *>(
      phi::DeviceContextPool::Instance().Get(place));

  // One sequence covering rows [0, 10).
  phi::LoD single_seq;
  single_seq.push_back(std::vector<size_t>{0, 10});
  TestSequencePoolingSum<phi::GPUContext, float>(*dev_ctx, single_seq, 128);

  // Three sequences: rows [0, 2), [2, 7), [7, 10).
  phi::LoD multi_seq;
  multi_seq.push_back(std::vector<size_t>{0, 2, 7, 10});
  TestSequencePoolingSum<phi::GPUContext, float>(*dev_ctx, multi_seq, 128);
}
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册