From 14ebc424d65a32828afe2687d227e08c80a6dc44 Mon Sep 17 00:00:00 2001
From: minqiyang
Date: Wed, 24 Oct 2018 16:09:33 +0800
Subject: [PATCH] Add gpu support for unittest

---
 .../operators/math/sequence_pooling_test.cc  | 43 ++++++++++++++-----
 1 file changed, 33 insertions(+), 10 deletions(-)

diff --git a/paddle/fluid/operators/math/sequence_pooling_test.cc b/paddle/fluid/operators/math/sequence_pooling_test.cc
index c994d470d..af697f1ad 100644
--- a/paddle/fluid/operators/math/sequence_pooling_test.cc
+++ b/paddle/fluid/operators/math/sequence_pooling_test.cc
@@ -19,17 +19,18 @@ limitations under the License. */
 template <typename DeviceContext, typename Place, typename T>
 void TestSequencePoolingSum(const paddle::framework::LoD& lod) {
   paddle::framework::LoDTensor cpu_out_grad;
+  paddle::framework::LoDTensor cpu_in_grad;
   paddle::framework::LoDTensor out_grad;
   paddle::framework::LoDTensor in_grad;
   const size_t second_dim = 128u;
 
   // construct out_grad's tensor in cpu
-  const size_t out_first_dim = lod.size() - 1;
+  const size_t out_first_dim = lod[0].size() - 1;
   auto out_dims = paddle::framework::make_ddim(
       {static_cast<int64_t>(out_first_dim), static_cast<int64_t>(second_dim)});
 
   cpu_out_grad.mutable_data<T>(out_dims, paddle::platform::CPUPlace());
-  for (int64_t i = 0; i < out_grad.numel(); ++i) {
+  for (int64_t i = 0; i < cpu_out_grad.numel(); ++i) {
     cpu_out_grad.data<T>()[i] = static_cast<T>(i);
   }
 
@@ -58,16 +59,38 @@ void TestSequencePoolingSum(const paddle::framework::LoD& lod) {
   paddle::operators::math::SequencePoolGradFunctor<DeviceContext, T>()(
       *context, "SUM", out_grad, &in_grad);
 
+  if (paddle::platform::is_cpu_place(*place)) {
+    cpu_in_grad = in_grad;
+  } else {
+    TensorCopySync(in_grad, paddle::platform::CPUPlace(), &cpu_in_grad);
+    cpu_in_grad.set_lod(in_grad.lod());
+  }
+
   EXPECT_EQ(in_grad.numel(), lod[0].back() * second_dim);
   EXPECT_EQ(in_grad.lod(), lod);
-  for (int64_t i = 0; i < in_grad.lod()[0].size() - 1; ++i) {
-    int64_t begin = in_grad.lod()[0][i];
-    int64_t end = in_grad.lod()[0][i + 1];
-    paddle::framework::Tensor tmp = in_grad.Slice(begin, end);
-    for (int64_t j = 0; j != tmp.numel() / second_dim; ++j) {
-      for (int64_t m = 0; m != second_dim; ++m) {
-        EXPECT_EQ(tmp.data<T>()[m + j * second_dim],
-                  out_grad.data<T>()[m + i * second_dim]);
+
+  if (paddle::platform::is_cpu_place(*place)) {
+    for (int64_t i = 0; i < cpu_in_grad.lod()[0].size() - 1; ++i) {
+      int64_t begin = in_grad.lod()[0][i];
+      int64_t end = in_grad.lod()[0][i + 1];
+      paddle::framework::Tensor tmp = in_grad.Slice(begin, end);
+      for (int64_t j = 0; j != tmp.numel() / second_dim; ++j) {
+        for (int64_t m = 0; m != second_dim; ++m) {
+          EXPECT_EQ(tmp.data<T>()[m + j * second_dim],
+                    out_grad.data<T>()[m + i * second_dim]);
+        }
+      }
+    }
+  } else {
+    for (int64_t i = 0; i < cpu_in_grad.lod()[0].size() - 1; ++i) {
+      int64_t begin = cpu_in_grad.lod()[0][i];
+      int64_t end = cpu_in_grad.lod()[0][i + 1];
+      paddle::framework::Tensor tmp = cpu_in_grad.Slice(begin, end);
+      for (int64_t j = 0; j != tmp.numel() / second_dim; ++j) {
+        for (int64_t m = 0; m != second_dim; ++m) {
+          EXPECT_EQ(tmp.data<T>()[m + j * second_dim],
+                    cpu_out_grad.data<T>()[m + i * second_dim]);
+        }
       }
     }
   }
--
GitLab
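
For context, a minimal sketch of how the templated TestSequencePoolingSum above can be instantiated for both CPU and CUDA places once this patch is applied. The test names and LoD contents below are illustrative assumptions, not taken from this patch; the sketch assumes it sits in sequence_pooling_test.cc next to the function itself.

    // Assumed to live in paddle/fluid/operators/math/sequence_pooling_test.cc,
    // after the definition of TestSequencePoolingSum shown in the diff above.
    #include <gtest/gtest.h>
    #include <vector>

    TEST(SequencePoolingGrad, CPU_SUM) {
      // One sequence of length 10; LoD values are illustrative only.
      paddle::framework::LoD lod;
      lod.push_back(std::vector<size_t>{0, 10});
      TestSequencePoolingSum<paddle::platform::CPUDeviceContext,
                             paddle::platform::CPUPlace, float>(lod);
    }

    #ifdef PADDLE_WITH_CUDA
    TEST(SequencePoolingGrad, CUDA_SUM) {
      // Same check on a CUDA place; the GPU branch copies in_grad back to the
      // host via TensorCopySync before comparing, as added in this patch.
      paddle::framework::LoD lod;
      lod.push_back(std::vector<size_t>{0, 10});
      TestSequencePoolingSum<paddle::platform::CUDADeviceContext,
                             paddle::platform::CUDAPlace, float>(lod);
    }
    #endif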