/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/math/sequence_pooling.h"
#include <gtest/gtest.h>
#include <vector>

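// Runs SequencePoolGradFunctor with pool type "SUM" on the given device and
// checks that every time step of a sequence receives that sequence's row of
// the output gradient.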
template <typename DeviceContext, typename T>
void TestSequencePoolingSum(const DeviceContext &context,
                            const paddle::framework::LoD &lod,
                            const int64_t second_dim) {
  paddle::framework::LoDTensor cpu_out_grad;
  paddle::framework::LoDTensor cpu_in_grad;
  paddle::framework::LoDTensor out_grad;
  paddle::framework::LoDTensor in_grad;

  // construct out_grad's tensor in cpu
  const size_t out_first_dim = lod[0].size() - 1;
  auto out_dims = paddle::framework::make_ddim(
      {static_cast<int64_t>(out_first_dim), second_dim});

  cpu_out_grad.mutable_data<T>(out_dims, paddle::platform::CPUPlace());
  for (int64_t i = 0; i < cpu_out_grad.numel(); ++i) {
    cpu_out_grad.data<T>()[i] = static_cast<T>(i);
  }

  // copy to dst out_grad
  auto place = context.GetPlace();
  if (paddle::platform::is_cpu_place(place)) {
    out_grad = cpu_out_grad;
  } else {
    TensorCopySync(cpu_out_grad, place, &out_grad);
  }

  // construct in_grad
  in_grad.set_lod(lod);
  auto in_dims = paddle::framework::make_ddim(
      {static_cast<int64_t>(lod[0].back()), second_dim});
  in_grad.mutable_data<T>(in_dims, place);

  // check tensor construction result
  PADDLE_ENFORCE_EQ(
      in_grad.dims().size(), out_grad.dims().size(),
      paddle::platform::errors::InvalidArgument(
          "The dimension of input and output shall be same. Expected %ld == "
          "%ld, but got %ld != %ld. Please check the input value.",
          in_grad.dims().size(), out_grad.dims().size(), in_grad.dims().size(),
          out_grad.dims().size()));
  for (int64_t i = 1; i < out_grad.dims().size(); ++i) {
    PADDLE_ENFORCE_EQ(
        in_grad.dims()[i], out_grad.dims()[i],
        paddle::platform::errors::InvalidArgument(
            "The dimension of input and output shall be same. Expected %ld == "
            "%ld, but got %ld != %ld. Please check the input value.",
            in_grad.dims()[i], out_grad.dims()[i], in_grad.dims()[i],
            out_grad.dims()[i]));
  }

  // call functor
  paddle::operators::math::SequencePoolGradFunctor<DeviceContext, T>()(
      context, "SUM", out_grad, &in_grad);

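  // copy the result back to cpu for verification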
  if (paddle::platform::is_cpu_place(place)) {
    cpu_in_grad = in_grad;
  } else {
    TensorCopySync(in_grad, paddle::platform::CPUPlace(), &cpu_in_grad);
    cpu_in_grad.set_lod(in_grad.lod());
  }

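  // in_grad should hold one row per time step and inherit the input LoD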
  EXPECT_EQ(in_grad.numel(), static_cast<int64_t>(lod[0].back() * second_dim));
82
  EXPECT_EQ(in_grad.lod(), lod);

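  // for "SUM" pooling, every time step in sequence i receives the i-th row
  // of out_grad, so each row of in_grad must match it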
  if (paddle::platform::is_cpu_place(place)) {
    for (size_t i = 0; i < in_grad.lod()[0].size() - 1; ++i) {
      int64_t begin = in_grad.lod()[0][i];
      int64_t end = in_grad.lod()[0][i + 1];
      paddle::framework::Tensor tmp = in_grad.Slice(begin, end);
      for (int64_t j = 0; j != tmp.numel() / second_dim; ++j) {
        for (int64_t m = 0; m != second_dim; ++m) {
          EXPECT_EQ(tmp.data<T>()[m + j * second_dim],
                    out_grad.data<T>()[m + i * second_dim]);
        }
      }
    }
  } else {
    for (size_t i = 0; i < cpu_in_grad.lod()[0].size() - 1; ++i) {
      int64_t begin = cpu_in_grad.lod()[0][i];
      int64_t end = cpu_in_grad.lod()[0][i + 1];
      paddle::framework::Tensor tmp = cpu_in_grad.Slice(begin, end);
      for (int64_t j = 0; j != tmp.numel() / second_dim; ++j) {
        for (int64_t m = 0; m != second_dim; ++m) {
          EXPECT_EQ(tmp.data<T>()[m + j * second_dim],
                    cpu_out_grad.data<T>()[m + i * second_dim]);
        }
      }
    }
  }
}

TEST(SequencePoolingGrad, CPU_SUM) {
  auto place = paddle::platform::CPUPlace();
  auto *context = static_cast<paddle::platform::CPUDeviceContext *>(
      paddle::platform::DeviceContextPool::Instance().Get(place));

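  // a single sequence covering all 10 time steps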
  paddle::framework::LoD lod1;
  lod1.push_back(std::vector<size_t>{0, 10});
  TestSequencePoolingSum<paddle::platform::CPUDeviceContext, float>(*context,
                                                                    lod1, 128);

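  // three sequences of lengths 2, 5 and 3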
  paddle::framework::LoD lod2;
  lod2.push_back(std::vector<size_t>{0, 2, 7, 10});
  TestSequencePoolingSum<paddle::platform::CPUDeviceContext, float>(*context,
                                                                    lod2, 128);
}

#ifdef PADDLE_WITH_CUDA
TEST(SequencePoolingGrad, CUDA_SUM) {
  auto place = paddle::platform::CUDAPlace(0);
  auto *context = static_cast<paddle::platform::CUDADeviceContext *>(
      paddle::platform::DeviceContextPool::Instance().Get(place));

  paddle::framework::LoD lod1;
  lod1.push_back(std::vector<size_t>{0, 10});
  TestSequencePoolingSum<paddle::platform::CUDADeviceContext, float>(*context,
                                                                     lod1, 128);

  paddle::framework::LoD lod2;
  lod2.push_back(std::vector<size_t>{0, 2, 7, 10});
  TestSequencePoolingSum<paddle::platform::CUDADeviceContext, float>(*context,
                                                                     lod2, 128);
}
#endif