/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/math/sequence_pooling.h"
#include <gtest/gtest.h>

// Exercises SequencePoolGradFunctor with the "SUM" pooling type.
//
// Builds an out_grad tensor on the CPU whose elements are 0, 1, 2, ...,
// copies it to the target device if needed, runs the gradient functor,
// and verifies that every timestep of each sequence in in_grad received
// an exact copy of that sequence's out_grad row (the SUM-pool gradient
// broadcasts the output gradient to every input timestep).
//
// Args:
//   context:    device context to run the functor on (CPU or CUDA).
//   lod:        level-of-detail describing sequence boundaries; lod[0]
//               holds the cumulative offsets, so lod[0].size() - 1 is the
//               number of sequences and lod[0].back() the total timesteps.
//   second_dim: width (feature dimension) of each timestep row.
template <typename DeviceContext, typename T>
void TestSequencePoolingSum(const DeviceContext &context,
                            const paddle::framework::LoD &lod,
                            const int64_t second_dim) {
  paddle::framework::LoDTensor cpu_out_grad;
  paddle::framework::LoDTensor cpu_in_grad;
  paddle::framework::LoDTensor out_grad;
  paddle::framework::LoDTensor in_grad;

  // construct out_grad's tensor in cpu: one row per sequence, filled with
  // a distinct ascending value per element so copies are easy to verify.
  const size_t out_first_dim = lod[0].size() - 1;
  auto out_dims = paddle::framework::make_ddim(
      {static_cast<int64_t>(out_first_dim), second_dim});

  cpu_out_grad.mutable_data<T>(out_dims, paddle::platform::CPUPlace());
  for (int64_t i = 0; i < cpu_out_grad.numel(); ++i) {
    cpu_out_grad.data<T>()[i] = static_cast<T>(i);
  }

  // copy to dst out_grad (no copy needed when the target is the CPU)
  auto place = context.GetPlace();
  if (paddle::platform::is_cpu_place(place)) {
    out_grad = cpu_out_grad;
  } else {
    TensorCopySync(cpu_out_grad, place, &out_grad);
  }

  // construct in_grad: one row per timestep across all sequences
  in_grad.set_lod(lod);
  auto in_dims = paddle::framework::make_ddim(
      {static_cast<int64_t>(lod[0].back()), second_dim});
  in_grad.mutable_data<T>(in_dims, place);

  // check tensor construction result
  PADDLE_ENFORCE_EQ(
      in_grad.dims().size(), out_grad.dims().size(),
      paddle::platform::errors::InvalidArgument(
          "The dimension of input and output shall be same. Expected %ld == "
          "%ld, but got %ld != %ld. Please check the input value.",
          in_grad.dims().size(), out_grad.dims().size(), in_grad.dims().size(),
          out_grad.dims().size()));
  // All trailing dims must match; only the leading (sequence/timestep)
  // dim differs between the pooled output and the unpooled input.
  for (int64_t i = 1; i < out_grad.dims().size(); ++i) {
    PADDLE_ENFORCE_EQ(
        in_grad.dims()[i], out_grad.dims()[i],
        paddle::platform::errors::InvalidArgument(
            "The dimension of input and output shall be same. Expected %ld == "
            "%ld, but got %ld != %ld. Please check the input value.",
            in_grad.dims()[i], out_grad.dims()[i], in_grad.dims()[i],
            out_grad.dims()[i]));
  }

  // call functor under test
  paddle::operators::math::SequencePoolGradFunctor<DeviceContext, T>()(
      context, "SUM", out_grad, &in_grad);

  // bring the result back to the CPU for inspection
  if (paddle::platform::is_cpu_place(place)) {
    cpu_in_grad = in_grad;
  } else {
    TensorCopySync(in_grad, paddle::platform::CPUPlace(), &cpu_in_grad);
    cpu_in_grad.set_lod(in_grad.lod());
  }

  // shape and LoD must be preserved by the functor
  EXPECT_EQ(in_grad.numel(), static_cast<int64_t>(lod[0].back() * second_dim));
  EXPECT_EQ(in_grad.lod(), lod);

  // For SUM pooling, d(in) = d(out) broadcast over each sequence: every
  // timestep row j of sequence i must equal out_grad row i.
  if (paddle::platform::is_cpu_place(place)) {
    for (size_t i = 0; i < in_grad.lod()[0].size() - 1; ++i) {
      int64_t begin = in_grad.lod()[0][i];
      int64_t end = in_grad.lod()[0][i + 1];
      paddle::framework::Tensor tmp = in_grad.Slice(begin, end);
      for (int64_t j = 0; j != tmp.numel() / second_dim; ++j) {
        for (int64_t m = 0; m != second_dim; ++m) {
          EXPECT_EQ(tmp.data<T>()[m + j * second_dim],
                    out_grad.data<T>()[m + i * second_dim]);
        }
      }
    }
  } else {
    for (size_t i = 0; i < cpu_in_grad.lod()[0].size() - 1; ++i) {
      int64_t begin = cpu_in_grad.lod()[0][i];
      int64_t end = cpu_in_grad.lod()[0][i + 1];
      paddle::framework::Tensor tmp = cpu_in_grad.Slice(begin, end);
      for (int64_t j = 0; j != tmp.numel() / second_dim; ++j) {
        for (int64_t m = 0; m != second_dim; ++m) {
          EXPECT_EQ(tmp.data<T>()[m + j * second_dim],
                    cpu_out_grad.data<T>()[m + i * second_dim]);
        }
      }
    }
  }
}

// Runs the SUM-pool gradient check on the CPU with a single-sequence LoD
// ({0, 10}: one sequence of 10 timesteps) and a multi-sequence LoD
// ({0, 2, 7, 10}: three sequences of lengths 2, 5, 3), feature width 128.
TEST(SequencePoolingGrad, CPU_SUM) {
  auto place = paddle::platform::CPUPlace();
  auto *context = static_cast<paddle::platform::CPUDeviceContext *>(
      paddle::platform::DeviceContextPool::Instance().Get(place));

  paddle::framework::LoD lod1;
  lod1.push_back(std::vector<size_t>{0, 10});
  TestSequencePoolingSum<paddle::platform::CPUDeviceContext, float>(*context,
                                                                    lod1, 128);

  paddle::framework::LoD lod2;
  lod2.push_back(std::vector<size_t>{0, 2, 7, 10});
  TestSequencePoolingSum<paddle::platform::CPUDeviceContext, float>(*context,
                                                                    lod2, 128);
}

// Same SUM-pool gradient check as CPU_SUM, but on device 0 of a CUDA/HIP
// build; the helper copies tensors across the host/device boundary.
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
TEST(SequencePoolingGrad, CUDA_SUM) {
  auto place = paddle::platform::CUDAPlace(0);
  auto *context = static_cast<paddle::platform::CUDADeviceContext *>(
      paddle::platform::DeviceContextPool::Instance().Get(place));

  paddle::framework::LoD lod1;
  lod1.push_back(std::vector<size_t>{0, 10});
  TestSequencePoolingSum<paddle::platform::CUDADeviceContext, float>(*context,
                                                                     lod1, 128);

  paddle::framework::LoD lod2;
  lod2.push_back(std::vector<size_t>{0, 2, 7, 10});
  TestSequencePoolingSum<paddle::platform::CUDADeviceContext, float>(*context,
                                                                     lod2, 128);
}
#endif