/* Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <gtest/gtest.h>

#include "paddle/fluid/operators/math/sequence_pooling.h"

#include "paddle/phi/backends/context_pool.h"
#include "paddle/phi/common/place.h"
#include "paddle/phi/core/tensor_utils.h"

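// Exercises SequencePoolGradFunctor with pool type "SUM": scatters an
// out_grad tensor back through the functor and checks that every row of
// in_grad within a sequence equals the corresponding out_grad row.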
template <typename DeviceContext, typename T>
void TestSequencePoolingSum(const DeviceContext &context,
                            const phi::LoD &lod,
                            const int64_t second_dim) {
  phi::DenseTensor cpu_out_grad;
  phi::DenseTensor cpu_in_grad;
  phi::DenseTensor out_grad;
  phi::DenseTensor in_grad;

  // construct the out_grad tensor on the CPU
  const size_t out_first_dim = lod[0].size() - 1;
  auto out_dims =
      phi::make_ddim({static_cast<int64_t>(out_first_dim), second_dim});

  cpu_out_grad.mutable_data<T>(out_dims, phi::CPUPlace());
  for (int64_t i = 0; i < cpu_out_grad.numel(); ++i) {
    cpu_out_grad.data<T>()[i] = static_cast<T>(i);
  }

  // copy out_grad to the destination place
  auto place = context.GetPlace();
  if (place == phi::CPUPlace()) {
    out_grad = cpu_out_grad;
  } else {
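    // blocking copy so the device data is fully written before later reads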
    phi::Copy(context, cpu_out_grad, place, true, &out_grad);
  }

  // construct in_grad
  in_grad.set_lod(lod);
  auto in_dims =
      phi::make_ddim({static_cast<int64_t>(lod[0].back()), second_dim});
  in_grad.mutable_data<T>(in_dims, place);

  // check tensor construction result
  PADDLE_ENFORCE_EQ(
      in_grad.dims().size(),
      out_grad.dims().size(),
      phi::errors::InvalidArgument(
          "The dimension of input and output shall be same. Expected %ld == "
          "%ld, but got %ld != %ld. Please check the input value.",
          in_grad.dims().size(),
          out_grad.dims().size(),
          in_grad.dims().size(),
          out_grad.dims().size()));
  for (int64_t i = 1; i < out_grad.dims().size(); ++i) {
    PADDLE_ENFORCE_EQ(
        in_grad.dims()[i],
        out_grad.dims()[i],
        phi::errors::InvalidArgument(
            "The dimension of input and output shall be same. Expected %ld == "
            "%ld, but got %ld != %ld. Please check the input value.",
            in_grad.dims()[i],
            out_grad.dims()[i],
            in_grad.dims()[i],
            out_grad.dims()[i]));
  }

  // run the SUM pooling gradient functor
  paddle::operators::math::SequencePoolGradFunctor<DeviceContext, T>()(
      context, "SUM", out_grad, &in_grad);

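  // bring the result back to the CPU for inspection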
  if (place == phi::CPUPlace()) {
    cpu_in_grad = in_grad;
  } else {
    phi::Copy(context, in_grad, phi::CPUPlace(), true, &cpu_in_grad);
    cpu_in_grad.set_lod(in_grad.lod());
  }

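  // verify the shape and LoD of the computed input gradient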
  EXPECT_EQ(in_grad.numel(), static_cast<int64_t>(lod[0].back() * second_dim));
  EXPECT_EQ(in_grad.lod(), lod);

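  // SUM pooling sums all rows of a sequence, so its gradient broadcasts
  // each out_grad row to every row of the corresponding sequence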
  if (place == phi::CPUPlace()) {
    for (size_t i = 0; i < in_grad.lod()[0].size() - 1; ++i) {
      int64_t begin = in_grad.lod()[0][i];
      int64_t end = in_grad.lod()[0][i + 1];
      phi::DenseTensor tmp = in_grad.Slice(begin, end);
      for (int64_t j = 0; j != tmp.numel() / second_dim; ++j) {
        for (int64_t m = 0; m != second_dim; ++m) {
          EXPECT_EQ(tmp.data<T>()[m + j * second_dim],
                    out_grad.data<T>()[m + i * second_dim]);
        }
      }
    }
  } else {
    for (size_t i = 0; i < cpu_in_grad.lod()[0].size() - 1; ++i) {
      int64_t begin = cpu_in_grad.lod()[0][i];
      int64_t end = cpu_in_grad.lod()[0][i + 1];
      phi::DenseTensor tmp = cpu_in_grad.Slice(begin, end);
      for (int64_t j = 0; j != tmp.numel() / second_dim; ++j) {
        for (int64_t m = 0; m != second_dim; ++m) {
          EXPECT_EQ(tmp.data<T>()[m + j * second_dim],
                    cpu_out_grad.data<T>()[m + i * second_dim]);
        }
      }
    }
  }
}

TEST(SequencePoolingGrad, CPU_SUM) {
  auto place = phi::CPUPlace();
  auto *context = static_cast<phi::CPUContext *>(
      phi::DeviceContextPool::Instance().Get(place));

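  // a single sequence spanning all 10 rows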
  phi::LoD lod1;
  lod1.push_back(std::vector<size_t>{0, 10});
  TestSequencePoolingSum<phi::CPUContext, float>(*context, lod1, 128);

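  // three sequences of lengths 2, 5, and 3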
  phi::LoD lod2;
  lod2.push_back(std::vector<size_t>{0, 2, 7, 10});
  TestSequencePoolingSum<phi::CPUContext, float>(*context, lod2, 128);
}

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
TEST(SequencePoolingGrad, CUDA_SUM) {
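  // same scenarios as CPU_SUM, executed on GPU device 0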
  auto place = phi::GPUPlace(0);
  auto *context = static_cast<phi::GPUContext *>(
      phi::DeviceContextPool::Instance().Get(place));

  phi::LoD lod1;
  lod1.push_back(std::vector<size_t>{0, 10});
  TestSequencePoolingSum<phi::GPUContext, float>(*context, lod1, 128);

  phi::LoD lod2;
  lod2.push_back(std::vector<size_t>{0, 2, 7, 10});
  TestSequencePoolingSum<phi::GPUContext, float>(*context, lod2, 128);
}
#endif