/* Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <gtest/gtest.h>

#include "paddle/phi/kernels/funcs/sequence_padding.h"

#include "paddle/phi/backends/context_pool.h"
#include "paddle/phi/core/tensor_utils.h"
template <typename DeviceContext, typename T>
void TestSequencePadding(const DeviceContext &context,
23
                         const phi::LoD &lod,
Y
Yiqun Liu 已提交
24
                         const size_t sequence_width) {
25 26 27 28 29 30 31
  phi::DenseTensor cpu_seq;
  phi::DenseTensor cpu_seq_back;
  phi::DenseTensor seq;
  phi::DenseTensor seq_back;
  phi::DenseTensor padding;
  phi::DenseTensor cpu_pad_value;
  phi::DenseTensor pad_value;
Y
Yiqun Liu 已提交
32 33

  const size_t level = lod.size() - 1;
34 35
  auto seq_dims = phi::make_ddim({static_cast<int64_t>(lod[level].back()),
                                  static_cast<int64_t>(sequence_width)});
Y
Yiqun Liu 已提交
36 37

  cpu_seq.set_lod(lod);
38
  auto *dev_ctx = static_cast<phi::CPUContext *>(
39
      phi::DeviceContextPool::Instance().Get(phi::CPUPlace()));
40 41 42
  cpu_seq.Resize(seq_dims);
  dev_ctx->template Alloc<T>(&cpu_seq);

C
chengduoZH 已提交
43
  for (int64_t i = 0; i < cpu_seq.numel(); ++i) {
Y
Yiqun Liu 已提交
44 45 46
    cpu_seq.data<T>()[i] = static_cast<T>(i);
  }

47
  auto place = context.GetPlace();
48
  if (place.GetType() == phi::AllocationType::CPU) {
Y
Yiqun Liu 已提交
49 50
    seq = cpu_seq;
  } else {
51
    phi::Copy(context, cpu_seq, place, true, &seq);
Y
Yiqun Liu 已提交
52 53 54 55
    seq.set_lod(lod);
  }

  const size_t max_sequence_length =
56
      phi::funcs::MaximumSequenceLength(lod[level]);
Y
Yiqun Liu 已提交
57
  const size_t num_sequences = lod[level].size() - 1;
58 59 60
  auto padding_dims = phi::make_ddim({static_cast<int64_t>(max_sequence_length),
                                      static_cast<int64_t>(num_sequences),
                                      static_cast<int64_t>(sequence_width)});
Y
yangyaming 已提交
61

62 63
  padding.Resize(padding_dims);
  context.template Alloc<T>(&padding);
Y
yangyaming 已提交
64

65 66
  cpu_pad_value.Resize({1});
  T *pad_value_data = dev_ctx->template Alloc<T>(&cpu_pad_value);
F
fengjiayi 已提交
67
  *pad_value_data = static_cast<T>(0);
68
  if (place.GetType() == phi::AllocationType::CPU) {
F
fengjiayi 已提交
69 70
    pad_value = cpu_pad_value;
  } else {
71
    phi::Copy(context, cpu_pad_value, place, true, &pad_value);
F
fengjiayi 已提交
72 73
  }

74
  phi::funcs::PaddingLoDTensorFunctor<DeviceContext, T>()(
75 76 77 78 79 80 81
      context,
      seq,
      &padding,
      pad_value,
      -1,
      0,
      false,
82
      phi::funcs::kLengthBatchWidth);
Y
Yiqun Liu 已提交
83 84

  seq_back.set_lod(lod);
85 86 87 88
  seq_back.Resize(seq_dims);
  context.template Alloc<T>(&seq_back);
  phi::funcs::UnpaddingLoDTensorFunctor<DeviceContext, T>()(
      context, padding, &seq_back, -1, 0, false, phi::funcs::kLengthBatchWidth);
Y
Yiqun Liu 已提交
89

90
  if (place.GetType() == phi::AllocationType::CPU) {
Y
Yiqun Liu 已提交
91 92
    cpu_seq_back = seq_back;
  } else {
93
    phi::Copy(context, seq_back, phi::CPUPlace(), true, &cpu_seq_back);
Y
Yiqun Liu 已提交
94 95 96 97 98
    cpu_seq_back.set_lod(lod);
  }

  EXPECT_EQ(cpu_seq.numel(), cpu_seq_back.numel());
  EXPECT_EQ(cpu_seq.dims(), cpu_seq_back.dims());
C
chengduoZH 已提交
99
  for (int64_t i = 0; i < cpu_seq.numel(); ++i) {
Y
Yiqun Liu 已提交
100 101
    EXPECT_EQ(cpu_seq.data<T>()[i], cpu_seq_back.data<T>()[i]);
  }
102
}
Y
Yiqun Liu 已提交
103 104

TEST(Seq2BatchPadding, CPU) {
105
  auto place = phi::CPUPlace();
L
Leo Chen 已提交
106
  auto *context = static_cast<phi::CPUContext *>(
107
      phi::DeviceContextPool::Instance().Get(place));
108

109
  phi::LoD lod1;
Y
Yiqun Liu 已提交
110
  lod1.push_back(std::vector<size_t>{0, 10});
L
Leo Chen 已提交
111
  TestSequencePadding<phi::CPUContext, float>(*context, lod1, 16);
Y
Yiqun Liu 已提交
112

113
  phi::LoD lod2;
Y
Yiqun Liu 已提交
114
  lod2.push_back(std::vector<size_t>{0, 2, 7, 10});
L
Leo Chen 已提交
115
  TestSequencePadding<phi::CPUContext, float>(*context, lod2, 128);
Y
Yiqun Liu 已提交
116 117
}

118
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
// Same round trip as the CPU test, exercised on the GPU backend.
// Compiled only when a CUDA or HIP build is configured.
TEST(SequencePadding, CUDA) {
  auto place = phi::GPUPlace(0);
  auto *ctx = static_cast<phi::GPUContext *>(
      phi::DeviceContextPool::Instance().Get(place));

  // One sequence of length 10, feature width 16.
  phi::LoD single_seq_lod;
  single_seq_lod.push_back(std::vector<size_t>{0, 10});
  TestSequencePadding<phi::GPUContext, float>(*ctx, single_seq_lod, 16);

  // Three sequences of lengths 2, 5, and 3, feature width 128.
  phi::LoD multi_seq_lod;
  multi_seq_lod.push_back(std::vector<size_t>{0, 2, 7, 10});
  TestSequencePadding<phi::GPUContext, float>(*ctx, multi_seq_lod, 128);
}
#endif