// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "glog/logging.h"
#include "gtest/gtest.h"
#include "paddle/phi/api/include/tensor.h"
#include "paddle/phi/api/lib/ext_compat_utils.h"
#include "paddle/phi/core/kernel_registry.h"

PD_DECLARE_KERNEL(copy, CPU, ALL_LAYOUT);

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
PD_DECLARE_KERNEL(copy, GPU, ALL_LAYOUT);
#endif
namespace paddle {
namespace tests {
template <typename T>
31
experimental::Tensor InitCPUTensorForTest() {
C
Chen Weihang 已提交
32
  std::vector<int64_t> tensor_shape{5, 5};
33
  auto t1 = experimental::Tensor(paddle::PlaceType::kCPU, tensor_shape);
34 35
  auto* p_data_ptr = t1.mutable_data<T>(paddle::PlaceType::kCPU);
  for (int64_t i = 0; i < t1.size(); i++) {
36
    p_data_ptr[i] = T(5);
37 38 39 40 41 42 43 44 45 46
  }
  return t1;
}

// Round-trips a tensor through copy_to and verifies place and contents:
// CPU -> CPU always, then CPU -> GPU -> GPU -> CPU when built with
// CUDA/HIP support.
template <typename T>
void TestCopyTensor() {
  auto src = InitCPUTensorForTest<T>();
  auto cpu_copy = src.template copy_to<T>(paddle::PlaceType::kCPU);
  CHECK((paddle::PlaceType::kCPU == cpu_copy.place()));
  const int64_t numel = src.size();
  for (int64_t idx = 0; idx < numel; ++idx) {
    CHECK_EQ(cpu_copy.template mutable_data<T>()[idx], T(5));
  }
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  VLOG(2) << "Do GPU copy test";
  auto gpu_copy = cpu_copy.template copy_to<T>(paddle::PlaceType::kGPU);
  CHECK((paddle::PlaceType::kGPU == gpu_copy.place()));
  auto gpu_copy_again = gpu_copy.template copy_to<T>(paddle::PlaceType::kGPU);
  CHECK((paddle::PlaceType::kGPU == gpu_copy_again.place()));
  auto back_on_cpu =
      gpu_copy_again.template copy_to<T>(paddle::PlaceType::kCPU);
  CHECK((paddle::PlaceType::kCPU == back_on_cpu.place()));
  for (int64_t idx = 0; idx < numel; ++idx) {
    CHECK_EQ(back_on_cpu.template mutable_data<T>()[idx], T(5));
  }
#endif
}

void TestAPIPlace() {
C
Chen Weihang 已提交
65
  std::vector<int64_t> tensor_shape = {5, 5};
66
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
67
  auto t1 = experimental::Tensor(paddle::PlaceType::kGPU, tensor_shape);
68
  t1.mutable_data<float>(paddle::PlaceType::kGPU);
69 70
  CHECK((paddle::PlaceType::kGPU == t1.place()));
#endif
71
  auto t2 = experimental::Tensor(paddle::PlaceType::kCPU, tensor_shape);
72
  t2.mutable_data<float>(paddle::PlaceType::kCPU);
73 74 75 76
  CHECK((paddle::PlaceType::kCPU == t2.place()));
}

void TestAPISizeAndShape() {
C
Chen Weihang 已提交
77
  std::vector<int64_t> tensor_shape = {5, 5};
78
  auto t1 = experimental::Tensor(paddle::PlaceType::kCPU, tensor_shape);
79 80 81 82
  CHECK_EQ(t1.size(), 25);
  CHECK(t1.shape() == tensor_shape);
}

H
Hao Lin 已提交
83 84 85 86 87
void TestAPISlice() {
  std::vector<int64_t> tensor_shape_origin1 = {5, 5};
  std::vector<int64_t> tensor_shape_sub1 = {3, 5};
  std::vector<int64_t> tensor_shape_origin2 = {5, 5, 5};
  std::vector<int64_t> tensor_shape_sub2 = {1, 5, 5};
88
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
89
  auto t1 = experimental::Tensor(paddle::PlaceType::kGPU, tensor_shape_origin1);
90
  t1.mutable_data<float>(paddle::PlaceType::kGPU);
H
Hao Lin 已提交
91 92
  CHECK(t1.slice(0, 5).shape() == tensor_shape_origin1);
  CHECK(t1.slice(0, 3).shape() == tensor_shape_sub1);
93
  auto t2 = experimental::Tensor(paddle::PlaceType::kGPU, tensor_shape_origin2);
94
  t2.mutable_data<float>(paddle::PlaceType::kGPU);
H
Hao Lin 已提交
95 96
  CHECK(t2.slice(4, 5).shape() == tensor_shape_sub2);
#endif
97
  auto t3 = experimental::Tensor(paddle::PlaceType::kCPU, tensor_shape_origin1);
98
  t3.mutable_data<float>(paddle::PlaceType::kCPU);
H
Hao Lin 已提交
99 100
  CHECK(t3.slice(0, 5).shape() == tensor_shape_origin1);
  CHECK(t3.slice(0, 3).shape() == tensor_shape_sub1);
101
  auto t4 = experimental::Tensor(paddle::PlaceType::kCPU, tensor_shape_origin2);
102
  t4.mutable_data<float>(paddle::PlaceType::kCPU);
H
Hao Lin 已提交
103 104 105 106 107
  CHECK(t4.slice(4, 5).shape() == tensor_shape_sub2);

  // Test writing function for sliced tensor
  auto t = InitCPUTensorForTest<float>();
  auto t_sliced = t.slice(0, 1);
108 109
  auto* t_sliced_data_ptr =
      t_sliced.mutable_data<float>(paddle::PlaceType::kCPU);
H
Hao Lin 已提交
110 111 112
  for (int64_t i = 0; i < t_sliced.size(); i++) {
    t_sliced_data_ptr[i] += static_cast<float>(5);
  }
113
  auto* t_data_ptr = t.mutable_data<float>(paddle::PlaceType::kCPU);
H
Hao Lin 已提交
114 115 116 117 118
  for (int64_t i = 0; i < t_sliced.size(); i++) {
    CHECK_EQ(t_data_ptr[i], static_cast<float>(10));
  }
}

119 120
template <typename T>
paddle::DataType TestDtype() {
C
Chen Weihang 已提交
121
  std::vector<int64_t> tensor_shape = {5, 5};
122
  auto t1 = experimental::Tensor(paddle::PlaceType::kCPU, tensor_shape);
123
  t1.template mutable_data<T>(paddle::PlaceType::kCPU);
124 125 126 127 128
  return t1.type();
}

// Casts a tensor with element type T to `data_type` and checks the result's
// dtype, on CPU and (when built with CUDA/HIP) on GPU.
template <typename T>
void TestCast(paddle::DataType data_type) {
  const std::vector<int64_t> shape = {5, 5};
  auto cpu_src = experimental::Tensor(paddle::PlaceType::kCPU, shape);
  cpu_src.template mutable_data<T>(paddle::PlaceType::kCPU);
  auto cpu_dst = cpu_src.cast(data_type);
  CHECK(cpu_dst.type() == data_type);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  // The GPU tensor is shaped via reshape() rather than the shaped
  // constructor, exercising that construction path as well.
  auto gpu_src = experimental::Tensor(paddle::PlaceType::kGPU);
  gpu_src.reshape(shape);
  gpu_src.template mutable_data<T>(paddle::PlaceType::kGPU);
  auto gpu_dst = gpu_src.cast(data_type);
  CHECK(gpu_dst.type() == data_type);
#endif
}

// Runs the copy round-trip test (TestCopyTensor) for every element type the
// Tensor copy API supports, logging each type before it runs.
void GroupTestCopy() {
  VLOG(2) << "Float cpu-cpu-gpu-gpu-cpu";
  TestCopyTensor<float>();
  VLOG(2) << "Double cpu-cpu-gpu-gpu-cpu";
  TestCopyTensor<double>();
  VLOG(2) << "int cpu-cpu-gpu-gpu-cpu";
  TestCopyTensor<int>();
  VLOG(2) << "int64 cpu-cpu-gpu-gpu-cpu";
  TestCopyTensor<int64_t>();
  VLOG(2) << "int16 cpu-cpu-gpu-gpu-cpu";
  TestCopyTensor<int16_t>();
  VLOG(2) << "int8 cpu-cpu-gpu-gpu-cpu";
  TestCopyTensor<int8_t>();
  VLOG(2) << "uint8 cpu-cpu-gpu-gpu-cpu";
  TestCopyTensor<uint8_t>();
  VLOG(2) << "complex<float> cpu-cpu-gpu-gpu-cpu";
  TestCopyTensor<paddle::complex64>();
  VLOG(2) << "complex<double> cpu-cpu-gpu-gpu-cpu";
  TestCopyTensor<paddle::complex128>();
  VLOG(2) << "Fp16 cpu-cpu-gpu-gpu-cpu";
  TestCopyTensor<paddle::float16>();
}

// Runs the cast test (TestCast) for each source element type. Most types are
// cast to FLOAT32; float16 is cast to FLOAT16 (an identity cast).
void GroupTestCast() {
  VLOG(2) << "int cast";
  TestCast<int>(paddle::DataType::FLOAT32);
  VLOG(2) << "int32 cast";
  TestCast<int32_t>(paddle::DataType::FLOAT32);
  VLOG(2) << "int64 cast";
  TestCast<int64_t>(paddle::DataType::FLOAT32);
  VLOG(2) << "double cast";
  TestCast<double>(paddle::DataType::FLOAT32);
  VLOG(2) << "bool cast";
  TestCast<bool>(paddle::DataType::FLOAT32);
  VLOG(2) << "uint8 cast";
  TestCast<uint8_t>(paddle::DataType::FLOAT32);
  VLOG(2) << "float cast";
  TestCast<float>(paddle::DataType::FLOAT32);
  VLOG(2) << "complex<float> cast";
  TestCast<paddle::complex64>(paddle::DataType::FLOAT32);
  VLOG(2) << "complex<double> cast";
  TestCast<paddle::complex128>(paddle::DataType::FLOAT32);
  VLOG(2) << "float16 cast";
  TestCast<paddle::float16>(paddle::DataType::FLOAT16);
}

// Checks that mutable_data<T>() gives the tensor the DataType matching T,
// for every supported element type (via TestDtype).
void GroupTestDtype() {
  CHECK(TestDtype<float>() == paddle::DataType::FLOAT32);
  CHECK(TestDtype<double>() == paddle::DataType::FLOAT64);
  CHECK(TestDtype<int>() == paddle::DataType::INT32);
  CHECK(TestDtype<int64_t>() == paddle::DataType::INT64);
  CHECK(TestDtype<int16_t>() == paddle::DataType::INT16);
  CHECK(TestDtype<int8_t>() == paddle::DataType::INT8);
  CHECK(TestDtype<uint8_t>() == paddle::DataType::UINT8);
  CHECK(TestDtype<paddle::complex64>() == paddle::DataType::COMPLEX64);
  CHECK(TestDtype<paddle::complex128>() == paddle::DataType::COMPLEX128);
  CHECK(TestDtype<paddle::float16>() == paddle::DataType::FLOAT16);
}

202
void TestInitilized() {
203
  experimental::Tensor test_tensor(paddle::PlaceType::kCPU, {1, 1});
204
  CHECK(test_tensor.is_initialized() == false);
205
  test_tensor.mutable_data<float>(paddle::PlaceType::kCPU);
206
  CHECK(test_tensor.is_initialized() == true);
207
  float* tensor_data = test_tensor.mutable_data<float>();
208 209 210 211 212 213 214 215
  for (int i = 0; i < test_tensor.size(); i++) {
    tensor_data[i] = 0.5;
  }
  for (int i = 0; i < test_tensor.size(); i++) {
    CHECK(tensor_data[i] == 0.5);
  }
}

C
Chen Weihang 已提交
216 217 218 219 220
void TestJudgeTensorType() {
  experimental::Tensor test_tensor(paddle::PlaceType::kCPU, {1, 1});
  CHECK(test_tensor.is_dense_tensor() == true);
}

// Entry point: runs every tensor API test group in sequence, logging each
// group name before it starts.
TEST(PhiTensor, All) {
  VLOG(2) << "TestCopy";
  GroupTestCopy();
  VLOG(2) << "TestDtype";
  GroupTestDtype();
  VLOG(2) << "TestShape";
  TestAPISizeAndShape();
  VLOG(2) << "TestPlace";
  TestAPIPlace();
  VLOG(2) << "TestSlice";
  TestAPISlice();
  VLOG(2) << "TestCast";
  GroupTestCast();
  VLOG(2) << "TestInitilized";
  TestInitilized();
  VLOG(2) << "TestJudgeTensorType";
  TestJudgeTensorType();
}

}  // namespace tests
}  // namespace paddle