// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <cstdint>
#include <vector>

#include "glog/logging.h"
#include "gtest/gtest.h"

#include "paddle/pten/api/include/tensor.h"
#include "paddle/pten/api/lib/ext_compat_utils.h"

namespace pten {
namespace tests {

// Creates a 5x5 CPU tensor of element type T and fills every element
// with T(5). Used as the shared fixture by the copy/slice tests below.
template <typename T>
paddle::Tensor InitCPUTensorForTest() {
  std::vector<int64_t> tensor_shape{5, 5};
  auto t1 = paddle::Tensor(paddle::PlaceType::kCPU, tensor_shape);
  // mutable_data allocates the underlying storage on the requested place.
  auto* p_data_ptr = t1.mutable_data<T>(paddle::PlaceType::kCPU);
  for (int64_t i = 0; i < t1.size(); i++) {
    p_data_ptr[i] = T(5);
  }
  return t1;
}

// Round-trips a tensor through copy_to (CPU->CPU, and when a device build
// is available CPU->GPU->GPU->CPU) and verifies both the reported place
// and that the element values survive each copy.
template <typename T>
void TestCopyTensor() {
  auto t1 = InitCPUTensorForTest<T>();
  auto t1_cpu_cp = t1.template copy_to<T>(paddle::PlaceType::kCPU);
  CHECK((paddle::PlaceType::kCPU == t1_cpu_cp.place()));
  for (int64_t i = 0; i < t1.size(); i++) {
    CHECK_EQ(t1_cpu_cp.template data<T>()[i], T(5));
  }
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  VLOG(2) << "Do GPU copy test";
  // CPU -> GPU.
  auto t1_gpu_cp = t1_cpu_cp.template copy_to<T>(paddle::PlaceType::kGPU);
  CHECK((paddle::PlaceType::kGPU == t1_gpu_cp.place()));
  // GPU -> GPU (same-place copy).
  auto t1_gpu_cp_cp = t1_gpu_cp.template copy_to<T>(paddle::PlaceType::kGPU);
  CHECK((paddle::PlaceType::kGPU == t1_gpu_cp_cp.place()));
  // GPU -> CPU, then verify the data made it all the way back.
  auto t1_gpu_cp_cp_cpu =
      t1_gpu_cp_cp.template copy_to<T>(paddle::PlaceType::kCPU);
  CHECK((paddle::PlaceType::kCPU == t1_gpu_cp_cp_cpu.place()));
  for (int64_t i = 0; i < t1.size(); i++) {
    CHECK_EQ(t1_gpu_cp_cp_cpu.template data<T>()[i], T(5));
  }
#endif
}

void TestAPIPlace() {
C
Chen Weihang 已提交
58
  std::vector<int64_t> tensor_shape = {5, 5};
59
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
60
  auto t1 = paddle::Tensor(paddle::PlaceType::kGPU, tensor_shape);
61 62 63
  t1.mutable_data<float>();
  CHECK((paddle::PlaceType::kGPU == t1.place()));
#endif
64
  auto t2 = paddle::Tensor(paddle::PlaceType::kCPU, tensor_shape);
65 66 67 68 69
  t2.mutable_data<float>();
  CHECK((paddle::PlaceType::kCPU == t2.place()));
}

void TestAPISizeAndShape() {
C
Chen Weihang 已提交
70
  std::vector<int64_t> tensor_shape = {5, 5};
71
  auto t1 = paddle::Tensor(paddle::PlaceType::kCPU, tensor_shape);
72 73 74 75
  CHECK_EQ(t1.size(), 25);
  CHECK(t1.shape() == tensor_shape);
}

H
Hao Lin 已提交
76 77 78 79 80
void TestAPISlice() {
  std::vector<int64_t> tensor_shape_origin1 = {5, 5};
  std::vector<int64_t> tensor_shape_sub1 = {3, 5};
  std::vector<int64_t> tensor_shape_origin2 = {5, 5, 5};
  std::vector<int64_t> tensor_shape_sub2 = {1, 5, 5};
81
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
H
Hao Lin 已提交
82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110
  auto t1 = paddle::Tensor(paddle::PlaceType::kGPU, tensor_shape_origin1);
  t1.mutable_data<float>();
  CHECK(t1.slice(0, 5).shape() == tensor_shape_origin1);
  CHECK(t1.slice(0, 3).shape() == tensor_shape_sub1);
  auto t2 = paddle::Tensor(paddle::PlaceType::kGPU, tensor_shape_origin2);
  t2.mutable_data<float>();
  CHECK(t2.slice(4, 5).shape() == tensor_shape_sub2);
#endif
  auto t3 = paddle::Tensor(paddle::PlaceType::kCPU, tensor_shape_origin1);
  t3.mutable_data<float>();
  CHECK(t3.slice(0, 5).shape() == tensor_shape_origin1);
  CHECK(t3.slice(0, 3).shape() == tensor_shape_sub1);
  auto t4 = paddle::Tensor(paddle::PlaceType::kCPU, tensor_shape_origin2);
  t4.mutable_data<float>();
  CHECK(t4.slice(4, 5).shape() == tensor_shape_sub2);

  // Test writing function for sliced tensor
  auto t = InitCPUTensorForTest<float>();
  auto t_sliced = t.slice(0, 1);
  auto* t_sliced_data_ptr = t_sliced.mutable_data<float>();
  for (int64_t i = 0; i < t_sliced.size(); i++) {
    t_sliced_data_ptr[i] += static_cast<float>(5);
  }
  auto* t_data_ptr = t.mutable_data<float>();
  for (int64_t i = 0; i < t_sliced.size(); i++) {
    CHECK_EQ(t_data_ptr[i], static_cast<float>(10));
  }
}

111 112
template <typename T>
paddle::DataType TestDtype() {
C
Chen Weihang 已提交
113
  std::vector<int64_t> tensor_shape = {5, 5};
114
  auto t1 = paddle::Tensor(paddle::PlaceType::kCPU, tensor_shape);
115 116 117 118 119 120
  t1.template mutable_data<T>();
  return t1.type();
}

// Casts a tensor with element type T to `data_type` and checks the result
// reports the requested dtype, on CPU always and on GPU in device builds.
template <typename T>
void TestCast(paddle::DataType data_type) {
  std::vector<int64_t> tensor_shape = {5, 5};
  auto t1 = paddle::Tensor(paddle::PlaceType::kCPU, tensor_shape);
  t1.template mutable_data<T>();
  auto t2 = t1.cast(data_type);
  CHECK(t2.type() == data_type);
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  // GPU path uses the reshape-then-allocate construction variant.
  auto tg1 = paddle::Tensor(paddle::PlaceType::kGPU);
  tg1.reshape(tensor_shape);
  tg1.template mutable_data<T>();
  auto tg2 = tg1.cast(data_type);
  CHECK(tg2.type() == data_type);
#endif
}

void GroupTestCopy() {
  VLOG(2) << "Float cpu-cpu-gpu-gpu-cpu";
  TestCopyTensor<float>();
  VLOG(2) << "Double cpu-cpu-gpu-gpu-cpu";
  TestCopyTensor<double>();
140
  VLOG(2) << "int cpu-cpu-gpu-gpu-cpu";
141 142 143 144 145 146 147 148 149
  TestCopyTensor<int>();
  VLOG(2) << "int64 cpu-cpu-gpu-gpu-cpu";
  TestCopyTensor<int64_t>();
  VLOG(2) << "int16 cpu-cpu-gpu-gpu-cpu";
  TestCopyTensor<int16_t>();
  VLOG(2) << "int8 cpu-cpu-gpu-gpu-cpu";
  TestCopyTensor<int8_t>();
  VLOG(2) << "uint8 cpu-cpu-gpu-gpu-cpu";
  TestCopyTensor<uint8_t>();
150
  VLOG(2) << "complex<float> cpu-cpu-gpu-gpu-cpu";
151
  TestCopyTensor<paddle::complex64>();
152
  VLOG(2) << "complex<double> cpu-cpu-gpu-gpu-cpu";
153
  TestCopyTensor<paddle::complex128>();
154 155
  VLOG(2) << "Fp16 cpu-cpu-gpu-gpu-cpu";
  TestCopyTensor<paddle::float16>();
156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172
}

// Runs TestCast for every supported source element type. Most types cast
// to FLOAT32; float16 casts to FLOAT16.
void GroupTestCast() {
  VLOG(2) << "int cast";
  TestCast<int>(paddle::DataType::FLOAT32);
  VLOG(2) << "int32 cast";
  TestCast<int32_t>(paddle::DataType::FLOAT32);
  VLOG(2) << "int64 cast";
  TestCast<int64_t>(paddle::DataType::FLOAT32);
  VLOG(2) << "double cast";
  TestCast<double>(paddle::DataType::FLOAT32);
  VLOG(2) << "bool cast";
  TestCast<bool>(paddle::DataType::FLOAT32);
  VLOG(2) << "uint8 cast";
  TestCast<uint8_t>(paddle::DataType::FLOAT32);
  VLOG(2) << "float cast";
  TestCast<float>(paddle::DataType::FLOAT32);
  VLOG(2) << "complex<float> cast";
  TestCast<paddle::complex64>(paddle::DataType::FLOAT32);
  VLOG(2) << "complex<double> cast";
  TestCast<paddle::complex128>(paddle::DataType::FLOAT32);
  VLOG(2) << "float16 cast";
  TestCast<paddle::float16>(paddle::DataType::FLOAT16);
}

// Verifies the C++ element type -> paddle::DataType mapping for every
// supported type.
void GroupTestDtype() {
  CHECK(TestDtype<float>() == paddle::DataType::FLOAT32);
  CHECK(TestDtype<double>() == paddle::DataType::FLOAT64);
  CHECK(TestDtype<int>() == paddle::DataType::INT32);
  CHECK(TestDtype<int64_t>() == paddle::DataType::INT64);
  CHECK(TestDtype<int16_t>() == paddle::DataType::INT16);
  CHECK(TestDtype<int8_t>() == paddle::DataType::INT8);
  CHECK(TestDtype<uint8_t>() == paddle::DataType::UINT8);
  CHECK(TestDtype<paddle::complex64>() == paddle::DataType::COMPLEX64);
  CHECK(TestDtype<paddle::complex128>() == paddle::DataType::COMPLEX128);
  CHECK(TestDtype<paddle::float16>() == paddle::DataType::FLOAT16);
}

194
void TestInitilized() {
195
  paddle::Tensor test_tensor(paddle::PlaceType::kCPU, {1, 1});
196 197 198
  CHECK(test_tensor.is_initialized() == false);
  test_tensor.mutable_data<float>();
  CHECK(test_tensor.is_initialized() == true);
199
  float* tensor_data = test_tensor.mutable_data<float>();
200 201 202 203 204 205 206 207
  for (int i = 0; i < test_tensor.size(); i++) {
    tensor_data[i] = 0.5;
  }
  for (int i = 0; i < test_tensor.size(); i++) {
    CHECK(tensor_data[i] == 0.5);
  }
}

208 209 210 211
TEST(PtenTensor, All) {
  // TODO(chenweihang, before 2021.11.20) support copy, slice and cast methods
  // VLOG(2) << "TestCopy";
  // GroupTestCopy();
212 213 214 215 216 217
  VLOG(2) << "TestDtype";
  GroupTestDtype();
  VLOG(2) << "TestShape";
  TestAPISizeAndShape();
  VLOG(2) << "TestPlace";
  TestAPIPlace();
218 219 220 221
  // VLOG(2) << "TestSlice";
  // TestAPISlice();
  // VLOG(2) << "TestCast";
  // GroupTestCast();
222 223
  VLOG(2) << "TestInitilized";
  TestInitilized();
224
}
225 226 227

}  // namespace tests
}  // namespace pten