//  Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/tensor_util.h"
#include <gtest/gtest.h>
#include <cmath>
#include <string>

namespace paddle {
namespace framework {
// Checks TensorCopy on CPU (and GPU when built with CUDA): a plain copy,
// a self-copy, and a copy of a row slice must all produce element-identical
// data at a distinct address, and preserve the source layout.
TEST(TensorCopy, Tensor) {
  Tensor src_tensor;
  Tensor dst_tensor;
  platform::CPUDeviceContext cpu_ctx((platform::CPUPlace()));

  int* src_ptr =
      src_tensor.mutable_data<int>(make_ddim({3, 3}), platform::CPUPlace());

  int arr[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
  memcpy(src_ptr, arr, 9 * sizeof(int));
  src_tensor.set_layout(DataLayout::kAnyLayout);

  auto cpu_place = new platform::CPUPlace();
  TensorCopy(src_tensor, *cpu_place, &dst_tensor);

  const int* dst_ptr = dst_tensor.data<int>();
  // The copy must not alias the source buffer.
  EXPECT_NE(src_ptr, dst_ptr);
  for (size_t i = 0; i < 9; ++i) {
    EXPECT_EQ(src_ptr[i], dst_ptr[i]);
  }

  // Copying a tensor onto itself must leave the data intact.
  TensorCopy(dst_tensor, *cpu_place, &dst_tensor);
  for (size_t i = 0; i < 9; ++i) {
    EXPECT_EQ(src_ptr[i], dst_ptr[i]);
  }

  EXPECT_TRUE(dst_tensor.layout() == src_tensor.layout());

  // Copying a one-row slice must copy exactly that row.
  Tensor slice_tensor = src_tensor.Slice(1, 2);
  TensorCopy(slice_tensor, *cpu_place, &dst_tensor);
  const int* slice_ptr = slice_tensor.data<int>();
  dst_ptr = dst_tensor.data<int>();
  EXPECT_NE(dst_ptr, slice_ptr);
  for (size_t i = 0; i < 3; ++i) {
    EXPECT_EQ(dst_ptr[i], slice_ptr[i]);
  }
  EXPECT_TRUE(dst_tensor.layout() == src_tensor.layout());

  // Fix: the place was previously leaked; release it like the sibling tests.
  delete cpu_place;

#ifdef PADDLE_WITH_CUDA
  {
    Tensor src_tensor;
    Tensor gpu_tensor;
    Tensor dst_tensor;

    int* src_ptr =
        src_tensor.mutable_data<int>(make_ddim({3, 3}), platform::CPUPlace());

    int arr[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
    memcpy(src_ptr, arr, 9 * sizeof(int));

    // CPU Tensor to GPU Tensor
    auto gpu_place = new platform::CUDAPlace(0);
    platform::CUDADeviceContext gpu_ctx(*gpu_place);
    TensorCopy(src_tensor, *gpu_place, gpu_ctx, &gpu_tensor);

    // GPU Tensor back to CPU Tensor
    auto cpu_place = new platform::CPUPlace();
    TensorCopy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor);

    // Sync before comparing: the copies above are issued on gpu_ctx's stream.
    gpu_ctx.Wait();
    const int* dst_ptr = dst_tensor.data<int>();
    EXPECT_NE(src_ptr, dst_ptr);
    for (size_t i = 0; i < 9; ++i) {
      EXPECT_EQ(src_ptr[i], dst_ptr[i]);
    }

    // Copy the GPU tensor onto itself; the previous CPU copy must be unchanged.
    TensorCopy(gpu_tensor, *gpu_place, gpu_ctx, &gpu_tensor);
    gpu_ctx.Wait();
    const int* dst_ptr_tmp = dst_tensor.data<int>();
    EXPECT_NE(src_ptr, dst_ptr_tmp);
    for (size_t i = 0; i < 9; ++i) {
      EXPECT_EQ(src_ptr[i], dst_ptr_tmp[i]);
    }

    Tensor slice_tensor = src_tensor.Slice(1, 2);

    // CPU slice Tensor to GPU Tensor
    TensorCopy(slice_tensor, *gpu_place, gpu_ctx, &gpu_tensor);

    // GPU Tensor back to CPU Tensor
    TensorCopy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor);

    // Sync before comparing the slice round-trip.
    gpu_ctx.Wait();
    const int* slice_ptr = slice_tensor.data<int>();
    dst_ptr = dst_tensor.data<int>();
    EXPECT_NE(dst_ptr, slice_ptr);
    for (size_t i = 0; i < 3; ++i) {
      EXPECT_EQ(dst_ptr[i], slice_ptr[i]);
    }

    EXPECT_TRUE(dst_tensor.layout() == src_tensor.layout());

    // Fix: both places were previously leaked.
    delete gpu_place;
    delete cpu_place;
  }
#endif
}

// Checks TensorFromVector: the tensor must receive its own copy of the
// vector's data (CPU path, and CPU+GPU paths when built with CUDA), and a
// second, smaller copy must fully overwrite the tensor contents.
TEST(TensorFromVector, Tensor) {
  {
    std::vector<int> src_vec = {1, 2, 3, 4, 5, 6, 7, 8, 9};
    paddle::framework::Tensor cpu_tensor;

    // Copy to CPU Tensor.
    cpu_tensor.Resize(paddle::framework::make_ddim({3, 3}));
    paddle::framework::TensorFromVector<int>(src_vec, &cpu_tensor);

    // Compare element-wise; the tensor must not alias the vector's buffer.
    const int* cpu_ptr = cpu_tensor.data<int>();
    const int* src_ptr = src_vec.data();
    EXPECT_NE(src_ptr, cpu_ptr);
    for (size_t i = 0; i < 9; ++i) {
      EXPECT_EQ(src_ptr[i], cpu_ptr[i]);
    }

    // Shrink the source to 4 elements and copy into a {2, 2} tensor.
    src_vec.erase(src_vec.begin(), src_vec.begin() + 5);
    cpu_tensor.Resize(paddle::framework::make_ddim({2, 2}));
    paddle::framework::TensorFromVector<int>(src_vec, &cpu_tensor);
    cpu_ptr = cpu_tensor.data<int>();
    src_ptr = src_vec.data();
    EXPECT_NE(src_ptr, cpu_ptr);
    // Fix: the vector and tensor now hold 4 elements, not 5 — the old bound
    // read one past the end of both buffers.
    for (size_t i = 0; i < 4; ++i) {
      EXPECT_EQ(src_ptr[i], cpu_ptr[i]);
    }
  }

#ifdef PADDLE_WITH_CUDA
  {
    std::vector<int> src_vec = {1, 2, 3, 4, 5, 6, 7, 8, 9};
    paddle::framework::Tensor cpu_tensor;
    paddle::framework::Tensor gpu_tensor;
    paddle::framework::Tensor dst_tensor;

    // Copy to CPU Tensor.
    cpu_tensor.Resize(make_ddim({3, 3}));
    auto cpu_place = new paddle::platform::CPUPlace();
    paddle::platform::CPUDeviceContext cpu_ctx(*cpu_place);
    paddle::framework::TensorFromVector<int>(src_vec, cpu_ctx, &cpu_tensor);

    // Copy to GPU Tensor.
    gpu_tensor.Resize(paddle::framework::make_ddim({3, 3}));
    auto gpu_place = new paddle::platform::CUDAPlace();
    paddle::platform::CUDADeviceContext gpu_ctx(*gpu_place);
    paddle::framework::TensorFromVector<int>(src_vec, gpu_ctx, &gpu_tensor);
    // Copy from GPU to CPU tensor for comparison.
    paddle::framework::TensorCopy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor);

    // Sync before comparing: the GPU copies run on gpu_ctx's stream.
    gpu_ctx.Wait();
    const int* src_ptr = src_vec.data();
    const int* cpu_ptr = cpu_tensor.data<int>();
    const int* dst_ptr = dst_tensor.data<int>();
    EXPECT_NE(src_ptr, cpu_ptr);
    EXPECT_NE(src_ptr, dst_ptr);
    for (size_t i = 0; i < 9; ++i) {
      EXPECT_EQ(src_ptr[i], cpu_ptr[i]);
      EXPECT_EQ(src_ptr[i], dst_ptr[i]);
    }

    // Repeat with a shrunken 4-element source and {2, 2} tensors.
    src_vec.erase(src_vec.begin(), src_vec.begin() + 5);

    cpu_tensor.Resize(paddle::framework::make_ddim({2, 2}));
    paddle::framework::TensorFromVector<int>(src_vec, cpu_ctx, &cpu_tensor);
    gpu_tensor.Resize(paddle::framework::make_ddim({2, 2}));
    paddle::framework::TensorFromVector<int>(src_vec, gpu_ctx, &gpu_tensor);
    paddle::framework::TensorCopy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor);

    // Sync before comparing.
    gpu_ctx.Wait();
    src_ptr = src_vec.data();
    cpu_ptr = cpu_tensor.data<int>();
    dst_ptr = dst_tensor.data<int>();
    EXPECT_NE(src_ptr, cpu_ptr);
    EXPECT_NE(src_ptr, dst_ptr);
    // Fix: only 4 elements remain — the old bound of 5 read out of bounds.
    for (size_t i = 0; i < 4; ++i) {
      EXPECT_EQ(src_ptr[i], cpu_ptr[i]);
      EXPECT_EQ(src_ptr[i], dst_ptr[i]);
    }

    delete cpu_place;
    delete gpu_place;
  }
#endif
}

// Checks TensorToVector: data round-trips from a tensor into a std::vector
// element-for-element, on CPU and (when built with CUDA) from a GPU tensor.
TEST(TensorToVector, Tensor) {
  {
    paddle::framework::Tensor src;
    int* src_ptr = src.mutable_data<int>({3, 3}, paddle::platform::CPUPlace());
    for (int i = 0; i < 3 * 3; ++i) {
      src_ptr[i] = i;
    }

    // Fix: removed an unused local CPUPlace — TensorToVector does not take it.
    std::vector<int> dst;
    paddle::framework::TensorToVector<int>(src, &dst);

    for (int i = 0; i < 3 * 3; ++i) {
      EXPECT_EQ(src_ptr[i], dst[i]);
    }
  }
#ifdef PADDLE_WITH_CUDA
  {
    std::vector<int> src_vec = {1, 2, 3, 4, 5, 6, 7, 8, 9};
    paddle::framework::Tensor gpu_tensor;
    paddle::platform::CUDAPlace place;
    paddle::platform::CUDADeviceContext gpu_ctx(place);
    paddle::framework::TensorFromVector<int>(src_vec, gpu_ctx, &gpu_tensor);

    // TensorToVector with a device context copies back from the GPU.
    std::vector<int> dst;
    paddle::framework::TensorToVector<int>(gpu_tensor, gpu_ctx, &dst);

    for (int i = 0; i < 3 * 3; ++i) {
      EXPECT_EQ(src_vec[i], dst[i]);
    }
  }
#endif
}

// Checks TensorFromDLPack: a tensor exported to DLPack and re-imported must
// hold the same values in a freshly-allocated buffer, on CPU and (when built
// with CUDA) on GPU.
TEST(TensorFromDLPack, Tensor) {
  {
    std::vector<int> src_vec = {1, 2, 3, 4, 5, 6, 7, 8, 9};
    paddle::framework::Tensor cpu_tensor;

    cpu_tensor.Resize(paddle::framework::make_ddim({3, 3}));
    paddle::platform::CPUPlace cpu_place;
    paddle::platform::CPUDeviceContext cpu_ctx(cpu_place);
    paddle::framework::TensorFromVector<int>(src_vec, cpu_ctx, &cpu_tensor);
    paddle::framework::DLPackTensor dlpack_tensor(cpu_tensor, 1);

    paddle::framework::Tensor dst_tensor;
    paddle::framework::TensorFromDLPack(dlpack_tensor, &dst_tensor);

    // Fix: this pointer reads from dst_tensor, so name it dst_ptr (it was
    // misleadingly called src_ptr before); comparisons are unchanged.
    auto cpu_ptr = cpu_tensor.data<int>();
    auto dst_ptr = dst_tensor.data<int>();
    EXPECT_NE(dst_ptr, cpu_ptr);
    for (size_t i = 0; i < 9; ++i) {
      EXPECT_EQ(dst_ptr[i], cpu_ptr[i]);
    }
  }

#ifdef PADDLE_WITH_CUDA
  {
    std::vector<int> src_vec = {1, 2, 3, 4, 5, 6, 7, 8, 9};
    paddle::framework::Tensor cpu_tensor;
    paddle::framework::Tensor gpu_tensor;
    paddle::framework::Tensor dst_tensor;
    paddle::framework::Tensor gpu_tensor_from_dlpack;

    // Copy to CPU Tensor.
    cpu_tensor.Resize(make_ddim({3, 3}));
    paddle::platform::CPUPlace cpu_place;
    paddle::platform::CPUDeviceContext cpu_ctx(cpu_place);
    paddle::framework::TensorFromVector<int>(src_vec, cpu_ctx, &cpu_tensor);

    // Copy to GPU Tensor using the pooled context for this device.
    gpu_tensor.Resize(paddle::framework::make_ddim({3, 3}));
    paddle::platform::CUDAPlace gpu_place;
    auto& gpu_ctx =
        *paddle::platform::DeviceContextPool::Instance().GetByPlace(gpu_place);
    paddle::framework::TensorFromVector<int>(src_vec, gpu_ctx, &gpu_tensor);
    gpu_ctx.Wait();

    // Round-trip the GPU tensor through DLPack.
    paddle::framework::DLPackTensor dlpack_tensor(gpu_tensor, 1);
    paddle::framework::TensorFromDLPack(dlpack_tensor, &gpu_tensor_from_dlpack);
    gpu_ctx.Wait();

    // Copy from GPU to CPU tensor for comparison.
    paddle::framework::TensorCopy(gpu_tensor_from_dlpack, cpu_place, gpu_ctx,
                                  &dst_tensor);
    // Sync before comparing: the copy runs on gpu_ctx's stream.
    gpu_ctx.Wait();
    const int* src_ptr = src_vec.data();
    const int* cpu_ptr = cpu_tensor.data<int>();
    const int* dst_ptr = dst_tensor.data<int>();
    EXPECT_NE(src_ptr, cpu_ptr);
    EXPECT_NE(src_ptr, dst_ptr);
    for (size_t i = 0; i < 9; ++i) {
      EXPECT_EQ(src_ptr[i], cpu_ptr[i]);
      EXPECT_EQ(src_ptr[i], dst_ptr[i]);
    }
  }
#endif
}

// TensorContainsNAN must report true exactly while a NaN element is present,
// for both float and float16 tensors on the CPU.
TEST(TensorContainsNAN, CPU) {
  // float tensor: plant a NaN, then overwrite it and re-check.
  {
    paddle::framework::Tensor tensor;
    float* data =
        tensor.mutable_data<float>({3}, paddle::platform::CPUPlace());
    data[0] = 0.0;
    data[1] = NAN;
    data[2] = 0.0;
    EXPECT_TRUE(paddle::framework::TensorContainsNAN(tensor));
    data[1] = 0.0;
    EXPECT_FALSE(paddle::framework::TensorContainsNAN(tensor));
  }

  // float16 tensor: 0x7fff is written directly into the raw bits of one
  // element to produce a half-precision NaN.
  {
    paddle::framework::Tensor tensor;
    paddle::platform::float16* data =
        tensor.mutable_data<paddle::platform::float16>(
            {3}, paddle::platform::CPUPlace());
    data[0] = 0.0;
    data[1].x = 0x7fff;
    data[2] = 0.0;
    EXPECT_TRUE(paddle::framework::TensorContainsNAN(tensor));
    data[1] = 0.0;
    EXPECT_FALSE(paddle::framework::TensorContainsNAN(tensor));
  }
}

// TensorContainsInf must report true exactly while an Inf element is present,
// for both double and float16 tensors on the CPU.
TEST(TensorContainsInf, CPU) {
  // double tensor: plant an infinity, then overwrite it and re-check.
  {
    paddle::framework::Tensor tensor;
    double* data =
        tensor.mutable_data<double>({3}, paddle::platform::CPUPlace());
    data[0] = 1.0;
    data[1] = INFINITY;
    data[2] = 0.0;
    EXPECT_TRUE(paddle::framework::TensorContainsInf(tensor));
    data[1] = 1.0;
    EXPECT_FALSE(paddle::framework::TensorContainsInf(tensor));
  }

  // float16 tensor: 0x7c00 is written directly into the raw bits of one
  // element to produce a half-precision infinity.
  {
    paddle::framework::Tensor tensor;
    paddle::platform::float16* data =
        tensor.mutable_data<paddle::platform::float16>(
            {3}, paddle::platform::CPUPlace());
    data[0] = 1.0;
    data[1].x = 0x7c00;
    data[2] = 0.0;
    EXPECT_TRUE(paddle::framework::TensorContainsInf(tensor));
    data[1] = 1.0;
    EXPECT_FALSE(paddle::framework::TensorContainsInf(tensor));
  }
}

// TensorIsfinite writes a single bool into `out`: false while the input
// holds any Inf or NaN, true once every element is finite again.
TEST(TensorIsfinite, CPU) {
  // double tensor with an infinity.
  {
    paddle::framework::Tensor input, result;
    double* data =
        input.mutable_data<double>({3}, paddle::platform::CPUPlace());
    data[0] = 1.0;
    data[1] = INFINITY;
    data[2] = 0.0;
    paddle::framework::TensorIsfinite(input, &result);
    EXPECT_EQ(result.data<bool>()[0], false);
    data[1] = 1.0;
    paddle::framework::TensorIsfinite(input, &result);
    EXPECT_EQ(result.data<bool>()[0], true);
  }

  // double tensor with a NaN.
  {
    paddle::framework::Tensor input, result;
    double* data =
        input.mutable_data<double>({3}, paddle::platform::CPUPlace());
    data[0] = 1.0;
    data[1] = NAN;
    data[2] = 0.0;
    paddle::framework::TensorIsfinite(input, &result);
    EXPECT_EQ(result.data<bool>()[0], false);
    data[1] = 1.0;
    paddle::framework::TensorIsfinite(input, &result);
    EXPECT_EQ(result.data<bool>()[0], true);
  }

  // float16 tensor: raw bits 0x7c00 (Inf) and 0x7fff (NaN) must both be
  // detected as non-finite.
  {
    paddle::framework::Tensor input, result;
    paddle::platform::float16* data =
        input.mutable_data<paddle::platform::float16>(
            {3}, paddle::platform::CPUPlace());
    data[0] = 1.0;
    data[1].x = 0x7c00;
    data[2] = 0.0;
    paddle::framework::TensorIsfinite(input, &result);
    EXPECT_EQ(result.data<bool>()[0], false);
    data[1] = 1.0;
    paddle::framework::TensorIsfinite(input, &result);
    EXPECT_EQ(result.data<bool>()[0], true);
    data[1].x = 0x7fff;
    paddle::framework::TensorIsfinite(input, &result);
    EXPECT_EQ(result.data<bool>()[0], false);
  }
}

// Checks TensorToStream/TensorFromStream serialization: a {2, 3} int tensor
// must round-trip through a string stream with identical data and dims, from
// a CPU tensor and (when built with CUDA) from a GPU tensor.
TEST(Tensor, FromAndToStream) {
  framework::Tensor src_tensor;
  int array[6] = {1, 2, 3, 4, 5, 6};
  src_tensor.Resize({2, 3});
  int* src_ptr = src_tensor.mutable_data<int>(platform::CPUPlace());
  for (int i = 0; i < 6; ++i) {
    src_ptr[i] = array[i];
  }
  {
    framework::Tensor dst_tensor;
    auto place = new platform::CPUPlace();
    platform::CPUDeviceContext cpu_ctx(*place);
    std::ostringstream oss;
    TensorToStream(oss, src_tensor, cpu_ctx);

    std::istringstream iss(oss.str());
    TensorFromStream(iss, &dst_tensor, cpu_ctx);
    int* dst_ptr = dst_tensor.mutable_data<int>(platform::CPUPlace());
    // Fix: compare all 6 elements; the old bound of 5 left the last element
    // unverified (the CUDA scope below already checked all 6).
    for (int i = 0; i < 6; ++i) {
      EXPECT_EQ(dst_ptr[i], array[i]);
    }
    EXPECT_EQ(dst_tensor.dims(), src_tensor.dims());
    delete place;
  }
#ifdef PADDLE_WITH_CUDA
  {
    Tensor gpu_tensor;
    gpu_tensor.Resize({2, 3});
    Tensor dst_tensor;

    auto gpu_place = new platform::CUDAPlace();
    platform::CUDADeviceContext gpu_ctx(*gpu_place);

    TensorCopy(src_tensor, *gpu_place, gpu_ctx, &gpu_tensor);

    std::ostringstream oss;
    TensorToStream(oss, gpu_tensor, gpu_ctx);

    // Deserialize onto the CPU using the pooled CPU context.
    std::istringstream iss(oss.str());
    TensorFromStream(
        iss, &dst_tensor,
        *platform::DeviceContextPool::Instance().Get(platform::CPUPlace()));

    int* dst_ptr = dst_tensor.mutable_data<int>(platform::CPUPlace());
    for (int i = 0; i < 6; ++i) {
      EXPECT_EQ(dst_ptr[i], array[i]);
    }
    delete gpu_place;
  }
#endif
}

}  // namespace framework
}  // namespace paddle