//  Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/tensor_util.h"

#include <gtest/gtest.h>
#include <cmath>

namespace paddle {
namespace framework {

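// TensorCopy: CPU-to-CPU copies, self-copy, and slice copies keep data and
// layout; with CUDA/HIP enabled, CPU <-> GPU round trips are also checked.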
TEST(TensorCopy, Tensor) {
  Tensor src_tensor;
  Tensor dst_tensor;
  platform::CPUDeviceContext cpu_ctx((platform::CPUPlace()));

  int* src_ptr =
      src_tensor.mutable_data<int>(make_ddim({3, 3}), platform::CPUPlace());

  int arr[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
  memcpy(src_ptr, arr, 9 * sizeof(int));
  src_tensor.set_layout(DataLayout::kAnyLayout);

  auto cpu_place = new platform::CPUPlace();
  TensorCopy(src_tensor, *cpu_place, &dst_tensor);

  const int* dst_ptr = dst_tensor.data<int>();
  EXPECT_NE(src_ptr, dst_ptr);
  for (size_t i = 0; i < 9; ++i) {
    EXPECT_EQ(src_ptr[i], dst_ptr[i]);
  }

  TensorCopy(dst_tensor, *cpu_place, &dst_tensor);
  for (size_t i = 0; i < 9; ++i) {
    EXPECT_EQ(src_ptr[i], dst_ptr[i]);
  }

  EXPECT_TRUE(dst_tensor.layout() == src_tensor.layout());

  Tensor slice_tensor = src_tensor.Slice(1, 2);
  TensorCopy(slice_tensor, *cpu_place, &dst_tensor);
  const int* slice_ptr = slice_tensor.data<int>();
  dst_ptr = dst_tensor.data<int>();
  EXPECT_NE(dst_ptr, slice_ptr);
  for (size_t i = 0; i < 3; ++i) {
    EXPECT_EQ(dst_ptr[i], slice_ptr[i]);
  }
  EXPECT_TRUE(dst_tensor.layout() == src_tensor.layout());

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  {
    Tensor src_tensor;
    Tensor gpu_tensor;
    Tensor dst_tensor;

    int* src_ptr =
        src_tensor.mutable_data<int>(make_ddim({3, 3}), platform::CPUPlace());

    int arr[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
    memcpy(src_ptr, arr, 9 * sizeof(int));

    // CPU Tensor to GPU Tensor
    auto gpu_place = new platform::CUDAPlace(0);
    platform::CUDADeviceContext gpu_ctx(*gpu_place);
    gpu_ctx.SetAllocator(paddle::memory::allocation::AllocatorFacade::Instance()
                             .GetAllocator(*gpu_place, gpu_ctx.stream())
                             .get());
    gpu_ctx.PartialInitWithAllocator();
    TensorCopy(src_tensor, *gpu_place, gpu_ctx, &gpu_tensor);

    // GPU Tensor to CPU Tensor
    auto cpu_place = new platform::CPUPlace();
    TensorCopy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor);

    // Sync before Compare Tensors
    gpu_ctx.Wait();
    const int* dst_ptr = dst_tensor.data<int>();
    EXPECT_NE(src_ptr, dst_ptr);
    for (size_t i = 0; i < 9; ++i) {
      EXPECT_EQ(src_ptr[i], dst_ptr[i]);
    }

    // Copy the same tensor
    TensorCopy(gpu_tensor, *gpu_place, gpu_ctx, &gpu_tensor);
    gpu_ctx.Wait();
    const int* dst_ptr_tmp = dst_tensor.data<int>();
    EXPECT_NE(src_ptr, dst_ptr_tmp);
    for (size_t i = 0; i < 9; ++i) {
      EXPECT_EQ(src_ptr[i], dst_ptr_tmp[i]);
    }

    Tensor slice_tensor = src_tensor.Slice(1, 2);

    // CPU Slice Tensor to GPU Tensor
    TensorCopy(slice_tensor, *gpu_place, gpu_ctx, &gpu_tensor);

    // GPU Tensor to CPU Tensor
    TensorCopy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor);

    // Sync before Compare Slice Tensors
    gpu_ctx.Wait();
    const int* slice_ptr = slice_tensor.data<int>();
    dst_ptr = dst_tensor.data<int>();
    EXPECT_NE(dst_ptr, slice_ptr);
    for (size_t i = 0; i < 3; ++i) {
      EXPECT_EQ(dst_ptr[i], slice_ptr[i]);
    }

    EXPECT_TRUE(dst_tensor.layout() == src_tensor.layout());
  }
#endif
}

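// TensorFromVector: build a tensor from a std::vector on CPU and, when
// CUDA/HIP is enabled, on GPU; also covers resizing after the vector shrinks.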
TEST(TensorFromVector, Tensor) {
  {
    std::vector<int> src_vec = {1, 2, 3, 4, 5, 6, 7, 8, 9};
    paddle::framework::Tensor cpu_tensor;

    // Copy to CPU Tensor
    cpu_tensor.Resize(paddle::framework::make_ddim({3, 3}));
    auto cpu_place = new paddle::platform::CPUPlace();
    paddle::framework::TensorFromVector<int>(src_vec, &cpu_tensor);

    // Compare Tensors
    const int* cpu_ptr = cpu_tensor.data<int>();
    const int* src_ptr = src_vec.data();
    EXPECT_NE(src_ptr, cpu_ptr);
    for (size_t i = 0; i < 9; ++i) {
      EXPECT_EQ(src_ptr[i], cpu_ptr[i]);
    }

    src_vec.erase(src_vec.begin(), src_vec.begin() + 5);
    cpu_tensor.Resize(paddle::framework::make_ddim({2, 2}));
    paddle::framework::TensorFromVector<int>(src_vec, &cpu_tensor);
    cpu_ptr = cpu_tensor.data<int>();
    src_ptr = src_vec.data();
    EXPECT_NE(src_ptr, cpu_ptr);
    for (size_t i = 0; i < 5; ++i) {
      EXPECT_EQ(src_ptr[i], cpu_ptr[i]);
    }

    delete cpu_place;
  }

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  {
    std::vector<int> src_vec = {1, 2, 3, 4, 5, 6, 7, 8, 9};
    paddle::framework::Tensor cpu_tensor;
    paddle::framework::Tensor gpu_tensor;
    paddle::framework::Tensor dst_tensor;

    // Copy to CPU Tensor
    cpu_tensor.Resize(make_ddim({3, 3}));
    auto cpu_place = new paddle::platform::CPUPlace();
    paddle::platform::CPUDeviceContext cpu_ctx(*cpu_place);
    paddle::framework::TensorFromVector<int>(src_vec, cpu_ctx, &cpu_tensor);

    // Copy to GPUTensor
    gpu_tensor.Resize(paddle::framework::make_ddim({3, 3}));
    auto gpu_place = new paddle::platform::CUDAPlace();
    paddle::platform::CUDADeviceContext gpu_ctx(*gpu_place);
    gpu_ctx.SetAllocator(paddle::memory::allocation::AllocatorFacade::Instance()
                             .GetAllocator(*gpu_place, gpu_ctx.stream())
                             .get());
    gpu_ctx.PartialInitWithAllocator();
    paddle::framework::TensorFromVector<int>(src_vec, gpu_ctx, &gpu_tensor);
    // Copy from GPU to CPU tensor for comparison
    paddle::framework::TensorCopy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor);

    // Sync before Compare Tensors
    gpu_ctx.Wait();
    const int* src_ptr = src_vec.data();
    const int* cpu_ptr = cpu_tensor.data<int>();
    const int* dst_ptr = dst_tensor.data<int>();
    EXPECT_NE(src_ptr, cpu_ptr);
    EXPECT_NE(src_ptr, dst_ptr);
    for (size_t i = 0; i < 9; ++i) {
      EXPECT_EQ(src_ptr[i], cpu_ptr[i]);
      EXPECT_EQ(src_ptr[i], dst_ptr[i]);
    }

    src_vec.erase(src_vec.begin(), src_vec.begin() + 5);

    cpu_tensor.Resize(paddle::framework::make_ddim({2, 2}));
    paddle::framework::TensorFromVector<int>(src_vec, cpu_ctx, &cpu_tensor);
    gpu_tensor.Resize(paddle::framework::make_ddim({2, 2}));
    paddle::framework::TensorFromVector<int>(src_vec, gpu_ctx, &gpu_tensor);
    paddle::framework::TensorCopy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor);

    // Sync before Compare Tensors
    gpu_ctx.Wait();
    src_ptr = src_vec.data();
    cpu_ptr = cpu_tensor.data<int>();
    dst_ptr = dst_tensor.data<int>();
    EXPECT_NE(src_ptr, cpu_ptr);
    EXPECT_NE(src_ptr, dst_ptr);
    for (size_t i = 0; i < 5; ++i) {
      EXPECT_EQ(src_ptr[i], cpu_ptr[i]);
      EXPECT_EQ(src_ptr[i], dst_ptr[i]);
    }

    delete cpu_place;
    delete gpu_place;
  }
#endif
}

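// TensorToVector: copy tensor contents back into a std::vector from CPU and
// GPU tensors.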
TEST(TensorToVector, Tensor) {
  {
    paddle::framework::Tensor src;
    int* src_ptr = src.mutable_data<int>({3, 3}, paddle::platform::CPUPlace());
    for (int i = 0; i < 3 * 3; ++i) {
      src_ptr[i] = i;
    }

    paddle::platform::CPUPlace place;
    std::vector<int> dst;
    paddle::framework::TensorToVector<int>(src, &dst);

    for (int i = 0; i < 3 * 3; ++i) {
      EXPECT_EQ(src_ptr[i], dst[i]);
    }
  }
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  {
    std::vector<int> src_vec = {1, 2, 3, 4, 5, 6, 7, 8, 9};
    paddle::framework::Tensor gpu_tensor;
    paddle::platform::CUDAPlace place;
    paddle::platform::CUDADeviceContext gpu_ctx(place);
    gpu_ctx.SetAllocator(paddle::memory::allocation::AllocatorFacade::Instance()
                             .GetAllocator(place, gpu_ctx.stream())
                             .get());
    gpu_ctx.PartialInitWithAllocator();
    paddle::framework::TensorFromVector<int>(src_vec, gpu_ctx, &gpu_tensor);

    std::vector<int> dst;
    paddle::framework::TensorToVector<int>(gpu_tensor, gpu_ctx, &dst);

    for (int i = 0; i < 3 * 3; ++i) {
      EXPECT_EQ(src_vec[i], dst[i]);
    }
  }
#endif
}

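// TensorToVector with bool elements, on CPU, CUDA, and Ascend NPU builds.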
TEST(TensorToVector, Tensor_bool) {
  {
    paddle::framework::Tensor src;
    bool* src_ptr =
        src.mutable_data<bool>({3, 3}, paddle::platform::CPUPlace());
    for (int i = 0; i < 3 * 3; ++i) {
      src_ptr[i] = static_cast<bool>(i % 2);
    }

    paddle::platform::CPUPlace place;
    std::vector<bool> dst;
    paddle::framework::TensorToVector<bool>(src, &dst);

    for (int i = 0; i < 3 * 3; ++i) {
      EXPECT_EQ(src_ptr[i], dst[i]);
    }
  }
#ifdef PADDLE_WITH_CUDA
  {
    std::vector<bool> src_vec = {
        false, true, false, true, false, true, false, true, false,
    };
    paddle::framework::Tensor gpu_tensor;
    paddle::platform::CUDAPlace place;
    paddle::platform::CUDADeviceContext gpu_ctx(place);
    gpu_ctx.SetAllocator(paddle::memory::allocation::AllocatorFacade::Instance()
                             .GetAllocator(place, gpu_ctx.stream())
                             .get());
    gpu_ctx.PartialInitWithAllocator();
    paddle::framework::TensorFromVector<bool>(src_vec, gpu_ctx, &gpu_tensor);

    std::vector<bool> dst;
    paddle::framework::TensorToVector<bool>(gpu_tensor, gpu_ctx, &dst);

    for (int i = 0; i < 3 * 3; ++i) {
      EXPECT_EQ(src_vec[i], dst[i]);
    }
  }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
  {
    std::vector<bool> src_vec = {
        false, true, false, true, false, true, false, true, false,
    };
    paddle::framework::Tensor npu_tensor;
    paddle::platform::NPUPlace place(0);
    paddle::platform::NPUDeviceContext npu_ctx(place);
    paddle::framework::TensorFromVector<bool>(src_vec, npu_ctx, &npu_tensor);

    std::vector<bool> dst;
    paddle::framework::TensorToVector<bool>(npu_tensor, npu_ctx, &dst);

    for (int i = 0; i < 3 * 3; ++i) {
      EXPECT_EQ(src_vec[i], dst[i]);
    }
  }
#endif
}

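// TensorFromDLPack: wrap a tensor as a DLPack tensor and rebuild a framework
// tensor from it, on CPU and GPU.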
TEST(TensorFromDLPack, Tensor) {
  {
    std::vector<int> src_vec = {1, 2, 3, 4, 5, 6, 7, 8, 9};
    paddle::framework::Tensor cpu_tensor;

    cpu_tensor.Resize(paddle::framework::make_ddim({3, 3}));
    paddle::platform::CPUPlace cpu_place;
    paddle::platform::CPUDeviceContext cpu_ctx(cpu_place);
    paddle::framework::TensorFromVector<int>(src_vec, cpu_ctx, &cpu_tensor);
    paddle::framework::DLPackTensor dlpack_tensor(cpu_tensor, 1);

    paddle::framework::Tensor dst_tensor;
    paddle::framework::TensorFromDLPack(dlpack_tensor, &dst_tensor);

    auto cpu_ptr = cpu_tensor.data<int>();
    auto src_ptr = dst_tensor.data<int>();
    EXPECT_NE(src_ptr, cpu_ptr);
    for (size_t i = 0; i < 9; ++i) {
      EXPECT_EQ(src_ptr[i], cpu_ptr[i]);
    }
  }

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  {
    std::vector<int> src_vec = {1, 2, 3, 4, 5, 6, 7, 8, 9};
    paddle::framework::Tensor cpu_tensor;
    paddle::framework::Tensor gpu_tensor;
    paddle::framework::Tensor dst_tensor;
    paddle::framework::Tensor gpu_tensor_from_dlpack;

    // Copy to CPU Tensor
    cpu_tensor.Resize(make_ddim({3, 3}));
    paddle::platform::CPUPlace cpu_place;
    paddle::platform::CPUDeviceContext cpu_ctx(cpu_place);
    paddle::framework::TensorFromVector<int>(src_vec, cpu_ctx, &cpu_tensor);

    // Copy to GPUTensor
    gpu_tensor.Resize(paddle::framework::make_ddim({3, 3}));
    paddle::platform::CUDAPlace gpu_place;
    auto& gpu_ctx =
        *paddle::platform::DeviceContextPool::Instance().GetByPlace(gpu_place);
    paddle::framework::TensorFromVector<int>(src_vec, gpu_ctx, &gpu_tensor);
    gpu_ctx.Wait();

    paddle::framework::DLPackTensor dlpack_tensor(gpu_tensor, 1);
    paddle::framework::TensorFromDLPack(dlpack_tensor, &gpu_tensor_from_dlpack);
    gpu_ctx.Wait();

    // Copy from GPU to CPU tensor for comparison
    paddle::framework::TensorCopy(gpu_tensor_from_dlpack, cpu_place, gpu_ctx,
                                  &dst_tensor);
    // Sync before Compare Tensors
    gpu_ctx.Wait();
    const int* src_ptr = src_vec.data();
    const int* cpu_ptr = cpu_tensor.data<int>();
    const int* dst_ptr = dst_tensor.data<int>();
    EXPECT_NE(src_ptr, cpu_ptr);
    EXPECT_NE(src_ptr, dst_ptr);
    for (size_t i = 0; i < 9; ++i) {
      EXPECT_EQ(src_ptr[i], cpu_ptr[i]);
      EXPECT_EQ(src_ptr[i], dst_ptr[i]);
    }
  }
#endif
}

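// TensorContainsNAN: detect NaN in float and float16 CPU tensors.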
TEST(TensorContainsNAN, CPU) {
  {
    paddle::framework::Tensor src;
    float* buf = src.mutable_data<float>({3}, paddle::platform::CPUPlace());
    buf[0] = 0.0;
    buf[1] = NAN;
    buf[2] = 0.0;
    EXPECT_TRUE(paddle::framework::TensorContainsNAN(src));
    buf[1] = 0.0;
    EXPECT_FALSE(paddle::framework::TensorContainsNAN(src));
  }

  {
    paddle::framework::Tensor src;
    paddle::platform::float16* buf =
        src.mutable_data<paddle::platform::float16>(
            {3}, paddle::platform::CPUPlace());
    buf[0] = 0.0;
    buf[1].x = 0x7fff;
    buf[2] = 0.0;
    EXPECT_TRUE(paddle::framework::TensorContainsNAN(src));
    buf[1] = 0.0;
    EXPECT_FALSE(paddle::framework::TensorContainsNAN(src));
  }
}

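// TensorContainsInf: detect Inf in double and float16 CPU tensors.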
TEST(TensorContainsInf, CPU) {
  {
    paddle::framework::Tensor src;
    double* buf = src.mutable_data<double>({3}, paddle::platform::CPUPlace());
    buf[0] = 1.0;
    buf[1] = INFINITY;
    buf[2] = 0.0;
    EXPECT_TRUE(paddle::framework::TensorContainsInf(src));
    buf[1] = 1.0;
    EXPECT_FALSE(paddle::framework::TensorContainsInf(src));
  }

  {
    paddle::framework::Tensor src;
    paddle::platform::float16* buf =
        src.mutable_data<paddle::platform::float16>(
            {3}, paddle::platform::CPUPlace());
    buf[0] = 1.0;
    buf[1].x = 0x7c00;
    buf[2] = 0.0;
    EXPECT_TRUE(paddle::framework::TensorContainsInf(src));
    buf[1] = 1.0;
    EXPECT_FALSE(paddle::framework::TensorContainsInf(src));
  }
}

TEST(TensorIsfinite, CPU) {
  {
    paddle::framework::Tensor src, out;
    double* buf = src.mutable_data<double>({3}, paddle::platform::CPUPlace());
    buf[0] = 1.0;
    buf[1] = INFINITY;
    buf[2] = 0.0;
    paddle::framework::TensorIsfinite(src, &out);
    EXPECT_EQ(out.data<bool>()[0], false);
    buf[1] = 1.0;
    paddle::framework::TensorIsfinite(src, &out);
    EXPECT_EQ(out.data<bool>()[0], true);
  }

  {
    paddle::framework::Tensor src, out;
    double* buf = src.mutable_data<double>({3}, paddle::platform::CPUPlace());
    buf[0] = 1.0;
    buf[1] = NAN;
    buf[2] = 0.0;
    paddle::framework::TensorIsfinite(src, &out);
    EXPECT_EQ(out.data<bool>()[0], false);
    buf[1] = 1.0;
    paddle::framework::TensorIsfinite(src, &out);
    EXPECT_EQ(out.data<bool>()[0], true);
  }

  {
    paddle::framework::Tensor src, out;
    paddle::platform::float16* buf =
        src.mutable_data<paddle::platform::float16>(
            {3}, paddle::platform::CPUPlace());
    buf[0] = 1.0;
    buf[1].x = 0x7c00;
    buf[2] = 0.0;
    paddle::framework::TensorIsfinite(src, &out);
    EXPECT_EQ(out.data<bool>()[0], false);
    buf[1] = 1.0;
    paddle::framework::TensorIsfinite(src, &out);
    EXPECT_EQ(out.data<bool>()[0], true);
    buf[1].x = 0x7fff;
    paddle::framework::TensorIsfinite(src, &out);
    EXPECT_EQ(out.data<bool>()[0], false);
  }
}

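// Serialize a tensor to a stream and read it back, on CPU and, when CUDA/HIP
// is enabled, from a GPU tensor.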
TEST(Tensor, FromAndToStream) {
  framework::Tensor src_tensor;
  int array[6] = {1, 2, 3, 4, 5, 6};
  src_tensor.Resize({2, 3});
  int* src_ptr = src_tensor.mutable_data<int>(platform::CPUPlace());
  for (int i = 0; i < 6; ++i) {
    src_ptr[i] = array[i];
  }
  {
    framework::Tensor dst_tensor;
    auto place = new platform::CPUPlace();
    platform::CPUDeviceContext cpu_ctx(*place);
    std::ostringstream oss;
    TensorToStream(oss, src_tensor, cpu_ctx);

    std::istringstream iss(oss.str());
    TensorFromStream(iss, &dst_tensor, cpu_ctx);
    int* dst_ptr = dst_tensor.mutable_data<int>(platform::CPUPlace());
    for (int i = 0; i < 6; ++i) {
      EXPECT_EQ(dst_ptr[i], array[i]);
    }
    EXPECT_EQ(dst_tensor.dims(), src_tensor.dims());
    delete place;
  }
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  {
    Tensor gpu_tensor;
    gpu_tensor.Resize({2, 3});
    Tensor dst_tensor;

    auto gpu_place = new platform::CUDAPlace();
    platform::CUDADeviceContext gpu_ctx(*gpu_place);
    gpu_ctx.SetAllocator(paddle::memory::allocation::AllocatorFacade::Instance()
                             .GetAllocator(*gpu_place, gpu_ctx.stream())
                             .get());
    gpu_ctx.PartialInitWithAllocator();

    TensorCopy(src_tensor, *gpu_place, gpu_ctx, &gpu_tensor);

    std::ostringstream oss;
    TensorToStream(oss, gpu_tensor, gpu_ctx);

    std::istringstream iss(oss.str());
    TensorFromStream(
        iss, &dst_tensor,
        *platform::DeviceContextPool::Instance().Get(platform::CPUPlace()));

    int* dst_ptr = dst_tensor.mutable_data<int>(platform::CPUPlace());
    for (int i = 0; i < 6; ++i) {
      EXPECT_EQ(dst_ptr[i], array[i]);
    }
    delete gpu_place;
  }
#endif
}

}  // namespace framework
}  // namespace paddle