//  Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/tensor_util.h"

#include <gtest/gtest.h>

#include <cmath>

namespace paddle {
namespace framework {

// Verifies TensorCopy on CPU (and GPU when built with CUDA/HIP): the
// destination must get a distinct buffer with equal contents, self-copy must
// be a safe no-op, slices must copy correctly, and layout must propagate.
TEST(TensorCopy, Tensor) {
  Tensor src_tensor;
  Tensor dst_tensor;
  platform::CPUDeviceContext cpu_ctx((platform::CPUPlace()));

  int* src_ptr =
      src_tensor.mutable_data<int>(make_ddim({3, 3}), platform::CPUPlace());

  int arr[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
  memcpy(src_ptr, arr, 9 * sizeof(int));
  src_tensor.set_layout(DataLayout::kAnyLayout);

  auto cpu_place = new platform::CPUPlace();
  TensorCopy(src_tensor, *cpu_place, &dst_tensor);

  const int* dst_ptr = dst_tensor.data<int>();
  // The copy must not alias the source buffer.
  EXPECT_NE(src_ptr, dst_ptr);
  for (size_t i = 0; i < 9; ++i) {
    EXPECT_EQ(src_ptr[i], dst_ptr[i]);
  }

  // Copying a tensor onto itself must leave the data intact.
  TensorCopy(dst_tensor, *cpu_place, &dst_tensor);
  for (size_t i = 0; i < 9; ++i) {
    EXPECT_EQ(src_ptr[i], dst_ptr[i]);
  }

  EXPECT_TRUE(dst_tensor.layout() == src_tensor.layout());

  // Slice covering row 1 of the 3x3 source (3 ints).
  Tensor slice_tensor = src_tensor.Slice(1, 2);
  TensorCopy(slice_tensor, *cpu_place, &dst_tensor);
  const int* slice_ptr = slice_tensor.data<int>();
  dst_ptr = dst_tensor.data<int>();
  EXPECT_NE(dst_ptr, slice_ptr);
  for (size_t i = 0; i < 3; ++i) {
    EXPECT_EQ(dst_ptr[i], slice_ptr[i]);
  }
  EXPECT_TRUE(dst_tensor.layout() == src_tensor.layout());
  delete cpu_place;  // was leaked before; the other tests delete their places

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  {
    Tensor src_tensor;
    Tensor gpu_tensor;
    Tensor dst_tensor;

    int* src_ptr =
        src_tensor.mutable_data<int>(make_ddim({3, 3}), platform::CPUPlace());

    int arr[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
    memcpy(src_ptr, arr, 9 * sizeof(int));

    // CPU Tensor to GPU Tensor
    auto gpu_place = new platform::CUDAPlace(0);
    platform::CUDADeviceContext gpu_ctx(*gpu_place);
    TensorCopy(src_tensor, *gpu_place, gpu_ctx, &gpu_tensor);

    // GPU Tensor to CPU Tensor
    auto cpu_place = new platform::CPUPlace();
    TensorCopy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor);

    // Sync before comparing: the device copies above are asynchronous.
    gpu_ctx.Wait();
    const int* dst_ptr = dst_tensor.data<int>();
    EXPECT_NE(src_ptr, dst_ptr);
    for (size_t i = 0; i < 9; ++i) {
      EXPECT_EQ(src_ptr[i], dst_ptr[i]);
    }

    // Copy the same tensor onto itself on the GPU; data must survive.
    TensorCopy(gpu_tensor, *gpu_place, gpu_ctx, &gpu_tensor);
    gpu_ctx.Wait();
    const int* dst_ptr_tmp = dst_tensor.data<int>();
    EXPECT_NE(src_ptr, dst_ptr_tmp);
    for (size_t i = 0; i < 9; ++i) {
      EXPECT_EQ(src_ptr[i], dst_ptr_tmp[i]);
    }

    Tensor slice_tensor = src_tensor.Slice(1, 2);

    // CPU Slice Tensor to GPU Tensor
    TensorCopy(slice_tensor, *gpu_place, gpu_ctx, &gpu_tensor);

    // GPU Tensor to CPU Tensor
    TensorCopy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor);

    // Sync before comparing the slice contents.
    gpu_ctx.Wait();
    const int* slice_ptr = slice_tensor.data<int>();
    dst_ptr = dst_tensor.data<int>();
    EXPECT_NE(dst_ptr, slice_ptr);
    for (size_t i = 0; i < 3; ++i) {
      EXPECT_EQ(dst_ptr[i], slice_ptr[i]);
    }

    EXPECT_TRUE(dst_tensor.layout() == src_tensor.layout());
    delete cpu_place;  // was leaked before
    delete gpu_place;  // was leaked before
  }
#endif
}

// Verifies TensorFromVector: element-wise equality with the source vector,
// no buffer aliasing, and correct behavior after the vector shrinks and the
// tensor is resized — on CPU, and through a GPU round-trip when available.
TEST(TensorFromVector, Tensor) {
  {
    std::vector<int> src_vec = {1, 2, 3, 4, 5, 6, 7, 8, 9};
    paddle::framework::Tensor cpu_tensor;

    // Copy to CPU Tensor
    cpu_tensor.Resize(paddle::framework::make_ddim({3, 3}));
    auto cpu_place = new paddle::platform::CPUPlace();
    paddle::framework::TensorFromVector<int>(src_vec, &cpu_tensor);

    // Compare Tensors
    const int* cpu_ptr = cpu_tensor.data<int>();
    const int* src_ptr = src_vec.data();
    EXPECT_NE(src_ptr, cpu_ptr);
    for (size_t i = 0; i < 9; ++i) {
      EXPECT_EQ(src_ptr[i], cpu_ptr[i]);
    }

    // Drop the first 5 elements; {6, 7, 8, 9} (4 elements) remain.
    src_vec.erase(src_vec.begin(), src_vec.begin() + 5);
    cpu_tensor.Resize(paddle::framework::make_ddim({2, 2}));
    paddle::framework::TensorFromVector<int>(src_vec, &cpu_tensor);
    cpu_ptr = cpu_tensor.data<int>();
    src_ptr = src_vec.data();
    EXPECT_NE(src_ptr, cpu_ptr);
    // Only 4 valid elements (2x2 tensor); the previous bound of 5 read one
    // element past the end of both buffers.
    for (size_t i = 0; i < 4; ++i) {
      EXPECT_EQ(src_ptr[i], cpu_ptr[i]);
    }

    delete cpu_place;
  }

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  {
    std::vector<int> src_vec = {1, 2, 3, 4, 5, 6, 7, 8, 9};
    paddle::framework::Tensor cpu_tensor;
    paddle::framework::Tensor gpu_tensor;
    paddle::framework::Tensor dst_tensor;

    // Copy to CPU Tensor
    cpu_tensor.Resize(make_ddim({3, 3}));
    auto cpu_place = new paddle::platform::CPUPlace();
    paddle::platform::CPUDeviceContext cpu_ctx(*cpu_place);
    paddle::framework::TensorFromVector<int>(src_vec, cpu_ctx, &cpu_tensor);

    // Copy to GPUTensor
    gpu_tensor.Resize(paddle::framework::make_ddim({3, 3}));
    auto gpu_place = new paddle::platform::CUDAPlace();
    paddle::platform::CUDADeviceContext gpu_ctx(*gpu_place);
    paddle::framework::TensorFromVector<int>(src_vec, gpu_ctx, &gpu_tensor);
    // Copy from GPU to CPU tensor for comparison
    paddle::framework::TensorCopy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor);

    // Sync before comparing: the device copies are asynchronous.
    gpu_ctx.Wait();
    const int* src_ptr = src_vec.data();
    const int* cpu_ptr = cpu_tensor.data<int>();
    const int* dst_ptr = dst_tensor.data<int>();
    EXPECT_NE(src_ptr, cpu_ptr);
    EXPECT_NE(src_ptr, dst_ptr);
    for (size_t i = 0; i < 9; ++i) {
      EXPECT_EQ(src_ptr[i], cpu_ptr[i]);
      EXPECT_EQ(src_ptr[i], dst_ptr[i]);
    }

    // Drop the first 5 elements; {6, 7, 8, 9} (4 elements) remain.
    src_vec.erase(src_vec.begin(), src_vec.begin() + 5);

    cpu_tensor.Resize(paddle::framework::make_ddim({2, 2}));
    paddle::framework::TensorFromVector<int>(src_vec, cpu_ctx, &cpu_tensor);
    gpu_tensor.Resize(paddle::framework::make_ddim({2, 2}));
    paddle::framework::TensorFromVector<int>(src_vec, gpu_ctx, &gpu_tensor);
    paddle::framework::TensorCopy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor);

    // Sync before comparing the shrunken tensors.
    gpu_ctx.Wait();
    src_ptr = src_vec.data();
    cpu_ptr = cpu_tensor.data<int>();
    dst_ptr = dst_tensor.data<int>();
    EXPECT_NE(src_ptr, cpu_ptr);
    EXPECT_NE(src_ptr, dst_ptr);
    // Only 4 valid elements (2x2 tensors); the previous bound of 5 was OOB.
    for (size_t i = 0; i < 4; ++i) {
      EXPECT_EQ(src_ptr[i], cpu_ptr[i]);
      EXPECT_EQ(src_ptr[i], dst_ptr[i]);
    }

    delete cpu_place;
    delete gpu_place;
  }
#endif
}

// Verifies TensorToVector: a tensor's contents must round-trip into a
// std::vector element-for-element, from a CPU tensor and — when built with
// CUDA/HIP — from a GPU tensor.
TEST(TensorToVector, Tensor) {
  {
    paddle::framework::Tensor src;
    int* src_ptr = src.mutable_data<int>({3, 3}, paddle::platform::CPUPlace());
    for (int i = 0; i < 3 * 3; ++i) {
      src_ptr[i] = i;
    }

    std::vector<int> dst;
    paddle::framework::TensorToVector<int>(src, &dst);

    for (int i = 0; i < 3 * 3; ++i) {
      EXPECT_EQ(src_ptr[i], dst[i]);
    }
  }
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  {
    std::vector<int> src_vec = {1, 2, 3, 4, 5, 6, 7, 8, 9};
    paddle::framework::Tensor gpu_tensor;
    paddle::platform::CUDAPlace place;
    paddle::platform::CUDADeviceContext gpu_ctx(place);
    paddle::framework::TensorFromVector<int>(src_vec, gpu_ctx, &gpu_tensor);

    std::vector<int> dst;
    paddle::framework::TensorToVector<int>(gpu_tensor, gpu_ctx, &dst);

    for (int i = 0; i < 3 * 3; ++i) {
      EXPECT_EQ(src_vec[i], dst[i]);
    }
  }
#endif
}

245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299
// Verifies TensorToVector for bool elements: values must round-trip into a
// std::vector<bool> element-for-element, from a CPU tensor and — depending
// on the build — from a CUDA or Ascend NPU tensor.
TEST(TensorToVector, Tensor_bool) {
  {
    // CPU path: fill with an alternating false/true pattern.
    paddle::framework::Tensor src;
    bool* src_ptr =
        src.mutable_data<bool>({3, 3}, paddle::platform::CPUPlace());
    for (int i = 0; i < 3 * 3; ++i) {
      src_ptr[i] = static_cast<bool>(i % 2);
    }

    paddle::platform::CPUPlace place;
    std::vector<bool> dst;
    paddle::framework::TensorToVector<bool>(src, &dst);

    for (int i = 0; i < 3 * 3; ++i) {
      EXPECT_EQ(src_ptr[i], dst[i]);
    }
  }
#ifdef PADDLE_WITH_CUDA
  {
    // GPU path: vector -> device tensor -> vector must preserve every value.
    std::vector<bool> src_vec = {
        false, true, false, true, false, true, false, true, false,
    };
    paddle::framework::Tensor gpu_tensor;
    paddle::platform::CUDAPlace place;
    paddle::platform::CUDADeviceContext gpu_ctx(place);
    paddle::framework::TensorFromVector<bool>(src_vec, gpu_ctx, &gpu_tensor);

    std::vector<bool> dst;
    paddle::framework::TensorToVector<bool>(gpu_tensor, gpu_ctx, &dst);

    for (int i = 0; i < 3 * 3; ++i) {
      EXPECT_EQ(src_vec[i], dst[i]);
    }
  }
#endif
#ifdef PADDLE_WITH_ASCEND_CL
  {
    // Ascend NPU path: same round-trip through an NPU tensor.
    std::vector<bool> src_vec = {
        false, true, false, true, false, true, false, true, false,
    };
    paddle::framework::Tensor npu_tensor;
    paddle::platform::NPUPlace place(0);
    paddle::platform::NPUDeviceContext npu_ctx(place);
    paddle::framework::TensorFromVector<bool>(src_vec, npu_ctx, &npu_tensor);

    std::vector<bool> dst;
    paddle::framework::TensorToVector<bool>(npu_tensor, npu_ctx, &dst);

    for (int i = 0; i < 3 * 3; ++i) {
      EXPECT_EQ(src_vec[i], dst[i]);
    }
  }
#endif
}

// Verifies TensorFromDLPack: wrapping a tensor as a DLPack capsule and
// converting back must produce a new buffer with identical contents, on CPU
// and — when built with CUDA/HIP — on GPU.
TEST(TensorFromDLPack, Tensor) {
  {
    std::vector<int> src_vec = {1, 2, 3, 4, 5, 6, 7, 8, 9};
    paddle::framework::Tensor cpu_tensor;

    cpu_tensor.Resize(paddle::framework::make_ddim({3, 3}));
    paddle::platform::CPUPlace cpu_place;
    paddle::platform::CPUDeviceContext cpu_ctx(cpu_place);
    paddle::framework::TensorFromVector<int>(src_vec, cpu_ctx, &cpu_tensor);
    paddle::framework::DLPackTensor dlpack_tensor(cpu_tensor, 1);

    paddle::framework::Tensor dst_tensor;
    paddle::framework::TensorFromDLPack(dlpack_tensor, &dst_tensor);

    auto cpu_ptr = cpu_tensor.data<int>();
    // (renamed from the misleading `src_ptr`: this points at dst_tensor)
    auto dst_ptr = dst_tensor.data<int>();
    // The DLPack round-trip must copy, not alias.
    EXPECT_NE(dst_ptr, cpu_ptr);
    for (size_t i = 0; i < 9; ++i) {
      EXPECT_EQ(dst_ptr[i], cpu_ptr[i]);
    }
  }

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  {
    std::vector<int> src_vec = {1, 2, 3, 4, 5, 6, 7, 8, 9};
    paddle::framework::Tensor cpu_tensor;
    paddle::framework::Tensor gpu_tensor;
    paddle::framework::Tensor dst_tensor;
    paddle::framework::Tensor gpu_tensor_from_dlpack;

    // Copy to CPU Tensor
    cpu_tensor.Resize(make_ddim({3, 3}));
    paddle::platform::CPUPlace cpu_place;
    paddle::platform::CPUDeviceContext cpu_ctx(cpu_place);
    paddle::framework::TensorFromVector<int>(src_vec, cpu_ctx, &cpu_tensor);

    // Copy to GPUTensor
    gpu_tensor.Resize(paddle::framework::make_ddim({3, 3}));
    paddle::platform::CUDAPlace gpu_place;
    auto& gpu_ctx =
        *paddle::platform::DeviceContextPool::Instance().GetByPlace(gpu_place);
    paddle::framework::TensorFromVector<int>(src_vec, gpu_ctx, &gpu_tensor);
    gpu_ctx.Wait();

    // GPU tensor -> DLPack -> GPU tensor.
    paddle::framework::DLPackTensor dlpack_tensor(gpu_tensor, 1);
    paddle::framework::TensorFromDLPack(dlpack_tensor, &gpu_tensor_from_dlpack);
    gpu_ctx.Wait();

    // Copy from GPU to CPU tensor for comparison
    paddle::framework::TensorCopy(gpu_tensor_from_dlpack, cpu_place, gpu_ctx,
                                  &dst_tensor);
    // Sync before Compare Tensors
    gpu_ctx.Wait();
    const int* src_ptr = src_vec.data();
    const int* cpu_ptr = cpu_tensor.data<int>();
    const int* dst_ptr = dst_tensor.data<int>();
    EXPECT_NE(src_ptr, cpu_ptr);
    EXPECT_NE(src_ptr, dst_ptr);
    for (size_t i = 0; i < 9; ++i) {
      EXPECT_EQ(src_ptr[i], cpu_ptr[i]);
      EXPECT_EQ(src_ptr[i], dst_ptr[i]);
    }
  }
#endif
}

// Verifies TensorContainsNAN on CPU for float and float16: it must report
// true when any element is NaN and false once the NaN is overwritten.
TEST(TensorContainsNAN, CPU) {
  {
    paddle::framework::Tensor src;
    float* buf = src.mutable_data<float>({3}, paddle::platform::CPUPlace());
    buf[0] = 0.0;
    buf[1] = NAN;
    buf[2] = 0.0;
    EXPECT_TRUE(paddle::framework::TensorContainsNAN(src));
    buf[1] = 0.0;
    EXPECT_FALSE(paddle::framework::TensorContainsNAN(src));
  }

  {
    paddle::framework::Tensor src;
    paddle::platform::float16* buf =
        src.mutable_data<paddle::platform::float16>(
            {3}, paddle::platform::CPUPlace());
    buf[0] = 0.0;
    // 0x7fff is a float16 NaN bit pattern (all-ones exponent, nonzero
    // mantissa), written directly into the raw representation.
    buf[1].x = 0x7fff;
    buf[2] = 0.0;
    EXPECT_TRUE(paddle::framework::TensorContainsNAN(src));
    buf[1] = 0.0;
    EXPECT_FALSE(paddle::framework::TensorContainsNAN(src));
  }
}

// Verifies TensorContainsInf on CPU for double and float16: it must report
// true when any element is infinite and false once the Inf is overwritten.
TEST(TensorContainsInf, CPU) {
  {
    paddle::framework::Tensor src;
    double* buf = src.mutable_data<double>({3}, paddle::platform::CPUPlace());
    buf[0] = 1.0;
    buf[1] = INFINITY;
    buf[2] = 0.0;
    EXPECT_TRUE(paddle::framework::TensorContainsInf(src));
    buf[1] = 1.0;
    EXPECT_FALSE(paddle::framework::TensorContainsInf(src));
  }

  {
    paddle::framework::Tensor src;
    paddle::platform::float16* buf =
        src.mutable_data<paddle::platform::float16>(
            {3}, paddle::platform::CPUPlace());
    buf[0] = 1.0;
    // 0x7c00 is the float16 +infinity bit pattern (all-ones exponent, zero
    // mantissa), written directly into the raw representation.
    buf[1].x = 0x7c00;
    buf[2] = 0.0;
    EXPECT_TRUE(paddle::framework::TensorContainsInf(src));
    buf[1] = 1.0;
    EXPECT_FALSE(paddle::framework::TensorContainsInf(src));
  }
}

// Verifies TensorIsfinite on CPU: the single-bool output tensor must be
// false while the input holds an Inf or NaN and true once all elements are
// finite again, for double and float16 inputs.
TEST(TensorIsfinite, CPU) {
  {
    // Inf makes the tensor non-finite.
    paddle::framework::Tensor src, out;
    double* buf = src.mutable_data<double>({3}, paddle::platform::CPUPlace());
    buf[0] = 1.0;
    buf[1] = INFINITY;
    buf[2] = 0.0;
    paddle::framework::TensorIsfinite(src, &out);
    EXPECT_EQ(out.data<bool>()[0], false);
    buf[1] = 1.0;
    paddle::framework::TensorIsfinite(src, &out);
    EXPECT_EQ(out.data<bool>()[0], true);
  }

  {
    // NaN makes the tensor non-finite.
    paddle::framework::Tensor src, out;
    double* buf = src.mutable_data<double>({3}, paddle::platform::CPUPlace());
    buf[0] = 1.0;
    buf[1] = NAN;
    buf[2] = 0.0;
    paddle::framework::TensorIsfinite(src, &out);
    EXPECT_EQ(out.data<bool>()[0], false);
    buf[1] = 1.0;
    paddle::framework::TensorIsfinite(src, &out);
    EXPECT_EQ(out.data<bool>()[0], true);
  }

  {
    // float16: 0x7c00 is +Inf, 0x7fff is a NaN bit pattern.
    paddle::framework::Tensor src, out;
    paddle::platform::float16* buf =
        src.mutable_data<paddle::platform::float16>(
            {3}, paddle::platform::CPUPlace());
    buf[0] = 1.0;
    buf[1].x = 0x7c00;
    buf[2] = 0.0;
    paddle::framework::TensorIsfinite(src, &out);
    EXPECT_EQ(out.data<bool>()[0], false);
    buf[1] = 1.0;
    paddle::framework::TensorIsfinite(src, &out);
    EXPECT_EQ(out.data<bool>()[0], true);
    buf[1].x = 0x7fff;
    paddle::framework::TensorIsfinite(src, &out);
    EXPECT_EQ(out.data<bool>()[0], false);
  }
}

// Verifies TensorToStream/TensorFromStream serialization round-trips: a
// {2, 3} int tensor written to a stream and read back must reproduce every
// element and the dims, via CPU and — when built with CUDA/HIP — via GPU.
TEST(Tensor, FromAndToStream) {
  framework::Tensor src_tensor;
  int array[6] = {1, 2, 3, 4, 5, 6};
  src_tensor.Resize({2, 3});
  int* src_ptr = src_tensor.mutable_data<int>(platform::CPUPlace());
  for (int i = 0; i < 6; ++i) {
    src_ptr[i] = array[i];
  }
  {
    framework::Tensor dst_tensor;
    auto place = new platform::CPUPlace();
    platform::CPUDeviceContext cpu_ctx(*place);
    std::ostringstream oss;
    TensorToStream(oss, src_tensor, cpu_ctx);

    std::istringstream iss(oss.str());
    TensorFromStream(iss, &dst_tensor, cpu_ctx);
    int* dst_ptr = dst_tensor.mutable_data<int>(platform::CPUPlace());
    // Check all 6 elements (previous bound of 5 skipped the last one; the
    // GPU branch below already checks all 6).
    for (int i = 0; i < 6; ++i) {
      EXPECT_EQ(dst_ptr[i], array[i]);
    }
    EXPECT_EQ(dst_tensor.dims(), src_tensor.dims());
    delete place;
  }
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  {
    Tensor gpu_tensor;
    gpu_tensor.Resize({2, 3});
    Tensor dst_tensor;

    auto gpu_place = new platform::CUDAPlace();
    platform::CUDADeviceContext gpu_ctx(*gpu_place);

    TensorCopy(src_tensor, *gpu_place, gpu_ctx, &gpu_tensor);

    std::ostringstream oss;
    TensorToStream(oss, gpu_tensor, gpu_ctx);

    std::istringstream iss(oss.str());
    TensorFromStream(
        iss, &dst_tensor,
        *platform::DeviceContextPool::Instance().Get(platform::CPUPlace()));

    int* dst_ptr = dst_tensor.mutable_data<int>(platform::CPUPlace());
    for (int i = 0; i < 6; ++i) {
      EXPECT_EQ(dst_ptr[i], array[i]);
    }
    delete gpu_place;
  }
#endif
}

}  // namespace framework
}  // namespace paddle