//  Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/tensor_util.h"

#include <gtest/gtest.h>

#include <cmath>
#include <cstring>
#include <sstream>
#include <string>
#include <vector>

namespace paddle {
namespace framework {
D
dzhwinter 已提交
22

Y
Yi Wang 已提交
23
TEST(TensorCopy, Tensor) {
D
dzhwinter 已提交
24 25 26 27 28 29 30 31 32
  Tensor src_tensor;
  Tensor dst_tensor;
  platform::CPUDeviceContext cpu_ctx((platform::CPUPlace()));

  int* src_ptr =
      src_tensor.mutable_data<int>(make_ddim({3, 3}), platform::CPUPlace());

  int arr[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
  memcpy(src_ptr, arr, 9 * sizeof(int));
D
dzhwinter 已提交
33
  src_tensor.set_layout(DataLayout::kAnyLayout);
D
dzhwinter 已提交
34 35

  auto cpu_place = new platform::CPUPlace();
Y
Yi Wang 已提交
36
  TensorCopy(src_tensor, *cpu_place, &dst_tensor);
D
dzhwinter 已提交
37 38

  const int* dst_ptr = dst_tensor.data<int>();
39
  EXPECT_NE(src_ptr, dst_ptr);
D
dzhwinter 已提交
40 41 42 43
  for (size_t i = 0; i < 9; ++i) {
    EXPECT_EQ(src_ptr[i], dst_ptr[i]);
  }

M
minqiyang 已提交
44 45 46 47 48
  TensorCopy(dst_tensor, *cpu_place, &dst_tensor);
  for (size_t i = 0; i < 9; ++i) {
    EXPECT_EQ(src_ptr[i], dst_ptr[i]);
  }

D
dzhwinter 已提交
49 50
  EXPECT_TRUE(dst_tensor.layout() == src_tensor.layout());

D
dzhwinter 已提交
51
  Tensor slice_tensor = src_tensor.Slice(1, 2);
Y
Yi Wang 已提交
52
  TensorCopy(slice_tensor, *cpu_place, &dst_tensor);
D
dzhwinter 已提交
53 54
  const int* slice_ptr = slice_tensor.data<int>();
  dst_ptr = dst_tensor.data<int>();
55
  EXPECT_NE(dst_ptr, slice_ptr);
D
dzhwinter 已提交
56 57 58
  for (size_t i = 0; i < 3; ++i) {
    EXPECT_EQ(dst_ptr[i], slice_ptr[i]);
  }
D
dzhwinter 已提交
59 60
  EXPECT_TRUE(dst_tensor.layout() == src_tensor.layout());

D
dzhwinter 已提交
61 62 63 64 65 66 67 68 69 70 71 72 73
#ifdef PADDLE_WITH_CUDA
  {
    Tensor src_tensor;
    Tensor gpu_tensor;
    Tensor dst_tensor;

    int* src_ptr =
        src_tensor.mutable_data<int>(make_ddim({3, 3}), platform::CPUPlace());

    int arr[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
    memcpy(src_ptr, arr, 9 * sizeof(int));

    // CPU Tensor to GPU Tensor
D
dzhwinter 已提交
74
    auto gpu_place = new platform::CUDAPlace(0);
D
dzhwinter 已提交
75
    platform::CUDADeviceContext gpu_ctx(*gpu_place);
Y
Yi Wang 已提交
76
    TensorCopy(src_tensor, *gpu_place, gpu_ctx, &gpu_tensor);
D
dzhwinter 已提交
77 78 79

    // GPU Tensor to CPU Tensor
    auto cpu_place = new platform::CPUPlace();
Y
Yi Wang 已提交
80
    TensorCopy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor);
D
dzhwinter 已提交
81 82 83 84

    // Sync before Compare Tensors
    gpu_ctx.Wait();
    const int* dst_ptr = dst_tensor.data<int>();
85
    EXPECT_NE(src_ptr, dst_ptr);
D
dzhwinter 已提交
86 87 88 89
    for (size_t i = 0; i < 9; ++i) {
      EXPECT_EQ(src_ptr[i], dst_ptr[i]);
    }

M
minqiyang 已提交
90 91 92 93 94 95 96 97 98
    // Copy the same tensor
    TensorCopy(gpu_tensor, *gpu_place, gpu_ctx, &gpu_tensor);
    gpu_ctx.Wait();
    const int* dst_ptr_tmp = dst_tensor.data<int>();
    EXPECT_NE(src_ptr, dst_ptr_tmp);
    for (size_t i = 0; i < 9; ++i) {
      EXPECT_EQ(src_ptr[i], dst_ptr_tmp[i]);
    }

D
dzhwinter 已提交
99 100 101
    Tensor slice_tensor = src_tensor.Slice(1, 2);

    // CPU Slice Tensor to GPU Tensor
Y
Yi Wang 已提交
102
    TensorCopy(slice_tensor, *gpu_place, gpu_ctx, &gpu_tensor);
D
dzhwinter 已提交
103 104

    // GPU Tensor to CPU Tensor
Y
Yi Wang 已提交
105
    TensorCopy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor);
D
dzhwinter 已提交
106 107 108 109 110

    // Sync before Compare Slice Tensors
    gpu_ctx.Wait();
    const int* slice_ptr = slice_tensor.data<int>();
    dst_ptr = dst_tensor.data<int>();
111
    EXPECT_NE(dst_ptr, slice_ptr);
D
dzhwinter 已提交
112 113 114
    for (size_t i = 0; i < 3; ++i) {
      EXPECT_EQ(dst_ptr[i], slice_ptr[i]);
    }
D
dzhwinter 已提交
115 116

    EXPECT_TRUE(dst_tensor.layout() == src_tensor.layout());
D
dzhwinter 已提交
117 118 119 120
  }
#endif
}

Y
Yi Wang 已提交
121
TEST(TensorFromVector, Tensor) {
D
dzhwinter 已提交
122 123
  {
    std::vector<int> src_vec = {1, 2, 3, 4, 5, 6, 7, 8, 9};
124
    paddle::framework::Tensor cpu_tensor;
D
dzhwinter 已提交
125 126

    // Copy to CPU Tensor
127
    cpu_tensor.Resize(paddle::framework::make_ddim({3, 3}));
D
dzhwinter 已提交
128
    auto cpu_place = new paddle::platform::CPUPlace();
129
    paddle::framework::TensorFromVector<int>(src_vec, &cpu_tensor);
D
dzhwinter 已提交
130 131 132 133

    // Compare Tensors
    const int* cpu_ptr = cpu_tensor.data<int>();
    const int* src_ptr = src_vec.data();
134
    EXPECT_NE(src_ptr, cpu_ptr);
D
dzhwinter 已提交
135 136 137 138 139
    for (size_t i = 0; i < 9; ++i) {
      EXPECT_EQ(src_ptr[i], cpu_ptr[i]);
    }

    src_vec.erase(src_vec.begin(), src_vec.begin() + 5);
140 141
    cpu_tensor.Resize(paddle::framework::make_ddim({2, 2}));
    paddle::framework::TensorFromVector<int>(src_vec, &cpu_tensor);
D
dzhwinter 已提交
142 143
    cpu_ptr = cpu_tensor.data<int>();
    src_ptr = src_vec.data();
144
    EXPECT_NE(src_ptr, cpu_ptr);
D
dzhwinter 已提交
145 146 147 148 149 150 151 152 153 154
    for (size_t i = 0; i < 5; ++i) {
      EXPECT_EQ(src_ptr[i], cpu_ptr[i]);
    }

    delete cpu_place;
  }

#ifdef PADDLE_WITH_CUDA
  {
    std::vector<int> src_vec = {1, 2, 3, 4, 5, 6, 7, 8, 9};
155 156 157
    paddle::framework::Tensor cpu_tensor;
    paddle::framework::Tensor gpu_tensor;
    paddle::framework::Tensor dst_tensor;
D
dzhwinter 已提交
158 159 160 161

    // Copy to CPU Tensor
    cpu_tensor.Resize(make_ddim({3, 3}));
    auto cpu_place = new paddle::platform::CPUPlace();
162 163
    paddle::platform::CPUDeviceContext cpu_ctx(*cpu_place);
    paddle::framework::TensorFromVector<int>(src_vec, cpu_ctx, &cpu_tensor);
D
dzhwinter 已提交
164 165

    // Copy to GPUTensor
166
    gpu_tensor.Resize(paddle::framework::make_ddim({3, 3}));
D
dzhwinter 已提交
167
    auto gpu_place = new paddle::platform::CUDAPlace();
168 169
    paddle::platform::CUDADeviceContext gpu_ctx(*gpu_place);
    paddle::framework::TensorFromVector<int>(src_vec, gpu_ctx, &gpu_tensor);
D
dzhwinter 已提交
170
    // Copy from GPU to CPU tensor for comparison
171
    paddle::framework::TensorCopy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor);
D
dzhwinter 已提交
172 173 174 175 176 177

    // Sync before Compare Tensors
    gpu_ctx.Wait();
    const int* src_ptr = src_vec.data();
    const int* cpu_ptr = cpu_tensor.data<int>();
    const int* dst_ptr = dst_tensor.data<int>();
178 179
    EXPECT_NE(src_ptr, cpu_ptr);
    EXPECT_NE(src_ptr, dst_ptr);
D
dzhwinter 已提交
180 181 182 183 184 185 186
    for (size_t i = 0; i < 9; ++i) {
      EXPECT_EQ(src_ptr[i], cpu_ptr[i]);
      EXPECT_EQ(src_ptr[i], dst_ptr[i]);
    }

    src_vec.erase(src_vec.begin(), src_vec.begin() + 5);

187 188 189 190 191
    cpu_tensor.Resize(paddle::framework::make_ddim({2, 2}));
    paddle::framework::TensorFromVector<int>(src_vec, cpu_ctx, &cpu_tensor);
    gpu_tensor.Resize(paddle::framework::make_ddim({2, 2}));
    paddle::framework::TensorFromVector<int>(src_vec, gpu_ctx, &gpu_tensor);
    paddle::framework::TensorCopy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor);
D
dzhwinter 已提交
192 193 194 195 196 197

    // Sync before Compare Tensors
    gpu_ctx.Wait();
    src_ptr = src_vec.data();
    cpu_ptr = cpu_tensor.data<int>();
    dst_ptr = dst_tensor.data<int>();
198 199
    EXPECT_NE(src_ptr, cpu_ptr);
    EXPECT_NE(src_ptr, dst_ptr);
D
dzhwinter 已提交
200 201 202 203 204 205 206 207 208 209 210
    for (size_t i = 0; i < 5; ++i) {
      EXPECT_EQ(src_ptr[i], cpu_ptr[i]);
      EXPECT_EQ(src_ptr[i], dst_ptr[i]);
    }

    delete cpu_place;
    delete gpu_place;
  }
#endif
}

Y
Yi Wang 已提交
211
TEST(TensorToVector, Tensor) {
D
dzhwinter 已提交
212
  {
213 214
    paddle::framework::Tensor src;
    int* src_ptr = src.mutable_data<int>({3, 3}, paddle::platform::CPUPlace());
D
dzhwinter 已提交
215 216 217 218
    for (int i = 0; i < 3 * 3; ++i) {
      src_ptr[i] = i;
    }

219
    paddle::platform::CPUPlace place;
D
dzhwinter 已提交
220
    std::vector<int> dst;
221
    paddle::framework::TensorToVector<int>(src, &dst);
D
dzhwinter 已提交
222 223 224 225 226 227 228 229

    for (int i = 0; i < 3 * 3; ++i) {
      EXPECT_EQ(src_ptr[i], dst[i]);
    }
  }
#ifdef PADDLE_WITH_CUDA
  {
    std::vector<int> src_vec = {1, 2, 3, 4, 5, 6, 7, 8, 9};
230 231 232 233
    paddle::framework::Tensor gpu_tensor;
    paddle::platform::CUDAPlace place;
    paddle::platform::CUDADeviceContext gpu_ctx(place);
    paddle::framework::TensorFromVector<int>(src_vec, gpu_ctx, &gpu_tensor);
D
dzhwinter 已提交
234 235

    std::vector<int> dst;
236
    paddle::framework::TensorToVector<int>(gpu_tensor, gpu_ctx, &dst);
D
dzhwinter 已提交
237 238 239 240 241 242 243 244

    for (int i = 0; i < 3 * 3; ++i) {
      EXPECT_EQ(src_vec[i], dst[i]);
    }
  }
#endif
}

Y
Yi Wang 已提交
245
TEST(TensorContainsNAN, CPU) {
246
  {
247 248
    paddle::framework::Tensor src;
    float* buf = src.mutable_data<float>({3}, paddle::platform::CPUPlace());
249 250 251
    buf[0] = 0.0;
    buf[1] = NAN;
    buf[2] = 0.0;
252
    EXPECT_TRUE(paddle::framework::TensorContainsNAN(src));
253
    buf[1] = 0.0;
254
    EXPECT_FALSE(paddle::framework::TensorContainsNAN(src));
255 256 257
  }

  {
258
    paddle::framework::Tensor src;
A
Abhinav Arora 已提交
259 260 261
    paddle::platform::float16* buf =
        src.mutable_data<paddle::platform::float16>(
            {3}, paddle::platform::CPUPlace());
262 263 264
    buf[0] = 0.0;
    buf[1].x = 0x7fff;
    buf[2] = 0.0;
265
    EXPECT_TRUE(paddle::framework::TensorContainsNAN(src));
266
    buf[1] = 0.0;
267
    EXPECT_FALSE(paddle::framework::TensorContainsNAN(src));
268
  }
Y
Yang Yu 已提交
269 270
}

Y
Yi Wang 已提交
271
TEST(TensorContainsInf, CPU) {
272
  {
273 274
    paddle::framework::Tensor src;
    double* buf = src.mutable_data<double>({3}, paddle::platform::CPUPlace());
275 276 277
    buf[0] = 1.0;
    buf[1] = INFINITY;
    buf[2] = 0.0;
278
    EXPECT_TRUE(paddle::framework::TensorContainsInf(src));
279
    buf[1] = 1.0;
280
    EXPECT_FALSE(paddle::framework::TensorContainsInf(src));
281 282 283
  }

  {
284
    paddle::framework::Tensor src;
A
Abhinav Arora 已提交
285 286 287
    paddle::platform::float16* buf =
        src.mutable_data<paddle::platform::float16>(
            {3}, paddle::platform::CPUPlace());
288 289 290
    buf[0] = 1.0;
    buf[1].x = 0x7c00;
    buf[2] = 0.0;
291
    EXPECT_TRUE(paddle::framework::TensorContainsInf(src));
292
    buf[1] = 1.0;
293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339
    EXPECT_FALSE(paddle::framework::TensorContainsInf(src));
  }
}

// TensorIsfinite writes a single bool into `out`: false when the source
// holds any inf or NaN, true otherwise. Exercised for double (inf, NaN)
// and float16 (inf and NaN bit patterns).
TEST(TensorIsfinite, CPU) {
  // double tensor containing an infinity.
  {
    paddle::framework::Tensor src, out;
    double* data = src.mutable_data<double>({3}, paddle::platform::CPUPlace());
    data[0] = 1.0;
    data[1] = INFINITY;
    data[2] = 0.0;
    paddle::framework::TensorIsfinite(src, &out);
    EXPECT_EQ(out.data<bool>()[0], false);
    data[1] = 1.0;
    paddle::framework::TensorIsfinite(src, &out);
    EXPECT_EQ(out.data<bool>()[0], true);
  }

  // double tensor containing a NaN.
  {
    paddle::framework::Tensor src, out;
    double* data = src.mutable_data<double>({3}, paddle::platform::CPUPlace());
    data[0] = 1.0;
    data[1] = NAN;
    data[2] = 0.0;
    paddle::framework::TensorIsfinite(src, &out);
    EXPECT_EQ(out.data<bool>()[0], false);
    data[1] = 1.0;
    paddle::framework::TensorIsfinite(src, &out);
    EXPECT_EQ(out.data<bool>()[0], true);
  }

  // float16 tensor: 0x7c00 is +inf, 0x7fff is a NaN bit pattern.
  {
    paddle::framework::Tensor src, out;
    paddle::platform::float16* data =
        src.mutable_data<paddle::platform::float16>(
            {3}, paddle::platform::CPUPlace());
    data[0] = 1.0;
    data[1].x = 0x7c00;
    data[2] = 0.0;
    paddle::framework::TensorIsfinite(src, &out);
    EXPECT_EQ(out.data<bool>()[0], false);
    data[1] = 1.0;
    paddle::framework::TensorIsfinite(src, &out);
    EXPECT_EQ(out.data<bool>()[0], true);
    data[1].x = 0x7fff;
    paddle::framework::TensorIsfinite(src, &out);
    EXPECT_EQ(out.data<bool>()[0], false);
  }
}

Y
Yi Wang 已提交
343
TEST(Tensor, FromAndToStream) {
344 345 346 347 348 349 350 351 352 353 354 355
  framework::Tensor src_tensor;
  int array[6] = {1, 2, 3, 4, 5, 6};
  src_tensor.Resize({2, 3});
  int* src_ptr = src_tensor.mutable_data<int>(platform::CPUPlace());
  for (int i = 0; i < 6; ++i) {
    src_ptr[i] = array[i];
  }
  {
    framework::Tensor dst_tensor;
    auto place = new platform::CPUPlace();
    platform::CPUDeviceContext cpu_ctx(*place);
    std::ostringstream oss;
Y
Yi Wang 已提交
356
    TensorToStream(oss, src_tensor, cpu_ctx);
357 358

    std::istringstream iss(oss.str());
Y
Yi Wang 已提交
359
    TensorFromStream(iss, &dst_tensor, cpu_ctx);
360 361
    int* dst_ptr = dst_tensor.mutable_data<int>(platform::CPUPlace());
    for (int i = 0; i < 5; ++i) {
362
      EXPECT_EQ(dst_ptr[i], array[i]);
363
    }
364
    EXPECT_EQ(dst_tensor.dims(), src_tensor.dims());
365 366 367 368 369 370 371 372 373 374 375
    delete place;
  }
#ifdef PADDLE_WITH_CUDA
  {
    Tensor gpu_tensor;
    gpu_tensor.Resize({2, 3});
    Tensor dst_tensor;

    auto gpu_place = new platform::CUDAPlace();
    platform::CUDADeviceContext gpu_ctx(*gpu_place);

Y
Yi Wang 已提交
376
    TensorCopy(src_tensor, *gpu_place, gpu_ctx, &gpu_tensor);
377 378

    std::ostringstream oss;
Y
Yi Wang 已提交
379
    TensorToStream(oss, gpu_tensor, gpu_ctx);
380 381

    std::istringstream iss(oss.str());
Y
Yi Wang 已提交
382
    TensorFromStream(iss, &dst_tensor, gpu_ctx);
383 384 385

    int* dst_ptr = dst_tensor.mutable_data<int>(platform::CPUPlace());
    for (int i = 0; i < 6; ++i) {
386
      EXPECT_EQ(dst_ptr[i], array[i]);
387 388 389 390 391 392
    }
    delete gpu_place;
  }
#endif
}

D
dzhwinter 已提交
393 394
}  // namespace framework
}  // namespace paddle