//  Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/tensor_util.h"
#include <gtest/gtest.h>
#include <cmath>
#include <cstring>
#include <sstream>
#include <string>
#include <vector>

namespace paddle {
namespace framework {

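// TensorCopy should deep-copy the data (distinct buffers, equal contents)
// and carry the source layout over to the destination.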
TEST(TensorCopy, Tensor) {
  Tensor src_tensor;
  Tensor dst_tensor;
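  // The extra parentheses keep the next declaration from being parsed as a
  // function declaration (the "most vexing parse").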
  platform::CPUDeviceContext cpu_ctx((platform::CPUPlace()));

  int* src_ptr =
      src_tensor.mutable_data<int>(make_ddim({3, 3}), platform::CPUPlace());

  int arr[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
  memcpy(src_ptr, arr, 9 * sizeof(int));
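  // Set an explicit layout so we can check that TensorCopy preserves it.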
  src_tensor.set_layout(DataLayout::kAnyLayout);

  auto cpu_place = new platform::CPUPlace();
  TensorCopy(src_tensor, *cpu_place, &dst_tensor);

  const int* dst_ptr = dst_tensor.data<int>();
  ASSERT_NE(src_ptr, dst_ptr);
  for (size_t i = 0; i < 9; ++i) {
    EXPECT_EQ(src_ptr[i], dst_ptr[i]);
  }

  EXPECT_TRUE(dst_tensor.layout() == src_tensor.layout());

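  // Copying a slice should also yield an independent buffer.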
  Tensor slice_tensor = src_tensor.Slice(1, 2);
  TensorCopy(slice_tensor, *cpu_place, &dst_tensor);
  const int* slice_ptr = slice_tensor.data<int>();
  dst_ptr = dst_tensor.data<int>();
  ASSERT_NE(dst_ptr, slice_ptr);
  for (size_t i = 0; i < 3; ++i) {
    EXPECT_EQ(dst_ptr[i], slice_ptr[i]);
  }
  EXPECT_TRUE(dst_tensor.layout() == src_tensor.layout());
  delete cpu_place;

#ifdef PADDLE_WITH_CUDA
  {
    Tensor src_tensor;
    Tensor gpu_tensor;
    Tensor dst_tensor;

    int* src_ptr =
        src_tensor.mutable_data<int>(make_ddim({3, 3}), platform::CPUPlace());

    int arr[9] = {1, 2, 3, 4, 5, 6, 7, 8, 9};
    memcpy(src_ptr, arr, 9 * sizeof(int));

    // CPU Tensor to GPU Tensor
    auto gpu_place = new platform::CUDAPlace(0);
    platform::CUDADeviceContext gpu_ctx(*gpu_place);
    TensorCopy(src_tensor, *gpu_place, gpu_ctx, &gpu_tensor);

    // GPU Tensor to CPU Tensor
    auto cpu_place = new platform::CPUPlace();
    TensorCopy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor);

    // Sync before comparing tensors
    gpu_ctx.Wait();
    const int* dst_ptr = dst_tensor.data<int>();
    ASSERT_NE(src_ptr, dst_ptr);
    for (size_t i = 0; i < 9; ++i) {
      EXPECT_EQ(src_ptr[i], dst_ptr[i]);
    }

    Tensor slice_tensor = src_tensor.Slice(1, 2);

    // CPU Slice Tensor to GPU Tensor
    TensorCopy(slice_tensor, *gpu_place, gpu_ctx, &gpu_tensor);

    // GPU Tensor to CPU Tensor
    TensorCopy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor);

    // Sync before comparing slice tensors
    gpu_ctx.Wait();
    const int* slice_ptr = slice_tensor.data<int>();
    dst_ptr = dst_tensor.data<int>();
    ASSERT_NE(dst_ptr, slice_ptr);
    for (size_t i = 0; i < 3; ++i) {
      EXPECT_EQ(dst_ptr[i], slice_ptr[i]);
    }

    EXPECT_TRUE(dst_tensor.layout() == src_tensor.layout());

    delete gpu_place;
    delete cpu_place;
  }
#endif
}

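// TensorFromVector should copy the vector's contents into the tensor rather
// than aliasing the vector's buffer.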
TEST(TensorFromVector, Tensor) {
  using namespace paddle::framework;
  using namespace paddle::platform;
  {
    std::vector<int> src_vec = {1, 2, 3, 4, 5, 6, 7, 8, 9};
    Tensor cpu_tensor;

    // Copy to CPU Tensor
    cpu_tensor.Resize(make_ddim({3, 3}));
    auto cpu_place = new paddle::platform::CPUPlace();
    TensorFromVector<int>(src_vec, &cpu_tensor);

    // Compare Tensors
    const int* cpu_ptr = cpu_tensor.data<int>();
    const int* src_ptr = src_vec.data();
    ASSERT_NE(src_ptr, cpu_ptr);
    for (size_t i = 0; i < 9; ++i) {
      EXPECT_EQ(src_ptr[i], cpu_ptr[i]);
    }

    src_vec.erase(src_vec.begin(), src_vec.begin() + 5);
    cpu_tensor.Resize(make_ddim({2, 2}));
    TensorFromVector<int>(src_vec, &cpu_tensor);
    cpu_ptr = cpu_tensor.data<int>();
    src_ptr = src_vec.data();
    ASSERT_NE(src_ptr, cpu_ptr);
    for (size_t i = 0; i < 5; ++i) {
      EXPECT_EQ(src_ptr[i], cpu_ptr[i]);
    }

    delete cpu_place;
  }

#ifdef PADDLE_WITH_CUDA
  {
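    // Same vector, staged through both CPU and GPU tensors; the GPU copy is
    // brought back to the host for comparison.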
    std::vector<int> src_vec = {1, 2, 3, 4, 5, 6, 7, 8, 9};
    Tensor cpu_tensor;
    Tensor gpu_tensor;
    Tensor dst_tensor;

    // Copy to CPU Tensor
    cpu_tensor.Resize(make_ddim({3, 3}));
    auto cpu_place = new paddle::platform::CPUPlace();
    CPUDeviceContext cpu_ctx(*cpu_place);
    TensorFromVector<int>(src_vec, cpu_ctx, &cpu_tensor);

    // Copy to GPU Tensor
    gpu_tensor.Resize(make_ddim({3, 3}));
    auto gpu_place = new paddle::platform::CUDAPlace();
    CUDADeviceContext gpu_ctx(*gpu_place);
    TensorFromVector<int>(src_vec, gpu_ctx, &gpu_tensor);
    // Copy from GPU to CPU tensor for comparison
    TensorCopy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor);

    // Sync before comparing tensors
    gpu_ctx.Wait();
    const int* src_ptr = src_vec.data();
    const int* cpu_ptr = cpu_tensor.data<int>();
    const int* dst_ptr = dst_tensor.data<int>();
    ASSERT_NE(src_ptr, cpu_ptr);
    ASSERT_NE(src_ptr, dst_ptr);
    for (size_t i = 0; i < 9; ++i) {
      EXPECT_EQ(src_ptr[i], cpu_ptr[i]);
      EXPECT_EQ(src_ptr[i], dst_ptr[i]);
    }

    src_vec.erase(src_vec.begin(), src_vec.begin() + 5);

    cpu_tensor.Resize(make_ddim({2, 2}));
    TensorFromVector<int>(src_vec, cpu_ctx, &cpu_tensor);
    gpu_tensor.Resize(make_ddim({2, 2}));
    TensorFromVector<int>(src_vec, gpu_ctx, &gpu_tensor);
    TensorCopy(gpu_tensor, *cpu_place, gpu_ctx, &dst_tensor);

    // Sync before comparing tensors
    gpu_ctx.Wait();
    src_ptr = src_vec.data();
    cpu_ptr = cpu_tensor.data<int>();
    dst_ptr = dst_tensor.data<int>();
    ASSERT_NE(src_ptr, cpu_ptr);
    ASSERT_NE(src_ptr, dst_ptr);
    for (size_t i = 0; i < 5; ++i) {
      EXPECT_EQ(src_ptr[i], cpu_ptr[i]);
      EXPECT_EQ(src_ptr[i], dst_ptr[i]);
    }

    delete cpu_place;
    delete gpu_place;
  }
#endif
}

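// TensorToVector is the inverse: it reads a tensor (CPU or GPU) back into a
// std::vector, element for element.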
TEST(TensorToVector, Tensor) {
  using namespace paddle::framework;
  using namespace paddle::platform;
  {
    Tensor src;
    int* src_ptr = src.mutable_data<int>({3, 3}, CPUPlace());
    for (int i = 0; i < 3 * 3; ++i) {
      src_ptr[i] = i;
    }

    std::vector<int> dst;
    TensorToVector<int>(src, &dst);

    for (int i = 0; i < 3 * 3; ++i) {
      EXPECT_EQ(src_ptr[i], dst[i]);
    }
  }
#ifdef PADDLE_WITH_CUDA
  {
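    // Round trip: vector -> GPU tensor -> vector.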
    std::vector<int> src_vec = {1, 2, 3, 4, 5, 6, 7, 8, 9};
    Tensor gpu_tensor;
    CUDAPlace place;
    CUDADeviceContext gpu_ctx(place);
    TensorFromVector<int>(src_vec, gpu_ctx, &gpu_tensor);

    std::vector<int> dst;
    TensorToVector<int>(gpu_tensor, gpu_ctx, &dst);

    for (int i = 0; i < 3 * 3; ++i) {
      EXPECT_EQ(src_vec[i], dst[i]);
    }
  }
#endif
}

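// TensorContainsNAN should report a NaN anywhere in the buffer, for both
// float and float16 element types.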
TEST(TensorContainsNAN, CPU) {
  using namespace paddle::framework;
  using namespace paddle::platform;
  {
    Tensor src;
    float* buf = src.mutable_data<float>({3}, CPUPlace());
    buf[0] = 0.0;
    buf[1] = NAN;
    buf[2] = 0.0;
    ASSERT_TRUE(TensorContainsNAN(src));
    buf[1] = 0.0;
    ASSERT_FALSE(TensorContainsNAN(src));
  }

  {
    Tensor src;
    float16* buf = src.mutable_data<float16>({3}, CPUPlace());
    buf[0] = 0.0;
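    // 0x7fff: all exponent bits set and a non-zero mantissa, i.e. a float16 NaN.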
    buf[1].x = 0x7fff;
    buf[2] = 0.0;
    ASSERT_TRUE(TensorContainsNAN(src));
    buf[1] = 0.0;
    ASSERT_FALSE(TensorContainsNAN(src));
  }
}

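// TensorContainsInf mirrors the NaN check for infinities, here with double
// and float16 element types.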
TEST(TensorContainsInf, CPU) {
  using namespace paddle::framework;
  using namespace paddle::platform;
  {
    Tensor src;
    double* buf = src.mutable_data<double>({3}, CPUPlace());
    buf[0] = 1.0;
    buf[1] = INFINITY;
    buf[2] = 0.0;
    ASSERT_TRUE(TensorContainsInf(src));
    buf[1] = 1.0;
    ASSERT_FALSE(TensorContainsInf(src));
  }

  {
    Tensor src;
    float16* buf = src.mutable_data<float16>({3}, CPUPlace());
    buf[0] = 1.0;
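    // 0x7c00: all exponent bits set and a zero mantissa, i.e. float16 +infinity.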
    buf[1].x = 0x7c00;
    buf[2] = 0.0;
    ASSERT_TRUE(TensorContainsInf(src));
    buf[1] = 1.0;
    ASSERT_FALSE(TensorContainsInf(src));
  }
}

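// Serialize a tensor with TensorToStream and read it back with
// TensorFromStream; both the data and the dims should survive.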
TEST(Tensor, FromAndToStream) {
  framework::Tensor src_tensor;
  int array[6] = {1, 2, 3, 4, 5, 6};
  src_tensor.Resize({2, 3});
  int* src_ptr = src_tensor.mutable_data<int>(platform::CPUPlace());
  for (int i = 0; i < 6; ++i) {
    src_ptr[i] = array[i];
  }
  {
    framework::Tensor dst_tensor;
    auto place = new platform::CPUPlace();
    platform::CPUDeviceContext cpu_ctx(*place);
    std::ostringstream oss;
    TensorToStream(oss, src_tensor, cpu_ctx);

    std::istringstream iss(oss.str());
    TensorFromStream(iss, &dst_tensor, cpu_ctx);
    int* dst_ptr = dst_tensor.mutable_data<int>(platform::CPUPlace());
    // All six elements should survive the round trip.
    for (int i = 0; i < 6; ++i) {
      ASSERT_EQ(dst_ptr[i], array[i]);
    }
    ASSERT_EQ(dst_tensor.dims(), src_tensor.dims());
    delete place;
  }
#ifdef PADDLE_WITH_CUDA
  {
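    // The same round trip, with the tensor resident on the GPU.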
    Tensor gpu_tensor;
    gpu_tensor.Resize({2, 3});
    Tensor dst_tensor;

    auto gpu_place = new platform::CUDAPlace();
    platform::CUDADeviceContext gpu_ctx(*gpu_place);

    TensorCopy(src_tensor, *gpu_place, gpu_ctx, &gpu_tensor);

    std::ostringstream oss;
    TensorToStream(oss, gpu_tensor, gpu_ctx);

    std::istringstream iss(oss.str());
    TensorFromStream(iss, &dst_tensor, gpu_ctx);

    int* dst_ptr = dst_tensor.mutable_data<int>(platform::CPUPlace());
    for (int i = 0; i < 6; ++i) {
      ASSERT_EQ(dst_ptr[i], array[i]);
    }
    delete gpu_place;
  }
#endif
}

}  // namespace framework
}  // namespace paddle