tensor_test.cc 9.8 KB
Newer Older
1
//  Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
D
dzhwinter 已提交
2 3 4 5 6 7 8 9 10 11 12 13
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
F
fengjiayi 已提交
14

Y
Yi Wang 已提交
15
#include "paddle/fluid/framework/tensor.h"
F
fengjiayi 已提交
16
#include <gtest/gtest.h>
17
#include <string>
D
dzhwinter 已提交
18
#include "paddle/fluid/platform/float16.h"
F
fengjiayi 已提交
19

20 21 22
namespace framework = paddle::framework;
namespace platform = paddle::platform;

F
fengjiayi 已提交
23
// Resize() must be fully reflected by dims(): rank and every extent.
TEST(Tensor, Dims) {
  framework::Tensor tensor;
  tensor.Resize({2, 3, 4});
  const framework::DDim dims = tensor.dims();
  ASSERT_EQ(arity(dims), 3);
  // Extents were set to 2, 3, 4 in order, i.e. dims[axis] == axis + 2.
  for (int axis = 0; axis < 3; ++axis) {
    EXPECT_EQ(axis + 2, dims[axis]);
  }
}

// Calling data<T>() on a tensor that holds no memory must throw an
// EnforceNotMet whose message tells the user to call mutable_data first.
TEST(Tensor, DataAssert) {
  framework::Tensor src_tensor;

  bool caught = false;
  try {
    src_tensor.data<double>();
  } catch (const platform::EnforceNotMet& err) {
    // Catch by const reference, not by value: avoids copying the exception
    // object and any risk of slicing.
    caught = true;
    std::string msg =
        "holder_ should not be null\nTensor holds no memory. Call "
        "Tensor::mutable_data first.";
    // err.what() appends unstable context (file/line) after the message,
    // so only the leading msg.length() characters are compared.
    const char* what = err.what();
    for (size_t i = 0; i < msg.length(); ++i) {
      ASSERT_EQ(what[i], msg[i]);
    }
  }
  ASSERT_TRUE(caught);
}

// mutable_data() contract: requesting more elements than currently held
// re-allocates the underlying holder; an equal or smaller request reuses
// the existing memory block.
TEST(Tensor, MutableData) {
  {
    framework::Tensor src_tensor;
    // Initial allocation: 1 * 2 * 3 floats.
    float* ptr_a = src_tensor.mutable_data<float>(
        framework::make_ddim({1, 2, 3}), platform::CPUPlace());
    const auto holder_a = src_tensor.Holder();
    EXPECT_NE(ptr_a, nullptr);
    // Larger dims (3 * 4 > 1 * 2 * 3): memory is expected to be re-allocated.
    float* ptr_b = src_tensor.mutable_data<float>(framework::make_ddim({3, 4}),
                                                  platform::CPUPlace());
    EXPECT_NE(ptr_b, nullptr);
    const auto holder_b = src_tensor.Holder();
    EXPECT_NE(holder_a.get(), holder_b.get());
    // Same element count (2 * 2 * 3 == 3 * 4): memory block stays unchanged.
    ptr_a = src_tensor.mutable_data<float>(framework::make_ddim({2, 2, 3}),
                                           platform::CPUPlace());
    const auto holder_c = src_tensor.Holder();
    EXPECT_EQ(holder_b.get(), holder_c.get());
    // Smaller element count (2 * 2): memory block stays unchanged.
    ptr_b = src_tensor.mutable_data<float>(framework::make_ddim({2, 2}),
                                           platform::CPUPlace());
    const auto holder_d = src_tensor.Holder();
    EXPECT_EQ(ptr_a, ptr_b);
    EXPECT_EQ(holder_c.get(), holder_d.get());

    // Different element type, smaller total byte size: block is reused.
    auto* u8_ptr = src_tensor.mutable_data<uint8_t>(
        framework::make_ddim({2, 2}), platform::CPUPlace());
    float* ptr_c = reinterpret_cast<float*>(u8_ptr);
    const auto holder_e = src_tensor.Holder();
    EXPECT_EQ(ptr_a, ptr_c);
    EXPECT_EQ(holder_d.get(), holder_e.get());

    // Different element type, bigger total byte size: block must change.
    auto* f64_ptr = src_tensor.mutable_data<double>(
        framework::make_ddim({2, 2, 3}), platform::CPUPlace());
    const auto holder_f = src_tensor.Holder();
    float* ptr_d = reinterpret_cast<float*>(f64_ptr);
    EXPECT_NE(ptr_a, ptr_d);
    EXPECT_NE(holder_e.get(), holder_f.get());
  }
  // Not sure if it's desired, but currently, Tensor type can be changed.
  {
    framework::Tensor src_tensor;
    int8_t* signed_ptr = src_tensor.mutable_data<int8_t>(
        framework::make_ddim({1}), platform::CPUPlace());
    EXPECT_NE(signed_ptr, nullptr);
    *signed_ptr = 1;

    // Same byte size, different signedness: the stored bits survive.
    uint8_t* unsigned_ptr = src_tensor.mutable_data<uint8_t>(
        framework::make_ddim({1}), platform::CPUPlace());
    EXPECT_NE(unsigned_ptr, nullptr);
    EXPECT_EQ(static_cast<int>(unsigned_ptr[0]), 1);
  }

#ifdef PADDLE_WITH_CUDA
  {
    framework::Tensor src_tensor;
    // Initial allocation on the GPU.
    float* ptr_a = src_tensor.mutable_data<float>(
        framework::make_ddim({1, 2, 3}), platform::CUDAPlace());
    const auto holder_a = src_tensor.Holder();
    EXPECT_NE(ptr_a, nullptr);
    // Larger dims: memory is expected to be re-allocated.
    float* ptr_b = src_tensor.mutable_data<float>(
        framework::make_ddim({3, 1024}), platform::CUDAPlace());
    const auto holder_b = src_tensor.Holder();
    EXPECT_NE(ptr_b, nullptr);
    EXPECT_NE(holder_a.get(), holder_b.get());
    // Request within the current allocation: memory block stays unchanged.
    ptr_a = src_tensor.mutable_data<float>(framework::make_ddim({2, 2, 3}),
                                           platform::CUDAPlace());
    EXPECT_EQ(ptr_a, ptr_b);
    // Smaller request: memory block stays unchanged.
    ptr_b = src_tensor.mutable_data<float>(framework::make_ddim({2, 2}),
                                           platform::CUDAPlace());
    EXPECT_EQ(ptr_a, ptr_b);
  }
#endif
}
F
fengjiayi 已提交
147

F
fengjiayi 已提交
148
// ShareDataWith: sharing from an uninitialized tensor must throw; once the
// source has allocated, both tensors alias the same buffer.
TEST(Tensor, ShareDataWith) {
  {
    framework::Tensor src_tensor;
    framework::Tensor dst_tensor;
    // Try to share data from an uninitialized tensor.
    bool caught = false;
    try {
      dst_tensor.ShareDataWith(src_tensor);
    } catch (const paddle::platform::EnforceNotMet& err) {
      // Catch by const reference, not by value: avoids copying the
      // exception object and any risk of slicing.
      caught = true;
      std::string msg =
          "holder_ should not be null\nTensor holds no memory. Call "
          "Tensor::mutable_data first.";
      // Only the message prefix is stable; what() appends file/line info.
      const char* what = err.what();
      for (size_t i = 0; i < msg.length(); ++i) {
        ASSERT_EQ(what[i], msg[i]);
      }
    }
    ASSERT_TRUE(caught);

    src_tensor.mutable_data<int>(framework::make_ddim({2, 3, 4}),
                                 platform::CPUPlace());
    dst_tensor.ShareDataWith(src_tensor);
    ASSERT_EQ(src_tensor.data<int>(), dst_tensor.data<int>());
  }

#ifdef PADDLE_WITH_CUDA
  {
    framework::Tensor src_tensor;
    framework::Tensor dst_tensor;
    src_tensor.mutable_data<int>(framework::make_ddim({2, 3, 4}),
                                 platform::CUDAPlace());
    dst_tensor.ShareDataWith(src_tensor);
    ASSERT_EQ(src_tensor.data<int>(), dst_tensor.data<int>());
  }
#endif
}

// Slice(begin, end) must produce a view: correct dims, and a data pointer
// offset into the source buffer rather than a fresh allocation.
TEST(Tensor, Slice) {
  {
    framework::Tensor src_tensor;
    src_tensor.mutable_data<int>(framework::make_ddim({5, 3, 4}),
                                 platform::CPUPlace());
    framework::Tensor view = src_tensor.Slice(1, 3);
    framework::DDim view_dims = view.dims();
    // Rows [1, 3) of a {5, 3, 4} tensor -> {2, 3, 4}.
    ASSERT_EQ(arity(view_dims), 3);
    EXPECT_EQ(view_dims[0], 2);
    EXPECT_EQ(view_dims[1], 3);
    EXPECT_EQ(view_dims[2], 4);

    // mutable_data() with unchanged dims must not move either buffer.
    uintptr_t src_addr = reinterpret_cast<uintptr_t>(src_tensor.data<int>());
    uintptr_t src_mutable_addr = reinterpret_cast<uintptr_t>(
        src_tensor.mutable_data<int>(src_tensor.dims(), platform::CPUPlace()));
    uintptr_t view_addr = reinterpret_cast<uintptr_t>(view.data<int>());
    uintptr_t view_mutable_addr = reinterpret_cast<uintptr_t>(
        view.mutable_data<int>(view.dims(), platform::CPUPlace()));
    EXPECT_EQ(src_addr, src_mutable_addr);
    EXPECT_EQ(view_addr, view_mutable_addr);
    // The view starts exactly one row (3 * 4 ints) past the source start.
    EXPECT_EQ(src_addr + 3 * 4 * 1 * sizeof(int), view_addr);
  }

#ifdef PADDLE_WITH_CUDA
  {
    framework::Tensor src_tensor;
    src_tensor.mutable_data<double>(framework::make_ddim({6, 9}),
                                    platform::CUDAPlace());
    framework::Tensor view = src_tensor.Slice(2, 6);
    framework::DDim view_dims = view.dims();
    // Rows [2, 6) of a {6, 9} tensor -> {4, 9}.
    ASSERT_EQ(arity(view_dims), 2);
    EXPECT_EQ(view_dims[0], 4);
    EXPECT_EQ(view_dims[1], 9);

    // As above: unchanged dims must not move either buffer.
    uintptr_t src_addr =
        reinterpret_cast<uintptr_t>(src_tensor.data<double>());
    uintptr_t src_mutable_addr =
        reinterpret_cast<uintptr_t>(src_tensor.mutable_data<double>(
            src_tensor.dims(), platform::CUDAPlace()));
    uintptr_t view_addr = reinterpret_cast<uintptr_t>(view.data<double>());
    uintptr_t view_mutable_addr = reinterpret_cast<uintptr_t>(
        view.mutable_data<double>(view.dims(), platform::CUDAPlace()));
    EXPECT_EQ(src_addr, src_mutable_addr);
    EXPECT_EQ(view_addr, view_mutable_addr);
    // The view starts two rows (9 doubles each) past the source start.
    EXPECT_EQ(src_addr + 9 * 2 * sizeof(double), view_addr);
  }
#endif
}

F
fengjiayi 已提交
240
// ReshapeToMatrix(t, k) folds the first k axes into rows and the remaining
// axes into columns, yielding a rank-2 tensor.
TEST(Tensor, ReshapeToMatrix) {
  framework::Tensor src;
  int* src_ptr = src.mutable_data<int>({2, 3, 4, 9}, platform::CPUPlace());
  // Fill with a recognizable ramp 0..N-1.
  const int numel = 2 * 3 * 4 * 9;
  for (int idx = 0; idx < numel; ++idx) {
    src_ptr[idx] = idx;
  }
  framework::Tensor matrix = framework::ReshapeToMatrix(src, 2);
  // Leading two axes (2 * 3) become rows; trailing two (4 * 9) columns.
  ASSERT_EQ(matrix.dims()[0], 2 * 3);
  ASSERT_EQ(matrix.dims()[1], 4 * 9);
}
D
dzhwinter 已提交
250 251

// A freshly constructed tensor defaults to the NCHW layout, and
// set_layout() must round-trip through layout().
TEST(Tensor, Layout) {
  framework::Tensor tensor;
  ASSERT_EQ(tensor.layout(), framework::DataLayout::kNCHW);
  tensor.set_layout(framework::DataLayout::kAnyLayout);
  ASSERT_EQ(tensor.layout(), framework::DataLayout::kAnyLayout);
}
D
dzhwinter 已提交
257 258 259 260 261 262 263 264 265 266 267 268 269 270

// A float16 tensor must report memory_size() as element count times
// sizeof(float16).
TEST(Tensor, FP16) {
  using platform::float16;
  framework::Tensor src;
  float16* data = src.mutable_data<float16>({2, 3}, platform::CPUPlace());
  for (int i = 0; i < 2 * 3; ++i) {
    data[i] = static_cast<float16>(i);
  }
  EXPECT_EQ(src.memory_size(), 2 * 3 * sizeof(float16));
  // EXPECT a human readable error message
  // src.data<uint8_t>();
  // Tensor holds the wrong type, it holds N6paddle8platform7float16E at
  // [/paddle/Paddle/paddle/fluid/framework/tensor_impl.h:43]
}