//  Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "gtest/gtest.h"
#include "paddle/fluid/operators/math/math_function.h"

#include <iostream>

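// Helper: casts each float in `data` to float16 and writes it into in_ptr.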
void fill_fp16_data(paddle::platform::float16* in_ptr, size_t size,
                    const std::vector<float>& data) {
  PADDLE_ENFORCE_EQ(size, data.size());
  for (size_t i = 0; i < data.size(); ++i) {
    in_ptr[i] = paddle::platform::float16(data[i]);
  }
}

TEST(math_function, notrans_mul_trans_fp32) {
  using namespace paddle::framework;
  using namespace paddle::platform;

  Tensor input1;
  Tensor input1_gpu;
  Tensor input2_gpu;
  Tensor out_gpu;
  Tensor out;

  CPUPlace cpu_place;
  CUDAPlace gpu_place(0);
  CUDADeviceContext context(gpu_place);

  float* input1_ptr = input1.mutable_data<float>({2, 3}, cpu_place);
  float arr[6] = {0, 1, 2, 3, 4, 5};
  memcpy(input1_ptr, arr, 6 * sizeof(float));

  TensorCopy(input1, gpu_place, context, &input1_gpu);
  TensorCopy(input1, gpu_place, context, &input2_gpu);

  out_gpu.mutable_data<float>({2, 2}, gpu_place);

  paddle::operators::math::matmul<CUDADeviceContext, float>(
      context, input1_gpu, false, input2_gpu, true, 1, &out_gpu, 0);

  TensorCopy(out_gpu, cpu_place, context, &out);

  float* out_ptr = out.data<float>();
  context.Wait();
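  // numpy check:
  // a = np.arange(6).reshape(2, 3)
  // out = np.dot(a, a.T) = [[5, 14], [14, 50]]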
  EXPECT_EQ(out_ptr[0], 5);
  EXPECT_EQ(out_ptr[1], 14);
  EXPECT_EQ(out_ptr[2], 14);
  EXPECT_EQ(out_ptr[3], 50);
}

TEST(math_function, notrans_mul_trans_fp16) {
  using namespace paddle::framework;
  using namespace paddle::platform;

  // fp16 GEMM in cublas requires GPU compute capability >= 53,
  // so skip this test on older devices
  if (GetCUDAComputeCapability(0) < 53) {
    std::cout << "Compute capability is " << GetCUDAComputeCapability(0)
              << ", skipping fp16 test" << std::endl;
    return;
  }

  Tensor input1;
  Tensor input1_gpu;
  Tensor input2_gpu;
  Tensor out_gpu;
  Tensor out;

  CPUPlace cpu_place;
  CUDAPlace gpu_place(0);
  CUDADeviceContext context(gpu_place);

  float16* input1_ptr = input1.mutable_data<float16>({2, 3}, cpu_place);
  fill_fp16_data(input1_ptr, input1.numel(), {0, 1, 2, 3, 4, 5});

  TensorCopy(input1, gpu_place, context, &input1_gpu);
  TensorCopy(input1, gpu_place, context, &input2_gpu);

  out_gpu.mutable_data<float16>({2, 2}, gpu_place);

  paddle::operators::math::matmul<CUDADeviceContext, float16>(
      context, input1_gpu, false, input2_gpu, true, float16(1), &out_gpu,
      float16(0));

  TensorCopy(out_gpu, cpu_place, context, &out);

  float16* out_ptr = out.data<float16>();
  context.Wait();
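  // same check as the fp32 case: np.dot(a, a.T) = [[5, 14], [14, 50]]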
  EXPECT_EQ(static_cast<float>(out_ptr[0]), 5);
  EXPECT_EQ(static_cast<float>(out_ptr[1]), 14);
  EXPECT_EQ(static_cast<float>(out_ptr[2]), 14);
  EXPECT_EQ(static_cast<float>(out_ptr[3]), 50);
}

TEST(math_function, trans_mul_notrans_fp32) {
  using namespace paddle::framework;
  using namespace paddle::platform;

  Tensor input1;
  Tensor input1_gpu;
  Tensor input2_gpu;
  Tensor out_gpu;
  Tensor out;

  CPUPlace cpu_place;
  CUDAPlace gpu_place(0);
  CUDADeviceContext context(gpu_place);

  float* input1_ptr = input1.mutable_data<float>({2, 3}, cpu_place);
  float arr[6] = {0, 1, 2, 3, 4, 5};
  memcpy(input1_ptr, arr, 6 * sizeof(float));

  TensorCopy(input1, gpu_place, context, &input1_gpu);
  TensorCopy(input1, gpu_place, context, &input2_gpu);

  out_gpu.mutable_data<float>({3, 3}, gpu_place);

  paddle::operators::math::matmul<paddle::platform::CUDADeviceContext, float>(
      context, input1_gpu, true, input2_gpu, false, 1, &out_gpu, 0);

  TensorCopy(out_gpu, cpu_place, context, &out);

  float* out_ptr = out.data<float>();
  context.Wait();
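  // numpy check:
  // a = np.arange(6).reshape(2, 3)
  // out = np.dot(a.T, a) = [[9, 12, 15], [12, 17, 22], [15, 22, 29]]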
  EXPECT_EQ(out_ptr[0], 9);
  EXPECT_EQ(out_ptr[1], 12);
  EXPECT_EQ(out_ptr[2], 15);
  EXPECT_EQ(out_ptr[3], 12);
  EXPECT_EQ(out_ptr[4], 17);
  EXPECT_EQ(out_ptr[5], 22);
  EXPECT_EQ(out_ptr[6], 15);
  EXPECT_EQ(out_ptr[7], 22);
  EXPECT_EQ(out_ptr[8], 29);
}

TEST(math_function, trans_mul_notrans_fp16) {
  using namespace paddle::framework;
  using namespace paddle::platform;

  // fp16 GEMM in cublas requires GPU compute capability >= 53,
  // so skip this test on older devices
  if (GetCUDAComputeCapability(0) < 53) {
    return;
  }

  Tensor input1;
  Tensor input1_gpu;
  Tensor input2_gpu;
  Tensor out_gpu;
  Tensor out;

  CPUPlace cpu_place;
  CUDAPlace gpu_place(0);
  CUDADeviceContext context(gpu_place);

  float16* input1_ptr = input1.mutable_data<float16>({2, 3}, cpu_place);
  fill_fp16_data(input1_ptr, input1.numel(), {0, 1, 2, 3, 4, 5});

  TensorCopy(input1, gpu_place, context, &input1_gpu);
  TensorCopy(input1, gpu_place, context, &input2_gpu);

  out_gpu.mutable_data<float16>({3, 3}, gpu_place);

  paddle::operators::math::matmul<paddle::platform::CUDADeviceContext, float16>(
      context, input1_gpu, true, input2_gpu, false, float16(1), &out_gpu,
      float16(0));

  TensorCopy(out_gpu, cpu_place, context, &out);

  float16* out_ptr = out.data<float16>();
  context.Wait();
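  // same check as the fp32 case: np.dot(a.T, a)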
  EXPECT_EQ(static_cast<float>(out_ptr[0]), 9);
  EXPECT_EQ(static_cast<float>(out_ptr[1]), 12);
  EXPECT_EQ(static_cast<float>(out_ptr[2]), 15);
  EXPECT_EQ(static_cast<float>(out_ptr[3]), 12);
  EXPECT_EQ(static_cast<float>(out_ptr[4]), 17);
  EXPECT_EQ(static_cast<float>(out_ptr[5]), 22);
  EXPECT_EQ(static_cast<float>(out_ptr[6]), 15);
  EXPECT_EQ(static_cast<float>(out_ptr[7]), 22);
  EXPECT_EQ(static_cast<float>(out_ptr[8]), 29);
}

TEST(math_function, gemm_notrans_cublas_fp32) {
  using namespace paddle::framework;
  using namespace paddle::platform;

  Tensor input1;
  Tensor input2;
  Tensor input3;
  Tensor input1_gpu;
  Tensor input2_gpu;
  Tensor input3_gpu;

  CPUPlace cpu_place;
  CUDAPlace gpu_place(0);
  CUDADeviceContext context(gpu_place);

  int m = 2;
  int n = 3;
  int k = 3;
  float* input1_ptr = input1.mutable_data<float>({2, 3}, cpu_place);
  float arr1[6] = {0, 1, 2, 3, 4, 5};
  memcpy(input1_ptr, arr1, 6 * sizeof(float));
  float* input2_ptr = input2.mutable_data<float>({3, 4}, cpu_place);
  float arr2[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
  memcpy(input2_ptr, arr2, 12 * sizeof(float));
  float* input3_ptr = input3.mutable_data<float>({2, 4}, cpu_place);
  float arr3[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  memcpy(input3_ptr, arr3, 8 * sizeof(float));

  TensorCopy(input1, gpu_place, context, &input1_gpu);
  TensorCopy(input2, gpu_place, context, &input2_gpu);
  TensorCopy(input3, gpu_place, context, &input3_gpu);
  float* a = input1_gpu.data<float>();
  float* b = input2_gpu.data<float>();
  float* c = input3_gpu.mutable_data<float>(gpu_place);

  paddle::operators::math::gemm<paddle::platform::CUDADeviceContext, float>(
      context, false, false, m, n, k, 1, a, 3, b + 1, 4, 1, c + 1, 4);

  TensorCopy(input3_gpu, cpu_place, context, &input3);

  // numpy code:
  // a = np.arange(6).reshape(2, 3)
  // b = np.arange(12).reshape(3, 4)[:, 1:]
  // c = np.arange(8).reshape(2, 4)[:, 1:]
  // out = np.arange(8).reshape(2, 4)
  // out[:, 1:] = np.dot(a, b) + c
  context.Wait();
  EXPECT_EQ(input3_ptr[0], 0);
  EXPECT_EQ(input3_ptr[1], 24);
  EXPECT_EQ(input3_ptr[2], 28);
  EXPECT_EQ(input3_ptr[3], 32);
  EXPECT_EQ(input3_ptr[4], 4);
  EXPECT_EQ(input3_ptr[5], 73);
  EXPECT_EQ(input3_ptr[6], 86);
  EXPECT_EQ(input3_ptr[7], 99);
}

TEST(math_function, gemm_notrans_cublas_fp16) {
  using namespace paddle::framework;
  using namespace paddle::platform;

  // fp16 GEMM in cublas requires GPU compute capability >= 53,
  // so skip this test on older devices
  if (GetCUDAComputeCapability(0) < 53) {
    return;
  }

  Tensor input1;
  Tensor input2;
  Tensor input3;
  Tensor input1_gpu;
  Tensor input2_gpu;
  Tensor input3_gpu;

  CPUPlace cpu_place;
  CUDAPlace gpu_place(0);
  CUDADeviceContext context(gpu_place);

  int m = 2;
  int n = 3;
  int k = 3;
  float16* input1_ptr = input1.mutable_data<float16>({2, 3}, cpu_place);
  fill_fp16_data(input1_ptr, input1.numel(), {0, 1, 2, 3, 4, 5});
  float16* input2_ptr = input2.mutable_data<float16>({3, 4}, cpu_place);
  fill_fp16_data(input2_ptr, input2.numel(),
                 {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11});
  float16* input3_ptr = input3.mutable_data<float16>({2, 4}, cpu_place);
  fill_fp16_data(input3_ptr, input3.numel(), {0, 1, 2, 3, 4, 5, 6, 7});

  TensorCopy(input1, gpu_place, context, &input1_gpu);
  TensorCopy(input2, gpu_place, context, &input2_gpu);
  TensorCopy(input3, gpu_place, context, &input3_gpu);
  float16* a = input1_gpu.data<float16>();
  float16* b = input2_gpu.data<float16>();
  float16* c = input3_gpu.mutable_data<float16>(gpu_place);

  paddle::operators::math::gemm<paddle::platform::CUDADeviceContext, float16>(
      context, false, false, m, n, k, float16(1), a, 3, b + 1, 4, float16(1),
      c + 1, 4);

  TensorCopy(input3_gpu, cpu_place, context, &input3);

  // numpy code:
  // a = np.arange(6).reshape(2, 3)
  // b = np.arange(12).reshape(3, 4)[:, 1:]
  // c = np.arange(8).reshape(2, 4)[:, 1:]
  // out = np.arange(8).reshape(2, 4)
  // out[:, 1:] = np.dot(a, b) + c
  context.Wait();
  EXPECT_EQ(static_cast<float>(input3_ptr[0]), 0);
  EXPECT_EQ(static_cast<float>(input3_ptr[1]), 24);
  EXPECT_EQ(static_cast<float>(input3_ptr[2]), 28);
  EXPECT_EQ(static_cast<float>(input3_ptr[3]), 32);
  EXPECT_EQ(static_cast<float>(input3_ptr[4]), 4);
  EXPECT_EQ(static_cast<float>(input3_ptr[5]), 73);
  EXPECT_EQ(static_cast<float>(input3_ptr[6]), 86);
  EXPECT_EQ(static_cast<float>(input3_ptr[7]), 99);
}

TEST(math_function, gemm_trans_cublas_fp32) {
  using namespace paddle::framework;
  using namespace paddle::platform;

  Tensor input1;
  Tensor input2;
  Tensor input3;
  Tensor input1_gpu;
  Tensor input2_gpu;
  Tensor input3_gpu;

  CPUPlace cpu_place;
  CUDAPlace gpu_place(0);
  CUDADeviceContext context(gpu_place);

  int m = 2;
  int n = 3;
  int k = 3;
  float* input1_ptr = input1.mutable_data<float>({2, 3}, cpu_place);
  float arr1[6] = {0, 1, 2, 3, 4, 5};
  memcpy(input1_ptr, arr1, 6 * sizeof(float));
  float* input2_ptr = input2.mutable_data<float>({4, 3}, cpu_place);
  float arr2[12] = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
  memcpy(input2_ptr, arr2, 12 * sizeof(float));
  float* input3_ptr = input3.mutable_data<float>({2, 4}, cpu_place);
  float arr3[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  memcpy(input3_ptr, arr3, 8 * sizeof(float));

  TensorCopy(input1, gpu_place, context, &input1_gpu);
  TensorCopy(input2, gpu_place, context, &input2_gpu);
  TensorCopy(input3, gpu_place, context, &input3_gpu);
  float* a = input1_gpu.data<float>();
  float* b = input2_gpu.data<float>();
  float* c = input3_gpu.mutable_data<float>(gpu_place);

  paddle::operators::math::gemm<paddle::platform::CUDADeviceContext, float>(
      context, false, true, m, n, k, 1, a, 3, b + 3, 3, 1, c + 1, 4);

  TensorCopy(input3_gpu, cpu_place, context, &input3);

  context.Wait();
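  // input2 is the transpose of the notrans test's input2 and the gemm call
  // sets transB = true, so the expected values match the notrans test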
  EXPECT_EQ(input3_ptr[0], 0);
  EXPECT_EQ(input3_ptr[1], 24);
  EXPECT_EQ(input3_ptr[2], 28);
  EXPECT_EQ(input3_ptr[3], 32);
  EXPECT_EQ(input3_ptr[4], 4);
  EXPECT_EQ(input3_ptr[5], 73);
  EXPECT_EQ(input3_ptr[6], 86);
  EXPECT_EQ(input3_ptr[7], 99);
}

TEST(math_function, gemm_trans_cublas_fp16) {
  using namespace paddle::framework;
  using namespace paddle::platform;

  // fp16 GEMM in cublas requires GPU compute capability >= 53,
  // so skip this test on older devices
  if (GetCUDAComputeCapability(0) < 53) {
    return;
  }

  Tensor input1;
  Tensor input2;
  Tensor input3;
  Tensor input1_gpu;
  Tensor input2_gpu;
  Tensor input3_gpu;

  CPUPlace cpu_place;
  CUDAPlace gpu_place(0);
  CUDADeviceContext context(gpu_place);

  int m = 2;
  int n = 3;
  int k = 3;
  float16* input1_ptr = input1.mutable_data<float16>({2, 3}, cpu_place);
  fill_fp16_data(input1_ptr, input1.numel(), {0, 1, 2, 3, 4, 5});
  float16* input2_ptr = input2.mutable_data<float16>({4, 3}, cpu_place);
  fill_fp16_data(input2_ptr, input2.numel(),
                 {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11});
  float16* input3_ptr = input3.mutable_data<float16>({2, 4}, cpu_place);
  fill_fp16_data(input3_ptr, input3.numel(), {0, 1, 2, 3, 4, 5, 6, 7});

  TensorCopy(input1, gpu_place, context, &input1_gpu);
  TensorCopy(input2, gpu_place, context, &input2_gpu);
  TensorCopy(input3, gpu_place, context, &input3_gpu);
  float16* a = input1_gpu.data<float16>();
  float16* b = input2_gpu.data<float16>();
  float16* c = input3_gpu.mutable_data<float16>(gpu_place);

  paddle::operators::math::gemm<paddle::platform::CUDADeviceContext, float16>(
      context, false, true, m, n, k, float16(1), a, 3, b + 3, 3, float16(1),
      c + 1, 4);

  TensorCopy(input3_gpu, cpu_place, context, &input3);

  context.Wait();
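  // same expected values as the fp32 trans case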
  EXPECT_EQ(static_cast<float>(input3_ptr[0]), 0);
  EXPECT_EQ(static_cast<float>(input3_ptr[1]), 24);
  EXPECT_EQ(static_cast<float>(input3_ptr[2]), 28);
  EXPECT_EQ(static_cast<float>(input3_ptr[3]), 32);
  EXPECT_EQ(static_cast<float>(input3_ptr[4]), 4);
  EXPECT_EQ(static_cast<float>(input3_ptr[5]), 73);
  EXPECT_EQ(static_cast<float>(input3_ptr[6]), 86);
  EXPECT_EQ(static_cast<float>(input3_ptr[7]), 99);
}

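// Runs GEMV on the GPU and checks the result against a CPU reference,
// covering both c = A * b (trans = false) and c = A^T * b (trans = true).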
template <typename T>
void GemvTest(int m, int n, bool trans) {
  using namespace paddle::framework;
  using namespace paddle::platform;

  Tensor mat_a;
  Tensor vec_b;
  Tensor vec_c;

  CPUPlace cpu_place;
  CUDAPlace gpu_place(0);
  CUDADeviceContext context(gpu_place);

  T* data_a = mat_a.mutable_data<T>({m, n}, cpu_place);
  T* data_b = vec_b.mutable_data<T>({trans ? m : n}, cpu_place);
  T* data_c = vec_c.mutable_data<T>({trans ? n : m}, cpu_place);

  Tensor g_mat_a;
  Tensor g_vec_b;
  Tensor g_vec_c;
  T* g_data_a = g_mat_a.mutable_data<T>(mat_a.dims(), gpu_place);
  T* g_data_b = g_vec_b.mutable_data<T>(vec_b.dims(), gpu_place);
  T* g_data_c = g_vec_c.mutable_data<T>(vec_c.dims(), gpu_place);

  for (int i = 0; i < mat_a.numel(); ++i) {
    data_a[i] = static_cast<T>(i);
  }
  for (int i = 0; i < vec_b.numel(); ++i) {
    data_b[i] = static_cast<T>(i);
  }

  TensorCopy(mat_a, gpu_place, context, &g_mat_a);
  TensorCopy(vec_b, gpu_place, context, &g_vec_b);

  paddle::operators::math::gemv<CUDADeviceContext, T>(
      context, trans, static_cast<int>(m), static_cast<int>(n), 1., g_data_a,
      g_data_b, 0., g_data_c);

  TensorCopy(g_vec_c, cpu_place, context, &vec_c);
  context.Wait();

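  // reference check: recompute the matrix-vector product on the CPU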
  if (!trans) {
    for (int i = 0; i < m; ++i) {
      T sum = 0.0;
      for (int j = 0; j < n; ++j) {
        sum += data_a[i * n + j] * data_b[j];
      }
      ASSERT_FLOAT_EQ(data_c[i], sum);
    }
  } else {
    for (int i = 0; i < n; ++i) {
      T sum = 0.0;
      for (int j = 0; j < m; ++j) {
        sum += data_a[j * n + i] * data_b[j];
      }
      ASSERT_FLOAT_EQ(data_c[i], sum);
    }
  }
}

TEST(math_function, gemv) {
  GemvTest<float>(3, 13, false);
  GemvTest<double>(3, 13, false);
  GemvTest<float>(3, 13, true);
  GemvTest<double>(3, 13, true);
}