math_function_test.cu 17.2 KB
Newer Older
1
//  Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
D
dzhwinter 已提交
2 3 4 5 6 7 8 9 10 11 12 13
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
Q
qijun 已提交
14
#include "gtest/gtest.h"
Y
Yi Wang 已提交
15
#include "paddle/fluid/operators/math/math_function.h"
Y
Yu Yang 已提交
16
#include "paddle/fluid/platform/device_context.h"
Q
qijun 已提交
17

18 19 20 21 22 23 24 25 26
// Converts a vector of floats to float16 and writes them into in_ptr.
// The destination buffer must hold exactly data.size() elements.
void fill_fp16_data(paddle::platform::float16* in_ptr, size_t size,
                    const std::vector<float>& data) {
  PADDLE_ENFORCE_EQ(size, data.size());
  size_t idx = 0;
  for (float value : data) {
    in_ptr[idx++] = paddle::platform::float16(value);
  }
}

// fp32: out = input1 * input1^T on the GPU (2x3 times 3x2 -> 2x2),
// checked against hand-computed values.
TEST(math_function, notrans_mul_trans_fp32) {
  paddle::framework::Tensor input1;
  paddle::framework::Tensor input1_gpu;
  paddle::framework::Tensor input2_gpu;
  paddle::framework::Tensor out_gpu;
  paddle::framework::Tensor out;

  paddle::platform::CPUPlace cpu_place;
  paddle::platform::CUDAPlace gpu_place(0);
  paddle::platform::CUDADeviceContext context(gpu_place);

  // input1 = [[0, 1, 2], [3, 4, 5]]
  float* input1_ptr = input1.mutable_data<float>({2, 3}, cpu_place);
  float arr[6] = {0, 1, 2, 3, 4, 5};
  memcpy(input1_ptr, arr, 6 * sizeof(float));

  // Both operands are copies of input1.
  paddle::framework::TensorCopySync(input1, gpu_place, &input1_gpu);
  paddle::framework::TensorCopySync(input1, gpu_place, &input2_gpu);

  out_gpu.mutable_data<float>({2, 2}, gpu_place);

  // out = 1 * input1 * input2^T + 0 * out
  paddle::operators::math::matmul<paddle::platform::CUDADeviceContext, float>(
      context, input1_gpu, false, input2_gpu, true, 1, &out_gpu, 0);

  paddle::framework::TensorCopySync(out_gpu, cpu_place, &out);

  float* out_ptr = out.data<float>();
  context.Wait();
  EXPECT_EQ(out_ptr[0], 5);
  EXPECT_EQ(out_ptr[1], 14);
  EXPECT_EQ(out_ptr[2], 14);
  EXPECT_EQ(out_ptr[3], 50);
}

59
// fp16: out = input1 * input1^T on the GPU; skipped on devices without
// cublas fp16 GEMM support.
TEST(math_function, notrans_mul_trans_fp16) {
  paddle::framework::Tensor input1;
  paddle::framework::Tensor input1_gpu;
  paddle::framework::Tensor input2_gpu;
  paddle::framework::Tensor out_gpu;
  paddle::framework::Tensor out;

  paddle::platform::CPUPlace cpu_place;
  paddle::platform::CUDAPlace gpu_place(0);
  paddle::platform::CUDADeviceContext context(gpu_place);

  // fp16 GEMM in cublas requires GPU compute capability >= 53
  if (context.GetComputeCapability() < 53) {
    return;
  }

  // input1 = [[0, 1, 2], [3, 4, 5]]
  paddle::platform::float16* input1_ptr =
      input1.mutable_data<paddle::platform::float16>({2, 3}, cpu_place);
  fill_fp16_data(input1_ptr, input1.numel(), {0, 1, 2, 3, 4, 5});

  paddle::framework::TensorCopySync(input1, gpu_place, &input1_gpu);
  paddle::framework::TensorCopySync(input1, gpu_place, &input2_gpu);

  out_gpu.mutable_data<paddle::platform::float16>({2, 2}, gpu_place);

  // out = 1 * input1 * input2^T + 0 * out
  paddle::operators::math::matmul<paddle::platform::CUDADeviceContext,
                                  paddle::platform::float16>(
      context, input1_gpu, false, input2_gpu, true,
      paddle::platform::float16(1), &out_gpu, paddle::platform::float16(0));

  paddle::framework::TensorCopySync(out_gpu, cpu_place, &out);

  paddle::platform::float16* out_ptr = out.data<paddle::platform::float16>();
  context.Wait();
  // All expected values are exactly representable in fp16.
  EXPECT_EQ(static_cast<float>(out_ptr[0]), 5);
  EXPECT_EQ(static_cast<float>(out_ptr[1]), 14);
  EXPECT_EQ(static_cast<float>(out_ptr[2]), 14);
  EXPECT_EQ(static_cast<float>(out_ptr[3]), 50);
}

// fp32: out = input1^T * input1 on the GPU (3x2 times 2x3 -> 3x3),
// checked against hand-computed values.
TEST(math_function, trans_mul_notrans_fp32) {
  paddle::framework::Tensor input1;
  paddle::framework::Tensor input1_gpu;
  paddle::framework::Tensor input2_gpu;
  paddle::framework::Tensor out_gpu;
  paddle::framework::Tensor out;

  paddle::platform::CPUPlace cpu_place;
  paddle::platform::CUDAPlace gpu_place(0);
  paddle::platform::CUDADeviceContext context(gpu_place);

  // input1 = [[0, 1, 2], [3, 4, 5]]
  float* input1_ptr = input1.mutable_data<float>({2, 3}, cpu_place);
  float arr[6] = {0, 1, 2, 3, 4, 5};
  memcpy(input1_ptr, arr, 6 * sizeof(float));

  paddle::framework::TensorCopySync(input1, gpu_place, &input1_gpu);
  paddle::framework::TensorCopySync(input1, gpu_place, &input2_gpu);

  out_gpu.mutable_data<float>({3, 3}, gpu_place);

  // out = 1 * input1^T * input2 + 0 * out
  paddle::operators::math::matmul<paddle::platform::CUDADeviceContext, float>(
      context, input1_gpu, true, input2_gpu, false, 1, &out_gpu, 0);

  paddle::framework::TensorCopySync(out_gpu, cpu_place, &out);

  float* out_ptr = out.data<float>();
  context.Wait();
  EXPECT_EQ(out_ptr[0], 9);
  EXPECT_EQ(out_ptr[1], 12);
  EXPECT_EQ(out_ptr[2], 15);
  EXPECT_EQ(out_ptr[3], 12);
  EXPECT_EQ(out_ptr[4], 17);
  EXPECT_EQ(out_ptr[5], 22);
  EXPECT_EQ(out_ptr[6], 15);
  EXPECT_EQ(out_ptr[7], 22);
  EXPECT_EQ(out_ptr[8], 29);
}

137
// fp16: out = input1^T * input1 on the GPU; skipped on devices without
// cublas fp16 GEMM support.
TEST(math_function, trans_mul_notrans_fp16) {
  paddle::framework::Tensor input1;
  paddle::framework::Tensor input1_gpu;
  paddle::framework::Tensor input2_gpu;
  paddle::framework::Tensor out_gpu;
  paddle::framework::Tensor out;

  paddle::platform::CPUPlace cpu_place;
  paddle::platform::CUDAPlace gpu_place(0);
  paddle::platform::CUDADeviceContext context(gpu_place);

  // fp16 GEMM in cublas requires GPU compute capability >= 53
  if (context.GetComputeCapability() < 53) {
    return;
  }

  // input1 = [[0, 1, 2], [3, 4, 5]]
  paddle::platform::float16* input1_ptr =
      input1.mutable_data<paddle::platform::float16>({2, 3}, cpu_place);
  fill_fp16_data(input1_ptr, input1.numel(), {0, 1, 2, 3, 4, 5});

  paddle::framework::TensorCopySync(input1, gpu_place, &input1_gpu);
  paddle::framework::TensorCopySync(input1, gpu_place, &input2_gpu);

  out_gpu.mutable_data<paddle::platform::float16>({3, 3}, gpu_place);

  // out = 1 * input1^T * input2 + 0 * out
  paddle::operators::math::matmul<paddle::platform::CUDADeviceContext,
                                  paddle::platform::float16>(
      context, input1_gpu, true, input2_gpu, false,
      paddle::platform::float16(1), &out_gpu, paddle::platform::float16(0));

  paddle::framework::TensorCopySync(out_gpu, cpu_place, &out);

  paddle::platform::float16* out_ptr = out.data<paddle::platform::float16>();
  context.Wait();
  // All expected values are exactly representable in fp16.
  EXPECT_EQ(static_cast<float>(out_ptr[0]), 9);
  EXPECT_EQ(static_cast<float>(out_ptr[1]), 12);
  EXPECT_EQ(static_cast<float>(out_ptr[2]), 15);
  EXPECT_EQ(static_cast<float>(out_ptr[3]), 12);
  EXPECT_EQ(static_cast<float>(out_ptr[4]), 17);
  EXPECT_EQ(static_cast<float>(out_ptr[5]), 22);
  EXPECT_EQ(static_cast<float>(out_ptr[6]), 15);
  EXPECT_EQ(static_cast<float>(out_ptr[7]), 22);
  EXPECT_EQ(static_cast<float>(out_ptr[8]), 29);
}

Y
Yu Yang 已提交
182 183 184 185 186 187 188
// Convenience wrapper: returns a BLAS handle of element type T bound to
// the given CUDA device context.
template <typename T>
inline paddle::operators::math::BlasT<paddle::platform::CUDADeviceContext, T>
GetBlas(const paddle::platform::CUDADeviceContext& context) {
  using DeviceContext = paddle::platform::CUDADeviceContext;
  return paddle::operators::math::GetBlas<DeviceContext, T>(context);
}

189
// fp32 GEMM on sub-matrix views: C[:, 1:] += A * B[:, 1:], addressed via
// pointer offsets and leading dimensions (lda=3, ldb=4, ldc=4).
TEST(math_function, gemm_notrans_cublas_fp32) {
  paddle::framework::Tensor input1;
  paddle::framework::Tensor input2;
  paddle::framework::Tensor input3;
  paddle::framework::Tensor input1_gpu;
  paddle::framework::Tensor input2_gpu;
  paddle::framework::Tensor input3_gpu;

  paddle::platform::CPUPlace cpu_place;
  paddle::platform::CUDAPlace gpu_place(0);
  paddle::platform::CUDADeviceContext context(gpu_place);

  int m = 2;
  int n = 3;
  int k = 3;
  float* input1_ptr = input1.mutable_data<float>({2, 3}, cpu_place);
  float arr1[6] = {0, 1, 2, 3, 4, 5};
  memcpy(input1_ptr, arr1, 6 * sizeof(float));
  float* input2_ptr = input2.mutable_data<float>({3, 4}, cpu_place);
  float arr2[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
  memcpy(input2_ptr, arr2, 12 * sizeof(float));
  float* input3_ptr = input3.mutable_data<float>({2, 4}, cpu_place);
  float arr3[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  memcpy(input3_ptr, arr3, 8 * sizeof(float));

  paddle::framework::TensorCopySync(input1, gpu_place, &input1_gpu);
  paddle::framework::TensorCopySync(input2, gpu_place, &input2_gpu);
  paddle::framework::TensorCopySync(input3, gpu_place, &input3_gpu);
  float* a = input1_gpu.data<float>();
  float* b = input2_gpu.data<float>();
  float* c = input3_gpu.mutable_data<float>(gpu_place);

  // b + 1 / c + 1 skip the first column of each row-major matrix.
  GetBlas<float>(context).GEMM(false, false, m, n, k, 1, a, 3, b + 1, 4, 1,
                               c + 1, 4);

  // NOTE(review): input3_ptr is assumed to still alias input3's CPU
  // buffer after this copy-back (same size, same place) — verify.
  paddle::framework::TensorCopySync(input3_gpu, cpu_place, &input3);

  // numpy code:
  // a = np.arange(6).reshape(2, 3)
  // b = np.arange(12).reshape(3, 4)[:, 1:]
  // c = np.arange(8).reshape(2, 4)[:, 1:]
  // out = np.arange(8).reshape(2, 4)
  // out[:, 1:] = np.dot(a, b) + c
  context.Wait();
  EXPECT_EQ(input3_ptr[0], 0);
  EXPECT_EQ(input3_ptr[1], 24);
  EXPECT_EQ(input3_ptr[2], 28);
  EXPECT_EQ(input3_ptr[3], 32);
  EXPECT_EQ(input3_ptr[4], 4);
  EXPECT_EQ(input3_ptr[5], 73);
  EXPECT_EQ(input3_ptr[6], 86);
  EXPECT_EQ(input3_ptr[7], 99);
}

243
// fp16 variant of gemm_notrans_cublas_fp32: C[:, 1:] += A * B[:, 1:].
// Skipped on devices without cublas fp16 GEMM support.
TEST(math_function, gemm_notrans_cublas_fp16) {
  paddle::framework::Tensor input1;
  paddle::framework::Tensor input2;
  paddle::framework::Tensor input3;
  paddle::framework::Tensor input1_gpu;
  paddle::framework::Tensor input2_gpu;
  paddle::framework::Tensor input3_gpu;

  paddle::platform::CPUPlace cpu_place;
  paddle::platform::CUDAPlace gpu_place(0);
  paddle::platform::CUDADeviceContext context(gpu_place);

  // fp16 GEMM in cublas requires GPU compute capability >= 53
  if (context.GetComputeCapability() < 53) {
    return;
  }

  int m = 2;
  int n = 3;
  int k = 3;
  paddle::platform::float16* input1_ptr =
      input1.mutable_data<paddle::platform::float16>({2, 3}, cpu_place);
  fill_fp16_data(input1_ptr, input1.numel(), {0, 1, 2, 3, 4, 5});
  paddle::platform::float16* input2_ptr =
      input2.mutable_data<paddle::platform::float16>({3, 4}, cpu_place);
  fill_fp16_data(input2_ptr, input2.numel(),
                 {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11});
  paddle::platform::float16* input3_ptr =
      input3.mutable_data<paddle::platform::float16>({2, 4}, cpu_place);
  fill_fp16_data(input3_ptr, input3.numel(), {0, 1, 2, 3, 4, 5, 6, 7});

  paddle::framework::TensorCopySync(input1, gpu_place, &input1_gpu);
  paddle::framework::TensorCopySync(input2, gpu_place, &input2_gpu);
  paddle::framework::TensorCopySync(input3, gpu_place, &input3_gpu);
  paddle::platform::float16* a = input1_gpu.data<paddle::platform::float16>();
  paddle::platform::float16* b = input2_gpu.data<paddle::platform::float16>();
  paddle::platform::float16* c =
      input3_gpu.mutable_data<paddle::platform::float16>(gpu_place);

  // float16 fully qualified — no using-declaration exists in this file.
  // b + 1 / c + 1 skip the first column of each row-major matrix.
  GetBlas<paddle::platform::float16>(context).GEMM(
      false, false, m, n, k, paddle::platform::float16(1), a, 3, b + 1, 4,
      paddle::platform::float16(1), c + 1, 4);

  paddle::framework::TensorCopySync(input3_gpu, cpu_place, &input3);

  // numpy code:
  // a = np.arange(6).reshape(2, 3)
  // b = np.arange(12).reshape(3, 4)[:, 1:]
  // c = np.arange(8).reshape(2, 4)[:, 1:]
  // out = np.arange(8).reshape(2, 4)
  // out[:, 1:] = np.dot(a, b) + c
  context.Wait();
  EXPECT_EQ(static_cast<float>(input3_ptr[0]), 0);
  EXPECT_EQ(static_cast<float>(input3_ptr[1]), 24);
  EXPECT_EQ(static_cast<float>(input3_ptr[2]), 28);
  EXPECT_EQ(static_cast<float>(input3_ptr[3]), 32);
  EXPECT_EQ(static_cast<float>(input3_ptr[4]), 4);
  EXPECT_EQ(static_cast<float>(input3_ptr[5]), 73);
  EXPECT_EQ(static_cast<float>(input3_ptr[6]), 86);
  EXPECT_EQ(static_cast<float>(input3_ptr[7]), 99);
}

// fp32 GEMM with B transposed: input2 holds B^T (4x3, column-sliced via
// b + 3 with ldb=3), so C[:, 1:] += A * B[:, 1:] as in the notrans test.
TEST(math_function, gemm_trans_cublas_fp32) {
  paddle::framework::Tensor input1;
  paddle::framework::Tensor input2;
  paddle::framework::Tensor input3;
  paddle::framework::Tensor input1_gpu;
  paddle::framework::Tensor input2_gpu;
  paddle::framework::Tensor input3_gpu;

  paddle::platform::CPUPlace cpu_place;
  paddle::platform::CUDAPlace gpu_place(0);
  paddle::platform::CUDADeviceContext context(gpu_place);

  int m = 2;
  int n = 3;
  int k = 3;
  float* input1_ptr = input1.mutable_data<float>({2, 3}, cpu_place);
  float arr1[6] = {0, 1, 2, 3, 4, 5};
  memcpy(input1_ptr, arr1, 6 * sizeof(float));
  // input2 is the transpose of the 3x4 matrix used in the notrans test.
  float* input2_ptr = input2.mutable_data<float>({4, 3}, cpu_place);
  float arr2[12] = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
  memcpy(input2_ptr, arr2, 12 * sizeof(float));
  float* input3_ptr = input3.mutable_data<float>({2, 4}, cpu_place);
  float arr3[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  memcpy(input3_ptr, arr3, 8 * sizeof(float));

  paddle::framework::TensorCopySync(input1, gpu_place, &input1_gpu);
  paddle::framework::TensorCopySync(input2, gpu_place, &input2_gpu);
  paddle::framework::TensorCopySync(input3, gpu_place, &input3_gpu);
  float* a = input1_gpu.data<float>();
  float* b = input2_gpu.data<float>();
  float* c = input3_gpu.mutable_data<float>(gpu_place);

  // b + 3 skips the first row of B^T (== first column of B); ldb=3.
  GetBlas<float>(context).GEMM(false, true, m, n, k, 1, a, 3, b + 3, 3, 1,
                               c + 1, 4);

  paddle::framework::TensorCopySync(input3_gpu, cpu_place, &input3);

  // Same expected values as gemm_notrans_cublas_fp32.
  context.Wait();
  EXPECT_EQ(input3_ptr[0], 0);
  EXPECT_EQ(input3_ptr[1], 24);
  EXPECT_EQ(input3_ptr[2], 28);
  EXPECT_EQ(input3_ptr[3], 32);
  EXPECT_EQ(input3_ptr[4], 4);
  EXPECT_EQ(input3_ptr[5], 73);
  EXPECT_EQ(input3_ptr[6], 86);
  EXPECT_EQ(input3_ptr[7], 99);
}

// fp16 GEMM with B transposed. Skipped on devices without cublas fp16
// GEMM support.
TEST(math_function, gemm_trans_cublas_fp16) {
  paddle::framework::Tensor input1;
  paddle::framework::Tensor input2;
  paddle::framework::Tensor input3;
  paddle::framework::Tensor input1_gpu;
  paddle::framework::Tensor input2_gpu;
  paddle::framework::Tensor input3_gpu;

  paddle::platform::CPUPlace cpu_place;
  paddle::platform::CUDAPlace gpu_place(0);
  paddle::platform::CUDADeviceContext context(gpu_place);

  // fp16 GEMM in cublas requires GPU compute capability >= 53
  if (context.GetComputeCapability() < 53) {
    return;
  }

  int m = 2;
  int n = 3;
  int k = 3;
  paddle::platform::float16* input1_ptr =
      input1.mutable_data<paddle::platform::float16>({2, 3}, cpu_place);
  fill_fp16_data(input1_ptr, input1.numel(), {0, 1, 2, 3, 4, 5});
  // input2 is the transpose of the 3x4 matrix used in the notrans test.
  paddle::platform::float16* input2_ptr =
      input2.mutable_data<paddle::platform::float16>({4, 3}, cpu_place);
  fill_fp16_data(input2_ptr, input2.numel(),
                 {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11});
  paddle::platform::float16* input3_ptr =
      input3.mutable_data<paddle::platform::float16>({2, 4}, cpu_place);
  fill_fp16_data(input3_ptr, input3.numel(), {0, 1, 2, 3, 4, 5, 6, 7});

  paddle::framework::TensorCopySync(input1, gpu_place, &input1_gpu);
  paddle::framework::TensorCopySync(input2, gpu_place, &input2_gpu);
  paddle::framework::TensorCopySync(input3, gpu_place, &input3_gpu);
  paddle::platform::float16* a = input1_gpu.data<paddle::platform::float16>();
  paddle::platform::float16* b = input2_gpu.data<paddle::platform::float16>();
  paddle::platform::float16* c =
      input3_gpu.mutable_data<paddle::platform::float16>(gpu_place);

  // Single GEMM call: C = 1 * A * B^T + 1 * C. A leftover legacy
  // math::gemm call with identical arguments was removed — with beta=1 it
  // accumulated the product into C a second time, breaking the expected
  // values below.
  GetBlas<paddle::platform::float16>(context).GEMM(
      false, true, m, n, k, paddle::platform::float16(1), a, 3, b + 3, 3,
      paddle::platform::float16(1), c + 1, 4);

  paddle::framework::TensorCopySync(input3_gpu, cpu_place, &input3);

  // Same expected values as gemm_notrans_cublas_fp16.
  context.Wait();
  EXPECT_EQ(static_cast<float>(input3_ptr[0]), 0);
  EXPECT_EQ(static_cast<float>(input3_ptr[1]), 24);
  EXPECT_EQ(static_cast<float>(input3_ptr[2]), 28);
  EXPECT_EQ(static_cast<float>(input3_ptr[3]), 32);
  EXPECT_EQ(static_cast<float>(input3_ptr[4]), 4);
  EXPECT_EQ(static_cast<float>(input3_ptr[5]), 73);
  EXPECT_EQ(static_cast<float>(input3_ptr[6]), 86);
  EXPECT_EQ(static_cast<float>(input3_ptr[7]), 99);
}
410 411 412

template <typename T>
void GemvTest(int m, int n, bool trans) {
413 414 415
  paddle::framework::Tensor mat_a;
  paddle::framework::Tensor vec_b;
  paddle::framework::Tensor vec_c;
416

417 418 419
  paddle::platform::CPUPlace cpu_place;
  paddle::platform::CUDAPlace gpu_place(0);
  paddle::platform::CUDADeviceContext context(gpu_place);
420 421 422 423 424

  T* data_a = mat_a.mutable_data<T>({m, n}, cpu_place);
  T* data_b = vec_b.mutable_data<T>({trans ? m : n}, cpu_place);
  T* data_c = vec_c.mutable_data<T>({trans ? n : m}, cpu_place);

425 426 427
  paddle::framework::Tensor g_mat_a;
  paddle::framework::Tensor g_vec_b;
  paddle::framework::Tensor g_vec_c;
428 429 430
  T* g_data_a = g_mat_a.mutable_data<T>(mat_a.dims(), gpu_place);
  T* g_data_b = g_vec_b.mutable_data<T>(vec_b.dims(), gpu_place);
  T* g_data_c = g_vec_c.mutable_data<T>(vec_c.dims(), gpu_place);
431 432 433 434 435 436 437 438

  for (int i = 0; i < mat_a.numel(); ++i) {
    data_a[i] = static_cast<T>(i);
  }
  for (int i = 0; i < vec_b.numel(); ++i) {
    data_b[i] = static_cast<T>(i);
  }

439 440
  paddle::framework::TensorCopySync(mat_a, gpu_place, &g_mat_a);
  paddle::framework::TensorCopySync(vec_b, gpu_place, &g_vec_b);
441

442
  paddle::operators::math::gemv<paddle::platform::CUDADeviceContext, T>(
443 444 445
      context, trans, static_cast<int>(m), static_cast<int>(n), 1., g_data_a,
      g_data_b, 0., g_data_c);

446
  paddle::framework::TensorCopySync(g_vec_c, cpu_place, &vec_c);
447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472

  if (!trans) {
    for (int i = 0; i < m; ++i) {
      T sum = 0.0;
      for (int j = 0; j < n; ++j) {
        sum += data_a[i * n + j] * data_b[j];
      }
      ASSERT_FLOAT_EQ(data_c[i], sum);
    }
  } else {
    for (int i = 0; i < n; ++i) {
      T sum = 0.0;
      for (int j = 0; j < m; ++j) {
        sum += data_a[j * n + i] * data_b[j];
      }
      ASSERT_FLOAT_EQ(data_c[i], sum);
    }
  }
}

// Exercises both the non-transposed and transposed GEMV paths, for
// float and double, in the same order as before.
TEST(math_function, gemv) {
  for (bool trans : {false, true}) {
    GemvTest<float>(3, 13, trans);
    GemvTest<double>(3, 13, trans);
  }
}