//  Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "gtest/gtest.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/device_context.h"

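// Converts each float in `data` to float16 and writes it into `in_ptr`;
// `size` must equal data.size().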
void fill_fp16_data(paddle::platform::float16* in_ptr, size_t size,
                    const std::vector<float>& data) {
  PADDLE_ENFORCE_EQ(size, data.size());
  for (size_t i = 0; i < data.size(); ++i) {
    in_ptr[i] = paddle::platform::float16(data[i]);
  }
}

TEST(math_function, notrans_mul_trans_fp32) {
  using namespace paddle::framework;  // NOLINT
  using namespace paddle::platform;   // NOLINT

  Tensor input1;
  Tensor input1_gpu;
  Tensor input2_gpu;
  Tensor out_gpu;
  Tensor out;

  CPUPlace cpu_place;
  CUDAPlace gpu_place(0);
  CUDADeviceContext context(gpu_place);

  float* input1_ptr = input1.mutable_data<float>({2, 3}, cpu_place);
  float arr[6] = {0, 1, 2, 3, 4, 5};
  memcpy(input1_ptr, arr, 6 * sizeof(float));

  TensorCopySync(input1, gpu_place, &input1_gpu);
  TensorCopySync(input1, gpu_place, &input2_gpu);

  out_gpu.mutable_data<float>({2, 2}, gpu_place);

  paddle::operators::math::matmul<CUDADeviceContext, float>(
      context, input1_gpu, false, input2_gpu, true, 1, &out_gpu, 0);

  TensorCopySync(out_gpu, cpu_place, &out);

  float* out_ptr = out.data<float>();
  context.Wait();
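  // out = input1 * input1^T with input1 = [[0, 1, 2], [3, 4, 5]],
  // i.e. out = [[5, 14], [14, 50]].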
  EXPECT_EQ(out_ptr[0], 5);
  EXPECT_EQ(out_ptr[1], 14);
  EXPECT_EQ(out_ptr[2], 14);
  EXPECT_EQ(out_ptr[3], 50);
}

TEST(math_function, notrans_mul_trans_fp16) {
  using namespace paddle::framework;  // NOLINT
  using namespace paddle::platform;   // NOLINT

  Tensor input1;
  Tensor input1_gpu;
  Tensor input2_gpu;
  Tensor out_gpu;
  Tensor out;

  CPUPlace cpu_place;
  CUDAPlace gpu_place(0);
  CUDADeviceContext context(gpu_place);

  // fp16 GEMM in cublas requires GPU compute capability >= 53
  if (context.GetComputeCapability() < 53) {
    return;
  }

  float16* input1_ptr = input1.mutable_data<float16>({2, 3}, cpu_place);
  fill_fp16_data(input1_ptr, input1.numel(), {0, 1, 2, 3, 4, 5});

  TensorCopySync(input1, gpu_place, &input1_gpu);
  TensorCopySync(input1, gpu_place, &input2_gpu);

  out_gpu.mutable_data<float16>({2, 2}, gpu_place);

  paddle::operators::math::matmul<CUDADeviceContext, float16>(
      context, input1_gpu, false, input2_gpu, true, float16(1), &out_gpu,
      float16(0));

  TensorCopySync(out_gpu, cpu_place, &out);

  float16* out_ptr = out.data<float16>();
  context.Wait();
  EXPECT_EQ(static_cast<float>(out_ptr[0]), 5);
  EXPECT_EQ(static_cast<float>(out_ptr[1]), 14);
  EXPECT_EQ(static_cast<float>(out_ptr[2]), 14);
  EXPECT_EQ(static_cast<float>(out_ptr[3]), 50);
}

TEST(math_function, trans_mul_notrans_fp32) {
  using namespace paddle::framework;  // NOLINT
  using namespace paddle::platform;   // NOLINT

  Tensor input1;
  Tensor input1_gpu;
  Tensor input2_gpu;
  Tensor out_gpu;
  Tensor out;

  CPUPlace cpu_place;
  CUDAPlace gpu_place(0);
  CUDADeviceContext context(gpu_place);

  float* input1_ptr = input1.mutable_data<float>({2, 3}, cpu_place);
  float arr[6] = {0, 1, 2, 3, 4, 5};
  memcpy(input1_ptr, arr, 6 * sizeof(float));

  TensorCopySync(input1, gpu_place, &input1_gpu);
  TensorCopySync(input1, gpu_place, &input2_gpu);

  out_gpu.mutable_data<float>({3, 3}, gpu_place);

  paddle::operators::math::matmul<CUDADeviceContext, float>(
      context, input1_gpu, true, input2_gpu, false, 1, &out_gpu, 0);

  TensorCopySync(out_gpu, cpu_place, &out);

  float* out_ptr = out.data<float>();
  context.Wait();
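  // out = input1^T * input1 with input1 = [[0, 1, 2], [3, 4, 5]],
  // i.e. out = [[9, 12, 15], [12, 17, 22], [15, 22, 29]].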
  EXPECT_EQ(out_ptr[0], 9);
  EXPECT_EQ(out_ptr[1], 12);
  EXPECT_EQ(out_ptr[2], 15);
  EXPECT_EQ(out_ptr[3], 12);
  EXPECT_EQ(out_ptr[4], 17);
  EXPECT_EQ(out_ptr[5], 22);
  EXPECT_EQ(out_ptr[6], 15);
  EXPECT_EQ(out_ptr[7], 22);
  EXPECT_EQ(out_ptr[8], 29);
}

TEST(math_function, trans_mul_notrans_fp16) {
  using namespace paddle::framework;  // NOLINT
  using namespace paddle::platform;   // NOLINT

  Tensor input1;
  Tensor input1_gpu;
  Tensor input2_gpu;
  Tensor out_gpu;
  Tensor out;

  CPUPlace cpu_place;
  CUDAPlace gpu_place(0);
  CUDADeviceContext context(gpu_place);

  // fp16 GEMM in cublas requires GPU compute capability >= 53
  if (context.GetComputeCapability() < 53) {
    return;
  }

  float16* input1_ptr = input1.mutable_data<float16>({2, 3}, cpu_place);
  fill_fp16_data(input1_ptr, input1.numel(), {0, 1, 2, 3, 4, 5});

  TensorCopySync(input1, gpu_place, &input1_gpu);
  TensorCopySync(input1, gpu_place, &input2_gpu);

  out_gpu.mutable_data<float16>({3, 3}, gpu_place);

  paddle::operators::math::matmul<CUDADeviceContext, float16>(
      context, input1_gpu, true, input2_gpu, false, float16(1), &out_gpu,
      float16(0));

  TensorCopySync(out_gpu, cpu_place, &out);

  float16* out_ptr = out.data<float16>();
  context.Wait();
  EXPECT_EQ(static_cast<float>(out_ptr[0]), 9);
  EXPECT_EQ(static_cast<float>(out_ptr[1]), 12);
  EXPECT_EQ(static_cast<float>(out_ptr[2]), 15);
  EXPECT_EQ(static_cast<float>(out_ptr[3]), 12);
  EXPECT_EQ(static_cast<float>(out_ptr[4]), 17);
  EXPECT_EQ(static_cast<float>(out_ptr[5]), 22);
  EXPECT_EQ(static_cast<float>(out_ptr[6]), 15);
  EXPECT_EQ(static_cast<float>(out_ptr[7]), 22);
  EXPECT_EQ(static_cast<float>(out_ptr[8]), 29);
}

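// Shorthand for fetching the Blas wrapper bound to a CUDA device context.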
template <typename T>
inline paddle::operators::math::BlasT<paddle::platform::CUDADeviceContext, T>
GetBlas(const paddle::platform::CUDADeviceContext& context) {
  return paddle::operators::math::GetBlas<paddle::platform::CUDADeviceContext,
                                          T>(context);
}

TEST(math_function, gemm_notrans_cublas_fp32) {
  using namespace paddle::framework;  // NOLINT
  using namespace paddle::platform;   // NOLINT

  Tensor input1;
  Tensor input2;
  Tensor input3;
  Tensor input1_gpu;
  Tensor input2_gpu;
  Tensor input3_gpu;

  CPUPlace cpu_place;
  CUDAPlace gpu_place(0);
  CUDADeviceContext context(gpu_place);

  int m = 2;
  int n = 3;
  int k = 3;
  float* input1_ptr = input1.mutable_data<float>({2, 3}, cpu_place);
  float arr1[6] = {0, 1, 2, 3, 4, 5};
  memcpy(input1_ptr, arr1, 6 * sizeof(float));
  float* input2_ptr = input2.mutable_data<float>({3, 4}, cpu_place);
  float arr2[12] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
  memcpy(input2_ptr, arr2, 12 * sizeof(float));
  float* input3_ptr = input3.mutable_data<float>({2, 4}, cpu_place);
  float arr3[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  memcpy(input3_ptr, arr3, 8 * sizeof(float));

  TensorCopySync(input1, gpu_place, &input1_gpu);
  TensorCopySync(input2, gpu_place, &input2_gpu);
  TensorCopySync(input3, gpu_place, &input3_gpu);
  float* a = input1_gpu.data<float>();
  float* b = input2_gpu.data<float>();
  float* c = input3_gpu.mutable_data<float>(gpu_place);

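  // Leading dimensions select sub-matrices: b + 1 with ldb 4 is the 3x3
  // sub-matrix of input2 that skips its first column; c + 1 with ldc 4
  // updates columns 1..3 of input3 and leaves column 0 untouched.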
  GetBlas<float>(context).GEMM(false, false, m, n, k, 1, a, 3, b + 1, 4, 1,
                               c + 1, 4);

  TensorCopySync(input3_gpu, cpu_place, &input3);

  // numpy code:
  // a = np.arange(6).reshape(2, 3)
  // b = np.arange(12).reshape(3, 4)[:, 1:]
  // c = np.arange(8).reshape(2, 4)[:, 1:]
  // out = np.arange(8).reshape(2, 4)
  // out[:, 1:] = np.dot(a, b) + c
  context.Wait();
  EXPECT_EQ(input3_ptr[0], 0);
  EXPECT_EQ(input3_ptr[1], 24);
  EXPECT_EQ(input3_ptr[2], 28);
  EXPECT_EQ(input3_ptr[3], 32);
  EXPECT_EQ(input3_ptr[4], 4);
  EXPECT_EQ(input3_ptr[5], 73);
  EXPECT_EQ(input3_ptr[6], 86);
  EXPECT_EQ(input3_ptr[7], 99);
}

TEST(math_function, gemm_notrans_cublas_fp16) {
  using namespace paddle::framework;  // NOLINT
  using namespace paddle::platform;   // NOLINT

  Tensor input1;
  Tensor input2;
  Tensor input3;
  Tensor input1_gpu;
  Tensor input2_gpu;
  Tensor input3_gpu;

  CPUPlace cpu_place;
  CUDAPlace gpu_place(0);
  CUDADeviceContext context(gpu_place);

  // fp16 GEMM in cublas requires GPU compute capability >= 53
  if (context.GetComputeCapability() < 53) {
    return;
  }

  int m = 2;
  int n = 3;
  int k = 3;
  float16* input1_ptr = input1.mutable_data<float16>({2, 3}, cpu_place);
  fill_fp16_data(input1_ptr, input1.numel(), {0, 1, 2, 3, 4, 5});
  float16* input2_ptr = input2.mutable_data<float16>({3, 4}, cpu_place);
  fill_fp16_data(input2_ptr, input2.numel(),
                 {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11});
  float16* input3_ptr = input3.mutable_data<float16>({2, 4}, cpu_place);
  fill_fp16_data(input3_ptr, input3.numel(), {0, 1, 2, 3, 4, 5, 6, 7});

  TensorCopySync(input1, gpu_place, &input1_gpu);
  TensorCopySync(input2, gpu_place, &input2_gpu);
  TensorCopySync(input3, gpu_place, &input3_gpu);
  float16* a = input1_gpu.data<float16>();
  float16* b = input2_gpu.data<float16>();
  float16* c = input3_gpu.mutable_data<float16>(gpu_place);

  GetBlas<float16>(context).GEMM(false, false, m, n, k, float16(1), a, 3, b + 1,
                                 4, float16(1), c + 1, 4);

  TensorCopySync(input3_gpu, cpu_place, &input3);

  // numpy code:
  // a = np.arange(6).reshape(2, 3)
  // b = np.arange(12).reshape(3, 4)[:, 1:]
  // c = np.arange(8).reshape(2, 4)[:, 1:]
  // out = np.arange(8).reshape(2, 4)
  // out[:, 1:] = np.dot(a, b) + c
  context.Wait();
  EXPECT_EQ(static_cast<float>(input3_ptr[0]), 0);
  EXPECT_EQ(static_cast<float>(input3_ptr[1]), 24);
  EXPECT_EQ(static_cast<float>(input3_ptr[2]), 28);
  EXPECT_EQ(static_cast<float>(input3_ptr[3]), 32);
  EXPECT_EQ(static_cast<float>(input3_ptr[4]), 4);
  EXPECT_EQ(static_cast<float>(input3_ptr[5]), 73);
  EXPECT_EQ(static_cast<float>(input3_ptr[6]), 86);
  EXPECT_EQ(static_cast<float>(input3_ptr[7]), 99);
}

TEST(math_function, gemm_trans_cublas_fp32) {
  using namespace paddle::framework;  // NOLINT
  using namespace paddle::platform;   // NOLINT

  Tensor input1;
  Tensor input2;
  Tensor input3;
  Tensor input1_gpu;
  Tensor input2_gpu;
  Tensor input3_gpu;

  CPUPlace cpu_place;
  CUDAPlace gpu_place(0);
  CUDADeviceContext context(gpu_place);

  int m = 2;
  int n = 3;
  int k = 3;
  float* input1_ptr = input1.mutable_data<float>({2, 3}, cpu_place);
  float arr1[6] = {0, 1, 2, 3, 4, 5};
  memcpy(input1_ptr, arr1, 6 * sizeof(float));
  float* input2_ptr = input2.mutable_data<float>({4, 3}, cpu_place);
  float arr2[12] = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
  memcpy(input2_ptr, arr2, 12 * sizeof(float));
  float* input3_ptr = input3.mutable_data<float>({2, 4}, cpu_place);
  float arr3[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  memcpy(input3_ptr, arr3, 8 * sizeof(float));

  TensorCopySync(input1, gpu_place, &input1_gpu);
  TensorCopySync(input2, gpu_place, &input2_gpu);
  TensorCopySync(input3, gpu_place, &input3_gpu);
  float* a = input1_gpu.data<float>();
  float* b = input2_gpu.data<float>();
  float* c = input3_gpu.mutable_data<float>(gpu_place);

  GetBlas<float>(context).GEMM(false, true, m, n, k, 1, a, 3, b + 3, 3, 1,
                               c + 1, 4);

  TensorCopySync(input3_gpu, cpu_place, &input3);

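  // numpy code:
  // a = np.arange(6).reshape(2, 3)
  // b = np.arange(12).reshape(3, 4)[:, 1:]  (input2 stores b transposed)
  // c = np.arange(8).reshape(2, 4)[:, 1:]
  // out = np.arange(8).reshape(2, 4)
  // out[:, 1:] = np.dot(a, b) + c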
  context.Wait();
  EXPECT_EQ(input3_ptr[0], 0);
  EXPECT_EQ(input3_ptr[1], 24);
  EXPECT_EQ(input3_ptr[2], 28);
  EXPECT_EQ(input3_ptr[3], 32);
  EXPECT_EQ(input3_ptr[4], 4);
  EXPECT_EQ(input3_ptr[5], 73);
  EXPECT_EQ(input3_ptr[6], 86);
  EXPECT_EQ(input3_ptr[7], 99);
}

TEST(math_function, gemm_trans_cublas_fp16) {
  using namespace paddle::framework;  // NOLINT
  using namespace paddle::platform;   // NOLINT

  Tensor input1;
  Tensor input2;
  Tensor input3;
  Tensor input1_gpu;
  Tensor input2_gpu;
  Tensor input3_gpu;

  CPUPlace cpu_place;
  CUDAPlace gpu_place(0);
  CUDADeviceContext context(gpu_place);

  // fp16 GEMM in cublas requires GPU compute capability >= 53
  if (context.GetComputeCapability() < 53) {
    return;
  }

  int m = 2;
  int n = 3;
  int k = 3;
  float16* input1_ptr = input1.mutable_data<float16>({2, 3}, cpu_place);
  fill_fp16_data(input1_ptr, input1.numel(), {0, 1, 2, 3, 4, 5});
  float16* input2_ptr = input2.mutable_data<float16>({4, 3}, cpu_place);
  fill_fp16_data(input2_ptr, input2.numel(),
                 {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11});
  float16* input3_ptr = input3.mutable_data<float16>({2, 4}, cpu_place);
  fill_fp16_data(input3_ptr, input3.numel(), {0, 1, 2, 3, 4, 5, 6, 7});

  TensorCopySync(input1, gpu_place, &input1_gpu);
  TensorCopySync(input2, gpu_place, &input2_gpu);
  TensorCopySync(input3, gpu_place, &input3_gpu);
  float16* a = input1_gpu.data<float16>();
  float16* b = input2_gpu.data<float16>();
  float16* c = input3_gpu.mutable_data<float16>(gpu_place);

  GetBlas<float16>(context).GEMM(false, true, m, n, k, float16(1), a, 3, b + 3,
                                 3, float16(1), c + 1, 4);

  TensorCopySync(input3_gpu, cpu_place, &input3);

  context.Wait();
  EXPECT_EQ(static_cast<float>(input3_ptr[0]), 0);
  EXPECT_EQ(static_cast<float>(input3_ptr[1]), 24);
  EXPECT_EQ(static_cast<float>(input3_ptr[2]), 28);
  EXPECT_EQ(static_cast<float>(input3_ptr[3]), 32);
  EXPECT_EQ(static_cast<float>(input3_ptr[4]), 4);
  EXPECT_EQ(static_cast<float>(input3_ptr[5]), 73);
  EXPECT_EQ(static_cast<float>(input3_ptr[6]), 86);
  EXPECT_EQ(static_cast<float>(input3_ptr[7]), 99);
}

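// Runs gemv on the GPU (c = A * b, or c = A^T * b when trans is true) and
// checks the result against a reference computed on the CPU.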
template <typename T>
void GemvTest(int m, int n, bool trans) {
  using namespace paddle::framework;  // NOLINT
  using namespace paddle::platform;   // NOLINT

  Tensor mat_a;
  Tensor vec_b;
  Tensor vec_c;

  CPUPlace cpu_place;
  CUDAPlace gpu_place(0);
  CUDADeviceContext context(gpu_place);

  T* data_a = mat_a.mutable_data<T>({m, n}, cpu_place);
  T* data_b = vec_b.mutable_data<T>({trans ? m : n}, cpu_place);
  T* data_c = vec_c.mutable_data<T>({trans ? n : m}, cpu_place);

  Tensor g_mat_a;
  Tensor g_vec_b;
  Tensor g_vec_c;
  T* g_data_a = g_mat_a.mutable_data<T>(mat_a.dims(), gpu_place);
  T* g_data_b = g_vec_b.mutable_data<T>(vec_b.dims(), gpu_place);
  T* g_data_c = g_vec_c.mutable_data<T>(vec_c.dims(), gpu_place);

  for (int i = 0; i < mat_a.numel(); ++i) {
    data_a[i] = static_cast<T>(i);
  }
  for (int i = 0; i < vec_b.numel(); ++i) {
    data_b[i] = static_cast<T>(i);
  }

  TensorCopySync(mat_a, gpu_place, &g_mat_a);
  TensorCopySync(vec_b, gpu_place, &g_vec_b);

  paddle::operators::math::gemv<CUDADeviceContext, T>(
      context, trans, static_cast<int>(m), static_cast<int>(n), 1., g_data_a,
      g_data_b, 0., g_data_c);

  TensorCopySync(g_vec_c, cpu_place, &vec_c);

  if (!trans) {
    for (int i = 0; i < m; ++i) {
      T sum = 0.0;
      for (int j = 0; j < n; ++j) {
        sum += data_a[i * n + j] * data_b[j];
      }
      ASSERT_FLOAT_EQ(data_c[i], sum);
    }
  } else {
    for (int i = 0; i < n; ++i) {
      T sum = 0.0;
      for (int j = 0; j < m; ++j) {
        sum += data_a[j * n + i] * data_b[j];
      }
      ASSERT_FLOAT_EQ(data_c[i], sum);
    }
  }
}

TEST(math_function, gemv) {
  GemvTest<float>(3, 13, false);
  GemvTest<double>(3, 13, false);
  GemvTest<float>(3, 13, true);
  GemvTest<double>(3, 13, true);
}