/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <gtest/gtest.h>
#include <memory>

#include "paddle/phi/api/backward/backward_api.h"
#include "paddle/phi/api/include/api.h"

#include "paddle/phi/api/lib/utils/allocator.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/copy_kernel.h"

// See Note [ Why still include the fluid headers? ]
#include "paddle/fluid/platform/device_context.h"
namespace paddle {
namespace tests {

namespace framework = paddle::framework;
using DDim = phi::DDim;

TEST(API, matmul_cpu) {
  // 1. create tensor
  const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(
      paddle::platform::CPUPlace());
  auto dense_x = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32,
                           phi::make_ddim({3, 3}),
                           phi::DataLayout::NCHW));

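  // Grab a mutable host pointer so the input values can be written directly.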
  auto* dense_x_data =
      dense_x->mutable_data<float>(paddle::platform::CPUPlace());

  auto dense_y = std::make_shared<phi::DenseTensor>(
      alloc.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32,
                           phi::make_ddim({3, 3}),
                           phi::DataLayout::NCHW));
  auto* dense_y_data =
      dense_y->mutable_data<float>(paddle::platform::CPUPlace());

  for (size_t i = 0; i < 9; ++i) {
    dense_x_data[i] = 1.0;
    dense_y_data[i] = 2.0;
  }
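  // With x filled with 1.0 and y with 2.0, each element of x * y is
  // 3 * (1.0 * 2.0) = 6.0.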
  std::vector<float> sum(9, 6.0);

  paddle::experimental::Tensor x(dense_x);
  paddle::experimental::Tensor y(dense_y);

  // 2. test API
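  // The two trailing flags are transpose_x and transpose_y; both false
  // computes the plain product.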
  auto out = paddle::experimental::matmul(x, y, false, false);

  // 3. check result
  ASSERT_EQ(out.dims().size(), 2);
  ASSERT_EQ(out.dims()[0], 3);
  ASSERT_EQ(out.dims()[1], 3);
  ASSERT_EQ(out.numel(), 9);
  ASSERT_EQ(out.type(), phi::DataType::FLOAT32);
  ASSERT_EQ(out.layout(), phi::DataLayout::NCHW);
  ASSERT_EQ(out.initialized(), true);

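  // Unwrap the public Tensor to reach the underlying DenseTensor buffer.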
  auto dense_out = std::dynamic_pointer_cast<phi::DenseTensor>(out.impl());

  for (size_t i = 0; i < 9; i++) {
    ASSERT_NEAR(sum[i], dense_out->data<float>()[i], 1e-6f);
  }
}

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
TEST(API, matmul_cuda) {
  // Prepare CPU Dense Tensor
  const auto alloc_cpu =
      std::make_unique<paddle::experimental::DefaultAllocator>(
          paddle::platform::CPUPlace());
  auto ref_x = std::make_shared<phi::DenseTensor>(
      alloc_cpu.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32,
                           phi::make_ddim({3, 3}),
                           phi::DataLayout::NCHW));

  auto* ref_x_data = ref_x->mutable_data<float>(paddle::platform::CPUPlace());

  auto ref_y = std::make_shared<phi::DenseTensor>(
      alloc_cpu.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32,
                           phi::make_ddim({3, 3}),
                           phi::DataLayout::NCHW));
  auto* ref_y_data = ref_y->mutable_data<float>(paddle::platform::CPUPlace());

  for (size_t i = 0; i < 9; ++i) {
    ref_x_data[i] = 1.0;
    ref_y_data[i] = 2.0;
  }
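  // Same operands as the CPU test, so every element of the product is again 6.0.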
  std::vector<float> sum(9, 6.0);

  // 1. create tensor
  const auto alloc_cuda =
      std::make_unique<paddle::experimental::DefaultAllocator>(
          paddle::platform::CUDAPlace());
  auto dense_x = std::make_shared<phi::DenseTensor>(
      alloc_cuda.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32,
                           phi::make_ddim({3, 3}),
                           phi::DataLayout::NCHW));

  auto dense_y = std::make_shared<phi::DenseTensor>(
      alloc_cuda.get(),
      phi::DenseTensorMeta(phi::DataType::FLOAT32,
                           phi::make_ddim({3, 3}),
                           phi::DataLayout::NCHW));

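  // Fetch the cached device context for the default CUDA device from the
  // process-global pool.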
  auto& pool = paddle::platform::DeviceContextPool::Instance();
  auto place = paddle::platform::CUDAPlace();
  auto* dev_ctx = static_cast<const phi::GPUContext*>(pool.GetByPlace(place));

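  // Stage the host inputs on the GPU; the `false` argument requests a
  // non-blocking copy on the context's stream.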
  phi::Copy(*dev_ctx, *ref_x.get(), phi::GPUPlace(), false, dense_x.get());
  phi::Copy(*dev_ctx, *ref_y.get(), phi::GPUPlace(), false, dense_y.get());

  paddle::experimental::Tensor x(dense_x);
  paddle::experimental::Tensor y(dense_y);

  // 2. test API
  auto out = paddle::experimental::matmul(x, y, false, false);

  // 3. check result
  ASSERT_EQ(out.dims().size(), 2);
  ASSERT_EQ(out.dims()[0], 3);
  ASSERT_EQ(out.dims()[1], 3);
  ASSERT_EQ(out.numel(), 9);
  ASSERT_EQ(out.type(), phi::DataType::FLOAT32);
  ASSERT_EQ(out.layout(), phi::DataLayout::NCHW);
  ASSERT_EQ(out.initialized(), true);

  auto dense_out = std::dynamic_pointer_cast<phi::DenseTensor>(out.impl());

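  // The result lives in device memory; copy it back to a host tensor before
  // checking values.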
  auto ref_out = std::make_shared<phi::DenseTensor>(
      alloc_cpu.get(),
      phi::DenseTensorMeta(
          phi::DataType::FLOAT32, out.dims(), phi::DataLayout::NCHW));

  phi::Copy(*dev_ctx, *dense_out.get(), phi::CPUPlace(), false, ref_out.get());

  for (size_t i = 0; i < 9; i++) {
    ASSERT_NEAR(sum[i], ref_out->data<float>()[i], 1e-6f);
  }
}

#endif

TEST(API, matmul_double_grad) {
  // 1. create tensor
  auto x = paddle::experimental::full({3, 3}, 1.0);
  auto y = paddle::experimental::full({3, 3}, 2.0);
  auto out_grad = paddle::experimental::full({3, 3}, 2.0);
  auto dx_grad = paddle::experimental::full({3, 3}, 2.0);

  // 2. test API
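  // dx_grad supplies the second-order gradient of x; the empty initializer
  // list leaves y's second-order gradient unset.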
  const auto out = paddle::experimental::matmul_double_grad(
      x, y, out_grad, dx_grad, {}, false, false);
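  // The three output slots hold the gradients w.r.t. x, y, and out_grad,
  // each wrapped in a one-element vector (as the asserts below verify).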

  // 3. check result
  ASSERT_EQ(out.size(), 3UL);
  ASSERT_EQ(out[0].size(), 1UL);
  ASSERT_EQ(out[1].size(), 1UL);
  ASSERT_EQ(out[2].size(), 1UL);
  ASSERT_EQ(out[0][0].dims()[1], 3);
  ASSERT_EQ(out[0][0].numel(), 9);
  ASSERT_EQ(out[1][0].numel(), 9);
  ASSERT_EQ(out[2][0].numel(), 9);
  ASSERT_EQ(out[0][0].type(), phi::DataType::FLOAT32);
  ASSERT_EQ(out[0][0].layout(), phi::DataLayout::NCHW);
  ASSERT_EQ(out[1][0].initialized(), true);
  ASSERT_EQ(out[2][0].initialized(), true);
}

}  // namespace tests
}  // namespace paddle