/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/mkldnn/matmul_mkldnn_op.h"

#include <tuple>

#include "paddle/fluid/framework/convert_utils.h"

using dnnl::memory;
using dnnl::primitive;
using paddle::framework::DataLayout;
using paddle::framework::ExecutionContext;
using paddle::platform::GetMKLDNNFormat;
using paddle::platform::MKLDNNDeviceContext;
using paddle::platform::MKLDNNFormatForSize;
using paddle::platform::MKLDNNGetDataType;
using paddle::platform::to_void_cast;
using phi::vectorize;
using Tensor = paddle::framework::Tensor;

namespace {

// Reshape a rank-3 tensor from P x M x N to (P * M) x N.
// Identity op if the tensor is not of rank 3.
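// e.g. a {2, 3, 4} tensor is viewed as {6, 4}; the data is not copied.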
static Tensor FoldOuterDims(const Tensor& input) {
  auto output = input;
  auto in_dims = input.dims();
  if (in_dims.size() == 3) {
    output.Resize({in_dims[0] * in_dims[1], in_dims[2]});
  }
  return output;
}

// Reshape a rank-3 tensor from P x M x N to M x (P * N).
// (Warning: This requires transposing data and writes into new memory.)
// Identity op if the tensor is not of rank 3.
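// e.g. a {2, 3, 4} tensor becomes {3, 8}; the elements are reordered by copy.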
template <typename T>
static Tensor FoldFirstAndLastDims(const MKLDNNDeviceContext& dev_ctx,
                                   const Tensor* input) {
  auto input_dims = vectorize(input->dims());
  if (input_dims.size() != 3) {
    return *input;
  }

  Tensor output;
  output.Resize({input_dims[1], input_dims[0], input_dims[2]});

  auto output_dims = vectorize(output.dims());

  memory::data_type input_type = paddle::framework::ToMKLDNNDataType(
      paddle::framework::TransToProtoVarType(input->dtype()));
  paddle::platform::ReorderMKLDNNHandler reorder_handler(
      output_dims, paddle::framework::TransToProtoVarType(input->dtype()),
      input_type, dev_ctx.GetEngine());
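
  // The abc -> bac reorder below swaps the first two axes while copying.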
  auto reorder_src_memory_p = reorder_handler.AcquireSrcMemory(
      memory::format_tag::abc,
      paddle::platform::to_void_cast(input->data<T>()));
  auto reorder_dst_memory_p = reorder_handler.AcquireDstMemory(
      &output, memory::format_tag::bac, dev_ctx.GetPlace());
  auto reorder_p = reorder_handler.AcquireReorder(reorder_src_memory_p,
                                                  reorder_dst_memory_p);

  auto& astream = MKLDNNDeviceContext::tls().get_stream();
  reorder_p->execute(astream, *reorder_src_memory_p, *reorder_dst_memory_p);
  astream.wait();

  output.Resize({input_dims[1], input_dims[0] * input_dims[2]});
  return output;
}

template <typename T>
constexpr bool IsInt8() {
  return std::is_same<T, int8_t>::value || std::is_same<T, uint8_t>::value;
}

template <typename T>
constexpr bool IsBfloat16() {
  return std::is_same<T, paddle::platform::bfloat16>::value;
}

// Get row matrix shape from a vector shape. If the rank of x_dim > 1, the
// original x_dim is returned.
static paddle::framework::DDim RowMatrixDimsFromVector(
    const paddle::framework::DDim& x_dim) {
  return x_dim.size() > 1 ? x_dim : phi::make_ddim({1, x_dim[0]});
}

// Get column matrix shape from a vector shape. If the rank of y_dim > 1, the
// original y_dim is returned.
static paddle::framework::DDim ColumnMatrixDimsFromVector(
    const paddle::framework::DDim& y_dim) {
  return y_dim.size() > 1 ? y_dim : phi::make_ddim({y_dim[0], 1});
}

template <typename XT, typename YT, typename OT>
class MatMulMKLDNNHandler
    : public paddle::platform::MKLDNNHandlerNoCachingT<XT, dnnl::matmul> {
 public:
  MatMulMKLDNNHandler(const dnnl::engine engine,
                      paddle::platform::Place cpu_place, Tensor* x,
                      bool trans_x, Tensor* y, bool trans_y, Tensor* out,
                      float scale)
      : paddle::platform::MKLDNNHandlerNoCachingT<XT, dnnl::matmul>(engine,
                                                                    cpu_place) {
    auto mat_dim_x = phi::funcs::CreateMatrixDescriptor(x->dims(), 0, trans_x);
    auto mat_dim_y = phi::funcs::CreateMatrixDescriptor(y->dims(), 0, trans_y);

    memory::dim x_bs = mat_dim_x.batch_size_;
    memory::dim y_bs = mat_dim_y.batch_size_;

    memory::dim out_bs = x_bs || y_bs ? std::max(x_bs, y_bs) : 1;
    const memory::dim M = mat_dim_x.height_;
    const memory::dim N = mat_dim_y.width_;
    const memory::dim K = mat_dim_x.width_;

    memory::dims x_dims = {x_bs > 0 ? x_bs : 1, M, K};
    memory::dims y_dims = {y_bs > 0 ? y_bs : 1, K, N};
    memory::dims out_dims = {out_bs, M, N};
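
    // oneDNN matmul consumes plain row-major buffers, so a transposed
    // operand is described by swapping its two innermost strides instead of
    // physically reordering the data.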

    memory::dims x_strides =
        !trans_x ? memory::dims{M * K, K, 1} : memory::dims{M * K, 1, M};

    memory::dims y_strides =
        !trans_y ? memory::dims{N * K, N, 1} : memory::dims{N * K, 1, K};
    memory::dims out_strides = memory::dims{M * N, N, 1};

    auto x_md = memory::desc(x_dims, MKLDNNGetDataType<XT>(), x_strides);
    auto y_md = memory::desc(y_dims, MKLDNNGetDataType<YT>(), y_strides);
    auto out_md = memory::desc(out_dims, MKLDNNGetDataType<OT>(), out_strides);

    dnnl::primitive_attr attrs;
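    // An output-scale mask of 0 applies a single tensor-wide scale, folding
    // alpha into the primitive.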
    if (scale != 1.0f) attrs.set_output_scales(0, {scale});

    this->AcquireForwardPrimitiveDescriptor(attrs, x_md, y_md, out_md);
  }
  // Constructor for the forward MatMul primitive, configured from attributes
  // on the op's ExecutionContext
  MatMulMKLDNNHandler(const dnnl::engine engine, const ExecutionContext& ctx,
                      float scale)
      : paddle::platform::MKLDNNHandlerNoCachingT<XT, dnnl::matmul>(
            engine, ctx.GetPlace()) {
    dnnl::primitive_attr attr;
    float scale_out = ComputeOutputScale(ctx);
    if (scale_out != 1.0f) {
      constexpr unsigned tensor_wide_scale = 0;
      attr.set_output_scales(tensor_wide_scale, {scale_out});
    }

    auto matmul_dims_ = GetMatmulDims(ctx);
    auto x_md = memory::desc(matmul_dims_.x_dims, MKLDNNGetDataType<XT>(),
                             matmul_dims_.x_strides);
    auto y_md = memory::desc(matmul_dims_.y_dims, MKLDNNGetDataType<YT>(),
                             matmul_dims_.y_strides);
    auto out_md = memory::desc(matmul_dims_.out_dims, MKLDNNGetDataType<OT>(),
                               matmul_dims_.out_strides);
    this->AcquireForwardPrimitiveDescriptor(attr, x_md, y_md, out_md);
  }

  std::shared_ptr<memory> AcquireWeightsMemory(const Tensor* input) {
    const YT* input_data = input->data<YT>();
    return this->AcquireMemoryFromPrimitive(this->fwd_pd_->weights_desc(),
                                            to_void_cast<YT>(input_data));
  }

 public:
  void Execute(const paddle::framework::Tensor* x,
               const paddle::framework::Tensor* y,
               paddle::framework::Tensor* out) {
    const auto src_memory_p = this->AcquireSrcMemory(x);
    const auto weights_memory_p = this->AcquireWeightsMemory(y);
    const auto dst_memory_p = this->AcquireDstMemory(out);

    auto matmul_p = this->AcquireForwardPrimitive();

    std::unordered_map<int, dnnl::memory> matmul_args = {
        {DNNL_ARG_SRC, *src_memory_p},
        {DNNL_ARG_WEIGHTS, *weights_memory_p},
        {DNNL_ARG_DST, *dst_memory_p}};

    auto& astream = paddle::platform::MKLDNNDeviceContext::tls().get_stream();

    // The primitive describes a single batch, so emulate a batched matmul
    // by shifting the memory data handles by per-batch byte offsets.
    void* x_ptr = src_memory_p->get_data_handle();
    void* y_ptr = weights_memory_p->get_data_handle();
    void* out_ptr = dst_memory_p->get_data_handle();
    auto offsets = this->GetOffsets();
    for (uint16_t i = 0; i < this->GetBatchSize(); ++i) {
      src_memory_p->set_data_handle(x_ptr);
      weights_memory_p->set_data_handle(y_ptr);
      dst_memory_p->set_data_handle(out_ptr);
      matmul_p->execute(astream, {
                                     {DNNL_ARG_SRC, *src_memory_p},
                                     {DNNL_ARG_WEIGHTS, *weights_memory_p},
                                     {DNNL_ARG_DST, *dst_memory_p},
                                 });
      x_ptr = static_cast<char*>(x_ptr) + std::get<0>(offsets);
      y_ptr = static_cast<char*>(y_ptr) + std::get<1>(offsets);
      out_ptr = static_cast<char*>(out_ptr) + std::get<2>(offsets);
    }
    astream.wait();

    auto format =
        MKLDNNFormatForSize(out->dims().size(), dnnl::memory::format_tag::nchw);
    out->set_format(format);
    out->set_layout(DataLayout::kMKLDNN);
  }

  std::shared_ptr<dnnl::memory> AcquireDstMemory(
      paddle::framework::Tensor* output) {
    // We cannot use the base AcquireDstMemory, because it requests an
    // allocation based on the DST memory primitive size. That is fine in
    // general, but here the primitive covers only one batch of data and the
    // pointer is shifted for every new batch, so the Tensor is bigger than
    // the dst memory primitive and the size check would trigger an
    // assertion. As there is no 'any' format here, we can keep the default
    // Tensor size as computed in ComputeInferShape.
    OT* ptr = output->mutable_data<OT>(this->place_);
    return this->AcquireMemoryFromPrimitive(this->fwd_pd_->dst_desc(), ptr);
  }

 private:
  struct MatMulDims {
    const memory::dims x_dims, y_dims, out_dims, x_strides, y_strides,
        out_strides;
  };

  phi::DDim GetDimForInput(const ExecutionContext& ctx,
                           std::string input_name) {
    auto shape = ctx.Attr<std::vector<int>>("fused_reshape_" + input_name);
    auto axis = ctx.Attr<std::vector<int>>("fused_transpose_" + input_name);
    auto input_dims = ctx.Input<Tensor>(input_name)->dims();
    if (!shape.empty() && !axis.empty()) {
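      // A zero in fused_reshape_<input_name> means "keep the corresponding
      // input dimension".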
      auto it_zero = std::find(shape.begin(), shape.end(), 0);
      if (it_zero != shape.end()) {
        for (uint64_t i = 0; i < shape.size(); i++) {
          if (shape[i] == 0) {
            PADDLE_ENFORCE_LT(
                i, input_dims.size(),
                paddle::platform::errors::InvalidArgument(
                    "The index of 0 in fused_reshape_%s "
                    "should be less than the input dim size, "
                    "but the index is %d and the input dim size is %d.",
                    input_name, i, input_dims.size()));
            shape[i] = input_dims.at(i);
          }
        }
      }

      return input_dims.reshape(shape).transpose(axis);
    }
    return input_dims;
  }

  std::pair<phi::funcs::MatDescriptor, memory::dims> GetInputDimsAndStrides(
      const ExecutionContext& ctx, std::string input_name) {
    auto shape = ctx.Attr<std::vector<int>>("fused_reshape_" + input_name);
    auto axis = ctx.Attr<std::vector<int>>("fused_transpose_" + input_name);
    auto input_dims = ctx.Input<Tensor>(input_name)->dims();
    auto new_dims = input_dims;
    if (!shape.empty() && !axis.empty()) {
      auto it_zero = std::find(shape.begin(), shape.end(), 0);
      if (it_zero != shape.end()) {
        for (uint64_t i = 0; i < shape.size(); i++) {
          if (shape[i] == 0) {
            PADDLE_ENFORCE_LT(
                i, input_dims.size(),
                paddle::platform::errors::InvalidArgument(
                    "The index of 0 in fused_reshape_%s "
                    "should be less than the input dim size, "
                    "but the index is %d and the input dim size is %d.",
                    input_name, i, input_dims.size()));
            shape[i] = input_dims.at(i);
          }
        }
      }

      new_dims = input_dims.reshape(shape).transpose(axis);
    }

    auto& MatrixDimsFromVector = input_name == "X" ? RowMatrixDimsFromVector
                                                   : ColumnMatrixDimsFromVector;
    phi::funcs::MatDescriptor mat_dim = phi::funcs::CreateMatrixDescriptor(
        MatrixDimsFromVector(new_dims), 0,
        ctx.Attr<bool>("transpose_" + input_name));

    memory::dims strides;
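    // With a fused reshape+transpose on this input, derive row-major strides
    // of the reshaped layout and permute them by the transpose axes, so the
    // primitive can read the original buffer in place.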
    if (!shape.empty()) {
      auto shape2 = input_dims.reshape(shape);
      strides.push_back(1);
      for (auto i = shape2.size() - 1; i > 0; --i) {
        strides.insert(strides.begin(), strides.front() * shape2[i]);
      }
      strides = Transpose(strides, axis);
      if (shape.size() == 4)
        strides.erase(strides.begin());
      else if (shape.size() == 2)
        strides.insert(strides.begin(), shape[0] * shape[1]);
      mat_dim.stride_ = strides[0];
      if (mat_dim.trans_) std::swap(*strides.rbegin(), *(++strides.rbegin()));
    }
    return std::make_pair(mat_dim, strides);
  }

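  // out = alpha * X * Y is computed on dequantized values, so alpha and the
  // int8 quantization scales fold into a single output scale:
  //   scale = alpha * scale_out / (scale_x * scale_y).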
  float ComputeOutputScale(const ExecutionContext& ctx) {
    float scale_x = ctx.Attr<float>("Scale_x");
    float scale_y = ctx.Attr<float>("Scale_y");
    bool force_fp32_out = ctx.Attr<bool>("force_fp32_output");
    float scale_out = force_fp32_out ? 1.f : ctx.Attr<float>("Scale_out");
    float alpha = ctx.Attr<float>("alpha");
    return alpha * scale_out / (scale_x * scale_y);
  }

  bool IsInputFused(const ExecutionContext& ctx) const {
    return !(ctx.Attr<std::vector<int>>("fused_reshape_X").empty() &&
             ctx.Attr<std::vector<int>>("fused_reshape_Y").empty());
  }

  bool IsOutputFused(const ExecutionContext& ctx) const {
    auto& fused_reshape_Out = ctx.Attr<std::vector<int>>("fused_reshape_Out");
    auto& fused_transpose_Out =
        ctx.Attr<std::vector<int>>("fused_transpose_Out");
    return !fused_reshape_Out.empty() && !fused_transpose_Out.empty();
  }

  MatMulDims GetMatmulDims(const ExecutionContext& ctx) {
    phi::funcs::MatDescriptor mat_dim_x;
    memory::dims strides_x;
    std::tie(mat_dim_x, strides_x) = GetInputDimsAndStrides(ctx, "X");
    phi::funcs::MatDescriptor mat_dim_y;
    memory::dims strides_y;
    std::tie(mat_dim_y, strides_y) = GetInputDimsAndStrides(ctx, "Y");

    auto x_bs = mat_dim_x.batch_size_;
    auto y_bs = mat_dim_y.batch_size_;
    PADDLE_ENFORCE_EQ(x_bs > 0 && y_bs > 0 && x_bs != y_bs, false,
                      paddle::platform::errors::InvalidArgument(
                          "If batch sizes of X and Y are positive,"
                          "they have to be equal."));

    memory::dim out_bs = x_bs || y_bs ? std::max(x_bs, y_bs) : 1;
    const memory::dim M = mat_dim_x.height_;
    const memory::dim N = mat_dim_y.width_;
    const memory::dim K = mat_dim_x.width_;

    batch_size_ = 1;
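    // With fused input/output reshapes the outermost dimension cannot be
    // expressed as a plain oneDNN batch, so it is factored out here;
    // Execute() then runs the primitive once per slice.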
    if (out_bs > 1 && (IsOutputFused(ctx) || IsInputFused(ctx))) {
      auto x_dims = GetDimForInput(ctx, "X");
      auto y_dims = GetDimForInput(ctx, "Y");
      batch_size_ = x_bs > y_bs ? x_dims[0] : y_dims[0];
      x_bs /= batch_size_;
      y_bs /= batch_size_;
      out_bs /= batch_size_;
    }
    memory::dims x_dims = {x_bs > 0 ? x_bs : 1, M, K};
    memory::dims y_dims = {y_bs > 0 ? y_bs : 1, K, N};
    memory::dims out_dims = {out_bs, M, N};

    x_offset_ = x_bs * M * K * sizeof(XT);
    y_offset_ = y_bs * K * N * sizeof(YT);
    out_offset_ = out_bs * M * N * sizeof(OT);

    // Translate transpose_X / transpose_Y into strides when no fused
    // reshape supplied them.
    if (strides_x.empty())
      strides_x = !ctx.Attr<bool>("transpose_X") ? memory::dims{M * K, K, 1}
                                                 : memory::dims{M * K, 1, M};
    if (strides_y.empty())
      strides_y = !ctx.Attr<bool>("transpose_Y") ? memory::dims{N * K, N, 1}
                                                 : memory::dims{N * K, 1, K};
    memory::dims out_strides = memory::dims{M * N, N, 1};

    CorrectStridesWhenFloatOutputFused(ctx, N, out_bs, &out_strides);

    return {x_dims, y_dims, out_dims, strides_x, strides_y, out_strides};
  }

  std::vector<int64_t> Transpose(const std::vector<int64_t>& x,
                                 const std::vector<int>& axis) {
    size_t in_rank = x.size();
    size_t axis_size = axis.size();

    auto axis_set = std::set<int>(axis.begin(), axis.end());
    PADDLE_ENFORCE_EQ(axis_set.size(), axis_size,
                      paddle::platform::errors::InvalidArgument(
                          "In an axis array, elements must be unique."));

    PADDLE_ENFORCE_EQ(in_rank, axis_size,
                      paddle::platform::errors::InvalidArgument(
                          "The input dimension's size "
                          "should be equal to the axis's size. "
                          "But received dimension is %d, "
                          "axis's size is %d",
                          in_rank, axis_size));

    PADDLE_ENFORCE_LT(*std::max_element(axis.begin(), axis.end()), axis_size,
                      paddle::platform::errors::InvalidArgument(
                          "Axis values must be ranging from 0 to (dims - 1)."));

    std::vector<int64_t> new_x(x.size());
    for (size_t i = 0; i < x.size(); i++) {
      new_x[i] = x[axis[i]];
    }
    return new_x;
  }

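  // For the fp32 fused-output-transpose path, write the result directly in
  // the permuted layout by giving the destination memory swapped strides.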
  void CorrectStridesWhenFloatOutputFused(const ExecutionContext& ctx,
                                          const memory::dim N, memory::dim b,
                                          memory::dims* out_strides) const {
    if (!IsInt8<OT>() && !IsBfloat16<OT>() && IsOutputFused(ctx)) {
      *out_strides = {N, b * N, 1};
    }
  }

  uint16_t GetBatchSize() const { return batch_size_; }

  std::tuple<uint32_t, uint32_t, uint32_t> GetOffsets() const {
    return std::make_tuple(x_offset_, y_offset_, out_offset_);
  }

 private:
  uint32_t x_offset_;
  uint32_t y_offset_;
  uint32_t out_offset_;
  uint16_t batch_size_;
};

/**
 * Reshape a tensor to 3-D or 2-D tensor by matrix descriptor.
 *
 * The shape would be [BatchSize, H, W] or [H, W].
 * If transposed, `H,W` will be swapped.
 */
static void ReshapeTensorToMatrixSequence(
    Tensor* x, const phi::funcs::MatDescriptor& descriptor) {
  int64_t h, w;
  h = descriptor.height_;
  w = descriptor.width_;
  if (descriptor.trans_) {
    std::swap(w, h);
  }
  if (descriptor.batch_size_) {
    x->Resize({descriptor.batch_size_, h, w});
  } else {
    x->Resize({h, w});
  }
}

/**
 * Reshape the x,y,out tensor to 3-D or 2-D tensor by matrix descriptor
 * Out = matmul(x, y)
 *
 * This method first computes the X and Y matrix sequences, and then
 * derives the out shape.
 *
 * Assume X = [BatchSize, H1, W1], Y = [BatchSize, H2, W2]
 * The out = [BatchSize, H1, W2]
 *
 * If there is no batch size in `X` and `Y`, the out will be [H1, W2].
 * If either `X` or `Y` has batch size BatchSize, the out will have
 * BatchSize as well.
 */
static void ReshapeXYOutToMatrixSequence(Tensor* x, Tensor* y, Tensor* out,
                                         bool trans_x, bool trans_y) {
  auto x_dim = RowMatrixDimsFromVector(x->dims());
  auto y_dim = ColumnMatrixDimsFromVector(y->dims());
  auto mat_dim_x = phi::funcs::CreateMatrixDescriptor(x_dim, 0, trans_x);
  auto mat_dim_y = phi::funcs::CreateMatrixDescriptor(y_dim, 0, trans_y);
  if (mat_dim_x.batch_size_ == 0 && mat_dim_y.batch_size_ == 0) {
    out->Resize({mat_dim_x.height_, mat_dim_y.width_});
  } else {
    out->Resize({std::max(mat_dim_x.batch_size_, mat_dim_y.batch_size_),
                 mat_dim_x.height_, mat_dim_y.width_});
  }

  ReshapeTensorToMatrixSequence(x, mat_dim_x);
  ReshapeTensorToMatrixSequence(y, mat_dim_y);
}

// Choose the appropriate Handler instantiation based on the inferred
// output type (uint8, int8, bfloat16 or float).
template <typename XT, typename YT>
static void ExecuteMatMul(const ExecutionContext& ctx) {
  constexpr bool is_int8 = IsInt8<XT>();
  constexpr bool is_bfloat16 = IsBfloat16<XT>();
  const bool force_fp32_output = ctx.Attr<bool>("force_fp32_output");
  constexpr bool fuse_relu = false;  // TODO(intel): Enable eltwise fuses
  auto* x = ctx.Input<Tensor>("X");
  auto* y = ctx.Input<Tensor>("Y");
  auto* out = ctx.Output<Tensor>("Out");
  float alpha = ctx.HasAttr("alpha") ? ctx.Attr<float>("alpha") : 1.0f;
  const auto& dev_ctx =
      ctx.template device_context<paddle::platform::MKLDNNDeviceContext>();

  if (force_fp32_output || ((!is_int8) && (!is_bfloat16))) {
    MatMulMKLDNNHandler<XT, YT, float>(dev_ctx.GetEngine(), ctx, alpha)
        .Execute(x, y, out);
  } else if (is_bfloat16) {
    MatMulMKLDNNHandler<XT, YT, paddle::platform::bfloat16>(dev_ctx.GetEngine(),
                                                            ctx, alpha)
        .Execute(x, y, out);
  } else if (fuse_relu) {
    MatMulMKLDNNHandler<XT, YT, uint8_t>(dev_ctx.GetEngine(), ctx, alpha)
        .Execute(x, y, out);
  } else {
    MatMulMKLDNNHandler<XT, YT, int8_t>(dev_ctx.GetEngine(), ctx, alpha)
        .Execute(x, y, out);
  }
}

template <typename T>
class MatMulMKLDNNKernel : public paddle::framework::OpKernel<T> {
 public:
  void Compute(const ExecutionContext& ctx) const override {
    if (ctx.HasAttr("head_number")) {
      PADDLE_ENFORCE_EQ(
          ctx.Attr<int>("head_number"), 1,
          paddle::platform::errors::Unimplemented(
              "oneDNN matmul doesn't support multiple heads. Expected "
531 532
              "head_number=1. But received `head_number` is %d",
              ctx.Attr<int>("head_number")));
    }
    ExecuteMatMul<T, T>(ctx);
  }
};

}  // anonymous namespace

namespace paddle {
namespace operators {

template <typename T>
void MatMulGradMKLDNNKernel<T>::Compute(const ExecutionContext& ctx) const {
  if (ctx.HasAttr("head_number")) {
    PADDLE_ENFORCE_EQ(
        ctx.Attr<int>("head_number"), 1,
        platform::errors::Unimplemented(
            "oneDNN matmul doesn't support multiple heads. Expected "
550 551
            "head_number=1. But received `head_number` is %d",
            ctx.Attr<int>("head_number")));
  }
  RunKernel(ctx);
}

template <typename T>
void MatMulGradMKLDNNKernel<T>::ExecuteMatMulGrad(
    const ExecutionContext& ctx, const MKLDNNDeviceContext& dev_ctx,
    const dnnl::engine& engine, Tensor* x, bool trans_x,
    bool is_fold_init_dims_x, Tensor* y, bool trans_y, bool is_fold_init_dims_y,
    Tensor* out) const {
  // The gradient is computed differently when broadcasting was used in the
  // forward pass: a rank-3 operand paired with a rank-2 output must first be
  // folded to 2-D.
  bool need_combine = (x->dims().size() == 3 || y->dims().size() == 3) &&
                      out->dims().size() == 2;

  Tensor x_combined, y_combined;
  if (!need_combine) {
    x_combined = *x;
    y_combined = *y;
  } else {
    x_combined = is_fold_init_dims_x ? FoldOuterDims(*x)
                                     : FoldFirstAndLastDims<T>(dev_ctx, x);
    y_combined = is_fold_init_dims_y ? FoldOuterDims(*y)
                                     : FoldFirstAndLastDims<T>(dev_ctx, y);
  }

  float alpha = ctx.HasAttr("alpha") ? ctx.Attr<float>("alpha") : 1.0f;

  MatMulMKLDNNHandler<T, T, T> handler(engine, ctx.GetPlace(), &x_combined,
                                       trans_x, &y_combined, trans_y, out,
                                       alpha);

  const auto src_memory_p = handler.AcquireSrcMemory(&x_combined);
  const auto weights_memory_p = handler.AcquireWeightsMemory(&y_combined);
  const auto dst_memory_p = handler.AcquireDstMemory(out);

  auto matmul_p = handler.AcquireForwardPrimitive();

  std::unordered_map<int, dnnl::memory> matmul_args = {
      {DNNL_ARG_SRC, *src_memory_p},
      {DNNL_ARG_WEIGHTS, *weights_memory_p},
      {DNNL_ARG_DST, *dst_memory_p}};

  auto& astream = platform::MKLDNNDeviceContext::tls().get_stream();
  matmul_p->execute(astream, matmul_args);
  astream.wait();

  out->set_layout(framework::DataLayout::kMKLDNN);
  out->set_format(platform::GetMKLDNNFormat(
      dst_memory_p->get_desc().reshape(vectorize<int64_t>(out->dims()))));
}

template <typename T>
void MatMulGradMKLDNNKernel<T>::RunKernel(const ExecutionContext& ctx) const {
  const auto& dev_ctx =
      ctx.template device_context<platform::MKLDNNDeviceContext>();
  const auto& onednn_engine = dev_ctx.GetEngine();

  auto x = *ctx.Input<Tensor>("X");
  auto y = *ctx.Input<Tensor>("Y");
  auto dout = *ctx.Input<Tensor>(framework::GradVarName("Out"));
  auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
  auto* dy = ctx.Output<Tensor>(framework::GradVarName("Y"));

  bool transpose_x = ctx.HasAttr("transpose_X") ? ctx.Attr<bool>("transpose_X")
                                                : ctx.Attr<bool>("trans_x");
  bool transpose_y = ctx.HasAttr("transpose_Y") ? ctx.Attr<bool>("transpose_Y")
                                                : ctx.Attr<bool>("trans_y");

  ReshapeXYOutToMatrixSequence(&x, &y, &dout, transpose_x, transpose_y);

  framework::DDim dx_dims;
  if (dx) {
    dx_dims = dx->dims();
    if (dx_dims != x.dims()) {
      dx->Resize(x.dims());
    }
  }

  framework::DDim dy_dims;
  if (dy) {
    dy_dims = dy->dims();
    if (dy_dims != y.dims()) {
      dy->Resize(y.dims());
    }
  }

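  // Gradients of Out = X * Y for each transpose combination:
  //   plain:        dX = dOut * Y^T    dY = X^T * dOut
  //   transpose_x:  dX = Y * dOut^T    dY = X * dOut
  //   transpose_y:  dX = dOut * Y      dY = dOut^T * X
  //   both:         dX = Y^T * dOut^T  dY = dOut^T * X^T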
  if (transpose_x && transpose_y) {
    this->ExecuteMatMulGrad(ctx, dev_ctx, onednn_engine, &y, true, true, &dout,
                            true, false, dx);
    this->ExecuteMatMulGrad(ctx, dev_ctx, onednn_engine, &dout, true, true, &x,
                            true, false, dy);
  } else if (transpose_x) {
    this->ExecuteMatMulGrad(ctx, dev_ctx, onednn_engine, &y, false, false,
                            &dout, true, false, dx);
    this->ExecuteMatMulGrad(ctx, dev_ctx, onednn_engine, &x, false, false,
                            &dout, false, true, dy);
  } else if (transpose_y) {
    this->ExecuteMatMulGrad(ctx, dev_ctx, onednn_engine, &dout, false, false,
                            &y, false, true, dx);
    this->ExecuteMatMulGrad(ctx, dev_ctx, onednn_engine, &dout, true, true, &x,
                            false, true, dy);
  } else {
    this->ExecuteMatMulGrad(ctx, dev_ctx, onednn_engine, &dout, false, false,
                            &y, true, false, dx);
    this->ExecuteMatMulGrad(ctx, dev_ctx, onednn_engine, &x, true, true, &dout,
                            false, true, dy);
  }

  if (dx) {
    if (dx_dims != x.dims()) {
      dx->Resize(dx_dims);
      dx->set_format(x.format());
    }
  }
  if (dy) {
    if (dy_dims != y.dims()) {
      dy->Resize(dy_dims);
      dy->set_format(y.format());
    }
  }
}

template class MatMulGradMKLDNNKernel<float>;
template class MatMulGradMKLDNNKernel<paddle::platform::bfloat16>;

}  // namespace operators
}  // namespace paddle
namespace ops = paddle::operators;

REGISTER_OP_KERNEL(matmul, MKLDNN, ::paddle::platform::CPUPlace,
                   MatMulMKLDNNKernel<float>,
                   MatMulMKLDNNKernel<paddle::platform::bfloat16>,
                   MatMulMKLDNNKernel<int8_t>, MatMulMKLDNNKernel<uint8_t>);

REGISTER_OP_KERNEL(matmul_grad, MKLDNN, ::paddle::platform::CPUPlace,
                   ops::MatMulGradMKLDNNKernel<float>,
                   ops::MatMulGradMKLDNNKernel<paddle::platform::bfloat16>);