/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
    http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <algorithm>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

namespace paddle {
namespace operators {

/**
 * Print the shape information of a matrix descriptor into a readable string.
 */
inline static std::string DumpMatrixShape(
    const phi::funcs::MatDescriptor &desc) {
  std::stringstream buffer;
  buffer << "[" << desc.batch_size_ << ", " << desc.height_ << ", "
         << desc.width_ << "]";
  return buffer.str();
}
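// Illustrative example (hypothetical values): a descriptor with
// batch_size_ = 4, height_ = 2, width_ = 8 is dumped as "[4, 2, 8]".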

/**
 * Get row matrix shape from a vector shape. If the rank of x_dim > 1, the
 * original x_dim is returned.
 */
static framework::DDim RowMatrixFromVector(const framework::DDim &x_dim) {
  if (x_dim.size() > 1) {
    return x_dim;
  }
  return phi::make_ddim({1, x_dim[0]});
}
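// Illustrative example (hypothetical shapes): a vector shape [K] becomes the
// row matrix shape [1, K], while a shape such as [B, M, K] is returned as-is.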

/**
 * Get column matrix shape from a vector shape. If the rank of y_dim > 1, the
 * original y_dim is returned.
 */
static framework::DDim ColumnMatrixFromVector(const framework::DDim &y_dim) {
  if (y_dim.size() > 1) {
    return y_dim;
  }
  return phi::make_ddim({y_dim[0], 1});
}
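// Illustrative example (hypothetical shapes): a vector shape [K] becomes the
// column matrix shape [K, 1], while a shape such as [B, K, N] is returned
// as-is.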

template <typename DeviceContext, typename T>
class MatMulKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &context) const override {
    auto &x = GET_DATA_SAFELY(
        context.Input<framework::Tensor>("X"), "Input", "X", "MatMul");
    auto &y = GET_DATA_SAFELY(
        context.Input<framework::Tensor>("Y"), "Input", "Y", "MatMul");
    auto *out = context.Output<framework::Tensor>("Out");

    auto &dev_ctx = context.template device_context<DeviceContext>();
    dev_ctx.template Alloc<T>(out, out->numel() * sizeof(T));

    auto blas = phi::funcs::GetBlas<DeviceContext, T>(context);
    auto mat_dim_a = phi::funcs::CreateMatrixDescriptor(
        RowMatrixFromVector(x.dims()), 0, context.Attr<bool>("transpose_X"));
    auto mat_dim_b = phi::funcs::CreateMatrixDescriptor(
        ColumnMatrixFromVector(y.dims()), 0, context.Attr<bool>("transpose_Y"));
    auto scale = static_cast<T>(context.Attr<float>("alpha"));

    int head_number = 1;
#if defined(PADDLE_WITH_MKLML) && !defined(PADDLE_WITH_CUDA) && \
    !defined(PADDLE_WITH_HIP)
    head_number = context.Attr<int>("head_number");
#endif

    const auto &x_dims = x.dims();
    const auto &y_dims = y.dims();
    if (head_number <= 1 && x_dims.size() == 3 && y_dims.size() <= 2) {
      // Fold the batch dimension only when transpose_X is false; if it were
      // true, the extra transpose would cost too much time.
      if (!context.Attr<bool>("transpose_X")) {
        mat_dim_a.height_ *= mat_dim_a.batch_size_;
        mat_dim_a.batch_size_ = 0;
      }
    }
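    // Illustrative example of the folding above (hypothetical shapes): with
    // transpose_X == false, X: [P, M, K] is treated as a single [(P * M), K]
    // matrix, so a rank-2 Y: [K, N] can be handled by one GEMM that produces
    // Out: [P, M, N].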
#if defined(PADDLE_WITH_MKLML) && !defined(PADDLE_WITH_CUDA) && \
    !defined(PADDLE_WITH_HIP)
    bool split_vertical_y = (mat_dim_a.width_ != mat_dim_b.height_);

    if (head_number > 1) {
      blas.MatMulWithHead(x,
                          mat_dim_a,
                          y,
                          mat_dim_b,
                          scale,
                          head_number,
                          out,
                          T(0),
                          split_vertical_y);
    } else {
      blas.MatMul(x, mat_dim_a, y, mat_dim_b, scale, out, T(0));
    }
#else
    blas.MatMul(x, mat_dim_a, y, mat_dim_b, scale, out, T(0));
#endif
  }
};

// Reshape a rank-3 tensor from P x M x N to (P * M) x N.
// Identity op if the tensor is not of rank 3.
static framework::Tensor FoldInitDims(const framework::Tensor &input) {
  auto output = input;
  auto in_dims = input.dims();
  if (in_dims.size() == 3) {
    output.Resize({in_dims[0] * in_dims[1], in_dims[2]});
  }
  return output;
}
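// Illustrative example (hypothetical sizes): a [4, 5, 6] input is viewed as a
// [20, 6] matrix without copying data; a rank-2 input is returned unchanged.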

// Reshape a rank-3 tensor from P x M x N to M x (P * N).
// (Warning: This requires transposing data and writes into new memory.)
// Identity op if the tensor is not of rank 3.
template <typename DeviceContext, typename T>
static framework::Tensor FoldHeadAndLastDims(const DeviceContext &context,
                                             const framework::Tensor &input) {
  auto in_dims = input.dims();
  if (in_dims.size() != 3) {
    return input;
  }
  framework::Tensor output;
  output.Resize({in_dims[1], in_dims[0], in_dims[2]});
  output.mutable_data<T>(context.GetPlace());
  std::vector<int> axis = {1, 0, 2};
  phi::funcs::Transpose<DeviceContext, T, 3> trans;
  trans(context, input, &output, axis);
  output.Resize({in_dims[1], in_dims[0] * in_dims[2]});

  return output;
}
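// Illustrative example (hypothetical sizes): a [4, 5, 6] input is transposed
// to [5, 4, 6] and then viewed as a [5, 24] matrix; unlike FoldInitDims, this
// copies the data into newly allocated memory.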

/**
 * Reshape a tensor to 3-D or 2-D tensor by matrix descriptor.
 *
 * The shape would be [BatchSize, H, W] or [H, W].
 * If transposed, `H,W` will be swapped.
 */
static void ReshapeTensorIntoMatrixSequence(
    framework::Tensor *x, const phi::funcs::MatDescriptor &descriptor) {
  int64_t h, w;
  h = descriptor.height_;
  w = descriptor.width_;
  if (descriptor.trans_) {
    std::swap(w, h);
  }
  if (descriptor.batch_size_) {
    x->Resize({descriptor.batch_size_, h, w});
  } else {
    x->Resize({h, w});
  }
}
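// Illustrative example (hypothetical values): a descriptor with
// batch_size_ = 8, height_ = 2, width_ = 3 and trans_ = true resizes the
// tensor to [8, 3, 2]; with batch_size_ = 0 it would become [3, 2].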

/**
 * Reshape the x, y, out tensors to 3-D or 2-D tensors by matrix descriptor.
 * Out = matmul(x, y)
 *
 * This method first computes the X, Y matrix sequence, and then computes
 * the out shape.
 *
 * Assume X = [BatchSize, H1, W1], Y = [BatchSize, H2, W2]
 * The out = [BatchSize, H1, W2]
 *
 * If there is no batch size in `X` and `Y`, the out will be [H1, W2]
 * If any of `X` and `Y` has batch size BatchSize, the out will have the
 * BatchSize.
 */
static void ReshapeXYOutIntoMatrixSequence(framework::Tensor *x,
                                           framework::Tensor *y,
                                           framework::Tensor *out,
                                           bool trans_x,
                                           bool trans_y) {
  auto x_dim = RowMatrixFromVector(x->dims());
  auto y_dim = ColumnMatrixFromVector(y->dims());
  auto mat_dim_x = phi::funcs::CreateMatrixDescriptor(x_dim, 0, trans_x);
  auto mat_dim_y = phi::funcs::CreateMatrixDescriptor(y_dim, 0, trans_y);
  if (mat_dim_x.batch_size_ == 0 && mat_dim_y.batch_size_ == 0) {
    out->Resize({mat_dim_x.height_, mat_dim_y.width_});
  } else {
    out->Resize({std::max(mat_dim_x.batch_size_, mat_dim_y.batch_size_),
                 mat_dim_x.height_,
                 mat_dim_y.width_});
  }

  ReshapeTensorIntoMatrixSequence(x, mat_dim_x);
  ReshapeTensorIntoMatrixSequence(y, mat_dim_y);
}

// Using dimensional constraints on matrix multiplication, it is
// straight-forward to check the following table for when X and Y
// are both matrices.
//
// transpose_X | False    | True     | False    | True
// transpose_Y | False    | False    | True     | True
// -----------+----------+----------+----------+-----------
//        dX = | dOut Y^T | Y dOut^T | dOut Y   | Y^T dOut^T
//        dY = | X^T dOut | X dOut   | dOut^T X | dOut^T X^T
//
// When X is a vector of size K, we treat it instead as a matrix of shape
// (1, K). Similarly, when Y is a vector of size K, we treat it instead as
// a matrix of shape (K, 1).
//
// When X and Y are both 3-dimensional tensors, the first dimension (the
// batch dimension) can be ignored and the exact same formulas apply
// as for two matrices.
//
// Finally, when, e.g., X is a 3-dimensional tensor but Y is a matrix, we end
// up with formulas like
//
//   dY_{ij} = \sum_{p, m} X_{pmi} dOut_{pmj}
//
// To handle this sort of scenario, we reshape X : P x M x K, dOut: P x M x N
// to X: (P * M) x K, dOut: (P * M) x N.
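//
// Worked shape example for the table above (hypothetical sizes, no
// transposes): with X: [M, K], Y: [K, N] and dOut: [M, N],
//   dX = dOut Y^T has shape [M, N] x [N, K] = [M, K]
//   dY = X^T dOut has shape [K, M] x [M, N] = [K, N]
// which matches the shapes of X and Y, as required.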
template <typename DeviceContext, typename T>
class MatMulGradKernel : public framework::OpKernel<T> {
 public:
  void MatMul(const framework::ExecutionContext &context,
              const framework::Tensor &a,
              bool trans_a,
              const framework::Tensor &b,
              bool trans_b,
              framework::Tensor *out) const {
    out->mutable_data<T>(context.GetPlace());
    auto blas = phi::funcs::GetBlas<DeviceContext, T>(context);
    auto mat_dim_a = phi::funcs::CreateMatrixDescriptor(a.dims(), 0, trans_a);
    auto mat_dim_b = phi::funcs::CreateMatrixDescriptor(b.dims(), 0, trans_b);

    int head_number = 1;
#if defined(PADDLE_WITH_MKLML) && !defined(PADDLE_WITH_CUDA) && \
    !defined(PADDLE_WITH_HIP)
    if (context.HasAttr("head_number")) {
      head_number = context.Attr<int>("head_number");
    }
#endif

    if (head_number <= 1 && a.dims().size() == 3 && b.dims().size() <= 2) {
      // Fold the batch dimension only when trans_a is false; if it were true,
      // the extra transpose would cost too much time.
      if (!trans_a) {
        mat_dim_a.height_ *= mat_dim_a.batch_size_;
        mat_dim_a.batch_size_ = 0;
      }
    }
    blas.MatMul(a,
                mat_dim_a,
                b,
                mat_dim_b,
                static_cast<T>(context.Attr<float>("alpha")),
                out,
                T(0));
  }

  void CalcInputGrad(const framework::ExecutionContext &context,
                     const framework::Tensor &a,
                     bool trans_a,
                     bool is_fold_init_dims_a,
                     const framework::Tensor &b,
                     bool trans_b,
                     bool is_fold_init_dims_b,
                     framework::Tensor *out) const {
    if (out == nullptr) return;
    bool need_combine = (a.dims().size() == 3 || b.dims().size() == 3) &&
                        out->dims().size() == 2;
    if (!need_combine) {
      MatMul(context, a, trans_a, b, trans_b, out);
    } else {
      auto &ctx = context.template device_context<DeviceContext>();
      MatMul(
          context,
          is_fold_init_dims_a ? FoldInitDims(a)
                              : FoldHeadAndLastDims<DeviceContext, T>(ctx, a),
          trans_a,
          is_fold_init_dims_b ? FoldInitDims(b)
                              : FoldHeadAndLastDims<DeviceContext, T>(ctx, b),
          trans_b,
          out);
    }
  }

  void Compute(const framework::ExecutionContext &context) const override {
    auto x = *context.Input<framework::Tensor>("X");
    auto y = *context.Input<framework::Tensor>("Y");
    auto dout =
        *context.Input<framework::Tensor>(framework::GradVarName("Out"));
    auto *dx = context.Output<framework::Tensor>(framework::GradVarName("X"));
    auto *dy = context.Output<framework::Tensor>(framework::GradVarName("Y"));
    bool transpose_x = context.Attr<bool>("transpose_X");
    bool transpose_y = context.Attr<bool>("transpose_Y");

    ReshapeXYOutIntoMatrixSequence(&x, &y, &dout, transpose_x, transpose_y);
    framework::DDim dx_dims;
    if (dx) {
      dx_dims = dx->dims();
      if (dx_dims != x.dims()) {
        dx->Resize(x.dims());
      }
    }

    framework::DDim dy_dims;
    if (dy) {
      dy_dims = dy->dims();
      if (dy_dims != y.dims()) {
        dy->Resize(y.dims());
      }
    }

    if (transpose_x && transpose_y) {
      CalcInputGrad(context, y, true, true, dout, true, false, dx);
      CalcInputGrad(context, dout, true, true, x, true, false, dy);
    } else if (transpose_x) {
      CalcInputGrad(context, y, false, false, dout, true, false, dx);
      CalcInputGrad(context, x, false, false, dout, false, true, dy);
    } else if (transpose_y) {
      CalcInputGrad(context, dout, false, false, y, false, true, dx);
      CalcInputGrad(context, dout, true, true, x, false, true, dy);
    } else {
      CalcInputGrad(context, dout, false, false, y, true, false, dx);
      CalcInputGrad(context, x, true, true, dout, false, true, dy);
    }

    if (dx) {
      if (dx_dims != x.dims()) {
        dx->Resize(dx_dims);
      }
    }
    if (dy) {
      if (dy_dims != y.dims()) {
        dy->Resize(dy_dims);
      }
    }
  }
};

framework::DDim GetDimForInput(const framework::InferShapeContext &ctx,
                               std::string input_name) {
  auto shape = ctx.Attrs().Get<std::vector<int>>("fused_reshape_" + input_name);
  auto axis =
      ctx.Attrs().Get<std::vector<int>>("fused_transpose_" + input_name);
  auto dim = ctx.GetInputDim(input_name);

  PADDLE_ENFORCE_GT(dim.size(),
                    0,
                    platform::errors::InvalidArgument(
                        "The Input(%s) has not been initialized properly. The "
                        "shape of Input(%s) = [%s].",
                        input_name,
                        input_name,
                        dim));

  if (!shape.empty() && !axis.empty()) {
    dim = dim.reshape(shape).transpose(axis);
  }
  return dim;
}
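// Illustrative example (hypothetical attribute values): an input of shape
// [6, 4] with fused_reshape_X = {2, 3, 4} and fused_transpose_X = {1, 0, 2}
// is first reshaped to [2, 3, 4] and then transposed to [3, 2, 4].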

template <typename DeviceContext, typename T>
class MatMulDoubleGradKernel : public framework::OpKernel<T> {
 public:
  void MatMul(const framework::ExecutionContext &context,
              const framework::Tensor &a,
              bool trans_a,
              const framework::Tensor &b,
              bool trans_b,
              bool flag,
              framework::Tensor *out) const {
    out->mutable_data<T>(context.GetPlace());
    auto blas = phi::funcs::GetBlas<DeviceContext, T>(context);
    auto mat_dim_a = phi::funcs::CreateMatrixDescriptor(a.dims(), 0, trans_a);
    auto mat_dim_b = phi::funcs::CreateMatrixDescriptor(b.dims(), 0, trans_b);

    int head_number = 1;
#if defined(PADDLE_WITH_MKLML) && !defined(PADDLE_WITH_CUDA) && \
    !defined(PADDLE_WITH_HIP)
    head_number = context.Attr<int>("head_number");
#endif

    if (head_number <= 1 && a.dims().size() == 3 && b.dims().size() <= 2) {
      // Fold the batch dimension only when trans_a is false; if it were true,
      // the extra transpose would cost too much time.
      if (!trans_a) {
        mat_dim_a.height_ *= mat_dim_a.batch_size_;
        mat_dim_a.batch_size_ = 0;
      }
    }
    blas.MatMul(a,
                mat_dim_a,
                b,
                mat_dim_b,
                static_cast<T>(context.Attr<float>("alpha")),
                out,
                static_cast<T>(flag));
  }

  void CalcInputGrad(const framework::ExecutionContext &context,
                     const framework::Tensor &a,
                     bool trans_a,
                     bool is_fold_init_dims_a,
                     const framework::Tensor &b,
                     bool trans_b,
                     bool is_fold_init_dims_b,
                     bool flag,
                     framework::Tensor *out) const {
    if (out == nullptr) return;
    bool need_combine = (a.dims().size() == 3 || b.dims().size() == 3) &&
                        out->dims().size() == 2;
    if (!need_combine) {
      MatMul(context, a, trans_a, b, trans_b, flag, out);
    } else {
      auto &ctx = context.template device_context<DeviceContext>();
      MatMul(
          context,
          is_fold_init_dims_a ? FoldInitDims(a)
                              : FoldHeadAndLastDims<DeviceContext, T>(ctx, a),
          trans_a,
          is_fold_init_dims_b ? FoldInitDims(b)
                              : FoldHeadAndLastDims<DeviceContext, T>(ctx, b),
          trans_b,
          flag,
          out);
    }
  }

  void Compute(const framework::ExecutionContext &context) const override {
    auto x = *context.Input<framework::Tensor>("X");
    auto y = *context.Input<framework::Tensor>("Y");
    auto dout = *context.Input<framework::LoDTensor>("DOut");
    auto *ddx = context.Input<framework::LoDTensor>("DDX");
    auto *ddy = context.Input<framework::LoDTensor>("DDY");

    auto *dx = context.Output<framework::LoDTensor>("DX");
    auto *dy = context.Output<framework::LoDTensor>("DY");
    auto *ddout = context.Output<framework::LoDTensor>("DDOut");

    bool transpose_x = context.Attr<bool>("transpose_X");
    bool transpose_y = context.Attr<bool>("transpose_Y");

    ReshapeXYOutIntoMatrixSequence(&x, &y, &dout, transpose_x, transpose_y);

    framework::DDim dx_dims;
    if (dx) {
      dx_dims = dx->dims();
      if (dx_dims != x.dims()) {
        dx->Resize(x.dims());
      }
    }

    framework::DDim dy_dims;
    if (dy) {
      dy_dims = dy->dims();
      if (dy_dims != y.dims()) {
        dy->Resize(y.dims());
      }
    }

    framework::DDim ddout_dims;
    if (ddout) {
      ddout_dims = ddout->dims();
      if (ddout_dims != dout.dims()) {
        ddout->Resize(dout.dims());
      }
    }

    bool ddout_flag = false;
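    // ddout accumulates two terms, roughly ddout = ddx * y + x * ddy (up to
    // the configured transposes). The flag below is forwarded as the GEMM
    // beta: the first contribution overwrites ddout (flag == false, beta = 0)
    // and the second one is added on top (flag == true, beta = 1).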
    if (ddx) {
      auto ddx_mat = *ddx;
      if (ddx_mat.dims() != x.dims()) {
        ddx_mat.Resize(x.dims());
      }
      if (dy) {
        if (transpose_x && transpose_y) {
          // dy = dout' * ddx'
          CalcInputGrad(
              context, dout, true, true, ddx_mat, true, false, false, dy);
        } else if (transpose_x) {
          // dy = ddx * dout
          CalcInputGrad(
              context, ddx_mat, false, false, dout, false, true, false, dy);
        } else if (transpose_y) {
          // dy = dout' * ddx
          CalcInputGrad(
              context, dout, true, true, ddx_mat, false, true, false, dy);
        } else {
          // dy = ddx' * dout
          CalcInputGrad(
              context, ddx_mat, true, true, dout, false, true, false, dy);
        }
      }

      if (ddout) {
        CalcInputGrad(context,
                      ddx_mat,
                      transpose_x,
                      true,
                      y,
                      transpose_y,
                      false,
                      ddout_flag,
                      ddout);
        ddout_flag = true;
      }
    }

    if (ddy) {
      auto ddy_mat = *ddy;
      if (ddy_mat.dims() != y.dims()) {
        ddy_mat.Resize(y.dims());
      }
      if (dx) {
        if (transpose_x && transpose_y) {
          // dx = ddy' * dout'
          CalcInputGrad(
              context, ddy_mat, true, true, dout, true, false, false, dx);
        } else if (transpose_x) {
          // dx = ddy * dout'
          CalcInputGrad(
              context, ddy_mat, false, false, dout, true, false, false, dx);
        } else if (transpose_y) {
          // dx = dout * ddy
          CalcInputGrad(
              context, dout, false, false, ddy_mat, false, true, false, dx);
        } else {
          // dx = dout * ddy'
          CalcInputGrad(
              context, dout, false, false, ddy_mat, true, false, false, dx);
        }
      }

      if (ddout) {
        CalcInputGrad(context,
                      x,
                      transpose_x,
                      true,
                      ddy_mat,
                      transpose_y,
                      false,
                      ddout_flag,
                      ddout);
      }
    }

    if (dx) {
      if (dx_dims != x.dims()) {
        dx->Resize(dx_dims);
      }
    }

    if (dy) {
      if (dy_dims != y.dims()) {
        dy->Resize(dy_dims);
      }
    }

    if (ddout) {
      if (ddout_dims != dout.dims()) {
        ddout->Resize(ddout_dims);
      }
    }
  }
};

class MatMulOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(framework::InferShapeContext *context) const override {
    OP_INOUT_CHECK(context->HasInput("X"), "Input", "X", "matmul");
    OP_INOUT_CHECK(context->HasInput("Y"), "Input", "Y", "matmul");
    OP_INOUT_CHECK(context->HasOutput("Out"), "Output", "Out", "matmul");

    auto dim_x = GetDimForInput(*context, "X");
    auto dim_y = GetDimForInput(*context, "Y");

#ifdef PADDLE_WITH_MKLDNN
    // (jczaja): For NHWC execution the output shape needs to be computed as
    // if we did y*x instead of x*y.
    bool channelwise_onednn =
        context->IsRunMKLDNNKernel() &&
        (platform::MKLDNNDeviceContext::tls().get_cur_paddle_data_layout() ==
         framework::DataLayout::kNHWC);
    if (channelwise_onednn) {
      std::swap(dim_x, dim_y);
    }
#endif

    auto mat_dim_x = phi::funcs::CreateMatrixDescriptor(
        RowMatrixFromVector(dim_x),
        0,
        context->Attrs().Get<bool>("transpose_X"));
    auto mat_dim_y = phi::funcs::CreateMatrixDescriptor(
        ColumnMatrixFromVector(dim_y),
        0,
        context->Attrs().Get<bool>("transpose_Y"));

    if (mat_dim_x.width_ == -1) {
      mat_dim_x.width_ = mat_dim_y.height_;
    }
    if (mat_dim_y.height_ == -1) {
      mat_dim_y.height_ = mat_dim_x.width_;
    }

    if (context->IsRuntime()) {
      PADDLE_ENFORCE_EQ(
          mat_dim_x.batch_size_ == mat_dim_y.batch_size_ ||
              mat_dim_x.batch_size_ == 0 || mat_dim_y.batch_size_ == 0,
          true,
          platform::errors::InvalidArgument(
              "The batch size of the two matrices should be equal, or "
              "at least one is zero.\n"
              "But received X's shape: %s, Y's shape: %s.",
              DumpMatrixShape(mat_dim_x).c_str(),
              DumpMatrixShape(mat_dim_y).c_str()));
    }
    int64_t dim_out_y = mat_dim_y.width_;
#if defined(PADDLE_WITH_MKLML) && !defined(PADDLE_WITH_CUDA) && \
    !defined(PADDLE_WITH_HIP)
    int head_number = context->Attrs().Get<int>("head_number");
    bool split_vertical_y = (mat_dim_x.width_ != mat_dim_y.height_);
    if (context->IsRuntime()) {
      PADDLE_ENFORCE_LE(
          head_number,
          mat_dim_x.width_,
          platform::errors::InvalidArgument(
              "Unsatisfied mkl acceleration library requirements: "
              "The number of heads "
              "(%d) must be equal to X's width. But received X's shape: %s.",
              head_number,
              DumpMatrixShape(mat_dim_x).c_str()));

      if (!split_vertical_y && head_number > 0) {
        dim_out_y = head_number * mat_dim_y.width_;
      }
    }
#else
    PADDLE_ENFORCE_EQ(mat_dim_x.width_,
                      mat_dim_y.height_,
                      platform::errors::InvalidArgument(
                          "Input X's width should be equal to the Y's height, "
                          "but received X's shape: [%s], "
                          "Y's shape: [%s].",
                          dim_x,
                          dim_y));
#endif

    std::vector<int64_t> dim_out;
    if (mat_dim_x.batch_size_ != 0) {
      dim_out = phi::vectorize(dim_x);
      dim_out[dim_out.size() - 2] = mat_dim_x.height_;
      dim_out[dim_out.size() - 1] = dim_out_y;
    } else if (mat_dim_y.batch_size_ != 0) {
      dim_out = phi::vectorize(dim_y);
      dim_out[dim_out.size() - 2] = mat_dim_x.height_;
      dim_out[dim_out.size() - 1] = dim_out_y;
    } else {
      dim_out = {mat_dim_x.height_, dim_out_y};
    }

    if (dim_x.size() == 1 && dim_out[dim_out.size() - 2] == 1) {
      std::swap(dim_out[dim_out.size() - 2], dim_out[dim_out.size() - 1]);
      dim_out.resize(dim_out.size() - 1);
    }

    if (dim_y.size() == 1 && dim_out[dim_out.size() - 1] == 1) {
      dim_out.resize(dim_out.size() - 1);
    }

    if (dim_out.empty()) {
      dim_out = {1};
    }

    framework::DDim ddim_out = phi::make_ddim(dim_out);

#ifdef PADDLE_WITH_MKLDNN
    auto shape = context->Attrs().Get<std::vector<int>>("fused_reshape_Out");
    auto axis = context->Attrs().Get<std::vector<int>>("fused_transpose_Out");

    if (!shape.empty() && !axis.empty()) {
      ddim_out = ddim_out.transpose(axis).reshape(shape);
    }
#endif
    context->SetOutputDim("Out", ddim_out);
    context->ShareLoD("X", "Out");
  }

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    auto input_data_type =
        OperatorWithKernel::IndicateOrPromoteVarDataTypes(ctx, "X", "Y");

#ifdef PADDLE_WITH_MKLDNN
    using dnnl::memory;
    if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
      return framework::OpKernelType(input_data_type,
                                     ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name,
      const framework::Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const {
    if (framework::IsComplexType(expected_kernel_type.data_type_)) {
      // only promote the input types when the op contains a complex input
      return framework::OpKernelType(
          framework::TransToProtoVarType(tensor.dtype()),
          tensor.place(),
          tensor.layout());
    } else {
#ifdef PADDLE_WITH_MKLDNN
      // When matmul is the first oneDNN op in a chain (i.e. the previous op
      // was not an oneDNN op), we also need to rotate the shape NHWC -> NCHW.
      if ((expected_kernel_type.data_layout_ ==
           framework::DataLayout::kMKLDNN) &&
          (tensor.layout() != framework::DataLayout::kMKLDNN) &&
          paddle::platform::MKLDNNDeviceContext::tls()
                  .get_cur_paddle_data_layout() ==
              framework::DataLayout::kNHWC) {
        return framework::OpKernelType(expected_kernel_type.data_type_,
                                       tensor.place(),
                                       framework::DataLayout::kNHWC);
      }
#endif
      return framework::OpKernelType(
          expected_kernel_type.data_type_, tensor.place(), tensor.layout());
    }
  }
};

class MatMulOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "The first input of MatMul op");
    AddInput("Y", "The second input of MatMul op");
    AddOutput("Out", "The output of MatMul op");
    AddAttr<bool>("transpose_X",
                  R"DOC(If true, use the transpose of `X`.
        )DOC")
        .SetDefault(false);
    AddAttr<bool>("transpose_Y",
                  R"DOC(If true, use the transpose of `Y`.
        )DOC")
        .SetDefault(false);
    AddAttr<float>("alpha", "The scale of Out").SetDefault(1.0f);
    AddAttr<bool>(
        "use_mkldnn",
        "(bool, default false) Indicates if MKL-DNN kernel will be used")
        .SetDefault(false)
        .AsExtra();
    AddAttr<std::vector<int>>("fused_reshape_X",
                              R"DOC(Shape of fused reshape of `X` input.)DOC")
        .SetDefault({})
        .AsExtra();
    AddAttr<std::vector<int>>("fused_reshape_Y",
                              R"DOC(Shape of fused reshape of `Y` input.)DOC")
        .SetDefault({})
        .AsExtra();
    AddAttr<std::vector<int>>("fused_transpose_X",
                              R"DOC(Axis of fused transpose of `X` input.)DOC")
        .SetDefault({})
        .AsExtra();
    AddAttr<std::vector<int>>("fused_transpose_Y",
                              R"DOC(Axis of fused transpose of `Y` input.)DOC")
        .SetDefault({})
        .AsExtra();
    AddAttr<std::vector<int>>(
        "fused_reshape_Out",
        R"DOC(When the MKLDNN MatMul_transpose_reshape fuse is activated,
              this is the shape attribute of the fused reshape for the `Out` output.)DOC")
        .SetDefault({})
        .AsExtra();
    AddAttr<std::vector<int>>(
        "fused_transpose_Out",
        R"DOC(When the MKLDNN MatMul_transpose_reshape fuse is activated,
              this is the axis attribute of the fused transpose for the `Out` output.)DOC")
        .SetDefault({})
        .AsExtra();
    AddAttr<bool>(
        "use_quantizer",
        "(bool, default false) "
        "This parameter is no longer used. Use 'mkldnn_data_type' instead.")
        .SetDefault(false)
        .AsExtra();
    AddAttr<std::string>(
        "mkldnn_data_type",
        "(string, default \"float32\"). Data type of mkldnn kernel")
        .SetDefault("float32")
        .InEnum({"float32", "int8", "bfloat16"})
        .AsExtra();
    /* int8 parameters */
    AddAttr<float>("Scale_x",
                   "(float, default 1.0f), The quantize scale of X tensor")
        .SetDefault(1.0f)
        .AsExtra();
    AddAttr<float>("Scale_y",
                   "(float, default 1.0f), The quantize scale of Y tensor")
        .SetDefault(1.0f)
        .AsExtra();
    AddAttr<float>("Scale_out",
                   "(float, default 1.0f), The quantize scale of output data")
        .SetDefault(1.0f)
        .AsExtra();
    AddAttr<bool>("force_fp32_output",
                  "(bool, default false) Force INT8 kernel output FP32, only "
                  "used in MKL-DNN INT8")
        .SetDefault(false)
        .AsExtra();

#if defined(PADDLE_WITH_MKLML) && !defined(PADDLE_WITH_CUDA) && \
    !defined(PADDLE_WITH_HIP)
    AddAttr<int>("head_number", "The number of heads of the matrix")
        .SetDefault(1);
#endif
    AddComment(R"DOC(
MatMul Operator.
This operator is used to perform (batched) matrix multiplication
over the last two dimensions of the input tensors `X` and `Y`.
If a transpose flag is specified, the last two dimensions of the
tensor are transposed. If the tensor is rank-1 of shape [D], then
for `X` it is treated as [1, D] in nontransposed form and as [D, 1]
in transposed form, whereas for `Y` it is the opposite: It is treated
as [D, 1] in nontransposed form and as [1, D] in transposed form.
Examples without transpose:
- X: [K], Y: [K] => Out: [1]
- X: [K], Y: [K, N] => Out: [N]
- X: [B, M, K], Y: [K] => Out: [B, M]
- X: [M, K], Y: [B, K, N] => Out: [B, M, N]
- X: [B, M, K], Y: [B, K, N] => Out: [B, M, N]
- X: [B, ..., M, K], Y: [B, ..., K, N] => Out: [B, ..., M, N]
Example of matrix multiplication with head_number of H
- X: [B, M, K], Y: [B, K, N] => Out: [B, M, H * N]
The behavior is designed to be similar to the `numpy.matmul` function.
The differences are:
- When the rank of the input data is less than or equal to 3, it
  is similar to the `numpy.matmul` function.
- When the rank of the input is greater than 3, the rank of X and
  Y must be equal, and the first `rank - 2` dimensions must be equal.
- We add `transpose_X` and `transpose_Y` flags.
- We add the `head_number` attribute, which is used to multiply two matrices
  head by head, and eventually concatenates the outputs of the several
  (head_number) small matrix multiplications.
Both the input `X` and `Y` can carry the LoD (Level of Details) information,
or not. But the output only shares the LoD information with input `X`.
)DOC");
  }
};

class MatMulOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(framework::InferShapeContext *context) const override {
    OP_INOUT_CHECK(context->HasInput("X"), "Input", "X", "matmul");
    OP_INOUT_CHECK(context->HasInput("Y"), "Input", "Y", "matmul");
    OP_INOUT_CHECK(context->HasInput(framework::GradVarName("Out")),
                   "Input",
                   "Out@GRAD",
                   "matmul");
    auto x_dims = context->GetInputDim("X");
    auto y_dims = context->GetInputDim("Y");

    auto x_grad_name = framework::GradVarName("X");
    auto y_grad_name = framework::GradVarName("Y");

    if (context->HasOutput(x_grad_name)) {
      context->SetOutputDim(x_grad_name, x_dims);
    }
    if (context->HasOutput(y_grad_name)) {
      context->SetOutputDim(y_grad_name, y_dims);
    }
  }

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    auto input_data_type =
        OperatorWithKernel::IndicateOrPromoteVarDataTypes(ctx, "X", "Y");

#ifdef PADDLE_WITH_MKLDNN
    if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
      return framework::OpKernelType(input_data_type,
                                     ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }
};

template <typename T>
class MatMulOpGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> retv) const override {
    retv->SetType("matmul_grad");
    retv->SetInput("X", this->Input("X"));
    retv->SetInput("Y", this->Input("Y"));
    retv->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
    retv->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
    retv->SetOutput(framework::GradVarName("Y"), this->InputGrad("Y"));
    retv->SetAttrMap(this->Attrs());
  }
};

class MatMulOpDoubleGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(framework::InferShapeContext *context) const override {
    OP_INOUT_CHECK(context->HasInput("X"), "Input", "X", "matmul");
    OP_INOUT_CHECK(context->HasInput("Y"), "Input", "Y", "matmul");
    OP_INOUT_CHECK(context->HasInput("DOut"), "Input", "DOut", "matmul");

    if (context->HasOutput("DX") && context->HasInput("DDY")) {
      context->ShareDim("X", "DX");
    }

    if (context->HasOutput("DY") && context->HasInput("DDX")) {
      context->ShareDim("Y", "DY");
    }

    if (context->HasOutput("DDOut") &&
        (context->HasInput("DDY") || context->HasInput("DDX"))) {
      context->ShareDim("DOut", "DDOut");
    }
  }
};

template <typename T>
class MatMulOpDoubleGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> retv) const override {
    retv->SetType("matmul_grad_grad");
    retv->SetInput("X", this->Input("X"));
    retv->SetInput("Y", this->Input("Y"));
    retv->SetInput("DOut", this->Input(framework::GradVarName("Out")));
    retv->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));
    retv->SetInput("DDY", this->OutputGrad(framework::GradVarName("Y")));

    auto ddx = this->OutputGrad(framework::GradVarName("X"));
    auto ddy = this->OutputGrad(framework::GradVarName("Y"));

    if (!ddx.empty() || !ddy.empty()) {
      retv->SetOutput("DDOut", this->InputGrad(framework::GradVarName("Out")));
    }
    retv->SetOutput(
        "DX", ddy.empty() ? this->EmptyInputGrad() : this->InputGrad("X"));
    retv->SetOutput(
        "DY", ddx.empty() ? this->EmptyInputGrad() : this->InputGrad("Y"));

    retv->SetAttrMap(this->Attrs());
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OPERATOR(matmul,
                  ops::MatMulOp,
                  ops::MatMulOpMaker,
                  ops::MatMulOpGradMaker<paddle::framework::OpDesc>,
                  ops::MatMulOpGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(matmul_grad,
                  ops::MatMulOpGrad,
                  ops::MatMulOpDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::MatMulOpDoubleGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(matmul_grad_grad, ops::MatMulOpDoubleGrad);
REGISTER_OP_CPU_KERNEL(matmul,
                       ops::MatMulKernel<phi::CPUContext, float>,
                       ops::MatMulKernel<phi::CPUContext, double>);
REGISTER_OP_CPU_KERNEL(matmul_grad,
                       ops::MatMulGradKernel<phi::CPUContext, float>,
                       ops::MatMulGradKernel<phi::CPUContext, double>);

REGISTER_OP_CPU_KERNEL(matmul_grad_grad,
                       ops::MatMulDoubleGradKernel<phi::CPUContext, float>,
                       ops::MatMulDoubleGradKernel<phi::CPUContext, double>);

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
REGISTER_OP_CUDA_KERNEL(
    matmul,
    ops::MatMulKernel<phi::GPUContext, float>,
    ops::MatMulKernel<phi::GPUContext, double>,
    ops::MatMulKernel<phi::GPUContext, paddle::platform::float16>);
REGISTER_OP_CUDA_KERNEL(
    matmul_grad,
    ops::MatMulGradKernel<phi::GPUContext, float>,
    ops::MatMulGradKernel<phi::GPUContext, double>,
    ops::MatMulGradKernel<phi::GPUContext, paddle::platform::float16>);
REGISTER_OP_CUDA_KERNEL(matmul_grad_grad,
                        ops::MatMulDoubleGradKernel<phi::GPUContext, float>,
                        ops::MatMulDoubleGradKernel<phi::GPUContext, double>);
#endif

REGISTER_OP_VERSION(matmul).AddCheckpoint(
    R"ROC(Register matmul for adding the attribute of
       fused_reshape_Y)ROC",
    paddle::framework::compatible::OpVersionDesc().NewAttr(
        "fused_reshape_Y",
        "In order to support the function of fused the input Y "
        " and input X into the input X when "
        "using the operator of matmul, and get raw shape of input Y.",
        std::vector<int>{}));