/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Copyright (c) 2022 NVIDIA Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/fused/fused_gemm_epilogue_op.h"

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h"

namespace paddle {
namespace operators {

class FusedGemmEpilogueOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(framework::InferShapeContext* ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "FusedGemmEpilogueOp");
    OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "FusedGemmEpilogueOp");
    OP_INOUT_CHECK(
        ctx->HasInput("Bias"), "Input", "Bias", "FusedGemmEpilogueOp");
    OP_INOUT_CHECK(
        ctx->HasOutput("Out"), "Output", "Out", "FusedGemmEpilogueOp");

    auto x_dims = ctx->GetInputDim("X");
    auto y_dims = ctx->GetInputDim("Y");
    auto bias_dims = ctx->GetInputDim("Bias");

    auto trans_x = ctx->Attrs().Get<bool>("trans_x");
    auto trans_y = ctx->Attrs().Get<bool>("trans_y");
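
    // Shape contract (a worked example, not exhaustive): X: [2, 3, 4] with
    // trans_x = false is flattened to [6, 4], so K = 4; Y: [4, 5] with
    // trans_y = false gives K = 4 and N = 5; Bias must then be [5] and the
    // output Out is [2, 3, 5].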

    PADDLE_ENFORCE_EQ(
        y_dims.size(),
        2,
        platform::errors::InvalidArgument(
            "The Input tensor Y's dimension of FusedGemmEpilogueOp "
            "should be 2, but got %d.",
            y_dims.size()));

    PADDLE_ENFORCE_GE(
        x_dims.size(),
        2,
        platform::errors::InvalidArgument(
            "The Input tensor X's dimension of FusedGemmEpilogueOp "
            "should be >= 2, but got %d.",
            x_dims.size()));

    PADDLE_ENFORCE_EQ(
        bias_dims.size(),
        1,
        platform::errors::InvalidArgument(
            "The Input tensor bias's dimension of FusedGemmEpilogueOp "
            "should be 1, but got %d.",
            bias_dims.size()));

    PADDLE_ENFORCE_EQ(bias_dims[0],
                      trans_y ? y_dims[0] : y_dims[1],
                      platform::errors::InvalidArgument(
                          "The Input tensor bias's dimension 0 should equal "
                          "Y's output dimension N, but got bias's shape = "
                          "[%s] and Y's shape = [%s].",
                          bias_dims,
                          y_dims));

    auto x_mat_dims =
        phi::flatten_to_2d(x_dims, trans_x ? 1 : x_dims.size() - 1);
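    // flatten_to_2d(x_dims, axis) collapses x_dims into
    // [prod(x_dims[:axis]), prod(x_dims[axis:])]; e.g. [d0, d1, d2, d3]
    // becomes [d0*d1*d2, d3] when trans_x = false and [d0, d1*d2*d3] when
    // trans_x = true.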

    int K_from_x = trans_x ? x_mat_dims[0] : x_mat_dims[1];
    int K_from_y = trans_y ? y_dims[1] : y_dims[0];

    PADDLE_ENFORCE_EQ(
        K_from_x,
        K_from_y,
        platform::errors::InvalidArgument(
            "The reduction dimension K of X should be equal to that of Y. "
            "But received K_from_x = %d, K_from_y = %d.",
            K_from_x,
            K_from_y));

    auto activation = ctx->Attrs().Get<std::string>("activation");

    if ((activation != "relu") && (activation != "gelu") &&
        (activation != "none")) {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "The activation attribute of fused_gemm_epilogue op should be"
          " one of {\"none\", \"relu\", \"gelu\"}. But received %s.",
          activation));
    }

    if (activation == "none" && ctx->HasOutput("ReserveSpace")) {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "The ReserveSpace would not be used when activation = \"none\""));
    }

    // cublasLt's restriction for auxiliary.
    if (ctx->HasOutput("ReserveSpace") && activation != "none") {
      int min_size_of_n = activation == "relu" ? 128 : 8;
      int N_size = trans_y ? y_dims[0] : y_dims[1];
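      // Worked example: with activation = "relu", N must be a multiple of
      // 128 (N = 256 passes, N = 192 fails); with "gelu", a multiple of 8.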
      PADDLE_ENFORCE_EQ(N_size % min_size_of_n,
                        0,
                        platform::errors::InvalidArgument(
                            "The output dimension N (X(MxK) * Y(KxN) = C(MxN)) "
                            "should be a multiple of %d when ReserveSpace is "
                            "given and activation = %s, but got N = %d.",
                            min_size_of_n,
                            activation,
                            N_size));
    }

    std::vector<int64_t> out_dims;
    out_dims.reserve(static_cast<size_t>(x_dims.size()));
    if (trans_x) {
      for (int i = 1; i < x_dims.size(); ++i) out_dims.push_back(x_dims[i]);
    } else {
      for (int i = 0; i < x_dims.size() - 1; ++i) out_dims.push_back(x_dims[i]);
    }

    if (trans_y) {
      out_dims.push_back(y_dims[0]);
    } else {
      out_dims.push_back(y_dims[1]);
    }
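    // e.g. X: [2, 3, 4] (trans_x = false) and Y: [4, 5] (trans_y = false)
    // give out_dims = [2, 3, 5].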

    ctx->SetOutputDim("Out", phi::make_ddim(out_dims));
    // Note (Ming Huang): Reserve space of relu is a bit-mask,
    // which cannot pass nan_and_inf checking if shape is set.
    if (activation == "gelu" && ctx->HasOutput("ReserveSpace")) {
      ctx->SetOutputDim("ReserveSpace", phi::make_ddim(out_dims));
    }
  }

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    // The kernel is selected by X's data type alone; layout and library are
    // left unconstrained.
    framework::LibraryType library = framework::LibraryType::kPlain;
    phi::DataLayout layout = phi::DataLayout::kAnyLayout;
    auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
    return framework::OpKernelType(data_type, ctx.GetPlace(), layout, library);
  }
};

class FusedGemmEpilogueOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "The input tensor X of Out = Act((X * Y) + Bias).");
    AddInput("Y", "The input tensor Y of Out = Act((X * Y) + Bias).");
    AddInput("Bias", "The input tensor bias of Out = Act((X * Y) + Bias).");

    AddOutput("Out", "The output tensor Out of Out = Act((X * Y) + Bias).");
    AddOutput("ReserveSpace",
              R"DOC(Reserve GPU space to place
        auxiliary data pointer. It is used to pass auxiliary data pointer
        for fused_gemm_epilogue op. If not given (empty string), the
        auxiliary mode would not be enabled.)DOC")
        .AsDispensable()
        .AsExtra();

    AddAttr<bool>(
        "trans_x",
        R"DOC((bool, default false), Whether to transpose input tensor X
    or not. The input tensor X could have more than two dimensions. When
    trans_x is set to true, X is fully reversed. For instance: X with shape
    [d0, d1, d2, d3] -> [d3, d2, d1, d0].)DOC")
        .SetDefault(false);
    AddAttr<bool>(
        "trans_y",
        R"DOC((bool, default false), Whether to transpose input tensor Y
    or not. The input tensor Y should be two-dimensional. When
    trans_y is set to true, Y is transposed. For instance: Y with shape
    [d0, d1] -> [d1, d0].)DOC")
        .SetDefault(false);

    AddAttr<std::string>(
        "activation",
        R"DOC((string, default none), The activation function. It could be
    one of {none, relu, gelu}. When none is given, Act is a no-op.)DOC")
        .SetDefault("none");

    AddComment(R"DOC(
FusedGemmEpilogue Operator
This operator is used to perform Activeation(Elementwise_add(Matmul(X, Y), bias)).
It is equal to paddle.nn.Linear + Activation (None, ReLU or GeLU).

Note:
X could be more than two dimension and would be flatten to 2D for computing.
X with shape [d0, d1, d2, d3] -> X_2D with shape [d0*d1*d2, d3]
)DOC");
  }
};
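
// For reference, a sketch of the fused forward computation in pseudocode
// (names are illustrative only):
//   X_2D = flatten_to_2d(X)                   // [M, K]
//   Out  = Act(X_2D * Y + broadcast(Bias))    // [M, N], Act in {id, relu, gelu}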

class FusedGemmEpilogueGradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(framework::InferShapeContext* ctx) const override {
    OP_INOUT_CHECK(
        ctx->HasInput("DOut"), "Input", "DOut", "FusedGemmEpilogueGradOp");
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "FusedGemmEpilogueGradOp");
    OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "FusedGemmEpilogueGradOp");
    OP_INOUT_CHECK(
        ctx->HasOutput("DY"), "Output", "DY", "FusedGemmEpilogueGradOp");

    auto dout_dims = ctx->GetInputDim("DOut");
    auto x_dims = ctx->GetInputDim("X");
    auto y_dims = ctx->GetInputDim("Y");

    auto trans_x = ctx->Attrs().Get<bool>("trans_x");
    auto trans_y = ctx->Attrs().Get<bool>("trans_y");

    PADDLE_ENFORCE_GE(
        dout_dims.size(),
        2,
        platform::errors::InvalidArgument(
            "The Input tensor DOut's dimension of FusedGemmEpilogueGradOp "
            "should be >= 2, but got %d.",
            dout_dims.size()));

    PADDLE_ENFORCE_EQ(
        y_dims.size(),
        2,
        platform::errors::InvalidArgument(
            "The Input tensor Y's dimension of FusedGemmEpilogueGradOp "
            "should be 2, but got %d.",
            y_dims.size()));

    PADDLE_ENFORCE_GE(
        x_dims.size(),
        2,
        platform::errors::InvalidArgument(
            "The Input tensor X's dimension of FusedGemmEpilogueGradOp "
            "should be >= 2, but got %d.",
            x_dims.size()));

    PADDLE_ENFORCE_EQ(
        dout_dims.size(),
        x_dims.size(),
        platform::errors::InvalidArgument(
            "The Input tensors DOut and X of FusedGemmEpilogueGradOp "
            "should have the same dimension, but got DOut's dim = %d "
            "and X's = %d.",
            dout_dims.size(),
            x_dims.size()));

    auto dout_mat_dims = phi::flatten_to_2d(dout_dims, dout_dims.size() - 1);

    auto x_mat_dims = phi::flatten_to_2d(x_dims, x_dims.size() - 1);
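    // In the flattened 2-D view the backward shape relations are
    // DOut: [M, N], X: [M, K], Y: [K, N] (before logical transposes), e.g.
    // DOut: [2, 3, 5] -> dout_mat_dims = [6, 5] and X: [2, 3, 4] ->
    // x_mat_dims = [6, 4].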

    PADDLE_ENFORCE_EQ(
        dout_mat_dims[1],
        trans_y ? y_dims[0] : y_dims[1],
        platform::errors::InvalidArgument(
            "The last dimension of DOut should be equal to Y's output "
            "dimension N. But received DOut[-1] = [%d], N = [%d].",
            dout_mat_dims[1],
            trans_y ? y_dims[0] : y_dims[1]));

    PADDLE_ENFORCE_EQ(
        dout_mat_dims[0],
        trans_x ? x_mat_dims[1] : x_mat_dims[0],
        platform::errors::InvalidArgument(
            "The first dimension of DOut should be equal to X's batch "
            "dimension M. But received DOut[0] = [%d], M = [%d].",
            dout_mat_dims[0],
            trans_x ? x_mat_dims[1] : x_mat_dims[0]));

    auto activation_grad = ctx->Attrs().Get<std::string>("activation_grad");
    if ((activation_grad != "relu_grad") && (activation_grad != "gelu_grad") &&
        (activation_grad != "none")) {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "The activation_grad attribute of fused_gemm_epilogue_grad op "
          "should be one of {\"none\", \"relu_grad\", \"gelu_grad\"}. "
          "But received %s.",
          activation_grad));
    }

    if (activation_grad != "none" && !ctx->HasInput("ReserveSpace")) {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "The ReserveSpace should not be empty when "
          "activation_grad is relu_grad or gelu_grad."));
    }

    if (ctx->HasOutput("DX")) {
      std::vector<int64_t> dx_dims;
      dx_dims.reserve(static_cast<size_t>(x_dims.size()));
      for (int i = 0; i < x_dims.size(); ++i) {
        dx_dims.push_back(x_dims[i]);
      }
      ctx->SetOutputDim("DX", phi::make_ddim(dx_dims));
    }

    std::vector<int64_t> dy_dims(y_dims.Get(), y_dims.Get() + y_dims.size());
    ctx->SetOutputDim("DY", phi::make_ddim(dy_dims));

    if (ctx->HasOutput("DBias")) {
      std::vector<int64_t> dbias_dims;
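      // DBias has length N (Y's output dimension), matching the forward Bias.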
      dbias_dims.push_back(trans_y ? y_dims[0] : y_dims[1]);
      ctx->SetOutputDim("DBias", phi::make_ddim(dbias_dims));
    }
  }

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    // As in the forward op, the kernel is selected by the data type alone,
    // here taken from DOut.
    framework::LibraryType library = framework::LibraryType::kPlain;
    phi::DataLayout layout = phi::DataLayout::kAnyLayout;
    auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "DOut");
    return framework::OpKernelType(data_type, ctx.GetPlace(), layout, library);
  }
};

class FusedGemmEpilogueGradOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("DOut",
             "The input grad tensor to Out of Out = (Act(X) * Y) + bias");
    AddInput("X", "The input tensor X of Out = (Act(X) * Y) + bias");
    AddInput("Y", "The input tensor Y of Out = (Act(X) * Y) + bias");
    AddInput("ReserveSpace",
             R"DOC(A GPU space to fetch
        auxiliary data pointer. It is used to pass auxiliary data pointer
        for fused_gemm_epilogue_grad op. If not given (empty string), the
        auxiliary mode would not be enabled.)DOC")
        .AsDispensable();

    AddOutput("DX", "The output grad tensor to X of Out = (Act(X) * Y) + bias.")
        .AsDispensable();
    AddOutput("DY",
              "The output grad tensor to Y of Out = (Act(X) * Y) + bias.");
    AddOutput("DBias",
              "The output grad tensor to bias of Out = (Act(X) * Y) + bias.")
        .AsDispensable();
    AddAttr<bool>(
        "trans_x",
        R"DOC((bool, default false), Whether to transpose input tensor X
    or not. The input tensor X could have more than two dimensions. When
    trans_x is set to true, X is fully reversed. For instance: X with shape
    [d0, d1, d2, d3] -> [d3, d2, d1, d0].)DOC")
        .SetDefault(false);
    AddAttr<bool>(
        "trans_y",
        R"DOC((bool, default false), Whether to transpose input tensor Y
    or not. The input tensor Y should be two-dimensional. When
    trans_y is set to true, Y is transposed. For instance: Y with shape
    [d0, d1] -> [d1, d0].)DOC")
        .SetDefault(false);

    AddAttr<std::string>(
        "activation_grad",
        R"DOC((string, default none), The backward activation function. It
    could be one of {none, relu_grad, gelu_grad}. When none is given, the
    backward Act is a no-op.)DOC")
        .SetDefault("none");

    AddComment(R"DOC(
FusedGemmEpilogueGrad Operator
This operator is used to perform backward of Elementwise_add(Matmul(Activeation(X), Y), bias).
It is equal to Activation (None, ReLU or GeLU) + paddle.nn.Linear.

Note:
X could be more than two dimension and would be flatten to 2D for computing.
X with shape [d0, d1, d2, d3] -> X_2D with shape [d0*d1*d2, d3]
)DOC");
  }
};

template <typename T>
class FusedGemmEpilogueOpGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    const auto& act_type = this->template Attr<std::string>("activation");

    op->SetType(this->ForwardOpType() + "_grad");
    op->SetInput("X", this->Input("X"));
    op->SetInput("Y", this->Input("Y"));
    if (act_type != "none") {
      // ReserveSpace is an output of the forward op, so it is fetched with
      // Output() rather than Input().
      op->SetInput("ReserveSpace", this->Output("ReserveSpace"));
    }
    op->SetInput("DOut", this->OutputGrad("Out"));

    op->SetOutput("DX", this->InputGrad("X"));
    op->SetOutput("DY", this->InputGrad("Y"));
    op->SetOutput("DBias", this->InputGrad("Bias"));

    op->SetAttrMap(this->Attrs());
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OPERATOR(
    fused_gemm_epilogue,
    ops::FusedGemmEpilogueOp,
    ops::FusedGemmEpilogueOpMaker,
    ops::FusedGemmEpilogueOpGradMaker<paddle::framework::OpDesc>,
    ops::FusedGemmEpilogueOpGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(fused_gemm_epilogue_grad,
                  ops::FusedGemmEpilogueGradOp,
                  ops::FusedGemmEpilogueGradOpMaker);