/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/tril_triu_op.h"
#include <memory>

namespace paddle {
namespace operators {

// Forward op for tril/triu: validates the I/O wiring and rank, then
// propagates the input shape (and LoD) unchanged to the output.
class TrilTriuOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    // Both the input and output slots must be connected before we can
    // infer anything.
    PADDLE_ENFORCE_EQ(
        ctx->HasInput("X"), true,
        platform::errors::NotFound("Input(X) of TrilTriuOp is not found."));
    PADDLE_ENFORCE_EQ(
        ctx->HasOutput("Out"), true,
        platform::errors::NotFound("Output(Out) of TrilTriuOp is not found."));
    // Taking a triangular part only makes sense for a matrix (or a batch
    // of matrices), so at least two dimensions are required.
    const auto& input_dims = ctx->GetInputDim("X");
    PADDLE_ENFORCE_GE(input_dims.size(), 2,
                      platform::errors::InvalidArgument(
                          "Input(X)'s rank must be at least 2 in TrilTriuOp."));
    // The op only masks elements; the shape is preserved exactly.
    ctx->SetOutputDim("Out", input_dims);
    ctx->ShareLoD("X", /*->*/ "Out");
  }
};

// Declares the proto for tril_triu: one input tensor, one output tensor,
// an integer `diagonal` offset (default 0) and a required boolean `lower`
// selecting tril (true) vs triu (false).
class TrilTriuOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "Tensor, the input of tril_triu op");
    AddOutput("Out",
              "Tensor, the output tensor, with the same shape and data type as "
              "input(x)");
    AddAttr<int>("diagonal", "int number, the diagonal to consider.")
        .SetDefault(0);
    // No default: callers must state explicitly whether they want the
    // lower or the upper triangular part.
    AddAttr<bool>("lower",
                  "bool number, lower triangular or upper triangular.");
    AddComment(R"DOC(
TrilTriu Operator.

The tril operator returns the lower triangular part of the matrix (2-D tensor)
or batch of matrices $input$. The lower triangular part of the matrix is defined 
as the elements on and below the diagonal.
The triu operator returns the upper triangular part of a matrix (2-D tensor) 
or batch of matrices $input$. The upper triangular part of the matrix is defined
as the elements on and above the diagonal.
The other elements of the result tensor out are set to 0.

The argument diagonal controls which diagonal to consider, default value is 0.

)DOC");
  }
};

// Gradient op for tril/triu: dX has exactly the shape of dOut (the mask
// is elementwise, so the gradient keeps the forward shape).
class TrilTriuGradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    // Fix: the original messages blamed "TrilTriuOp"; these checks belong
    // to the grad op, so name it correctly in the diagnostics.
    PADDLE_ENFORCE_EQ(
        ctx->HasInput(framework::GradVarName("Out")), true,
        platform::errors::NotFound(
            "Input(Out@GRAD) of TrilTriuGradOp should not be null"));
    PADDLE_ENFORCE_EQ(
        ctx->HasOutput(framework::GradVarName("X")), true,
        platform::errors::NotFound(
            "Output(X@Grad) of TrilTriuGradOp should not be null"));
    ctx->SetOutputDim(framework::GradVarName("X"),
                      ctx->GetInputDim(framework::GradVarName("Out")));
  }
};

// Builds the tril_triu_grad op description from the forward op: the
// gradient consumes dOut and produces dX, reusing the forward attributes.
template <typename T>
class TrilTriuGradOpMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> grad_op) const override {
    grad_op->SetType("tril_triu_grad");
    // The gradient kernel reads the same attributes as the forward pass.
    grad_op->SetAttrMap(this->Attrs());
    grad_op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
    grad_op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
102
namespace plat = paddle::platform;
W
WuHaobo 已提交
103 104 105 106 107
REGISTER_OPERATOR(tril_triu, ops::TrilTriuOp, ops::TrilTriuOpMaker,
                  ops::TrilTriuGradOpMaker<paddle::framework::OpDesc>,
                  ops::TrilTriuGradOpMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(tril_triu_grad, ops::TrilTriuGradOp);
REGISTER_OP_CPU_KERNEL(
L
liuyuhui 已提交
108 109
    tril_triu, ops::TrilTriuOpKernel<paddle::platform::CPUDeviceContext, bool>,
    ops::TrilTriuOpKernel<paddle::platform::CPUDeviceContext, float>,
W
WuHaobo 已提交
110 111
    ops::TrilTriuOpKernel<paddle::platform::CPUDeviceContext, double>,
    ops::TrilTriuOpKernel<paddle::platform::CPUDeviceContext, int>,
112 113
    ops::TrilTriuOpKernel<paddle::platform::CPUDeviceContext, int64_t>,
    ops::TrilTriuOpKernel<paddle::platform::CPUDeviceContext, plat::float16>);
W
WuHaobo 已提交
114 115
REGISTER_OP_CPU_KERNEL(
    tril_triu_grad,
L
liuyuhui 已提交
116
    ops::TrilTriuGradOpKernel<paddle::platform::CPUDeviceContext, bool>,
W
WuHaobo 已提交
117 118 119
    ops::TrilTriuGradOpKernel<paddle::platform::CPUDeviceContext, float>,
    ops::TrilTriuGradOpKernel<paddle::platform::CPUDeviceContext, double>,
    ops::TrilTriuGradOpKernel<paddle::platform::CPUDeviceContext, int>,
120 121 122
    ops::TrilTriuGradOpKernel<paddle::platform::CPUDeviceContext, int64_t>,
    ops::TrilTriuGradOpKernel<paddle::platform::CPUDeviceContext,
                              plat::float16>);