/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/prelu_op.h"

#include <memory>
#include <string>

namespace paddle {
namespace operators {

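// Shared by PReluOp and PReluGradOp. When the oneDNN kernel has been chosen
// for a model held in NHWC layout, plain (non-oneDNN) input tensors are
// reinterpreted as NHWC so that their shapes are rotated accordingly.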
framework::OpKernelType innerGetKernelTypeForVar(
    const Tensor &tensor, const framework::OpKernelType &expected_kernel_type) {
#ifdef PADDLE_WITH_MKLDNN
  auto isOneDNNKernelChosen =
      (expected_kernel_type.data_layout_ == framework::DataLayout::kMKLDNN);
  auto isNotOneDNNTensor = (tensor.layout() != framework::DataLayout::kMKLDNN);
  auto isModelNHWC =
      (paddle::platform::MKLDNNDeviceContext::tls()
           .get_cur_paddle_data_layout() == framework::DataLayout::kNHWC);
  // All inputs (including alpha) need shape rotating
  if (isOneDNNKernelChosen && isNotOneDNNTensor && isModelNHWC) {
    return framework::OpKernelType(expected_kernel_type.data_type_,
                                   tensor.place(),
                                   framework::DataLayout::kNHWC);
  }
#endif
  return framework::OpKernelType(expected_kernel_type.data_type_,
                                 tensor.place(), tensor.layout());
}

class PReluOp : public framework::OperatorWithKernel {
 public:
  PReluOp(const std::string &type, const framework::VariableNameMap &inputs,
          const framework::VariableNameMap &outputs,
          const framework::AttributeMap &attrs)
      : OperatorWithKernel(type, inputs, outputs, attrs) {}

  void InferShape(framework::InferShapeContext *ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "prelu");
    OP_INOUT_CHECK(ctx->HasInput("Alpha"), "Input", "Alpha", "prelu");
    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "prelu");

    auto x_dim = ctx->GetInputDim("X");
    std::string mode = ctx->Attrs().Get<std::string>("mode");
    if (mode == "all") {
      PADDLE_ENFORCE_EQ(phi::product(ctx->GetInputDim("Alpha")), 1,
                        platform::errors::InvalidArgument(
                            "For mode 'all', size of weight Alpha must be one. "
                            "But received alpha's size: %d.",
                            phi::product(ctx->GetInputDim("Alpha"))));
    } else if (mode == "channel") {
      auto x_rank = x_dim.size();
      PADDLE_ENFORCE_GE(x_rank, 2,
                        platform::errors::InvalidArgument(
                            "For mode 'channel', rank of input X must be "
                            "equal or larger than 2. But received X's "
                            "rank: %d",
                            x_rank));
      const std::string data_format_str =
          ctx->Attrs().Get<std::string>("data_format");
      PADDLE_ENFORCE_EQ(data_format_str == "NCHW" || data_format_str == "NHWC",
                        true,
                        platform::errors::InvalidArgument(
                            "For mode 'channel', data_format must be one of "
                            "NCHW and NHWC. But received data_format: %s",
                            data_format_str));
      if (data_format_str == "NCHW" || ctx->IsRunMKLDNNKernel()) {
        PADDLE_ENFORCE_EQ(
            phi::product(ctx->GetInputDim("Alpha")) == x_dim[1], true,
            platform::errors::InvalidArgument(
                "For mode 'channel', size of weight Alpha must be "
                "equal to the number of channels of input(x). But "
                "received alpha's size: %d, x_dim[1]: %d",
                phi::product(ctx->GetInputDim("Alpha")), x_dim[1]));
      } else {
        PADDLE_ENFORCE_EQ(
            phi::product(ctx->GetInputDim("Alpha")) == x_dim[x_rank - 1], true,
            platform::errors::InvalidArgument(
                "For mode 'channel', size of weight Alpha must be "
                "equal to the number of channels of input(x). But "
                "received alpha's size: %d, x_dim[%d]: %d",
                phi::product(ctx->GetInputDim("Alpha")), x_rank - 1,
                x_dim[x_rank - 1]));
      }

    } else if (mode == "element") {
      auto alpha_dim = ctx->GetInputDim("Alpha");
      auto alpha_rank = alpha_dim.size();
      auto x_rank = x_dim.size();
      PADDLE_ENFORCE_GE(x_rank, 1,
                        platform::errors::InvalidArgument(
                            "For mode 'element', rank of input X must be "
                            "equal or larger than 1. But received X's "
                            "rank: %d",
                            x_rank));
      PADDLE_ENFORCE_EQ(
          alpha_rank, x_rank,
          platform::errors::InvalidArgument(
              "For mode 'element', rank of weight Alpha must be "
              "equal to the rank of input(x). But received alpha's rank: %d, "
              "x's rank: %d.",
              alpha_rank, x_rank));
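      // Only the non-batch dimensions (everything except dim 0) are
      // accumulated and compared; Alpha need not match the batch dimension.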
      size_t x_product = 1;
      size_t alpha_product = 1;
      for (int64_t i = x_rank - 1; i > 0; i--) {
        x_product *= x_dim[i];
        alpha_product *= alpha_dim[i];
      }
      PADDLE_ENFORCE_EQ(
          alpha_product, x_product,
          platform::errors::InvalidArgument(
              "For mode 'element', the size of weight Alpha must be "
              "equal to the size of input(x). But received alpha's size: %d, "
              "x's size: %d.",
              alpha_product, x_product));
    } else {
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Attr(mode) of prelu must be one of 'all', 'channel', or 'element'. "
          "But received mode: '%s'.",
          mode));
    }
    ctx->ShareDim("X", /*->*/ "Out");
    ctx->ShareLoD("X", /*->*/ "Out");
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    auto input_data_type =
        framework::OperatorWithKernel::IndicateVarDataType(ctx, "X");

#ifdef PADDLE_WITH_MKLDNN
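    // Use the oneDNN kernel when the attributes and input data type allow it;
    // otherwise fall back to the default CPU kernel type below.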
    if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name, const Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const override {
    return innerGetKernelTypeForVar(tensor, expected_kernel_type);
  }
};

class PReluOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "The input tensor of prelu operator.");
    AddInput("Alpha", "The alpha weight of prelu operator.");
    AddOutput("Out", "The output tensor of prelu operator.");
    AddComment(R"DOC(
PRelu Operator.
The equation is:
$$
f(x) =
\begin{cases}
\alpha * x, \quad  \text{if} \ x < 0 \\
x,         \qquad  \text{if} \ x \geq 0
\end{cases}
$$
The input `X` may carry LoD (Level of Details) information;
the output shares the LoD information with input `X`.
There are three modes:
  all: all elements share the same weight
  channel: elements in a channel share the same weight
  element: each element has its own weight
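
For example, given a 4-D input of shape [batch, channels, height, width] in
NCHW format, Alpha holds one weight in 'all' mode, `channels` weights in
'channel' mode, and `channels * height * width` weights in 'element' mode.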
)DOC");
    AddAttr<std::string>(
        "mode", "The mode in which elements of the input share weights.")
        .SetDefault("all");
    AddAttr<std::string>("data_format",
                         "Data format that specifies the layout of the input.")
        .SetDefault("NCHW");
    AddAttr<bool>("use_mkldnn",
                  "(bool, default false) Only used in mkldnn kernel")
        .SetDefault(false)
        .AsExtra();
    AddAttr<std::string>(
        "mkldnn_data_type",
        "(string, default \"float32\"). Data type of mkldnn kernel")
        .SetDefault("float32")
        .InEnum({"float32", "bfloat16"})
        .AsExtra();
    AddAttr<bool>("is_test",
                  "(bool, default false) Set to true for inference only, false "
                  "for training. Some layers may run faster when this is true.")
        .SetDefault(false)
        .AsExtra();
  }
};

// The operator to calculate gradients of a prelu operator.
class PReluGradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "prelu");
    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
                   "Out@GRAD", "prelu");

    auto x_grad_name = framework::GradVarName("X");
    auto alpha_grad_name = framework::GradVarName("Alpha");

    if (ctx->HasOutput(x_grad_name)) {
      ctx->SetOutputDim(x_grad_name, ctx->GetInputDim("X"));
    }
    if (ctx->HasOutput(alpha_grad_name)) {
      ctx->SetOutputDim(alpha_grad_name, ctx->GetInputDim("Alpha"));
    }
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    auto input_data_type =
        framework::OperatorWithKernel::IndicateVarDataType(ctx, "X");

#ifdef PADDLE_WITH_MKLDNN
    if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }

  framework::OpKernelType GetKernelTypeForVar(
      const std::string &var_name, const Tensor &tensor,
      const framework::OpKernelType &expected_kernel_type) const override {
    return innerGetKernelTypeForVar(tensor, expected_kernel_type);
  }
};

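// Generates the description of prelu_grad from the forward prelu op: the
// gradient op consumes X, Alpha, and Out@GRAD, and produces X@GRAD and
// Alpha@GRAD.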
template <typename T>
class PReluGradOpMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("prelu_grad");
    op->SetInput("X", this->Input("X"));
    op->SetInput("Alpha", this->Input("Alpha"));
    op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
    op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
    op->SetOutput(framework::GradVarName("Alpha"), this->InputGrad("Alpha"));
    op->SetAttrMap(this->Attrs());
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

REGISTER_OPERATOR(prelu, ops::PReluOp, ops::PReluOpMaker,
                  ops::PReluGradOpMaker<paddle::framework::OpDesc>,
                  ops::PReluGradOpMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(prelu_grad, ops::PReluGradOp);
REGISTER_OP_CPU_KERNEL(
    prelu, ops::PReluKernel<paddle::platform::CPUDeviceContext, float>,
    ops::PReluKernel<paddle::platform::CPUDeviceContext, double>);
Q
QI JUN 已提交
278
REGISTER_OP_CPU_KERNEL(
    prelu_grad, ops::PReluGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::PReluGradKernel<paddle::platform::CPUDeviceContext, double>);