/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/fused/fused_elemwise_activation_op.h"
#include <memory>
#include <unordered_set>

namespace paddle {
namespace operators {

bool IsUnaryCompound(const std::vector<std::string> &functor_list) {
  PADDLE_ENFORCE_EQ(functor_list.size(), 2);
  static std::unordered_set<std::string> binary_fun = {
      "elementwise_add", "elementwise_mul", "elementwise_add_grad",
      "elementwise_mul_grad"};
  return binary_fun.count(functor_list[1]) != 0;
}
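// Illustrative examples, assuming the convention (suggested by this op's
// comment below) that the outer functor is listed first in functor_list:
//   IsUnaryCompound({"relu", "elementwise_add"}) -> true
//       (Out = relu(elementwise_add(X, Y)))
//   IsUnaryCompound({"elementwise_add", "relu"}) -> false
//       (Out = elementwise_add(X, relu(Y)))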

bool HasInPlaceUnary(const std::vector<std::string> &functor_list) {
  PADDLE_ENFORCE_EQ(functor_list.size(), 2);
  static std::unordered_set<std::string> InplaceOpSet = {"relu", "relu_grad"};
  bool is_in_place = false;
  for (auto &func_name : functor_list) {
    is_in_place |= (InplaceOpSet.count(func_name) == 1);
  }
  return is_in_place;
}
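// Illustrative examples:
//   HasInPlaceUnary({"relu", "elementwise_add"})  -> true
//   HasInPlaceUnary({"scale", "elementwise_add"}) -> false
// Only relu is treated as in-place here, presumably because relu_grad can be
// computed from the output alone.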

bool InputXCanBeAbsent(const std::vector<std::string> &functor_list) {
  PADDLE_ENFORCE_EQ(functor_list.size(), 2);
  static std::unordered_set<std::string> binary_fun = {"elementwise_add_grad"};
  return binary_fun.count(functor_list[0]) != 0 ||
         binary_fun.count(functor_list[1]) != 0;
}
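// Illustrative example:
//   InputXCanBeAbsent({"elementwise_add_grad", "relu_grad"}) -> true
// For elementwise_add, dX has the same shape as dOut, so the backward pass
// can be built without keeping X (see FusedElemwiseActivationOpGrad below).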

/*
 * Whether the compound function is supported.
 * For Unary(Binary(X, Y)), the intermediate_out's shape is the same as the
 * final out's.
 */
static bool IsSupportedCompound(const std::vector<std::string> &functors) {
  PADDLE_ENFORCE_EQ(functors.size(), 2UL);

  static std::unordered_set<std::string> unary_fun = {"scale", "relu", "tanh",
                                                      "sigmoid"};
  static std::unordered_set<std::string> binary_fun = {"elementwise_add",
                                                       "elementwise_mul"};

  std::string unary_fun_str;
  if (binary_fun.count(functors[0])) {
    unary_fun_str = functors[1];
  } else if (binary_fun.count(functors[1])) {
    unary_fun_str = functors[0];
  } else {
    PADDLE_THROW("%s and %s are not included in fused_list.", functors[0],
                 functors[1]);
  }
  PADDLE_ENFORCE_EQ(unary_fun.count(unary_fun_str), 1,
                    "%s is not included in fused_list.", unary_fun_str);
  return true;
}
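// Illustrative examples:
//   IsSupportedCompound({"elementwise_add", "relu"}) -> true
//   IsSupportedCompound({"relu", "tanh"}) -> throws, because neither
//       functor is a binary (elementwise) one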

class FusedElemwiseActivationOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *ctx) const override {
    PADDLE_ENFORCE(
        ctx->HasInput("X"),
        "Input(X) of FusedElemwiseActivationOp should not be null.");
    PADDLE_ENFORCE(
        ctx->HasInput("Y"),
        "Input(Y) of FusedElemwiseActivationOp should not be null.");
    PADDLE_ENFORCE(
        ctx->HasOutput("Out"),
        "Output(Out) of FusedElemwiseActivationOp should not be null.");

    auto x_dim = ctx->GetInputDim("X");
    auto y_dim = ctx->GetInputDim("Y");

    // Whether the shape of Y is a continuous subsequence of X.
    // For more information, please refer to the op's introduction.
    bool bcast_y = IsBcastY(x_dim, y_dim);

    auto &out_dim = bcast_y ? x_dim : y_dim;
    std::string out_lod = bcast_y ? "X" : "Y";
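    // Out takes the shape and lod of whichever input is not broadcast:
    // X when bcast_y is true, otherwise Y.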

    if (ctx->Attrs().Get<bool>("save_intermediate_out")) {
      PADDLE_ENFORCE(ctx->HasOutput("IntermediateOut"),
                     "Output(IntermediateOut) of FusedElemwiseActivationOp "
                     "should not be null.");

      if (IsUnaryCompound(
              ctx->Attrs().Get<std::vector<std::string>>("functor_list"))) {
        // for Unary(Binary(X, Y)), the shape and lod of out and
        // intermediate_out are the same.
        ctx->SetOutputDim("IntermediateOut", out_dim);
        // set the lod of intermediate_out
        ctx->ShareLoD(out_lod, /*->*/ "IntermediateOut");
      } else {
        // for Binary(X, Unary(Y)), the shape and lod of Y and
        // intermediate_out are the same.
        ctx->SetOutputDim("IntermediateOut", y_dim);
        // set the lod of intermediate_out
        ctx->ShareLoD("Y", /*->*/ "IntermediateOut");
      }
    }
    ctx->SetOutputDim("Out", out_dim);
    ctx->ShareLoD(out_lod, /*->*/ "Out");
  }

  static bool IsBcastY(const framework::DDim &x_dim,
                       const framework::DDim &y_dim) {
    bool bcast_y = x_dim.size() >= y_dim.size();
    if (x_dim.size() == y_dim.size()) {
      for (int i = 0; i < x_dim.size(); ++i) {
        if (x_dim[i] < y_dim[i]) {
          bcast_y = false;
          break;
        }
      }
    }
    return bcast_y;
  }
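  // Illustrative examples (shapes written as dimension lists):
  //   IsBcastY((2, 3, 4, 5), (3, 4)) -> true  (Y is broadcast to X)
  //   IsBcastY((3, 4), (2, 3, 4, 5)) -> false (X is broadcast to Y)
  //   IsBcastY((2, 3), (2, 5)) -> false (equal rank, but x_dim[1] < y_dim[1])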

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    PADDLE_ENFORCE_EQ(ctx.Input<framework::Tensor>("X")->type(),
                      ctx.Input<framework::Tensor>("Y")->type(),
                      "The element types of the inputs should be the same.");
    return framework::OpKernelType(
        OperatorWithKernel::IndicateVarDataType(ctx, "X"), ctx.GetPlace());
  }
};

class FusedElemwiseActivationMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput(
        "X",
        "(Tensor) The input tensor of fused_elemwise_activation operator.");
    AddInput(
        "Y",
        "(Tensor) The input tensor of fused_elemwise_activation operator.");
    AddOutput("Out",
              "vector<Tensor> The output tensor of fused_elemwise_activation "
              "operator.");
    AddOutput("IntermediateOut",
              "Tensor The IntermediateOut tensor of fused_elemwise_activation "
              "operator.")
        .AsIntermediate();
    AddAttr<int>("axis",
                 "axis is used by elementwise_op, the default value is -1.")
        .SetDefault(-1);
    AddAttr<float>("scale",
                   "scale is used by scale_op, the default value is 0.0.")
        .SetDefault(0.0);
    AddAttr<bool>("save_intermediate_out",
                  "Whether to save the intermediate_out.")
        .SetDefault(false);
    AddAttr<std::vector<std::string>>("functor_list",
                                      "The functors that should be fused.")
        .AddCustomChecker([&](const std::vector<std::string> &functor_list) {
          PADDLE_ENFORCE(IsSupportedCompound(functor_list));
        });

    AddComment(R"DOC(
FusedElemwiseActivation Operator.

At present, FusedElemwiseActivation only supports two kinds of compound
operators (elementwise_op and activation_op):

    Z = Binary(X, Unary(Y))
    Z = Unary(Binary(X, Y))

There are two cases for this operator:

1. The shapes of $X$ and $Y$ are the same.
2. The shape of $Y$ is a continuous subsequence of $X$ or the shape of $X$ is a continuous subsequence of $Y$.

For case 2 (assume that the shape of $Y$ is a continuous subsequence of $X$):

1. Broadcast $Y$ to match the shape of $X$, where $axis$ is the start dimension index
   for broadcasting $Y$ onto $X$.
2. If $axis$ is -1 (default), $axis = rank(X) - rank(Y)$.
3. The trailing dimensions of size 1 for $Y$ will be ignored when deciding
   whether its shape is a subsequence, e.g. shape(Y) = (2, 1) => (2).

For example:

  .. code-block:: python

    shape(X) = (2, 3, 4, 5), shape(Y) = (,)
    shape(X) = (2, 3, 4, 5), shape(Y) = (5,)
    shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2
    shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1
    shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0
    shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0


The inputs $X$ and $Y$ can carry different LoD information.
The output only shares the LoD information with the input whose shape is the
same as Out's.
The attributes of the activation_op can be obtained from the
fused_elemwise_activation_op's attributes.
The functor_list records the functions to be fused, for example
["scale", "elementwise_add"].

)DOC");
  }
};

template <typename T>
class FusedElemwiseActivationGradMaker
    : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> grad_op) const override {
    grad_op->SetType(this->ForwardOpType() + "_grad");

    for (auto &input_param : this->InputNames()) {
      grad_op->SetInput(input_param, this->Input(input_param));
      grad_op->SetOutput(framework::GradVarName(input_param),
                         this->InputGrad(input_param, true));
    }

    grad_op->SetInput("Out", this->Output("Out"));
    grad_op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));

    grad_op->SetAttrMap(this->Attrs());

    std::vector<std::string> functor_names =
        boost::get<std::vector<std::string>>(grad_op->GetAttr("functor_list"));

    functor_names[0] += "_grad";
    functor_names[1] += "_grad";
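    // e.g. a forward functor_list of {"scale", "elementwise_add"} becomes
    // {"scale_grad", "elementwise_add_grad"} for the grad op.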
    grad_op->SetAttr("functor_list", functor_names);

    if (boost::get<bool>(grad_op->GetAttr("save_intermediate_out"))) {
      // PADDLE_ENFORCE_NE(Output("IntermediateOut").size(), 0);
      grad_op->SetInput("IntermediateOut", this->Output("IntermediateOut"));
      grad_op->SetOutput(framework::GradVarName("IntermediateOut"),
                         this->OutputGrad("IntermediateOut"));
    } else {
      grad_op->SetInput("IntermediateOut", this->EmptyOutput());
      grad_op->SetOutput(framework::GradVarName("IntermediateOut"),
                         this->EmptyOutputGrad());
    }
  }
};

class FusedElemwiseActivationOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
                   "Input(Out@Grad) should not be null");

    auto functor_list =
        ctx->Attrs().Get<std::vector<std::string>>("functor_list");

    if (ctx->Attrs().Get<bool>("save_intermediate_out")) {
      PADDLE_ENFORCE(ctx->HasInput("IntermediateOut"),
                     "Input(IntermediateOut) should not be null");
    } else {
      if (!InputXCanBeAbsent(functor_list)) {
        PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
      }
    }

    auto x_grad_name = framework::GradVarName("X");
    auto y_grad_name = framework::GradVarName("Y");
    auto inter_grad_name = framework::GradVarName("IntermediateOut");

    if (ctx->HasOutput(x_grad_name)) {
      if (ctx->HasInputs("X")) {
        ctx->SetOutputDim(x_grad_name, ctx->GetInputDim("X"));
        ctx->ShareLoD("X", x_grad_name);
      } else {
        // Currently, "X" can be absent only when Binary is elementwise_add
        // or elementwise_sub.
        PADDLE_ENFORCE(InputXCanBeAbsent(functor_list),
                       "'X' can be absent only when BinaryFunctor is "
                       "elementwise_add.");

        // Note: if "X" is absent, the shape of Y should be a continuous
        // subsequence of X; otherwise, we cannot infer the shape of dx.

        ctx->SetOutputDim(x_grad_name,
                          ctx->GetInputDim(framework::GradVarName("Out")));
        ctx->ShareLoD(framework::GradVarName("Out"), x_grad_name);
      }
    }

    if (ctx->HasOutput(y_grad_name)) {
      PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should not be null");
      ctx->SetOutputDim(y_grad_name, ctx->GetInputDim("Y"));
      ctx->ShareLoD("Y", y_grad_name);
    }

    if (ctx->HasOutput(inter_grad_name)) {
      // For Unary(Binary(X, Y)), IntermediateOut should not be empty.
      if (IsUnaryCompound(functor_list)) {
        ctx->SetOutputDim(inter_grad_name,
                          ctx->GetInputDim(framework::GradVarName("Out")));
        ctx->ShareLoD(framework::GradVarName("Out"), inter_grad_name);
      } else {
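        // For Binary(X, Unary(Y)), intermediate_out is Unary(Y), so its
        // gradient has the same shape and lod as Y.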
        ctx->SetOutputDim(inter_grad_name, ctx->GetInputDim("Y"));
        ctx->ShareLoD("Y", inter_grad_name);
      }
    }
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    return framework::OpKernelType(
        OperatorWithKernel::IndicateVarDataType(ctx, "Y"), ctx.GetPlace());
  }
};
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OPERATOR(
    fused_elemwise_activation, ops::FusedElemwiseActivationOp,
    ops::FusedElemwiseActivationMaker,
    ops::FusedElemwiseActivationGradMaker<paddle::framework::OpDesc>,
    ops::FusedElemwiseActivationGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(fused_elemwise_activation_grad,
                  ops::FusedElemwiseActivationOpGrad);

REGISTER_OP_CPU_KERNEL(
    fused_elemwise_activation,
    ops::FusedElemwiseActivationKernel<paddle::platform::CPUDeviceContext,
                                       float>,
    ops::FusedElemwiseActivationKernel<paddle::platform::CPUDeviceContext,
                                       double>);

REGISTER_OP_CPU_KERNEL(
    fused_elemwise_activation_grad,
    ops::FusedElemwiseActivationGradKernel<paddle::platform::CPUDeviceContext,
                                           float>,
    ops::FusedElemwiseActivationGradKernel<paddle::platform::CPUDeviceContext,
                                           double>);