/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <algorithm>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/var_type_inference.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/multiary.h"

#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif
#include "paddle/fluid/framework/convert_utils.h"

namespace paddle {
namespace operators {

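// Sum operator: computes the elementwise sum over the duplicable input list
// "X". GetExpectedKernelType inspects the inputs at runtime because "X" may
// hold LoDTensor, SelectedRows, or LoDTensorArray variables.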
class SumOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    auto x_vars = ctx.MultiInputVar("X");
    auto x_vars_name = ctx.InputNames("X");

    framework::LibraryType library{framework::LibraryType::kPlain};
    framework::DataLayout layout{framework::DataLayout::kAnyLayout};

    PADDLE_ENFORCE_GT(
        x_vars.size(),
        0,
        platform::errors::InvalidArgument("Input[X] should not be empty"));

    PADDLE_ENFORCE_NOT_NULL(
        x_vars[0],
        platform::errors::NotFound("Input var[%s] should not be nullptr",
                                   x_vars_name[0]));

    if (x_vars[0]->IsType<framework::LoDTensor>()) {
      int dtype = -1;
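      // Scan the inputs, skipping empty or uninitialized tensors, and
      // require every remaining input to share a single dtype.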
      for (size_t idx = 0; idx < x_vars.size(); ++idx) {
        PADDLE_ENFORCE_NOT_NULL(
            x_vars[idx],
            platform::errors::NotFound("Input var[%s] should not be nullptr",
                                       x_vars_name[idx]));
        auto tensor =
            framework::GetLoDTensorOrSelectedRowsValueFromVar(*x_vars[idx]);
        if (tensor->numel() <= 0 || (!tensor->IsInitialized())) {
          continue;
        }
        if (dtype == -1) {
          dtype = framework::TransToProtoVarType(tensor->dtype());
        } else {
          PADDLE_ENFORCE_EQ(dtype,
                            framework::TransToProtoVarType(tensor->dtype()),
                            platform::errors::InvalidArgument(
                                "The input types of the sum op must be the same"));
        }
      }
      PADDLE_ENFORCE_NE(dtype,
                        -1,
                        platform::errors::InvalidArgument(
                            "The sum op should have at least one non-empty, "
                            "initialized input tensor"));

      auto data_type = static_cast<framework::proto::VarType::Type>(dtype);
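      // Prefer the oneDNN (MKL-DNN) kernel only for FP32/BF16 data when the
      // output and all inputs are dense LoDTensors.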
#ifdef PADDLE_WITH_MKLDNN
      if (library == framework::LibraryType::kPlain &&
          this->CanMKLDNNBeUsed(ctx, data_type) &&
          (data_type == framework::proto::VarType::FP32 ||
           data_type == framework::proto::VarType::BF16) &&
          ctx.OutputVar("Out")->IsType<framework::LoDTensor>()) {
        if (std::all_of(
                x_vars.begin(), x_vars.end(), [](const framework::Variable* v) {
                  return v->IsType<framework::LoDTensor>();
                })) {
          return framework::OpKernelType(data_type,
                                         ctx.GetPlace(),
                                         framework::DataLayout::kMKLDNN,
                                         framework::LibraryType::kMKLDNN);
        }
      }
#endif

      return framework::OpKernelType(
          data_type, ctx.GetPlace(), layout, library);
    } else if (x_vars[0]->IsType<phi::SelectedRows>()) {
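      // Sparse branch: key the kernel on the first initialized SelectedRows
      // value; fall back to FP32 below when none is initialized.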
      for (auto& var : x_vars) {
        auto& value = var->Get<phi::SelectedRows>().value();
        if (value.IsInitialized()) {
          return framework::OpKernelType(
              framework::TransToProtoVarType(value.dtype()),
              ctx.device_context(),
              layout,
              library);
        }
      }
      // If no input sparse var is initialized, fall back to a default kernel
      // type.
      return framework::OpKernelType(framework::proto::VarType::FP32,
                                     ctx.device_context(),
                                     layout,
                                     library);
    } else if (x_vars[0]->IsType<framework::LoDTensorArray>()) {
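      // Tensor-array branch: use the dtype of the first non-empty,
      // initialized tensor found in any input array.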
      for (auto& x_var : x_vars) {
        auto& array = x_var->Get<framework::LoDTensorArray>();
        for (auto& each : array) {
          if (each.numel() != 0 && each.IsInitialized()) {
            return framework::OpKernelType(
                framework::TransToProtoVarType(each.dtype()),
                ctx.device_context(),
                layout,
                library);
          }
        }
      }
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Expected each tensor in Input(X) of the sum op to be initialized, "
          "but some tensor in Input(X) is not initialized; please check your "
          "code."));
    }
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Expected the type of Input(X) to be LoDTensor, SelectedRows, or "
        "LoDTensorArray, but got unsupported type: %s.",
        framework::ToTypeName(x_vars[0]->Type())));
  }
};

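// Declares the sum op's proto: a duplicable input list "X", the output
// "Out", and the oneDNN-related attributes.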
class SumOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "A Variable list. The shape and data type of the list elements "
             "should be consistent. Variable can be a multi-dimensional "
             "Tensor or LoDTensor, and data types can be: float32, float64, "
             "int32, int64.")
        .AsDuplicable();
    AddOutput("Out",
              "The sum of the inputs :code:`x`. Its shape and data type are "
              "consistent with :code:`x`.");
    AddAttr<bool>("use_mkldnn",
                  "(bool, default false) Only used in mkldnn kernel")
        .SetDefault(false);
    AddAttr<std::string>(
        "mkldnn_data_type",
        "(string, default \"float32\"). Data type of mkldnn kernel")
        .SetDefault("float32")
        .InEnum({"float32", "bfloat16"});
    AddComment(R"DOC(This OP sums one or more input Tensors or LoDTensors.
                    If the inputs are LoDTensors, the output only shares LoD
                    information with the first input.)DOC");
  }
};

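// Infers the output variable type (SELECTED_ROWS, LOD_TENSOR, or
// LOD_TENSOR_ARRAY) from the input variable types in static-graph mode.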
class SumOpVarTypeInference : public framework::VarTypeInference {
 public:
  void operator()(framework::InferVarTypeContext* ctx) const override {
    if (!ctx->IsDygraph()) {
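      // Start from SELECTED_ROWS and promote to LOD_TENSOR or
      // LOD_TENSOR_ARRAY when any input carries those types.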
      auto var_type = framework::proto::VarType::SELECTED_ROWS;
      if (VLOG_IS_ON(10)) {
        for (size_t ind = 0; ind < ctx->InputSize("X"); ++ind) {
          VLOG(10) << ctx->InputVarName("X", ind) << " "
                   << ctx->GetInputType("X", ind);
        }
      }

      if (ctx->InputTypeAnyOf("X",
                              framework::proto::VarType::LOD_TENSOR_ARRAY)) {
        if (!ctx->InputTypeAllOf("X",
                                 framework::proto::VarType::LOD_TENSOR_ARRAY)) {
          std::ostringstream os;
          for (size_t ind = 0; ind < ctx->InputSize("X"); ++ind) {
            os << "    " << ctx->InputVarName("X", ind) << " type is "
               << ctx->GetInputType("X", ind) << "\n";
          }
          PADDLE_THROW(platform::errors::InvalidArgument(
              "Not all inputs are tensor array:\n%s", os.str()));
        }
        var_type = framework::proto::VarType::LOD_TENSOR_ARRAY;
      } else if (ctx->InputTypeAnyOf("X",
                                     framework::proto::VarType::LOD_TENSOR)) {
        var_type = framework::proto::VarType::LOD_TENSOR;
      }

      ctx->SetOutputType("Out", var_type);
      ctx->SetOutputDataType("Out", ctx->GetInputDataType("X"));
    }
  }
};

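// Static-graph grad maker: since d(sum)/d(x_i) == 1, each input gradient
// is just the output gradient, emitted as a scale op with scale = 1.0.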
class SumGradDescMaker : public framework::GradOpDescMakerBase {
 public:
  using framework::GradOpDescMakerBase::GradOpDescMakerBase;

  std::vector<std::unique_ptr<framework::OpDesc>> operator()() const override {
    auto x_grads = InputGrad("X", false);
    std::vector<std::unique_ptr<framework::OpDesc>> grad_ops;
    grad_ops.reserve(x_grads.size());
    auto og = OutputGrad("Out");
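    // Emit one scale(X=Out@GRAD, scale=1.0) op per input gradient.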
    std::transform(x_grads.begin(),
                   x_grads.end(),
                   std::back_inserter(grad_ops),
                   [&og](const std::string& x_grad) {
                     auto* grad_op = new framework::OpDesc();
                     grad_op->SetType("scale");
                     grad_op->SetInput("X", og);
                     grad_op->SetOutput("Out", {x_grad});
                     grad_op->SetAttr("scale", 1.0f);
                     return std::unique_ptr<framework::OpDesc>(grad_op);
                   });

    return grad_ops;
  }
};

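// Dygraph grad maker: mirrors SumGradDescMaker for imperative mode by
// tracing one scale op per input gradient.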
class SumGradOpBaseMaker : public imperative::GradOpBaseMakerBase {
 public:
  using imperative::GradOpBaseMakerBase::GradOpBaseMakerBase;

  std::shared_ptr<imperative::GradOpNode> operator()() const override {
    auto x_grads = InputGrad("X", false);
    using InputGradsType = decltype(x_grads);

    if (!x_grads.empty()) {
      auto node = this->NewGradNode();
      node->reserve(x_grads.size());
      auto og = OutputGrad("Out");
      for (auto& x_grad : x_grads) {
        imperative::TracedGradOp op(node);
        op.SetType("scale");
        op.SetInput("X", og);
        op.SetOutput("Out", InputGradsType{x_grad});
        op.SetAttr("scale", 1.0f);
        op.SetDefaultAttrsMap(DefaultAttrsMap());
      }
      return node;
    } else {
      return nullptr;
    }
  }
};

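// Marks that "Out" may reuse the memory of the first input in "X".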
DECLARE_INPLACE_OP_INFERER(SumInplaceInferer, {"X", "Out"});

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
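// Route sum's compile-time shape inference through phi's AddN infer-meta.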
DECLARE_INFER_SHAPE_FUNCTOR(sum,
                            AddNInferShapeFunctor,
                            PD_INFER_META(phi::AddNTensorArrayInferMeta));

REGISTER_OPERATOR(sum,
                  ops::SumOp,
                  ops::SumOpMaker,
                  ops::SumGradDescMaker,
                  ops::SumGradOpBaseMaker,
                  ops::SumOpVarTypeInference,
                  ops::SumInplaceInferer,
                  AddNInferShapeFunctor);