/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/sum_op.h"

#include <algorithm>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

#include "paddle/fluid/framework/var_type_inference.h"

#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif
#include "paddle/fluid/framework/convert_utils.h"

namespace paddle {
namespace operators {
using framework::Tensor;

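// SumOp computes the element-wise sum of all variables in the duplicable
// input slot "X" (LoDTensor, SelectedRows, or LoDTensorArray) and writes the
// result to "Out".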
class SumOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    OP_INOUT_CHECK(ctx->HasInputs("X"), "Input", "X", "sum");
    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "sum");

    if (ctx->IsRuntime() && ctx->GetOutputsVarType("Out")[0] ==
                                framework::proto::VarType::LOD_TENSOR_ARRAY) {
      return;  // skip runtime InferShape when the output is a tensor array
    }

    auto x_var_types = ctx->GetInputsVarType("X");
    auto x_dims = ctx->GetInputsDim("X");

    auto N = x_dims.size();
    PADDLE_ENFORCE_GT(
        N, 0,
        platform::errors::InvalidArgument(
            "The number of inputs X of SumOp should be "
            "larger than 0, but received %d inputs, "
            "X's shape = [%s].",
            N, &x_dims));
    if (N == 1) {
      VLOG(3) << "Warning: SumOp has only one input, which may waste memory";
    }

    framework::DDim in_dim({0});
    for (size_t i = 0; i < x_dims.size(); ++i) {
      auto& x_dim = x_dims[i];
      // x_dim.size() == 1 means the real dim of the selected rows is [0]
      if (x_var_types[i] == framework::proto::VarType::SELECTED_ROWS &&
          x_dim.size() == 1) {
        continue;
      }
      if (phi::product(x_dim) == 0) {
        continue;
      }
      if (phi::product(in_dim) == 0) {
        in_dim = x_dim;
      } else {
        if (ctx->IsRuntime()) {
          PADDLE_ENFORCE_EQ(in_dim, x_dim,
                            platform::errors::InvalidArgument(
                                "The input tensors X of SumOp must"
                                " have the same shape. But received X[0]'s "
                                "shape = [%s], X[%d]'s shape = [%s].",
                                in_dim, i, x_dim));
        } else {
          PADDLE_ENFORCE_EQ(
              in_dim.size(), x_dim.size(),
              platform::errors::InvalidArgument(
                  "The input tensors X of SumOp must have the same "
                  "number of dimensions. But received X[0]'s dimensions = %d, "
                  "X[0]'s shape = [%s], X[%d]'s dimensions = %d, X[%d]'s "
                  "shape = [%s].",
                  in_dim.size(), in_dim, i, x_dim.size(), i, x_dim));
          // if in_dim or x_dim contains -1, skip the equality check for that
          // dimension
          for (int j = 0; j < x_dim.size(); ++j) {
            if (x_dim[j] == -1 || in_dim[j] == -1) {
              continue;
            }
            PADDLE_ENFORCE_EQ(
                in_dim[j], x_dim[j],
                platform::errors::InvalidArgument(
                    "The input tensors X of SumOp must have the same shape "
                    "in every dimension that is not -1. But received X[0]'s "
                    "shape = [%s], X[%d]'s shape = [%s].",
                    in_dim, i, x_dim));
          }
        }
      }
    }
    ctx->SetOutputDim("Out", in_dim);
    ctx->ShareLoD("X", /*->*/ "Out");
  }

 protected:
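  // Chooses the kernel dtype and library for dispatch: for LoDTensor inputs,
  // the common dtype of the initialized inputs is used (optionally routing to
  // the MKLDNN kernel for FP32/BF16); for SelectedRows, the first initialized
  // value decides; for LoDTensorArray, the first non-empty initialized
  // element decides.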
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    auto x_vars = ctx.MultiInputVar("X");
    auto x_vars_name = ctx.InputNames("X");

    framework::LibraryType library{framework::LibraryType::kPlain};
    framework::DataLayout layout{framework::DataLayout::kAnyLayout};

    PADDLE_ENFORCE_GT(
        x_vars.size(), 0,
        platform::errors::InvalidArgument("Input(X) should not be empty"));

    PADDLE_ENFORCE_NOT_NULL(
        x_vars[0], platform::errors::NotFound(
                       "Input var[%s] should not be nullptr", x_vars_name[0]));

    if (x_vars[0]->IsType<framework::LoDTensor>()) {
      int dtype = -1;
      for (size_t idx = 0; idx < x_vars.size(); ++idx) {
        PADDLE_ENFORCE_NOT_NULL(
            x_vars[idx],
            platform::errors::NotFound("Input var[%s] should not be nullptr",
                                       x_vars_name[idx]));
        auto tensor =
            framework::GetLoDTensorOrSelectedRowsValueFromVar(*x_vars[idx]);
        if (tensor->numel() <= 0 || (!tensor->IsInitialized())) {
          continue;
        }
        if (dtype == -1) {
          dtype = framework::TransToProtoVarType(tensor->dtype());
        } else {
          PADDLE_ENFORCE_EQ(dtype,
                            framework::TransToProtoVarType(tensor->dtype()),
                            platform::errors::InvalidArgument(
                                "The input types of sum op must be the same"));
        }
      }
      PADDLE_ENFORCE_NE(dtype, -1,
                        platform::errors::InvalidArgument(
                            "Sum operator should have at least one "
                            "initialized tensor"));

      auto data_type = static_cast<framework::proto::VarType::Type>(dtype);
#ifdef PADDLE_WITH_MKLDNN
      if (library == framework::LibraryType::kPlain &&
          this->CanMKLDNNBeUsed(ctx, data_type) &&
          (data_type == framework::proto::VarType::FP32 ||
           data_type == framework::proto::VarType::BF16) &&
          ctx.OutputVar("Out")->IsType<framework::LoDTensor>()) {
        if (std::all_of(x_vars.begin(), x_vars.end(),
                        [](const framework::Variable* v) {
                          return v->IsType<framework::LoDTensor>();
                        })) {
          return framework::OpKernelType(data_type, ctx.GetPlace(),
                                         framework::DataLayout::kMKLDNN,
                                         framework::LibraryType::kMKLDNN);
        }
      }
#endif

      return framework::OpKernelType(data_type, ctx.GetPlace(), layout,
                                     library);
    } else if (x_vars[0]->IsType<phi::SelectedRows>()) {
      for (auto& var : x_vars) {
        auto& value = var->Get<phi::SelectedRows>().value();
        if (value.IsInitialized()) {
          return framework::OpKernelType(
              framework::TransToProtoVarType(value.dtype()),
              ctx.device_context(), layout, library);
        }
      }
      // if the input sparse vars are not initialized, use a default kernel
      // type
      return framework::OpKernelType(framework::proto::VarType::FP32,
                                     ctx.device_context(), layout, library);
    } else if (x_vars[0]->IsType<framework::LoDTensorArray>()) {
      for (auto& x_var : x_vars) {
        auto& array = x_var->Get<framework::LoDTensorArray>();
        for (auto& each : array) {
          if (each.numel() != 0 && each.IsInitialized()) {
            return framework::OpKernelType(
                framework::TransToProtoVarType(each.dtype()),
                ctx.device_context(), layout, library);
          }
        }
      }
      PADDLE_THROW(platform::errors::InvalidArgument(
          "Expected each tensor in Input(x) of sum op to be initialized, but "
          "some tensor in Input(x) is not initialized, please check your "
          "code."));
    }
    PADDLE_THROW(platform::errors::InvalidArgument(
        "Expected the type of Input(X) to be Tensor, SelectedRows or "
        "LoDTensorArray. But got unsupported type: %s.",
        framework::ToTypeName(x_vars[0]->Type())));
  }
};

class SumOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "A Variable list. The shape and data type of the list elements "
             "should be consistent. Variable can be a multi-dimensional "
             "Tensor or LoDTensor, and data types can be: float32, float64, "
             "int32, int64.")
        .AsDuplicable();
    AddOutput("Out",
              "The sum of input :code:`x`. Its shape and data types are "
              "consistent with :code:`x`.");
    AddAttr<bool>("use_mkldnn",
                  "(bool, default false) Only used in mkldnn kernel")
        .SetDefault(false);
    AddAttr<std::string>(
        "mkldnn_data_type",
        "(string, default \"float32\"). Data type of mkldnn kernel")
        .SetDefault("float32")
        .InEnum({"float32", "bfloat16"});
    AddComment(R"DOC(This OP sums one or more Tensors or LoDTensors
                    of the input. If the input is a LoDTensor, the output only
                    shares LoD information with the first input.)DOC");
  }
};
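// Illustrative Python-level usage (assuming the paddle.add_n API, which
// lowers to this sum op):
//   out = paddle.add_n([x, y])  # out = x + y, element-wise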

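// Infers the output variable type: LOD_TENSOR_ARRAY if any input is a tensor
// array (in which case all inputs must be), otherwise LOD_TENSOR if any input
// is a LoDTensor, otherwise SELECTED_ROWS.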
class SumOpVarTypeInference : public framework::VarTypeInference {
 public:
  void operator()(framework::InferVarTypeContext* ctx) const override {
    if (!ctx->IsDygraph()) {
      auto var_type = framework::proto::VarType::SELECTED_ROWS;
      if (VLOG_IS_ON(10)) {
        for (size_t ind = 0; ind < ctx->InputSize("X"); ++ind) {
          VLOG(10) << ctx->InputVarName("X", ind) << " "
                   << ctx->GetInputType("X", ind);
        }
      }

      if (ctx->InputTypeAnyOf("X",
                              framework::proto::VarType::LOD_TENSOR_ARRAY)) {
        if (!ctx->InputTypeAllOf("X",
                                 framework::proto::VarType::LOD_TENSOR_ARRAY)) {
          std::ostringstream os;
          for (size_t ind = 0; ind < ctx->InputSize("X"); ++ind) {
            os << "    " << ctx->InputVarName("X", ind) << " type is "
               << ctx->GetInputType("X", ind) << "\n";
          }
          PADDLE_THROW(platform::errors::InvalidArgument(
              "Not all inputs are tensor array:\n%s", os.str()));
        }
        var_type = framework::proto::VarType::LOD_TENSOR_ARRAY;
      } else if (ctx->InputTypeAnyOf("X",
                                     framework::proto::VarType::LOD_TENSOR)) {
        var_type = framework::proto::VarType::LOD_TENSOR;
      }

      ctx->SetOutputType("Out", var_type);
      ctx->SetOutputDataType("Out", ctx->GetInputDataType("X"));
    }
  }
};

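// Since d(sum)/dx_i == 1 for every input, each input gradient is simply a
// copy of the output gradient, emitted here as a `scale` op with scale = 1.0.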
class SumGradDescMaker : public framework::GradOpDescMakerBase {
 public:
  using framework::GradOpDescMakerBase::GradOpDescMakerBase;

  std::vector<std::unique_ptr<framework::OpDesc>> operator()() const override {
    auto x_grads = InputGrad("X", false);
    std::vector<std::unique_ptr<framework::OpDesc>> grad_ops;
    grad_ops.reserve(x_grads.size());
    auto og = OutputGrad("Out");
    std::transform(x_grads.begin(), x_grads.end(), std::back_inserter(grad_ops),
                   [&og](const std::string& x_grad) {
                     auto* grad_op = new framework::OpDesc();
                     grad_op->SetType("scale");
                     grad_op->SetInput("X", og);
                     grad_op->SetOutput("Out", {x_grad});
                     grad_op->SetAttr("scale", 1.0f);
                     return std::unique_ptr<framework::OpDesc>(grad_op);
                   });

    return grad_ops;
  }
};

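// Dygraph (imperative-mode) counterpart of SumGradDescMaker: traces the same
// identity `scale` gradient ops into a grad node, or returns nullptr when no
// input needs a gradient.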
class SumGradOpBaseMaker : public imperative::GradOpBaseMakerBase {
 public:
  using imperative::GradOpBaseMakerBase::GradOpBaseMakerBase;

  std::shared_ptr<imperative::GradOpNode> operator()() const override {
    auto x_grads = InputGrad("X", false);
    using InputGradsType = decltype(x_grads);

    if (!x_grads.empty()) {
      auto node = this->NewGradNode();
      node->reserve(x_grads.size());
      auto og = OutputGrad("Out");
      for (auto& x_grad : x_grads) {
        imperative::TracedGradOp op(node);
        op.SetType("scale");
        op.SetInput("X", og);
        op.SetOutput("Out", InputGradsType{x_grad});
        op.SetAttr("scale", 1.0f);
      }
      return node;
    } else {
      return nullptr;
    }
  }
};

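// Declares that Out may reuse the memory of the first input in X in-place.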
DECLARE_INPLACE_OP_INFERER(SumInplaceInferer, {"X", "Out"});

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

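// Registers sum together with its static-graph and dygraph gradient makers,
// var-type inference, and the in-place inferer.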
REGISTER_OPERATOR(sum, ops::SumOp, ops::SumOpMaker, ops::SumGradDescMaker,
                  ops::SumGradOpBaseMaker, ops::SumOpVarTypeInference,
                  ops::SumInplaceInferer);

REGISTER_OP_CPU_KERNEL(
    sum, ops::SumKernel<paddle::platform::CPUDeviceContext, float>,
    ops::SumKernel<paddle::platform::CPUDeviceContext, double>,
    ops::SumKernel<paddle::platform::CPUDeviceContext, int>,
    ops::SumKernel<paddle::platform::CPUDeviceContext,
                   paddle::platform::bfloat16>,
    ops::SumKernel<paddle::platform::CPUDeviceContext, int64_t>);