/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/sum_op.h"
#include <vector>
#include "paddle/fluid/framework/var_type_inference.h"
#include "paddle/fluid/operators/detail/safe_ref.h"

namespace paddle {
namespace operators {
using framework::Tensor;

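// SumOp computes the element-wise sum of its inputs "X" and writes the
// result to "Out". Shape inference requires every non-empty input to share
// one shape, which becomes the output shape.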
class SumOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

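  // Infers the output shape as the common shape of all non-empty inputs.
  // Illustrative example: inputs of shape {2, 3} and {2, 3} yield an output
  // of shape {2, 3}; inputs whose element count is zero are skipped.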
  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInputs("X"), "Inputs(X) should not be null");

    PADDLE_ENFORCE(ctx->HasOutput("Out"),
                   "Output(Out) of SumOp should not be null.");
    if (ctx->IsRuntime() &&
        ctx->GetOutputsVarType("Out")[0] ==
            framework::proto::VarType::LOD_TENSOR_ARRAY) {
      return;  // Skip runtime InferShape when the output is a LoDTensorArray.
    }

    auto x_dims = ctx->GetInputsDim("X");
    size_t N = x_dims.size();
    PADDLE_ENFORCE_GT(N, 1,
                      "The number of input tensors should be greater than 1.");

    framework::DDim in_dim({0});
    for (auto& x_dim : x_dims) {
      if (framework::product(x_dim) == 0) {
        continue;
      }
      if (framework::product(in_dim) == 0) {
        in_dim = x_dim;
      } else {
        PADDLE_ENFORCE_EQ(in_dim, x_dim, "Input tensors must have same shape");
      }
    }
    ctx->SetOutputDim("Out", in_dim);
    ctx->ShareLoD("X", /*->*/ "Out");
  }

 protected:
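  // Picks the kernel data type from the first input that actually holds
  // data and enforces that every non-empty input uses the same type. The
  // three branches mirror the variable types SumOp accepts: LoDTensor,
  // SelectedRows, and LoDTensorArray.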
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    auto x_vars = ctx.MultiInputVar("X");
    if (x_vars[0]->IsType<framework::LoDTensor>()) {
      int dtype = -1;
      for (auto& x_var : x_vars) {
        auto& lod_tensor = x_var->Get<framework::LoDTensor>();
        if (lod_tensor.numel() == 0) {
          continue;
        }
        if (dtype == -1) {
          dtype = framework::ToDataType(lod_tensor.type());
        } else {
          PADDLE_ENFORCE_EQ(dtype, framework::ToDataType(lod_tensor.type()));
        }
      }
      PADDLE_ENFORCE_NE(
          dtype, -1, "Sum operator should have at least one non-empty tensor.");

      return framework::OpKernelType(
          static_cast<framework::proto::VarType::Type>(dtype),
          ctx.device_context());
    } else if (x_vars[0]->IsType<framework::SelectedRows>()) {
      return framework::OpKernelType(
          framework::ToDataType(
              x_vars[0]->Get<framework::SelectedRows>().value().type()),
          ctx.device_context());
    } else if (x_vars[0]->IsType<framework::LoDTensorArray>()) {
      for (auto& x_var : x_vars) {
        auto& array = x_var->Get<framework::LoDTensorArray>();
        for (auto& each : array) {
          if (each.numel() != 0) {
            return framework::OpKernelType(framework::ToDataType(each.type()),
                                           ctx.device_context());
          }
        }
      }
      PADDLE_THROW("Cannot determine the data type: all inputs are empty.");
    }
    PADDLE_THROW("Unexpected branch. Input type is %s",
                 x_vars[0]->Type().name());
  }
};

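// Declares the operator's interface: a duplicable input list "X", a single
// output "Out", and the operator documentation.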
class SumOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  SumOpMaker(OpProto* proto, OpAttrChecker* op_checker)
      : OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "(vector<Tensor>) The input tensors of sum operator.")
        .AsDuplicable();
    AddOutput("Out", "(Tensor) The output tensor of sum operator.");
    AddComment(R"DOC(
Sum operator.

This operator sums the input tensors. All the inputs can carry the
LoD (Level of Details) information. However, the output only shares
the LoD information with the first input.
)DOC");
  }
};

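// Infers the output variable type from the inputs. Precedence: if any input
// is a LoDTensorArray, all inputs must be (and the output becomes one);
// otherwise a single LoDTensor input makes the output a LoDTensor; otherwise
// the output stays a SelectedRows.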
class SumOpVarTypeInference : public framework::VarTypeInference {
 public:
  void operator()(const framework::OpDesc& op_desc,
                  framework::BlockDesc* block) const override {
    auto& inputs = op_desc.Input("X");
    auto var_type = framework::proto::VarType::SELECTED_ROWS;

    for (auto& name : op_desc.Input("X")) {
      VLOG(10) << name << " "
               << block->FindRecursiveOrCreateVar(name).GetType();
    }

    bool any_input_is_lod_tensor = std::any_of(
        inputs.begin(), inputs.end(), [block](const std::string& name) {
          return block->FindRecursiveOrCreateVar(name).GetType() ==
                 framework::proto::VarType::LOD_TENSOR;
        });

    auto is_tensor_array = [block](const std::string& name) {
      return block->FindRecursiveOrCreateVar(name).GetType() ==
             framework::proto::VarType::LOD_TENSOR_ARRAY;
    };

    bool any_input_is_tensor_array =
        std::any_of(inputs.begin(), inputs.end(), is_tensor_array);
    bool all_inputs_are_tensor_array =
        std::all_of(inputs.begin(), inputs.end(), is_tensor_array);

    if (any_input_is_tensor_array) {
      if (!all_inputs_are_tensor_array) {
        std::ostringstream os;
        for (auto& each : inputs) {
          os << "    " << each << " type is "
             << block->FindRecursiveOrCreateVar(each).GetType() << "\n";
        }
        PADDLE_ENFORCE(all_inputs_are_tensor_array,
                       "Not all inputs are tensor array:\n%s", os.str());
      }
      var_type = framework::proto::VarType::LOD_TENSOR_ARRAY;
    } else if (any_input_is_lod_tensor) {
      var_type = framework::proto::VarType::LOD_TENSOR;
    }

    auto out_var_name = op_desc.Output("Out").front();
    auto& out_var = block->FindRecursiveOrCreateVar(out_var_name);
    out_var.SetType(var_type);
    auto& in_var = detail::Ref(block->FindVarRecursive(inputs.front()));
    out_var.SetDataType(in_var.GetDataType());
  }
};

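// The gradient of a sum is the identity for each input, so no dedicated
// sum_grad kernel is needed: the maker emits one "scale" op per input
// gradient, copying Out@GRAD with scale = 1.0. A rough sketch of the
// expansion for two inputs:
//
//   forward:   sum(x0, x1) -> out
//   backward:  scale(out@GRAD, scale=1.0) -> x0@GRAD
//              scale(out@GRAD, scale=1.0) -> x1@GRAD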
class SumGradMaker : public framework::GradOpDescMakerBase {
 public:
  using framework::GradOpDescMakerBase::GradOpDescMakerBase;

  std::vector<std::unique_ptr<framework::OpDesc>> operator()() const override {
    auto x_grads = InputGrad("X", false);
    std::vector<std::unique_ptr<framework::OpDesc>> grad_ops;
    grad_ops.reserve(x_grads.size());
    auto og = OutputGrad("Out");
    std::transform(x_grads.begin(), x_grads.end(), std::back_inserter(grad_ops),
                   [&og](const std::string& x_grad) {
                     auto* grad_op = new framework::OpDesc();
                     grad_op->SetType("scale");
                     grad_op->SetInput("X", og);
                     grad_op->SetOutput("Out", {x_grad});
                     grad_op->SetAttr("scale", 1.0f);
                     return std::unique_ptr<framework::OpDesc>(grad_op);
                   });
    return grad_ops;
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

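// Registration ties together the operator, its proto maker, gradient maker,
// and variable type inference, and registers CPU kernels for the supported
// element types (float, double, int, int64_t).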
REGISTER_OPERATOR(sum, ops::SumOp, ops::SumOpMaker, ops::SumGradMaker,
                  ops::SumOpVarTypeInference);
REGISTER_OP_CPU_KERNEL(
    sum, ops::SumKernel<paddle::platform::CPUDeviceContext, float>,
    ops::SumKernel<paddle::platform::CPUDeviceContext, double>,
    ops::SumKernel<paddle::platform::CPUDeviceContext, int>,
    ops::SumKernel<paddle::platform::CPUDeviceContext, int64_t>);