/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/sum_op.h"

#include <algorithm>
#include <string>
#include <vector>

#include "paddle/fluid/framework/var_type_inference.h"
#include "paddle/fluid/operators/detail/safe_ref.h"

namespace paddle {
namespace operators {
using framework::Tensor;

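// SumOp computes the element-wise sum of all of its input tensors.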
class SumOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

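  // Shape inference: every non-empty input must have the same shape, which
  // becomes the shape of Out; Out also shares the LoD of the first input.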
  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInputs("X"), "Inputs(X) should not be null");

    PADDLE_ENFORCE(ctx->HasOutput("Out"),
                   "Output(Out) of SumOp should not be null.");
    if (ctx->IsRuntime() &&
        ctx->GetOutputsVarType("Out")[0] ==
            framework::proto::VarType::LOD_TENSOR_ARRAY) {
      return;  // skip runtime infershape when the output is a tensor array
    }

    auto x_dims = ctx->GetInputsDim("X");
    size_t N = x_dims.size();
    PADDLE_ENFORCE_GT(N, 0,
                      "The number of input tensors should be greater than 0.");
    if (N == 1) {
      VLOG(3) << "Warning: sum has only one input, which may waste memory";
    }

    framework::DDim in_dim({0});
    for (auto& x_dim : x_dims) {
      if (framework::product(x_dim) == 0) {
        continue;
      }
      if (framework::product(in_dim) == 0) {
        in_dim = x_dim;
      } else {
        PADDLE_ENFORCE_EQ(in_dim, x_dim,
                          "Input tensors must have the same shape");
      }
    }
    ctx->SetOutputDim("Out", in_dim);
    ctx->ShareLoD("X", /*->*/ "Out");
  }

 protected:
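  // The kernel dtype is taken from the first non-empty input so that empty
  // or uninitialized inputs do not decide which kernel runs.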
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    auto x_vars = ctx.MultiInputVar("X");
    if (x_vars[0]->IsType<framework::LoDTensor>()) {
      int dtype = -1;
      for (auto& x_var : x_vars) {
        auto& lod_tensor = x_var->Get<framework::LoDTensor>();
        if (lod_tensor.numel() == 0) {
          continue;
        }
        if (dtype == -1) {
          dtype = framework::ToDataType(lod_tensor.type());
        } else {
          PADDLE_ENFORCE_EQ(dtype, framework::ToDataType(lod_tensor.type()));
        }
      }
      PADDLE_ENFORCE_NE(
          dtype, -1, "Sum operator should have at least one non-empty tensor");

      return framework::OpKernelType(
          static_cast<framework::proto::VarType::Type>(dtype),
          ctx.device_context());
    } else if (x_vars[0]->IsType<framework::SelectedRows>()) {
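      // For SelectedRows inputs, pick the dtype of the first initialized
      // value tensor.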
      for (auto& var : x_vars) {
        auto& value = var->Get<framework::SelectedRows>().value();
        if (value.IsInitialized()) {
          return framework::OpKernelType(framework::ToDataType(value.type()),
                                         ctx.device_context());
        }
      }
      // If the input sparse vars are not initialized, use a default kernel type.
      return framework::OpKernelType(framework::proto::VarType::FP32,
                                     ctx.device_context());
    } else if (x_vars[0]->IsType<framework::LoDTensorArray>()) {
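      // For tensor arrays, scan every array until some non-empty tensor
      // provides the dtype.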
      for (auto& x_var : x_vars) {
        auto& array = x_var->Get<framework::LoDTensorArray>();
        for (auto& each : array) {
          if (each.numel() != 0) {
            return framework::OpKernelType(framework::ToDataType(each.type()),
                                           ctx.device_context());
          }
        }
      }
      PADDLE_THROW("Cannot find the input data type: all inputs are empty");
    }
    PADDLE_THROW("Unexpected branch. Input type is %s",
                 x_vars[0]->Type().name());
  }
};

class SumOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  SumOpMaker(OpProto* proto, OpAttrChecker* op_checker)
      : OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "(vector<Tensor>) The input tensors of sum operator.")
        .AsDuplicable();
    AddOutput("Out", "(Tensor) The output tensor of sum operator.");
    AddComment(R"DOC(
Sum operator.

This operator sums the input tensors. All the inputs can carry the
LoD (Level of Details) information. However, the output only shares
the LoD information with the first input.
)DOC");
  }
};

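// Infers the type of Out: LOD_TENSOR_ARRAY when all inputs are tensor
// arrays, LOD_TENSOR when any input is a LoDTensor, SELECTED_ROWS otherwise.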
class SumOpVarTypeInference : public framework::VarTypeInference {
 public:
  void operator()(const framework::OpDesc& op_desc,
                  framework::BlockDesc* block) const override {
    auto& inputs = op_desc.Input("X");
    auto var_type = framework::proto::VarType::SELECTED_ROWS;

    for (auto& name : inputs) {
      VLOG(10) << name << " "
               << block->FindRecursiveOrCreateVar(name).GetType();
    }

    bool any_input_is_lod_tensor = std::any_of(
        inputs.begin(), inputs.end(), [block](const std::string& name) {
          return block->FindRecursiveOrCreateVar(name).GetType() ==
                 framework::proto::VarType::LOD_TENSOR;
        });

    auto is_tensor_array = [block](const std::string& name) {
      return block->FindRecursiveOrCreateVar(name).GetType() ==
             framework::proto::VarType::LOD_TENSOR_ARRAY;
    };

    bool any_input_is_tensor_array =
        std::any_of(inputs.begin(), inputs.end(), is_tensor_array);
    bool all_inputs_are_tensor_array =
        std::all_of(inputs.begin(), inputs.end(), is_tensor_array);

    if (any_input_is_tensor_array) {
      if (!all_inputs_are_tensor_array) {
        std::ostringstream os;
        for (auto& each : inputs) {
          os << "    " << each << " type is "
             << block->FindRecursiveOrCreateVar(each).GetType() << "\n";
        }
        PADDLE_ENFORCE(all_inputs_are_tensor_array,
                       "Not all inputs are tensor array:\n%s", os.str());
      }
      var_type = framework::proto::VarType::LOD_TENSOR_ARRAY;
    } else if (any_input_is_lod_tensor) {
      var_type = framework::proto::VarType::LOD_TENSOR;
    }

    auto out_var_name = op_desc.Output("Out").front();
    auto& out_var = block->FindRecursiveOrCreateVar(out_var_name);
    out_var.SetType(var_type);
    auto& in_var = detail::Ref(block->FindVarRecursive(inputs.front()));
    out_var.SetDataType(in_var.GetDataType());
  }
};

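// The gradient of a sum is the identity for every input, so each
// Input(X)@GRAD is produced by a "scale" op with scale = 1.0 on Out@GRAD.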
class SumGradMaker : public framework::GradOpDescMakerBase {
 public:
  using framework::GradOpDescMakerBase::GradOpDescMakerBase;

  std::vector<std::unique_ptr<framework::OpDesc>> operator()() const override {
    auto x_grads = InputGrad("X", false);
    std::vector<std::unique_ptr<framework::OpDesc>> grad_ops;
    grad_ops.reserve(x_grads.size());
    auto og = OutputGrad("Out");
    std::transform(x_grads.begin(), x_grads.end(), std::back_inserter(grad_ops),
                   [&og](const std::string& x_grad) {
                     auto* grad_op = new framework::OpDesc();
                     grad_op->SetType("scale");
                     grad_op->SetInput("X", og);
                     grad_op->SetOutput("Out", {x_grad});
                     grad_op->SetAttr("scale", 1.0f);
                     return std::unique_ptr<framework::OpDesc>(grad_op);
                   });
    return grad_ops;
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

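// SumGradMaker emits "scale" ops directly, so no separate sum_grad operator
// needs to be registered.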
REGISTER_OPERATOR(sum, ops::SumOp, ops::SumOpMaker, ops::SumGradMaker,
                  ops::SumOpVarTypeInference);
REGISTER_OP_CPU_KERNEL(
    sum, ops::SumKernel<paddle::platform::CPUDeviceContext, float>,
    ops::SumKernel<paddle::platform::CPUDeviceContext, double>,
    ops::SumKernel<paddle::platform::CPUDeviceContext, int>,
    ops::SumKernel<paddle::platform::CPUDeviceContext, int64_t>);