// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/operators/reduce_ops/reduce_sum_op.h"
#include <string>

// Forward declarations only — keeps this translation unit's header footprint
// small; the full definitions are pulled in where actually needed.
namespace paddle {
namespace framework {
class OpDesc;
}  // namespace framework
namespace imperative {
class OpBase;
}  // namespace imperative
namespace platform {
class CPUDeviceContext;
}  // namespace platform
}  // namespace paddle

namespace paddle {
namespace operators {

// NOTE: Input(Out) is unnecessary in reduce_sum_grad, and Input(X) needs no
// buffer
H
hong 已提交
36 37 38

template <typename T>
class ReduceSumOpGradMaker : public framework::SingleGradOpMaker<T> {
39
 public:
H
hong 已提交
40
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
41 42

 protected:
43
  void Apply(GradOpPtr<T> op) const override {
44
    op->SetType("reduce_sum_grad");
H
hong 已提交
45 46 47 48
    op->SetInput("X", this->Input("X"));
    op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
    op->SetAttrMap(this->Attrs());
    op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
49
  }
50 51 52

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const {
53
    int in_dtype = ctx.Attr<int>("in_dtype");
54 55 56 57 58 59 60 61 62 63
    if (in_dtype >= 0) {
      return framework::OpKernelType(
          static_cast<framework::proto::VarType::Type>(in_dtype),
          ctx.GetPlace());
    }
    return framework::OpKernelType(
        framework::OperatorWithKernel::IndicateVarDataType(
            ctx, framework::GradVarName("Out")),
        ctx.GetPlace());
  }
64 65
};

66 67 68 69 70 71 72 73 74 75 76 77 78 79
template <typename T>
class ReduceSumDoubleOpGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetInput("X", this->OutputGrad(framework::GradVarName("X")));
    op->SetOutput("Out", this->InputGrad(framework::GradVarName("Out")));
    op->SetAttrMap(this->Attrs());
    op->SetType("reduce_sum");
  }
};

80
DECLARE_NO_NEED_BUFFER_VARS_INFERER(ReduceSumGradNoNeedBufferVarInferer, "X");
81 82 83 84
class ReduceSumVarTypeInference : public paddle::framework::VarTypeInference {
 public:
  void operator()(paddle::framework::InferVarTypeContext* ctx) const override {
    auto data_type = static_cast<paddle::framework::proto::VarType::Type>(
85
        BOOST_GET_CONST(int, ctx->GetAttr("out_dtype")));
86
    if (data_type >= 0) {
87
      ctx->SetOutputDataType("Out", data_type);
88 89 90
    }
  }
};
91 92 93 94 95 96 97 98 99 100 101

}  // namespace operators
}  // namespace paddle

class ReduceSumOpMaker : public ops::ReduceOpMaker {
 protected:
  virtual std::string GetName() const { return "reduce_sum"; }
  virtual std::string GetOpType() const { return "Reduce reduce_sum"; }
};

// Register the forward op with its var-type inference and grad makers for
// both static-graph (OpDesc) and imperative (OpBase) modes.
REGISTER_OPERATOR(reduce_sum, ops::ReduceOp, ReduceSumOpMaker,
                  ops::ReduceSumVarTypeInference,
                  ops::ReduceSumOpGradMaker<paddle::framework::OpDesc>,
                  ops::ReduceSumOpGradMaker<paddle::imperative::OpBase>);
// Register the grad op with its double-grad makers and the no-need-buffer
// inferer so X's buffer can be released early.
REGISTER_OPERATOR(reduce_sum_grad, ops::ReduceGradOp,
                  ops::ReduceSumDoubleOpGradMaker<paddle::framework::OpDesc>,
                  ops::ReduceSumDoubleOpGradMaker<paddle::imperative::OpBase>,
                  ops::ReduceSumGradNoNeedBufferVarInferer);

// CPU forward kernels for every supported element type.
REGISTER_OP_CPU_KERNEL(
    reduce_sum, ops::ReduceKernel<paddle::platform::CPUDeviceContext, bool,
                                  ops::SumFunctor>,
    ops::ReduceKernel<paddle::platform::CPUDeviceContext, float,
                      ops::SumFunctor>,
    ops::ReduceKernel<paddle::platform::CPUDeviceContext, double,
                      ops::SumFunctor>,
    ops::ReduceKernel<paddle::platform::CPUDeviceContext,
                      paddle::platform::float16, ops::SumFunctor>,
    ops::ReduceKernel<paddle::platform::CPUDeviceContext, int, ops::SumFunctor>,
    ops::ReduceKernel<paddle::platform::CPUDeviceContext, int64_t,
                      ops::SumFunctor>,
    ops::ReduceKernel<paddle::platform::CPUDeviceContext,
                      paddle::platform::complex<float>, ops::SumFunctor>,
    ops::ReduceKernel<paddle::platform::CPUDeviceContext,
                      paddle::platform::complex<double>, ops::SumFunctor>);

// Convenience alias: CPU backward kernel for reduce_sum, parameterized only
// by element type T (device context and functor are fixed; the meaning of the
// trailing `true` flag is defined by ReduceSumGradKernel in reduce_sum_op.h —
// confirm there before changing it).
template <typename T>
using CPUReduceSumGradKernel =
    ops::ReduceSumGradKernel<paddle::platform::CPUDeviceContext, T,
                             ops::SumGradFunctor, true>;

134 135 136
REGISTER_OP_CPU_KERNEL(
    reduce_sum_grad, CPUReduceSumGradKernel<bool>,
    CPUReduceSumGradKernel<float>, CPUReduceSumGradKernel<double>,
137
    CPUReduceSumGradKernel<paddle::platform::float16>,
138 139 140
    CPUReduceSumGradKernel<int>, CPUReduceSumGradKernel<int64_t>,
    CPUReduceSumGradKernel<paddle::platform::complex<float>>,
    CPUReduceSumGradKernel<paddle::platform::complex<double>>);