/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <algorithm>
#include <string>
#include <vector>

#include "paddle/fluid/operators/reduce_ops/reduce_op_function.h"

namespace paddle {
namespace operators {

#define HANDLE_DIM(NDIM, RDIM)                                            \
  if (ndim == NDIM && rdim == RDIM) {                                     \
    ReduceFunctor<DeviceContext, T, NDIM, RDIM, Functor>(                 \
        context.template device_context<DeviceContext>(), *input, output, \
        dims, keep_dim);                                                  \
  }
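// Illustrative expansion (added comment, not in the original source): with a
// 4-D input and dims = {1, 2}, ndim == 4 and rdim == 2, so HANDLE_DIM(4, 2)
// matches and instantiates ReduceFunctor<DeviceContext, T, 4, 2, Functor>.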

template <typename DeviceContext, typename T, typename Functor>
class ReduceKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    bool reduce_all = context.Attr<bool>("reduce_all");
    auto* input = context.Input<Tensor>("X");
    auto* output = context.Output<Tensor>("Out");
    output->mutable_data<T>(context.GetPlace());

    auto dims = context.Attr<std::vector<int>>("dim");
    bool keep_dim = context.Attr<bool>("keep_dim");

    if (reduce_all) {
      // Flatten the input and reduce it as a 1-D tensor.
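      // Example (added for illustration): an input of shape {2, 3} is viewed
      // as a 1-D tensor of 6 elements and reduced along dimension 0, so the
      // output is a single scalar.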
      auto x = EigenVector<T>::Flatten(*input);
      auto out = EigenScalar<T>::From(*output);
      auto& place =
          *context.template device_context<DeviceContext>().eigen_device();
      auto reduce_dim = Eigen::array<int, 1>({{0}});
      Functor functor;
      functor(place, &x, &out, reduce_dim);
    } else {
      int ndim = input->dims().size();
      int rdim = dims.size();
      // The rank-5 and rank-6 cases below are commented out temporarily to
      // speed up compilation.
      //      HANDLE_DIM(6, 5);
      //      HANDLE_DIM(6, 4);
      //      HANDLE_DIM(6, 3);
      //      HANDLE_DIM(6, 2);
      //      HANDLE_DIM(6, 1);
      //      HANDLE_DIM(5, 4);
      //      HANDLE_DIM(5, 3);
      //      HANDLE_DIM(5, 2);
      //      HANDLE_DIM(5, 1);
      HANDLE_DIM(4, 3);
      HANDLE_DIM(4, 2);
      HANDLE_DIM(4, 1);
      HANDLE_DIM(3, 2);
      HANDLE_DIM(3, 1);
      HANDLE_DIM(2, 1);
      HANDLE_DIM(1, 1);
    }
  }
};

template <typename DeviceContext, typename T, typename Functor>
class ReduceGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    bool reduce_all = context.Attr<bool>("reduce_all");
    auto dims = context.Attr<std::vector<int>>("dim");

    auto* input0 = context.Input<Tensor>("X");
    auto* input1 = context.Input<Tensor>("Out");
    auto* input2 = context.Input<Tensor>(framework::GradVarName("Out"));
    auto* output = context.Output<Tensor>(framework::GradVarName("X"));
    output->mutable_data<T>(context.GetPlace());

    // NOTE(dengkaipeng): Out is unnecessary in some reduce kernels and is not
    // set as an input in the grad op maker, so use Out@GRAD in its place here.
    if (!input1) input1 = input2;

    if (reduce_all) {
      auto x = EigenVector<T>::Flatten(*input0);
      auto x_reduce = EigenVector<T>::From(*input1);
      auto x_reduce_grad = EigenVector<T>::From(*input2);
      auto x_grad = EigenVector<T>::Flatten(*output);
      auto& place =
          *context.template device_context<DeviceContext>().eigen_device();
      auto broadcast_dim =
          Eigen::array<int, 1>({{static_cast<int>(input0->numel())}});
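      // Example (added for illustration): reducing all elements of a {2, 3}
      // input gives broadcast_dim = {6}; e.g. for a sum reduction, the scalar
      // Out@GRAD is simply broadcast to every element of X@GRAD.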
      Functor functor;
      functor(place, &x, &x_reduce, &x_grad, &x_reduce_grad, broadcast_dim,
              broadcast_dim[0]);
    } else {
      int rank = input0->dims().size();
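      // Dispatch on the input rank; InferShape guarantees rank <= 6.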
      switch (rank) {
        case 1:
          ReduceGradFunctor<DeviceContext, T, 1, Functor>(
              context.template device_context<DeviceContext>(), *input0,
              *input1, *input2, output, dims);
          break;
        case 2:
          ReduceGradFunctor<DeviceContext, T, 2, Functor>(
              context.template device_context<DeviceContext>(), *input0,
              *input1, *input2, output, dims);
          break;
        case 3:
          ReduceGradFunctor<DeviceContext, T, 3, Functor>(
              context.template device_context<DeviceContext>(), *input0,
              *input1, *input2, output, dims);
          break;
        case 4:
          ReduceGradFunctor<DeviceContext, T, 4, Functor>(
              context.template device_context<DeviceContext>(), *input0,
              *input1, *input2, output, dims);
          break;
        case 5:
          ReduceGradFunctor<DeviceContext, T, 5, Functor>(
              context.template device_context<DeviceContext>(), *input0,
              *input1, *input2, output, dims);
          break;
        case 6:
          ReduceGradFunctor<DeviceContext, T, 6, Functor>(
              context.template device_context<DeviceContext>(), *input0,
              *input1, *input2, output, dims);
          break;
      }
    }
  }
};

class ReduceOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"),
                   "Input(X) of ReduceOp should not be null.");
    PADDLE_ENFORCE(ctx->HasOutput("Out"),
                   "Output(Out) of ReduceOp should not be null.");
    auto x_dims = ctx->GetInputDim("X");
    auto x_rank = x_dims.size();
    PADDLE_ENFORCE_LE(x_rank, 6, "Tensors with rank at most 6 are supported.");
    auto dims = ctx->Attrs().Get<std::vector<int>>("dim");
    for (size_t i = 0; i < dims.size(); ++i) {
      if (dims[i] < 0) dims[i] = x_rank + dims[i];
      PADDLE_ENFORCE_LT(
          dims[i], x_rank,
          "The dim should be in the range [-rank(input), rank(input)).");
    }
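    // Example (added for illustration): with x_rank = 4, dim = {-1} is
    // normalized to {3} by the loop above.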
    sort(dims.begin(), dims.end());
    bool reduce_all = ctx->Attrs().Get<bool>("reduce_all");
    bool keep_dim = ctx->Attrs().Get<bool>("keep_dim");
    if (reduce_all) {
      if (keep_dim)
        ctx->SetOutputDim(
            "Out", framework::make_ddim(std::vector<int64_t>(x_rank, 1)));
      else
        ctx->SetOutputDim("Out", {1});
    } else {
      auto dims_vector = vectorize(x_dims);
      if (keep_dim) {
        for (size_t i = 0; i < dims.size(); ++i) {
          dims_vector[dims[i]] = 1;
        }
      } else {
        const int kDelFlag = -2;
        for (size_t i = 0; i < dims.size(); ++i) {
          dims_vector[dims[i]] = kDelFlag;
        }
        dims_vector.erase(
            remove(dims_vector.begin(), dims_vector.end(), kDelFlag),
            dims_vector.end());
      }
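      // Example (added for illustration): x_dims = {2, 3, 4} with dim = {1}
      // yields out_dims = {2, 1, 4} if keep_dim is true, {2, 4} otherwise.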
      auto out_dims = framework::make_ddim(dims_vector);
      ctx->SetOutputDim("Out", out_dims);
      if (dims[0] != 0) {
        // Only pass LoD when not reducing on the first dim.
        ctx->ShareLoD("X", /*->*/ "Out");
      }
    }
  }
};

class ReduceGradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null.");
    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
                   "Input(Out@GRAD) should not be null.");
    auto x_dims = ctx->GetInputDim("X");
    auto x_rank = x_dims.size();
    PADDLE_ENFORCE_LE(x_rank, 6, "Tensors with rank at most 6 are supported.");
    auto dims = ctx->Attrs().Get<std::vector<int>>("dim");
    for (size_t i = 0; i < dims.size(); ++i) {
      if (dims[i] < 0) dims[i] = x_rank + dims[i];
      PADDLE_ENFORCE_LT(
          dims[i], x_rank,
          "The dim should be in the range [-rank(input), rank(input)).");
    }
    sort(dims.begin(), dims.end());
    auto x_grad_name = framework::GradVarName("X");
    if (ctx->HasOutput(x_grad_name)) {
      ctx->SetOutputDim(x_grad_name, x_dims);
      ctx->ShareLoD("X", /*->*/ x_grad_name);
    }
  }
};

class ReduceOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() final {
    AddInput("X",
             "(Tensor) The input tensor. Tensors with rank at most 6 are "
             "supported.");
    AddOutput("Out", "(Tensor) The result tensor.");
    AddAttr<std::vector<int>>(
        "dim",
        "(list<int>, default {0}) The dimensions to reduce. "
        "Must be in the range [-rank(input), rank(input)). "
        "If `dim[i] < 0`, the dims[i] to reduce is `rank + dims[i]`. "
        "Note that reducing on the first dim will make the LoD info lost.")
        .SetDefault({0});
    AddAttr<bool>("keep_dim",
                  "(bool, default false) "
                  "If true, retain the reduced dimension with length 1.")
        .SetDefault(false);
    AddAttr<bool>("reduce_all",
                  "(bool, default false) "
                  "If true, output a scalar reduced along all dimensions.")
        .SetDefault(false);
    AddComment(string::Sprintf(R"DOC(
%s Operator.

This operator computes the %s of the input tensor along the given dimension.
The result tensor has one fewer dimension than the input unless keep_dim is true.
If reduce_all is true, all dimensions are reduced and the output is a scalar.

)DOC",
                               GetOpType(), GetName()));
  }

 protected:
  virtual std::string GetName() const = 0;
  virtual std::string GetOpType() const = 0;
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

#define REGISTER_REDUCE_OP(op_name)                                      \
  class __##op_name##Maker__ : public ops::ReduceOpMaker {               \
   protected:                                                            \
    virtual std::string GetName() const { return #op_name; }             \
    virtual std::string GetOpType() const { return "Reduce " #op_name; } \
  };                                                                     \
  REGISTER_OPERATOR(op_name, ops::ReduceOp, __##op_name##Maker__,        \
                    paddle::framework::DefaultGradOpDescMaker<true>);    \
  REGISTER_OPERATOR(op_name##_grad, ops::ReduceGradOp)
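// Illustrative usage (added comment; actual registrations live in the per-op
// .cc files): e.g. REGISTER_REDUCE_OP(reduce_prod) would define
// __reduce_prodMaker__ and register the reduce_prod and reduce_prod_grad
// operators.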

#define REGISTER_REDUCE_OP_WITHOUT_GRAD(op_name)                         \
  class __##op_name##Maker__ : public ops::ReduceOpMaker {               \
   protected:                                                            \
    virtual std::string GetName() const { return #op_name; }             \
    virtual std::string GetOpType() const { return "Reduce " #op_name; } \
  };                                                                     \
  REGISTER_OPERATOR(op_name, ops::ReduceOp, __##op_name##Maker__,        \
                    paddle::framework::EmptyGradOpMaker);