/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <algorithm>
#include <set>
#include <string>
#include <vector>
#include "paddle/fluid/framework/data_type_transform.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/cast_op.h"
#include "paddle/fluid/operators/reduce_ops/reduce_op_function.h"
#include "paddle/phi/kernels/funcs/math_function.h"
// only the headers in the paddle/phi/api dirs can be included
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/phi/api/lib/utils/tensor_utils.h"
#include "paddle/phi/kernels/cpu/reduce.h"

#if defined(__HIPCC__) || defined(__NVCC__)
#include "paddle/phi/kernels/gpu/reduce.h"
#include "paddle/phi/kernels/gpu/reduce_grad.h"
#endif

namespace paddle {
namespace operators {

#define HANDLE_DIM(NDIM, RDIM)                                            \
  if (ndim == NDIM && rdim == RDIM) {                                     \
    paddle::operators::ReduceFunctor<DeviceContext, OutT, NDIM, RDIM,     \
                                     Functor>(                            \
        context.template device_context<DeviceContext>(), *input, output, \
        dims, keep_dim);                                                  \
  }

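// Illustrative expansion (hypothetical instantiation, not from the original
// sources): HANDLE_DIM(3, 1) expands to
//   if (ndim == 3 && rdim == 1) {
//     paddle::operators::ReduceFunctor<DeviceContext, OutT, 3, 1, Functor>(
//         context.template device_context<DeviceContext>(), *input, output,
//         dims, keep_dim);
//   }
// so both the tensor rank and the number of reduced dims become compile-time
// constants, letting Eigen emit a specialized kernel per (NDIM, RDIM) pair.
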
using Tensor = framework::Tensor;
using DDim = framework::DDim;

inline void GetShuffledDim(const DDim& src_dims, DDim* dst_dims,
                           const std::vector<int>& reduced_dims,
                           std::vector<int>* perm_axis) {
  // check if it's a reduced dim
  std::vector<bool> src_dims_check(src_dims.size(), false);
  size_t src_size = src_dims.size();
  size_t reduce_size = reduced_dims.size();
  for (size_t i = 0; i < reduce_size; ++i) {
    dst_dims->at(src_size - reduce_size + i) = src_dims[reduced_dims[i]];
    (*perm_axis)[src_size - reduce_size + i] = reduced_dims[i];
    src_dims_check[reduced_dims[i]] = true;
  }

  size_t offset = 0;
  for (size_t i = 0; i < src_dims_check.size(); ++i) {
    bool is_reduced = src_dims_check[i];
    if (!is_reduced) {
      (*perm_axis)[offset] = i;
      dst_dims->at(offset++) = src_dims[i];
    }
  }
}
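
// Worked example (illustrative values): with src_dims = {8, 4, 6, 2} and
// reduced_dims = {1, 3}, the unreduced dims keep their relative order and
// the reduced dims move to the tail:
//   dst_dims = {8, 6, 4, 2},  perm_axis = {0, 2, 1, 3}
// so transposing src by perm_axis yields a tensor of shape dst_dims.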

static inline std::vector<int> GetReduceDim(const std::vector<int>& dims,
                                            int dim_size, bool reduce_all) {
  std::vector<int> reduce_dims;
  if (reduce_all) {
    reduce_dims.resize(dim_size);
    int reduce_size = reduce_dims.size();
    for (int i = 0; i < reduce_size; ++i) {
      reduce_dims[i] = i;
    }
  } else {
    for (auto e : dims) {
      PADDLE_ENFORCE_LT(e, dim_size,
                        paddle::platform::errors::InvalidArgument(
                            "ReduceOp: invalid axis; when the rank of X is "
                            "%d, axis[i] should be less than the rank, but "
                            "got %d.",
                            dim_size, e));
      reduce_dims.push_back(e >= 0 ? e : e + dim_size);
    }
  }
  return reduce_dims;
}
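
// Worked example (illustrative values): GetReduceDim({-1, 0}, /*dim_size=*/3,
// /*reduce_all=*/false) returns {2, 0}; negative axes are wrapped by adding
// dim_size. With reduce_all = true every axis is enumerated, i.e. {0, 1, 2}.
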
template <typename DeviceContext, typename OutT>
void GetShuffledInput(const framework::ExecutionContext& context,
                      const Tensor* input, Tensor* shuffled_input,
                      const std::vector<int>& dims) {
  DDim shuffled_dims(input->dims());
  std::vector<int> perm_axis(input->dims().size());
  GetShuffledDim(input->dims(), &shuffled_dims, dims, &perm_axis);

  shuffled_input->Resize(shuffled_dims);
  shuffled_input->mutable_data<OutT>(context.GetPlace());

  phi::funcs::TransposeNormal<DeviceContext, OutT> trans;
  trans(context.template device_context<DeviceContext>(), *input,
        shuffled_input, perm_axis);
}

inline void GetOriginDimFromShuffled(const DDim& src_dim,
                                     const std::vector<int>& dims,
                                     std::vector<int>* origin_dim) {
  DDim shuffled_dims(src_dim);
  size_t n = src_dim.size();
  std::vector<int> perm_axis(n);
  GetShuffledDim(src_dim, &shuffled_dims, dims, &perm_axis);
  for (size_t i = 0; i < n; ++i) {
    (*origin_dim)[perm_axis[i]] = i;
  }
}
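
// Worked example (illustrative values): for a rank-3 src_dim reduced over
// dims = {0}, GetShuffledDim yields perm_axis = {1, 2, 0}; the loop above
// stores the inverse permutation origin_dim = {2, 0, 1}, which maps the
// shuffled gradient back to the original axis order.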

template <typename DeviceContext, typename OutT, typename Functor>
void HandleLargeDim(const framework::ExecutionContext& context,
                    const Tensor* input, Tensor* output,
                    const std::vector<int>& dims, bool keep_dim) {
  // shuffle the reduced dims to the end
  Tensor shuffled_input;
  GetShuffledInput<DeviceContext, OutT>(context, input, &shuffled_input, dims);

  // view the shuffled input as a 2-D tensor of shape {unreduced, reduced}
  const int64_t unreduced = output->numel();
  const int64_t reduced = shuffled_input.numel() / unreduced;
  shuffled_input.Resize({unreduced, reduced});
  DDim output_dim = output->dims();
  output->Resize({unreduced});
  paddle::operators::ReduceFunctor<DeviceContext, OutT, 2, 1, Functor>(
      context.template device_context<DeviceContext>(), shuffled_input, output,
      {1}, keep_dim);
  output->Resize(output_dim);
}
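
// Illustrative trace (hypothetical shapes): an input of shape
// {2, 3, 4, 5, 6, 7, 8} reduced over dims = {2, 5} is shuffled to
// {2, 3, 5, 6, 8, 4, 7}, viewed as a 2-D tensor {unreduced, reduced} with
// unreduced = 2*3*5*6*8 and reduced = 4*7, reduced along axis 1, and finally
// restored to the expected output dims.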

template <typename DeviceContext, typename T, typename Functor>
void HandleLargeDimGrad(const framework::ExecutionContext& context,
                        const framework::Tensor* x,
                        const framework::Tensor* out,
                        const framework::Tensor* dout, framework::Tensor* dx,
                        Functor functor, const std::vector<int>& dims) {
  const int64_t unreduced = out->numel();
  const int64_t reduced = x->numel() / unreduced;
  DDim out_dim(out->dims());
  DDim x_dim(x->dims());
  // transpose and reshape X
  Tensor shuffled_x;
  GetShuffledInput<DeviceContext, T>(context, x, &shuffled_x, dims);
  DDim shuffled_dim = shuffled_x.dims();
  shuffled_x.Resize({unreduced, reduced});
  // reshape dX {unreduced, reduced}
  dx->Resize({unreduced, reduced});
  ReduceGradFunctor<DeviceContext, T, 2, Functor>(
      context.template device_context<DeviceContext>(), shuffled_x, *out, *dout,
      dx, functor, {1});
  // transpose dX
  std::vector<int> origin_axis(x_dim.size());
  GetOriginDimFromShuffled(x_dim, dims, &origin_axis);
  Tensor dx_tmp;
  framework::TensorCopy(*dx, context.GetPlace(), &dx_tmp);
  dx_tmp.Resize(shuffled_dim);
  dx->Resize(x_dim);
  phi::funcs::TransposeNormal<DeviceContext, T> trans;
  trans(context.template device_context<DeviceContext>(), dx_tmp, dx,
        origin_axis);
}

template <typename DeviceContext, typename T, typename Functor>
struct ReduceKernelFunctor {
  const Tensor* input;
  Tensor* output;
  std::vector<int> dims;
  bool keep_dim;
  bool reduce_all;
  const framework::ExecutionContext& context;
  ReduceKernelFunctor(const Tensor* input, Tensor* output,
                      const std::vector<int>& dims, bool keep_dim,
                      bool reduce_all,
                      const framework::ExecutionContext& context)
      : input(input),
        output(output),
        dims(dims),
        keep_dim(keep_dim),
        reduce_all(reduce_all),
        context(context) {}

  template <typename OutT>
  void apply() const {
    output->mutable_data<OutT>(context.GetPlace());
    if (reduce_all) {
      // Flatten the input into a 1-D tensor, then reduce it
      auto x = EigenVector<OutT>::Flatten(*input);
      auto out = EigenScalar<OutT>::From(*output);
      auto& place =
          *context.template device_context<DeviceContext>().eigen_device();
      auto reduce_dim = Eigen::array<int, 1>({{0}});
      Functor functor;
      functor(place, &x, &out, reduce_dim);
    } else {
      int ndim = input->dims().size();
      int rdim = dims.size();
      if (ndim > 6) {
        HandleLargeDim<DeviceContext, OutT, Functor>(context, input, output,
                                                     dims, keep_dim);
      } else {
        HANDLE_DIM(6, 5);
        HANDLE_DIM(6, 4);
        HANDLE_DIM(6, 3);
        HANDLE_DIM(6, 2);
        HANDLE_DIM(6, 1);
        HANDLE_DIM(5, 4);
        HANDLE_DIM(5, 3);
        HANDLE_DIM(5, 2);
        HANDLE_DIM(5, 1);
        HANDLE_DIM(4, 3);
        HANDLE_DIM(4, 2);
        HANDLE_DIM(4, 1);
        HANDLE_DIM(3, 2);
        HANDLE_DIM(3, 1);
        HANDLE_DIM(2, 1);
        HANDLE_DIM(1, 1);
      }
    }
  }
};
template <typename DeviceContext, typename T, typename Functor>
class ReduceKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    bool reduce_all = context.Attr<bool>("reduce_all");
    auto* output = context.Output<Tensor>("Out");
    auto dims = context.Attr<std::vector<int>>("dim");
    bool keep_dim = context.Attr<bool>("keep_dim");
    int out_dtype = context.Attr<int>("out_dtype");
    framework::proto::VarType::Type cast_out_dtype;
    auto* input = context.Input<Tensor>("X");

    if (out_dtype < 0) {
      cast_out_dtype = static_cast<framework::proto::VarType::Type>(
          framework::TransToProtoVarType(input->dtype()));
    } else {
      cast_out_dtype = static_cast<framework::proto::VarType::Type>(out_dtype);
    }

    auto& dev_ctx = context.device_context<DeviceContext>();
    output->mutable_data(
        dev_ctx.GetPlace(),
        static_cast<framework::proto::VarType::Type>(cast_out_dtype));

    std::vector<int64_t> tmp_dims(dims.begin(), dims.end());

    // call the new phi kernel
    phi::Reduce<typename framework::ConvertToPhiContext<DeviceContext>::TYPE, T,
                Functor>(
        static_cast<const typename framework::ConvertToPhiContext<
            DeviceContext>::TYPE&>(dev_ctx),
        *input, reduce_all, tmp_dims, keep_dim,
        framework::TransToPhiDataType(cast_out_dtype), output);
  }
};

template <typename DeviceContext, typename T, typename Functor>
void LaunchReduceGradKernel(const framework::ExecutionContext& context,
                            const framework::Tensor* input0,
                            const framework::Tensor* input1,
                            const framework::Tensor* input2,
                            paddle::framework::Tensor* output, Functor functor,
                            const std::vector<int>& dims,
                            bool reduce_all = false) {
  if (reduce_all) {
    auto x = EigenVector<T>::Flatten(*input0);
    auto x_reduce = EigenVector<T>::Flatten(*input1);
    auto x_reduce_grad = EigenVector<T>::Flatten(*input2);
    auto x_grad = EigenVector<T>::Flatten(*output);
    auto& place =
        *context.template device_context<DeviceContext>().eigen_device();
    auto broadcast_dim =
        Eigen::array<int, 1>({{static_cast<int>(input0->numel())}});
    functor(place, &x, &x_reduce, &x_grad, &x_reduce_grad, broadcast_dim,
            broadcast_dim[0]);
  } else {
    int rank = input0->dims().size();
    switch (rank) {
      case 1:
        ReduceGradFunctor<DeviceContext, T, 1, Functor>(
            context.template device_context<DeviceContext>(), *input0, *input1,
            *input2, output, functor, dims);
        break;
      case 2:
        ReduceGradFunctor<DeviceContext, T, 2, Functor>(
            context.template device_context<DeviceContext>(), *input0, *input1,
            *input2, output, functor, dims);
        break;
      case 3:
        ReduceGradFunctor<DeviceContext, T, 3, Functor>(
            context.template device_context<DeviceContext>(), *input0, *input1,
            *input2, output, functor, dims);
        break;
      case 4:
        ReduceGradFunctor<DeviceContext, T, 4, Functor>(
            context.template device_context<DeviceContext>(), *input0, *input1,
            *input2, output, functor, dims);
        break;
      case 5:
        ReduceGradFunctor<DeviceContext, T, 5, Functor>(
            context.template device_context<DeviceContext>(), *input0, *input1,
            *input2, output, functor, dims);
        break;
      case 6:
        ReduceGradFunctor<DeviceContext, T, 6, Functor>(
            context.template device_context<DeviceContext>(), *input0, *input1,
            *input2, output, functor, dims);
        break;
      default:
        HandleLargeDimGrad<DeviceContext, T, Functor>(
            context, input0, input1, input2, output, functor, dims);
        break;
    }
  }
}
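
// Illustrative dispatch (hypothetical shapes): for a rank-2 input the switch
// above instantiates ReduceGradFunctor<..., 2, ...>; e.g. the gradient of a
// sum over dim 1 of a {2, 3} tensor broadcasts each dout element back over
// the 3 reduced entries of its row. Ranks above 6 fall back to
// HandleLargeDimGrad, which reuses the shuffle-to-2-D trick.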

template <typename DeviceContext, typename T, typename Functor,
          bool kNoNeedBufferX = false, bool kNoNeedBufferY = false>
class ReduceGradKernel : public framework::OpKernel<T> {
 public:
  void ComputeFromInput(const Tensor* input2,
                        const framework::ExecutionContext& context) const {
    bool reduce_all = context.Attr<bool>("reduce_all");
    auto dims = context.Attr<std::vector<int>>("dim");
    auto* input0 = context.Input<Tensor>("X");
    auto* input1 = context.Input<Tensor>("Out");

    auto* output = context.Output<Tensor>(framework::GradVarName("X"));
    output->mutable_data<T>(context.GetPlace());

    // If dims covers every dimension of X, set reduce_all to true
    const auto& input_dim_size = context.Input<Tensor>("X")->dims().size();
    std::set<int> dims_set(dims.begin(), dims.end());
    bool full_dim = true;
    for (auto i = 0; i < input_dim_size; i++) {
      if (dims_set.find(i) == dims_set.end()) {
        full_dim = false;
        break;
      }
    }
    reduce_all = (reduce_all || full_dim);
    // NOTE: EigenTensor::From() uses tensor->data().
    // If the op has a NoNeedBufferVarsInferer, the corresponding
    // kNoNeedBufferX or kNoNeedBufferY should be set to true, and a fake
    // var with the same dims is used instead.
    if (kNoNeedBufferX) {
      input0 = output;
    }
    if (kNoNeedBufferY) {
      input1 = input2;
    }

    const std::vector<int> const_dims = dims;

    // NOTE(dengkaipeng): Out is unnecessary in some reduce kernels and is
    // not set as an Input in the grad maker, so use Out@GRAD instead here
    if (!input1) input1 = input2;
    Functor functor;
    LaunchReduceGradKernel<DeviceContext, T, Functor>(context, input0, input1,
                                                      input2, output, functor,
                                                      const_dims, reduce_all);
  }

  void Compute(const framework::ExecutionContext& context) const override {
    int in_dtype = context.Attr<int>("in_dtype");
    if (in_dtype >= 0) {
      Tensor tmp_tensor;
      auto* pre_input = context.Input<Tensor>(framework::GradVarName("Out"));
      auto in_kernel_type = framework::OpKernelType(
          framework::TransToProtoVarType(pre_input->dtype()),
          context.GetPlace());
      auto out_kernel_type = framework::OpKernelType(
          static_cast<framework::proto::VarType::Type>(in_dtype),
          context.GetPlace());
      framework::TransDataType(in_kernel_type, out_kernel_type, *pre_input,
                               &tmp_tensor);
      ComputeFromInput(&tmp_tensor, context);

    } else {
      auto* input2 = context.Input<Tensor>(framework::GradVarName("Out"));
      ComputeFromInput(input2, context);
    }
  }
};

class ReduceOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "ReduceOp");
    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "ReduceOp");
    auto x_dims = ctx->GetInputDim("X");
    auto x_rank = x_dims.size();
    auto dims = ctx->Attrs().Get<std::vector<int>>("dim");
    PADDLE_ENFORCE_GT(dims.size(), 0,
                      platform::errors::InvalidArgument(
                          "The number of reduce dims of ReduceOp "
                          "should be greater than 0. But received the number "
                          "of reduce dims = %d.",
                          dims.size()));

    for (size_t i = 0; i < dims.size(); ++i) {
      PADDLE_ENFORCE_LT(dims[i], x_rank,
                        platform::errors::InvalidArgument(
                            "The reduce dim index %d should be in the "
                            "range [-dimension(X), dimension(X)], "
                            "where dimension(X) = %d. But received dim "
                            "index = %d.",
                            i, x_rank, dims[i]));
      PADDLE_ENFORCE_GE(dims[i], -x_rank,
                        platform::errors::InvalidArgument(
                            "The reduce dim index %d should be in the "
                            "range [-dimension(X), dimension(X)], "
                            "where dimension(X) = %d. But received dim "
                            "index = %d.",
                            i, x_rank, dims[i]));
      if (dims[i] < 0) dims[i] = x_rank + dims[i];
    }
    sort(dims.begin(), dims.end());
    bool reduce_all = ctx->Attrs().Get<bool>("reduce_all");
    bool keep_dim = ctx->Attrs().Get<bool>("keep_dim");
    if (reduce_all) {
      if (keep_dim)
        ctx->SetOutputDim("Out",
                          phi::make_ddim(std::vector<int64_t>(x_rank, 1)));
      else
        ctx->SetOutputDim("Out", {1});
    } else {
      auto dims_vector = vectorize(x_dims);
      if (keep_dim) {
        for (size_t i = 0; i < dims.size(); ++i) {
          dims_vector[dims[i]] = 1;
        }
      } else {
        const int kDelFlag = -2;
        for (size_t i = 0; i < dims.size(); ++i) {
          dims_vector[dims[i]] = kDelFlag;
        }
        dims_vector.erase(
            remove(dims_vector.begin(), dims_vector.end(), kDelFlag),
            dims_vector.end());
      }
      if (!keep_dim && dims_vector.size() == 0) {
        dims_vector.push_back(1);
      }
      auto out_dims = phi::make_ddim(dims_vector);
      ctx->SetOutputDim("Out", out_dims);
      if (dims.size() > 0 && dims[0] != 0) {
        // Only pass LoD when not reducing on the first dim.
        ctx->ShareLoD("X", /*->*/ "Out");
      }
    }
  }
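
  // Illustrative shape inference (hypothetical values): for x_dims = {3, 4, 5}
  // and dim = {1}, keep_dim == false gives Out dims {3, 5} and keep_dim == true
  // gives {3, 1, 5}; with reduce_all == true the output is {1}, or {1, 1, 1}
  // when keep_dim is also set.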

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    // choose cudnn kernel if the runtime supports it
    auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");

    if (ctx.Input<paddle::framework::LoDTensor>("X")->dims().size() > 5)
      return framework::OpKernelType(input_data_type, ctx.GetPlace());

#ifdef PADDLE_WITH_MKLDNN
    if (this->CanMKLDNNBeUsed(ctx, input_data_type)) {
      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif

    if (input_data_type == framework::proto::VarType::FP16) {
      PADDLE_ENFORCE_EQ(
          platform::is_gpu_place(ctx.GetPlace()) ||
              platform::is_npu_place(ctx.GetPlace()) ||
              platform::is_mlu_place(ctx.GetPlace()),
          true, platform::errors::InvalidArgument(
                    "float16 can only be used on GPU or NPU or MLU place"));
    }
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }
};

class ReduceOpUseInputPlace : public ReduceOp {
 public:
  using ReduceOp::ReduceOp;

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    framework::OpKernelType kt = OperatorWithKernel::GetExpectedKernelType(ctx);
    kt.place_ = ctx.Input<framework::LoDTensor>("X")->place();
    return kt;
  }
};

class ReduceGradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "ReduceOp");
    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
                   "Out@GRAD", "ReduceOp");
    auto x_dims = ctx->GetInputDim("X");
    auto x_rank = x_dims.size();
    auto dims = ctx->Attrs().Get<std::vector<int>>("dim");
    for (size_t i = 0; i < dims.size(); ++i) {
      PADDLE_ENFORCE_LT(dims[i], x_rank,
                        platform::errors::InvalidArgument(
                            "The reduce dim index %d should be in the "
                            "range [-dimension(X), dimension(X)], "
                            "where dimension(X) = %d. But received dim "
                            "index = %d.",
                            i, x_rank, dims[i]));
      if (dims[i] < 0) dims[i] = x_rank + dims[i];
    }
    sort(dims.begin(), dims.end());
    auto x_grad_name = framework::GradVarName("X");
    if (ctx->HasOutput(x_grad_name)) {
      ctx->SetOutputDim(x_grad_name, x_dims);
      ctx->ShareLoD("X", /*->*/ x_grad_name);
    }
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    int out_dtype = ctx.Attr<int>("out_dtype");
    auto input_data_type =
        (out_dtype >= 0)
            ? static_cast<framework::proto::VarType::Type>(out_dtype)
            : OperatorWithKernel::IndicateVarDataType(
                  ctx, framework::GradVarName("Out"));
#ifdef PADDLE_WITH_MKLDNN
    auto CanMKLDNNReduceGradBeUsed = [&]() {
      auto dx_dims = ctx.Input<Tensor>("X")->dims();

      if (dx_dims.size() > 5) return false;  // max 5D tensor is supported

      return true;
    };
    if (this->CanMKLDNNBeUsed(ctx, input_data_type) &&
        CanMKLDNNReduceGradBeUsed()) {
      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif

    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }
};

class ReduceOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() final {
    AddInput("X",
             "(Tensor) The input tensor. Tensors with rank at most 6 are "
             "supported.");
    AddOutput("Out", "(Tensor) The result tensor.");
    AddAttr<std::vector<int>>(
        "dim",
        "(list<int>, default {0}) The dimensions to reduce. "
        "Must be in the range [-rank(input), rank(input)). "
        "If `dim[i] < 0`, the dim to reduce is `rank + dim[i]`. "
        "Note that reducing on the first dim will drop the LoD info.")
        .SetDefault({0});
    AddAttr<bool>("keep_dim",
                  "(bool, default false) "
                  "If true, retain the reduced dimension with length 1.")
        .SetDefault(false);
    AddAttr<bool>("reduce_all",
                  "(bool, default false) "
                  "If true, output a scalar reduced along all dimensions.")
        .SetDefault(false);
    AddAttr<int>("in_dtype",
                 "(int, default -1) "
                 "The dtype of input; the default value -1 means the user "
                 "should not set this value.")
        .SetDefault(-1);
    AddAttr<int>(
        "out_dtype",
        "(int, default -1) "
        "The dtype of output; with the default value -1 the dtype is the "
        "same as the input's.")
        .SetDefault(-1);
    AddAttr<bool>("use_mkldnn",
                  "(bool, default false) Only used in mkldnn kernel")
        .SetDefault(false)
        .AsExtra();
    AddComment(string::Sprintf(R"DOC(
%s Operator.
W
whs 已提交
601

602 603 604
This operator computes the %s of input tensor along the given dimension.
The result tensor has 1 fewer dimension than the input unless keep_dim is true.
If reduce_all is true, just reduce along all dimensions and output a scalar.
W
whs 已提交
605

606 607
)DOC",
                               GetOpType(), GetName()));
G
guosheng 已提交
608
  }
609 610 611 612

 protected:
  virtual std::string GetName() const = 0;
  virtual std::string GetOpType() const = 0;
};

#if defined(__HIPCC__) || defined(__NVCC__)
template <typename T, template <typename> class ReduceOp,
          template <typename, typename> class TransformOp>
class ReduceCudaKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    bool reduce_all = context.Attr<bool>("reduce_all");
    const Tensor* input = context.Input<Tensor>("X");
    Tensor* output = context.Output<Tensor>("Out");
    auto out_dtype = context.Attr<int>("out_dtype");
    auto pt_out_dtype = paddle::framework::TransToPhiDataType(
        static_cast<framework::proto::VarType::Type>(out_dtype));
    std::vector<int> dims = context.Attr<std::vector<int>>("dim");

    auto& dev_ctx = context.cuda_device_context();

    if (out_dtype >= 0) {
      output->mutable_data(dev_ctx.GetPlace(), pt_out_dtype);
    } else {
      output->mutable_data(dev_ctx.GetPlace(), input->dtype());
    }

    std::vector<int64_t> dims_int64{dims.begin(), dims.end()};

    phi::Reduce<T, ReduceOp, TransformOp>(
        dev_ctx, *input, reduce_all, dims_int64, false, pt_out_dtype, output);
  }
};

template <typename T, template <typename, typename> class TransformOp>
class ReduceCudaGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    bool reduce_all = context.Attr<bool>("reduce_all");
    std::vector<int> dims = context.Attr<std::vector<int>>("dim");
    auto* in_x = context.Input<Tensor>("X");

    auto* d_out =
        context.Input<framework::Tensor>(framework::GradVarName("Out"));
    auto* d_x = context.Output<framework::Tensor>(framework::GradVarName("X"));
    auto out_dtype = context.Attr<int>("in_dtype");
    auto pt_out_dtype = framework::TransToPhiDataType(
        static_cast<framework::proto::VarType::Type>(out_dtype));
    // get reduce_dim and reduce_num for reduce_mean_grad
    int dim_size = in_x->dims().size();
    std::vector<int> reduce_dims = GetReduceDim(dims, dim_size, reduce_all);
    auto update_dims = vectorize(d_x->dims());
    int reduce_num = 1;
    for (auto i : reduce_dims) {
      reduce_num *= (in_x->dims())[i];
      update_dims[i] = 1;
    }
    // make new tensor
    framework::Tensor new_d_out(d_out->type());
    new_d_out.ShareDataWith(*d_out);
    new_d_out.Resize(phi::make_ddim(update_dims));
    auto& dev_ctx = context.cuda_device_context();
    if (out_dtype > 0) {
      d_x->mutable_data(dev_ctx.GetPlace(), pt_out_dtype);
    } else {
      d_x->mutable_data(dev_ctx.GetPlace(), d_out->dtype());
    }
    auto pt_d_out = paddle::experimental::MakePhiDenseTensor(new_d_out);
    auto pt_d_x = paddle::experimental::MakePhiDenseTensor(*d_x);
    if (out_dtype <= 0) {
      pt_out_dtype = d_out->dtype();
    }

    using MPType = typename kps::details::MPTypeTrait<T>::Type;
    phi::ReduceGrad<T, TransformOp<T, MPType>>(
        dev_ctx, pt_d_out.get(), pt_d_x.get(), pt_out_dtype,
        TransformOp<T, MPType>(reduce_num));
  }
};
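
// Illustrative trace (hypothetical shapes): for in_x dims {4, 5, 6} and
// dim = {1}, reduce_dims = {1}, reduce_num = 5, and update_dims = {4, 1, 6};
// reshaping d_out to {4, 1, 6} lets phi::ReduceGrad broadcast it back over
// the reduced axis (e.g. dividing by reduce_num in reduce_mean_grad).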

template <typename T>
struct EqualFunctor {
  inline T initial() { return static_cast<T>(0.0f); }

  inline HOSTDEVICE T operator()(const T a, const T b) const {
    return static_cast<T>(a == b);
  }
};

template <typename T, typename Enable = void>
struct DivideFunctor {
  inline T initial() { return static_cast<T>(1.0f); }

  inline HOSTDEVICE T operator()(const T a, const T b) const { return a / b; }
};

template <typename T, template <typename, typename> class TransformOp>
class ReduceCudaAMaxAMinGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    bool reduce_all = context.Attr<bool>("reduce_all");
    std::vector<int> dims = context.Attr<std::vector<int>>("dim");
    auto* in_x = context.Input<Tensor>("X");
    auto* out_y = context.Input<Tensor>("Out");
    auto* d_out =
        context.Input<framework::Tensor>(framework::GradVarName("Out"));
    auto* d_x = context.Output<framework::Tensor>(framework::GradVarName("X"));
    auto out_dtype = context.Attr<int>("in_dtype");
    auto pt_out_dtype = framework::TransToPhiDataType(
        static_cast<framework::proto::VarType::Type>(out_dtype));
    // get reduce_dim and reduce_num for reduce_mean_grad
    int dim_size = in_x->dims().size();
    std::vector<int> reduce_dims = GetReduceDim(dims, dim_size, reduce_all);
    auto update_dims = vectorize(d_x->dims());
    int reduce_num = 1;
    for (auto i : reduce_dims) {
      reduce_num *= (in_x->dims())[i];
      update_dims[i] = 1;
    }
    auto& dev_ctx = context.cuda_device_context();

    // make a new tensor new_y from Out, with reduced dims kept as size 1
    phi::DenseTensor new_y(out_y->type());
    new_y.ShareDataWith(*out_y);
    new_y.Resize(phi::make_ddim(update_dims));

    // make a new tensor new_dout from d_out, likewise reshaped
    phi::DenseTensor new_dout(d_out->type());
    new_dout.ShareDataWith(*d_out);
    new_dout.Resize(phi::make_ddim(update_dims));
    d_x->mutable_data(dev_ctx.GetPlace(), d_out->dtype());

    auto new_in = paddle::experimental::MakePhiDenseTensor(*in_x);
    auto new_in_tensor = new_in.get();

    auto new_dx = paddle::experimental::MakePhiDenseTensor(*d_x);
    auto new_dx_tensor = new_dx.get();

    // make equal_out
    phi::DenseTensor* equal_out = new phi::DenseTensor();
    equal_out->Resize(in_x->dims());
    dev_ctx.template Alloc<T>(equal_out);
    auto equal_out_tensor = *equal_out;

    // make new tensor equal_count
    phi::DenseTensor* equal_count = new phi::DenseTensor();
    equal_count->Resize(phi::make_ddim(update_dims));
    dev_ctx.template Alloc<T>(equal_count);

    // compute
    // 1. equal_out = Equal(x, y)
    std::vector<const phi::DenseTensor*> equal_inputs = {&new_y, new_in_tensor};
    std::vector<phi::DenseTensor*> equal_outputs = {&equal_out_tensor};
    phi::funcs::BroadcastKernel<phi::ElementwiseType::kBinary, T, T>(
        dev_ctx, equal_inputs, &equal_outputs, 0, EqualFunctor<T>());
    // 2. equal_count = reduceSum(equal_out)
    using MPType = typename kps::details::MPTypeTrait<T>::Type;
    phi::funcs::ReduceKernel<T, T, kps::AddFunctor,
                             kps::IdentityFunctor<T, MPType>>(
        dev_ctx, equal_out_tensor, equal_count,
        kps::IdentityFunctor<T, MPType>(), reduce_dims, false);

    // 3. dx = equal_out / equal_count
    std::vector<const phi::DenseTensor*> grad_inputs = {&equal_out_tensor,
                                                        equal_count};
    std::vector<phi::DenseTensor*> grad_outputs = {new_dx_tensor};
    phi::funcs::BroadcastKernel<phi::ElementwiseType::kBinary, T, T>(
        dev_ctx, grad_inputs, &grad_outputs, 0, DivideFunctor<T>());
    delete equal_out;
    delete equal_count;
  }
};
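
// Worked example (illustrative values): for an amax over x = {1, 3, 3} with
// Out = 3, equal_out = {0, 1, 1} and equal_count = 2, so the division in
// step 3 yields dx = {0, 0.5, 0.5}: the gradient is split evenly among the
// elements that attain the maximum.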
#endif

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

#define REGISTER_REDUCE_OP(op_name)                                           \
  class __##op_name##Maker__ : public ops::ReduceOpMaker {                    \
   protected:                                                                 \
    virtual std::string GetName() const { return #op_name; }                  \
    virtual std::string GetOpType() const { return "Reduce " #op_name; }      \
  };                                                                          \
  REGISTER_OPERATOR(                                                          \
      op_name, ops::ReduceOp, __##op_name##Maker__,                           \
      paddle::framework::DefaultGradOpMaker<paddle::framework::OpDesc, true>, \
      paddle::framework::DefaultGradOpMaker<paddle::imperative::OpBase,       \
                                            true>);                           \
  REGISTER_OPERATOR(op_name##_grad, ops::ReduceGradOp)

#define REGISTER_REDUCE_OP_WITHOUT_GRAD(op_name, ...)                    \
  class __##op_name##Maker__ : public ops::ReduceOpMaker {               \
   protected:                                                            \
    virtual std::string GetName() const { return #op_name; }             \
    virtual std::string GetOpType() const { return "Reduce " #op_name; } \
  };                                                                     \
  REGISTER_OPERATOR(                                                     \
      op_name, ops::ReduceOp##__VA_ARGS__, __##op_name##Maker__,         \
      paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,    \
      paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>);
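
// Hypothetical usage sketch (the concrete registrations live in the per-op
// .cc files; the op names below are illustrative):
//   REGISTER_REDUCE_OP(reduce_prod);
//   REGISTER_REDUCE_OP_WITHOUT_GRAD(reduce_all, UseInputPlace);
// The first macro registers the forward op, default grad makers, and the grad
// op; the second registers a forward-only op, with the variadic argument
// selecting a ReduceOp subclass such as ReduceOpUseInputPlace.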