/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <algorithm>
#include <set>
#include <string>
#include <vector>

#include "paddle/fluid/framework/data_type_transform.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/cast_op.h"
#include "paddle/fluid/operators/reduce_ops/reduce_op_function.h"
#include "paddle/phi/kernels/funcs/math_function.h"
// Only the headers in the paddle/phi/api dirs can be included here.
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/phi/api/lib/utils/tensor_utils.h"
#include "paddle/phi/kernels/cpu/reduce.h"

#if defined(__HIPCC__) || defined(__NVCC__) || defined(__xpu__)
#include "paddle/phi/kernels/gpu/reduce.h"
#include "paddle/phi/kernels/gpu/reduce_grad.h"
#endif

namespace paddle {
namespace operators {

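// Dispatches to ReduceFunctor with compile-time input rank NDIM and number of
// reduced dims RDIM when they match the runtime ndim / rdim (see
// ReduceKernelFunctor::apply below).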
#define HANDLE_DIM(NDIM, RDIM)                                   \
  if (ndim == NDIM && rdim == RDIM) {                            \
    paddle::operators::                                          \
        ReduceFunctor<DeviceContext, OutT, NDIM, RDIM, Functor>( \
            context.template device_context<DeviceContext>(),    \
            *input,                                              \
            output,                                              \
            dims,                                                \
            keep_dim);                                           \
  }

using Tensor = framework::Tensor;
using DDim = framework::DDim;

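// Computes the permuted shape (dst_dims) and permutation (perm_axis) that move
// every reduced dim to the end while keeping the relative order of the
// remaining dims, e.g. src_dims = {2, 3, 4} with reduced_dims = {0} gives
// dst_dims = {3, 4, 2} and perm_axis = {1, 2, 0}.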
inline void GetShuffledDim(const DDim& src_dims,
                           DDim* dst_dims,
                           const std::vector<int>& reduced_dims,
                           std::vector<int>* perm_axis) {
  // check if it's a reduced dim
  std::vector<bool> src_dims_check(src_dims.size(), false);
  size_t src_size = src_dims.size();
  size_t reduce_size = reduced_dims.size();
  for (size_t i = 0; i < reduce_size; ++i) {
    dst_dims->at(src_size - reduce_size + i) = src_dims[reduced_dims[i]];
    (*perm_axis)[src_size - reduce_size + i] = reduced_dims[i];
    src_dims_check[reduced_dims[i]] = true;
  }

  size_t offset = 0;
  for (size_t i = 0; i < src_dims_check.size(); ++i) {
    bool is_reduced = src_dims_check[i];
    if (!is_reduced) {
      (*perm_axis)[offset] = i;
      dst_dims->at(offset++) = src_dims[i];
    }
  }
}

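// Normalizes the reduce axes: returns {0, ..., dim_size - 1} when reduce_all
// is set, otherwise validates each axis and maps negative axes to their
// positive counterparts.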
static inline std::vector<int> GetReduceDim(const std::vector<int>& dims,
                                            int dim_size,
                                            bool reduce_all) {
  std::vector<int> reduce_dims;
  if (reduce_all) {
    reduce_dims.resize(dim_size);
    int reduce_size = reduce_dims.size();
    for (int i = 0; i < reduce_size; ++i) {
      reduce_dims[i] = i;
    }
  } else {
    for (auto e : dims) {
      PADDLE_ENFORCE_LT(e,
                        dim_size,
                        paddle::platform::errors::InvalidArgument(
                            "ReduceOp: invalid axis, when x_dims is %d, "
                            "axis[i] should be less than x_dims, but got %d.",
                            dim_size,
                            e));
      reduce_dims.push_back(e >= 0 ? e : e + dim_size);
    }
  }
  return reduce_dims;
}
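
// Transposes `input` into `shuffled_input` so that all reduced dims become the
// trailing dims (see GetShuffledDim above).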
template <typename DeviceContext, typename OutT>
void GetShuffledInput(const framework::ExecutionContext& context,
                      const Tensor* input,
                      Tensor* shuffled_input,
                      const std::vector<int>& dims) {
  DDim shuffled_dims(input->dims());
  std::vector<int> perm_axis(input->dims().size());
  GetShuffledDim(input->dims(), &shuffled_dims, dims, &perm_axis);

  shuffled_input->Resize(shuffled_dims);
  shuffled_input->mutable_data<OutT>(context.GetPlace());

  phi::funcs::TransposeNormal<DeviceContext, OutT> trans;
  trans(context.template device_context<DeviceContext>(),
        *input,
        shuffled_input,
        perm_axis);
}

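// Computes the inverse permutation that maps the shuffled layout (reduced dims
// at the end) back to the original dim order; used to transpose dX back in
// HandleLargeDimGrad.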
inline void GetOriginDimFromShuffled(const DDim& src_dim,
                                     const std::vector<int>& dims,
                                     std::vector<int>* origin_dim) {
  DDim shuffled_dims(src_dim);
  size_t n = src_dim.size();
  std::vector<int> perm_axis(n);
  GetShuffledDim(src_dim, &shuffled_dims, dims, &perm_axis);
  for (size_t i = 0; i < n; ++i) {
    (*origin_dim)[perm_axis[i]] = i;
  }
}

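// Handles inputs whose rank exceeds what HANDLE_DIM instantiates (> 6) by
// shuffling the reduced dims to the end and reducing the data as a 2D
// {unreduced, reduced} tensor.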
template <typename DeviceContext, typename OutT, typename Functor>
void HandleLargeDim(const framework::ExecutionContext& context,
                    const Tensor* input,
                    Tensor* output,
                    const std::vector<int>& dims,
                    bool keep_dim) {
  //  shuffle the reduced dim to the end
  Tensor shuffled_input;
  GetShuffledInput<DeviceContext, OutT>(context, input, &shuffled_input, dims);

  // reshape into a 2D tensor whose shape is {unreduced, reduced}.
  const int64_t unreduced = output->numel();
  const int64_t reduced = shuffled_input.numel() / unreduced;
  shuffled_input.Resize({unreduced, reduced});
  DDim output_dim = output->dims();
  output->Resize({unreduced});
  paddle::operators::ReduceFunctor<DeviceContext, OutT, 2, 1, Functor>(
      context.template device_context<DeviceContext>(),
      shuffled_input,
      output,
      {1},
      keep_dim);
  output->Resize(output_dim);
}

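// Gradient counterpart of HandleLargeDim: computes the gradient in the
// shuffled 2D {unreduced, reduced} layout and then transposes it back to the
// original layout of x.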
template <typename DeviceContext, typename T, typename Functor>
void HandleLargeDimGrad(const framework::ExecutionContext& context,
                        const framework::Tensor* x,
                        const framework::Tensor* out,
                        const framework::Tensor* dout,
                        framework::Tensor* dx,
                        Functor functor,
                        const std::vector<int>& dims) {
  const int64_t unreduced = out->numel();
  const int64_t reduced = x->numel() / unreduced;
  DDim out_dim(out->dims());
  DDim x_dim(x->dims());
  // transpose and reshape X
  Tensor shuffled_x;
  GetShuffledInput<DeviceContext, T>(context, x, &shuffled_x, dims);
  DDim shuffled_dim = shuffled_x.dims();
  shuffled_x.Resize({unreduced, reduced});
  // reshape dX {unreduced, reduced}
  dx->Resize({unreduced, reduced});
  ReduceGradFunctor<DeviceContext, T, 2, Functor>(
      context.template device_context<DeviceContext>(),
      shuffled_x,
      *out,
      *dout,
      dx,
      functor,
      {1});
  // transpose dX
  std::vector<int> origin_axis(x_dim.size());
  GetOriginDimFromShuffled(x_dim, dims, &origin_axis);
  Tensor dx_tmp;
  framework::TensorCopy(*dx, context.GetPlace(), &dx_tmp);
  dx_tmp.Resize(shuffled_dim);
  dx->Resize(x_dim);
  phi::funcs::TransposeNormal<DeviceContext, T> trans;
  trans(context.template device_context<DeviceContext>(),
        dx_tmp,
        dx,
        origin_axis);
}

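// Functor intended for dispatch on the output data type via apply<OutT>():
// it either flattens the input and reduces it to a scalar (reduce_all), or
// dispatches on the input rank and number of reduced dims through HANDLE_DIM
// (ranks above 6 fall back to HandleLargeDim).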
template <typename DeviceContext, typename T, typename Functor>
struct ReduceKernelFunctor {
  const Tensor* input;
  Tensor* output;
  std::vector<int> dims;
  bool keep_dim;
  bool reduce_all;
  const framework::ExecutionContext& context;
  ReduceKernelFunctor(const Tensor* input,
                      Tensor* output,
                      const std::vector<int>& dims,
                      bool keep_dim,
                      bool reduce_all,
                      const framework::ExecutionContext& context)
      : input(input),
        output(output),
        dims(dims),
        keep_dim(keep_dim),
        reduce_all(reduce_all),
        context(context) {}

  template <typename OutT>
  void apply() const {
    output->mutable_data<OutT>(context.GetPlace());
    if (reduce_all) {
      // Flatten and reduce 1-D tensor
      auto x = EigenVector<OutT>::Flatten(*input);
      auto out = EigenScalar<OutT>::From(*output);
      auto& place =
          *context.template device_context<DeviceContext>().eigen_device();
      auto reduce_dim = Eigen::array<int, 1>({{0}});
      Functor functor;
      functor(place, &x, &out, reduce_dim);
    } else {
      int ndim = input->dims().size();
      int rdim = dims.size();
      if (ndim > 6) {
        HandleLargeDim<DeviceContext, OutT, Functor>(
            context, input, output, dims, keep_dim);
      } else {
        HANDLE_DIM(6, 5);
        HANDLE_DIM(6, 4);
        HANDLE_DIM(6, 3);
        HANDLE_DIM(6, 2);
        HANDLE_DIM(6, 1);
        HANDLE_DIM(5, 4);
        HANDLE_DIM(5, 3);
        HANDLE_DIM(5, 2);
        HANDLE_DIM(5, 1);
        HANDLE_DIM(4, 3);
        HANDLE_DIM(4, 2);
        HANDLE_DIM(4, 1);
        HANDLE_DIM(3, 2);
        HANDLE_DIM(3, 1);
        HANDLE_DIM(2, 1);
        HANDLE_DIM(1, 1);
      }
    }
  }
};
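
// Forward reduce kernel: resolves the output dtype from the out_dtype
// attribute (falling back to the input dtype when it is negative) and
// forwards the computation to the phi::Reduce kernel.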
template <typename DeviceContext, typename T, typename Functor>
class ReduceKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    bool reduce_all = context.Attr<bool>("reduce_all");
    auto* output = context.Output<Tensor>("Out");
    auto dims = context.Attr<std::vector<int>>("dim");
    bool keep_dim = context.Attr<bool>("keep_dim");
    int out_dtype = context.Attr<int>("out_dtype");
    framework::proto::VarType::Type cast_out_dtype;
    auto* input = context.Input<Tensor>("X");

    if (out_dtype < 0) {
      cast_out_dtype = static_cast<framework::proto::VarType::Type>(
          framework::TransToProtoVarType(input->dtype()));
    } else {
      cast_out_dtype = static_cast<framework::proto::VarType::Type>(out_dtype);
    }

    auto& dev_ctx = context.device_context<DeviceContext>();
    output->mutable_data(
        dev_ctx.GetPlace(),
        static_cast<framework::proto::VarType::Type>(cast_out_dtype));

    std::vector<int64_t> tmp_dims(dims.begin(), dims.end());

    // call new kernel
    phi::Reduce<typename framework::ConvertToPhiContext<DeviceContext>::TYPE,
                T,
                Functor>(
        static_cast<const typename framework::ConvertToPhiContext<
            DeviceContext>::TYPE&>(dev_ctx),
        *input,
        reduce_all,
        tmp_dims,
        keep_dim,
        framework::TransToPhiDataType(cast_out_dtype),
        output);
  }
};

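// Broadcasts the reduced gradient back to the input shape: for reduce_all it
// broadcasts over the flattened input, otherwise it dispatches
// ReduceGradFunctor on the input rank (ranks above 6 go through
// HandleLargeDimGrad).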
template <typename DeviceContext, typename T, typename Functor>
void LaunchReduceGradKernel(const framework::ExecutionContext& context,
                            const framework::Tensor* input0,
                            const framework::Tensor* input1,
                            const framework::Tensor* input2,
                            paddle::framework::Tensor* output,
                            Functor functor,
                            const std::vector<int>& dims,
                            bool reduce_all = false) {
  if (reduce_all) {
    auto x = EigenVector<T>::Flatten(*input0);
    auto x_reduce = EigenVector<T>::Flatten(*input1);
    auto x_reduce_grad = EigenVector<T>::Flatten(*input2);
    auto x_grad = EigenVector<T>::Flatten(*output);
    auto& place =
        *context.template device_context<DeviceContext>().eigen_device();
    auto broadcast_dim =
        Eigen::array<int, 1>({{static_cast<int>(input0->numel())}});
    functor(place,
            &x,
            &x_reduce,
            &x_grad,
            &x_reduce_grad,
            broadcast_dim,
            broadcast_dim[0]);
  } else {
    int rank = input0->dims().size();
    switch (rank) {
      case 1:
        ReduceGradFunctor<DeviceContext, T, 1, Functor>(
            context.template device_context<DeviceContext>(),
            *input0,
            *input1,
            *input2,
            output,
            functor,
            dims);
        break;
      case 2:
        ReduceGradFunctor<DeviceContext, T, 2, Functor>(
            context.template device_context<DeviceContext>(),
            *input0,
            *input1,
            *input2,
            output,
            functor,
            dims);
        break;
      case 3:
        ReduceGradFunctor<DeviceContext, T, 3, Functor>(
            context.template device_context<DeviceContext>(),
            *input0,
            *input1,
            *input2,
            output,
            functor,
            dims);
        break;
      case 4:
        ReduceGradFunctor<DeviceContext, T, 4, Functor>(
            context.template device_context<DeviceContext>(),
            *input0,
            *input1,
            *input2,
            output,
            functor,
            dims);
        break;
      case 5:
        ReduceGradFunctor<DeviceContext, T, 5, Functor>(
            context.template device_context<DeviceContext>(),
            *input0,
            *input1,
            *input2,
            output,
            functor,
            dims);
        break;
      case 6:
        ReduceGradFunctor<DeviceContext, T, 6, Functor>(
            context.template device_context<DeviceContext>(),
            *input0,
            *input1,
            *input2,
            output,
            functor,
            dims);
        break;
      default:
        HandleLargeDimGrad<DeviceContext, T, Functor>(
            context, input0, input1, input2, output, functor, dims);
        break;
    }
  }
}

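// Generic reduce backward kernel. kNoNeedBufferX / kNoNeedBufferY mirror the
// op's NoNeedBufferVarsInferer: when set, X / Out are replaced by same-shaped
// stand-ins since only their dims are needed. If in_dtype is set, Out@GRAD is
// first cast to that dtype before the gradient is computed.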
template <typename DeviceContext,
          typename T,
          typename Functor,
          bool kNoNeedBufferX = false,
          bool kNoNeedBufferY = false>
class ReduceGradKernel : public framework::OpKernel<T> {
 public:
  void ComputeFromInput(const Tensor* input2,
                        const framework::ExecutionContext& context) const {
    bool reduce_all = context.Attr<bool>("reduce_all");
    auto dims = context.Attr<std::vector<int>>("dim");
    auto* input0 = context.Input<Tensor>("X");
    auto* input1 = context.Input<Tensor>("Out");

    auto* output = context.Output<Tensor>(framework::GradVarName("X"));
    output->mutable_data<T>(context.GetPlace());

    // If dims covers all dimensions of the input, set reduce_all to true
    const auto& input_dim_size = context.Input<Tensor>("X")->dims().size();
    std::set<int> dims_set(dims.begin(), dims.end());
    bool full_dim = true;
    for (auto i = 0; i < input_dim_size; i++) {
      if (dims_set.find(i) == dims_set.end()) {
        full_dim = false;
        break;
      }
    }
    reduce_all = (reduce_all || full_dim);
    // NOTE: EigenTensor::From() uses tensor->data(). If the op has a
    // NoNeedBufferVarsInferer, the corresponding kNoNeedBufferX or
    // kNoNeedBufferY should be set to true, and a fake var with the same
    // dims is used instead.
    if (kNoNeedBufferX) {
      input0 = output;
    }
    if (kNoNeedBufferY) {
      input1 = input2;
    }

    const std::vector<int> const_dims = dims;

    // NOTE(dengkaipeng): Out is unnecessary in some reduce kernels and is
    // not set as an Input in the grad maker, so Out@GRAD is used instead here.
    if (!input1) input1 = input2;
    Functor functor;
    LaunchReduceGradKernel<DeviceContext, T, Functor>(context,
                                                      input0,
                                                      input1,
                                                      input2,
                                                      output,
                                                      functor,
                                                      const_dims,
                                                      reduce_all);
  }

  void Compute(const framework::ExecutionContext& context) const override {
    int in_dtype = context.Attr<int>("in_dtype");
    if (in_dtype >= 0) {
      Tensor tmp_tensor;
      auto* pre_input = context.Input<Tensor>(framework::GradVarName("Out"));
      auto in_kernel_type = framework::OpKernelType(
          framework::TransToProtoVarType(pre_input->dtype()),
          context.GetPlace());
      auto out_kernel_type = framework::OpKernelType(
          static_cast<framework::proto::VarType::Type>(in_dtype),
          context.GetPlace());
      framework::TransDataType(
          in_kernel_type, out_kernel_type, *pre_input, &tmp_tensor);
      ComputeFromInput(&tmp_tensor, context);

    } else {
      auto* input2 = context.Input<Tensor>(framework::GradVarName("Out"));
      ComputeFromInput(input2, context);
    }
  }
};

class ReduceOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "ReduceOp");
    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "ReduceOp");
    auto x_dims = ctx->GetInputDim("X");
    auto x_rank = x_dims.size();
    auto dims = ctx->Attrs().Get<std::vector<int>>("dim");
    PADDLE_ENFORCE_GT(dims.size(),
                      0,
                      platform::errors::InvalidArgument(
                          "The size of the 'dim' attribute of ReduceOp "
                          "should be greater than 0, but received %d.",
                          dims.size()));

    for (size_t i = 0; i < dims.size(); ++i) {
      PADDLE_ENFORCE_LT(dims[i],
                        x_rank,
                        platform::errors::InvalidArgument(
                            "The reduce dim index %d should be in the "
                            "range [-dimension(X), dimension(X)], "
                            "where dimension(X) = %d. But received dim index = %d.",
                            i,
                            x_rank,
                            dims[i]));
      PADDLE_ENFORCE_GE(dims[i],
                        -x_rank,
                        platform::errors::InvalidArgument(
                            "The reduce dim index %d should be in the "
                            "range [-dimension(X), dimension(X)], "
                            "where dimension(X) = %d. But received dim index = %d.",
                            i,
                            x_rank,
                            dims[i]));
      if (dims[i] < 0) dims[i] = x_rank + dims[i];
    }
    sort(dims.begin(), dims.end());
    bool reduce_all = ctx->Attrs().Get<bool>("reduce_all");
    bool keep_dim = ctx->Attrs().Get<bool>("keep_dim");
    if (reduce_all) {
      if (keep_dim)
        ctx->SetOutputDim("Out",
                          phi::make_ddim(std::vector<int64_t>(x_rank, 1)));
      else
        ctx->SetOutputDim("Out", {1});
    } else {
      auto dims_vector = vectorize(x_dims);
      if (keep_dim) {
        for (size_t i = 0; i < dims.size(); ++i) {
          dims_vector[dims[i]] = 1;
        }
      } else {
        const int kDelFlag = -2;
        for (size_t i = 0; i < dims.size(); ++i) {
          dims_vector[dims[i]] = kDelFlag;
        }
        dims_vector.erase(
            remove(dims_vector.begin(), dims_vector.end(), kDelFlag),
            dims_vector.end());
      }
      if (!keep_dim && dims_vector.size() == 0) {
        dims_vector.push_back(1);
      }
      auto out_dims = phi::make_ddim(dims_vector);
      ctx->SetOutputDim("Out", out_dims);
      if (dims.size() > 0 && dims[0] != 0) {
        // Only pass LoD when not reducing on the first dim.
        ctx->ShareLoD("X", /*->*/ "Out");
      }
    }
  }

  // oneDNN's reduction kernel is optimized only for reducing over the
  // outermost dims, so for any other type of reduction it is better to
  // fall back to the native implementation.
  static bool HasOptimizedOneDNNKernel(const framework::ExecutionContext& ctx) {
    // native reduce kernels don't support bf16
    // so oneDNN kernel is enforced in that case
    if (ctx.Input<framework::LoDTensor>("X")->dtype() ==
        experimental::DataType::BFLOAT16)
      return true;

    auto reduce_dims = ctx.Attr<std::vector<int>>("dim");
    const bool reduce_all = ctx.Attr<bool>("reduce_all");
    int ndims = ctx.Input<framework::LoDTensor>("X")->dims().size();

    if (reduce_all) {
      return true;
    }

    for (size_t i = 0; i < reduce_dims.size(); ++i) {
      if (reduce_dims[i] < 0) reduce_dims[i] = ndims + reduce_dims[i];
    }
    sort(reduce_dims.begin(), reduce_dims.end());
    for (size_t i = 0; i < reduce_dims.size(); ++i) {
      if (reduce_dims[reduce_dims.size() - i - 1] !=
          static_cast<int>(ndims - i - 1)) {
        return false;
      }
    }

    return true;
  }

  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    // choose the kernel according to the input data type and runtime place
    auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");

    if (ctx.Input<paddle::framework::LoDTensor>("X")->dims().size() > 5)
      return framework::OpKernelType(input_data_type, ctx.GetPlace());

#ifdef PADDLE_WITH_MKLDNN
    if (this->CanMKLDNNBeUsed(ctx, input_data_type) &&
        HasOptimizedOneDNNKernel(ctx)) {
      return framework::OpKernelType(input_data_type,
                                     ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif

    if (input_data_type == framework::proto::VarType::FP16) {
      PADDLE_ENFORCE_EQ(
          platform::is_gpu_place(ctx.GetPlace()) ||
              platform::is_npu_place(ctx.GetPlace()) ||
              platform::is_mlu_place(ctx.GetPlace()) ||
              platform::is_custom_place(ctx.GetPlace()),
          true,
          platform::errors::InvalidArgument(
              "float16 can only be used on GPU, NPU, MLU or custom place"));
    }
    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }
};

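// Variant of ReduceOp that pins the kernel to the place of the input tensor
// instead of the place chosen from the execution context.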
class ReduceOpUseInputPlace : public ReduceOp {
 public:
  using ReduceOp::ReduceOp;

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    framework::OpKernelType kt = OperatorWithKernel::GetExpectedKernelType(ctx);
    kt.place_ = ctx.Input<framework::LoDTensor>("X")->place();
    return kt;
  }
};

class ReduceGradOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext* ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "ReduceOp");
    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")),
                   "Input",
                   "Out@GRAD",
                   "ReduceOp");
    auto x_dims = ctx->GetInputDim("X");
    auto x_rank = x_dims.size();
    auto dims = ctx->Attrs().Get<std::vector<int>>("dim");
    for (size_t i = 0; i < dims.size(); ++i) {
      PADDLE_ENFORCE_LT(dims[i],
                        x_rank,
                        platform::errors::InvalidArgument(
                            "The reduce dim index %d should be in the "
                            "range [-dimension(X), dimension(X)], "
                            "where dimension(X) = %d. But received dim index = %d.",
                            i,
                            x_rank,
                            dims[i]));
      if (dims[i] < 0) dims[i] = x_rank + dims[i];
    }
    sort(dims.begin(), dims.end());
    auto x_grad_name = framework::GradVarName("X");
    if (ctx->HasOutput(x_grad_name)) {
      ctx->SetOutputDim(x_grad_name, x_dims);
      ctx->ShareLoD("X", /*->*/ x_grad_name);
    }
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext& ctx) const override {
    int out_dtype = ctx.Attr<int>("out_dtype");
    auto input_data_type =
        (out_dtype >= 0)
            ? static_cast<framework::proto::VarType::Type>(out_dtype)
            : OperatorWithKernel::IndicateVarDataType(
                  ctx, framework::GradVarName("Out"));
#ifdef PADDLE_WITH_MKLDNN
    auto CanMKLDNNReduceGradBeUsed = [&]() {
      auto dx_dims = ctx.Input<Tensor>("X")->dims();

      if (dx_dims.size() > 5) return false;  // max 5D tensor is supported

      return true;
    };
    if (this->CanMKLDNNBeUsed(ctx, input_data_type) &&
        CanMKLDNNReduceGradBeUsed()) {
      return framework::OpKernelType(input_data_type,
                                     ctx.GetPlace(),
                                     framework::DataLayout::kMKLDNN,
                                     framework::LibraryType::kMKLDNN);
    }
#endif

    return framework::OpKernelType(input_data_type, ctx.GetPlace());
  }
};

class ReduceOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() final {
    AddInput("X",
             "(Tensor) The input tensor. Tensors with rank at most 6 are "
             "supported.");
    AddOutput("Out", "(Tensor) The result tensor.");
    AddAttr<std::vector<int>>(
        "dim",
        "(list<int>, default {0}) The dimensions to reduce. "
        "Must be in the range [-rank(input), rank(input)). "
        "If `dim[i] < 0`, the dimension to reduce is `rank + dim[i]`. "
        "Note that reducing on the first dim will make the LoD info lost.")
        .SetDefault({0});
    AddAttr<bool>("keep_dim",
                  "(bool, default false) "
                  "If true, retain the reduced dimension with length 1.")
        .SetDefault(false);
    AddAttr<bool>("reduce_all",
                  "(bool, default false) "
                  "If true, output a scalar reduced along all dimensions.")
        .SetDefault(false);
    AddAttr<int>("in_dtype",
                 "(int, default -1)"
                 "The dtype of input, default value is -1, the user should not "
                 "set this value.")
        .SetDefault(-1);
    AddAttr<int>(
        "out_dtype",
        "(int, default -1)"
        "The dtype of output, default value is -1, the dtype is the same as input")
        .SetDefault(-1);
    AddAttr<bool>("use_mkldnn",
                  "(bool, default false) Only used in mkldnn kernel")
        .SetDefault(false)
        .AsExtra();
    AddComment(string::Sprintf(R"DOC(
%s Operator.

This operator computes the %s of input tensor along the given dimension.
The result tensor has 1 fewer dimension than the input unless keep_dim is true.
If reduce_all is true, just reduce along all dimensions and output a scalar.

)DOC",
                               GetOpType(),
                               GetName()));
  }

 protected:
  virtual std::string GetName() const = 0;
  virtual std::string GetOpType() const = 0;
};

#if defined(__HIPCC__) || defined(__NVCC__) || defined(__xpu__)
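// GPU / XPU-KP forward reduce kernel: forwards to phi::Reduce with the
// device-specific context and the dtype requested by out_dtype (or the input
// dtype when out_dtype is negative).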
template <typename T,
          template <typename>
          class ReduceOp,
          template <typename, typename>
          class TransformOp>
class ReduceCudaKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    bool reduce_all = context.Attr<bool>("reduce_all");
    const Tensor* input = context.Input<Tensor>("X");
    Tensor* output = context.Output<Tensor>("Out");
    auto out_dtype = context.Attr<int>("out_dtype");
    auto pt_out_dtype = paddle::framework::TransToPhiDataType(
        static_cast<framework::proto::VarType::Type>(out_dtype));
    std::vector<int> dims = context.Attr<std::vector<int>>("dim");
#ifdef PADDLE_WITH_XPU_KP
    auto& dev_ctx =
        context.template device_context<paddle::platform::XPUDeviceContext>();
#else
    auto& dev_ctx = context.cuda_device_context();
#endif
    if (out_dtype >= 0) {
      output->mutable_data(dev_ctx.GetPlace(), pt_out_dtype);
    } else {
      output->mutable_data(dev_ctx.GetPlace(), input->dtype());
    }

    std::vector<int64_t> dims_int64{dims.begin(), dims.end()};

    phi::Reduce<T, ReduceOp, TransformOp>(
        dev_ctx, *input, reduce_all, dims_int64, false, pt_out_dtype, output);
  }
};

#ifndef PADDLE_WITH_XPU_KP
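// GPU reduce backward kernel: reshapes Out@GRAD so that every reduced dim has
// extent 1, then lets phi::ReduceGrad broadcast it back to the shape of X,
// applying TransformOp (e.g. the division by reduce_num used by
// reduce_mean_grad).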
template <typename T, template <typename, typename> class TransformOp>
class ReduceCudaGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    bool reduce_all = context.Attr<bool>("reduce_all");
    std::vector<int> dims = context.Attr<std::vector<int>>("dim");
    auto* in_x = context.Input<Tensor>("X");

    auto* d_out =
        context.Input<framework::Tensor>(framework::GradVarName("Out"));
    auto* d_x = context.Output<framework::Tensor>(framework::GradVarName("X"));
    auto out_dtype = context.Attr<int>("in_dtype");
    auto pt_out_dtype = framework::TransToPhiDataType(
        static_cast<framework::proto::VarType::Type>(out_dtype));
    // get reduce_dim and reduce_num for reduce_mean_grad
    int dim_size = in_x->dims().size();
    std::vector<int> reduce_dims = GetReduceDim(dims, dim_size, reduce_all);
    auto update_dims = vectorize(d_x->dims());
    int reduce_num = 1;
    for (auto i : reduce_dims) {
      reduce_num *= (in_x->dims())[i];
      update_dims[i] = 1;
    }
    // make new tensor
    framework::Tensor new_d_out(d_out->type());
    new_d_out.ShareDataWith(*d_out);
    new_d_out.Resize(phi::make_ddim(update_dims));
    auto& dev_ctx = context.cuda_device_context();
    if (out_dtype > 0) {
      d_x->mutable_data(dev_ctx.GetPlace(), pt_out_dtype);
    } else {
      d_x->mutable_data(dev_ctx.GetPlace(), d_out->dtype());
    }
    auto pt_d_out = paddle::experimental::MakePhiDenseTensor(new_d_out);
    auto pt_d_x = paddle::experimental::MakePhiDenseTensor(*d_x);
    if (out_dtype <= 0) {
      pt_out_dtype = d_out->dtype();
    }

    using MPType = typename kps::details::MPTypeTrait<T>::Type;
    phi::ReduceGrad<T, TransformOp<T, MPType>>(
        dev_ctx,
        pt_d_out.get(),
        pt_d_x.get(),
        pt_out_dtype,
        TransformOp<T, MPType>(reduce_num));
  }
};

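// Elementwise helper functors: EqualFunctor returns 1 when its operands
// compare equal (0 otherwise) and DivideFunctor returns a / b; initial()
// provides a default starting value for kernels that need one.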
template <typename T>
struct EqualFunctor {
  inline T initial() { return static_cast<T>(0.0f); }

  inline HOSTDEVICE T operator()(const T a, const T b) const {
    return static_cast<T>(a == b);
  }
};

template <typename T, typename Enable = void>
struct DivideFunctor {
  inline T initial() { return static_cast<T>(1.0f); }

  inline HOSTDEVICE T operator()(const T a, const T b) const { return a / b; }
};
#endif
#endif

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

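// Registers a reduce op together with the default grad op maker and its grad
// op. Illustrative usage (a hypothetical op name; real ops register
// themselves in their own .cc files):
//   REGISTER_REDUCE_OP(reduce_foo);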
#define REGISTER_REDUCE_OP(op_name)                                           \
  class __##op_name##Maker__ : public ops::ReduceOpMaker {                    \
   protected:                                                                 \
    virtual std::string GetName() const { return #op_name; }                  \
    virtual std::string GetOpType() const { return "Reduce " #op_name; }      \
  };                                                                          \
  REGISTER_OPERATOR(                                                          \
      op_name,                                                                \
      ops::ReduceOp,                                                          \
      __##op_name##Maker__,                                                   \
      paddle::framework::DefaultGradOpMaker<paddle::framework::OpDesc, true>, \
      paddle::framework::DefaultGradOpMaker<paddle::imperative::OpBase,       \
                                            true>);                           \
  REGISTER_OPERATOR(op_name##_grad, ops::ReduceGradOp)

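// Same as above, but registers empty grad op makers; the optional variadic
// argument selects a ReduceOp subclass, e.g. (illustrative)
//   REGISTER_REDUCE_OP_WITHOUT_GRAD(reduce_foo, UseInputPlace);
// which expands to ops::ReduceOpUseInputPlace.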
#define REGISTER_REDUCE_OP_WITHOUT_GRAD(op_name, ...)                    \
  class __##op_name##Maker__ : public ops::ReduceOpMaker {               \
   protected:                                                            \
    virtual std::string GetName() const { return #op_name; }             \
    virtual std::string GetOpType() const { return "Reduce " #op_name; } \
  };                                                                     \
  REGISTER_OPERATOR(                                                     \
      op_name,                                                           \
      ops::ReduceOp##__VA_ARGS__,                                        \
      __##op_name##Maker__,                                              \
      paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,    \
      paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>);