// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <set>
#include <vector>

#include "paddle/phi/core/visit_type.h"
#include "paddle/phi/kernels/cast_kernel.h"
#include "paddle/phi/kernels/funcs/reduce_function.h"

namespace phi {

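// Reduces `x` along `dims` into `out` using the reduction defined by
// `Functor`. If `out_dtype` differs from x.dtype(), `x` is first cast to
// `out_dtype`; `keep_dim` keeps reduced dimensions with size 1, and
// `reduce_all` forces a reduction over every dimension.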
template <typename DeviceContext, typename T, typename Functor>
void Reduce(const DeviceContext& dev_ctx,
            const DenseTensor& x,
            bool reduce_all,
            const std::vector<int64_t>& dims,
            bool keep_dim,
            DataType out_dtype,
            DenseTensor* out) {
  reduce_all = recompute_reduce_all(x, dims, reduce_all);
  // If dims covers every input dimension, set reduce_all to true.
  const int& input_dim_size = x.dims().size();
  std::set<int> dims_set(dims.begin(), dims.end());
  bool full_dim = true;
  for (int i = 0; i < input_dim_size; ++i) {
    // A dim may be given as a non-negative index or as a negative
    // (Python-style) index, so check both forms.
    if (dims_set.find(i) == dims_set.end() &&
        dims_set.find(i - input_dim_size) == dims_set.end()) {
      full_dim = false;
      break;
    }
  }
  reduce_all = (reduce_all || full_dim);

  // No need to cast dtype; reduce x directly.
  if (out_dtype == phi::DataType::UNDEFINED || out_dtype == x.dtype()) {
    // Dispatch on the input dtype and apply the reduction functor.
    PD_VISIT_ALL_TYPES(
        x.dtype(), "ReduceKernelImpl", ([&] {
          phi::funcs::ReduceKernelImpl<DeviceContext, T, data_t, Functor>(
              dev_ctx, x, out, dims, keep_dim, reduce_all);
        }));
  } else {
    // Cast x to out_dtype first, then reduce the casted tensor.
    auto tmp_tensor = phi::Cast<T, DeviceContext>(dev_ctx, x, out_dtype);

    // Dispatch on out_dtype and apply the reduction functor.
    PD_VISIT_ALL_TYPES(
        out_dtype, "ReduceKernelImpl", ([&] {
          phi::funcs::ReduceKernelImpl<DeviceContext, T, data_t, Functor>(
              dev_ctx, tmp_tensor, out, dims, keep_dim, reduce_all);
        }));
  }
}
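
// Illustrative call site (a sketch, not taken from this file): a reduce-sum
// kernel could instantiate Reduce with a sum functor such as
// phi::funcs::SumFunctor, assumed available from phi's reduce functor
// helpers, e.g.
//
//   phi::Reduce<Context, T, phi::funcs::SumFunctor>(
//       dev_ctx, x, reduce_all, dims, keep_dim, out_dtype, out);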

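// Reduces `input` along `dims` with a boolean-valued reduction (e.g. all/any).
// Non-bool inputs are cast to bool first, and `output` is allocated as bool.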
template <typename DeviceContext, typename T, typename Functor>
void BoolReduceKernel(const DeviceContext& dev_ctx,
                      const phi::DenseTensor& input,
                      const std::vector<int64_t>& dims,
                      bool keep_dim,
                      bool reduce_all,
                      phi::DenseTensor* output) {
  reduce_all = recompute_reduce_all(input, dims, reduce_all);
  dev_ctx.template Alloc<bool>(output);

  // If dims covers every input dimension, set reduce_all to true.
  const auto& input_dim_size = input.dims().size();
  std::set<int> dims_set(dims.begin(), dims.end());
  bool full_dim = true;
  for (auto i = 0; i < input_dim_size; i++) {
    if (dims_set.find(i) == dims_set.end()) {
      full_dim = false;
      break;
    }
  }
  reduce_all = (reduce_all || full_dim);
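
  // Cast a non-bool input to bool before reducing.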
  DenseTensor tmp_tensor;
  if (input.dtype() != phi::DataType::BOOL) {
    tmp_tensor =
        phi::Cast<T, DeviceContext>(dev_ctx, input, phi::DataType::BOOL);
  } else {
    tmp_tensor = input;
  }
  funcs::ReduceKernelImpl<DeviceContext, bool, bool, Functor>(
      dev_ctx, tmp_tensor, output, dims, keep_dim, reduce_all);
}

}  // namespace phi