// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <cstdint>
#include <memory>
#include <set>
#include <vector>

#include "paddle/fluid/platform/transform.h"
#include "paddle/pten/api/ext/dispatch.h"
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/kernels/hybird/eigen/reduce.h"
#include "paddle/pten/kernels/hybird/math/cast_func.h"

namespace pten {
namespace general {

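// Reduces `x` along `dims` using the reduction defined by `Functor`, writing
// the result to `out`. If `out_dtype` differs from the dtype of `x`, `x` is
// cast to `out_dtype` before reducing. `keep_dim` keeps the reduced
// dimensions with extent 1; `reduce_all` (or a `dims` list covering every
// dimension) reduces over the whole tensor.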
template <typename DeviceContext, typename T, typename Functor>
void Reduce(const DeviceContext& dev_ctx,
            const DenseTensor& x,
            bool reduce_all,
            const std::vector<int64_t>& dims,
            bool keep_dim,
            DataType out_dtype,
            DenseTensor* out) {
  // If dims covers every dimension of x, force reduce_all to true.
  const auto& input_dim_size = x.dims().size();
  std::set<int> dims_set(dims.begin(), dims.end());
  bool full_dim = true;
  for (auto i = 0; i < input_dim_size; ++i) {
    if (dims_set.find(i) == dims_set.end()) {
      full_dim = false;
      break;
    }
  }
  reduce_all = (reduce_all || full_dim);

  // no need to cast dtype
  if (out_dtype == pten::DataType::UNDEFINED || out_dtype == x.dtype()) {
    if (out_dtype == pten::DataType::UNDEFINED) {
      out_dtype = x.dtype();
    }
    // run the reduction directly in the input dtype
    PD_VISIT_ALL_TYPES(
        out_dtype, "ReduceKernelImpl", ([&] {
          pten::eigen::ReduceKernelImpl<DeviceContext, T, data_t, Functor>(
              dev_ctx, x, out, dims, keep_dim, reduce_all);
        }));
  } else {
    const auto alloc =
        std::make_shared<paddle::experimental::DefaultAllocator>(x.place());
    pten::DenseTensor tmp_tensor = pten::DenseTensor(
        alloc, pten::DenseTensorMeta(out_dtype, x.dims(), x.layout()));

    // first cast x to out_dtype
    PD_VISIT_ALL_TYPES(out_dtype, "CastKernelImpl", ([&] {
                         math::CastKernelImpl<DeviceContext, T, data_t>(
                             dev_ctx, x, &tmp_tensor);
                       }));

    // then run the reduction on the casted tensor
    PD_VISIT_ALL_TYPES(
        out_dtype, "ReduceKernelImpl", ([&] {
          pten::eigen::ReduceKernelImpl<DeviceContext, T, data_t, Functor>(
              dev_ctx, tmp_tensor, out, dims, keep_dim, reduce_all);
        }));
  }
}
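
// A minimal usage sketch, kept as a comment: `SumFunctor` and
// `CPUDeviceContext` below are placeholder names for a concrete reduction
// functor and backend context; they are assumptions for illustration, not
// names guaranteed by this header.
//
//   std::vector<int64_t> dims = {0};
//   pten::general::Reduce<CPUDeviceContext, float, SumFunctor>(
//       dev_ctx, x, /*reduce_all=*/false, dims, /*keep_dim=*/false,
//       pten::DataType::FLOAT64, &out);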

}  // namespace general

}  // namespace pten