// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "paddle/phi/kernels/cast_kernel.h"
#include "paddle/phi/kernels/empty_kernel.h"
#include "paddle/phi/kernels/funcs/reduce_grad_functions.h"

namespace phi {

template <typename Context,
          typename T,
          typename Functor,
          bool kNoNeedBufferX = false,
          bool kNoNeedBufferY = false>
void ComputeFromInput(const Context& dev_ctx,
                      const DenseTensor& x,
                      const DenseTensor& out_grad,
                      const paddle::optional<DenseTensor>& out,
                      const DenseTensor& input2,
                      const std::vector<int64_t>& dims,
                      bool keep_dim,
                      bool reduce_all,
                      DenseTensor* x_grad) {
  auto* input0 = &x;
  auto* input1 = out.get_ptr();
  auto* output = x_grad;
  dev_ctx.template Alloc<T>(output);

  // If dims covers every dimension of x, force reduce_all to true.
  const auto& input_dim_size = x.dims().size();
  std::set<int> dims_set(dims.begin(), dims.end());
  bool full_dim = true;
  for (auto i = 0; i < input_dim_size; i++) {
    if (dims_set.find(i) == dims_set.end()) {
      full_dim = false;
      break;
    }
  }
  reduce_all = (reduce_all || full_dim);
  // NOTE: EigenTensor::From() uses tensor->data(). If the op has a
  // NoNeedBufferVarsInferer, the corresponding kNoNeedBufferX or
  // kNoNeedBufferY should be set to true, and a stand-in tensor with the
  // same dims is used instead.
  if (kNoNeedBufferX) {
    input0 = output;
  }
  if (kNoNeedBufferY) {
    input1 = &input2;
  }

  const std::vector<int> const_dims{dims.begin(), dims.end()};

  // NOTE(dengkaipeng): Out is unnecessary in some reduce kernels and is not
  // set as an input in the grad maker, so fall back to out_grad here.
  if (!input1) input1 = &input2;
  Functor functor;

  funcs::LaunchReduceGradKernel<Context, T, Functor>(dev_ctx,
                                                     input0,
                                                     input1,
                                                     &input2,
                                                     output,
                                                     functor,
                                                     const_dims,
                                                     reduce_all);
}
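
// A minimal sketch of the Functor contract, mirroring the shape of the grad
// functors in paddle/phi/kernels/funcs (MeanGradFunctor is shown as an
// assumed example, not a definition this header provides): the functor
// receives Eigen expressions for x, out, x_grad, and out_grad plus the
// broadcast dim, and writes the gradient into dx.
//
//   struct MeanGradFunctor {
//     template <typename DeviceContext, typename X, typename Y, typename DX,
//               typename DY, typename Dim>
//     void operator()(const DeviceContext& place, X* x, Y* y, DX* dx, DY* dy,
//                     const Dim& dim, int size) {
//       // d(mean)/dx broadcasts dout over the reduced dims and divides by
//       // the number of reduced elements.
//       dx->device(place) = dy->broadcast(dim) / dx->constant(size);
//     }
//   };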

template <typename Context,
          typename T,
          typename Functor,
          bool kNoNeedBufferX = false,
          bool kNoNeedBufferY = false>
void ReduceGradKernel(const Context& dev_ctx,
                      const DenseTensor& x,
                      const paddle::optional<DenseTensor>& out,
                      const DenseTensor& out_grad,
                      const std::vector<int64_t>& dims,
                      bool keep_dim,
                      bool reduce_all,
                      DenseTensor* x_grad) {
  // An empty dims list means reduce over all dimensions.
  if (dims.size() == 0) {
    reduce_all = true;
  }
  if (x.dtype() != out_grad.dtype()) {
    // Compute the gradient in out_grad's dtype first, then cast the result
    // back to x's dtype after ComputeFromInput.
    DenseTensorMeta x_grad_meta(
        out_grad.dtype(), x_grad->dims(), x_grad->layout());
    DenseTensor x_grad_tmp =
        phi::Empty<Context>(dev_ctx, std::move(x_grad_meta));
    ComputeFromInput<Context, T, Functor, kNoNeedBufferX, kNoNeedBufferY>(
        dev_ctx,
        x,
        out_grad,
        out,
        out_grad,
        dims,
        keep_dim,
        reduce_all,
        &x_grad_tmp);

    phi::CastKernel<T>(dev_ctx, x_grad_tmp, x.dtype(), x_grad);
  } else {
    ComputeFromInput<Context, T, Functor, kNoNeedBufferX, kNoNeedBufferY>(
        dev_ctx,
        x,
        out_grad,
        out,
        out_grad,
        dims,
        keep_dim,
        reduce_all,
        x_grad);
  }
}
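
// Usage sketch (the kernel name, functor namespace, and the empty-optional
// spelling below are assumptions for illustration, not the actual Paddle
// registration): a concrete grad kernel forwards to ReduceGradKernel with its
// functor. kNoNeedBufferX is set to true because mean's gradient never reads
// x's data, only its dims.
//
//   template <typename T, typename Context>
//   void MeanGradKernel(const Context& dev_ctx,
//                       const DenseTensor& x,
//                       const DenseTensor& out_grad,
//                       const std::vector<int64_t>& dims,
//                       bool keep_dim,
//                       bool reduce_all,
//                       DenseTensor* x_grad) {
//     ReduceGradKernel<Context, T, funcs::MeanGradFunctor, true>(
//         dev_ctx, x, paddle::none, out_grad, dims, keep_dim, reduce_all,
//         x_grad);
//   }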

}  // namespace phi