polygamma_grad_kernel.cc
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/polygamma_grad_kernel.h"

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/funcs/for_range.h"
#include "paddle/phi/kernels/impl/polygamma_kernel_impl.h"

namespace phi {

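// Backward kernel for polygamma: given the input x, the order n, and the
// upstream gradient out_grad, computes
//   x_grad = out_grad * polygamma(n + 1, x)
// elementwise, using the identity d/dx polygamma(n, x) = polygamma(n + 1, x).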
template <typename T, typename Context>
void PolygammaGradKernel(const Context& ctx,
                         const DenseTensor& x,
                         const DenseTensor& out_grad,
                         const int n,
                         DenseTensor* x_grad) {
  auto size = x.numel();
  auto* x_data = x.data<T>();
  auto* out_grad_data = out_grad.data<T>();
  auto* x_grad_data = ctx.template Alloc<T>(x_grad);

  phi::funcs::ForRange<Context> for_range(ctx, size);
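  // Differentiating polygamma(n, x) with respect to x raises the order by
  // one, so the functor is constructed with order n + 1. ForRange applies
  // the functor once per element.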
  PolygammaGradFunctor<T> functor(
      x_data, n + 1, out_grad_data, x_grad_data, size);
  for_range(functor);
}

}  // namespace phi

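// Register the CPU kernel; float and double are the supported data types.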
PD_REGISTER_KERNEL(
    polygamma_grad, CPU, ALL_LAYOUT, phi::PolygammaGradKernel, float, double) {}