log_softmax_grad_kernel.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/gpudnn/softmax_gpudnn.h"
#include "paddle/phi/kernels/log_softmax_grad_kernel.h"

namespace phi {

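// Backward pass for log_softmax: given y = log_softmax(x) and upstream
// gradient dL/dy, the input gradient along `axis` is
//   dL/dx = dL/dy - exp(y) * sum(dL/dy, axis).
// The computation is delegated to the shared softmax backward CUDA driver;
// the `true` template argument selects log-softmax mode.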
template <typename T, typename Context>
void LogSoftmaxGradKernel(const Context &dev_ctx,
                          const DenseTensor &out,
                          const DenseTensor &out_grad,
                          int axis,
                          DenseTensor *x_grad) {
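  // Allocate the output gradient buffer before launching the backward driver.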
  dev_ctx.template Alloc<T>(x_grad);
  phi::SoftmaxBackwardCUDAKernelDriver<T, true>(
      dev_ctx, out, out_grad, axis, x_grad);
}

}  // namespace phi

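// ROCm (HIP) builds register float/float16/bfloat16 only; CUDA builds
// additionally register double.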
#ifdef PADDLE_WITH_HIP
PD_REGISTER_KERNEL(log_softmax_grad,
                   GPU,
                   ALL_LAYOUT,
                   phi::LogSoftmaxGradKernel,
                   float,
                   phi::dtype::float16,
                   phi::dtype::bfloat16) {}
#else
PD_REGISTER_KERNEL(log_softmax_grad,
                   GPU,
                   ALL_LAYOUT,
                   phi::LogSoftmaxGradKernel,
                   float,
                   double,
                   phi::dtype::float16,
                   phi::dtype::bfloat16) {}
#endif