// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/operators/log_softmax_op.h"
#include "paddle/phi/kernels/gpudnn/softmax_gpudnn.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;

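// Forward kernel: specializes LogSoftmaxKernel for CUDA and delegates to the
// shared phi softmax GPU driver; the trailing `true` template argument
// requests log-softmax rather than plain softmax.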
template <typename T>
class LogSoftmaxKernel<platform::CUDADeviceContext, T>
    : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    auto *x = ctx.Input<Tensor>("X");
    auto *out = ctx.Output<Tensor>("Out");
    out->mutable_data<T>(ctx.GetPlace());

    int input_axis = ctx.Attr<int>("axis");
    auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
    phi::SoftmaxForwardCUDAKernelDriver<T, true>(dev_ctx, *x, input_axis, out);
  }
};

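// Backward kernel: computes dX from Out and dOut by delegating to the shared
// phi softmax GPU backward driver, again in log-softmax mode.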
template <typename T>
class LogSoftmaxGradKernel<platform::CUDADeviceContext, T>
    : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext &ctx) const override {
    auto *out = ctx.Input<Tensor>("Out");
    auto *dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
    auto *dx = ctx.Output<Tensor>(framework::GradVarName("X"));
    dx->mutable_data<T>(ctx.GetPlace());

    int input_axis = ctx.Attr<int>("axis");
    auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
    phi::SoftmaxBackwardCUDAKernelDriver<T, true>(dev_ctx, *out, *dout,
                                                  input_axis, dx);
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
namespace plat = paddle::platform;

#ifdef PADDLE_WITH_HIP
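// ROCm (HIP) build: double precision is not registered for these kernels.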
REGISTER_OP_CUDA_KERNEL(
    log_softmax, ops::LogSoftmaxKernel<plat::CUDADeviceContext, float>,
    ops::LogSoftmaxKernel<plat::CUDADeviceContext, plat::float16>,
    ops::LogSoftmaxKernel<plat::CUDADeviceContext, plat::bfloat16>);
REGISTER_OP_CUDA_KERNEL(
    log_softmax_grad, ops::LogSoftmaxGradKernel<plat::CUDADeviceContext, float>,
    ops::LogSoftmaxGradKernel<plat::CUDADeviceContext, plat::float16>,
    ops::LogSoftmaxGradKernel<plat::CUDADeviceContext, plat::bfloat16>);
#else
REGISTER_OP_CUDA_KERNEL(
    log_softmax, ops::LogSoftmaxKernel<plat::CUDADeviceContext, float>,
    ops::LogSoftmaxKernel<plat::CUDADeviceContext, double>,
    ops::LogSoftmaxKernel<plat::CUDADeviceContext, plat::float16>,
    ops::LogSoftmaxKernel<plat::CUDADeviceContext, plat::bfloat16>);
REGISTER_OP_CUDA_KERNEL(
    log_softmax_grad, ops::LogSoftmaxGradKernel<plat::CUDADeviceContext, float>,
    ops::LogSoftmaxGradKernel<plat::CUDADeviceContext, double>,
    ops::LogSoftmaxGradKernel<plat::CUDADeviceContext, plat::float16>,
    ops::LogSoftmaxGradKernel<plat::CUDADeviceContext, plat::bfloat16>);
#endif