/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <memory>
#include <string>

#include "paddle/fluid/operators/activation_op.h"
#include "paddle/fluid/operators/mlu/mlu_baseop.h"

namespace paddle {
namespace operators {

using Tensor = framework::Tensor;

// Forward kernel shared by the unary activations; act_mode selects the
// CNNL activation, and alpha is forwarded for ops that define it.
template <cnnlActivationMode_t act_mode, typename T>
class ActivationMLUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* input = ctx.Input<Tensor>("X");
    auto* output = ctx.Output<Tensor>("Out");
    float alpha = ctx.HasAttr("alpha") ? ctx.Attr<float>("alpha") : 1.0f;

    output->mutable_data<T>(ctx.GetPlace());

    MLUCnnlActivationDesc act_desc(act_mode, alpha);
    MLUCnnlTensorDesc input_desc(*input);
    MLUCnnlTensorDesc output_desc(*output);

    MLUCnnl::Active(ctx, act_desc.get(), input_desc.get(), GetBasePtr(input),
                    output_desc.get(), GetBasePtr(output));
  }
};

// For gelu, leaky_relu: the gradient is computed from the forward input X.
template <cnnlActivationMode_t act_mode, typename T>
class ActivationGradMLUKernelV1 : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* x = ctx.Input<Tensor>("X");
    auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
    auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
    float alpha = ctx.HasAttr("alpha") ? ctx.Attr<float>("alpha") : 1.0f;

    dx->mutable_data<T>(ctx.GetPlace());

    MLUCnnlTensorDesc x_desc(*x);
    MLUCnnlTensorDesc dout_desc(*dout);
    MLUCnnlTensorDesc dx_desc(*dx);
    MLUCnnlActivationDesc act_desc(act_mode, alpha);
    MLUCnnl::ActiveGrad(ctx, act_desc.get(), nullptr, nullptr, nullptr,
                        nullptr, dout_desc.get(), GetBasePtr(dout),
                        x_desc.get(), GetBasePtr(x), dx_desc.get(),
                        GetBasePtr(dx));
  }
};

// For tanh, sigmoid: the gradient is computed from the forward output Out.
template <cnnlActivationMode_t act_mode, typename T>
class ActivationGradMLUKernelV2 : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* out = ctx.Input<Tensor>("Out");
    auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
    auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
    float alpha = ctx.HasAttr("alpha") ? ctx.Attr<float>("alpha") : 1.0f;

    dx->mutable_data<T>(ctx.GetPlace());

    MLUCnnlTensorDesc out_desc(*out);
    MLUCnnlTensorDesc dout_desc(*dout);
    MLUCnnlTensorDesc dx_desc(*dx);
    MLUCnnlActivationDesc act_desc(act_mode, alpha);
    MLUCnnl::ActiveGrad(ctx, act_desc.get(), nullptr, nullptr, out_desc.get(),
                        GetBasePtr(out), dout_desc.get(), GetBasePtr(dout),
                        nullptr, nullptr, dx_desc.get(), GetBasePtr(dx));
  }
};

// For relu, relu6: the gradient also needs only the forward output Out,
// which is passed through the x slot of ActiveGrad.
template <cnnlActivationMode_t act_mode, typename T>
class ActivationGradMLUKernelV3 : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* out = ctx.Input<Tensor>("Out");
    auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
    auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
    float alpha = ctx.HasAttr("alpha") ? ctx.Attr<float>("alpha") : 1.0f;
    dx->mutable_data<T>(ctx.GetPlace());

    MLUCnnlTensorDesc out_desc(*out);
    MLUCnnlTensorDesc dout_desc(*dout);
    MLUCnnlTensorDesc dx_desc(*dx);
    MLUCnnlActivationDesc act_desc(act_mode, alpha);
    MLUCnnl::ActiveGrad(ctx, act_desc.get(), nullptr, nullptr, nullptr,
                        nullptr, dout_desc.get(), GetBasePtr(dout),
                        out_desc.get(), GetBasePtr(out), dx_desc.get(),
                        GetBasePtr(dx));
  }
};

// For sqrt
template <typename T>
class SqrtMLUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* x = ctx.Input<Tensor>("X");
    auto* out = ctx.Output<Tensor>("Out");
    auto place = ctx.GetPlace();

    out->mutable_data<T>(place);

    MLUCnnlTensorDesc input_desc(*x);
    MLUCnnlTensorDesc output_desc(*out);

    cnnlComputationPreference_t prefer = CNNL_COMPUTATION_FAST;
    MLUCnnl::Sqrt(ctx, prefer, input_desc.get(), GetBasePtr(x),
                  output_desc.get(), GetBasePtr(out));
  }
};

template <typename T>
class SqrtGradMLUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* out = ctx.Input<Tensor>("Out");
    auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
    auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
    auto place = ctx.GetPlace();

    dx->mutable_data<T>(place);

    // SqrtGrad needs only the forward output: dx = dout / (2 * out).
    MLUCnnlTensorDesc data_desc(*out);
    MLUCnnl::SqrtGrad(ctx, data_desc.get(), GetBasePtr(out), GetBasePtr(dout),
                      GetBasePtr(dx));
  }
};

// CNNL_LOG_E = 0,
// CNNL_LOG_2 = 1,
// CNNL_LOG_10 = 2,
template <cnnlLogBase_t Log_base, typename T>
class LogMLUKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& ctx) const override {
    auto* input = ctx.Input<Tensor>("X");
    auto* output = ctx.Output<Tensor>("Out");
    output->mutable_data<T>(ctx.GetPlace());

    MLUCnnlTensorDesc input_desc(*input);
    MLUCnnlTensorDesc output_desc(*output);
    cnnlComputationPreference_t prefer = CNNL_COMPUTATION_HIGH_PRECISION;

    MLUCnnl::Log(ctx, prefer, Log_base, input_desc.get(), GetBasePtr(input),
                 output_desc.get(), GetBasePtr(output));
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

// relu
REGISTER_OP_MLU_KERNEL(
    relu, ops::ActivationMLUKernel<CNNL_ACTIVATION_RELU, float>,
    ops::ActivationMLUKernel<CNNL_ACTIVATION_RELU, paddle::platform::float16>);
REGISTER_OP_MLU_KERNEL(
    relu_grad, ops::ActivationGradMLUKernelV3<CNNL_ACTIVATION_RELU, float>,
    ops::ActivationGradMLUKernelV3<CNNL_ACTIVATION_RELU,
                                   paddle::platform::float16>);

// relu6
REGISTER_OP_MLU_KERNEL(
    relu6, ops::ActivationMLUKernel<CNNL_ACTIVATION_RELU6, float>,
    ops::ActivationMLUKernel<CNNL_ACTIVATION_RELU6,
                             paddle::platform::float16>);
REGISTER_OP_MLU_KERNEL(
    relu6_grad, ops::ActivationGradMLUKernelV3<CNNL_ACTIVATION_RELU6, float>,
    ops::ActivationGradMLUKernelV3<CNNL_ACTIVATION_RELU6,
                                   paddle::platform::float16>);

// sigmoid
REGISTER_OP_MLU_KERNEL(
    sigmoid, ops::ActivationMLUKernel<CNNL_ACTIVATION_SIGMOID, float>,
    ops::ActivationMLUKernel<CNNL_ACTIVATION_SIGMOID,
                             paddle::platform::float16>);
REGISTER_OP_MLU_KERNEL(
    sigmoid_grad,
    ops::ActivationGradMLUKernelV2<CNNL_ACTIVATION_SIGMOID, float>,
    ops::ActivationGradMLUKernelV2<CNNL_ACTIVATION_SIGMOID,
                                   paddle::platform::float16>);

// tanh
REGISTER_OP_MLU_KERNEL(
    tanh, ops::ActivationMLUKernel<CNNL_ACTIVATION_TANH, float>,
    ops::ActivationMLUKernel<CNNL_ACTIVATION_TANH, paddle::platform::float16>);
REGISTER_OP_MLU_KERNEL(
    tanh_grad, ops::ActivationGradMLUKernelV2<CNNL_ACTIVATION_TANH, float>,
    ops::ActivationGradMLUKernelV2<CNNL_ACTIVATION_TANH,
                                   paddle::platform::float16>);

// gelu
REGISTER_OP_MLU_KERNEL(
    gelu, ops::ActivationMLUKernel<CNNL_ACTIVATION_GELU, float>,
    ops::ActivationMLUKernel<CNNL_ACTIVATION_GELU, paddle::platform::float16>);
REGISTER_OP_MLU_KERNEL(
    gelu_grad, ops::ActivationGradMLUKernelV1<CNNL_ACTIVATION_GELU, float>,
    ops::ActivationGradMLUKernelV1<CNNL_ACTIVATION_GELU,
                                   paddle::platform::float16>);

// leaky_relu
REGISTER_OP_MLU_KERNEL(
    leaky_relu, ops::ActivationMLUKernel<CNNL_ACTIVATION_LEAKYRELU, float>,
    ops::ActivationMLUKernel<CNNL_ACTIVATION_LEAKYRELU,
                             paddle::platform::float16>);
REGISTER_OP_MLU_KERNEL(
    leaky_relu_grad,
    ops::ActivationGradMLUKernelV1<CNNL_ACTIVATION_LEAKYRELU, float>,
    ops::ActivationGradMLUKernelV1<CNNL_ACTIVATION_LEAKYRELU,
                                   paddle::platform::float16>);

// sqrt
REGISTER_OP_MLU_KERNEL(sqrt, ops::SqrtMLUKernel<float>,
                       ops::SqrtMLUKernel<paddle::platform::float16>);
REGISTER_OP_MLU_KERNEL(sqrt_grad, ops::SqrtGradMLUKernel<float>,
                       ops::SqrtGradMLUKernel<paddle::platform::float16>);

// log log2 log10
REGISTER_OP_MLU_KERNEL(
    log, ops::LogMLUKernel<CNNL_LOG_E, float>,
    ops::LogMLUKernel<CNNL_LOG_E, paddle::platform::float16>);
REGISTER_OP_MLU_KERNEL(
    log2, ops::LogMLUKernel<CNNL_LOG_2, float>,
    ops::LogMLUKernel<CNNL_LOG_2, paddle::platform::float16>);
REGISTER_OP_MLU_KERNEL(
    log10, ops::LogMLUKernel<CNNL_LOG_10, float>,
    ops::LogMLUKernel<CNNL_LOG_10, paddle::platform::float16>);