From 59d50468e6212973e35b3b340cd1a09143fbc8bb Mon Sep 17 00:00:00 2001
From: zhaoying9105
Date: Thu, 30 Jun 2022 18:33:31 +0800
Subject: [PATCH] [MLU] add exp and exp_grad kernel (#43852)

---
 paddle/fluid/operators/activation_op_mlu.cc        |  56 +++++++++
 .../fluid/tests/unittests/mlu/test_exp_op_mlu.py   | 114 ++++++++++++++++++
 2 files changed, 170 insertions(+)
 create mode 100644 python/paddle/fluid/tests/unittests/mlu/test_exp_op_mlu.py

diff --git a/paddle/fluid/operators/activation_op_mlu.cc b/paddle/fluid/operators/activation_op_mlu.cc
index 4d6fe0d2b38..e19ce87e7c8 100644
--- a/paddle/fluid/operators/activation_op_mlu.cc
+++ b/paddle/fluid/operators/activation_op_mlu.cc
@@ -208,6 +208,54 @@ class LogMLUKernel : public framework::OpKernel<T> {
   }
 };
 
+template <typename T>
+class ExpMLUKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    auto* input = ctx.Input<Tensor>("X");
+    auto* output = ctx.Output<Tensor>("Out");
+    output->mutable_data<T>(ctx.GetPlace());
+
+    MLUCnnlTensorDesc input_desc(*input);
+    MLUCnnlTensorDesc output_desc(*output);
+    cnnlComputationPreference_t prefer = CNNL_COMPUTATION_HIGH_PRECISION;
+
+    MLUCnnl::Exp(ctx,
+                 prefer,
+                 input_desc.get(),
+                 GetBasePtr(input),
+                 output_desc.get(),
+                 GetBasePtr(output));
+  }
+};
+
+template <typename T>
+class ExpGradMLUKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    auto* out = ctx.Input<Tensor>("Out");
+    auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
+    auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
+    dx->mutable_data<T>(ctx.GetPlace());
+    MLUCnnlTensorDesc dout_desc(*dout);
+    MLUCnnlTensorDesc dx_desc(*dx);
+    MLUCnnlTensorDesc out_desc(*out);
+
+    MLUCnnlOpTensorDesc op_tensor_desc(
+        CNNL_OP_TENSOR_MUL, ToCnnlDataType<T>(), CNNL_NOT_PROPAGATE_NAN);
+
+    MLUCnnl::OpTensor(ctx,
+                      op_tensor_desc.get(),
+                      dout_desc.get(),
+                      GetBasePtr(dout),
+                      out_desc.get(),
+                      GetBasePtr(out),
+                      dx_desc.get(),
+                      GetBasePtr(dx),
+                      ToCnnlDataType<T>());
+  }
+};
+
 }  // namespace operators
 }  // namespace paddle
 
@@ -303,3 +351,11 @@ REGISTER_OP_MLU_KERNEL(
     log10,
     ops::LogMLUKernel<float>,
     ops::LogMLUKernel<paddle::platform::float16>);
+
+REGISTER_OP_MLU_KERNEL(exp,
+                       ops::ExpMLUKernel<float>,
+                       ops::ExpMLUKernel<paddle::platform::float16>);
+
+REGISTER_OP_MLU_KERNEL(exp_grad,
+                       ops::ExpGradMLUKernel<float>,
+                       ops::ExpGradMLUKernel<paddle::platform::float16>);
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_exp_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_exp_op_mlu.py
new file mode 100644
index 00000000000..70c001c69cf
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/mlu/test_exp_op_mlu.py
@@ -0,0 +1,114 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import numpy as np
+import unittest
+import sys
+
+sys.path.append("..")
+from op_test import OpTest
+import paddle
+import paddle.fluid as fluid
+
+paddle.enable_static()
+SEED = 2021
+
+
+class TestExp(OpTest):
+
+    def setUp(self):
+        self.set_mlu()
+        self.op_type = "exp"
+        self.place = paddle.MLUPlace(0)
+
+        self.init_dtype()
+        np.random.seed(SEED)
+        x = np.random.rand(20, 5).astype(self.dtype)
+        out = np.exp(x)
+
+        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+        self.attrs = {}
+        self.outputs = {'Out': out}
+
+    def set_mlu(self):
+        self.__class__.use_mlu = True
+
+    def init_dtype(self):
+        self.dtype = np.float32
+
+    def test_check_output(self):
+        self.check_output_with_place(self.place)
+
+    def test_check_grad(self):
+        self.check_grad_with_place(self.place, ['X'], 'Out')
+
+
+class TestExpFp16(OpTest):
+
+    def setUp(self):
+        self.set_mlu()
+        self.op_type = "exp"
+        self.place = paddle.MLUPlace(0)
+
+        self.init_dtype()
+        np.random.seed(SEED)
+        x = np.random.rand(20, 5).astype(self.dtype)
+        out = np.exp(x)
+
+        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+        self.attrs = {}
+        self.outputs = {'Out': out}
+
+    def set_mlu(self):
+        self.__class__.use_mlu = True
+        self.__class__.no_need_check_grad = True
+
+    def init_dtype(self):
+        self.dtype = np.float16
+
+    def test_check_output(self):
+        self.check_output_with_place(self.place)
+
+
+class TestExpNeg(OpTest):
+
+    def setUp(self):
+        self.set_mlu()
+        self.op_type = "exp"
+        self.place = paddle.MLUPlace(0)
+
+        self.init_dtype()
+        np.random.seed(SEED)
+        x = np.random.random([20, 5]).astype(self.dtype)
+        x -= 1
+        out = np.exp(x)
+
+        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+        self.attrs = {}
+        self.outputs = {'Out': out}
+
+    def set_mlu(self):
+        self.__class__.use_mlu = True
+
+    def init_dtype(self):
+        self.dtype = np.float32
+
+    def test_check_output(self):
+        self.check_output_with_place(self.place)
+
+
+if __name__ == '__main__':
+    unittest.main()
--
GitLab
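Note for reviewers: the backward kernel above computes dx = dout * out by multiplying the upstream gradient with the saved forward output, since d(exp(x))/dx = exp(x), which the forward pass has already produced as Out. A minimal NumPy sketch of that identity, using illustrative variable names that are not part of the patch:

```python
import numpy as np

# Reference semantics of the exp / exp_grad pair added in this patch.
# forward:  out = exp(x)
# backward: dx  = dout * out   (d(exp(x))/dx = exp(x) = out)
np.random.seed(2021)
x = np.random.rand(20, 5).astype(np.float32)
out = np.exp(x)

dout = np.random.rand(20, 5).astype(np.float32)  # some upstream gradient
dx = dout * out  # what ExpGradMLUKernel computes via CNNL_OP_TENSOR_MUL

# The result must match the analytic gradient dout * exp(x).
assert np.allclose(dx, dout * np.exp(x), rtol=1e-6)
print("exp_grad identity holds for shape", dx.shape)
```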