diff --git a/paddle/fluid/operators/activation_op_mlu.cc b/paddle/fluid/operators/activation_op_mlu.cc
index 90d0a72074b81da90dcadc71e4a468c7d8f3db94..1debfbf4af2a3131d9106e2c76d805c31c8b035f 100644
--- a/paddle/fluid/operators/activation_op_mlu.cc
+++ b/paddle/fluid/operators/activation_op_mlu.cc
@@ -145,6 +145,26 @@ class SqrtGradMLUKernel : public framework::OpKernel<T> {
   }
 };
 
+// CNNL_LOG_E = 0,
+// CNNL_LOG_2 = 1,
+// CNNL_LOG_10 = 2,
+template <cnnlLogBase_t Log_base, typename T>
+class LogMLUKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    auto* input = ctx.Input<Tensor>("X");
+    auto* output = ctx.Output<Tensor>("Out");
+    output->mutable_data<T>(ctx.GetPlace());
+
+    MLUCnnlTensorDesc input_desc(*input);
+    MLUCnnlTensorDesc output_desc(*output);
+    cnnlComputationPreference_t prefer = CNNL_COMPUTATION_HIGH_PRECISION;
+
+    MLUCnnl::Log(ctx, prefer, Log_base, input_desc.get(), GetBasePtr(input),
+                 output_desc.get(), GetBasePtr(output));
+  }
+};
+
 }  // namespace operators
 }  // namespace paddle
 
@@ -213,3 +233,16 @@ REGISTER_OP_MLU_KERNEL(sqrt, ops::SqrtMLUKernel<float>,
                        ops::SqrtMLUKernel<paddle::platform::float16>);
 REGISTER_OP_MLU_KERNEL(sqrt_grad, ops::SqrtGradMLUKernel<float>,
                        ops::SqrtGradMLUKernel<paddle::platform::float16>);
+
+// log log2 log10
+REGISTER_OP_MLU_KERNEL(
+    log, ops::LogMLUKernel<CNNL_LOG_E, float>,
+    ops::LogMLUKernel<CNNL_LOG_E, paddle::platform::float16>);
+
+REGISTER_OP_MLU_KERNEL(
+    log2, ops::LogMLUKernel<CNNL_LOG_2, float>,
+    ops::LogMLUKernel<CNNL_LOG_2, paddle::platform::float16>);
+
+REGISTER_OP_MLU_KERNEL(
+    log10, ops::LogMLUKernel<CNNL_LOG_10, float>,
+    ops::LogMLUKernel<CNNL_LOG_10, paddle::platform::float16>);
diff --git a/paddle/fluid/operators/mlu/mlu_baseop.cc b/paddle/fluid/operators/mlu/mlu_baseop.cc
index daae452b23abc1c7891d9a02fabdb74ab708697f..d5b843d47afb79f8f7c87615209157afb1f8ce71 100644
--- a/paddle/fluid/operators/mlu/mlu_baseop.cc
+++ b/paddle/fluid/operators/mlu/mlu_baseop.cc
@@ -901,14 +901,11 @@ MLUCnnlTrigonDesc::~MLUCnnlTrigonDesc() {
       cnnlAddN(handle, inputs_desc, inputs, input_num, output_desc, output));
 }
 
-/* static */ void MLUCnnl::Log(const ExecutionContext& ctx,
-                               cnnlComputationPreference_t prefer,
-                               const cnnlTensorDescriptor_t input_desc,
-                               const void* input,
-                               const cnnlTensorDescriptor_t output_desc,
-                               void* output) {
+/* static */ void MLUCnnl::Log(
+    const ExecutionContext& ctx, cnnlComputationPreference_t prefer,
+    cnnlLogBase_t log_base, const cnnlTensorDescriptor_t input_desc,
+    const void* input, const cnnlTensorDescriptor_t output_desc, void* output) {
   cnnlHandle_t handle = GetHandleFromCTX(ctx);
-  cnnlLogBase_t log_base = CNNL_LOG_E;
 
   PADDLE_ENFORCE_MLU_SUCCESS(cnnlLog_v2(handle, prefer, log_base, input_desc,
                                         input, output_desc, output));
diff --git a/paddle/fluid/operators/mlu/mlu_baseop.h b/paddle/fluid/operators/mlu/mlu_baseop.h
index 288d74a135bb5884ad201844103058d0e1685c6c..71648c5c5fbcab28afe0d06c3855e7656ee28645 100644
--- a/paddle/fluid/operators/mlu/mlu_baseop.h
+++ b/paddle/fluid/operators/mlu/mlu_baseop.h
@@ -666,7 +666,7 @@ class MLUCnnl {
                    const cnnlTensorDescriptor_t output_desc, void* output);
 
   static void Log(const ExecutionContext& ctx,
-                  cnnlComputationPreference_t prefer,
+                  cnnlComputationPreference_t prefer, cnnlLogBase_t log_base,
                   const cnnlTensorDescriptor_t input_desc, const void* input,
                   const cnnlTensorDescriptor_t output_desc, void* output);
 
diff --git a/python/paddle/fluid/tests/unittests/mlu/test_log_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_log_op_mlu.py
new file mode 100644
index 0000000000000000000000000000000000000000..82aeb577205d5de9c1d38b6eca31db3a5870b0bf
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/mlu/test_log_op_mlu.py
@@ -0,0 +1,216 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import numpy as np
+import unittest
+import sys
+
+sys.path.append("..")
+from op_test import OpTest
+import paddle
+import paddle.fluid as fluid
+
+paddle.enable_static()
+SEED = 2021
+
+
+class TestActivation(OpTest):
+
+    def setUp(self):
+        self.set_mlu()
+        self.op_type = "exp"
+        self.init_dtype()
+        self.init_kernel_type()
+        self.python_api = paddle.exp
+
+        np.random.seed(2049)
+        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
+        out = np.exp(x)
+
+        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+        self.outputs = {'Out': out}
+
+    def test_check_output(self):
+        self.check_output_with_place(self.place)
+
+    def init_dtype(self):
+        self.dtype = np.float32
+
+    def init_kernel_type(self):
+        pass
+
+    def set_mlu(self):
+        self.__class__.use_mlu = True
+        self.place = paddle.MLUPlace(0)
+        self.__class__.no_need_check_grad = True
+
+
+class TestLog(TestActivation):
+
+    def setUp(self):
+        self.set_mlu()
+        self.op_type = "log"
+        self.python_api = paddle.log
+        self.init_dtype()
+
+        np.random.seed(1024)
+        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
+        out = np.log(x)
+
+        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+        self.outputs = {'Out': out}
+
+    def test_error(self):
+        in1 = fluid.layers.data(name="in1",
+                                shape=[11, 17],
+                                append_batch_size=False,
+                                dtype="int32")
+        in2 = fluid.layers.data(name="in2",
+                                shape=[11, 17],
+                                append_batch_size=False,
+                                dtype="int64")
+
+        self.assertRaises(TypeError, fluid.layers.log, in1)
+        self.assertRaises(TypeError, fluid.layers.log, in2)
+
+
+class TestLog2(TestActivation):
+
+    def setUp(self):
+        self.set_mlu()
+        self.op_type = "log2"
+        self.python_api = paddle.log2
+        self.init_dtype()
+
+        x = np.random.uniform(1, 10, [11, 17]).astype(self.dtype)
+        out = np.log2(x)
+
+        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+        self.outputs = {'Out': out}
+
+    def test_error(self):
+        in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
+        in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")
+
+        self.assertRaises(TypeError, paddle.log2, in1)
+        self.assertRaises(TypeError, paddle.log2, in2)
+
+    def test_api(self):
+        with paddle.static.program_guard(paddle.static.Program(),
+                                         paddle.static.Program()):
+            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float32")
+            data_x = paddle.static.data(name="data_x",
+                                        shape=[11, 17],
+                                        dtype="float32")
+
+            out1 = paddle.log2(data_x)
+            exe = paddle.static.Executor(place=fluid.CPUPlace())
+            exe.run(paddle.static.default_startup_program())
+            res1 = exe.run(paddle.static.default_main_program(),
+                           feed={"data_x": input_x},
+                           fetch_list=[out1])
+        expected_res = np.log2(input_x)
+        self.assertTrue(np.allclose(res1, expected_res))
+
+        # dygraph
+        with fluid.dygraph.guard():
+            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float32")
+            data_x = paddle.to_tensor(np_x)
+            z = paddle.log2(data_x)
+            np_z = z.numpy()
+            z_expected = np.array(np.log2(np_x))
+            np.savetxt("np_z.txt", np_z.flatten(), fmt="%.4f")
+            np.savetxt("z_expected.txt", z_expected.flatten(), fmt="%.4f")
+        self.assertTrue(np.allclose(np_z, z_expected, atol=1e-6))
+
+
+class TestLog10(TestActivation):
+
+    def setUp(self):
+        self.set_mlu()
+        self.op_type = "log10"
+        self.python_api = paddle.log10
+        self.init_dtype()
+
+        x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
+        out = np.log10(x)
+
+        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+        self.outputs = {'Out': out}
+
+    def test_error(self):
+        in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
+        in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")
+
+        self.assertRaises(TypeError, paddle.log10, in1)
+        self.assertRaises(TypeError, paddle.log10, in2)
+
+    def test_api(self):
+        with paddle.static.program_guard(paddle.static.Program(),
+                                         paddle.static.Program()):
+            input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float32")
+            data_x = paddle.static.data(name="data_x",
+                                        shape=[11, 17],
+                                        dtype="float32")
+
+            out1 = paddle.log10(data_x)
+            exe = paddle.static.Executor(place=paddle.CPUPlace())
+            exe.run(paddle.static.default_startup_program())
+            res1 = exe.run(paddle.static.default_main_program(),
+                           feed={"data_x": input_x},
+                           fetch_list=[out1])
+        expected_res = np.log10(input_x)
+        self.assertTrue(np.allclose(res1, expected_res))
+
+        # dygraph
+        with fluid.dygraph.guard():
+            np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float32")
+            data_x = paddle.to_tensor(np_x)
+            z = paddle.log10(data_x)
+            np_z = z.numpy()
+            z_expected = np.array(np.log10(np_x))
+        self.assertTrue(np.allclose(np_z, z_expected))
+
+
+class TestLogHalf(TestLog):
+
+    def init_dtype(self):
+        self.dtype = np.float16
+
+    def test_api(self):
+        pass
+
+
+class TestLog2Half(TestLog2):
+
+    def init_dtype(self):
+        self.dtype = np.float16
+
+    def test_api(self):
+        pass
+
+
+class TestLog10Half(TestLog10):
+
+    def init_dtype(self):
+        self.dtype = np.float16
+
+    def test_api(self):
+        pass
+
+
+if __name__ == '__main__':
+    unittest.main()