Unverified commit 4642e8c4, authored by zhaoying9105, committed by GitHub

[MLU]: add log log10 log2 MLU kernel (#43360)

Parent ed6f1f90
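For reference, the ops wired up here are the existing paddle.log, paddle.log2 and
paddle.log10 Python APIs; with this change they can execute on MLU devices. A minimal
static-graph usage sketch follows (not part of the commit; it assumes a PaddlePaddle
build with MLU support and that paddle.MLUPlace(0) refers to an available card):

import numpy as np
import paddle

paddle.enable_static()

with paddle.static.program_guard(paddle.static.Program(), paddle.static.Program()):
    x_np = np.random.uniform(0.1, 1.0, [4, 8]).astype("float32")
    x = paddle.static.data(name="x", shape=[4, 8], dtype="float32")

    # These are the three ops this commit registers MLU kernels for,
    # backed by CNNL_LOG_E, CNNL_LOG_2 and CNNL_LOG_10 respectively.
    outs = [paddle.log(x), paddle.log2(x), paddle.log10(x)]

    exe = paddle.static.Executor(place=paddle.MLUPlace(0))
    exe.run(paddle.static.default_startup_program())
    log_e, log_2, log_10 = exe.run(paddle.static.default_main_program(),
                                   feed={"x": x_np},
                                   fetch_list=outs)

    # Cross-check against NumPy on the host.
    print(np.allclose(log_e, np.log(x_np)),
          np.allclose(log_2, np.log2(x_np)),
          np.allclose(log_10, np.log10(x_np)))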
@@ -145,6 +145,26 @@ class SqrtGradMLUKernel : public framework::OpKernel<T> {
}
};
// CNNL_LOG_E = 0,
// CNNL_LOG_2 = 1,
// CNNL_LOG_10 = 2,
template <cnnlLogBase_t Log_base, typename T>
class LogMLUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* input = ctx.Input<Tensor>("X");
auto* output = ctx.Output<Tensor>("Out");
output->mutable_data<T>(ctx.GetPlace());
MLUCnnlTensorDesc input_desc(*input);
MLUCnnlTensorDesc output_desc(*output);
cnnlComputationPreference_t prefer = CNNL_COMPUTATION_HIGH_PRECISION;
MLUCnnl::Log(ctx, prefer, Log_base, input_desc.get(), GetBasePtr(input),
output_desc.get(), GetBasePtr(output));
}
};
} // namespace operators
} // namespace paddle
@@ -213,3 +233,16 @@ REGISTER_OP_MLU_KERNEL(sqrt, ops::SqrtMLUKernel<float>,
ops::SqrtMLUKernel<paddle::platform::float16>);
REGISTER_OP_MLU_KERNEL(sqrt_grad, ops::SqrtGradMLUKernel<float>,
ops::SqrtGradMLUKernel<paddle::platform::float16>);
// log log2 log10
REGISTER_OP_MLU_KERNEL(
log, ops::LogMLUKernel<CNNL_LOG_E, float>,
ops::LogMLUKernel<CNNL_LOG_E, paddle::platform::float16>);
REGISTER_OP_MLU_KERNEL(
log2, ops::LogMLUKernel<CNNL_LOG_2, float>,
ops::LogMLUKernel<CNNL_LOG_2, paddle::platform::float16>);
REGISTER_OP_MLU_KERNEL(
log10, ops::LogMLUKernel<CNNL_LOG_10, float>,
ops::LogMLUKernel<CNNL_LOG_10, paddle::platform::float16>);
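The three REGISTER_OP_MLU_KERNEL calls above bind the single LogMLUKernel template to
three bases: log uses CNNL_LOG_E (natural log), log2 uses CNNL_LOG_2, and log10 uses
CNNL_LOG_10. Numerically the outputs differ only by a change-of-base factor,
log_b(x) = ln(x) / ln(b), which gives a quick host-side consistency check (a NumPy-only
sketch, independent of the MLU path):

import numpy as np

x = np.random.uniform(0.1, 1.0, [11, 17]).astype("float32")

ln_x = np.log(x)       # reference for the log kernel (CNNL_LOG_E)
log2_x = np.log2(x)    # reference for log2 (CNNL_LOG_2)
log10_x = np.log10(x)  # reference for log10 (CNNL_LOG_10)

# Change of base: log_b(x) = ln(x) / ln(b); the three references agree
# with each other up to float32 rounding.
assert np.allclose(log2_x, ln_x / np.log(2.0), atol=1e-6)
assert np.allclose(log10_x, ln_x / np.log(10.0), atol=1e-6)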
@@ -901,14 +901,11 @@ MLUCnnlTrigonDesc::~MLUCnnlTrigonDesc() {
       cnnlAddN(handle, inputs_desc, inputs, input_num, output_desc, output));
 }
 
-/* static */ void MLUCnnl::Log(const ExecutionContext& ctx,
-                               cnnlComputationPreference_t prefer,
-                               const cnnlTensorDescriptor_t input_desc,
-                               const void* input,
-                               const cnnlTensorDescriptor_t output_desc,
-                               void* output) {
+/* static */ void MLUCnnl::Log(
+    const ExecutionContext& ctx, cnnlComputationPreference_t prefer,
+    cnnlLogBase_t log_base, const cnnlTensorDescriptor_t input_desc,
+    const void* input, const cnnlTensorDescriptor_t output_desc, void* output) {
   cnnlHandle_t handle = GetHandleFromCTX(ctx);
-  cnnlLogBase_t log_base = CNNL_LOG_E;
   PADDLE_ENFORCE_MLU_SUCCESS(cnnlLog_v2(handle, prefer, log_base, input_desc,
                                         input, output_desc, output));
@@ -666,7 +666,7 @@ class MLUCnnl {
                   const cnnlTensorDescriptor_t output_desc, void* output);
 
   static void Log(const ExecutionContext& ctx,
-                  cnnlComputationPreference_t prefer,
+                  cnnlComputationPreference_t prefer, cnnlLogBase_t log_base,
                   const cnnlTensorDescriptor_t input_desc, const void* input,
                   const cnnlTensorDescriptor_t output_desc, void* output);
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import unittest
import sys
sys.path.append("..")
from op_test import OpTest
import paddle
import paddle.fluid as fluid
paddle.enable_static()
SEED = 2021


class TestActivation(OpTest):
def setUp(self):
self.set_mlu()
self.op_type = "exp"
self.init_dtype()
self.init_kernel_type()
self.python_api = paddle.exp
np.random.seed(2049)
x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
out = np.exp(x)
self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output_with_place(self.place)
def init_dtype(self):
self.dtype = np.float32
def init_kernel_type(self):
pass
def set_mlu(self):
self.__class__.use_mlu = True
self.place = paddle.MLUPlace(0)
        self.__class__.no_need_check_grad = True


class TestLog(TestActivation):
def setUp(self):
self.set_mlu()
self.op_type = "log"
self.python_api = paddle.log
self.init_dtype()
np.random.seed(1024)
x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
out = np.log(x)
self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
self.outputs = {'Out': out}
def test_error(self):
in1 = fluid.layers.data(name="in1",
shape=[11, 17],
append_batch_size=False,
dtype="int32")
in2 = fluid.layers.data(name="in2",
shape=[11, 17],
append_batch_size=False,
dtype="int64")
self.assertRaises(TypeError, fluid.layers.log, in1)
        self.assertRaises(TypeError, fluid.layers.log, in2)


class TestLog2(TestActivation):
def setUp(self):
self.set_mlu()
self.op_type = "log2"
self.python_api = paddle.log2
self.init_dtype()
x = np.random.uniform(1, 10, [11, 17]).astype(self.dtype)
out = np.log2(x)
self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
self.outputs = {'Out': out}
def test_error(self):
in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")
self.assertRaises(TypeError, paddle.log2, in1)
self.assertRaises(TypeError, paddle.log2, in2)
def test_api(self):
with paddle.static.program_guard(paddle.static.Program(),
paddle.static.Program()):
input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float32")
data_x = paddle.static.data(name="data_x",
shape=[11, 17],
dtype="float32")
out1 = paddle.log2(data_x)
exe = paddle.static.Executor(place=fluid.CPUPlace())
exe.run(paddle.static.default_startup_program())
res1 = exe.run(paddle.static.default_main_program(),
feed={"data_x": input_x},
fetch_list=[out1])
expected_res = np.log2(input_x)
self.assertTrue(np.allclose(res1, expected_res))
# dygraph
with fluid.dygraph.guard():
np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float32")
data_x = paddle.to_tensor(np_x)
z = paddle.log2(data_x)
np_z = z.numpy()
z_expected = np.array(np.log2(np_x))
np.savetxt("np_z.txt", np_z.flatten(), fmt="%.4f")
np.savetxt("z_expected.txt", z_expected.flatten(), fmt="%.4f")
            self.assertTrue(np.allclose(np_z, z_expected, atol=1e-6))


class TestLog10(TestActivation):
def setUp(self):
self.set_mlu()
self.op_type = "log10"
self.python_api = paddle.log10
self.init_dtype()
x = np.random.uniform(0.1, 1, [11, 17]).astype(self.dtype)
out = np.log10(x)
self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
self.outputs = {'Out': out}
def test_error(self):
in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")
self.assertRaises(TypeError, paddle.log10, in1)
self.assertRaises(TypeError, paddle.log10, in2)
def test_api(self):
with paddle.static.program_guard(paddle.static.Program(),
paddle.static.Program()):
input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float32")
data_x = paddle.static.data(name="data_x",
shape=[11, 17],
dtype="float32")
out1 = paddle.log10(data_x)
exe = paddle.static.Executor(place=paddle.CPUPlace())
exe.run(paddle.static.default_startup_program())
res1 = exe.run(paddle.static.default_main_program(),
feed={"data_x": input_x},
fetch_list=[out1])
expected_res = np.log10(input_x)
self.assertTrue(np.allclose(res1, expected_res))
# dygraph
with fluid.dygraph.guard():
np_x = np.random.uniform(0.1, 1, [11, 17]).astype("float32")
data_x = paddle.to_tensor(np_x)
z = paddle.log10(data_x)
np_z = z.numpy()
z_expected = np.array(np.log10(np_x))
            self.assertTrue(np.allclose(np_z, z_expected))


class TestLogHalf(TestLog):
    def init_dtype(self):
        self.dtype = np.float16

    def test_api(self):
        pass


class TestLog2Half(TestLog2):
    def init_dtype(self):
        self.dtype = np.float16

    def test_api(self):
        pass


class TestLog10Half(TestLog10):
    def init_dtype(self):
        self.dtype = np.float16

    def test_api(self):
        pass


if __name__ == '__main__':
    unittest.main()