Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)

Commit 586f9429 (unverified)
Authored on May 30, 2022 by cambriconhsq; committed via GitHub on May 30, 2022
[MLU]add mlu kernel for log_softmax op (#43040)
Parent: 2d6dd55f
Showing 2 changed files with 180 additions and 8 deletions (+180 −8)
paddle/fluid/operators/softmax_op_mlu.cc: +17 −8
python/paddle/fluid/tests/unittests/mlu/test_log_softmax_op_mlu.py: +163 −0
paddle/fluid/operators/softmax_op_mlu.cc
```diff
@@ -19,7 +19,7 @@ limitations under the License. */
 namespace paddle {
 namespace operators {
 
-template <typename T>
+template <cnnlSoftmaxAlgorithm_t softmax_algo, typename T>
 class SoftmaxMLUKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
@@ -45,7 +45,7 @@ class SoftmaxMLUKernel : public framework::OpKernel<T> {
       regard_in_shape = {d1, d2, d3};
     }
 
-    static const cnnlSoftmaxAlgorithm_t algo = CNNL_SOFTMAX_ACCURATE;
+    static const cnnlSoftmaxAlgorithm_t algo = softmax_algo;
     MLUCnnlTensorDesc in_desc(cnnl_softmax_dims, regard_in_shape.data(),
                               ToCnnlDataType<T>());
     MLUCnnl::SoftmaxForward(ctx, algo, mode, NULL, in_desc.get(),
@@ -54,7 +54,7 @@ class SoftmaxMLUKernel : public framework::OpKernel<T> {
   }
 };
 
-template <typename T>
+template <cnnlSoftmaxAlgorithm_t softmax_algo, typename T>
 class SoftmaxGradMLUKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
@@ -82,7 +82,7 @@ class SoftmaxGradMLUKernel : public framework::OpKernel<T> {
       regard_out_shape = {d1, d2, d3};
     }
 
-    static const cnnlSoftmaxAlgorithm_t algo = CNNL_SOFTMAX_ACCURATE;
+    static const cnnlSoftmaxAlgorithm_t algo = softmax_algo;
     MLUCnnlTensorDesc out_desc(cnnl_softmax_dims, regard_out_shape.data(),
                                ToCnnlDataType<T>());
     MLUCnnl::SoftmaxBackward(ctx, algo, mode, out_desc.get(), GetBasePtr(out),
@@ -97,7 +97,16 @@ class SoftmaxGradMLUKernel : public framework::OpKernel<T> {
 namespace ops = paddle::operators;
 namespace plat = paddle::platform;
 
-REGISTER_OP_MLU_KERNEL(softmax, ops::SoftmaxMLUKernel<float>,
-                       ops::SoftmaxMLUKernel<plat::float16>);
-REGISTER_OP_MLU_KERNEL(softmax_grad, ops::SoftmaxGradMLUKernel<float>,
-                       ops::SoftmaxGradMLUKernel<paddle::platform::float16>);
+REGISTER_OP_MLU_KERNEL(
+    softmax, ops::SoftmaxMLUKernel<CNNL_SOFTMAX_ACCURATE, float>,
+    ops::SoftmaxMLUKernel<CNNL_SOFTMAX_ACCURATE, plat::float16>);
+REGISTER_OP_MLU_KERNEL(
+    softmax_grad, ops::SoftmaxGradMLUKernel<CNNL_SOFTMAX_ACCURATE, float>,
+    ops::SoftmaxGradMLUKernel<CNNL_SOFTMAX_ACCURATE,
+                              paddle::platform::float16>);
+REGISTER_OP_MLU_KERNEL(
+    log_softmax, ops::SoftmaxMLUKernel<CNNL_SOFTMAX_LOG, float>,
+    ops::SoftmaxMLUKernel<CNNL_SOFTMAX_ACCURATE, plat::float16>);
+REGISTER_OP_MLU_KERNEL(
+    log_softmax_grad, ops::SoftmaxGradMLUKernel<CNNL_SOFTMAX_LOG, float>,
+    ops::SoftmaxGradMLUKernel<CNNL_SOFTMAX_LOG, paddle::platform::float16>);
```
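The C++ change is a small generalization: the CNNL softmax algorithm becomes a template parameter, so the existing SoftmaxMLUKernel/SoftmaxGradMLUKernel pair now also serves log_softmax by instantiating with CNNL_SOFTMAX_LOG. The identity this relies on can be checked with a minimal numpy sketch (illustrative only, not the MLU code path):

```python
import numpy as np

x = np.random.uniform(0.1, 1.0, (2, 5)).astype(np.float32)

# Numerically stable softmax along the last axis.
shifted = x - x.max(axis=-1, keepdims=True)
softmax = np.exp(shifted) / np.exp(shifted).sum(axis=-1, keepdims=True)

# log_softmax is log(softmax(x)); selecting CNNL_SOFTMAX_LOG asks the
# same CNNL op to produce this variant directly.
log_softmax = shifted - np.log(np.exp(shifted).sum(axis=-1, keepdims=True))

assert np.allclose(np.log(softmax), log_softmax, atol=1e-6)
```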
python/paddle/fluid/tests/unittests/mlu/test_log_softmax_op_mlu.py (new file, mode 100644)
```python
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16
import paddle
import paddle.fluid.core as core
import paddle.nn.functional as F

np.random.seed(10)
paddle.enable_static()
```
```python
def ref_log_softmax(x):
    shiftx = (x - np.max(x))
    out = shiftx - np.log(np.exp(shiftx).sum())
    return out
```
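The max-shift in ref_log_softmax is the standard stability trick: subtracting np.max(x) cancels algebraically but keeps np.exp from overflowing. A quick standalone demonstration (assumes only numpy):

```python
import numpy as np

x = np.array([1000.0, 1001.0, 1002.0])  # large logits

# Naive formula: np.exp(1000.0) overflows to inf, so every entry
# collapses to -inf.
naive = x - np.log(np.exp(x).sum())

# Shifted formula: identical in exact arithmetic, finite in floats.
shiftx = x - np.max(x)
stable = shiftx - np.log(np.exp(shiftx).sum())

print(naive)   # [-inf -inf -inf]
print(stable)  # approx [-2.408 -1.408 -0.408]
```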
```python
def ref_log_softmax_grad(x, axis):
    if axis < 0:
        axis += len(x.shape)
    out = np.apply_along_axis(ref_log_softmax, axis, x)
    axis_dim = x.shape[axis]
    dout = np.full_like(x, fill_value=1. / x.size)
    dx = dout - np.exp(out) * dout.copy().sum(
        axis=axis, keepdims=True).repeat(
            axis_dim, axis=axis)
    return dx
```
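ref_log_softmax_grad is the analytic vector-Jacobian product of log-softmax: for out = x - logsumexp(x), dx = dout - exp(out) * sum(dout) along the softmax axis, and dout = 1/x.size is the gradient of out.mean(). A sketch of a finite-difference cross-check against that scalar loss (fd_check is a name invented here; it reuses the two reference helpers above):

```python
import numpy as np

def fd_check(x, axis=-1, eps=1e-5):
    # Central differences of the scalar loss out.mean(), whose gradient
    # w.r.t. out is exactly the dout used in ref_log_softmax_grad.
    def loss(v):
        return np.apply_along_axis(ref_log_softmax, axis, v).mean()

    analytic = ref_log_softmax_grad(x, axis)
    numeric = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'])
    for _ in it:
        i = it.multi_index
        xp, xm = x.copy(), x.copy()
        xp[i] += eps
        xm[i] -= eps
        numeric[i] = (loss(xp) - loss(xm)) / (2 * eps)
    assert np.allclose(analytic, numeric, atol=1e-4)

fd_check(np.random.uniform(0.1, 1.0, (3, 4)), axis=1)  # float64 input
```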
```python
class TestLogSoftmaxOp(OpTest):
    def setUp(self):
        self.op_type = 'log_softmax'
        self.set_mlu()
        self.python_api = F.log_softmax
        self.dtype = 'float32'
        self.shape = [2, 3, 4, 5]
        self.axis = -1
        self.set_attrs()

        x = np.random.uniform(0.1, 1., self.shape).astype(self.dtype)
        out = np.apply_along_axis(ref_log_softmax, self.axis, x)
        self.x_grad = ref_log_softmax_grad(x, self.axis)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {'axis': self.axis}

    def set_attrs(self):
        pass

    def set_mlu(self):
        self.__class__.use_mlu = True
        self.place = paddle.device.MLUPlace(0)

    def test_check_output(self):
        self.check_output_with_place(self.place)

    def test_check_grad(self):
        self.check_grad_with_place(self.place, ['X'], ['Out'],
                                   user_defined_grads=[self.x_grad])


class TestLogSoftmaxShape(TestLogSoftmaxOp):
    def set_attrs(self):
        self.shape = [12, 10]


class TestLogSoftmaxAxis(TestLogSoftmaxOp):
    def set_attrs(self):
        self.axis = 1
```
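The two subclasses vary one attribute each through the set_attrs hook; further coverage follows the same pattern. For instance, a hypothetical extra case (not part of this commit) exercising the leading axis would be:

```python
class TestLogSoftmaxAxis0(TestLogSoftmaxOp):
    def set_attrs(self):
        self.axis = 0  # hypothetical: log_softmax over the first dimension
```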
```python
class TestNNLogSoftmaxAPI(unittest.TestCase):
    def setUp(self):
        self.set_mlu()
        self.x_shape = [2, 3, 4, 5]
        self.x = np.random.uniform(-1., 1., self.x_shape).astype(np.float32)

    def set_mlu(self):
        self.__class__.use_mlu = True
        self.place = paddle.device.MLUPlace(0)

    def check_api(self, axis=-1):
        ref_out = np.apply_along_axis(ref_log_softmax, axis, self.x)

        logsoftmax = paddle.nn.LogSoftmax(axis)

        # test static api
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data(name='x', shape=self.x_shape)
            y = logsoftmax(x)
            exe = paddle.static.Executor(self.place)
            out = exe.run(feed={'x': self.x}, fetch_list=[y])
        self.assertTrue(np.allclose(out[0], ref_out))

        # test dygraph api
        paddle.disable_static()
        x = paddle.to_tensor(self.x)
        y = logsoftmax(x)
        self.assertTrue(np.allclose(y.numpy(), ref_out))
        paddle.enable_static()

    def test_check_api(self):
        for axis in [-1, 1]:
            self.check_api(axis)
```
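TestNNLogSoftmaxAPI goes through the paddle.nn.LogSoftmax layer; the class below goes through paddle.nn.functional.log_softmax. In dygraph mode the two entry points agree, as this minimal standalone sketch shows (it runs on the default device; the tests themselves pin execution to paddle.device.MLUPlace(0)):

```python
import numpy as np
import paddle
import paddle.nn.functional as F

paddle.disable_static()
x = paddle.to_tensor(
    np.random.uniform(-1., 1., [2, 3, 4, 5]).astype('float32'))

layer_out = paddle.nn.LogSoftmax(axis=-1)(x)  # layer entry point
func_out = F.log_softmax(x, axis=-1)          # functional entry point

assert np.allclose(layer_out.numpy(), func_out.numpy())
paddle.enable_static()
```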
```python
class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase):
    def setUp(self):
        self.set_mlu()
        self.x_shape = [2, 3, 4, 5]
        self.x = np.random.uniform(-1, 1, self.x_shape).astype(np.float32)

    def set_mlu(self):
        self.__class__.use_mlu = True
        self.place = paddle.device.MLUPlace(0)

    def check_api(self, axis=-1, dtype=None):
        x = self.x.copy()
        if dtype is not None:
            x = x.astype(dtype)
        ref_out = np.apply_along_axis(ref_log_softmax, axis, x)
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data(name='x', shape=self.x_shape)
            y = F.log_softmax(x, axis, dtype)
            exe = paddle.static.Executor(self.place)
            out = exe.run(feed={'x': self.x}, fetch_list=[y])
        self.assertTrue(np.allclose(out[0], ref_out))

        paddle.disable_static()
        x = paddle.to_tensor(self.x)
        y = F.log_softmax(x, axis, dtype)
        self.assertTrue(np.allclose(y.numpy(), ref_out), True)
        paddle.enable_static()

    def test_check_api(self):
        for axis in [-1, 1]:
            self.check_api(axis)
        self.check_api(-1, 'float32')

    def test_errors(self):
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data(name='X1', shape=[100], dtype='int32')
            self.assertRaises(TypeError, F.log_softmax, x)

            x = paddle.fluid.data(name='X2', shape=[100], dtype='float32')
            self.assertRaises(TypeError, F.log_softmax, x, dtype='int32')


if __name__ == "__main__":
    unittest.main()
```