Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)
Commit ffd35908 (unverified)
Authored Sep 20, 2022 by houj04; committed via GitHub on Sep 20, 2022
[XPU] update xdnn activations. (#46246)
Parent: 3b89e7c0
Showing 2 changed files with 35 additions and 10 deletions (+35, −10)
cmake/external/xpu.cmake: +2, −2
paddle/phi/kernels/xpu/activation_kernel.cc: +33, −8
cmake/external/xpu.cmake

@@ -10,7 +10,7 @@ set(XPU_RT_LIB_NAME "libxpurt.so")
 if(NOT DEFINED XPU_BASE_URL)
   set(XPU_BASE_URL_WITHOUT_DATE
       "https://baidu-kunlun-product.cdn.bcebos.com/KL-SDK/klsdk-dev")
-  set(XPU_BASE_URL "${XPU_BASE_URL_WITHOUT_DATE}/20220907")
+  set(XPU_BASE_URL "${XPU_BASE_URL_WITHOUT_DATE}/20220919")
 else()
   set(XPU_BASE_URL "${XPU_BASE_URL}")
 endif()

@@ -19,7 +19,7 @@ endif()
 if(NOT DEFINED XPU_XDNN_BASE_URL)
   set(XPU_XDNN_BASE_URL_WITHOUT_DATE
       "https://klx-sdk-release-public.su.bcebos.com/xdnn/dev")
-  set(XPU_XDNN_BASE_URL "${XPU_XDNN_BASE_URL_WITHOUT_DATE}/20220907")
+  set(XPU_XDNN_BASE_URL "${XPU_XDNN_BASE_URL_WITHOUT_DATE}/20220919")
 else()
   set(XPU_XDNN_BASE_URL "${XPU_XDNN_BASE_URL}")
 endif()
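The only functional change in this file is the SDK snapshot date, 20220907 → 20220919. Judging from the wrapper changes in paddle/phi/kernels/xpu/activation_kernel.cc below, the newer XDNN snapshot's activation entry points take two extra scale pointers (called max_x and max_y in the new comments). As a reading aid, here is a minimal C++ sketch of the callable shapes involved, written as std::function aliases copied from the wrappers below; the alias names and the stand-in xpu::Context declaration are illustrative, not SDK symbols.

    #include <functional>

    namespace xpu { struct Context; }  // stand-in for the real XDNN context type

    // Shape removed below (old snapshot): (ctx, x, y, len, attr)
    template <typename XPUType>
    using OldOneAttrActFn =
        std::function<int(xpu::Context*, const XPUType*, XPUType*, int, float)>;

    // Shapes added below (20220919 snapshot): trailing const float* max_x, float* max_y
    template <typename XPUType>
    using NewActFn = std::function<int(
        xpu::Context*, const XPUType*, XPUType*, int, const float*, float*)>;

    template <typename XPUType>
    using NewOneAttrActFn = std::function<int(
        xpu::Context*, const XPUType*, XPUType*, int, float, const float*, float*)>;

Both new wrappers below pass nullptr for the two scale pointers, so the scaled path is not exercised yet.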
paddle/phi/kernels/xpu/activation_kernel.cc

@@ -82,18 +82,43 @@ int xpu_activation_func(
 }
 
 template <typename Context, typename T, typename XPUType>
-int xpu_activation_1attr_func(
+int xpu_activation_func_with_max_x_y(
     const Context& dev_ctx,
     const DenseTensor& x,
     DenseTensor* out,
-    float attr,
-    std::function<int(xpu::Context*, const XPUType*, XPUType*, int, float)>
+    std::function<
+        int(xpu::Context*, const XPUType*, XPUType*, int, const float*, float*)>
         func) {
+  // does not support "const float* max_x, float* max_y" now
   int r = func(dev_ctx.x_context(),
                reinterpret_cast<const XPUType*>(x.data<T>()),
                reinterpret_cast<XPUType*>(out->data<T>()),
                x.numel(),
-               attr);
+               nullptr,
+               nullptr);
   return r;
 }
+
+template <typename Context, typename T, typename XPUType>
+int xpu_activation_1attr_func(
+    const Context& dev_ctx,
+    const DenseTensor& x,
+    DenseTensor* out,
+    float attr,
+    std::function<int(
+        xpu::Context*, const XPUType*, XPUType*, int, float, const float*, float*)>
+        func) {
+  // does not support "const float* max_x, float* max_y" now
+  int r = func(dev_ctx.x_context(),
+               reinterpret_cast<const XPUType*>(x.data<T>()),
+               reinterpret_cast<XPUType*>(out->data<T>()),
+               x.numel(),
+               attr,
+               nullptr,
+               nullptr);
+  return r;
+}
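The remaining hunks in this file switch the no-attribute activations (hard_swish, relu6, sigmoid, tanh) from xpu_activation_func to the new xpu_activation_func_with_max_x_y wrapper. Condensed into one place, the pattern each call site follows looks roughly like the sketch below; it relies on declarations already present in this file (funcs::BaseActivationFunctor, XPUTypeTrait, PADDLE_ENFORCE_XDNN_SUCCESS), the functor name is a placeholder, and the placement of the XPUType alias is assumed from context, while the wrapper call itself is copied from the tanh hunk.

    // Sketch of the call-site pattern used by the functors changed below.
    // "XPUExampleFunctor" is a placeholder, not one of the real functors.
    template <typename T>
    struct XPUExampleFunctor : public funcs::BaseActivationFunctor<T> {
      using XPUType = typename XPUTypeTrait<T>::Type;

      template <typename Context>
      void operator()(const Context& dev_ctx,
                      const DenseTensor& x,
                      DenseTensor* out) const {
        // Forward the XDNN op; max_x / max_y are filled with nullptr inside
        // the wrapper for now.
        int r = xpu_activation_func_with_max_x_y<Context, T, XPUType>(
            dev_ctx, x, out, xpu::tanh<XPUType>);
        PADDLE_ENFORCE_XDNN_SUCCESS(r, "tanh");
      }
    };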
@@ -213,7 +238,7 @@ struct XPUHardSwishFunctor : public funcs::BaseActivationFunctor<T> {
         offset,
         3.0f,
         errors::External("Not support offset [%f] in XPU", offset));
-    int r = xpu_activation_func<Context, T, XPUType>(
+    int r = xpu_activation_func_with_max_x_y<Context, T, XPUType>(
         dev_ctx, x, out, xpu::hard_swish<XPUType>);
     PADDLE_ENFORCE_XDNN_SUCCESS(r, "hard_swish");
   }
@@ -259,7 +284,7 @@ struct XPURelu6Functor : public funcs::BaseActivationFunctor<T> {
   void operator()(const Context& dev_ctx,
                   const DenseTensor& x,
                   DenseTensor* out) const {
-    int r = xpu_activation_func<Context, T, XPUType>(
+    int r = xpu_activation_func_with_max_x_y<Context, T, XPUType>(
         dev_ctx, x, out, xpu::relu6<XPUType>);
     PADDLE_ENFORCE_XDNN_SUCCESS(r, "relu6");
   }
@@ -272,7 +297,7 @@ struct XPUSigmoidFunctor : public funcs::BaseActivationFunctor<T> {
   void operator()(const Context& dev_ctx,
                   const DenseTensor& x,
                   DenseTensor* out) const {
-    int r = xpu_activation_func<Context, T, XPUType>(
+    int r = xpu_activation_func_with_max_x_y<Context, T, XPUType>(
         dev_ctx, x, out, xpu::sigmoid<XPUType>);
     PADDLE_ENFORCE_XDNN_SUCCESS(r, "sigmoid");
   }
@@ -363,7 +388,7 @@ struct XPUTanhFunctor : public funcs::BaseActivationFunctor<T> {
   void operator()(const Context& dev_ctx,
                   const DenseTensor& x,
                   DenseTensor* out) const {
-    int r = xpu_activation_func<Context, T, XPUType>(
+    int r = xpu_activation_func_with_max_x_y<Context, T, XPUType>(
         dev_ctx, x, out, xpu::tanh<XPUType>);
     PADDLE_ENFORCE_XDNN_SUCCESS(r, "tanh");
   }