PaddlePaddle / DeepSpeech
Commit 244132c1
Authored Sep 07, 2021 by Hui Zhang
Parent 7e136d08

fix activation
Showing 2 changed files with 19 additions and 19 deletions (+19 −19):

- deepspeech/__init__.py (+0 −17)
- deepspeech/modules/activation.py (+19 −2)
deepspeech/__init__.py @ 244132c1:

```diff
@@ -351,20 +351,3 @@ if not hasattr(paddle.Tensor, 'tolist'):
     logger.warn(
         "register user tolist to paddle.Tensor, remove this when fixed!")
     setattr(paddle.Tensor, 'tolist', tolist)
-
-
-########### hcak paddle.nn #############
-class GLU(nn.Layer):
-    """Gated Linear Units (GLU) Layer"""
-
-    def __init__(self, dim: int=-1):
-        super().__init__()
-        self.dim = dim
-
-    def forward(self, xs):
-        return F.glu(xs, axis=self.dim)
-
-
-if not hasattr(paddle.nn, 'GLU'):
-    logger.warn("register user GLU to paddle.nn, remove this when fixed!")
-    setattr(paddle.nn, 'GLU', GLU)
```
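The deleted block had monkey-patched a GLU layer onto paddle.nn at import time (guarded by hasattr so a future Paddle release providing nn.GLU would take precedence); the class now lives in deepspeech/modules/activation.py instead, as the next diff shows. For context, here is a minimal sketch of what F.glu computes — illustration only, not part of the commit:

```python
# Sketch: F.glu splits the input in half along `axis` and
# gates one half with the sigmoid of the other.
import paddle
import paddle.nn.functional as F

x = paddle.randn([4, 8])                        # gated axis must have even size
a, b = paddle.split(x, 2, axis=-1)              # two halves of the last axis
manual = a * F.sigmoid(b)                       # a * sigmoid(b)
builtin = F.glu(x, axis=-1)                     # same result, shape [4, 4]
print(bool(paddle.allclose(manual, builtin)))   # True
```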
deepspeech/modules/activation.py @ 244132c1:

```diff
@@ -15,12 +15,13 @@ from collections import OrderedDict
 import paddle
 from paddle import nn
+from paddle.nn import functional as F
 
 from deepspeech.utils.log import Log
 
 logger = Log(__name__).getlog()
 
-__all__ = ["get_activation", "brelu", "LinearGLUBlock", "ConvGLUBlock"]
+__all__ = ["get_activation", "brelu", "LinearGLUBlock", "ConvGLUBlock", "GLU"]
 
 
 def brelu(x, t_min=0.0, t_max=24.0, name=None):
```
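Adding "GLU" to __all__ publishes the new class through the module's wildcard-import surface. A small illustration of the effect (hypothetical usage, assuming the deepspeech package is importable; not from the commit):

```python
# With "GLU" listed in __all__, star imports now pick the class up:
from deepspeech.modules.activation import *  # noqa: F401,F403

layer = GLU(dim=-1)  # gates the last axis by default
```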
```diff
@@ -30,6 +31,17 @@ def brelu(x, t_min=0.0, t_max=24.0, name=None):
     return x.maximum(t_min).minimum(t_max)
 
 
+class GLU(nn.Layer):
+    """Gated Linear Units (GLU) Layer"""
+
+    def __init__(self, dim: int=-1):
+        super().__init__()
+        self.dim = dim
+
+    def forward(self, xs):
+        return F.glu(xs, axis=self.dim)
+
+
 class LinearGLUBlock(nn.Layer):
     """A linear Gated Linear Units (GLU) block."""
```
```diff
@@ -133,13 +145,18 @@ def get_activation(act):
     """Return activation function."""
     # Lazy load to avoid unused import
     activation_funcs = {
+        "hardshrink": paddle.nn.Hardshrink,
+        "hardswish": paddle.nn.Hardswish,
         "hardtanh": paddle.nn.Hardtanh,
         "tanh": paddle.nn.Tanh,
         "relu": paddle.nn.ReLU,
+        "relu6": paddle.nn.ReLU6,
+        "leakyrelu": paddle.nn.LeakyReLU,
         "selu": paddle.nn.SELU,
         "swish": paddle.nn.Swish,
         "gelu": paddle.nn.GELU,
-        "brelu": brelu,
+        "glu": GLU,
+        "elu": paddle.nn.ELU,
     }
 
     return activation_funcs[act]()
```
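get_activation(act) looks the name up in the table and instantiates the result with no arguments, so every value must be constructible as value(). That is likely why the "brelu" entry was dropped: brelu is a plain function with a required tensor argument, so activation_funcs["brelu"]() would raise a TypeError, while "glu" now maps to the GLU class defined above with its default dim=-1. A usage sketch (not from the commit):

```python
from deepspeech.modules.activation import get_activation

act = get_activation("glu")    # returns GLU(dim=-1)
gelu = get_activation("gelu")  # returns paddle.nn.GELU()
```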