PaddlePaddle / PaddleSlim

Commit 44e359c4 (unverified)
Authored by Bai Yifan on Jun 05, 2020; committed via GitHub on Jun 05, 2020.
split two text input (#336)

Parent: e4d61e5e

Showing 3 changed files with 54 additions and 20 deletions (+54 -20):

  demo/bert/train_cell_base.py                                              +4  -3
  paddleslim/nas/darts/search_space/conv_bert/model/bert.py                 +39 -9
  paddleslim/nas/darts/search_space/conv_bert/model/transformer_encoder.py  +11 -8
demo/bert/train_cell_base.py (mode 100644 → 100755)
@@ -54,8 +54,7 @@ def train_one_epoch(model, architect, train_loader, valid_loader, optimizer,
         else:
             loss.backward()
 
-        grad_clip = fluid.dygraph_grad_clip.GradClipByGlobalNorm(5.0)
-        optimizer.minimize(loss, grad_clip=grad_clip)
+        optimizer.minimize(loss)
         model.clear_gradients()
 
         batch_size = train_data[0].shape[0]
@@ -161,11 +160,13 @@ def main():
         if p.name not in [a.name for a in model.arch_parameters()]
     ]
 
+    clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=5.0)
     optimizer = fluid.optimizer.MomentumOptimizer(
         learning_rate,
         0.9,
         regularization=fluid.regularizer.L2DecayRegularizer(3e-4),
-        parameter_list=model_parameters)
+        parameter_list=model_parameters,
+        grad_clip=clip)
 
     train_loader = fluid.io.DataLoader.from_generator(
         capacity=1024,
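Taken together, the two hunks above migrate gradient clipping in the demo from the deprecated minimize-time API (a `fluid.dygraph_grad_clip.GradClipByGlobalNorm` object passed to `optimizer.minimize(loss, grad_clip=...)`) to the construction-time `grad_clip` argument of the optimizer. Below is a minimal self-contained sketch of the new pattern, assuming a Paddle 1.8-era dygraph environment; the `Linear` layer and random batch are illustrative stand-ins, not part of the commit.

import numpy as np
import paddle.fluid as fluid

# Minimal sketch of the construction-time clipping pattern adopted above.
with fluid.dygraph.guard():
    model = fluid.dygraph.Linear(16, 4)           # illustrative layer
    clip = fluid.clip.GradientClipByGlobalNorm(clip_norm=5.0)
    optimizer = fluid.optimizer.MomentumOptimizer(
        learning_rate=0.01,
        momentum=0.9,
        regularization=fluid.regularizer.L2DecayRegularizer(3e-4),
        parameter_list=model.parameters(),
        grad_clip=clip)                           # clipping configured once, here

    x = fluid.dygraph.to_variable(
        np.random.rand(8, 16).astype('float32'))  # dummy batch
    loss = fluid.layers.reduce_mean(model(x))
    loss.backward()
    optimizer.minimize(loss)                      # no grad_clip kwarg anymore
    model.clear_gradients()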
paddleslim/nas/darts/search_space/conv_bert/model/bert.py (mode 100644 → 100755)
@@ -113,16 +113,46 @@ class BertModelLayer(Layer):
         """
         forward
         """
-        src_emb = self._src_emb(src_ids)
-        pos_emb = self._pos_emb(position_ids)
-        sent_emb = self._sent_emb(sentence_ids)
-
-        emb_out = src_emb + pos_emb
-        emb_out = emb_out + sent_emb
-
-        emb_out = self._emb_fac(emb_out)
+        ids = np.squeeze(src_ids.numpy())
+        sids = np.squeeze(sentence_ids.numpy())
+        batchsize = ids.shape[0]
+
+        ids_0 = ids[((sids == 0) & (ids != 0))]
+        seqlen_0 = ((sids == 0) & (ids != 0)).astype(np.int64).sum(1)
+        y_0 = np.concatenate([np.arange(s) for s in seqlen_0])
+        x_0 = np.concatenate(
+            [np.ones([s], dtype=np.int64) * i for i, s in enumerate(seqlen_0)])
+        ids0 = np.zeros([batchsize, seqlen_0.max()], dtype=np.int64)
+        ids0[(x_0, y_0)] = ids_0
+
+        ids_1 = ids[(sids == 1) & (ids != 0)]
+        seqlen_1 = ((sids == 1) & (ids != 0)).astype(np.int64).sum(1)
+        y_1 = np.concatenate([np.arange(s) for s in seqlen_1])
+        x_1 = np.concatenate(
+            [np.ones([s], dtype=np.int64) * i for i, s in enumerate(seqlen_1)])
+        ids1 = np.zeros([batchsize, seqlen_1.max()], dtype=np.int64)
+        ids1[(x_1, y_1)] = ids_1
+
+        msl = max(seqlen_0.max(), seqlen_1.max())
+        ids0 = np.pad(ids0, [[0, 0], [0, msl - seqlen_0.max()]],
+                      mode='constant')
+        ids1 = np.pad(ids1, [[0, 0], [0, msl - seqlen_1.max()]],
+                      mode='constant')
+
+        ids0 = fluid.dygraph.to_variable(ids0)
+        ids1 = fluid.dygraph.to_variable(ids1)
+
+        src_emb_0 = self._src_emb(ids0)
+        src_emb_1 = self._src_emb(ids1)
+        emb_out_0 = self._emb_fac(src_emb_0)
+        emb_out_1 = self._emb_fac(src_emb_1)
 
         # (bs, seq_len, 768)
-        enc_output = self._encoder(emb_out, flops=flops, model_size=model_size)
+        enc_output = self._encoder(
+            emb_out_0, emb_out_1, flops=flops, model_size=model_size)
 
         return enc_output
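The core of this change is a NumPy mask-and-scatter trick: the packed `src_ids` tensor (two sentences per row, with `sentence_ids` of 0/1 marking membership and token id 0 as padding) is split into two left-aligned, zero-padded id matrices that are then embedded independently. Here is a self-contained toy demo under those assumptions; the `split` helper is ours, added for illustration, whereas the diff inlines the same logic twice.

import numpy as np

# Two packed examples; sids marks sentence membership, token id 0 is padding.
ids = np.array([[11, 12, 13, 21, 22, 0],
                [14, 15, 23, 24, 25, 26]])   # token ids, batch of 2
sids = np.array([[0, 0, 0, 1, 1, 1],
                 [0, 0, 1, 1, 1, 1]])        # 0 = sentence A, 1 = sentence B

def split(mask):
    # gather masked tokens, then scatter them left-aligned into a fresh
    # zero-padded matrix, one row per example (hypothetical helper)
    seqlen = mask.astype(np.int64).sum(1)
    y = np.concatenate([np.arange(s) for s in seqlen])
    x = np.concatenate(
        [np.ones([s], dtype=np.int64) * i for i, s in enumerate(seqlen)])
    out = np.zeros([ids.shape[0], seqlen.max()], dtype=np.int64)
    out[(x, y)] = ids[mask]
    return out, seqlen

ids0, seqlen_0 = split((sids == 0) & (ids != 0))
ids1, seqlen_1 = split((sids == 1) & (ids != 0))

# pad both to the longer of the two maxima, as the forward pass does
msl = max(seqlen_0.max(), seqlen_1.max())
ids0 = np.pad(ids0, [[0, 0], [0, msl - seqlen_0.max()]], mode='constant')
ids1 = np.pad(ids1, [[0, 0], [0, msl - seqlen_1.max()]], mode='constant')

print(ids0)   # [[11 12 13  0]  [14 15  0  0]]
print(ids1)   # [[21 22  0  0]  [23 24 25 26]]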
paddleslim/nas/darts/search_space/conv_bert/model/transformer_encoder.py (mode 100644 → 100755)
@@ -278,19 +278,22 @@ class EncoderLayer(Layer):
                 bias_attr=ParamAttr(initializer=MSRA()))
 
         self.use_fixed_gumbel = use_fixed_gumbel
-        self.gumbel_alphas = gumbel_softmax(self.alphas)
+        self.gumbel_alphas = gumbel_softmax(self.alphas).detach()
 
-    def forward(self, enc_input, flops=[], model_size=[]):
-        tmp = fluid.layers.reshape(
-            enc_input, [-1, 1, enc_input.shape[1], enc_input.shape[2]])
+    def forward(self, enc_input_0, enc_input_1, flops=[], model_size=[]):
+        alphas = self.gumbel_alphas if self.use_fixed_gumbel else gumbel_softmax(
+            self.alphas)
+
+        s0 = fluid.layers.reshape(
+            enc_input_0, [-1, 1, enc_input_0.shape[1], enc_input_0.shape[2]])
+        s1 = fluid.layers.reshape(
+            enc_input_1, [-1, 1, enc_input_1.shape[1], enc_input_1.shape[2]])
         # (bs, 1, seq_len, hidden_size)
 
-        tmp = self.stem(tmp)
+        s0 = self.stem(s0)
+        s1 = self.stem(s1)
         # (bs, n_channel, seq_len, 1)
 
-        alphas = self.gumbel_alphas if self.use_fixed_gumbel else gumbel_softmax(
-            self.alphas)
-        s0 = s1 = tmp
         for i in range(self._n_layer):
             s0, s1 = s1, self._cells[i](s0, s1, alphas)
         # (bs, n_channel, seq_len, 1)
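After this change the two sentence streams, rather than one duplicated tensor, seed the DARTS-style cell recurrence, and the fixed Gumbel sample is detached so no gradient flows into `alphas` when the architecture is frozen. A hedged NumPy sketch of the dataflow only: `DummyCell` is our stand-in for `self._cells[i]`, which in reality mixes candidate ops weighted by `alphas`.

import numpy as np

class DummyCell:
    # Illustrative stand-in for a searched cell: consumes the two
    # previous states and produces the next one.
    def __call__(self, s0, s1, alphas):
        return (s0 + s1) / 2.0    # a real cell applies alpha-weighted ops

n_layer = 3                       # plays the role of self._n_layer
cells = [DummyCell() for _ in range(n_layer)]
alphas = None                     # placeholder for the op weights

s0 = np.ones([2, 4])              # stem(enc_input_0)
s1 = np.zeros([2, 4])             # stem(enc_input_1)
for i in range(n_layer):
    s0, s1 = s1, cells[i](s0, s1, alphas)   # sliding window over states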