PaddlePaddle / hapi
Commit 837eff99
Authored May 13, 2020 by guosheng
Rename Model.self as model in test_text.py
test=develop
Parent: 503d40a7
Showing 1 changed file with 63 additions and 63 deletions.

hapi/tests/test_text.py (+63, -63)
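Note on the rename: ModuleApiTest declares model_init and model_forward as @staticmethod hooks whose first argument receives the model instance, because the harness later installs them as methods of a generated Model class. Calling that argument `self` made them look like ordinary instance methods; `model` says what the argument actually is. Below is a minimal sketch of the binding pattern, with a hypothetical harness (all names other than model_init/model_forward are illustrative, not the actual ModuleApiTest internals):

# Hypothetical sketch of the hook-binding pattern, for illustration only.
class HarnessExample:
    @staticmethod
    def model_init(model, hidden_size):
        # Runs as the generated model's __init__ body; `model` is the model
        # instance under construction, not the test case.
        model.hidden_size = hidden_size

    @staticmethod
    def model_forward(model, inputs):
        # Runs as the generated model's forward method.
        return [x * model.hidden_size for x in inputs]

# Accessing a @staticmethod through the class yields the plain function,
# so the hooks can be installed as ordinary methods of a new class.
GeneratedModel = type("GeneratedModel", (), {
    "__init__": HarnessExample.model_init,
    "forward": HarnessExample.model_forward,
})

m = GeneratedModel(hidden_size=3)
assert m.forward([1, 2]) == [3, 6]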
@@ -56,13 +56,13 @@ class ModuleApiTest(unittest.TestCase):
         return __impl__
 
     @staticmethod
-    def model_init(self, *args, **kwargs):
+    def model_init(model, *args, **kwargs):
         raise NotImplementedError(
             "model_init acts as `Model.__init__`, thus must implement it")
 
     @staticmethod
-    def model_forward(self, *args, **kwargs):
-        return self.module(*args, **kwargs)
+    def model_forward(model, *args, **kwargs):
+        return model.module(*args, **kwargs)
 
     def make_inputs(self):
         # TODO(guosheng): add default from `self.inputs`
@@ -118,7 +118,7 @@ class ModuleApiTest(unittest.TestCase):
 
 class TestBasicLSTM(ModuleApiTest):
     def setUp(self):
-        # TODO(guosheng): Change to big size. Currently s bigger hidden size for
+        # TODO(guosheng): Change to big size. Currently bigger hidden size for
         # LSTM would fail, the second static graph run might get diff output
         # with others.
         shape = (2, 4, 16)
@@ -128,8 +128,8 @@ class TestBasicLSTM(ModuleApiTest):
         self.param_states = {}
 
     @staticmethod
-    def model_init(self, input_size, hidden_size):
-        self.lstm = RNN(
+    def model_init(model, input_size, hidden_size):
+        model.lstm = RNN(
             BasicLSTMCell(
                 input_size,
                 hidden_size,
@@ -137,8 +137,8 @@ class TestBasicLSTM(ModuleApiTest):
                 bias_attr=fluid.ParamAttr(name="lstm_bias")))
 
     @staticmethod
-    def model_forward(self, inputs):
-        return self.lstm(inputs)[0]
+    def model_forward(model, inputs):
+        return model.lstm(inputs)[0]
 
     def make_inputs(self):
         inputs = [
@@ -162,12 +162,12 @@ class TestBasicGRU(ModuleApiTest):
         self.param_states = {}
 
     @staticmethod
-    def model_init(self, input_size, hidden_size):
-        self.gru = RNN(BasicGRUCell(input_size, hidden_size))
+    def model_init(model, input_size, hidden_size):
+        model.gru = RNN(BasicGRUCell(input_size, hidden_size))
 
     @staticmethod
-    def model_forward(self, inputs):
-        return self.gru(inputs)[0]
+    def model_forward(model, inputs):
+        return model.gru(inputs)[0]
 
     def make_inputs(self):
         inputs = [
@@ -220,8 +220,8 @@ class TestBeamSearch(ModuleApiTest):
             decoder, max_step_num=max_step_num, is_test=True)
 
     @staticmethod
-    def model_forward(self, init_hidden, init_cell):
-        return self.beam_search_decoder([init_hidden, init_cell])[0]
+    def model_forward(model, init_hidden, init_cell):
+        return model.beam_search_decoder([init_hidden, init_cell])[0]
 
     def make_inputs(self):
         inputs = [
@@ -258,7 +258,7 @@ class TestTransformerEncoder(ModuleApiTest):
         self.param_states = {}
 
     @staticmethod
-    def model_init(self,
+    def model_init(model,
                    n_layer,
                    n_head,
                    d_key,
@@ -271,14 +271,14 @@ class TestTransformerEncoder(ModuleApiTest):
                    preprocess_cmd="n",
                    postprocess_cmd="da",
                    ffn_fc1_act="relu"):
-        self.encoder = TransformerEncoder(
+        model.encoder = TransformerEncoder(
             n_layer, n_head, d_key, d_value, d_model, d_inner_hid,
             prepostprocess_dropout, attention_dropout, relu_dropout,
             preprocess_cmd, postprocess_cmd, ffn_fc1_act)
 
     @staticmethod
-    def model_forward(self, enc_input, attn_bias):
-        return self.encoder(enc_input, attn_bias)
+    def model_forward(model, enc_input, attn_bias):
+        return model.encoder(enc_input, attn_bias)
 
     def make_inputs(self):
         inputs = [
@@ -321,7 +321,7 @@ class TestTransformerDecoder(TestTransformerEncoder):
         self.param_states = {}
 
     @staticmethod
-    def model_init(self,
+    def model_init(model,
                    n_layer,
                    n_head,
                    d_key,
@@ -333,20 +333,20 @@ class TestTransformerDecoder(TestTransformerEncoder):
                    relu_dropout=0.1,
                    preprocess_cmd="n",
                    postprocess_cmd="da"):
-        self.decoder = TransformerDecoder(
+        model.decoder = TransformerDecoder(
             n_layer, n_head, d_key, d_value, d_model, d_inner_hid,
             prepostprocess_dropout, attention_dropout, relu_dropout,
             preprocess_cmd, postprocess_cmd)
 
     @staticmethod
-    def model_forward(self,
+    def model_forward(model,
                       dec_input,
                       enc_output,
                       self_attn_bias,
                       cross_attn_bias,
                       caches=None):
-        return self.decoder(dec_input, enc_output, self_attn_bias,
-                            cross_attn_bias, caches)
+        return model.decoder(dec_input, enc_output, self_attn_bias,
+                             cross_attn_bias, caches)
 
     def make_inputs(self):
         inputs = [
@@ -394,7 +394,7 @@ class TestTransformerBeamSearchDecoder(ModuleApiTest):
         self.param_states = {}
 
     @staticmethod
-    def model_init(self,
+    def model_init(model,
                    vocab_size,
                    n_layer,
                    n_head,
@@ -411,7 +411,7 @@ class TestTransformerBeamSearchDecoder(ModuleApiTest):
                    eos_id=1,
                    beam_size=4,
                    max_step_num=20):
-        self.beam_size = beam_size
+        model.beam_size = beam_size
 
         def embeder_init(self, size):
             Layer.__init__(self)
@@ -423,13 +423,13 @@ class TestTransformerBeamSearchDecoder(ModuleApiTest):
             })
 
         embedder = Embedder(size=[vocab_size, d_model])
         output_layer = Linear(d_model, vocab_size)
-        self.decoder = TransformerDecoder(
+        model.decoder = TransformerDecoder(
             n_layer, n_head, d_key, d_value, d_model, d_inner_hid,
             prepostprocess_dropout, attention_dropout, relu_dropout,
             preprocess_cmd, postprocess_cmd)
-        transformer_cell = TransformerCell(self.decoder, embedder,
+        transformer_cell = TransformerCell(model.decoder, embedder,
                                            output_layer)
-        self.beam_search_decoder = DynamicDecode(
+        model.beam_search_decoder = DynamicDecode(
             TransformerBeamSearchDecoder(
                 transformer_cell, bos_id,
@@ -440,14 +440,14 @@ class TestTransformerBeamSearchDecoder(ModuleApiTest):
             is_test=True)
 
     @staticmethod
-    def model_forward(self, enc_output, trg_src_attn_bias):
-        caches = self.decoder.prepare_incremental_cache(enc_output)
+    def model_forward(model, enc_output, trg_src_attn_bias):
+        caches = model.decoder.prepare_incremental_cache(enc_output)
         enc_output = TransformerBeamSearchDecoder.tile_beam_merge_with_batch(
-            enc_output, self.beam_size)
+            enc_output, model.beam_size)
         trg_src_attn_bias = TransformerBeamSearchDecoder.tile_beam_merge_with_batch(
-            trg_src_attn_bias, self.beam_size)
-        static_caches = self.decoder.prepare_static_cache(enc_output)
-        rs, _ = self.beam_search_decoder(
+            trg_src_attn_bias, model.beam_size)
+        static_caches = model.decoder.prepare_static_cache(enc_output)
+        rs, _ = model.beam_search_decoder(
             inits=caches,
             enc_output=enc_output,
             trg_src_attn_bias=trg_src_attn_bias,
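For context on the hunk above: tile_beam_merge_with_batch expands a batch-major tensor so that each batch entry is repeated once per beam, taking shape [batch_size, ...] to [batch_size * beam_size, ...], which lines the encoder outputs up with the beam-expanded decoder state. A NumPy sketch of that semantics follows (an assumption for illustration; the real helper operates on Paddle tensors):

import numpy as np

def tile_beam_merge_with_batch_sketch(t, beam_size):
    # Repeat each batch entry beam_size times along axis 0:
    # [batch_size, ...] -> [batch_size * beam_size, ...].
    return np.repeat(t, beam_size, axis=0)

enc_output = np.arange(6, dtype="float32").reshape(2, 3)  # batch_size=2
tiled = tile_beam_merge_with_batch_sketch(enc_output, beam_size=4)
assert tiled.shape == (8, 3)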
@@ -483,7 +483,7 @@ class TestSequenceTagging(ModuleApiTest):
         self.param_states = {}
 
     @staticmethod
-    def model_init(self,
+    def model_init(model,
                    vocab_size,
                    num_labels,
                    word_emb_dim=128,
@@ -492,13 +492,13 @@ class TestSequenceTagging(ModuleApiTest):
                    crf_learning_rate=0.1,
                    bigru_num=2,
                    init_bound=0.1):
-        self.tagger = SequenceTagging(vocab_size, num_labels, word_emb_dim,
-                                      grnn_hidden_dim, emb_learning_rate,
-                                      crf_learning_rate, bigru_num, init_bound)
+        model.tagger = SequenceTagging(vocab_size, num_labels, word_emb_dim,
+                                       grnn_hidden_dim, emb_learning_rate,
+                                       crf_learning_rate, bigru_num, init_bound)
 
     @staticmethod
-    def model_forward(self, word, lengths, target=None):
-        return self.tagger(word, lengths, target)
+    def model_forward(model, word, lengths, target=None):
+        return model.tagger(word, lengths, target)
 
     def make_inputs(self):
         inputs = [
@@ -535,13 +535,13 @@ class TestStackedRNN(ModuleApiTest):
         self.param_states = {}
 
     @staticmethod
-    def model_init(self, input_size, hidden_size, num_layers):
+    def model_init(model, input_size, hidden_size, num_layers):
         cells = [
             BasicLSTMCell(input_size, hidden_size),
             BasicLSTMCell(hidden_size, hidden_size)
         ]
         stacked_cell = StackedRNNCell(cells)
-        self.lstm = RNN(stacked_cell)
+        model.lstm = RNN(stacked_cell)
 
     @staticmethod
     def model_forward(self, inputs):
@@ -569,12 +569,12 @@ class TestLSTM(ModuleApiTest):
         self.param_states = {}
 
     @staticmethod
-    def model_init(self, input_size, hidden_size, num_layers):
-        self.lstm = LSTM(input_size, hidden_size, num_layers=num_layers)
+    def model_init(model, input_size, hidden_size, num_layers):
+        model.lstm = LSTM(input_size, hidden_size, num_layers=num_layers)
 
     @staticmethod
-    def model_forward(self, inputs):
-        return self.lstm(inputs)[0]
+    def model_forward(model, inputs):
+        return model.lstm(inputs)[0]
 
     def make_inputs(self):
         inputs = [
@@ -598,13 +598,13 @@ class TestBiLSTM(ModuleApiTest):
         self.param_states = {}
 
     @staticmethod
-    def model_init(self,
+    def model_init(model,
                    input_size,
                    hidden_size,
                    num_layers,
                    merge_mode="concat",
                    merge_each_layer=False):
-        self.bilstm = BidirectionalLSTM(
+        model.bilstm = BidirectionalLSTM(
             input_size,
             hidden_size,
             num_layers=num_layers,
@@ -612,8 +612,8 @@ class TestBiLSTM(ModuleApiTest):
             merge_each_layer=merge_each_layer)
 
     @staticmethod
-    def model_forward(self, inputs):
-        return self.bilstm(inputs)[0]
+    def model_forward(model, inputs):
+        return model.bilstm(inputs)[0]
 
     def make_inputs(self):
         inputs = [
@@ -641,12 +641,12 @@ class TestGRU(ModuleApiTest):
         self.param_states = {}
 
     @staticmethod
-    def model_init(self, input_size, hidden_size, num_layers):
-        self.gru = GRU(input_size, hidden_size, num_layers=num_layers)
+    def model_init(model, input_size, hidden_size, num_layers):
+        model.gru = GRU(input_size, hidden_size, num_layers=num_layers)
 
     @staticmethod
-    def model_forward(self, inputs):
-        return self.gru(inputs)[0]
+    def model_forward(model, inputs):
+        return model.gru(inputs)[0]
 
     def make_inputs(self):
         inputs = [
@@ -670,13 +670,13 @@ class TestBiGRU(ModuleApiTest):
         self.param_states = {}
 
     @staticmethod
-    def model_init(self,
+    def model_init(model,
                    input_size,
                    hidden_size,
                    num_layers,
                    merge_mode="concat",
                    merge_each_layer=False):
-        self.bigru = BidirectionalGRU(
+        model.bigru = BidirectionalGRU(
             input_size,
             hidden_size,
             num_layers=num_layers,
@@ -684,8 +684,8 @@ class TestBiGRU(ModuleApiTest):
             merge_each_layer=merge_each_layer)
 
     @staticmethod
-    def model_forward(self, inputs):
-        return self.bigru(inputs)[0]
+    def model_forward(model, inputs):
+        return model.bigru(inputs)[0]
 
     def make_inputs(self):
         inputs = [
@@ -713,8 +713,8 @@ class TestCNNEncoder(ModuleApiTest):
         self.param_states = {}
 
     @staticmethod
-    def model_init(self, num_channels, num_filters, num_layers):
-        self.cnn_encoder = CNNEncoder(
+    def model_init(model, num_channels, num_filters, num_layers):
+        model.cnn_encoder = CNNEncoder(
             num_layers=2,
             num_channels=num_channels,
             num_filters=num_filters,
@@ -722,8 +722,8 @@ class TestCNNEncoder(ModuleApiTest):
             pool_size=[7, 6])
 
     @staticmethod
-    def model_forward(self, inputs):
-        return self.cnn_encoder(inputs)
+    def model_forward(model, inputs):
+        return model.cnn_encoder(inputs)
 
     def make_inputs(self):
         inputs = [
@@ -734,7 +734,7 @@ class TestCNNEncoder(ModuleApiTest):
         ]
         return inputs
 
-    def test_check_output_merge0(self):
+    def test_check_output(self):
        self.check_output()