BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle, in sync with upstream)
Commit b976ba3e
Authored on Dec 17, 2019 by Youwei Song; committed by hong on Dec 17, 2019

fix unittests (#21786)

test=develop

Parent: 73e97d39
Showing 3 changed files with 14 additions and 25 deletions (+14, -25)
python/paddle/fluid/tests/unittests/test_imperative_lod_tensor_to_selected_rows.py    +4  -8
python/paddle/fluid/tests/unittests/test_imperative_selected_rows.py                  +6  -9
python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py    +4  -8
python/paddle/fluid/tests/unittests/test_imperative_lod_tensor_to_selected_rows.py

@@ -29,20 +29,18 @@ from utils import DyGraphProgramDescTracerTestHelper
 class SimpleNet(fluid.Layer):
     def __init__(self,
-                 name_scope,
                  hidden_size,
                  vocab_size,
                  num_steps=20,
                  init_scale=0.1,
                  is_sparse=False,
                  dtype='float32'):
-        super(SimpleNet, self).__init__(name_scope)
+        super(SimpleNet, self).__init__()
         self.hidden_size = hidden_size
         self.vocab_size = vocab_size
         self.init_scale = init_scale
         self.num_steps = num_steps
         self.embedding = Embedding(
-            self.full_name(),
             size=[vocab_size, hidden_size],
             dtype=dtype,
             is_sparse=is_sparse,
...
@@ -100,7 +98,6 @@ class TestDygraphSimpleNet(unittest.TestCase):
             fluid.default_main_program().random_seed = seed
             simple_net = SimpleNet(
-                "simple_net",
                 hidden_size=hidden_size,
                 vocab_size=vocab_size,
                 num_steps=num_steps,
...
@@ -120,7 +117,7 @@ class TestDygraphSimpleNet(unittest.TestCase):
             for i in range(batch_num):
                 x_data = np.arange(12).reshape(4, 3).astype('int64')
                 y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
-                x_data = x_data.reshape((-1, num_steps, 1))
+                x_data = x_data.reshape((-1, num_steps))
                 y_data = y_data.reshape((-1, 1))
                 x = to_variable(x_data)
...
@@ -143,7 +140,6 @@ class TestDygraphSimpleNet(unittest.TestCase):
             fluid.default_main_program().random_seed = seed
             simple_net = SimpleNet(
-                "simple_net",
                 hidden_size=hidden_size,
                 vocab_size=vocab_size,
                 num_steps=num_steps,
...
@@ -153,7 +149,7 @@ class TestDygraphSimpleNet(unittest.TestCase):
             exe = fluid.Executor(place)
             sgd = SGDOptimizer(learning_rate=1e-3)
             x = fluid.layers.data(
-                name="x", shape=[-1, num_steps, 1], dtype='int64')
+                name="x", shape=[-1, num_steps], dtype='int64')
             y = fluid.layers.data(name="y", shape=[-1, 1], dtype=dtype)
             static_loss = simple_net(x, y)
...
@@ -172,7 +168,7 @@ class TestDygraphSimpleNet(unittest.TestCase):
             for i in range(batch_num):
                 x_data = np.arange(12).reshape(4, 3).astype('int64')
                 y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
-                x_data = x_data.reshape((-1, num_steps, 1))
+                x_data = x_data.reshape((-1, num_steps))
                 y_data = y_data.reshape((-1, 1))
                 fetch_list = [static_loss]
                 fetch_list.extend(static_param_name_list)
...
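Every hunk in this file follows the same pattern: the dygraph SimpleNet drops its name_scope parameter, super().__init__() is called without arguments, Embedding is constructed without self.full_name(), and the integer-id input loses its trailing dimension of 1. A minimal, self-contained sketch of that updated style is shown below. It is an illustration only, assuming a PaddlePaddle 1.x install with the fluid.dygraph API these tests use; TinyNet is a hypothetical name, not part of the diff.

    import numpy as np
    import paddle.fluid as fluid
    from paddle.fluid.dygraph import Embedding, to_variable

    class TinyNet(fluid.Layer):  # hypothetical layer, illustrating the updated pattern only
        def __init__(self, vocab_size, hidden_size, dtype='float32'):
            # After this commit: no name_scope argument to super().__init__() ...
            super(TinyNet, self).__init__()
            # ... and no self.full_name() argument to Embedding.
            self.embedding = Embedding(size=[vocab_size, hidden_size], dtype=dtype)

        def forward(self, ids):
            return self.embedding(ids)

    with fluid.dygraph.guard():
        # 2-D int64 ids, matching the updated tests (the old tests kept a trailing dim of 1).
        ids = to_variable(np.array([[1, 2], [2, 1]]).astype('int64'))
        out = TinyNet(vocab_size=20, hidden_size=32)(ids)
        print(out.shape)  # expected [2, 2, 32] with this API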
python/paddle/fluid/tests/unittests/test_imperative_selected_rows.py

@@ -24,10 +24,9 @@ import paddle.fluid.core as core
 class SimpleNet(fluid.Layer):
-    def __init__(self, name_scope, vocab_size, hidden_size, dtype):
-        super(SimpleNet, self).__init__(name_scope)
+    def __init__(self, vocab_size, hidden_size, dtype):
+        super(SimpleNet, self).__init__()
         self.emb = fluid.dygraph.Embedding(
-            self.full_name(),
             size=[vocab_size, hidden_size],
             dtype=dtype,
             param_attr='emb.w',
...
@@ -53,11 +52,10 @@ class TestSimpleNet(unittest.TestCase):
             adam = SGDOptimizer(learning_rate=0.001)
             # grad_clip = fluid.dygraph_grad_clip.GradClipByGlobalNorm(5.0)
-            input_word = np.array(
-                [[[1], [2]], [[2], [1]]]).astype('int64')
+            input_word = np.array([[1, 2], [2, 1]]).astype('int64')
             input = to_variable(input_word)
-            simplenet = SimpleNet("SimpleNet", 20, 32, dtype)
+            simplenet = SimpleNet(20, 32, dtype)
             input_emb, emb = simplenet(input)
             try:
...
@@ -99,11 +97,10 @@ class TestSimpleNet(unittest.TestCase):
             grad_clip = fluid.dygraph_grad_clip.GradClipByGlobalNorm(
                 5.0)
-            input_word = np.array(
-                [[[1], [2]], [[2], [1]]]).astype('int64')
+            input_word = np.array([[1, 2], [2, 1]]).astype('int64')
             input = to_variable(input_word)
-            simplenet = SimpleNet("SimpleNet", 20, 32, "float32")
+            simplenet = SimpleNet(20, 32, "float32")
             input_emb, emb = simplenet(input)
             try:
...
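Besides dropping the name_scope argument, the data change in this test is the shape of the id array fed to the embedding: the old test wrapped each id in a singleton last dimension, while the updated test passes plain 2-D ids. A NumPy-only sketch of the two shapes (no Paddle needed):

    import numpy as np

    old_ids = np.array([[[1], [2]], [[2], [1]]]).astype('int64')  # shape used before this commit
    new_ids = np.array([[1, 2], [2, 1]]).astype('int64')          # shape used after this commit
    print(old_ids.shape)  # (2, 2, 1)
    print(new_ids.shape)  # (2, 2)
    print(np.array_equal(old_ids.squeeze(axis=2), new_ids))  # True: same ids, trailing dim removed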
python/paddle/fluid/tests/unittests/test_imperative_selected_rows_to_lod_tensor.py

@@ -30,20 +30,18 @@ from paddle.fluid.dygraph.jit import TracedLayer
 class SimpleNet(fluid.Layer):
     def __init__(self,
-                 name_scope,
                  hidden_size,
                  vocab_size,
                  num_steps=20,
                  init_scale=0.1,
                  is_sparse=False,
                  dtype='float32'):
-        super(SimpleNet, self).__init__(name_scope)
+        super(SimpleNet, self).__init__()
         self.hidden_size = hidden_size
         self.vocab_size = vocab_size
         self.init_scale = init_scale
         self.num_steps = num_steps
         self.embedding = Embedding(
-            self.full_name(),
             size=[vocab_size, hidden_size],
             dtype=dtype,
             is_sparse=is_sparse,
...
@@ -109,7 +107,6 @@ class TestDygraphSimpleNet(unittest.TestCase):
             fluid.default_main_program().random_seed = seed
             simple_net = SimpleNet(
-                "simple_net",
                 hidden_size=hidden_size,
                 vocab_size=vocab_size,
                 num_steps=num_steps,
...
@@ -130,7 +127,7 @@ class TestDygraphSimpleNet(unittest.TestCase):
             for i in range(batch_num):
                 x_data = np.arange(12).reshape(4, 3).astype('int64')
                 y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
-                x_data = x_data.reshape((-1, num_steps, 1))
+                x_data = x_data.reshape((-1, num_steps))
                 y_data = y_data.reshape((-1, 1))
                 x = to_variable(x_data)
...
@@ -153,7 +150,6 @@ class TestDygraphSimpleNet(unittest.TestCase):
             fluid.default_main_program().random_seed = seed
             simple_net = SimpleNet(
-                "simple_net",
                 hidden_size=hidden_size,
                 vocab_size=vocab_size,
                 num_steps=num_steps,
...
@@ -163,7 +159,7 @@ class TestDygraphSimpleNet(unittest.TestCase):
             exe = fluid.Executor(place)
             sgd = SGDOptimizer(learning_rate=1e-3)
             x = fluid.layers.data(
-                name="x", shape=[-1, num_steps, 1], dtype='int64')
+                name="x", shape=[-1, num_steps], dtype='int64')
             y = fluid.layers.data(name="y", shape=[-1, 1], dtype=dtype)
             static_loss = simple_net(x, y)
...
@@ -182,7 +178,7 @@ class TestDygraphSimpleNet(unittest.TestCase):
             for i in range(batch_num):
                 x_data = np.arange(12).reshape(4, 3).astype('int64')
                 y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
-                x_data = x_data.reshape((-1, num_steps, 1))
+                x_data = x_data.reshape((-1, num_steps))
                 y_data = y_data.reshape((-1, 1))
                 fetch_list = [static_loss]
                 fetch_list.extend(static_param_name_list)
...
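The static-graph branch of these tests mirrors the same shape change: the fluid.layers.data placeholder for x loses its trailing dimension of 1, and the NumPy feed array is reshaped to match. A short sketch of just that part, assuming the PaddlePaddle 1.x static API and taking num_steps = 3 (the value implied by np.arange(12).reshape(4, 3) in the tests):

    import numpy as np
    import paddle.fluid as fluid

    num_steps = 3  # assumption: implied by reshape(4, 3) in the tests

    # Placeholder shape after this commit (was shape=[-1, num_steps, 1] before).
    x = fluid.layers.data(name="x", shape=[-1, num_steps], dtype='int64')

    # Matching feed data, reshaped the same way the updated tests do it.
    x_data = np.arange(12).reshape(4, 3).astype('int64')
    x_data = x_data.reshape((-1, num_steps))  # was x_data.reshape((-1, num_steps, 1))
    print(x_data.shape)  # (4, 3), which now lines up with the [-1, num_steps] placeholder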