BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Unverified commit 40bd7a7a
Authored Jul 29, 2021 by wangna11BD; committed by GitHub on Jul 29, 2021
add parameter of input in model.summary (#34165)
* add input option in model.summary
Parent: d3dae0ce
Showing 3 changed files with 105 additions and 12 deletions (+105 -12):

python/paddle/hapi/model.py          +1   -1
python/paddle/hapi/model_summary.py  +67  -11
python/paddle/tests/test_model.py    +37   -0
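To make the effect of the commit concrete, here is a minimal usage sketch (not part of the diff) that contrasts the existing input_size-based call with the new input= option; it assumes paddle.vision.models.LeNet, the same network family used in the docstring examples below.

import paddle
from paddle.vision.models import LeNet

lenet = LeNet()

# Existing usage: shapes are given explicitly via input_size.
params_info = paddle.summary(lenet, (1, 1, 28, 28))

# New usage added by this commit: pass a concrete tensor (or a list/dict of
# tensors) via `input`; shapes are then read from the data itself.
x = paddle.rand([1, 1, 28, 28])
params_info = paddle.summary(lenet, input=x)
print(params_info)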
python/paddle/hapi/model.py
@@ -2145,7 +2145,7 @@ class Model(object):
             _input_size = input_size
         else:
             _input_size = self._inputs
-        return summary(self.network, _input_size, dtype)
+        return summary(self.network, _input_size, dtypes=dtype)
 
     def _verify_spec(self, specs, shapes=None, dtypes=None, is_input=False):
         out_specs = []
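The only change to model.py is that Model.summary now passes dtype by keyword, because the second parameter of summary() becomes optional (see model_summary.py below). A minimal sketch of the high-level path; the Sequential network and the InputSpec here are illustrative assumptions, not taken from the diff:

import paddle
from paddle.static import InputSpec

# Hypothetical network wrapped in the high-level Model API.
net = paddle.nn.Sequential(paddle.nn.Flatten(), paddle.nn.Linear(784, 10))
model = paddle.Model(net, inputs=[InputSpec([None, 1, 28, 28], 'float32', 'image')])

# With no input_size argument, Model.summary falls back to self._inputs and
# forwards dtype as the dtypes keyword of paddle.summary.
params_info = model.summary()
print(params_info)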
python/paddle/hapi/model_summary.py
@@ -25,7 +25,7 @@ from collections import OrderedDict
 __all__ = []
 
 
-def summary(net, input_size, dtypes=None):
+def summary(net, input_size=None, dtypes=None, input=None):
     """Prints a string summary of the network.
 
     Args:
@@ -34,8 +34,10 @@ def summary(net, input_size, dtypes=None):
             have one input, input_size can be tuple or InputSpec. if model
             have multiple input, input_size must be a list which contain
             every input's shape. Note that input_size only dim of
-            batch_size can be None or -1.
+            batch_size can be None or -1. Default: None. Note that
+            input_size and input cannot be None at the same time.
         dtypes (str, optional): if dtypes is None, 'float32' will be used, Default: None.
+        input: the input tensor. if input is given, input_size and dtype will be ignored, Default: None.
 
     Returns:
         Dict: a summary of the network including total params and total trainable params.
@@ -94,10 +96,62 @@ def summary(net, input_size, dtypes=None):
             lenet_multi_input = LeNetMultiInput()
 
             params_info = paddle.summary(lenet_multi_input, [(1, 1, 28, 28), (1, 400)],
-                                         ['float32', 'float32'])
+                                         dtypes=['float32', 'float32'])
+            print(params_info)
+
+            # list input demo
+            class LeNetListInput(LeNet):
+                def forward(self, inputs):
+                    x = self.features(inputs[0])
+
+                    if self.num_classes > 0:
+                        x = paddle.flatten(x, 1)
+                        x = self.fc(x + inputs[1])
+                    return x
+
+            lenet_list_input = LeNetListInput()
+            input_data = [paddle.rand([1, 1, 28, 28]), paddle.rand([1, 400])]
+            params_info = paddle.summary(lenet_list_input, input=input_data)
+            print(params_info)
+
+            # dict input demo
+            class LeNetDictInput(LeNet):
+                def forward(self, inputs):
+                    x = self.features(inputs['x1'])
+
+                    if self.num_classes > 0:
+                        x = paddle.flatten(x, 1)
+                        x = self.fc(x + inputs['x2'])
+                    return x
+
+            lenet_dict_input = LeNetDictInput()
+            input_data = {'x1': paddle.rand([1, 1, 28, 28]),
+                          'x2': paddle.rand([1, 400])}
+            params_info = paddle.summary(lenet_dict_input, input=input_data)
             print(params_info)
 
     """
+    if input_size is None and input is None:
+        raise ValueError("input_size and input cannot be None at the same time")
+
+    if input_size is None and input is not None:
+        if paddle.is_tensor(input):
+            input_size = tuple(input.shape)
+        elif isinstance(input, (list, tuple)):
+            input_size = []
+            for x in input:
+                input_size.append(tuple(x.shape))
+        elif isinstance(input, dict):
+            input_size = []
+            for key in input.keys():
+                input_size.append(tuple(input[key].shape))
+        else:
+            raise ValueError(
+                "Input is not tensor, list, tuple and dict, unable to determine input_size, please input input_size."
+            )
+
     if isinstance(input_size, InputSpec):
         _input_size = tuple(input_size.shape)
     elif isinstance(input_size, list):
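The added branch above only derives input_size from the supplied input and then falls through to the existing InputSpec/list/tuple handling. As an illustration (not part of the diff), the dict case reduces to collecting the per-tensor shapes:

import paddle

input = {'x1': paddle.rand([1, 1, 28, 28]), 'x2': paddle.rand([1, 400])}
# Equivalent to the dict branch added above:
input_size = [tuple(input[key].shape) for key in input.keys()]
print(input_size)  # [(1, 1, 28, 28), (1, 400)]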
@@ -163,7 +217,8 @@ def summary(net, input_size, dtypes=None):
             return [_check_input(i) for i in input_size]
 
     _input_size = _check_input(_input_size)
-    result, params_info = summary_string(net, _input_size, dtypes)
+    result, params_info = summary_string(net, _input_size, dtypes, input)
     print(result)
 
     if in_train_mode:
@@ -173,7 +228,7 @@ def summary(net, input_size, dtypes=None):
 
 @paddle.no_grad()
-def summary_string(model, input_size, dtypes=None):
+def summary_string(model, input_size=None, dtypes=None, input=None):
     def _all_is_numper(items):
         for item in items:
             if not isinstance(item, numbers.Number):
@@ -280,17 +335,18 @@ def summary_string(model, input_size, dtypes=None):
                 build_input(i, dtype)
                 for i, dtype in zip(input_size, dtypes)
             ]
-        x = build_input(input_size, dtypes)
 
     # create properties
     summary = OrderedDict()
     hooks = []
 
     # register hook
     model.apply(register_hook)
 
-    # make a forward pass
-    model(*x)
+    if input is not None:
+        x = input
+        model(x)
+    else:
+        x = build_input(input_size, dtypes)
+        # make a forward pass
+        model(*x)
 
     # remove these hooks
     for h in hooks:
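With this hunk, a user-supplied input is handed to the network as a single argument (so a tensor, list, or dict reaches forward() unchanged), while inputs generated from input_size are still built by build_input and unpacked. A small sketch of the resulting call path, mirroring the new test below; treating 'trainable_params' as a key of the returned dict is an assumption based on the docstring's "total trainable params":

import paddle

rnn = paddle.nn.SimpleRNN(16, 32, 2, direction='bidirectional')
x = paddle.rand([4, 23, 16])

# input_size and dtypes are ignored when `input` is given; shapes and dtypes
# come from the tensor itself.
params_info = paddle.summary(rnn, input=x)
print(params_info['total_params'], params_info.get('trainable_params'))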
python/paddle/tests/test_model.py
@@ -68,6 +68,27 @@ class LeNetDygraph(paddle.nn.Layer):
         return x
 
 
+class LeNetListInput(LeNetDygraph):
+    def forward(self, inputs):
+        x = inputs[0]
+        x = self.features(x)
+
+        if self.num_classes > 0:
+            x = paddle.flatten(x, 1)
+            x = self.fc(x + inputs[1])
+        return x
+
+
+class LeNetDictInput(LeNetDygraph):
+    def forward(self, inputs):
+        x = self.features(inputs['x1'])
+
+        if self.num_classes > 0:
+            x = paddle.flatten(x, 1)
+            x = self.fc(x + inputs['x2'])
+        return x
+
+
 class MnistDataset(MNIST):
     def __init__(self, mode, return_label=True, sample_num=None):
         super(MnistDataset, self).__init__(mode=mode)
@@ -615,6 +636,22 @@ class TestModelFunction(unittest.TestCase):
         gt_params = _get_param_from_state_dict(rnn.state_dict())
         np.testing.assert_allclose(params_info['total_params'], gt_params / 2.0)
 
+    def test_summary_input(self):
+        rnn = paddle.nn.SimpleRNN(16, 32, 2, direction='bidirectional')
+        input_data = paddle.rand([4, 23, 16])
+        paddle.summary(rnn, input=input_data)
+
+        lenet_List_input = LeNetListInput()
+        input_data = [paddle.rand([1, 1, 28, 28]), paddle.rand([1, 400])]
+        paddle.summary(lenet_List_input, input=input_data)
+
+        lenet_dict_input = LeNetDictInput()
+        input_data = {
+            'x1': paddle.rand([1, 1, 28, 28]),
+            'x2': paddle.rand([1, 400])
+        }
+        paddle.summary(lenet_dict_input, input=input_data)
+
     def test_summary_dtype(self):
         input_shape = (3, 1)
         net = paddle.nn.Embedding(10, 3, sparse=True)
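A hypothetical way to run only the new test case, assuming a development install of this branch and that python/paddle/tests is on the import path:

import unittest
from test_model import TestModelFunction  # python/paddle/tests/test_model.py

suite = unittest.TestSuite()
suite.addTest(TestModelFunction('test_summary_input'))
unittest.TextTestRunner(verbosity=2).run(suite)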