Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
PaddleSlim
提交
983002a3
P
PaddleSlim
项目概览
PaddlePaddle
/
PaddleSlim
接近 2 年 前同步成功
通知
51
Star
1434
Fork
344
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
53
列表
看板
标记
里程碑
合并请求
16
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
PaddleSlim
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
53
Issue
53
列表
看板
标记
里程碑
合并请求
16
合并请求
16
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
983002a3
编写于
11月 25, 2019
作者:
W
wanghaoshuang
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Change the sensitive pruner from being based on the size of parameters to being based on FLOPS.
上级
5970f5b6
变更
3
隐藏空白更改
内联
并排
Showing
3 changed file
with
86 addition
and
6 deletion
+86
-6
demo/sensitive_prune/greedy_prune.py
demo/sensitive_prune/greedy_prune.py
+1
-1
paddleslim/prune/sensitive.py
paddleslim/prune/sensitive.py
+81
-1
paddleslim/prune/sensitive_pruner.py
paddleslim/prune/sensitive_pruner.py
+4
-4
未找到文件。
demo/sensitive_prune/greedy_prune.py
浏览文件 @
983002a3
...
@@ -208,7 +208,7 @@ def compress(args):
...
@@ -208,7 +208,7 @@ def compress(args):
end
=
args
.
prune_steps
end
=
args
.
prune_steps
for
iter
in
range
(
start
,
end
):
for
iter
in
range
(
start
,
end
):
pruned_program
,
pruned_val_program
=
pruner
.
greedy_prune
(
pruned_program
,
pruned_val_program
=
pruner
.
greedy_prune
(
pruned_program
,
pruned_val_program
,
params
,
0.
1
,
topk
=
1
)
pruned_program
,
pruned_val_program
,
params
,
0.
03
,
topk
=
1
)
current_flops
=
flops
(
pruned_val_program
)
current_flops
=
flops
(
pruned_val_program
)
print
(
"iter:{}; pruned FLOPS: {}"
.
format
(
print
(
"iter:{}; pruned FLOPS: {}"
.
format
(
iter
,
float
(
base_flops
-
current_flops
)
/
base_flops
))
iter
,
float
(
base_flops
-
current_flops
)
/
base_flops
))
...
...
paddleslim/prune/sensitive.py
浏览文件 @
983002a3
...
@@ -20,11 +20,12 @@ import numpy as np
...
@@ -20,11 +20,12 @@ import numpy as np
import
paddle.fluid
as
fluid
import
paddle.fluid
as
fluid
from
..core
import
GraphWrapper
from
..core
import
GraphWrapper
from
..common
import
get_logger
from
..common
import
get_logger
from
..analysis
import
flops
from
..prune
import
Pruner
from
..prune
import
Pruner
_logger
=
get_logger
(
__name__
,
level
=
logging
.
INFO
)
_logger
=
get_logger
(
__name__
,
level
=
logging
.
INFO
)
__all__
=
[
"sensitivity"
]
__all__
=
[
"sensitivity"
,
"flops_sensitivity"
]
def
sensitivity
(
program
,
def
sensitivity
(
program
,
...
@@ -92,6 +93,85 @@ def sensitivity(program,
...
@@ -92,6 +93,85 @@ def sensitivity(program,
return
sensitivities
return
sensitivities
def flops_sensitivity(program,
                      place,
                      param_names,
                      eval_func,
                      sensitivities_file=None,
                      pruned_flops_rate=0.1):
    """
    Compute per-parameter pruning sensitivities in terms of FLOPs.

    For each parameter in ``param_names``, estimate the pruning ratio that
    removes ``pruned_flops_rate`` of the whole program's FLOPs, prune only
    that parameter by that ratio, re-evaluate with ``eval_func``, and record
    the relative metric loss ``(baseline - pruned_metric) / baseline``
    against the unpruned baseline.

    Args:
        program: Program to analyse; it is wrapped in a ``GraphWrapper``
            and left unmodified (pruned tensors are restored afterwards).
        place: Device place used when pruning/restoring parameter tensors.
        param_names (list[str]): Names of the parameters to analyse.
        eval_func (callable): Maps a program to a scalar metric.
        sensitivities_file (str, optional): Cache file path. Previously
            computed entries are loaded from it; parameters that already
            have a recorded ``pruned_percent`` are skipped, and each new
            result is saved back immediately.
        pruned_flops_rate (float): Fraction of total FLOPs to remove per
            parameter. Defaults to 0.1.

    Returns:
        dict: ``{param_name: {'pruned_percent': [ratio], 'loss': [loss],
        'size': output_channel_count}}``.
    """
    # Sanity check that the per-parameter FLOPs target is achievable.
    # NOTE(review): `assert` is stripped under `python -O`; input
    # validation would normally raise instead.
    assert (1.0 / len(param_names) > pruned_flops_rate)

    scope = fluid.global_scope()
    graph = GraphWrapper(program)
    sensitivities = _load_sensitivities(sensitivities_file)

    # Register every requested parameter, remembering its channel count
    # (dimension 0 of its shape) so ratios can be snapped to whole channels.
    for name in param_names:
        if name not in sensitivities:
            size = graph.var(name).shape()[0]
            sensitivities[name] = {
                'pruned_percent': [],
                'loss': [],
                'size': size
            }

    base_flops = flops(program)
    target_pruned_flops = base_flops * pruned_flops_rate

    pruner = Pruner()
    baseline = None
    for name in sensitivities:
        # Dry-run prune (only_graph=True, no scope/place): prune 50% of this
        # parameter on the graph only, to measure its FLOPs contribution.
        pruned_program = pruner.prune(
            program=graph.program,
            scope=None,
            params=[name],
            ratios=[0.5],
            place=None,
            lazy=False,
            only_graph=True)
        # Doubling the FLOPs saved at ratio 0.5 estimates the parameter's
        # full FLOPs contribution — assumes FLOPs scale roughly linearly
        # with the pruned ratio; TODO confirm for all layer types.
        param_flops = (base_flops - flops(pruned_program)) * 2
        channel_size = sensitivities[name]["size"]
        # NOTE(review): if this parameter contributes no FLOPs,
        # `param_flops` is 0 and this division raises ZeroDivisionError.
        pruned_ratio = target_pruned_flops / float(param_flops)
        # Clamp: if the target would prune every channel, cap the ratio at 1.
        pruned_size = round(pruned_ratio * channel_size)
        pruned_ratio = 1 if pruned_size >= channel_size else pruned_ratio

        # Skip parameters already present in the cache file.
        if len(sensitivities[name]["pruned_percent"]) > 0:
            _logger.debug('{} exist; pruned ratio: {}; excepted ratio: {}'.
                          format(name, sensitivities[name]["pruned_percent"][
                              0], pruned_ratio))
            continue
        # Evaluate the unpruned baseline lazily, once.
        if baseline is None:
            baseline = eval_func(graph.program)
        param_backup = {}
        pruner = Pruner()
        _logger.info("sensitive - param: {}; ratios: {}".format(name,
                                                                pruned_ratio))
        # Default loss of 1 (total degradation) when the whole parameter
        # would have to be pruned (ratio clamped to 1), so no eval is run.
        loss = 1
        if pruned_ratio < 1:
            # Real prune (lazy=True with param_backup) so the original
            # tensor values can be restored below.
            pruned_program = pruner.prune(
                program=graph.program,
                scope=scope,
                params=[name],
                ratios=[pruned_ratio],
                place=place,
                lazy=True,
                only_graph=False,
                param_backup=param_backup)
            pruned_metric = eval_func(pruned_program)
            loss = (baseline - pruned_metric) / baseline
            _logger.info("pruned param: {}; {}; loss={}".format(name,
                                                                pruned_ratio,
                                                                loss))
        sensitivities[name]['pruned_percent'].append(pruned_ratio)
        sensitivities[name]['loss'].append(loss)
        # Persist after every parameter so an interrupted run can resume.
        _save_sensitivities(sensitivities, sensitivities_file)

        # restore pruned parameters
        for param_name in param_backup.keys():
            param_t = scope.find_var(param_name).get_tensor()
            param_t.set(param_backup[param_name], place)

    return sensitivities
def
_load_sensitivities
(
sensitivities_file
):
def
_load_sensitivities
(
sensitivities_file
):
"""
"""
Load sensitivities from file.
Load sensitivities from file.
...
...
paddleslim/prune/sensitive_pruner.py
浏览文件 @
983002a3
...
@@ -20,6 +20,7 @@ import numpy as np
...
@@ -20,6 +20,7 @@ import numpy as np
import
paddle.fluid
as
fluid
import
paddle.fluid
as
fluid
from
..common
import
get_logger
from
..common
import
get_logger
from
.sensitive
import
sensitivity
from
.sensitive
import
sensitivity
from
.sensitive
import
flops_sensitivity
from
..analysis
import
flops
from
..analysis
import
flops
from
.pruner
import
Pruner
from
.pruner
import
Pruner
...
@@ -90,20 +91,19 @@ class SensitivePruner(object):
...
@@ -90,20 +91,19 @@ class SensitivePruner(object):
train_program
,
train_program
,
eval_program
,
eval_program
,
params
,
params
,
pruned_
ratio
,
pruned_
flops_rate
,
topk
=
1
):
topk
=
1
):
sensitivities_file
=
"greedy_sensitivities_iter{}.data"
.
format
(
sensitivities_file
=
"greedy_sensitivities_iter{}.data"
.
format
(
self
.
_iter
)
self
.
_iter
)
with
fluid
.
scope_guard
(
self
.
_scope
):
with
fluid
.
scope_guard
(
self
.
_scope
):
sensitivities
=
sensitivity
(
sensitivities
=
flops_
sensitivity
(
eval_program
,
eval_program
,
self
.
_place
,
self
.
_place
,
params
,
params
,
self
.
_eval_func
,
self
.
_eval_func
,
sensitivities_file
=
sensitivities_file
,
sensitivities_file
=
sensitivities_file
,
step_size
=
pruned_ratio
,
pruned_flops_rate
=
pruned_flops_rate
)
max_pruned_times
=
1
)
print
sensitivities
print
sensitivities
params
,
ratios
=
self
.
_greedy_ratio_by_sensitive
(
sensitivities
,
topk
)
params
,
ratios
=
self
.
_greedy_ratio_by_sensitive
(
sensitivities
,
topk
)
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录