Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
PaddleClas
提交
7c9e695f
P
PaddleClas
项目概览
PaddlePaddle
/
PaddleClas
大约 1 年 前同步成功
通知
115
Star
4999
Fork
1114
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
19
列表
看板
标记
里程碑
合并请求
6
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
PaddleClas
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
19
Issue
19
列表
看板
标记
里程碑
合并请求
6
合并请求
6
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
7c9e695f
编写于
9月 27, 2020
作者:
W
weishengyu
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
change paddle version to 2.0; modify code
上级
ff19b9cf
变更
1
隐藏空白更改
内联
并排
Showing
1 changed file
with
182 addition
and
200 deletion
+182
-200
ppcls/modeling/architectures/shufflenet_v2.py
ppcls/modeling/architectures/shufflenet_v2.py
+182
-200
未找到文件。
ppcls/modeling/architectures/shufflenet_v2.py
浏览文件 @
7c9e695f
...
...
@@ -16,212 +16,202 @@ from __future__ import absolute_import
from
__future__
import
division
from
__future__
import
print_function
import
numpy
as
np
import
paddle
import
paddle.fluid
as
fluid
from
paddle.fluid.param_attr
import
ParamAttr
from
paddle.fluid.dygraph.nn
import
Conv2D
,
Pool2D
,
BatchNorm
,
Linear
,
Dropout
from
paddle.fluid.initializer
import
MSRA
import
math
from
paddle
import
ParamAttr
,
reshape
,
transpose
,
concat
,
split
from
paddle.nn
import
Layer
,
Conv2d
,
MaxPool2d
,
AdaptiveAvgPool2d
,
BatchNorm
,
Linear
from
paddle.nn.initializer
import
MSRA
from
paddle.nn.functional
import
relu
,
swish
# Public API of this module: the ShuffleNetV2 model constructors.
# NOTE(review): reconstructed from a diff view — the old list also exported
# "ShuffleNetV2"; the post-commit list appears to end with "ShuffleNetV2_swish".
__all__ = [
    "ShuffleNetV2_x0_25",
    "ShuffleNetV2_x0_33",
    "ShuffleNetV2_x0_5",
    "ShuffleNetV2_x1_0",
    "ShuffleNetV2_x1_5",
    "ShuffleNetV2_x2_0",
    "ShuffleNetV2_swish",
]
def channel_shuffle(x, groups):
    """Rearrange channels of *x* across *groups* (ShuffleNet channel shuffle).

    Args:
        x: 4-D tensor laid out as (batch, channels, height, width) — the
           [0:4] shape unpack below assumes exactly this rank/order.
        groups (int): number of groups to shuffle across; must divide the
           channel count evenly (integer division below silently truncates
           otherwise).

    Returns:
        Tensor of the same shape as *x* with channels interleaved so that
        information mixes between the *groups* partitions.
    """
    batch_size, num_channels, height, width = x.shape[0:4]
    channels_per_group = num_channels // groups

    # reshape: split the channel axis into (groups, channels_per_group)
    x = reshape(
        x=x, shape=[batch_size, groups, channels_per_group, height, width])

    # transpose: swap the two channel sub-axes so groups interleave
    x = transpose(x=x, perm=[0, 2, 1, 3, 4])

    # flatten: collapse back to the original (batch, C, H, W) layout
    x = reshape(x=x, shape=[batch_size, num_channels, height, width])
    return x
class ConvBNLayer(Layer):
    """Conv2d + BatchNorm (+ optional activation) building block.

    Args:
        in_channels (int): channels of the input tensor.
        out_channels (int): channels produced by the convolution.
        kernel_size (int): square convolution kernel size.
        stride (int): convolution stride.
        padding (int): convolution padding.
        groups (int): grouped-convolution group count; ``in_channels`` when
            used as a depthwise conv by the callers in this file.
        act (callable or None): activation applied after batch norm
            (e.g. ``paddle.nn.functional.relu``); ``None`` disables it.
        name (str): parameter-name prefix; must not be ``None`` since it is
            concatenated into weight/BN parameter names below.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride,
                 padding,
                 groups=1,
                 act=relu,
                 name=None):
        super(ConvBNLayer, self).__init__()
        # Stored as a callable (or None); forward() applies it when truthy.
        self._act = act

        self._conv = Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            weight_attr=ParamAttr(
                initializer=MSRA(), name=name + "_weights"),
            bias_attr=False)  # bias is redundant before batch norm

        self._batch_norm = BatchNorm(
            out_channels,
            param_attr=ParamAttr(name=name + "_bn_scale"),
            bias_attr=ParamAttr(name=name + "_bn_offset"),
            moving_mean_name=name + "_bn_mean",
            moving_variance_name=name + "_bn_variance")

    def forward(self, inputs):
        """Apply conv, batch norm, then the optional activation."""
        y = self._conv(inputs)
        y = self._batch_norm(y)
        if self._act:
            y = self._act(y)
        return y
class InvertedResidual(Layer):
    """ShuffleNetV2 basic unit (stride-1, 'benchmodel 1' in the old API).

    The input is split in half along the channel axis; one half passes
    through unchanged, the other half goes through pw -> dw -> pw-linear
    convolutions; the halves are concatenated and channel-shuffled.

    Args:
        in_channels (int): input channel count; assumed even (split in half).
        out_channels (int): output channel count; assumed even and, for the
            identity branch to line up, equal to ``in_channels`` — callers in
            this file use it only with matching channel counts.
        stride (int): stride of the depthwise conv (1 for this unit).
        act (callable): activation passed through to the ConvBNLayers.
        name (str): stage/block name used to build parameter-name prefixes.
    """

    def __init__(self, in_channels, out_channels, stride, act=relu,
                 name=None):
        super(InvertedResidual, self).__init__()
        # pointwise conv on the processed half
        self._conv_pw = ConvBNLayer(
            in_channels=in_channels // 2,
            out_channels=out_channels // 2,
            kernel_size=1,
            stride=1,
            padding=0,
            groups=1,
            act=act,
            name='stage_' + name + '_conv1')
        # depthwise conv (groups == channels); no activation (linear + BN)
        self._conv_dw = ConvBNLayer(
            in_channels=out_channels // 2,
            out_channels=out_channels // 2,
            kernel_size=3,
            stride=stride,
            padding=1,
            groups=out_channels // 2,
            act=None,
            name='stage_' + name + '_conv2')
        # pointwise-linear conv restoring feature mixing
        self._conv_linear = ConvBNLayer(
            in_channels=out_channels // 2,
            out_channels=out_channels // 2,
            kernel_size=1,
            stride=1,
            padding=0,
            groups=1,
            act=act,
            name='stage_' + name + '_conv3')

    def forward(self, inputs):
        # Split channels in two: x1 is the identity branch, x2 is convolved.
        x1, x2 = split(
            inputs,
            num_or_sections=[inputs.shape[1] // 2, inputs.shape[1] // 2],
            axis=1)
        x2 = self._conv_pw(x2)
        x2 = self._conv_dw(x2)
        x2 = self._conv_linear(x2)
        out = concat([x1, x2], axis=1)
        # Shuffle with 2 groups so the two branches exchange information.
        return channel_shuffle(out, 2)
class InvertedResidualDS(Layer):
    """ShuffleNetV2 downsampling unit ('benchmodel 2' in the old API).

    Unlike :class:`InvertedResidual`, there is no channel split: the full
    input feeds BOTH branches. Branch 1 is dw -> pw-linear; branch 2 is
    pw -> dw -> pw-linear. Each branch outputs ``out_channels // 2``
    channels; they are concatenated and channel-shuffled.

    Args:
        in_channels (int): input channel count.
        out_channels (int): output channel count; assumed even.
        stride (int): stride of both depthwise convs (2 when downsampling).
        act (callable): activation passed through to the ConvBNLayers.
        name (str): stage/block name used to build parameter-name prefixes.
    """

    def __init__(self, in_channels, out_channels, stride, act=relu,
                 name=None):
        super(InvertedResidualDS, self).__init__()

        # branch1: depthwise conv (no activation) then pointwise-linear
        self._conv_dw_1 = ConvBNLayer(
            in_channels=in_channels,
            out_channels=in_channels,
            kernel_size=3,
            stride=stride,
            padding=1,
            groups=in_channels,
            act=None,
            name='stage_' + name + '_conv4')
        self._conv_linear_1 = ConvBNLayer(
            in_channels=in_channels,
            out_channels=out_channels // 2,
            kernel_size=1,
            stride=1,
            padding=0,
            groups=1,
            act=act,
            name='stage_' + name + '_conv5')

        # branch2: pointwise, depthwise (no activation), pointwise-linear
        self._conv_pw_2 = ConvBNLayer(
            in_channels=in_channels,
            out_channels=out_channels // 2,
            kernel_size=1,
            stride=1,
            padding=0,
            groups=1,
            act=act,
            name='stage_' + name + '_conv1')
        self._conv_dw_2 = ConvBNLayer(
            in_channels=out_channels // 2,
            out_channels=out_channels // 2,
            kernel_size=3,
            stride=stride,
            padding=1,
            groups=out_channels // 2,
            act=None,
            name='stage_' + name + '_conv2')
        self._conv_linear_2 = ConvBNLayer(
            in_channels=out_channels // 2,
            out_channels=out_channels // 2,
            kernel_size=1,
            stride=1,
            padding=0,
            groups=1,
            act=act,
            name='stage_' + name + '_conv3')

    def forward(self, inputs):
        x1 = self._conv_dw_1(inputs)
        x1 = self._conv_linear_1(x1)
        x2 = self._conv_pw_2(inputs)
        x2 = self._conv_dw_2(x2)
        x2 = self._conv_linear_2(x2)
        out = concat([x1, x2], axis=1)
        # Shuffle with 2 groups so the two branches exchange information.
        return channel_shuffle(out, 2)
class
ShuffleNet
(
fluid
.
dygraph
.
Layer
):
def
__init__
(
self
,
class_dim
=
1000
,
scale
=
1.0
,
act
=
'relu'
):
class
ShuffleNet
(
Layer
):
def
__init__
(
self
,
class_dim
=
1000
,
scale
=
1.0
,
act
=
relu
):
super
(
ShuffleNet
,
self
).
__init__
()
self
.
scale
=
scale
self
.
class_dim
=
class_dim
...
...
@@ -244,67 +234,59 @@ class ShuffleNet(fluid.dygraph.Layer):
"] is not implemented!"
)
# 1. conv1
self
.
_conv1
=
ConvBNLayer
(
num
_channels
=
3
,
num_filter
s
=
stage_out_channels
[
1
],
filter
_size
=
3
,
in
_channels
=
3
,
out_channel
s
=
stage_out_channels
[
1
],
kernel
_size
=
3
,
stride
=
2
,
padding
=
1
,
if_act
=
True
,
act
=
act
,
name
=
'stage1_conv'
)
self
.
_max_pool
=
Pool2D
(
pool_type
=
'max'
,
pool_size
=
3
,
pool_stride
=
2
,
pool_padding
=
1
)
self
.
_max_pool
=
MaxPool2d
(
kernel_size
=
3
,
stride
=
2
,
padding
=
1
)
# 2. bottleneck sequences
self
.
_block_list
=
[]
i
=
1
in_c
=
int
(
32
*
scale
)
for
idxstage
in
range
(
len
(
stage_repeats
)):
numrepeat
=
stage_repeats
[
idxstage
]
output_channel
=
stage_out_channels
[
idxstage
+
2
]
for
i
in
range
(
numrepeat
):
for
stage_id
,
num_repeat
in
enumerate
(
stage_repeats
):
for
i
in
range
(
num_repeat
):
if
i
==
0
:
block
=
self
.
add_sublayer
(
str
(
idxstage
+
2
)
+
'_'
+
str
(
i
+
1
),
InvertedResidualUnit
(
num_channels
=
stage_out_channels
[
idxstage
+
1
],
num_filters
=
output_channel
,
name
=
str
(
stage_id
+
2
)
+
'_'
+
str
(
i
+
1
),
sublayer
=
InvertedResidualDS
(
in_channels
=
stage_out_channels
[
stage_id
+
1
],
out_channels
=
stage_out_channels
[
stage_id
+
2
]
,
stride
=
2
,
benchmodel
=
2
,
act
=
act
,
name
=
str
(
idxstage
+
2
)
+
'_'
+
str
(
i
+
1
)))
self
.
_block_list
.
append
(
block
)
name
=
str
(
stage_id
+
2
)
+
'_'
+
str
(
i
+
1
)))
else
:
block
=
self
.
add_sublayer
(
str
(
idxstage
+
2
)
+
'_'
+
str
(
i
+
1
),
InvertedResidualUnit
(
num_channels
=
output_channel
,
num_filters
=
output_channel
,
name
=
str
(
stage_id
+
2
)
+
'_'
+
str
(
i
+
1
),
sublayer
=
InvertedResidual
(
in_channels
=
stage_out_channels
[
stage_id
+
2
]
,
out_channels
=
stage_out_channels
[
stage_id
+
2
]
,
stride
=
1
,
benchmodel
=
1
,
act
=
act
,
name
=
str
(
idxstage
+
2
)
+
'_'
+
str
(
i
+
1
)))
self
.
_block_list
.
append
(
block
)
name
=
str
(
stage_id
+
2
)
+
'_'
+
str
(
i
+
1
)))
self
.
_block_list
.
append
(
block
)
# 3. last_conv
self
.
_last_conv
=
ConvBNLayer
(
num
_channels
=
stage_out_channels
[
-
2
],
num_filter
s
=
stage_out_channels
[
-
1
],
filter
_size
=
1
,
in
_channels
=
stage_out_channels
[
-
2
],
out_channel
s
=
stage_out_channels
[
-
1
],
kernel
_size
=
1
,
stride
=
1
,
padding
=
0
,
if_act
=
True
,
act
=
act
,
name
=
'conv5'
)
# 4. pool
self
.
_pool2d_avg
=
Pool2D
(
pool_type
=
'avg'
,
global_pooling
=
True
)
self
.
_pool2d_avg
=
AdaptiveAvgPool2d
(
1
)
self
.
_out_c
=
stage_out_channels
[
-
1
]
# 5. fc
self
.
_fc
=
Linear
(
stage_out_channels
[
-
1
],
class_dim
,
param
_attr
=
ParamAttr
(
name
=
'fc6_weights'
),
weight
_attr
=
ParamAttr
(
name
=
'fc6_weights'
),
bias_attr
=
ParamAttr
(
name
=
'fc6_offset'
))
def
forward
(
self
,
inputs
):
...
...
@@ -314,7 +296,7 @@ class ShuffleNet(fluid.dygraph.Layer):
y
=
inv
(
y
)
y
=
self
.
_last_conv
(
y
)
y
=
self
.
_pool2d_avg
(
y
)
y
=
fluid
.
layers
.
reshape
(
y
,
shape
=
[
-
1
,
self
.
_out_c
])
y
=
reshape
(
y
,
shape
=
[
-
1
,
self
.
_out_c
])
y
=
self
.
_fc
(
y
)
return
y
...
...
@@ -350,5 +332,5 @@ def ShuffleNetV2_x2_0(**args):
def ShuffleNetV2_swish(**args):
    """Build a ShuffleNetV2 (scale 1.0) that uses swish activations.

    Args:
        **args: forwarded to the :class:`ShuffleNet` constructor
            (e.g. ``class_dim``).

    Returns:
        ShuffleNet: the constructed model. Note ``act`` is the callable
        ``paddle.nn.functional.swish``, not the string ``'swish'`` used by
        the pre-2.0 API.
    """
    model = ShuffleNet(scale=1.0, act=swish, **args)
    return model
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录