PaddlePaddle / PaddleClas
Commit 0e1789d4
Authored Sep 13, 2020 by littletomatodonkey

fix mv2 and mv3

Parent: 515c9c99
Showing 2 changed files with 64 additions and 69 deletions (+64 −69):

ppcls/modeling/architectures/mobilenet_v2.py    +17 −18
ppcls/modeling/architectures/mobilenet_v3.py    +47 −51
ppcls/modeling/architectures/mobilenet_v2.py
@@ -18,9 +18,10 @@ from __future__ import print_function
 import numpy as np
 import paddle
-import paddle.fluid as fluid
-from paddle.fluid.param_attr import ParamAttr
-from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout
+from paddle import ParamAttr
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle.nn import Conv2d, Pool2D, BatchNorm, Linear, Dropout
 import math
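
Note: the hunk above swaps the legacy paddle.fluid imports for the paddle 2.0-beta namespaces (paddle, paddle.nn, paddle.nn.functional). A minimal usage sketch of that import surface follows; the TinyConvReLU6 class and its channel sizes are hypothetical and only illustrate the pattern this commit adopts.

# Minimal sketch (not part of the commit) of the migrated import style:
# layers subclass nn.Layer instead of fluid.dygraph.Layer, and activations
# come from paddle.nn.functional instead of fluid.layers.
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn import Conv2d


class TinyConvReLU6(nn.Layer):
    def __init__(self):
        super(TinyConvReLU6, self).__init__()
        # hypothetical channel sizes, purely for illustration
        self.conv = Conv2d(in_channels=3, out_channels=8, kernel_size=3, padding=1)

    def forward(self, x):
        return F.relu6(self.conv(x))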
@@ -30,7 +31,7 @@ __all__ = [
 ]


-class ConvBNLayer(fluid.dygraph.Layer):
+class ConvBNLayer(nn.Layer):
     def __init__(self,
                  num_channels,
                  filter_size,
@@ -43,16 +44,14 @@ class ConvBNLayer(fluid.dygraph.Layer):
                  use_cudnn=True):
         super(ConvBNLayer, self).__init__()

-        self._conv = Conv2D(
-            num_channels=num_channels,
-            num_filters=num_filters,
-            filter_size=filter_size,
+        self._conv = Conv2d(
+            in_channels=num_channels,
+            out_channels=num_filters,
+            kernel_size=filter_size,
             stride=stride,
             padding=padding,
             groups=num_groups,
-            act=None,
-            use_cudnn=use_cudnn,
-            param_attr=ParamAttr(name=name + "_weights"),
+            weight_attr=ParamAttr(name=name + "_weights"),
             bias_attr=False)

         self._batch_norm = BatchNorm(
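
Note: the constructor-argument renaming in this hunk follows the paddle 2.0-beta Conv2d API: num_channels becomes in_channels, num_filters becomes out_channels, filter_size becomes kernel_size, param_attr becomes weight_attr, and act= / use_cudnn= are dropped. A hypothetical stand-alone sketch of the same renaming (the sizes and parameter name are examples, not from the commit):

from paddle import ParamAttr
from paddle.nn import Conv2d

conv = Conv2d(
    in_channels=32,       # was num_channels=
    out_channels=64,      # was num_filters=
    kernel_size=3,        # was filter_size=
    stride=1,
    padding=1,
    groups=1,
    weight_attr=ParamAttr(name="example_weights"),  # was param_attr=
    bias_attr=False)      # act= / use_cudnn= no longer exist; activations are applied via F.*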
@@ -66,11 +65,11 @@ class ConvBNLayer(fluid.dygraph.Layer):
         y = self._conv(inputs)
         y = self._batch_norm(y)
         if if_act:
-            y = fluid.layers.relu6(y)
+            y = F.relu6(y)
         return y


-class InvertedResidualUnit(fluid.dygraph.Layer):
+class InvertedResidualUnit(nn.Layer):
     def __init__(self, num_channels, num_in_filter, num_filters, stride,
                  filter_size, padding, expansion_factor, name):
         super(InvertedResidualUnit, self).__init__()
@@ -108,11 +107,11 @@ class InvertedResidualUnit(fluid.dygraph.Layer):
         y = self._bottleneck_conv(y, if_act=True)
         y = self._linear_conv(y, if_act=False)
         if ifshortcut:
-            y = fluid.layers.elementwise_add(inputs, y)
+            y = paddle.elementwise_add(inputs, y)
         return y


-class InvresiBlocks(fluid.dygraph.Layer):
+class InvresiBlocks(nn.Layer):
     def __init__(self, in_c, t, c, n, s, name):
         super(InvresiBlocks, self).__init__()
@@ -148,7 +147,7 @@ class InvresiBlocks(fluid.dygraph.Layer):
         return y


-class MobileNet(fluid.dygraph.Layer):
+class MobileNet(nn.Layer):
     def __init__(self, class_dim=1000, scale=1.0):
         super(MobileNet, self).__init__()
         self.scale = scale
@@ -204,7 +203,7 @@ class MobileNet(fluid.dygraph.Layer):
         self.out = Linear(
             self.out_c,
             class_dim,
-            param_attr=ParamAttr(name="fc10_weights"),
+            weight_attr=ParamAttr(name="fc10_weights"),
             bias_attr=ParamAttr(name="fc10_offset"))

     def forward(self, inputs):
@@ -213,7 +212,7 @@ class MobileNet(fluid.dygraph.Layer):
             y = block(y)
         y = self.conv9(y, if_act=True)
         y = self.pool2d_avg(y)
-        y = fluid.layers.reshape(y, shape=[-1, self.out_c])
+        y = paddle.reshape(y, shape=[-1, self.out_c])
         y = self.out(y)
         return y
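
Note: across mobilenet_v2.py the free functions move from fluid.layers.* to the paddle.* and paddle.nn.functional namespaces (relu6, elementwise_add, reshape). A hypothetical helper, not part of the commit, that strings the migrated calls together for illustration:

import paddle
import paddle.nn.functional as F


def relu6_add_flatten(x, y, out_c):
    # mirrors the replacements made above, for illustration only
    y = F.relu6(y)                                  # was fluid.layers.relu6
    y = paddle.elementwise_add(x, y)                # was fluid.layers.elementwise_add
    return paddle.reshape(y, shape=[-1, out_c])     # was fluid.layers.reshape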
ppcls/modeling/architectures/mobilenet_v3.py
@@ -18,9 +18,12 @@ from __future__ import print_function
 import numpy as np
 import paddle
-import paddle.fluid as fluid
-from paddle.fluid.param_attr import ParamAttr
-from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout
+from paddle import ParamAttr
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle.nn import Conv2d, Pool2D, BatchNorm, Linear, Dropout
+# TODO: need to be removed later!
+from paddle.fluid.regularizer import L2Decay
 import math
@@ -42,7 +45,7 @@ def make_divisible(v, divisor=8, min_value=None):
     return new_v


-class MobileNetV3(fluid.dygraph.Layer):
+class MobileNetV3(nn.Layer):
     def __init__(self, scale=1.0, model_name="small", class_dim=1000):
         super(MobileNetV3, self).__init__()
@@ -133,20 +136,19 @@ class MobileNetV3(fluid.dygraph.Layer):
         self.pool = Pool2D(
             pool_type="avg", global_pooling=True, use_cudnn=False)
-        self.last_conv = Conv2D(
-            num_channels=make_divisible(scale * self.cls_ch_squeeze),
-            num_filters=self.cls_ch_expand,
-            filter_size=1,
+        self.last_conv = Conv2d(
+            in_channels=make_divisible(scale * self.cls_ch_squeeze),
+            out_channels=self.cls_ch_expand,
+            kernel_size=1,
             stride=1,
             padding=0,
-            act=None,
-            param_attr=ParamAttr(name="last_1x1_conv_weights"),
+            weight_attr=ParamAttr(name="last_1x1_conv_weights"),
             bias_attr=False)

         self.out = Linear(
-            input_dim=self.cls_ch_expand,
-            output_dim=class_dim,
-            param_attr=ParamAttr("fc_weights"),
+            self.cls_ch_expand,
+            class_dim,
+            weight_attr=ParamAttr("fc_weights"),
             bias_attr=ParamAttr(name="fc_offset"))

     def forward(self, inputs, label=None, dropout_prob=0.2):
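
Note: the classifier head now uses the paddle.nn Linear signature adopted in this hunk: positional in/out feature sizes instead of input_dim= / output_dim=, and weight_attr= instead of param_attr=. A hypothetical stand-alone sketch (the sizes are examples, not taken from the commit):

from paddle import ParamAttr
from paddle.nn import Linear

fc = Linear(
    1280,                                    # was input_dim=
    1000,                                    # was output_dim=
    weight_attr=ParamAttr("fc_weights"),     # was param_attr=
    bias_attr=ParamAttr(name="fc_offset"))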
@@ -156,15 +158,15 @@ class MobileNetV3(fluid.dygraph.Layer):
         x = self.last_second_conv(x)
         x = self.pool(x)
         x = self.last_conv(x)
-        x = fluid.layers.hard_swish(x)
-        x = fluid.layers.dropout(x=x, dropout_prob=dropout_prob)
-        x = fluid.layers.reshape(x, shape=[x.shape[0], x.shape[1]])
+        x = F.hard_swish(x)
+        x = F.dropout(x=x, p=dropout_prob)
+        x = paddle.reshape(x, shape=[x.shape[0], x.shape[1]])
         x = self.out(x)
         return x


-class ConvBNLayer(fluid.dygraph.Layer):
+class ConvBNLayer(nn.Layer):
     def __init__(self,
                  in_c,
                  out_c,
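
Note: besides the namespace change, F.dropout renames the probability argument (dropout_prob= becomes p=). A hypothetical rendering of the migrated head ops, for illustration only:

import paddle
import paddle.nn.functional as F


def head_ops(x, dropout_prob=0.2):
    x = F.hard_swish(x)                 # was fluid.layers.hard_swish
    x = F.dropout(x=x, p=dropout_prob)  # dropout_prob= is now p=
    return paddle.reshape(x, shape=[x.shape[0], x.shape[1]])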
@@ -179,28 +181,24 @@ class ConvBNLayer(fluid.dygraph.Layer):
         super(ConvBNLayer, self).__init__()
         self.if_act = if_act
         self.act = act
-        self.conv = fluid.dygraph.Conv2D(
-            num_channels=in_c,
-            num_filters=out_c,
-            filter_size=filter_size,
+        self.conv = Conv2d(
+            in_channels=in_c,
+            out_channels=out_c,
+            kernel_size=filter_size,
             stride=stride,
             padding=padding,
             groups=num_groups,
-            param_attr=ParamAttr(name=name + "_weights"),
-            bias_attr=False,
-            use_cudnn=use_cudnn,
-            act=None)
-        self.bn = fluid.dygraph.BatchNorm(
+            weight_attr=ParamAttr(name=name + "_weights"),
+            bias_attr=False)
+        self.bn = BatchNorm(
             num_channels=out_c,
             act=None,
             param_attr=ParamAttr(
                 name=name + "_bn_scale",
-                regularizer=fluid.regularizer.L2DecayRegularizer(
-                    regularization_coeff=0.0)),
+                regularizer=L2Decay(regularization_coeff=0.0)),
             bias_attr=ParamAttr(
                 name=name + "_bn_offset",
-                regularizer=fluid.regularizer.L2DecayRegularizer(
-                    regularization_coeff=0.0)),
+                regularizer=L2Decay(regularization_coeff=0.0)),
             moving_mean_name=name + "_bn_mean",
             moving_variance_name=name + "_bn_variance")
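
Note: the batch-norm parameters keep their zero L2 regularization, but the regularizer is now built with L2Decay (still imported from paddle.fluid.regularizer, flagged with a TODO in the import hunk) instead of fluid.regularizer.L2DecayRegularizer. A hypothetical stand-alone sketch (the attribute name is an example):

from paddle import ParamAttr
from paddle.fluid.regularizer import L2Decay  # per the TODO, a temporary import

bn_scale_attr = ParamAttr(
    name="example_bn_scale",
    regularizer=L2Decay(regularization_coeff=0.0))  # disable weight decay on the BN scale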
@@ -209,16 +207,16 @@ class ConvBNLayer(fluid.dygraph.Layer):
         x = self.bn(x)
         if self.if_act:
             if self.act == "relu":
-                x = fluid.layers.relu(x)
+                x = F.relu(x)
             elif self.act == "hard_swish":
-                x = fluid.layers.hard_swish(x)
+                x = F.hard_swish(x)
             else:
                 print("The activation function is selected incorrectly.")
                 exit()
         return x


-class ResidualUnit(fluid.dygraph.Layer):
+class ResidualUnit(nn.Layer):
     def __init__(self,
                  in_c,
                  mid_c,
@@ -270,40 +268,38 @@ class ResidualUnit(fluid.dygraph.Layer):
             x = self.mid_se(x)
         x = self.linear_conv(x)
         if self.if_shortcut:
-            x = fluid.layers.elementwise_add(inputs, x)
+            x = paddle.elementwise_add(inputs, x)
         return x


-class SEModule(fluid.dygraph.Layer):
+class SEModule(nn.Layer):
     def __init__(self, channel, reduction=4, name=""):
         super(SEModule, self).__init__()
-        self.avg_pool = fluid.dygraph.Pool2D(
-            pool_type="avg", global_pooling=True)
-        self.conv1 = fluid.dygraph.Conv2D(
-            num_channels=channel,
-            num_filters=channel // reduction,
-            filter_size=1,
+        self.avg_pool = Pool2D(
+            pool_type="avg", global_pooling=True, use_cudnn=False)
+        self.conv1 = Conv2d(
+            in_channels=channel,
+            out_channels=channel // reduction,
+            kernel_size=1,
             stride=1,
             padding=0,
-            act="relu",
-            param_attr=ParamAttr(name=name + "_1_weights"),
+            weight_attr=ParamAttr(name=name + "_1_weights"),
             bias_attr=ParamAttr(name=name + "_1_offset"))
-        self.conv2 = fluid.dygraph.Conv2D(
-            num_channels=channel // reduction,
-            num_filters=channel,
-            filter_size=1,
+        self.conv2 = Conv2d(
+            in_channels=channel // reduction,
+            out_channels=channel,
+            kernel_size=1,
             stride=1,
             padding=0,
-            act=None,
-            param_attr=ParamAttr(name + "_2_weights"),
+            weight_attr=ParamAttr(name + "_2_weights"),
             bias_attr=ParamAttr(name=name + "_2_offset"))

     def forward(self, inputs):
         outputs = self.avg_pool(inputs)
         outputs = self.conv1(outputs)
+        outputs = F.relu(outputs)
         outputs = self.conv2(outputs)
-        outputs = fluid.layers.hard_sigmoid(outputs)
-        return fluid.layers.elementwise_mul(x=inputs, y=outputs, axis=0)
+        outputs = F.hard_sigmoid(outputs)
+        return paddle.multiply(x=inputs, y=outputs, axis=0)


 def MobileNetV3_small_x0_35(**args):
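
Note: since Conv2d no longer takes an act= argument, SEModule's excite path applies F.relu explicitly, and the final channel-wise scaling switches from fluid.layers.elementwise_mul to paddle.multiply. A hypothetical free-function rendering of that forward pass (the layer arguments are placeholders, not part of the commit):

import paddle
import paddle.nn.functional as F


def se_scale(inputs, avg_pool, conv1, conv2):
    outputs = avg_pool(inputs)
    outputs = F.relu(conv1(outputs))           # act="relu" kwarg replaced by an explicit F.relu
    outputs = F.hard_sigmoid(conv2(outputs))   # was fluid.layers.hard_sigmoid
    return paddle.multiply(x=inputs, y=outputs, axis=0)  # was fluid.layers.elementwise_mul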