PaddlePaddle / Paddle — commit f6219dda (unverified)

Authored by Nyakku Shigure on Apr 23, 2022; committed via GitHub on Apr 23, 2022.
reuse ConvNormActivation in some vision models (#40431)

* reuse ConvNormActivation in some vision models

Parent: 34ac7b74
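For context on the refactor: each of the touched models previously carried its own Conv2D + BatchNorm (+ activation) wrapper, which this commit replaces with the shared `paddle.vision.ops.ConvNormActivation` helper. A minimal sketch of the duplicated pattern and its replacement follows — channel numbers are illustrative, and the per-model wrappers differed slightly (e.g. in weight initializers), so this is an equivalence up to such details:

```python
import paddle.nn as nn

# The per-model wrapper this commit deletes (simplified; names vary per model):
class ConvBNLayer(nn.Layer):
    def __init__(self, in_channels, out_channels, kernel_size,
                 stride=1, padding=0, groups=1):
        super().__init__()
        # conv without bias, since the following BatchNorm supplies the shift
        self._conv = nn.Conv2D(in_channels, out_channels, kernel_size,
                               stride=stride, padding=padding,
                               groups=groups, bias_attr=False)
        self._norm = nn.BatchNorm2D(out_channels)
        self._act = nn.ReLU()

    def forward(self, x):
        return self._act(self._norm(self._conv(x)))

# The shared helper it is replaced with:
from paddle.vision.ops import ConvNormActivation

block = ConvNormActivation(in_channels=3, out_channels=32, kernel_size=3,
                           stride=2, padding=0, activation_layer=nn.ReLU)
```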
Showing 5 changed files with 372 additions and 382 deletions.
python/paddle/vision/models/inceptionv3.py   +244 −233
python/paddle/vision/models/mobilenetv1.py   +20 −36
python/paddle/vision/models/mobilenetv2.py   +45 −44
python/paddle/vision/models/shufflenetv2.py  +59 −65
python/paddle/vision/ops.py                  +4 −4
python/paddle/vision/models/inceptionv3.py @ f6219dda
@@ -19,75 +19,60 @@ from __future__ import print_function
 import math
 import paddle
 import paddle.nn as nn
-from paddle.nn import Conv2D, BatchNorm, Linear, Dropout
+from paddle.nn import Linear, Dropout
 from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D
 from paddle.nn.initializer import Uniform
 from paddle.fluid.param_attr import ParamAttr
 from paddle.utils.download import get_weights_path_from_url
+
+from ..ops import ConvNormActivation

 __all__ = []

 model_urls = {
     "inception_v3": (
-        "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/legendary_models/InceptionV3_pretrained.pdparams",
-        "e4d0905a818f6bb7946e881777a8a935")
+        "https://paddle-hapi.bj.bcebos.com/models/inception_v3.pdparams",
+        "649a4547c3243e8b59c656f41fe330b8")
 }


-class ConvBNLayer(nn.Layer):
-    def __init__(self,
-                 num_channels,
-                 num_filters,
-                 filter_size,
-                 stride=1,
-                 padding=0,
-                 groups=1,
-                 act="relu"):
-        super().__init__()
-        self.act = act
-        self.conv = Conv2D(
-            in_channels=num_channels,
-            out_channels=num_filters,
-            kernel_size=filter_size,
-            stride=stride,
-            padding=padding,
-            groups=groups,
-            bias_attr=False)
-        self.bn = BatchNorm(num_filters)
-        self.relu = nn.ReLU()
-
-    def forward(self, x):
-        x = self.conv(x)
-        x = self.bn(x)
-        if self.act:
-            x = self.relu(x)
-        return x
-
-
 class InceptionStem(nn.Layer):
     def __init__(self):
         super().__init__()
-        self.conv_1a_3x3 = ConvBNLayer(
-            num_channels=3,
-            num_filters=32,
-            filter_size=3,
-            stride=2,
-            act="relu")
-        self.conv_2a_3x3 = ConvBNLayer(
-            num_channels=32,
-            num_filters=32,
-            filter_size=3,
-            stride=1,
-            act="relu")
-        self.conv_2b_3x3 = ConvBNLayer(
-            num_channels=32,
-            num_filters=64,
-            filter_size=3,
-            padding=1,
-            act="relu")
+        self.conv_1a_3x3 = ConvNormActivation(
+            in_channels=3,
+            out_channels=32,
+            kernel_size=3,
+            stride=2,
+            padding=0,
+            activation_layer=nn.ReLU)
+        self.conv_2a_3x3 = ConvNormActivation(
+            in_channels=32,
+            out_channels=32,
+            kernel_size=3,
+            stride=1,
+            padding=0,
+            activation_layer=nn.ReLU)
+        self.conv_2b_3x3 = ConvNormActivation(
+            in_channels=32,
+            out_channels=64,
+            kernel_size=3,
+            padding=1,
+            activation_layer=nn.ReLU)
         self.max_pool = MaxPool2D(kernel_size=3, stride=2, padding=0)
-        self.conv_3b_1x1 = ConvBNLayer(
-            num_channels=64,
-            num_filters=80,
-            filter_size=1,
-            act="relu")
-        self.conv_4a_3x3 = ConvBNLayer(
-            num_channels=80,
-            num_filters=192,
-            filter_size=3,
-            act="relu")
+        self.conv_3b_1x1 = ConvNormActivation(
+            in_channels=64,
+            out_channels=80,
+            kernel_size=1,
+            padding=0,
+            activation_layer=nn.ReLU)
+        self.conv_4a_3x3 = ConvNormActivation(
+            in_channels=80,
+            out_channels=192,
+            kernel_size=3,
+            padding=0,
+            activation_layer=nn.ReLU)

     def forward(self, x):
         x = self.conv_1a_3x3(x)
...
@@ -103,47 +88,53 @@ class InceptionStem(nn.Layer):
 class InceptionA(nn.Layer):
     def __init__(self, num_channels, pool_features):
         super().__init__()
-        self.branch1x1 = ConvBNLayer(
-            num_channels=num_channels,
-            num_filters=64,
-            filter_size=1,
-            act="relu")
-        self.branch5x5_1 = ConvBNLayer(
-            num_channels=num_channels,
-            num_filters=48,
-            filter_size=1,
-            act="relu")
-        self.branch5x5_2 = ConvBNLayer(
-            num_channels=48,
-            num_filters=64,
-            filter_size=5,
-            padding=2,
-            act="relu")
+        self.branch1x1 = ConvNormActivation(
+            in_channels=num_channels,
+            out_channels=64,
+            kernel_size=1,
+            padding=0,
+            activation_layer=nn.ReLU)
+        self.branch5x5_1 = ConvNormActivation(
+            in_channels=num_channels,
+            out_channels=48,
+            kernel_size=1,
+            padding=0,
+            activation_layer=nn.ReLU)
+        self.branch5x5_2 = ConvNormActivation(
+            in_channels=48,
+            out_channels=64,
+            kernel_size=5,
+            padding=2,
+            activation_layer=nn.ReLU)

-        self.branch3x3dbl_1 = ConvBNLayer(
-            num_channels=num_channels,
-            num_filters=64,
-            filter_size=1,
-            act="relu")
-        self.branch3x3dbl_2 = ConvBNLayer(
-            num_channels=64,
-            num_filters=96,
-            filter_size=3,
-            padding=1,
-            act="relu")
-        self.branch3x3dbl_3 = ConvBNLayer(
-            num_channels=96,
-            num_filters=96,
-            filter_size=3,
-            padding=1,
-            act="relu")
+        self.branch3x3dbl_1 = ConvNormActivation(
+            in_channels=num_channels,
+            out_channels=64,
+            kernel_size=1,
+            padding=0,
+            activation_layer=nn.ReLU)
+        self.branch3x3dbl_2 = ConvNormActivation(
+            in_channels=64,
+            out_channels=96,
+            kernel_size=3,
+            padding=1,
+            activation_layer=nn.ReLU)
+        self.branch3x3dbl_3 = ConvNormActivation(
+            in_channels=96,
+            out_channels=96,
+            kernel_size=3,
+            padding=1,
+            activation_layer=nn.ReLU)

         self.branch_pool = AvgPool2D(
             kernel_size=3, stride=1, padding=1, exclusive=False)
-        self.branch_pool_conv = ConvBNLayer(
-            num_channels=num_channels,
-            num_filters=pool_features,
-            filter_size=1,
-            act="relu")
+        self.branch_pool_conv = ConvNormActivation(
+            in_channels=num_channels,
+            out_channels=pool_features,
+            kernel_size=1,
+            padding=0,
+            activation_layer=nn.ReLU)

     def forward(self, x):
         branch1x1 = self.branch1x1(x)
...
@@ -164,29 +155,34 @@ class InceptionA(nn.Layer):
 class InceptionB(nn.Layer):
     def __init__(self, num_channels):
         super().__init__()
-        self.branch3x3 = ConvBNLayer(
-            num_channels=num_channels,
-            num_filters=384,
-            filter_size=3,
-            stride=2,
-            act="relu")
+        self.branch3x3 = ConvNormActivation(
+            in_channels=num_channels,
+            out_channels=384,
+            kernel_size=3,
+            stride=2,
+            padding=0,
+            activation_layer=nn.ReLU)

-        self.branch3x3dbl_1 = ConvBNLayer(
-            num_channels=num_channels,
-            num_filters=64,
-            filter_size=1,
-            act="relu")
-        self.branch3x3dbl_2 = ConvBNLayer(
-            num_channels=64,
-            num_filters=96,
-            filter_size=3,
-            padding=1,
-            act="relu")
-        self.branch3x3dbl_3 = ConvBNLayer(
-            num_channels=96,
-            num_filters=96,
-            filter_size=3,
-            stride=2,
-            act="relu")
+        self.branch3x3dbl_1 = ConvNormActivation(
+            in_channels=num_channels,
+            out_channels=64,
+            kernel_size=1,
+            padding=0,
+            activation_layer=nn.ReLU)
+        self.branch3x3dbl_2 = ConvNormActivation(
+            in_channels=64,
+            out_channels=96,
+            kernel_size=3,
+            padding=1,
+            activation_layer=nn.ReLU)
+        self.branch3x3dbl_3 = ConvNormActivation(
+            in_channels=96,
+            out_channels=96,
+            kernel_size=3,
+            stride=2,
+            padding=0,
+            activation_layer=nn.ReLU)

         self.branch_pool = MaxPool2D(kernel_size=3, stride=2)

     def forward(self, x):
...
@@ -206,70 +202,74 @@ class InceptionB(nn.Layer):
 class InceptionC(nn.Layer):
     def __init__(self, num_channels, channels_7x7):
         super().__init__()
-        self.branch1x1 = ConvBNLayer(
-            num_channels=num_channels,
-            num_filters=192,
-            filter_size=1,
-            act="relu")
+        self.branch1x1 = ConvNormActivation(
+            in_channels=num_channels,
+            out_channels=192,
+            kernel_size=1,
+            padding=0,
+            activation_layer=nn.ReLU)

-        self.branch7x7_1 = ConvBNLayer(
-            num_channels=num_channels,
-            num_filters=channels_7x7,
-            filter_size=1,
-            stride=1,
-            act="relu")
-        self.branch7x7_2 = ConvBNLayer(
-            num_channels=channels_7x7,
-            num_filters=channels_7x7,
-            filter_size=(1, 7),
-            stride=1,
-            padding=(0, 3),
-            act="relu")
-        self.branch7x7_3 = ConvBNLayer(
-            num_channels=channels_7x7,
-            num_filters=192,
-            filter_size=(7, 1),
-            stride=1,
-            padding=(3, 0),
-            act="relu")
+        self.branch7x7_1 = ConvNormActivation(
+            in_channels=num_channels,
+            out_channels=channels_7x7,
+            kernel_size=1,
+            stride=1,
+            padding=0,
+            activation_layer=nn.ReLU)
+        self.branch7x7_2 = ConvNormActivation(
+            in_channels=channels_7x7,
+            out_channels=channels_7x7,
+            kernel_size=(1, 7),
+            stride=1,
+            padding=(0, 3),
+            activation_layer=nn.ReLU)
+        self.branch7x7_3 = ConvNormActivation(
+            in_channels=channels_7x7,
+            out_channels=192,
+            kernel_size=(7, 1),
+            stride=1,
+            padding=(3, 0),
+            activation_layer=nn.ReLU)

-        self.branch7x7dbl_1 = ConvBNLayer(
-            num_channels=num_channels,
-            num_filters=channels_7x7,
-            filter_size=1,
-            act="relu")
-        self.branch7x7dbl_2 = ConvBNLayer(
-            num_channels=channels_7x7,
-            num_filters=channels_7x7,
-            filter_size=(7, 1),
-            padding=(3, 0),
-            act="relu")
-        self.branch7x7dbl_3 = ConvBNLayer(
-            num_channels=channels_7x7,
-            num_filters=channels_7x7,
-            filter_size=(1, 7),
-            padding=(0, 3),
-            act="relu")
-        self.branch7x7dbl_4 = ConvBNLayer(
-            num_channels=channels_7x7,
-            num_filters=channels_7x7,
-            filter_size=(7, 1),
-            padding=(3, 0),
-            act="relu")
-        self.branch7x7dbl_5 = ConvBNLayer(
-            num_channels=channels_7x7,
-            num_filters=192,
-            filter_size=(1, 7),
-            padding=(0, 3),
-            act="relu")
+        self.branch7x7dbl_1 = ConvNormActivation(
+            in_channels=num_channels,
+            out_channels=channels_7x7,
+            kernel_size=1,
+            padding=0,
+            activation_layer=nn.ReLU)
+        self.branch7x7dbl_2 = ConvNormActivation(
+            in_channels=channels_7x7,
+            out_channels=channels_7x7,
+            kernel_size=(7, 1),
+            padding=(3, 0),
+            activation_layer=nn.ReLU)
+        self.branch7x7dbl_3 = ConvNormActivation(
+            in_channels=channels_7x7,
+            out_channels=channels_7x7,
+            kernel_size=(1, 7),
+            padding=(0, 3),
+            activation_layer=nn.ReLU)
+        self.branch7x7dbl_4 = ConvNormActivation(
+            in_channels=channels_7x7,
+            out_channels=channels_7x7,
+            kernel_size=(7, 1),
+            padding=(3, 0),
+            activation_layer=nn.ReLU)
+        self.branch7x7dbl_5 = ConvNormActivation(
+            in_channels=channels_7x7,
+            out_channels=192,
+            kernel_size=(1, 7),
+            padding=(0, 3),
+            activation_layer=nn.ReLU)

         self.branch_pool = AvgPool2D(
             kernel_size=3, stride=1, padding=1, exclusive=False)
-        self.branch_pool_conv = ConvBNLayer(
-            num_channels=num_channels,
-            num_filters=192,
-            filter_size=1,
-            act="relu")
+        self.branch_pool_conv = ConvNormActivation(
+            in_channels=num_channels,
+            out_channels=192,
+            kernel_size=1,
+            padding=0,
+            activation_layer=nn.ReLU)

     def forward(self, x):
         branch1x1 = self.branch1x1(x)
...
@@ -296,40 +296,46 @@ class InceptionC(nn.Layer):
 class InceptionD(nn.Layer):
     def __init__(self, num_channels):
         super().__init__()
-        self.branch3x3_1 = ConvBNLayer(
-            num_channels=num_channels,
-            num_filters=192,
-            filter_size=1,
-            act="relu")
-        self.branch3x3_2 = ConvBNLayer(
-            num_channels=192,
-            num_filters=320,
-            filter_size=3,
-            stride=2,
-            act="relu")
+        self.branch3x3_1 = ConvNormActivation(
+            in_channels=num_channels,
+            out_channels=192,
+            kernel_size=1,
+            padding=0,
+            activation_layer=nn.ReLU)
+        self.branch3x3_2 = ConvNormActivation(
+            in_channels=192,
+            out_channels=320,
+            kernel_size=3,
+            stride=2,
+            padding=0,
+            activation_layer=nn.ReLU)

-        self.branch7x7x3_1 = ConvBNLayer(
-            num_channels=num_channels,
-            num_filters=192,
-            filter_size=1,
-            act="relu")
-        self.branch7x7x3_2 = ConvBNLayer(
-            num_channels=192,
-            num_filters=192,
-            filter_size=(1, 7),
-            padding=(0, 3),
-            act="relu")
-        self.branch7x7x3_3 = ConvBNLayer(
-            num_channels=192,
-            num_filters=192,
-            filter_size=(7, 1),
-            padding=(3, 0),
-            act="relu")
-        self.branch7x7x3_4 = ConvBNLayer(
-            num_channels=192,
-            num_filters=192,
-            filter_size=3,
-            stride=2,
-            act="relu")
+        self.branch7x7x3_1 = ConvNormActivation(
+            in_channels=num_channels,
+            out_channels=192,
+            kernel_size=1,
+            padding=0,
+            activation_layer=nn.ReLU)
+        self.branch7x7x3_2 = ConvNormActivation(
+            in_channels=192,
+            out_channels=192,
+            kernel_size=(1, 7),
+            padding=(0, 3),
+            activation_layer=nn.ReLU)
+        self.branch7x7x3_3 = ConvNormActivation(
+            in_channels=192,
+            out_channels=192,
+            kernel_size=(7, 1),
+            padding=(3, 0),
+            activation_layer=nn.ReLU)
+        self.branch7x7x3_4 = ConvNormActivation(
+            in_channels=192,
+            out_channels=192,
+            kernel_size=3,
+            stride=2,
+            padding=0,
+            activation_layer=nn.ReLU)

         self.branch_pool = MaxPool2D(kernel_size=3, stride=2)

     def forward(self, x):
...
@@ -350,59 +356,64 @@ class InceptionD(nn.Layer):
 class InceptionE(nn.Layer):
     def __init__(self, num_channels):
         super().__init__()
-        self.branch1x1 = ConvBNLayer(
-            num_channels=num_channels,
-            num_filters=320,
-            filter_size=1,
-            act="relu")
-        self.branch3x3_1 = ConvBNLayer(
-            num_channels=num_channels,
-            num_filters=384,
-            filter_size=1,
-            act="relu")
-        self.branch3x3_2a = ConvBNLayer(
-            num_channels=384,
-            num_filters=384,
-            filter_size=(1, 3),
-            padding=(0, 1),
-            act="relu")
-        self.branch3x3_2b = ConvBNLayer(
-            num_channels=384,
-            num_filters=384,
-            filter_size=(3, 1),
-            padding=(1, 0),
-            act="relu")
+        self.branch1x1 = ConvNormActivation(
+            in_channels=num_channels,
+            out_channels=320,
+            kernel_size=1,
+            padding=0,
+            activation_layer=nn.ReLU)
+        self.branch3x3_1 = ConvNormActivation(
+            in_channels=num_channels,
+            out_channels=384,
+            kernel_size=1,
+            padding=0,
+            activation_layer=nn.ReLU)
+        self.branch3x3_2a = ConvNormActivation(
+            in_channels=384,
+            out_channels=384,
+            kernel_size=(1, 3),
+            padding=(0, 1),
+            activation_layer=nn.ReLU)
+        self.branch3x3_2b = ConvNormActivation(
+            in_channels=384,
+            out_channels=384,
+            kernel_size=(3, 1),
+            padding=(1, 0),
+            activation_layer=nn.ReLU)

-        self.branch3x3dbl_1 = ConvBNLayer(
-            num_channels=num_channels,
-            num_filters=448,
-            filter_size=1,
-            act="relu")
-        self.branch3x3dbl_2 = ConvBNLayer(
-            num_channels=448,
-            num_filters=384,
-            filter_size=3,
-            padding=1,
-            act="relu")
-        self.branch3x3dbl_3a = ConvBNLayer(
-            num_channels=384,
-            num_filters=384,
-            filter_size=(1, 3),
-            padding=(0, 1),
-            act="relu")
-        self.branch3x3dbl_3b = ConvBNLayer(
-            num_channels=384,
-            num_filters=384,
-            filter_size=(3, 1),
-            padding=(1, 0),
-            act="relu")
+        self.branch3x3dbl_1 = ConvNormActivation(
+            in_channels=num_channels,
+            out_channels=448,
+            kernel_size=1,
+            padding=0,
+            activation_layer=nn.ReLU)
+        self.branch3x3dbl_2 = ConvNormActivation(
+            in_channels=448,
+            out_channels=384,
+            kernel_size=3,
+            padding=1,
+            activation_layer=nn.ReLU)
+        self.branch3x3dbl_3a = ConvNormActivation(
+            in_channels=384,
+            out_channels=384,
+            kernel_size=(1, 3),
+            padding=(0, 1),
+            activation_layer=nn.ReLU)
+        self.branch3x3dbl_3b = ConvNormActivation(
+            in_channels=384,
+            out_channels=384,
+            kernel_size=(3, 1),
+            padding=(1, 0),
+            activation_layer=nn.ReLU)

         self.branch_pool = AvgPool2D(
             kernel_size=3, stride=1, padding=1, exclusive=False)
-        self.branch_pool_conv = ConvBNLayer(
-            num_channels=num_channels,
-            num_filters=192,
-            filter_size=1,
-            act="relu")
+        self.branch_pool_conv = ConvNormActivation(
+            in_channels=num_channels,
+            out_channels=192,
+            kernel_size=1,
+            padding=0,
+            activation_layer=nn.ReLU)

     def forward(self, x):
         branch1x1 = self.branch1x1(x)
...
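One detail worth noting in the InceptionV3 rewrite above: the old ConvBNLayer defaulted to padding=0, whereas ConvNormActivation derives padding as (kernel_size - 1) // 2 * dilation when none is given. That is why every migrated call spells padding out explicitly. A quick sketch of the difference, with an illustrative input shape:

```python
import paddle
from paddle.vision.ops import ConvNormActivation

# With no padding argument, a 3x3 kernel gets padding=1 ("same"-style);
# the Inception stem expects "valid" convolutions, i.e. padding=0.
derived_pad = ConvNormActivation(3, 32, kernel_size=3, stride=2)             # padding -> 1
valid_pad = ConvNormActivation(3, 32, kernel_size=3, stride=2, padding=0)

x = paddle.rand([1, 3, 299, 299])
print(derived_pad(x).shape)  # [1, 32, 150, 150]
print(valid_pad(x).shape)    # [1, 32, 149, 149]
```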
python/paddle/vision/models/mobilenetv1.py @ f6219dda
@@ -16,59 +16,31 @@ import paddle
 import paddle.nn as nn
 from paddle.utils.download import get_weights_path_from_url

+from ..ops import ConvNormActivation
+
 __all__ = []

 model_urls = {
     'mobilenetv1_1.0': (
-        'https://paddle-hapi.bj.bcebos.com/models/mobilenet_v1_x1.0.pdparams',
-        '42a154c2f26f86e7457d6daded114e8c')
+        'https://paddle-hapi.bj.bcebos.com/models/mobilenetv1_1.0.pdparams',
+        '3033ab1975b1670bef51545feb65fc45')
 }


-class ConvBNLayer(nn.Layer):
-    def __init__(self,
-                 in_channels,
-                 out_channels,
-                 kernel_size,
-                 stride,
-                 padding,
-                 num_groups=1):
-        super(ConvBNLayer, self).__init__()
-
-        self._conv = nn.Conv2D(
-            in_channels,
-            out_channels,
-            kernel_size,
-            stride=stride,
-            padding=padding,
-            groups=num_groups,
-            bias_attr=False)
-
-        self._norm_layer = nn.BatchNorm2D(out_channels)
-        self._act = nn.ReLU()
-
-    def forward(self, x):
-        x = self._conv(x)
-        x = self._norm_layer(x)
-        x = self._act(x)
-        return x
-
-
 class DepthwiseSeparable(nn.Layer):
     def __init__(self, in_channels, out_channels1, out_channels2, num_groups,
                  stride, scale):
         super(DepthwiseSeparable, self).__init__()

-        self._depthwise_conv = ConvBNLayer(
+        self._depthwise_conv = ConvNormActivation(
             in_channels,
             int(out_channels1 * scale),
             kernel_size=3,
             stride=stride,
             padding=1,
-            num_groups=int(num_groups * scale))
+            groups=int(num_groups * scale))

-        self._pointwise_conv = ConvBNLayer(
+        self._pointwise_conv = ConvNormActivation(
             int(out_channels1 * scale),
             int(out_channels2 * scale),
             kernel_size=1,
...
@@ -94,9 +66,15 @@ class MobileNetV1(nn.Layer):
     Examples:
         .. code-block:: python

+            import paddle
             from paddle.vision.models import MobileNetV1

             model = MobileNetV1()
+
+            x = paddle.rand([1, 3, 224, 224])
+            out = model(x)
+
+            print(out.shape)
     """

     def __init__(self, scale=1.0, num_classes=1000, with_pool=True):
...
@@ -106,7 +84,7 @@ class MobileNetV1(nn.Layer):
         self.num_classes = num_classes
         self.with_pool = with_pool

-        self.conv1 = ConvBNLayer(
+        self.conv1 = ConvNormActivation(
             in_channels=3,
             out_channels=int(32 * scale),
             kernel_size=3,
...
@@ -257,6 +235,7 @@ def mobilenet_v1(pretrained=False, scale=1.0, **kwargs):
     Examples:
         .. code-block:: python

+            import paddle
             from paddle.vision.models import mobilenet_v1

             # build model
...
@@ -266,7 +245,12 @@ def mobilenet_v1(pretrained=False, scale=1.0, **kwargs):
             # model = mobilenet_v1(pretrained=True)

             # build mobilenet v1 with scale=0.5
-            model = mobilenet_v1(scale=0.5)
+            model_scale = mobilenet_v1(scale=0.5)
+
+            x = paddle.rand([1, 3, 224, 224])
+            out = model(x)
+
+            print(out.shape)
     """
     model = _mobilenet(
         'mobilenetv1_' + str(scale), pretrained, scale=scale, **kwargs)
...
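The MobileNetV1 change also renames the grouping argument: the deleted ConvBNLayer took num_groups, while ConvNormActivation takes groups. A minimal sketch of the depthwise-separable pair expressed with the shared helper — channel counts here are illustrative, not taken from the diff:

```python
import paddle
from paddle.vision.ops import ConvNormActivation

# depthwise 3x3: groups equals the channel count, so each filter sees one channel
depthwise = ConvNormActivation(32, 32, kernel_size=3, stride=1, padding=1, groups=32)
# pointwise 1x1: mixes the channels back together
pointwise = ConvNormActivation(32, 64, kernel_size=1)

x = paddle.rand([1, 32, 56, 56])
print(pointwise(depthwise(x)).shape)  # [1, 64, 56, 56]
```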
python/paddle/vision/models/mobilenetv2.py @ f6219dda
@@ -17,6 +17,7 @@ import paddle.nn as nn
 from paddle.utils.download import get_weights_path_from_url

 from .utils import _make_divisible
+from ..ops import ConvNormActivation

 __all__ = []
...
@@ -27,29 +28,6 @@ model_urls = {
 }


-class ConvBNReLU(nn.Sequential):
-    def __init__(self,
-                 in_planes,
-                 out_planes,
-                 kernel_size=3,
-                 stride=1,
-                 groups=1,
-                 norm_layer=nn.BatchNorm2D):
-        padding = (kernel_size - 1) // 2
-
-        super(ConvBNReLU, self).__init__(
-            nn.Conv2D(
-                in_planes,
-                out_planes,
-                kernel_size,
-                stride,
-                padding,
-                groups=groups,
-                bias_attr=False),
-            norm_layer(out_planes),
-            nn.ReLU6())
-
-
 class InvertedResidual(nn.Layer):
     def __init__(self,
                  inp,
...
@@ -67,15 +45,20 @@ class InvertedResidual(nn.Layer):
         layers = []
         if expand_ratio != 1:
             layers.append(
-                ConvBNReLU(
-                    inp, hidden_dim, kernel_size=1, norm_layer=norm_layer))
+                ConvNormActivation(
+                    inp,
+                    hidden_dim,
+                    kernel_size=1,
+                    norm_layer=norm_layer,
+                    activation_layer=nn.ReLU6))
         layers.extend([
-            ConvBNReLU(
+            ConvNormActivation(
                 hidden_dim,
                 hidden_dim,
                 stride=stride,
                 groups=hidden_dim,
-                norm_layer=norm_layer),
+                norm_layer=norm_layer,
+                activation_layer=nn.ReLU6),
             nn.Conv2D(
                 hidden_dim, oup, 1, 1, 0, bias_attr=False),
             norm_layer(oup),
...
@@ -90,23 +73,30 @@ class InvertedResidual(nn.Layer):
 class MobileNetV2(nn.Layer):
-    def __init__(self, scale=1.0, num_classes=1000, with_pool=True):
-        """MobileNetV2 model from
-        `"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_.
-
-        Args:
-            scale (float): scale of channels in each layer. Default: 1.0.
-            num_classes (int): output dim of last fc layer. If num_classes <=0, last fc layer
-                                will not be defined. Default: 1000.
-            with_pool (bool): use pool before the last fc layer or not. Default: True.
-
-        Examples:
-            .. code-block:: python
-
-                from paddle.vision.models import MobileNetV2
-
-                model = MobileNetV2()
-        """
+    """MobileNetV2 model from
+    `"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_.
+
+    Args:
+        scale (float): scale of channels in each layer. Default: 1.0.
+        num_classes (int): output dim of last fc layer. If num_classes <=0, last fc layer
+                            will not be defined. Default: 1000.
+        with_pool (bool): use pool before the last fc layer or not. Default: True.
+
+    Examples:
+        .. code-block:: python
+
+            import paddle
+            from paddle.vision.models import MobileNetV2
+
+            model = MobileNetV2()
+
+            x = paddle.rand([1, 3, 224, 224])
+            out = model(x)
+
+            print(out.shape)
+    """
+
+    def __init__(self, scale=1.0, num_classes=1000, with_pool=True):
         super(MobileNetV2, self).__init__()
         self.num_classes = num_classes
         self.with_pool = with_pool
...
@@ -130,8 +120,12 @@ class MobileNetV2(nn.Layer):
         self.last_channel = _make_divisible(last_channel * max(1.0, scale),
                                             round_nearest)
         features = [
-            ConvBNReLU(
-                3, input_channel, stride=2, norm_layer=norm_layer)
+            ConvNormActivation(
+                3,
+                input_channel,
+                stride=2,
+                norm_layer=norm_layer,
+                activation_layer=nn.ReLU6)
         ]

         for t, c, n, s in inverted_residual_setting:
...
@@ -148,11 +142,12 @@ class MobileNetV2(nn.Layer):
             input_channel = output_channel

         features.append(
-            ConvBNReLU(
+            ConvNormActivation(
                 input_channel,
                 self.last_channel,
                 kernel_size=1,
-                norm_layer=norm_layer))
+                norm_layer=norm_layer,
+                activation_layer=nn.ReLU6))

         self.features = nn.Sequential(*features)
...
@@ -199,6 +194,7 @@ def mobilenet_v2(pretrained=False, scale=1.0, **kwargs):
     Examples:
         .. code-block:: python

+            import paddle
             from paddle.vision.models import mobilenet_v2

             # build model
...
@@ -209,6 +205,11 @@ def mobilenet_v2(pretrained=False, scale=1.0, **kwargs):
             # build mobilenet v2 with scale=0.5
             model = mobilenet_v2(scale=0.5)
+
+            x = paddle.rand([1, 3, 224, 224])
+            out = model(x)
+
+            print(out.shape)
     """
     model = _mobilenet(
         'mobilenetv2_' + str(scale), pretrained, scale=scale, **kwargs)
...
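Because ConvNormActivation defaults its activation to nn.ReLU, every call migrated from MobileNetV2's ConvBNReLU has to pass activation_layer=nn.ReLU6 explicitly to preserve the clipped activation. A sketch of the equivalence, with illustrative channel numbers:

```python
import paddle.nn as nn
from paddle.vision.ops import ConvNormActivation

# old: ConvBNReLU(32, 192, kernel_size=1, norm_layer=nn.BatchNorm2D)
# new: the ReLU6 must be named, or the block silently becomes Conv-BN-ReLU
block = ConvNormActivation(32, 192, kernel_size=1,
                           norm_layer=nn.BatchNorm2D,
                           activation_layer=nn.ReLU6)
```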
python/paddle/vision/models/shufflenetv2.py @ f6219dda
@@ -18,37 +18,50 @@ from __future__ import print_function
 import paddle
 import paddle.nn as nn
-from paddle.fluid.param_attr import ParamAttr
-from paddle.nn import AdaptiveAvgPool2D, BatchNorm, Conv2D, Linear, MaxPool2D
+from paddle.nn import AdaptiveAvgPool2D, Linear, MaxPool2D
 from paddle.utils.download import get_weights_path_from_url

+from ..ops import ConvNormActivation
+
 __all__ = []

 model_urls = {
     "shufflenet_v2_x0_25": (
-        "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x0_25_pretrained.pdparams",
-        "e753404cbd95027759c5f56ecd6c9c4b", ),
+        "https://paddle-hapi.bj.bcebos.com/models/shufflenet_v2_x0_25.pdparams",
+        "1e509b4c140eeb096bb16e214796d03b", ),
     "shufflenet_v2_x0_33": (
-        "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x0_33_pretrained.pdparams",
-        "776e3cf9a4923abdfce789c45b8fe1f2", ),
+        "https://paddle-hapi.bj.bcebos.com/models/shufflenet_v2_x0_33.pdparams",
+        "3d7b3ab0eaa5c0927ff1026d31b729bd", ),
     "shufflenet_v2_x0_5": (
-        "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x0_5_pretrained.pdparams",
-        "e3649cf531566917e2969487d2bc6b60", ),
+        "https://paddle-hapi.bj.bcebos.com/models/shufflenet_v2_x0_5.pdparams",
+        "5e5cee182a7793c4e4c73949b1a71bd4", ),
     "shufflenet_v2_x1_0": (
-        "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x1_0_pretrained.pdparams",
-        "7821c348ea34e58847c43a08a4ac0bdf", ),
+        "https://paddle-hapi.bj.bcebos.com/models/shufflenet_v2_x1_0.pdparams",
+        "122d42478b9e81eb49f8a9ede327b1a4", ),
     "shufflenet_v2_x1_5": (
-        "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x1_5_pretrained.pdparams",
-        "93a07fa557ab2d8803550f39e5b6c391", ),
+        "https://paddle-hapi.bj.bcebos.com/models/shufflenet_v2_x1_5.pdparams",
+        "faced5827380d73531d0ee027c67826d", ),
     "shufflenet_v2_x2_0": (
-        "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_x2_0_pretrained.pdparams",
-        "4ab1f622fd0d341e0f84b4e057797563", ),
+        "https://paddle-hapi.bj.bcebos.com/models/shufflenet_v2_x2_0.pdparams",
+        "cd3dddcd8305e7bcd8ad14d1c69a5784", ),
     "shufflenet_v2_swish": (
-        "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/ShuffleNetV2_swish_pretrained.pdparams",
-        "daff38b3df1b3748fccbb13cfdf02519", ),
+        "https://paddle-hapi.bj.bcebos.com/models/shufflenet_v2_swish.pdparams",
+        "adde0aa3b023e5b0c94a68be1c394b84", ),
 }


+def create_activation_layer(act):
+    if act == "swish":
+        return nn.Swish
+    elif act == "relu":
+        return nn.ReLU
+    elif act is None:
+        return None
+    else:
+        raise RuntimeError(
+            "The activation function is not supported: {}".format(act))
+
+
 def channel_shuffle(x, groups):
     batch_size, num_channels, height, width = x.shape[0:4]
     channels_per_group = num_channels // groups
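The new create_activation_layer factory bridges ShuffleNetV2's string-valued act argument and ConvNormActivation's class-valued activation_layer argument. A usage sketch, assuming the imports at the top of this file (the 3→24 channel shapes mirror the conv1 definition further below):

```python
activation_layer = create_activation_layer("swish")  # -> nn.Swish

conv1 = ConvNormActivation(in_channels=3, out_channels=24, kernel_size=3,
                           stride=2, padding=1,
                           activation_layer=activation_layer)
```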
...
@@ -65,61 +78,37 @@ def channel_shuffle(x, groups):
     return x


-class ConvBNLayer(nn.Layer):
-    def __init__(self,
-                 in_channels,
-                 out_channels,
-                 kernel_size,
-                 stride,
-                 padding,
-                 groups=1,
-                 act=None):
-        super(ConvBNLayer, self).__init__()
-        self._conv = Conv2D(
-            in_channels=in_channels,
-            out_channels=out_channels,
-            kernel_size=kernel_size,
-            stride=stride,
-            padding=padding,
-            groups=groups,
-            weight_attr=ParamAttr(initializer=nn.initializer.KaimingNormal()),
-            bias_attr=False, )
-
-        self._batch_norm = BatchNorm(out_channels, act=act)
-
-    def forward(self, inputs):
-        x = self._conv(inputs)
-        x = self._batch_norm(x)
-        return x
-
-
 class InvertedResidual(nn.Layer):
-    def __init__(self, in_channels, out_channels, stride, act="relu"):
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 stride,
+                 activation_layer=nn.ReLU):
         super(InvertedResidual, self).__init__()
-        self._conv_pw = ConvBNLayer(
+        self._conv_pw = ConvNormActivation(
             in_channels=in_channels // 2,
             out_channels=out_channels // 2,
             kernel_size=1,
             stride=1,
             padding=0,
             groups=1,
-            act=act)
-        self._conv_dw = ConvBNLayer(
+            activation_layer=activation_layer)
+        self._conv_dw = ConvNormActivation(
             in_channels=out_channels // 2,
             out_channels=out_channels // 2,
             kernel_size=3,
             stride=stride,
             padding=1,
             groups=out_channels // 2,
-            act=None)
-        self._conv_linear = ConvBNLayer(
+            activation_layer=None)
+        self._conv_linear = ConvNormActivation(
             in_channels=out_channels // 2,
             out_channels=out_channels // 2,
             kernel_size=1,
             stride=1,
             padding=0,
             groups=1,
-            act=act)
+            activation_layer=activation_layer)

     def forward(self, inputs):
         x1, x2 = paddle.split(
...
@@ -134,51 +123,55 @@ class InvertedResidual(nn.Layer):
 class InvertedResidualDS(nn.Layer):
-    def __init__(self, in_channels, out_channels, stride, act="relu"):
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 stride,
+                 activation_layer=nn.ReLU):
         super(InvertedResidualDS, self).__init__()

         # branch1
-        self._conv_dw_1 = ConvBNLayer(
+        self._conv_dw_1 = ConvNormActivation(
             in_channels=in_channels,
             out_channels=in_channels,
             kernel_size=3,
             stride=stride,
             padding=1,
             groups=in_channels,
-            act=None)
-        self._conv_linear_1 = ConvBNLayer(
+            activation_layer=None)
+        self._conv_linear_1 = ConvNormActivation(
             in_channels=in_channels,
             out_channels=out_channels // 2,
             kernel_size=1,
             stride=1,
             padding=0,
             groups=1,
-            act=act)
+            activation_layer=activation_layer)

         # branch2
-        self._conv_pw_2 = ConvBNLayer(
+        self._conv_pw_2 = ConvNormActivation(
             in_channels=in_channels,
             out_channels=out_channels // 2,
             kernel_size=1,
             stride=1,
             padding=0,
             groups=1,
-            act=act)
-        self._conv_dw_2 = ConvBNLayer(
+            activation_layer=activation_layer)
+        self._conv_dw_2 = ConvNormActivation(
             in_channels=out_channels // 2,
             out_channels=out_channels // 2,
             kernel_size=3,
             stride=stride,
             padding=1,
             groups=out_channels // 2,
-            act=None)
-        self._conv_linear_2 = ConvBNLayer(
+            activation_layer=None)
+        self._conv_linear_2 = ConvNormActivation(
             in_channels=out_channels // 2,
             out_channels=out_channels // 2,
             kernel_size=1,
             stride=1,
             padding=0,
             groups=1,
-            act=act)
+            activation_layer=activation_layer)

     def forward(self, inputs):
         x1 = self._conv_dw_1(inputs)
...
@@ -221,6 +214,7 @@ class ShuffleNetV2(nn.Layer):
         self.num_classes = num_classes
         self.with_pool = with_pool
         stage_repeats = [4, 8, 4]
+        activation_layer = create_activation_layer(act)

         if scale == 0.25:
             stage_out_channels = [-1, 24, 24, 48, 96, 512]
...
@@ -238,13 +232,13 @@ class ShuffleNetV2(nn.Layer):
             raise NotImplementedError("This scale size:[" + str(scale) +
                                       "] is not implemented!")
         # 1. conv1
-        self._conv1 = ConvBNLayer(
+        self._conv1 = ConvNormActivation(
             in_channels=3,
             out_channels=stage_out_channels[1],
             kernel_size=3,
             stride=2,
             padding=1,
-            act=act)
+            activation_layer=activation_layer)
         self._max_pool = MaxPool2D(kernel_size=3, stride=2, padding=1)

         # 2. bottleneck sequences
...
@@ -257,7 +251,7 @@ class ShuffleNetV2(nn.Layer):
                         in_channels=stage_out_channels[stage_id + 1],
                         out_channels=stage_out_channels[stage_id + 2],
                         stride=2,
-                        act=act),
+                        activation_layer=activation_layer),
                     name=str(stage_id + 2) + "_" + str(i + 1))
             else:
                 block = self.add_sublayer(
...
@@ -265,17 +259,17 @@ class ShuffleNetV2(nn.Layer):
                         in_channels=stage_out_channels[stage_id + 2],
                         out_channels=stage_out_channels[stage_id + 2],
                         stride=1,
-                        act=act),
+                        activation_layer=activation_layer),
                     name=str(stage_id + 2) + "_" + str(i + 1))
             self._block_list.append(block)
         # 3. last_conv
-        self._last_conv = ConvBNLayer(
+        self._last_conv = ConvNormActivation(
             in_channels=stage_out_channels[-2],
             out_channels=stage_out_channels[-1],
             kernel_size=1,
             stride=1,
             padding=0,
-            act=act)
+            activation_layer=activation_layer)
         # 4. pool
         if with_pool:
             self._pool2d_avg = AdaptiveAvgPool2D(1)
...
python/paddle/vision/ops.py @ f6219dda
@@ -1335,13 +1335,13 @@ class ConvNormActivation(Sequential):
     Args:
         in_channels (int): Number of channels in the input image
         out_channels (int): Number of channels produced by the Convolution-Normalzation-Activation block
-        kernel_size: (int, optional): Size of the convolving kernel. Default: 3
-        stride (int, optional): Stride of the convolution. Default: 1
-        padding (int, tuple or str, optional): Padding added to all four sides of the input. Default: None,
+        kernel_size: (int|list|tuple, optional): Size of the convolving kernel. Default: 3
+        stride (int|list|tuple, optional): Stride of the convolution. Default: 1
+        padding (int|str|tuple|list, optional): Padding added to all four sides of the input. Default: None,
             in wich case it will calculated as ``padding = (kernel_size - 1) // 2 * dilation``
         groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
         norm_layer (Callable[..., paddle.nn.Layer], optional): Norm layer that will be stacked on top of the convolutiuon layer.
-            If ``None`` this layer wont be used. Default: ``paddle.nn.BatchNorm2d``
+            If ``None`` this layer wont be used. Default: ``paddle.nn.BatchNorm2D``
         activation_layer (Callable[..., paddle.nn.Layer], optional): Activation function which will be stacked on top of the normalization
             layer (if not ``None``), otherwise on top of the conv layer. If ``None`` this layer wont be used. Default: ``paddle.nn.ReLU``
         dilation (int): Spacing between kernel elements. Default: 1
...
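To round off the docstring fixes, a short sketch of the documented defaults — a 3x3 kernel with derived padding, BatchNorm2D, and ReLU — and of disabling either stage by passing None, as the docstring describes (channel numbers are illustrative):

```python
import paddle.nn as nn
from paddle.vision.ops import ConvNormActivation

default_block = ConvNormActivation(16, 32)                        # 3x3 conv + BatchNorm2D + ReLU
linear_block = ConvNormActivation(16, 32, activation_layer=None)  # conv + norm, no activation
no_norm_block = ConvNormActivation(16, 32, norm_layer=None)       # conv + activation, no norm
```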