Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
PaddleClas
提交
fe302aec
P
PaddleClas
项目概览
PaddlePaddle
/
PaddleClas
1 年多 前同步成功
通知
115
Star
4999
Fork
1114
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
19
列表
看板
标记
里程碑
合并请求
6
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
PaddleClas
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
19
Issue
19
列表
看板
标记
里程碑
合并请求
6
合并请求
6
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
fe302aec
编写于
6月 29, 2020
作者:
L
littletomatodonkey
提交者:
GitHub
6月 29, 2020
浏览文件
操作
浏览文件
下载
差异文件
Merge pull request #183 from wqz960/PaddleClas-dy
add Inception, ResNeXt101_wsl, EfficientNet and other models
上级
be35b7cc
a4ee2f2c
变更
10
展开全部
隐藏空白更改
内联
并排
Showing
10 changed files
with
2531 additions
and
1989 deletions
+2531
-1989
ppcls/modeling/architectures/alexnet.py
ppcls/modeling/architectures/alexnet.py
+100
-169
ppcls/modeling/architectures/darknet.py
ppcls/modeling/architectures/darknet.py
+146
-106
ppcls/modeling/architectures/efficientnet.py
ppcls/modeling/architectures/efficientnet.py
+638
-364
ppcls/modeling/architectures/googlenet.py
ppcls/modeling/architectures/googlenet.py
+196
-225
ppcls/modeling/architectures/inception_v4.py
ppcls/modeling/architectures/inception_v4.py
+316
-231
ppcls/modeling/architectures/resnext101_wsl.py
ppcls/modeling/architectures/resnext101_wsl.py
+237
-173
ppcls/modeling/architectures/squeezenet.py
ppcls/modeling/architectures/squeezenet.py
+136
-118
ppcls/modeling/architectures/vgg.py
ppcls/modeling/architectures/vgg.py
+124
-101
ppcls/modeling/architectures/xception.py
ppcls/modeling/architectures/xception.py
+302
-231
ppcls/modeling/architectures/xception_deeplab.py
ppcls/modeling/architectures/xception_deeplab.py
+336
-271
未找到文件。
ppcls/modeling/architectures/alexnet.py
浏览文件 @
fe302aec
#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from
__future__
import
absolute_import
from
__future__
import
division
from
__future__
import
print_function
import
math
import
paddle
import
paddle.fluid
as
fluid
from
paddle.fluid.param_attr
import
ParamAttr
from
paddle.fluid.dygraph.nn
import
Conv2D
,
Pool2D
,
BatchNorm
,
Linear
,
Dropout
import
math
__all__
=
[
'AlexNet'
]
class AlexNet():
    """AlexNet built with the static-graph ``fluid.layers`` API.

    Call :meth:`net` to append the AlexNet topology to the current program
    and obtain the classification logits.
    """

    def __init__(self):
        # The static-graph builder keeps no state; everything happens in net().
        pass

    def _uniform_attrs(self, stdv, layer_name):
        """Return a (param_attr, bias_attr) pair for one layer.

        Both weight and bias use Uniform(-stdv, stdv) initialization and the
        PaddleClas naming convention "<layer>_weights" / "<layer>_offset";
        the original code repeated this construction inline for every layer.
        """

        def _attr(suffix):
            return fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv),
                name=layer_name + suffix)

        return _attr("_weights"), _attr("_offset")

    def _conv_relu(self, input, num_filters, filter_size, stride, padding,
                   stdv, name):
        """conv2d + ReLU with uniformly initialized weight and bias."""
        w, b = self._uniform_attrs(stdv, name)
        return fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            groups=1,
            act='relu',
            param_attr=w,
            bias_attr=b)

    @staticmethod
    def _max_pool(input):
        """3x3 / stride-2 max pooling used after conv1, conv2 and conv5."""
        return fluid.layers.pool2d(
            input=input,
            pool_size=3,
            pool_stride=2,
            pool_padding=0,
            pool_type='max')

    def net(self, input, class_dim=1000):
        """Build the network.

        Args:
            input: 4-D feature-map variable, NCHW layout.
            class_dim: number of output classes (default 1000).

        Returns:
            The fc8 logits variable of shape [N, class_dim].
        """
        # stdv follows the fan-in rule 1/sqrt(C * k * k) of each layer.
        stdv = 1.0 / math.sqrt(input.shape[1] * 11 * 11)
        conv1 = self._conv_relu(input, 64, 11, 4, 2, stdv, "conv1")
        pool1 = self._max_pool(conv1)

        stdv = 1.0 / math.sqrt(pool1.shape[1] * 5 * 5)
        conv2 = self._conv_relu(pool1, 192, 5, 1, 2, stdv, "conv2")
        pool2 = self._max_pool(conv2)

        stdv = 1.0 / math.sqrt(pool2.shape[1] * 3 * 3)
        conv3 = self._conv_relu(pool2, 384, 3, 1, 1, stdv, "conv3")

        stdv = 1.0 / math.sqrt(conv3.shape[1] * 3 * 3)
        conv4 = self._conv_relu(conv3, 256, 3, 1, 1, stdv, "conv4")

        stdv = 1.0 / math.sqrt(conv4.shape[1] * 3 * 3)
        conv5 = self._conv_relu(conv4, 256, 3, 1, 1, stdv, "conv5")
        pool5 = self._max_pool(conv5)

        drop6 = fluid.layers.dropout(x=pool5, dropout_prob=0.5)
        stdv = 1.0 / math.sqrt(
            drop6.shape[1] * drop6.shape[2] * drop6.shape[3] * 1.0)
        w6, b6 = self._uniform_attrs(stdv, "fc6")
        fc6 = fluid.layers.fc(
            input=drop6, size=4096, act='relu', param_attr=w6, bias_attr=b6)

        drop7 = fluid.layers.dropout(x=fc6, dropout_prob=0.5)
        stdv = 1.0 / math.sqrt(drop7.shape[1] * 1.0)
        w7, b7 = self._uniform_attrs(stdv, "fc7")
        fc7 = fluid.layers.fc(
            input=drop7, size=4096, act='relu', param_attr=w7, bias_attr=b7)

        stdv = 1.0 / math.sqrt(fc7.shape[1] * 1.0)
        w8, b8 = self._uniform_attrs(stdv, "fc8")
        # fc8 emits raw logits: no activation here.
        out = fluid.layers.fc(
            input=fc7, size=class_dim, param_attr=w8, bias_attr=b8)
        return out
# NOTE(review): this re-declares __all__, which was already set to
# ['AlexNet'] earlier in the file; the duplicate is harmless but redundant.
__all__ = ["AlexNet"]
class ConvPoolLayer(fluid.dygraph.Layer):
    """Conv2D with Uniform(-stdv, stdv) initialization and optional
    activation, followed by a fixed 3x3 / stride-2 max pool — the building
    block for AlexNet stages 1, 2 and 5."""

    def __init__(self,
                 inputc_channels,
                 output_channels,
                 filter_size,
                 stride,
                 padding,
                 stdv,
                 groups=1,
                 act=None,
                 name=None):
        # NOTE(review): "inputc_channels" looks like a typo for
        # "input_channels", but renaming it would break keyword callers,
        # so the name is kept as-is.
        super(ConvPoolLayer, self).__init__()

        def _uniform_attr(suffix):
            # "<name>_weights" / "<name>_offset" per PaddleClas convention.
            return ParamAttr(
                name=name + suffix,
                initializer=fluid.initializer.Uniform(-stdv, stdv))

        self._conv = Conv2D(
            num_channels=inputc_channels,
            num_filters=output_channels,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            groups=groups,
            param_attr=_uniform_attr("_weights"),
            bias_attr=_uniform_attr("_offset"),
            act=act)
        self._pool = Pool2D(
            pool_size=3, pool_stride=2, pool_padding=0, pool_type="max")

    def forward(self, inputs):
        """Apply the convolution, then the max pool."""
        return self._pool(self._conv(inputs))
class AlexNetDY(fluid.dygraph.Layer):
    """Dygraph AlexNet: five convolutional stages followed by three fully
    connected layers with dropout, matching the classic topology."""

    def __init__(self, class_dim=1000):
        """Args:
            class_dim: number of output classes (default 1000).
        """
        super(AlexNetDY, self).__init__()

        def _uniform(layer, bound, suffix):
            # Uniform(-bound, bound) initializer with the PaddleClas
            # "<layer>_weights"/"<layer>_offset" parameter naming.
            return ParamAttr(
                name=layer + suffix,
                initializer=fluid.initializer.Uniform(-bound, bound))

        stdv = 1.0 / math.sqrt(3 * 11 * 11)
        self._conv1 = ConvPoolLayer(
            3, 64, 11, 4, 2, stdv, act="relu", name="conv1")

        stdv = 1.0 / math.sqrt(64 * 5 * 5)
        self._conv2 = ConvPoolLayer(
            64, 192, 5, 1, 2, stdv, act="relu", name="conv2")

        stdv = 1.0 / math.sqrt(192 * 3 * 3)
        self._conv3 = Conv2D(
            192, 384, 3, stride=1, padding=1,
            param_attr=_uniform("conv3", stdv, "_weights"),
            bias_attr=_uniform("conv3", stdv, "_offset"),
            act="relu")

        stdv = 1.0 / math.sqrt(384 * 3 * 3)
        self._conv4 = Conv2D(
            384, 256, 3, stride=1, padding=1,
            param_attr=_uniform("conv4", stdv, "_weights"),
            bias_attr=_uniform("conv4", stdv, "_offset"),
            act="relu")

        stdv = 1.0 / math.sqrt(256 * 3 * 3)
        self._conv5 = ConvPoolLayer(
            256, 256, 3, 1, 1, stdv, act="relu", name="conv5")

        # NOTE(review): the original computes stdv once from the 256*6*6
        # fan-in and reuses it for fc6, fc7 and fc8; preserved here.
        stdv = 1.0 / math.sqrt(256 * 6 * 6)
        self._drop1 = Dropout(p=0.5)
        self._fc6 = Linear(
            input_dim=256 * 6 * 6,
            output_dim=4096,
            param_attr=_uniform("fc6", stdv, "_weights"),
            bias_attr=_uniform("fc6", stdv, "_offset"),
            act="relu")
        self._drop2 = Dropout(p=0.5)
        self._fc7 = Linear(
            input_dim=4096,
            output_dim=4096,
            param_attr=_uniform("fc7", stdv, "_weights"),
            bias_attr=_uniform("fc7", stdv, "_offset"),
            act="relu")
        # Final classifier emits raw logits (no activation).
        self._fc8 = Linear(
            input_dim=4096,
            output_dim=class_dim,
            param_attr=_uniform("fc8", stdv, "_weights"),
            bias_attr=_uniform("fc8", stdv, "_offset"))

    def forward(self, inputs):
        """Run the backbone and return the fc8 output."""
        x = self._conv1(inputs)
        x = self._conv2(x)
        x = self._conv3(x)
        x = self._conv4(x)
        x = self._conv5(x)
        # NOTE(review): flatten with axis=0 folds the batch dimension into
        # the feature dimension as well; this mirrors the original code but
        # looks batch-size-dependent — confirm against callers.
        x = fluid.layers.flatten(x, axis=0)
        x = self._drop1(x)
        x = self._fc6(x)
        x = self._drop2(x)
        x = self._fc7(x)
        return self._fc8(x)
def AlexNet(**args):
    """Factory returning the dygraph AlexNet model.

    Keyword arguments are forwarded to AlexNetDY (e.g. class_dim).
    """
    return AlexNetDY(**args)
ppcls/modeling/architectures/darknet.py
浏览文件 @
fe302aec
#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from
__future__
import
absolute_import
from
__future__
import
division
from
__future__
import
print_function
import
paddle
import
paddle.fluid
as
fluid
from
paddle.fluid.param_attr
import
ParamAttr
from
paddle.fluid.dygraph.nn
import
Conv2D
,
Pool2D
,
BatchNorm
,
Linear
import
math
__all__
=
[
"DarkNet53"
]
class
DarkNet53
():
def
__init__
(
self
):
pass
def
net
(
self
,
input
,
class_dim
=
1000
):
DarkNet_cfg
=
{
53
:
([
1
,
2
,
8
,
8
,
4
],
self
.
basicblock
)}
stages
,
block_func
=
DarkNet_cfg
[
53
]
stages
=
stages
[
0
:
5
]
conv1
=
self
.
conv_bn_layer
(
input
,
ch_out
=
32
,
filter_size
=
3
,
stride
=
1
,
padding
=
1
,
name
=
"yolo_input"
)
conv
=
self
.
downsample
(
conv1
,
ch_out
=
conv1
.
shape
[
1
]
*
2
,
name
=
"yolo_input.downsample"
)
for
i
,
stage
in
enumerate
(
stages
):
conv
=
self
.
layer_warp
(
block_func
,
conv
,
32
*
(
2
**
i
),
stage
,
name
=
"stage.{}"
.
format
(
i
))
if
i
<
len
(
stages
)
-
1
:
# do not downsaple in the last stage
conv
=
self
.
downsample
(
conv
,
ch_out
=
conv
.
shape
[
1
]
*
2
,
name
=
"stage.{}.downsample"
.
format
(
i
))
pool
=
fluid
.
layers
.
pool2d
(
input
=
conv
,
pool_type
=
'avg'
,
global_pooling
=
True
)
stdv
=
1.0
/
math
.
sqrt
(
pool
.
shape
[
1
]
*
1.0
)
out
=
fluid
.
layers
.
fc
(
input
=
pool
,
size
=
class_dim
,
param_attr
=
ParamAttr
(
initializer
=
fluid
.
initializer
.
Uniform
(
-
stdv
,
stdv
),
name
=
'fc_weights'
),
bias_attr
=
ParamAttr
(
name
=
'fc_offset'
))
return
out
def
conv_bn_layer
(
self
,
input
,
ch_out
,
filter_size
,
stride
,
padding
,
name
=
None
):
conv
=
fluid
.
layers
.
conv2d
(
input
=
input
,
num_filters
=
ch_out
,
class
ConvBNLayer
(
fluid
.
dygraph
.
Layer
):
def
__init__
(
self
,
input_channels
,
output_channels
,
filter_size
,
stride
,
padding
,
name
=
None
):
super
(
ConvBNLayer
,
self
).
__init__
()
self
.
_conv
=
Conv2D
(
num_channels
=
input_channels
,
num_filters
=
output_channels
,
filter_size
=
filter_size
,
stride
=
stride
,
padding
=
padding
,
...
...
@@ -82,39 +28,133 @@ class DarkNet53():
bias_attr
=
False
)
bn_name
=
name
+
".bn"
out
=
fluid
.
layers
.
batch_norm
(
input
=
conv
,
act
=
'relu'
,
param_attr
=
ParamAttr
(
name
=
bn_name
+
'.scale'
),
bias_attr
=
ParamAttr
(
name
=
bn_name
+
'.offset'
),
moving_mean_name
=
bn_name
+
'.mean'
,
moving_variance_name
=
bn_name
+
'.var'
)
return
out
def
downsample
(
self
,
input
,
ch_out
,
filter_size
=
3
,
stride
=
2
,
padding
=
1
,
name
=
None
):
return
self
.
conv_bn_layer
(
input
,
ch_out
=
ch_out
,
filter_size
=
filter_size
,
stride
=
stride
,
padding
=
padding
,
name
=
name
)
def
basicblock
(
self
,
input
,
ch_out
,
name
=
None
):
conv1
=
self
.
conv_bn_layer
(
input
,
ch_out
,
1
,
1
,
0
,
name
=
name
+
".0"
)
conv2
=
self
.
conv_bn_layer
(
conv1
,
ch_out
*
2
,
3
,
1
,
1
,
name
=
name
+
".1"
)
out
=
fluid
.
layers
.
elementwise_add
(
x
=
input
,
y
=
conv2
,
act
=
None
)
return
out
def
layer_warp
(
self
,
block_func
,
input
,
ch_out
,
count
,
name
=
None
):
res_out
=
block_func
(
input
,
ch_out
,
name
=
'{}.0'
.
format
(
name
))
for
j
in
range
(
1
,
count
):
res_out
=
block_func
(
res_out
,
ch_out
,
name
=
'{}.{}'
.
format
(
name
,
j
))
return
res_out
self
.
_bn
=
BatchNorm
(
num_channels
=
output_channels
,
act
=
"relu"
,
param_attr
=
ParamAttr
(
name
=
bn_name
+
".scale"
),
bias_attr
=
ParamAttr
(
name
=
bn_name
+
".offset"
),
moving_mean_name
=
bn_name
+
".mean"
,
moving_variance_name
=
bn_name
+
".var"
)
def
forward
(
self
,
inputs
):
x
=
self
.
_conv
(
inputs
)
x
=
self
.
_bn
(
x
)
return
x
class BasicBlock(fluid.dygraph.Layer):
    """DarkNet residual block: a 1x1 squeeze conv then a 3x3 expand conv,
    with the block input added back onto the result."""

    def __init__(self, input_channels, output_channels, name=None):
        super(BasicBlock, self).__init__()
        # 1x1 conv narrows to output_channels; 3x3 conv restores 2x that,
        # so the residual add requires input_channels == output_channels * 2.
        self._conv1 = ConvBNLayer(
            input_channels, output_channels, 1, 1, 0, name=name + ".0")
        self._conv2 = ConvBNLayer(
            output_channels, output_channels * 2, 3, 1, 1, name=name + ".1")

    def forward(self, inputs):
        """Apply both convs and the residual connection."""
        y = self._conv2(self._conv1(inputs))
        return fluid.layers.elementwise_add(x=inputs, y=y)
class DarkNet(fluid.dygraph.Layer):
    """Dygraph DarkNet-53 classifier.

    Stage layout is [1, 2, 8, 8, 4] residual blocks; every stage except the
    last is followed by a stride-2 downsampling conv. The stem is a 3x3
    conv plus a stride-2 3x3 conv ("yolo_input" / "yolo_input.downsample").
    """

    def __init__(self, class_dim=1000):
        super(DarkNet, self).__init__()
        self.stages = [1, 2, 8, 8, 4]

        self._conv1 = ConvBNLayer(3, 32, 3, 1, 1, name="yolo_input")
        self._conv2 = ConvBNLayer(
            32, 64, 3, 2, 1, name="yolo_input.downsample")

        # Build the residual stages programmatically; attribute names
        # (_basic_block_{stage}{index}, _downsample_{stage}) and the
        # "stage.{i}.{j}" parameter names match the hand-written originals,
        # so the registered parameters are identical.
        for stage, count in enumerate(self.stages):
            channels = 64 * (2 ** stage)
            for idx in range(count):
                block = BasicBlock(
                    channels,
                    channels // 2,
                    name="stage.{}.{}".format(stage, idx))
                setattr(self,
                        "_basic_block_{}{}".format(stage, idx + 1),
                        block)
            if stage < len(self.stages) - 1:
                # No downsample after the final stage.
                down = ConvBNLayer(
                    channels, channels * 2, 3, 2, 1,
                    name="stage.{}.downsample".format(stage))
                setattr(self, "_downsample_{}".format(stage), down)

        self._pool = Pool2D(pool_type="avg", global_pooling=True)

        stdv = 1.0 / math.sqrt(1024.0)
        self._out = Linear(
            input_dim=1024,
            output_dim=class_dim,
            param_attr=ParamAttr(
                name="fc_weights",
                initializer=fluid.initializer.Uniform(-stdv, stdv)),
            bias_attr=ParamAttr(name="fc_offset"))

    def forward(self, inputs):
        """Return [N, class_dim] logits."""
        x = self._conv2(self._conv1(inputs))
        for stage, count in enumerate(self.stages):
            for idx in range(count):
                x = getattr(
                    self, "_basic_block_{}{}".format(stage, idx + 1))(x)
            if stage < len(self.stages) - 1:
                x = getattr(self, "_downsample_{}".format(stage))(x)
        x = self._pool(x)
        # Drop the 1x1 spatial dims left by global average pooling.
        x = fluid.layers.squeeze(x, axes=[2, 3])
        return self._out(x)
def DarkNet53(**args):
    """Factory returning the dygraph DarkNet-53 model.

    Keyword arguments are forwarded to DarkNet (e.g. class_dim).
    """
    return DarkNet(**args)
\ No newline at end of file
ppcls/modeling/architectures/efficientnet.py
浏览文件 @
fe302aec
此差异已折叠。
点击以展开。
ppcls/modeling/architectures/googlenet.py
浏览文件 @
fe302aec
#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from
__future__
import
absolute_import
from
__future__
import
division
from
__future__
import
print_function
import
paddle
import
paddle.fluid
as
fluid
from
paddle.fluid.param_attr
import
ParamAttr
__all__
=
[
'GoogLeNet'
]
class
GoogLeNet
():
def
__init__
(
self
):
pass
def
conv_layer
(
self
,
input
,
num_filters
,
filter_size
,
stride
=
1
,
groups
=
1
,
act
=
None
,
name
=
None
):
channels
=
input
.
shape
[
1
]
stdv
=
(
3.0
/
(
filter_size
**
2
*
channels
))
**
0.5
param_attr
=
ParamAttr
(
initializer
=
fluid
.
initializer
.
Uniform
(
-
stdv
,
stdv
),
name
=
name
+
"_weights"
)
conv
=
fluid
.
layers
.
conv2d
(
input
=
input
,
from
paddle.fluid.layer_helper
import
LayerHelper
from
paddle.fluid.dygraph.nn
import
Conv2D
,
Pool2D
,
BatchNorm
,
Linear
import
math
__all__
=
[
'GoogLeNet_DY'
]
def xavier(channels, filter_size, name):
    """Xavier-style uniform ParamAttr for a conv/fc weight.

    The bound is sqrt(3 / (filter_size**2 * channels)); the attr is named
    "<name>_weights" per the PaddleClas convention.
    """
    stdv = (3.0 / (filter_size ** 2 * channels)) ** 0.5
    return ParamAttr(
        initializer=fluid.initializer.Uniform(-stdv, stdv),
        name=name + "_weights")
class ConvLayer(fluid.dygraph.Layer):
    """Plain Conv2D wrapper with "same"-style padding ((k - 1) // 2),
    named weights and no bias."""

    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 groups=1,
                 act=None,
                 name=None):
        super(ConvLayer, self).__init__()
        self._conv = Conv2D(
            num_channels=num_channels,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=groups,
            act=act,
            param_attr=ParamAttr(name=name + "_weights"),
            bias_attr=False)

    def forward(self, inputs):
        """Apply the wrapped convolution."""
        return self._conv(inputs)
class Inception(fluid.dygraph.Layer):
    """GoogLeNet inception block.

    Four parallel branches — 1x1 conv, 1x1->3x3 convs, 1x1->5x5 convs and
    3x3 maxpool->1x1 projection — concatenated on the channel axis and
    passed through ReLU.
    """

    def __init__(self,
                 input_channels,
                 output_channels,
                 filter1,
                 filter3R,
                 filter3,
                 filter5R,
                 filter5,
                 proj,
                 name=None):
        super(Inception, self).__init__()
        self._conv1 = ConvLayer(
            input_channels, filter1, 1,
            name="inception_" + name + "_1x1")
        self._conv3r = ConvLayer(
            input_channels, filter3R, 1,
            name="inception_" + name + "_3x3_reduce")
        self._conv3 = ConvLayer(
            filter3R, filter3, 3,
            name="inception_" + name + "_3x3")
        self._conv5r = ConvLayer(
            input_channels, filter5R, 1,
            name="inception_" + name + "_5x5_reduce")
        self._conv5 = ConvLayer(
            filter5R, filter5, 5,
            name="inception_" + name + "_5x5")
        self._pool = Pool2D(
            pool_size=3, pool_type="max", pool_stride=1, pool_padding=1)
        self._convprj = ConvLayer(
            input_channels, proj, 1,
            name="inception_" + name + "_3x3_proj")

    def forward(self, inputs):
        """Concatenate the four branch outputs and apply ReLU."""
        branch1 = self._conv1(inputs)
        branch3 = self._conv3(self._conv3r(inputs))
        branch5 = self._conv5(self._conv5r(inputs))
        branch_proj = self._convprj(self._pool(inputs))
        cat = fluid.layers.concat(
            [branch1, branch3, branch5, branch_proj], axis=1)
        # ReLU applied via LayerHelper so the activation op is registered
        # under this layer's full name, as in the original implementation.
        helper = LayerHelper(self.full_name(), act="relu")
        return helper.append_activation(cat)
class GoogleNetDY(fluid.dygraph.Layer):
    """Dygraph GoogLeNet (Inception v1) with the two auxiliary heads.

    forward() returns [out, out1, out2]: the main softmax output plus the
    two auxiliary-classifier outputs fed from ince4a and ince4d.
    """

    def __init__(self, class_dim=1000):
        super(GoogleNetDY, self).__init__()
        # Stem.
        self._conv = ConvLayer(3, 64, 7, 2, name="conv1")
        self._pool = Pool2D(pool_size=3, pool_type="max", pool_stride=2)
        self._conv_1 = ConvLayer(64, 64, 1, name="conv2_1x1")
        self._conv_2 = ConvLayer(64, 192, 3, name="conv2_3x3")

        # Inception stages.
        self._ince3a = Inception(
            192, 192, 64, 96, 128, 16, 32, 32, name="ince3a")
        self._ince3b = Inception(
            256, 256, 128, 128, 192, 32, 96, 64, name="ince3b")
        self._ince4a = Inception(
            480, 480, 192, 96, 208, 16, 48, 64, name="ince4a")
        self._ince4b = Inception(
            512, 512, 160, 112, 224, 24, 64, 64, name="ince4b")
        self._ince4c = Inception(
            512, 512, 128, 128, 256, 24, 64, 64, name="ince4c")
        self._ince4d = Inception(
            512, 512, 112, 144, 288, 32, 64, 64, name="ince4d")
        self._ince4e = Inception(
            528, 528, 256, 160, 320, 32, 128, 128, name="ince4e")
        self._ince5a = Inception(
            832, 832, 256, 160, 320, 32, 128, 128, name="ince5a")
        self._ince5b = Inception(
            832, 832, 384, 192, 384, 48, 128, 128, name="ince5b")

        # Main head.
        self._pool_5 = Pool2D(pool_size=7, pool_type='avg', pool_stride=7)
        self._drop = fluid.dygraph.Dropout(p=0.4)
        self._fc_out = Linear(
            1024,
            class_dim,
            param_attr=xavier(1024, 1, "out"),
            bias_attr=ParamAttr(name="out_offset"),
            act="softmax")

        # Auxiliary head 1 (fed from ince4a).
        self._pool_o1 = Pool2D(pool_size=5, pool_stride=3, pool_type="avg")
        self._conv_o1 = ConvLayer(512, 128, 1, name="conv_o1")
        self._fc_o1 = Linear(
            1152,
            1024,
            param_attr=xavier(2048, 1, "fc_o1"),
            bias_attr=ParamAttr(name="fc_o1_offset"),
            act="relu")
        self._drop_o1 = fluid.dygraph.Dropout(p=0.7)
        self._out1 = Linear(
            1024,
            class_dim,
            param_attr=xavier(1024, 1, "out1"),
            bias_attr=ParamAttr(name="out1_offset"),
            act="softmax")

        # Auxiliary head 2 (fed from ince4d).
        self._pool_o2 = Pool2D(pool_size=5, pool_stride=3, pool_type='avg')
        self._conv_o2 = ConvLayer(528, 128, 1, name="conv_o2")
        # BUGFIX: act="relu" was missing here, inconsistent with _fc_o1 and
        # with the static-graph fc_o2 (which uses act='relu').
        self._fc_o2 = Linear(
            1152,
            1024,
            param_attr=xavier(2048, 1, "fc_o2"),
            bias_attr=ParamAttr(name="fc_o2_offset"),
            act="relu")
        self._drop_o2 = fluid.dygraph.Dropout(p=0.7)
        # BUGFIX: act="softmax" was missing here, inconsistent with _out1,
        # _fc_out and the static-graph out2 (which uses act='softmax').
        self._out2 = Linear(
            1024,
            class_dim,
            param_attr=xavier(1024, 1, "out2"),
            bias_attr=ParamAttr(name="out2_offset"),
            act="softmax")

    def forward(self, inputs):
        """Return [out, out1, out2] (main + two auxiliary outputs)."""
        x = self._conv(inputs)
        x = self._pool(x)
        x = self._conv_1(x)
        x = self._conv_2(x)
        x = self._pool(x)
        x = self._ince3a(x)
        x = self._ince3b(x)
        x = self._pool(x)
        ince4a = self._ince4a(x)
        x = self._ince4b(ince4a)
        x = self._ince4c(x)
        ince4d = self._ince4d(x)
        x = self._ince4e(ince4d)
        x = self._pool(x)
        x = self._ince5a(x)
        ince5b = self._ince5b(x)

        # Main head.
        x = self._pool_5(ince5b)
        x = self._drop(x)
        x = fluid.layers.squeeze(x, axes=[2, 3])
        out = self._fc_out(x)

        # Auxiliary head 1.
        x = self._pool_o1(ince4a)
        x = self._conv_o1(x)
        x = fluid.layers.flatten(x)
        x = self._fc_o1(x)
        x = self._drop_o1(x)
        out1 = self._out1(x)

        # Auxiliary head 2.
        x = self._pool_o2(ince4d)
        x = self._conv_o2(x)
        x = fluid.layers.flatten(x)
        x = self._fc_o2(x)
        x = self._drop_o2(x)
        out2 = self._out2(x)

        return [out, out1, out2]
def GoogLeNet(**args):
    """Factory returning the dygraph GoogLeNet model.

    Keyword arguments are forwarded to GoogleNetDY (e.g. class_dim).
    """
    return GoogleNetDY(**args)
\ No newline at end of file
ppcls/modeling/architectures/inception_v4.py
浏览文件 @
fe302aec
此差异已折叠。
点击以展开。
ppcls/modeling/architectures/resnext101_wsl.py
浏览文件 @
fe302aec
#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from
__future__
import
absolute_import
from
__future__
import
division
from
__future__
import
print_function
import
math
import
paddle
import
paddle.fluid
as
fluid
from
paddle.fluid.param_attr
import
ParamAttr
__all__
=
[
"ResNeXt101_32x8d_wsl"
,
"ResNeXt101_32x16d_wsl"
,
"ResNeXt101_32x32d_wsl"
,
"ResNeXt101_32x48d_wsl"
,
"Fix_ResNeXt101_32x48d_wsl"
]
class
ResNeXt101_wsl
():
def __init__(self, layers=101, cardinality=32, width=48):
    """Store the ResNeXt101-wsl configuration.

    Args:
        layers: network depth (only 101 is used by this file's factories).
        cardinality: number of grouped-convolution groups.
        width: per-group width; defaults give the 32x48d variant.
    """
    self.layers = layers
    self.cardinality = cardinality
    self.width = width
def net(self, input, class_dim=1000):
    """Build the static-graph ResNeXt101-wsl network.

    Args:
        input: input image variable.
        class_dim: number of output classes for the final FC layer.

    Returns:
        The FC output variable (logits of shape ``[N, class_dim]``).
    """
    layers = self.layers  # NOTE(review): read but not otherwise used here.
    cardinality = self.cardinality
    width = self.width
    # Blocks per stage for the 101-layer topology.
    depth = [3, 4, 23, 3]
    base_width = cardinality * width
    # Stage output widths: base_width * {1, 2, 4, 8}.
    num_filters = [base_width * i for i in [1, 2, 4, 8]]
    # Stem: 7x7/2 conv then 3x3/2 max-pool.
    conv = self.conv_bn_layer(
        input=input,
        num_filters=64,
        filter_size=7,
        stride=2,
        act='relu',
        name="conv1")
    conv = fluid.layers.pool2d(
        input=conv,
        pool_size=3,
        pool_stride=2,
        pool_padding=1,
        pool_type='max')
    # Four residual stages; the first block of stages 2-4 downsamples.
    for block in range(len(depth)):
        for i in range(depth[block]):
            conv_name = 'layer' + str(block + 1) + "." + str(i)
            conv = self.bottleneck_block(
                input=conv,
                num_filters=num_filters[block],
                stride=2 if i == 0 and block != 0 else 1,
                cardinality=cardinality,
                name=conv_name)
    # Global average pooling before the classifier.
    pool = fluid.layers.pool2d(
        input=conv, pool_type='avg', global_pooling=True)
    # Uniform init bound follows the usual 1/sqrt(fan_in) scheme.
    stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
    out = fluid.layers.fc(
        input=pool,
        size=class_dim,
        param_attr=fluid.param_attr.ParamAttr(
            initializer=fluid.initializer.Uniform(-stdv, stdv),
            name='fc.weight'),
        bias_attr=fluid.param_attr.ParamAttr(name='fc.bias'))
    return out
def
conv_bn_layer
(
self
,
input
,
num_filters
,
filter_size
,
stride
=
1
,
groups
=
1
,
act
=
None
,
name
=
None
):
from
paddle.fluid.dygraph.nn
import
Conv2D
,
Pool2D
,
BatchNorm
,
Linear
__all__
=
[
"ResNeXt101_32x8d_wsl"
,
"ResNeXt101_wsl_32x16d_wsl"
,
"ResNeXt101_wsl_32x32d_wsl"
,
"ResNeXt101_wsl_32x48d_wsl"
]
class
ConvBNLayer
(
fluid
.
dygraph
.
Layer
):
def
__init__
(
self
,
input_channels
,
output_channels
,
filter_size
,
stride
=
1
,
groups
=
1
,
act
=
None
,
name
=
None
):
super
(
ConvBNLayer
,
self
).
__init__
()
if
"downsample"
in
name
:
conv_name
=
name
+
'.0'
conv_name
=
name
+
".0"
else
:
conv_name
=
name
conv
=
fluid
.
layers
.
conv2d
(
input
=
input
,
num_filters
=
num_filters
,
filter_size
=
filter_size
,
stride
=
stride
,
padding
=
(
filter_size
-
1
)
//
2
,
groups
=
groups
,
act
=
None
,
param_attr
=
ParamAttr
(
name
=
conv_name
+
".weight"
),
bias_attr
=
False
)
conv_name
=
name
self
.
_conv
=
Conv2D
(
num_channels
=
input_channels
,
num_filters
=
output_channels
,
filter_size
=
filter_size
,
stride
=
stride
,
padding
=
(
filter_size
-
1
)
//
2
,
groups
=
groups
,
act
=
None
,
param_attr
=
ParamAttr
(
name
=
conv_name
+
".weight"
),
bias_attr
=
False
)
if
"downsample"
in
name
:
bn_name
=
name
[:
9
]
+
'downsample'
+
'.1'
bn_name
=
name
[:
9
]
+
"downsample.1"
else
:
if
"conv1"
==
name
:
bn_name
=
'bn'
+
name
[
-
1
]
bn_name
=
"bn"
+
name
[
-
1
]
else
:
bn_name
=
(
name
[:
10
]
if
name
[
7
:
9
].
isdigit
()
else
name
[:
9
]
)
+
'bn'
+
name
[
-
1
]
return
fluid
.
layers
.
batch_norm
(
input
=
conv
,
act
=
act
,
param_attr
=
ParamAttr
(
name
=
bn_name
+
'.weight'
),
bias_attr
=
ParamAttr
(
bn_name
+
'.bias'
),
moving_mean_name
=
bn_name
+
'.running_mean'
,
moving_variance_name
=
bn_name
+
'.running_var'
,
)
def shortcut(self, input, ch_out, stride, name):
    """Residual shortcut: identity when shape is preserved, else a 1x1
    projection built with ``conv_bn_layer``."""
    # A projection is only needed when the residual branch changes the
    # channel count or the spatial resolution.
    needs_projection = input.shape[1] != ch_out or stride != 1
    if not needs_projection:
        return input
    return self.conv_bn_layer(input, ch_out, 1, stride, name=name)
def bottleneck_block(self, input, num_filters, stride, cardinality, name):
    """One ResNeXt bottleneck: 1x1 reduce -> 3x3 grouped -> 1x1 expand,
    added to a (possibly projected) shortcut with a fused ReLU.

    NOTE(review): the ``cardinality`` parameter is immediately shadowed by
    ``self.cardinality`` below, so the argument passed by ``net`` is
    ignored (they carry the same value in practice).
    """
    cardinality = self.cardinality
    width = self.width
    conv0 = self.conv_bn_layer(
        input=input,
        num_filters=num_filters,
        filter_size=1,
        act='relu',
        name=name + ".conv1")
    # Grouped 3x3 conv carries the cardinality; it also applies the stride.
    conv1 = self.conv_bn_layer(
        input=conv0,
        num_filters=num_filters,
        filter_size=3,
        stride=stride,
        groups=cardinality,
        act='relu',
        name=name + ".conv2")
    # Expand back; output channels shrink by the width scale (width // 8).
    conv2 = self.conv_bn_layer(
        input=conv1,
        num_filters=num_filters // (width // 8),
        filter_size=1,
        act=None,
        name=name + ".conv3")
    short = self.shortcut(
        input,
        num_filters // (width // 8),
        stride,
        name=name + ".downsample")
    return fluid.layers.elementwise_add(x=short, y=conv2, act='relu')
def _resnext101_wsl(width):
    """Shared constructor: every WSL variant uses cardinality 32."""
    return ResNeXt101_wsl(cardinality=32, width=width)


def ResNeXt101_32x8d_wsl():
    """ResNeXt-101 32x8d (WSL) static-graph builder."""
    return _resnext101_wsl(8)


def ResNeXt101_32x16d_wsl():
    """ResNeXt-101 32x16d (WSL) static-graph builder."""
    return _resnext101_wsl(16)


def ResNeXt101_32x32d_wsl():
    """ResNeXt-101 32x32d (WSL) static-graph builder."""
    return _resnext101_wsl(32)


def ResNeXt101_32x48d_wsl():
    """ResNeXt-101 32x48d (WSL) static-graph builder."""
    return _resnext101_wsl(48)


def Fix_ResNeXt101_32x48d_wsl():
    """Same topology as the 32x48d variant; the ``Fix_`` prefix presumably
    denotes the FixRes fine-tuned weights rather than an architecture
    change — confirm against the pretrained-weight catalog."""
    return _resnext101_wsl(48)
bn_name
=
(
name
[:
10
]
if
name
[
7
:
9
].
isdigit
()
else
name
[:
9
])
+
"bn"
+
name
[
-
1
]
self
.
_bn
=
BatchNorm
(
num_channels
=
output_channels
,
act
=
act
,
param_attr
=
ParamAttr
(
name
=
bn_name
+
".weight"
),
bias_attr
=
ParamAttr
(
name
=
bn_name
+
".bias"
),
moving_mean_name
=
bn_name
+
".running_mean"
,
moving_variance_name
=
bn_name
+
".running_var"
)
def forward(self, inputs):
    """Apply the convolution, then batch normalization (with the optional
    fused activation configured at construction time)."""
    return self._bn(self._conv(inputs))
class ShortCut(fluid.dygraph.Layer):
    """Residual shortcut branch.

    Acts as the identity when the input already matches the residual
    branch's output shape; otherwise applies a strided 1x1 conv+BN
    projection so the two branches can be added.
    """

    def __init__(self, input_channels, output_channels, stride, name=None):
        super(ShortCut, self).__init__()
        self.input_channels = input_channels
        self.output_channels = output_channels
        self.stride = stride
        # The projection sub-layer is only created when it will be used.
        if input_channels != output_channels or stride != 1:
            self._conv = ConvBNLayer(
                input_channels,
                output_channels,
                filter_size=1,
                stride=stride,
                name=name)

    def forward(self, inputs):
        # Mirror the construction-time condition: project iff shapes differ.
        if self.input_channels != self.output_channels or self.stride != 1:
            return self._conv(inputs)
        return inputs
class BottleneckBlock(fluid.dygraph.Layer):
    """ResNeXt bottleneck block (dygraph).

    1x1 conv -> grouped 3x3 conv (carries stride and cardinality) ->
    1x1 conv, added to a ``ShortCut`` branch with a fused ReLU. The final
    1x1 conv shrinks ``output_channels`` by the width scale ``width // 8``.
    """

    def __init__(self, input_channels, output_channels, stride, cardinality,
                 width, name):
        super(BottleneckBlock, self).__init__()
        self._conv0 = ConvBNLayer(
            input_channels,
            output_channels,
            filter_size=1,
            act="relu",
            name=name + ".conv1")
        self._conv1 = ConvBNLayer(
            output_channels,
            output_channels,
            filter_size=3,
            act="relu",
            stride=stride,
            groups=cardinality,
            name=name + ".conv2")
        self._conv2 = ConvBNLayer(
            output_channels,
            output_channels // (width // 8),
            filter_size=1,
            act=None,
            name=name + ".conv3")
        self._short = ShortCut(
            input_channels,
            output_channels // (width // 8),
            stride=stride,
            name=name + ".downsample")

    def forward(self, inputs):
        x = self._conv0(inputs)
        x = self._conv1(x)
        x = self._conv2(x)
        # Shortcut is computed from the block input, not the conv chain.
        y = self._short(inputs)
        return fluid.layers.elementwise_add(x, y, act="relu")
class ResNeXt101WSL(fluid.dygraph.Layer):
    """ResNeXt-101 WSL backbone (dygraph).

    The original implementation spelled out all 33 bottleneck blocks
    (``_conv1_0`` ... ``_conv4_2``) and repeated the same 33 lines again in
    ``forward``. This version builds the four stages in a loop while
    keeping the exact same sub-layer attribute names and parameter names
    ("layer<stage>.<block>", "fc.weight", ...), so pretrained weights and
    any external attribute access remain compatible.

    Args:
        layers: nominal depth (kept for interface compatibility; the stage
            layout below is fixed to the 101-layer topology).
        cardinality: number of grouped-convolution groups.
        width: per-group width; ``width // 8`` is the channel scale.
        class_dim: number of classes for the final Linear head.
    """

    def __init__(self, layers=101, cardinality=32, width=48, class_dim=1000):
        super(ResNeXt101WSL, self).__init__()
        self.class_dim = class_dim
        self.layers = layers
        self.cardinality = cardinality
        self.width = width
        self.scale = width // 8
        self.depth = [3, 4, 23, 3]  # blocks per stage for ResNeXt-101
        self.base_width = cardinality * width
        # Stage widths before the bottleneck's final 1x1 shrink,
        # e.g. [256, 512, 1024, 2048] * scale.
        num_filters = [self.base_width * i for i in [1, 2, 4, 8]]

        self._conv_stem = ConvBNLayer(
            3, 64, 7, stride=2, act="relu", name="conv1")
        self._pool = Pool2D(
            pool_size=3, pool_stride=2, pool_padding=1, pool_type="max")

        # Build the residual stages. Assigning a Layer to an attribute
        # registers it as a sub-layer (fluid.dygraph.Layer.__setattr__),
        # so setattr here is equivalent to the hand-written assignments.
        self._block_attrs = []
        in_channels = 64
        for stage_idx, num_blocks in enumerate(self.depth):
            for block_idx in range(num_blocks):
                # The first block of stages 2-4 downsamples.
                stride = 2 if block_idx == 0 and stage_idx != 0 else 1
                attr = "_conv{}_{}".format(stage_idx + 1, block_idx)
                setattr(self, attr, BottleneckBlock(
                    in_channels,
                    num_filters[stage_idx],
                    stride=stride,
                    cardinality=self.cardinality,
                    width=self.width,
                    name="layer{}.{}".format(stage_idx + 1, block_idx)))
                self._block_attrs.append(attr)
                # Each block outputs num_filters[stage] // (width // 8).
                in_channels = num_filters[stage_idx] // (width // 8)

        self._avg_pool = Pool2D(pool_type="avg", global_pooling=True)
        self._out = Linear(
            input_dim=num_filters[3] // (width // 8),
            output_dim=class_dim,
            param_attr=ParamAttr(name="fc.weight"),
            bias_attr=ParamAttr(name="fc.bias"))

    def forward(self, inputs):
        """Stem -> 33 bottleneck blocks -> global avg pool -> FC logits."""
        x = self._conv_stem(inputs)
        x = self._pool(x)
        # Blocks run in the exact creation order of the original code.
        for attr in self._block_attrs:
            x = getattr(self, attr)(x)
        x = self._avg_pool(x)
        # Drop the trailing 1x1 spatial dims before the Linear head.
        x = fluid.layers.squeeze(x, axes=[2, 3])
        x = self._out(x)
        return x
def ResNeXt101_32x8d_wsl(**args):
    """ResNeXt-101 32x8d WSL (dygraph); extra kwargs go to the model."""
    return ResNeXt101WSL(cardinality=32, width=8, **args)


def ResNeXt101_32x16d_wsl(**args):
    """ResNeXt-101 32x16d WSL (dygraph); extra kwargs go to the model."""
    return ResNeXt101WSL(cardinality=32, width=16, **args)


def ResNeXt101_32x32d_wsl(**args):
    """ResNeXt-101 32x32d WSL (dygraph); extra kwargs go to the model."""
    return ResNeXt101WSL(cardinality=32, width=32, **args)


def ResNeXt101_32x48d_wsl(**args):
    """ResNeXt-101 32x48d WSL (dygraph); extra kwargs go to the model."""
    return ResNeXt101WSL(cardinality=32, width=48, **args)
\ No newline at end of file
ppcls/modeling/architectures/squeezenet.py — view file @ fe302aec
#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from
__future__
import
absolute_import
from
__future__
import
division
from
__future__
import
print_function
import
math
import
paddle
import
paddle.fluid
as
fluid
from
paddle.fluid.param_attr
import
ParamAttr
from
paddle.fluid.dygraph.nn
import
Conv2D
,
Pool2D
,
BatchNorm
,
Linear
,
Dropout
__all__
=
[
"SqueezeNet1_0"
,
"SqueezeNet1_1"
]
class
MakeFireConv
(
fluid
.
dygraph
.
Layer
):
def __init__(self, input_channels, output_channels, filter_size, padding=0,
             name=None):
    """A single named conv layer with fused ReLU, used by the Fire module.

    Args:
        input_channels: channels of the incoming feature map.
        output_channels: number of conv filters.
        filter_size: square kernel size (1 for squeeze/expand1x1, 3 for
            expand3x3).
        padding: spatial padding; 1 for the 3x3 expand path.
        name: parameter-name prefix ("<name>_weights" / "<name>_offset").
    """
    super(MakeFireConv, self).__init__()
    self._conv = Conv2D(
        input_channels,
        output_channels,
        filter_size,
        padding=padding,
        act="relu",
        param_attr=ParamAttr(name=name + "_weights"),
        bias_attr=ParamAttr(name=name + "_offset"))
__all__
=
[
"SqueezeNet"
,
"SqueezeNet1_0"
,
"SqueezeNet1_1"
]
def forward(self, inputs):
    """Apply the ReLU convolution to ``inputs``."""
    return self._conv(inputs)
class
MakeFire
(
fluid
.
dygraph
.
Layer
):
def __init__(self, input_channels, squeeze_channels, expand1x1_channels,
             expand3x3_channels, name=None):
    """SqueezeNet Fire module: a 1x1 squeeze conv feeding two parallel
    expand convs (1x1 and padded 3x3) whose outputs are concatenated.

    Args:
        input_channels: channels entering the squeeze conv.
        squeeze_channels: output channels of the 1x1 squeeze conv.
        expand1x1_channels: output channels of the 1x1 expand path.
        expand3x3_channels: output channels of the 3x3 expand path.
        name: parameter-name prefix (e.g. "fire2").
    """
    super(MakeFire, self).__init__()
    self._conv = MakeFireConv(
        input_channels, squeeze_channels, 1, name=name + "_squeeze1x1")
    self._conv_path1 = MakeFireConv(
        squeeze_channels, expand1x1_channels, 1, name=name + "_expand1x1")
    self._conv_path2 = MakeFireConv(
        squeeze_channels, expand3x3_channels, 3, padding=1,
        name=name + "_expand3x3")
class
SqueezeNet
():
def
__init__
(
self
,
version
=
'1.0'
):
def forward(self, inputs):
    """Fire module forward pass: squeeze, then channel-concatenate the
    two parallel expand branches."""
    squeezed = self._conv(inputs)
    expand_1x1 = self._conv_path1(squeezed)
    expand_3x3 = self._conv_path2(squeezed)
    return fluid.layers.concat([expand_1x1, expand_3x3], axis=1)
class
SqueezeNet
(
fluid
.
dygraph
.
Layer
):
def
__init__
(
self
,
version
,
class_dim
=
1000
):
super
(
SqueezeNet
,
self
).
__init__
()
self
.
version
=
version
def
net
(
self
,
input
,
class_dim
=
1000
):
version
=
self
.
version
assert
version
in
[
'1.0'
,
'1.1'
],
\
"supported version are {} but input version is {}"
.
format
([
'1.0'
,
'1.1'
],
version
)
if
version
==
'1.0'
:
conv
=
fluid
.
layers
.
conv2d
(
input
,
num_filters
=
96
,
filter_size
=
7
,
stride
=
2
,
act
=
'relu'
,
param_attr
=
fluid
.
param_attr
.
ParamAttr
(
name
=
"conv1_weights"
),
bias_attr
=
ParamAttr
(
name
=
'conv1_offset'
))
conv
=
fluid
.
layers
.
pool2d
(
conv
,
pool_size
=
3
,
pool_stride
=
2
,
pool_type
=
'max'
)
conv
=
self
.
make_fire
(
conv
,
16
,
64
,
64
,
name
=
'fire2'
)
conv
=
self
.
make_fire
(
conv
,
16
,
64
,
64
,
name
=
'fire3'
)
conv
=
self
.
make_fire
(
conv
,
32
,
128
,
128
,
name
=
'fire4'
)
conv
=
fluid
.
layers
.
pool2d
(
conv
,
pool_size
=
3
,
pool_stride
=
2
,
pool_type
=
'max'
)
conv
=
self
.
make_fire
(
conv
,
32
,
128
,
128
,
name
=
'fire5'
)
conv
=
self
.
make_fire
(
conv
,
48
,
192
,
192
,
name
=
'fire6'
)
conv
=
self
.
make_fire
(
conv
,
48
,
192
,
192
,
name
=
'fire7'
)
conv
=
self
.
make_fire
(
conv
,
64
,
256
,
256
,
name
=
'fire8'
)
conv
=
fluid
.
layers
.
pool2d
(
conv
,
pool_size
=
3
,
pool_stride
=
2
,
pool_type
=
'max'
)
conv
=
self
.
make_fire
(
conv
,
64
,
256
,
256
,
name
=
'fire9'
)
if
self
.
version
==
"1.0"
:
self
.
_conv
=
Conv2D
(
3
,
96
,
7
,
stride
=
2
,
act
=
"relu"
,
param_attr
=
ParamAttr
(
name
=
"conv1_weights"
),
bias_attr
=
ParamAttr
(
name
=
"conv1_offset"
))
self
.
_pool
=
Pool2D
(
pool_size
=
3
,
pool_stride
=
2
,
pool_type
=
"max"
)
self
.
_conv1
=
MakeFire
(
96
,
16
,
64
,
64
,
name
=
"fire2"
)
self
.
_conv2
=
MakeFire
(
128
,
16
,
64
,
64
,
name
=
"fire3"
)
self
.
_conv3
=
MakeFire
(
128
,
32
,
128
,
128
,
name
=
"fire4"
)
self
.
_conv4
=
MakeFire
(
256
,
32
,
128
,
128
,
name
=
"fire5"
)
self
.
_conv5
=
MakeFire
(
256
,
48
,
192
,
192
,
name
=
"fire6"
)
self
.
_conv6
=
MakeFire
(
384
,
48
,
192
,
192
,
name
=
"fire7"
)
self
.
_conv7
=
MakeFire
(
384
,
64
,
256
,
256
,
name
=
"fire8"
)
self
.
_conv8
=
MakeFire
(
512
,
64
,
256
,
256
,
name
=
"fire9"
)
else
:
conv
=
fluid
.
layers
.
conv2d
(
input
,
num_filters
=
64
,
filter_size
=
3
,
stride
=
2
,
padding
=
1
,
act
=
'relu'
,
param_attr
=
fluid
.
param_attr
.
ParamAttr
(
name
=
"conv1_weights"
),
bias_attr
=
ParamAttr
(
name
=
'conv1_offset'
))
conv
=
fluid
.
layers
.
pool2d
(
conv
,
pool_size
=
3
,
pool_stride
=
2
,
pool_type
=
'max'
)
conv
=
self
.
make_fire
(
conv
,
16
,
64
,
64
,
name
=
'fire2'
)
conv
=
self
.
make_fire
(
conv
,
16
,
64
,
64
,
name
=
'fire3'
)
conv
=
fluid
.
layers
.
pool2d
(
conv
,
pool_size
=
3
,
pool_stride
=
2
,
pool_type
=
'max'
)
conv
=
self
.
make_fire
(
conv
,
32
,
128
,
128
,
name
=
'fire4'
)
conv
=
self
.
make_fire
(
conv
,
32
,
128
,
128
,
name
=
'fire5'
)
conv
=
fluid
.
layers
.
pool2d
(
conv
,
pool_size
=
3
,
pool_stride
=
2
,
pool_type
=
'max'
)
conv
=
self
.
make_fire
(
conv
,
48
,
192
,
192
,
name
=
'fire6'
)
conv
=
self
.
make_fire
(
conv
,
48
,
192
,
192
,
name
=
'fire7'
)
conv
=
self
.
make_fire
(
conv
,
64
,
256
,
256
,
name
=
'fire8'
)
conv
=
self
.
make_fire
(
conv
,
64
,
256
,
256
,
name
=
'fire9'
)
conv
=
fluid
.
layers
.
dropout
(
conv
,
dropout_prob
=
0.5
)
conv
=
fluid
.
layers
.
conv2d
(
conv
,
num_filters
=
class_dim
,
filter_size
=
1
,
act
=
'relu'
,
param_attr
=
fluid
.
param_attr
.
ParamAttr
(
name
=
"conv10_weights"
),
bias_attr
=
ParamAttr
(
name
=
'conv10_offset'
))
conv
=
fluid
.
layers
.
pool2d
(
conv
,
pool_type
=
'avg'
,
global_pooling
=
True
)
out
=
fluid
.
layers
.
flatten
(
conv
)
return
out
self
.
_conv
=
Conv2D
(
3
,
64
,
3
,
stride
=
2
,
padding
=
1
,
act
=
"relu"
,
param_attr
=
ParamAttr
(
name
=
"conv1_weights"
),
bias_attr
=
ParamAttr
(
name
=
"conv1_offset"
))
self
.
_pool
=
Pool2D
(
pool_size
=
3
,
pool_stride
=
2
,
pool_type
=
"max"
)
self
.
_conv1
=
MakeFire
(
64
,
16
,
64
,
64
,
name
=
"fire2"
)
self
.
_conv2
=
MakeFire
(
128
,
16
,
64
,
64
,
name
=
"fire3"
)
def make_fire_conv(self, input, num_filters, filter_size, padding=0,
                   name=None):
    """Static-graph helper: one named ReLU conv used inside a Fire module.

    Args:
        input: input variable.
        num_filters: number of conv filters.
        filter_size: square kernel size.
        padding: spatial padding (1 for the 3x3 expand path).
        name: parameter-name prefix ("<name>_weights" / "<name>_offset").
    """
    conv = fluid.layers.conv2d(
        input,
        num_filters=num_filters,
        filter_size=filter_size,
        padding=padding,
        act='relu',
        param_attr=fluid.param_attr.ParamAttr(name=name + "_weights"),
        bias_attr=ParamAttr(name=name + '_offset'))
    return conv
self
.
_conv3
=
MakeFire
(
128
,
32
,
128
,
128
,
name
=
"fire4"
)
self
.
_conv4
=
MakeFire
(
256
,
32
,
128
,
128
,
name
=
"fire5"
)
def make_fire(self, input, squeeze_channels, expand1x1_channels,
              expand3x3_channels, name=None):
    """Static-graph Fire module: 1x1 squeeze conv, then two parallel
    expand convs (1x1 and padded 3x3) concatenated on the channel axis.
    """
    conv = self.make_fire_conv(
        input, squeeze_channels, 1, name=name + '_squeeze1x1')
    conv_path1 = self.make_fire_conv(
        conv, expand1x1_channels, 1, name=name + '_expand1x1')
    # Third positional arg is filter_size=3, fourth is padding=1.
    conv_path2 = self.make_fire_conv(
        conv, expand3x3_channels, 3, 1, name=name + '_expand3x3')
    out = fluid.layers.concat([conv_path1, conv_path2], axis=1)
    return out
self
.
_conv5
=
MakeFire
(
256
,
48
,
192
,
192
,
name
=
"fire6"
)
self
.
_conv6
=
MakeFire
(
384
,
48
,
192
,
192
,
name
=
"fire7"
)
self
.
_conv7
=
MakeFire
(
384
,
64
,
256
,
256
,
name
=
"fire8"
)
self
.
_conv8
=
MakeFire
(
512
,
64
,
256
,
256
,
name
=
"fire9"
)
self
.
_drop
=
Dropout
(
p
=
0.5
)
self
.
_conv9
=
Conv2D
(
512
,
class_dim
,
1
,
act
=
"relu"
,
param_attr
=
ParamAttr
(
name
=
"conv10_weights"
),
bias_attr
=
ParamAttr
(
name
=
"conv10_offset"
))
self
.
_avg_pool
=
Pool2D
(
pool_type
=
"avg"
,
global_pooling
=
True
)
def SqueezeNet1_0():
    """SqueezeNet v1.0 (static-graph builder object)."""
    return SqueezeNet(version='1.0')
def forward(self, inputs):
    """SqueezeNet forward pass.

    v1.0 and v1.1 differ only in where the intermediate max-pools sit
    relative to the Fire modules; both end with dropout, the 1x1
    ``class_dim`` conv, global average pooling, and a squeeze of the
    trailing 1x1 spatial dims.
    """
    x = self._conv(inputs)
    x = self._pool(x)
    if self.version == "1.0":
        # v1.0: pool after fire4 and fire8.
        x = self._conv1(x)
        x = self._conv2(x)
        x = self._conv3(x)
        x = self._pool(x)
        x = self._conv4(x)
        x = self._conv5(x)
        x = self._conv6(x)
        x = self._conv7(x)
        x = self._pool(x)
        x = self._conv8(x)
    else:
        # v1.1: pool earlier, after fire3 and fire5.
        x = self._conv1(x)
        x = self._conv2(x)
        x = self._pool(x)
        x = self._conv3(x)
        x = self._conv4(x)
        x = self._pool(x)
        x = self._conv5(x)
        x = self._conv6(x)
        x = self._conv7(x)
        x = self._conv8(x)
    x = self._drop(x)
    # 1x1 conv acts as the classifier; global pooling yields [N, C, 1, 1].
    x = self._conv9(x)
    x = self._avg_pool(x)
    x = fluid.layers.squeeze(x, axes=[2, 3])
    return x
def SqueezeNet1_0(**args):
    """SqueezeNet v1.0 (dygraph); extra kwargs go to the model."""
    return SqueezeNet(version="1.0", **args)
def SqueezeNet1_1():
    """SqueezeNet v1.1 (static-graph builder object)."""
    return SqueezeNet(version='1.1')
def SqueezeNet1_1(**args):
    """SqueezeNet v1.1 (dygraph); extra kwargs go to the model."""
    return SqueezeNet(version="1.1", **args)
\ No newline at end of file
ppcls/modeling/architectures/vgg.py — view file @ fe302aec
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from
__future__
import
absolute_import
from
__future__
import
division
from
__future__
import
print_function
import
paddle
import
paddle.fluid
as
fluid
from
paddle.fluid.param_attr
import
ParamAttr
from
paddle.fluid.dygraph.nn
import
Conv2D
,
Pool2D
,
BatchNorm
,
Linear
# Public API of this module: the VGG factory functions.
__all__ = ["VGG11", "VGG13", "VGG16", "VGG19"]
class ConvBlock(fluid.dygraph.Layer):
    """One VGG stage: `groups` stacked 3x3 ReLU convs followed by a 2x2 max-pool."""

    def __init__(self, input_channels, output_channels, groups, name=None):
        """
        Args:
            input_channels (int): channels of the incoming feature map.
            output_channels (int): channels produced by every conv in the stage.
            groups (int): number of stacked 3x3 convs; values 1-4 are supported.
            name (str): prefix for parameter names, e.g. "conv1_".
        """
        super(ConvBlock, self).__init__()

        self.groups = groups
        # The first conv is always present; the extra convs are created only
        # when groups >= 2/3/4, so no unused parameters get registered.
        self._conv_1 = Conv2D(
            num_channels=input_channels,
            num_filters=output_channels,
            filter_size=3,
            stride=1,
            padding=1,
            act="relu",
            param_attr=ParamAttr(name=name + "1_weights"),
            bias_attr=False)
        if groups == 2 or groups == 3 or groups == 4:
            self._conv_2 = Conv2D(
                num_channels=output_channels,
                num_filters=output_channels,
                filter_size=3,
                stride=1,
                padding=1,
                act="relu",
                param_attr=ParamAttr(name=name + "2_weights"),
                bias_attr=False)
        if groups == 3 or groups == 4:
            self._conv_3 = Conv2D(
                num_channels=output_channels,
                num_filters=output_channels,
                filter_size=3,
                stride=1,
                padding=1,
                act="relu",
                param_attr=ParamAttr(name=name + "3_weights"),
                bias_attr=False)
        if groups == 4:
            self._conv_4 = Conv2D(
                num_channels=output_channels,
                num_filters=output_channels,
                filter_size=3,
                stride=1,
                padding=1,
                act="relu",
                param_attr=ParamAttr(name=name + "4_weights"),
                bias_attr=False)
        # Halves the spatial resolution at the end of the stage.
        self._pool = Pool2D(pool_size=2, pool_type="max", pool_stride=2)

    def forward(self, inputs):
        """Apply the convs configured by `groups`, then the max-pool."""
        x = self._conv_1(inputs)
        if self.groups == 2 or self.groups == 3 or self.groups == 4:
            x = self._conv_2(x)
        if self.groups == 3 or self.groups == 4:
            x = self._conv_3(x)
        if self.groups == 4:
            x = self._conv_4(x)
        x = self._pool(x)
        return x
class VGGNet(fluid.dygraph.Layer):
    """Dygraph VGG backbone (depths 11/13/16/19) with the classic FC head."""

    def __init__(self, layers=11, class_dim=1000):
        """
        Args:
            layers (int): network depth; one of 11, 13, 16 or 19.
            class_dim (int): number of output classes.

        Raises:
            AssertionError: if `layers` is not a supported depth.
        """
        super(VGGNet, self).__init__()

        self.layers = layers
        # Per-depth conv counts for the five stages.
        self.vgg_configure = {
            11: [1, 1, 2, 2, 2],
            13: [2, 2, 2, 2, 2],
            16: [2, 2, 3, 3, 3],
            19: [2, 2, 4, 4, 4]
        }
        # Fix: the message previously formatted the bare names
        # `vgg_configure`/`layers`, which are undefined here, so a bad depth
        # raised NameError instead of this message.
        assert self.layers in self.vgg_configure.keys(), \
            "supported layers are {} but input layer is {}".format(
                self.vgg_configure.keys(), layers)
        self.groups = self.vgg_configure[self.layers]

        # Five conv stages; channels follow the standard VGG progression.
        self._conv_block_1 = ConvBlock(3, 64, self.groups[0], name="conv1_")
        self._conv_block_2 = ConvBlock(64, 128, self.groups[1], name="conv2_")
        self._conv_block_3 = ConvBlock(128, 256, self.groups[2], name="conv3_")
        self._conv_block_4 = ConvBlock(256, 512, self.groups[3], name="conv4_")
        self._conv_block_5 = ConvBlock(512, 512, self.groups[4], name="conv5_")

        self._drop = fluid.dygraph.Dropout(p=0.5)
        # Head assumes a 7x7x512 feature map (i.e. 224x224 input) — TODO
        # confirm against the configured input resolution.
        self._fc1 = Linear(
            input_dim=7 * 7 * 512,
            output_dim=4096,
            act="relu",
            param_attr=ParamAttr(name="fc6_weights"),
            bias_attr=ParamAttr(name="fc6_offset"))
        self._fc2 = Linear(
            input_dim=4096,
            output_dim=4096,
            act="relu",
            param_attr=ParamAttr(name="fc7_weights"),
            bias_attr=ParamAttr(name="fc7_offset"))
        self._out = Linear(
            input_dim=4096,
            output_dim=class_dim,
            param_attr=ParamAttr(name="fc8_weights"),
            bias_attr=ParamAttr(name="fc8_offset"))

    def forward(self, inputs):
        """Return [N, class_dim] logits for an NCHW image batch."""
        x = self._conv_block_1(inputs)
        x = self._conv_block_2(x)
        x = self._conv_block_3(x)
        x = self._conv_block_4(x)
        x = self._conv_block_5(x)
        # Fix: flatten with axis=1 keeps the batch dimension and yields
        # [N, 7*7*512]; the previous axis=0 produced [1, N*7*7*512], which
        # breaks the Linear(input_dim=7*7*512) head for any batch size > 1.
        x = fluid.layers.flatten(x, axis=1)
        x = self._fc1(x)
        x = self._drop(x)
        x = self._fc2(x)
        x = self._drop(x)
        x = self._out(x)
        return x
def VGG11(**args):
    """Factory for a VGG-11 model; extra kwargs are forwarded to VGGNet."""
    return VGGNet(layers=11, **args)
def VGG13(**args):
    """Factory for a VGG-13 model; extra kwargs are forwarded to VGGNet."""
    return VGGNet(layers=13, **args)
def VGG16(**args):
    """Factory for a VGG-16 model; extra kwargs are forwarded to VGGNet."""
    return VGGNet(layers=16, **args)
def VGG16():
    """Factory for a VGG-16 model (default class_dim)."""
    return VGGNet(layers=16)
def VGG19():
    """Factory for a VGG-19 model (default class_dim)."""
    return VGGNet(layers=19)
def VGG19(**args):
    """Factory for a VGG-19 model; extra kwargs are forwarded to VGGNet."""
    return VGGNet(layers=19, **args)
\ No newline at end of file
ppcls/modeling/architectures/xception.py
浏览文件 @
fe302aec
此差异已折叠。
点击以展开。
ppcls/modeling/architectures/xception_deeplab.py
浏览文件 @
fe302aec
此差异已折叠。
点击以展开。
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录