PaddlePaddle / PaddleClas

Commit 0dfe15d2 — authored June 28, 2020 by wqz960

    add Inception, ResNeXt101_wsl, EfficientNet and other models

Parent commit: be35b7cc

Showing 10 changed files with 2,803 additions and 1,934 deletions (+2803 / -1934).
ppcls/modeling/architectures/alexnet.py            +351   -163
ppcls/modeling/architectures/darknet.py            +156   -106
ppcls/modeling/architectures/efficientnet.py       +622   -355
ppcls/modeling/architectures/googlenet.py          +204   -224
ppcls/modeling/architectures/inception_v4.py       +325   -230
ppcls/modeling/architectures/resnext101_wsl.py     +230   -156
ppcls/modeling/architectures/squeezenet.py         +147   -119
ppcls/modeling/architectures/vgg.py                +125   -91
ppcls/modeling/architectures/xception.py           +304   -225
ppcls/modeling/architectures/xception_deeplab.py   +339   -265
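Most of the diffs below rewrite these architectures as PaddlePaddle dygraph `fluid.dygraph.Layer` modules, each file ending in small factory functions (`Xception41()`, `DarkNet53()`, `GoogLeNet()`, `ResNeXt101_32x8d_wsl()`, ...). As a minimal usage sketch — not part of the commit, and assuming the Paddle 1.x fluid dygraph API, that the factory is importable from `ppcls.modeling.architectures.darknet`, and a 224x224 RGB input — one of the new models could be exercised like this:

import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph.base import to_variable

# Hypothetical import path; the commit only shows the individual model files.
from ppcls.modeling.architectures.darknet import DarkNet53

with fluid.dygraph.guard():
    model = DarkNet53(class_dim=1000)   # dygraph Layer defined in the diff below
    model.eval()
    img = np.random.rand(1, 3, 224, 224).astype("float32")  # assumed input size
    logits = model(to_variable(img))    # forward pass; output shape [1, 1000]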
ppcls/modeling/architectures/alexnet.py — view file @ 0dfe15d2

The previous static-graph implementation (`__all__ = ['AlexNet']`, a plain `AlexNet` class whose `net(input, class_dim=1000)` method stacked `fluid.layers.conv2d` / `pool2d` / `dropout` / `fc` calls — an 11x11/64 conv1, 5x5/192 conv2, 3x3 conv3/conv4/conv5, two 4096-unit fully connected layers with dropout 0.5, and a final `class_dim` classifier, all initialized with `Uniform(-stdv, stdv)`) is removed. The new code shown in this diff defines dygraph layers:

# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
# Licensed under the Apache License, Version 2.0 (the "License");
# http://www.apache.org/licenses/LICENSE-2.0

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid import framework

import math
import sys
import time

__all__ = ['Xception41', 'Xception65', 'Xception71']


class ConvBNLayer(fluid.dygraph.Layer):
    def __init__(self, num_channels, num_filters, filter_size,
                 stride=1, groups=1, act=None, name=None):
        super(ConvBNLayer, self).__init__()
        self._conv = Conv2D(
            num_channels=num_channels, num_filters=num_filters,
            filter_size=filter_size, stride=stride,
            padding=(filter_size - 1) // 2, groups=groups, act=None,
            param_attr=ParamAttr(name=name + "_weights"), bias_attr=False)
        bn_name = "bn_" + name
        self._batch_norm = BatchNorm(
            num_filters, act=act,
            param_attr=ParamAttr(name=bn_name + "_scale"),
            bias_attr=ParamAttr(name=bn_name + "_offset"),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')

    def forward(self, inputs):
        y = self._conv(inputs)
        y = self._batch_norm(y)
        return y


class Separable_Conv(fluid.dygraph.Layer):
    def __init__(self, input_channels, output_channels, stride=1, name=None):
        super(Separable_Conv, self).__init__()
        self._pointwise_conv = ConvBNLayer(
            input_channels, output_channels, 1, name=name + "_sep")
        self._depthwise_conv = ConvBNLayer(
            output_channels, output_channels, 3, stride=stride,
            groups=output_channels, name=name + "_dw")

    def forward(self, inputs):
        x = self._pointwise_conv(inputs)
        x = self._depthwise_conv(x)
        return x


class Entry_Flow_Bottleneck_Block(fluid.dygraph.Layer):
    def __init__(self, input_channels, output_channels, stride=2,
                 name=None, relu_first=False):
        super(Entry_Flow_Bottleneck_Block, self).__init__()
        self.relu_first = relu_first
        self._short = Conv2D(
            num_channels=input_channels, num_filters=output_channels,
            filter_size=1, stride=stride, padding=0, act=None,
            param_attr=ParamAttr(name + "_branch1_weights"), bias_attr=False)
        self._conv1 = Separable_Conv(
            input_channels, output_channels, stride=1, name=name + "_branch2a_weights")
        self._conv2 = Separable_Conv(
            output_channels, output_channels, stride=1, name=name + "_branch2b_weights")
        self._pool = Pool2D(
            pool_size=3, pool_stride=stride, pool_padding=1, pool_type="max")

    def forward(self, inputs):
        conv0 = inputs
        short = self._short(inputs)
        layer_helper = LayerHelper(self.full_name(), act="relu")
        if self.relu_first:
            conv0 = layer_helper.append_activation(conv0)
        conv1 = self._conv1(conv0)
        conv2 = layer_helper.append_activation(conv1)
        conv2 = self._conv2(conv2)
        pool = self._pool(conv2)
        return fluid.layers.elementwise_add(x=short, y=pool)


class Entry_Flow(fluid.dygraph.Layer):
    def __init__(self, block_num=3):
        super(Entry_Flow, self).__init__()
        name = "entry_flow"
        self.block_num = block_num
        self._conv1 = ConvBNLayer(3, 32, 3, stride=2, act="relu", name=name + "_conv1")
        self._conv2 = ConvBNLayer(32, 64, 3, act="relu", name=name + "_conv2")
        if block_num == 3:
            self._conv_0 = Entry_Flow_Bottleneck_Block(64, 128, stride=2, name=name + "_0", relu_first=False)
            self._conv_1 = Entry_Flow_Bottleneck_Block(128, 256, stride=2, name=name + "_1", relu_first=True)
            self._conv_2 = Entry_Flow_Bottleneck_Block(256, 728, stride=2, name=name + "_2", relu_first=True)
        elif block_num == 5:
            self._conv_0 = Entry_Flow_Bottleneck_Block(64, 128, stride=2, name=name + "_0", relu_first=False)
            self._conv_1 = Entry_Flow_Bottleneck_Block(128, 256, stride=1, name=name + "_1", relu_first=True)
            self._conv_2 = Entry_Flow_Bottleneck_Block(256, 256, stride=2, name=name + "_2", relu_first=True)
            self._conv_3 = Entry_Flow_Bottleneck_Block(256, 728, stride=1, name=name + "_3", relu_first=True)
            self._conv_4 = Entry_Flow_Bottleneck_Block(728, 728, stride=2, name=name + "_4", relu_first=True)
        else:
            sys.exit(-1)

    def forward(self, inputs):
        x = self._conv1(inputs)
        x = self._conv2(x)
        x = self._conv_0(x)
        x = self._conv_1(x)
        x = self._conv_2(x)
        if self.block_num == 5:
            x = self._conv_3(x)
            x = self._conv_4(x)
        return x


class Middle_Flow_Bottleneck_Block(fluid.dygraph.Layer):
    def __init__(self, input_channels, output_channels, name):
        super(Middle_Flow_Bottleneck_Block, self).__init__()
        self._conv_0 = Separable_Conv(input_channels, output_channels, stride=1,
                                      name=name + "_branch2a_weights")
        self._conv_1 = Separable_Conv(output_channels, output_channels, stride=1,
                                      name=name + "_branch2b_weights")
        self._conv_2 = Separable_Conv(output_channels, output_channels, stride=1,
                                      name=name + "_branch2c_weights")

    def forward(self, inputs):
        layer_helper = LayerHelper(self.full_name(), act="relu")
        conv0 = layer_helper.append_activation(inputs)
        conv0 = self._conv_0(conv0)
        conv1 = layer_helper.append_activation(conv0)
        conv1 = self._conv_1(conv1)
        conv2 = layer_helper.append_activation(conv1)
        conv2 = self._conv_2(conv2)
        return fluid.layers.elementwise_add(x=inputs, y=conv2)


class Middle_Flow(fluid.dygraph.Layer):
    def __init__(self, block_num=8):
        super(Middle_Flow, self).__init__()
        self.block_num = block_num
        self._conv_0 = Middle_Flow_Bottleneck_Block(728, 728, name="middle_flow_0")
        # (_conv_1 through _conv_7 are defined identically, named "middle_flow_1" ... "middle_flow_7")
        if block_num == 16:
            self._conv_8 = Middle_Flow_Bottleneck_Block(728, 728, name="middle_flow_8")
            # (_conv_9 through _conv_15 are defined identically, named "middle_flow_9" ... "middle_flow_15")

    def forward(self, inputs):
        x = self._conv_0(inputs)
        # (x is then passed through _conv_1 ... _conv_7, and through _conv_8 ... _conv_15
        #  when self.block_num == 16)
        return x


class Exit_Flow_Bottleneck_Block(fluid.dygraph.Layer):
    def __init__(self, input_channels, output_channels1, output_channels2, name):
        super(Exit_Flow_Bottleneck_Block, self).__init__()
        self._short = Conv2D(
            num_channels=input_channels, num_filters=output_channels2,
            filter_size=1, stride=2, padding=0, act=None,
            param_attr=ParamAttr(name + "_branch1_weights"), bias_attr=False)
        self._conv_1 = Separable_Conv(
            input_channels, output_channels1, stride=1, name=name + "_branch2a_weights")
        self._conv_2 = Separable_Conv(
            output_channels1, output_channels2, stride=1, name=name + "_branch2b_weights")
        self._pool = Pool2D(pool_size=3, pool_stride=2, pool_padding=1, pool_type="max")

    def forward(self, inputs):
        short = self._short(inputs)
        layer_helper = LayerHelper(self.full_name(), act="relu")
        conv0 = layer_helper.append_activation(inputs)
        conv1 = self._conv_1(conv0)
        conv2 = layer_helper.append_activation(conv1)
        conv2 = self._conv_2(conv2)
        pool = self._pool(conv2)
        return fluid.layers.elementwise_add(x=short, y=pool)


class Exit_Flow(fluid.dygraph.Layer):
    def __init__(self, class_dim):
        super(Exit_Flow, self).__init__()
        name = "exit_flow"
        self._conv_0 = Exit_Flow_Bottleneck_Block(728, 728, 1024, name=name + "_1")
        self._conv_1 = Separable_Conv(1024, 1536, stride=1, name=name + "_2")
        self._conv_2 = Separable_Conv(1536, 2048, stride=1, name=name + "_3")
        self._pool = Pool2D(pool_type="avg", global_pooling=True)
        stdv = 1.0 / math.sqrt(2048 * 1.0)
        self._out = Linear(
            2048, class_dim,
            param_attr=ParamAttr(name="fc_weights",
                                 initializer=fluid.initializer.Uniform(-stdv, stdv)),
            bias_attr=ParamAttr(name="fc_offset"))

    def forward(self, inputs):
        layer_helper = LayerHelper(self.full_name(), act="relu")
        conv0 = self._conv_0(inputs)
        conv1 = self._conv_1(conv0)
        conv1 = layer_helper.append_activation(conv1)
        conv2 = self._conv_2(conv1)
        conv2 = layer_helper.append_activation(conv2)
        pool = self._pool(conv2)
        pool = fluid.layers.reshape(pool, [0, -1])
        out = self._out(pool)
        return out


class Xception(fluid.dygraph.Layer):
    def __init__(self, entry_flow_block_num=3, middle_flow_block_num=8, class_dim=1000):
        super(Xception, self).__init__()
        self.entry_flow_block_num = entry_flow_block_num
        self.middle_flow_block_num = middle_flow_block_num
        self._entry_flow = Entry_Flow(entry_flow_block_num)
        self._middle_flow = Middle_Flow(middle_flow_block_num)
        self._exit_flow = Exit_Flow(class_dim)

    def forward(self, inputs):
        x = self._entry_flow(inputs)
        x = self._middle_flow(x)
        x = self._exit_flow(x)
        return x


def Xception41():
    model = Xception(entry_flow_block_num=3, middle_flow_block_num=8)
    return model


def Xception65():
    model = Xception(entry_flow_block_num=3, middle_flow_block_num=16)
    return model


def Xception71():
    model = Xception(entry_flow_block_num=5, middle_flow_block_num=16)
    return model
ppcls/modeling/architectures/darknet.py — view file @ 0dfe15d2

The static-graph `DarkNet53` class (a `net(input, class_dim=1000)` method driven by `DarkNet_cfg = {53: ([1, 2, 8, 8, 4], self.basicblock)}` plus `conv_bn_layer`, `downsample`, `basicblock` and `layer_warp` helpers built on `fluid.layers.*`) is replaced by a dygraph implementation. The diff elides part of the file (hunk `@@ -82,39 +38,133 @@ class DarkNet53():`); the recoverable new code is:

# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
# Licensed under the Apache License, Version 2.0 (the "License");

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math
import sys
import time

import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid import framework

__all__ = ["DarkNet53"]


class ConvBNLayer(fluid.dygraph.Layer):
    def __init__(self, input_channels, output_channels, filter_size, stride, padding, name=None):
        super(ConvBNLayer, self).__init__()
        self._conv = Conv2D(
            num_channels=input_channels, num_filters=output_channels,
            filter_size=filter_size, stride=stride, padding=padding,
            # (the remaining Conv2D arguments fall inside the hunk elided by the diff view)
            bias_attr=False)
        bn_name = name + ".bn"
        self._bn = BatchNorm(
            num_channels=output_channels, act="relu",
            param_attr=ParamAttr(name=bn_name + ".scale"),
            bias_attr=ParamAttr(name=bn_name + ".offset"),
            moving_mean_name=bn_name + ".mean",
            moving_variance_name=bn_name + ".var")

    def forward(self, inputs):
        x = self._conv(inputs)
        x = self._bn(x)
        return x


class Basic_Block(fluid.dygraph.Layer):
    def __init__(self, input_channels, output_channels, name=None):
        super(Basic_Block, self).__init__()
        self._conv1 = ConvBNLayer(input_channels, output_channels, 1, 1, 0, name=name + ".0")
        self._conv2 = ConvBNLayer(output_channels, output_channels * 2, 3, 1, 1, name=name + ".1")

    def forward(self, inputs):
        x = self._conv1(inputs)
        x = self._conv2(x)
        return fluid.layers.elementwise_add(x=inputs, y=x)


class DarkNet(fluid.dygraph.Layer):
    def __init__(self, class_dim=1000):
        super(DarkNet, self).__init__()
        self.stages = [1, 2, 8, 8, 4]
        self._conv1 = ConvBNLayer(3, 32, 3, 1, 1, name="yolo_input")
        self._conv2 = ConvBNLayer(32, 64, 3, 2, 1, name="yolo_input.downsample")
        self._basic_block_01 = Basic_Block(64, 32, name="stage.0.0")
        self._downsample_0 = ConvBNLayer(64, 128, 3, 2, 1, name="stage.0.downsample")
        self._basic_block_11 = Basic_Block(128, 64, name="stage.1.0")
        self._basic_block_12 = Basic_Block(128, 64, name="stage.1.1")
        self._downsample_1 = ConvBNLayer(128, 256, 3, 2, 1, name="stage.1.downsample")
        # _basic_block_21 ... _basic_block_28: eight Basic_Block(256, 128) layers, "stage.2.0" ... "stage.2.7"
        self._downsample_2 = ConvBNLayer(256, 512, 3, 2, 1, name="stage.2.downsample")
        # _basic_block_31 ... _basic_block_38: eight Basic_Block(512, 256) layers, "stage.3.0" ... "stage.3.7"
        self._downsample_3 = ConvBNLayer(512, 1024, 3, 2, 1, name="stage.3.downsample")
        # _basic_block_41 ... _basic_block_44: four Basic_Block(1024, 512) layers, "stage.4.0" ... "stage.4.3"
        self._pool = Pool2D(pool_type="avg", global_pooling=True)
        stdv = 1.0 / math.sqrt(1024.0)
        self._out = Linear(
            input_dim=1024, output_dim=class_dim,
            param_attr=ParamAttr(name="fc_weights",
                                 initializer=fluid.initializer.Uniform(-stdv, stdv)),
            bias_attr=ParamAttr(name="fc_offset"))

    def forward(self, inputs):
        x = self._conv1(inputs)
        x = self._conv2(x)
        x = self._basic_block_01(x)
        x = self._downsample_0(x)
        # (each stage's Basic_Block layers and the following _downsample_* layer are
        #  applied in order, exactly as registered above)
        x = self._pool(x)
        x = fluid.layers.squeeze(x, axes=[2, 3])
        x = self._out(x)
        return x


def DarkNet53(**args):
    model = DarkNet(**args)
    return model
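For reference, the layer names registered above follow DarkNet-53's stage layout of [1, 2, 8, 8, 4] residual blocks, with a stride-2 ConvBNLayer doubling the channel width before each stage. A small illustrative sketch (an assumption for readability, not code from the commit) that reproduces the per-stage channel widths used in __init__:

stages = [1, 2, 8, 8, 4]      # residual blocks per stage; stage.0 holds Basic_Block(64, 32)
channels = 64                  # width after the two stem ConvBNLayers (3 -> 32 -> 64)
for i, num_blocks in enumerate(stages[1:], start=1):
    channels *= 2              # each "_downsample_*" ConvBNLayer doubles the width
    print("stage.%d: %d x Basic_Block(%d, %d)" % (i, num_blocks, channels, channels // 2))
# stage.1: 2 x Basic_Block(128, 64)  ...  stage.4: 4 x Basic_Block(1024, 512)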
ppcls/modeling/architectures/efficientnet.py — view file @ 0dfe15d2

This diff is collapsed in the page view and its contents are not shown.
ppcls/modeling/architectures/googlenet.py — view file @ 0dfe15d2

The static-graph `GoogLeNet` class (`__all__ = ['GoogLeNet']`, with `conv_layer`, `xavier` and `inception` helper methods and a `net(input, class_dim=1000)` method built from `fluid.layers.conv2d` / `pool2d` / `dropout` / `fc`, returning the three softmax heads `[out, out1, out2]`) is replaced by a dygraph implementation:

# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
# Licensed under the Apache License, Version 2.0 (the "License");

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math
import sys
import time

import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid import framework

__all__ = ['GoogLeNet_DY']


def xavier(channels, filter_size, name):
    stdv = (3.0 / (filter_size**2 * channels))**0.5
    param_attr = ParamAttr(
        initializer=fluid.initializer.Uniform(-stdv, stdv), name=name + "_weights")
    return param_attr


class ConvLayer(fluid.dygraph.Layer):
    def __init__(self, num_channels, num_filters, filter_size,
                 stride=1, groups=1, act=None, name=None):
        super(ConvLayer, self).__init__()
        self._conv = Conv2D(
            num_channels=num_channels, num_filters=num_filters,
            filter_size=filter_size, stride=stride,
            padding=(filter_size - 1) // 2, groups=groups, act=None,
            param_attr=ParamAttr(name=name + "_weights"), bias_attr=False)

    def forward(self, inputs):
        y = self._conv(inputs)
        return y


class Inception(fluid.dygraph.Layer):
    def __init__(self, input_channels, output_channels, filter1, filter3R, filter3,
                 filter5R, filter5, proj, name=None):
        super(Inception, self).__init__()
        self._conv1 = ConvLayer(input_channels, filter1, 1, name="inception_" + name + "_1x1")
        self._conv3r = ConvLayer(input_channels, filter3R, 1, name="inception_" + name + "_3x3_reduce")
        self._conv3 = ConvLayer(filter3R, filter3, 3, name="inception_" + name + "_3x3")
        self._conv5r = ConvLayer(input_channels, filter5R, 1, name="inception_" + name + "_5x5_reduce")
        self._conv5 = ConvLayer(filter5R, filter5, 5, name="inception_" + name + "_5x5")
        self._pool = Pool2D(pool_size=3, pool_type="max", pool_stride=1, pool_padding=1)
        self._convprj = ConvLayer(input_channels, proj, 1, name="inception_" + name + "_3x3_proj")

    def forward(self, inputs):
        conv1 = self._conv1(inputs)
        conv3r = self._conv3r(inputs)
        conv3 = self._conv3(conv3r)
        conv5r = self._conv5r(inputs)
        conv5 = self._conv5(conv5r)
        pool = self._pool(inputs)
        convprj = self._convprj(pool)
        cat = fluid.layers.concat([conv1, conv3, conv5, convprj], axis=1)
        layer_helper = LayerHelper(self.full_name(), act="relu")
        return layer_helper.append_activation(cat)


class GoogleNet_DY(fluid.dygraph.Layer):
    def __init__(self, class_dim=1000):
        super(GoogleNet_DY, self).__init__()
        self._conv = ConvLayer(3, 64, 7, 2, name="conv1")
        self._pool = Pool2D(pool_size=3, pool_type="max", pool_stride=2)
        self._conv_1 = ConvLayer(64, 64, 1, name="conv2_1x1")
        self._conv_2 = ConvLayer(64, 192, 3, name="conv2_3x3")
        self._ince3a = Inception(192, 192, 64, 96, 128, 16, 32, 32, name="ince3a")
        self._ince3b = Inception(256, 256, 128, 128, 192, 32, 96, 64, name="ince3b")
        self._ince4a = Inception(480, 480, 192, 96, 208, 16, 48, 64, name="ince4a")
        self._ince4b = Inception(512, 512, 160, 112, 224, 24, 64, 64, name="ince4b")
        self._ince4c = Inception(512, 512, 128, 128, 256, 24, 64, 64, name="ince4c")
        self._ince4d = Inception(512, 512, 112, 144, 288, 32, 64, 64, name="ince4d")
        self._ince4e = Inception(528, 528, 256, 160, 320, 32, 128, 128, name="ince4e")
        self._ince5a = Inception(832, 832, 256, 160, 320, 32, 128, 128, name="ince5a")
        self._ince5b = Inception(832, 832, 384, 192, 384, 48, 128, 128, name="ince5b")
        self._pool_5 = Pool2D(pool_size=7, pool_type='avg', pool_stride=7)
        self._drop = fluid.dygraph.Dropout(p=0.4)
        self._fc_out = Linear(1024, class_dim,
                              param_attr=xavier(1024, 1, "out"),
                              bias_attr=ParamAttr(name="out_offset"),
                              act="softmax")
        self._pool_o1 = Pool2D(pool_size=5, pool_stride=3, pool_type="avg")
        self._conv_o1 = ConvLayer(512, 128, 1, name="conv_o1")
        self._fc_o1 = Linear(1152, 1024,
                             param_attr=xavier(2048, 1, "fc_o1"),
                             bias_attr=ParamAttr(name="fc_o1_offset"),
                             act="relu")
        self._drop_o1 = fluid.dygraph.Dropout(p=0.7)
        self._out1 = Linear(1024, class_dim,
                            param_attr=xavier(1024, 1, "out1"),
                            bias_attr=ParamAttr(name="out1_offset"),
                            act="softmax")
        self._pool_o2 = Pool2D(pool_size=5, pool_stride=3, pool_type='avg')
        self._conv_o2 = ConvLayer(528, 128, 1, name="conv_o2")
        self._fc_o2 = Linear(1152, 1024,
                             param_attr=xavier(2048, 1, "fc_o2"),
                             bias_attr=ParamAttr(name="fc_o2_offset"))
        self._drop_o2 = fluid.dygraph.Dropout(p=0.7)
        self._out2 = Linear(1024, class_dim,
                            param_attr=xavier(1024, 1, "out2"),
                            bias_attr=ParamAttr(name="out2_offset"))

    def forward(self, inputs):
        x = self._conv(inputs)
        x = self._pool(x)
        x = self._conv_1(x)
        x = self._conv_2(x)
        x = self._pool(x)
        x = self._ince3a(x)
        x = self._ince3b(x)
        x = self._pool(x)
        ince4a = self._ince4a(x)
        x = self._ince4b(ince4a)
        x = self._ince4c(x)
        ince4d = self._ince4d(x)
        x = self._ince4e(ince4d)
        x = self._pool(x)
        x = self._ince5a(x)
        ince5b = self._ince5b(x)
        x = self._pool_5(ince5b)
        x = self._drop(x)
        x = fluid.layers.squeeze(x, axes=[2, 3])
        # last fc layer is "out"
        out = self._fc_out(x)

        x = self._pool_o1(ince4a)
        x = self._conv_o1(x)
        x = fluid.layers.flatten(x)
        x = self._fc_o1(x)
        x = self._drop_o1(x)
        out1 = self._out1(x)

        x = self._pool_o2(ince4d)
        x = self._conv_o2(x)
        x = fluid.layers.flatten(x)
        x = self._fc_o2(x)
        x = self._drop_o2(x)
        out2 = self._out2(x)
        return [out, out1, out2]


def GoogLeNet():
    model = GoogleNet_DY()
    return model
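`GoogleNet_DY.forward` returns three softmax outputs: the main head plus the two auxiliary classifiers fed from `ince4a` and `ince4d`. How they are combined is not part of this file; a common choice — an assumption here, following the original GoogLeNet recipe rather than anything in this commit — is to down-weight the auxiliary losses during training and use only `out` at inference:

import paddle.fluid as fluid

def googlenet_loss(outputs, label, aux_weight=0.3):
    # outputs is the [out, out1, out2] list returned by GoogleNet_DY.forward;
    # each entry already carries a softmax activation.
    out, out1, out2 = outputs
    loss = fluid.layers.mean(fluid.layers.cross_entropy(input=out, label=label))
    loss1 = fluid.layers.mean(fluid.layers.cross_entropy(input=out1, label=label))
    loss2 = fluid.layers.mean(fluid.layers.cross_entropy(input=out2, label=label))
    return loss + aux_weight * (loss1 + loss2)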
ppcls/modeling/architectures/inception_v4.py — view file @ 0dfe15d2

This diff is collapsed in the page view and its contents are not shown.
ppcls/modeling/architectures/resnext101_wsl.py — view file @ 0dfe15d2

The static-graph `ResNeXt101_wsl` class (with `net()`, `conv_bn_layer()`, `shortcut()` and `bottleneck_block()` methods built on `fluid.layers.*`) is replaced by a dygraph implementation:

# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
# Licensed under the Apache License, Version 2.0 (the "License");

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math
import sys
import time

import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid import framework

__all__ = ["ResNeXt101_32x8d_wsl", "ResNeXt101_32x16d_wsl", "ResNeXt101_32x32d_wsl",
           "ResNeXt101_32x48d_wsl", "Fix_ResNeXt101_32x48d_wsl"]


class ConvBNLayer(fluid.dygraph.Layer):
    def __init__(self, input_channels, output_channels, filter_size,
                 stride=1, groups=1, act=None, name=None):
        super(ConvBNLayer, self).__init__()
        if "downsample" in name:
            conv_name = name + ".0"
        else:
            conv_name = name
        self._conv = Conv2D(
            num_channels=input_channels, num_filters=output_channels,
            filter_size=filter_size, stride=stride,
            padding=(filter_size - 1) // 2, groups=groups, act=None,
            param_attr=ParamAttr(name=conv_name + ".weight"), bias_attr=False)
        if "downsample" in name:
            bn_name = name[:9] + "downsample.1"
        else:
            if "conv1" == name:
                bn_name = "bn" + name[-1]
            else:
                bn_name = (name[:10] if name[7:9].isdigit() else name[:9]) + "bn" + name[-1]
        self._bn = BatchNorm(
            num_channels=output_channels, act=act,
            param_attr=ParamAttr(name=bn_name + ".weight"),
            bias_attr=ParamAttr(name=bn_name + ".bias"),
            moving_mean_name=bn_name + ".running_mean",
            moving_variance_name=bn_name + ".running_var")

    def forward(self, inputs):
        x = self._conv(inputs)
        x = self._bn(x)
        return x


class Short_Cut(fluid.dygraph.Layer):
    def __init__(self, input_channels, output_channels, stride, name=None):
        super(Short_Cut, self).__init__()
        self.input_channels = input_channels
        self.output_channels = output_channels
        self.stride = stride
        if input_channels != output_channels or stride != 1:
            self._conv = ConvBNLayer(
                input_channels, output_channels, filter_size=1, stride=stride, name=name)

    def forward(self, inputs):
        if self.input_channels != self.output_channels or self.stride != 1:
            return self._conv(inputs)
        return inputs


class Bottleneck_Block(fluid.dygraph.Layer):
    def __init__(self, input_channels, output_channels, stride, cardinality, width, name):
        super(Bottleneck_Block, self).__init__()
        self._conv0 = ConvBNLayer(
            input_channels, output_channels, filter_size=1, act="relu", name=name + ".conv1")
        self._conv1 = ConvBNLayer(
            output_channels, output_channels, filter_size=3, act="relu",
            stride=stride, groups=cardinality, name=name + ".conv2")
        self._conv2 = ConvBNLayer(
            output_channels, output_channels // (width // 8), filter_size=1,
            act=None, name=name + ".conv3")
        self._short = Short_Cut(
            input_channels, output_channels // (width // 8),
            stride=stride, name=name + ".downsample")

    def forward(self, inputs):
        x = self._conv0(inputs)
        x = self._conv1(x)
        x = self._conv2(x)
        y = self._short(inputs)
        return fluid.layers.elementwise_add(x, y, act="relu")


class ResNeXt101_wsl(fluid.dygraph.Layer):
    def __init__(self, layers=101, cardinality=32, width=48, class_dim=1000):
        super(ResNeXt101_wsl, self).__init__()
        self.class_dim = class_dim
        self.layers = layers
        self.cardinality = cardinality
        self.width = width
        self.scale = width // 8
        self.depth = [3, 4, 23, 3]
        self.base_width = cardinality * width
        num_filters = [self.base_width * i for i in [1, 2, 4, 8]]  # [256, 512, 1024, 2048]

        self._conv_stem = ConvBNLayer(3, 64, 7, stride=2, act="relu", name="conv1")
        self._pool = Pool2D(pool_size=3, pool_stride=2, pool_padding=1, pool_type="max")
        self._conv1_0 = Bottleneck_Block(
            64, num_filters[0], stride=1,
            cardinality=self.cardinality, width=self.width, name="layer1.0")
        # _conv1_1, _conv1_2: Bottleneck_Block(num_filters[0] // (width // 8), num_filters[0], stride=1, ...),
        #   named "layer1.1" / "layer1.2"
        # _conv2_0 ... _conv2_3: four blocks producing num_filters[1], "layer2.0" ... "layer2.3"
        #   (the first with stride=2)
        # _conv3_0 ... _conv3_22: twenty-three blocks producing num_filters[2], "layer3.0" ... "layer3.22"
        #   (the first with stride=2)
        # _conv4_0 ... _conv4_2: three blocks producing num_filters[3], "layer4.0" ... "layer4.2"
        #   (the first with stride=2)
        self._avg_pool = Pool2D(pool_type="avg", global_pooling=True)
        self._out = Linear(
            input_dim=num_filters[3] // (width // 8), output_dim=class_dim,
            param_attr=ParamAttr(name="fc.weight"), bias_attr=ParamAttr(name="fc.bias"))

    def forward(self, inputs):
        x = self._conv_stem(inputs)
        x = self._pool(x)
        # (the _conv1_*, _conv2_*, _conv3_* and _conv4_* blocks are applied in order)
        x = self._avg_pool(x)
        x = fluid.layers.squeeze(x, axes=[2, 3])
        x = self._out(x)
        return x


def ResNeXt101_32x8d_wsl():
    model = ResNeXt101_wsl(cardinality=32, width=8)
    return model


def ResNeXt101_32x16d_wsl():
    model = ResNeXt101_wsl(cardinality=32, width=16)
    return model


def ResNeXt101_32x32d_wsl():
    model = ResNeXt101_wsl(cardinality=32, width=32)
    return model


def ResNeXt101_32x48d_wsl():
    model = ResNeXt101_wsl(cardinality=32, width=48)
    return model


def Fix_ResNeXt101_32x48d_wsl():
    model = ResNeXt101_wsl(cardinality=32, width=48)
    return model
ppcls/modeling/architectures/squeezenet.py
浏览文件 @
0dfe15d2
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math
import sys
import time

import numpy as np
import argparse
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid import framework

__all__ = ["SqueezeNet1_0", "SqueezeNet1_1"]


class Make_Fire_Conv(fluid.dygraph.Layer):
    def __init__(self,
                 input_channels,
                 output_channels,
                 filter_size,
                 padding=0,
                 name=None):
        super(Make_Fire_Conv, self).__init__()
        self._conv = Conv2D(
            input_channels,
            output_channels,
            filter_size,
            padding=padding,
            act="relu",
            param_attr=ParamAttr(name=name + "_weights"),
            bias_attr=ParamAttr(name=name + "_offset"))

    def forward(self, inputs):
        return self._conv(inputs)


class Make_Fire(fluid.dygraph.Layer):
    def __init__(self,
                 input_channels,
                 squeeze_channels,
                 expand1x1_channels,
                 expand3x3_channels,
                 name=None):
        super(Make_Fire, self).__init__()
        self._conv = Make_Fire_Conv(
            input_channels, squeeze_channels, 1, name=name + "_squeeze1x1")
        self._conv_path1 = Make_Fire_Conv(
            squeeze_channels, expand1x1_channels, 1, name=name + "_expand1x1")
        self._conv_path2 = Make_Fire_Conv(
            squeeze_channels, expand3x3_channels, 3, padding=1,
            name=name + "_expand3x3")

    def forward(self, inputs):
        x = self._conv(inputs)
        x1 = self._conv_path1(x)
        x2 = self._conv_path2(x)
        return fluid.layers.concat([x1, x2], axis=1)


class SqueezeNet(fluid.dygraph.Layer):
    def __init__(self, version, class_dim=1000):
        super(SqueezeNet, self).__init__()
        self.version = version
        assert version in ['1.0', '1.1'], \
            "supported version are {} but input version is {}".format(
                ['1.0', '1.1'], version)

        if version == '1.0':
            self._conv = Conv2D(
                3, 96, 7, stride=2, act="relu",
                param_attr=ParamAttr(name="conv1_weights"),
                bias_attr=ParamAttr(name="conv1_offset"))
            self._pool = Pool2D(pool_size=3, pool_stride=2, pool_type="max")
            self._conv1 = Make_Fire(96, 16, 64, 64, name="fire2")
            self._conv2 = Make_Fire(128, 16, 64, 64, name="fire3")
            self._conv3 = Make_Fire(128, 32, 128, 128, name="fire4")
            self._conv4 = Make_Fire(256, 32, 128, 128, name="fire5")
            self._conv5 = Make_Fire(256, 48, 192, 192, name="fire6")
            self._conv6 = Make_Fire(384, 48, 192, 192, name="fire7")
            self._conv7 = Make_Fire(384, 64, 256, 256, name="fire8")
            self._conv8 = Make_Fire(512, 64, 256, 256, name="fire9")
        else:
            self._conv = Conv2D(
                3, 64, 3, stride=2, padding=1, act="relu",
                param_attr=ParamAttr(name="conv1_weights"),
                bias_attr=ParamAttr(name="conv1_offset"))
            self._pool = Pool2D(pool_size=3, pool_stride=2, pool_type="max")
            self._conv1 = Make_Fire(64, 16, 64, 64, name="fire2")
            self._conv2 = Make_Fire(128, 16, 64, 64, name="fire3")
            self._conv3 = Make_Fire(128, 32, 128, 128, name="fire4")
            self._conv4 = Make_Fire(256, 32, 128, 128, name="fire5")
            self._conv5 = Make_Fire(256, 48, 192, 192, name="fire6")
            self._conv6 = Make_Fire(384, 48, 192, 192, name="fire7")
            self._conv7 = Make_Fire(384, 64, 256, 256, name="fire8")
            self._conv8 = Make_Fire(512, 64, 256, 256, name="fire9")

        self._drop = Dropout(p=0.5)
        self._conv9 = Conv2D(
            512, class_dim, 1, act="relu",
            param_attr=ParamAttr(name="conv10_weights"),
            bias_attr=ParamAttr(name="conv10_offset"))
        self._avg_pool = Pool2D(pool_type="avg", global_pooling=True)

    def forward(self, inputs):
        x = self._conv(inputs)
        x = self._pool(x)
        if self.version == "1.0":
            x = self._conv1(x)
            x = self._conv2(x)
            x = self._conv3(x)
            x = self._pool(x)
            x = self._conv4(x)
            x = self._conv5(x)
            x = self._conv6(x)
            x = self._conv7(x)
            x = self._pool(x)
            x = self._conv8(x)
        else:
            x = self._conv1(x)
            x = self._conv2(x)
            x = self._pool(x)
            x = self._conv3(x)
            x = self._conv4(x)
            x = self._pool(x)
            x = self._conv5(x)
            x = self._conv6(x)
            x = self._conv7(x)
            x = self._conv8(x)
        x = self._drop(x)
        x = self._conv9(x)
        x = self._avg_pool(x)
        x = fluid.layers.squeeze(x, axes=[2, 3])
        return x


def SqueezeNet1_0():
    model = SqueezeNet(version="1.0")
    return model


def SqueezeNet1_1():
    model = SqueezeNet(version="1.1")
    return model
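As a quick sanity check on the fire-module channel arithmetic above (a sketch, not part of the commit): the squeeze 1x1 convolution narrows the input, and the two expand paths are concatenated along the channel axis, so fire2 maps 96 input channels to 64 + 64 = 128 output channels. The 54x54 spatial size below is only an illustrative assumption.

import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard():
    fire2 = Make_Fire(96, 16, 64, 64, name="fire2")
    # any spatial size works; 1x1 and padded 3x3 convs preserve it
    feat = fluid.dygraph.to_variable(
        np.random.rand(1, 96, 54, 54).astype("float32"))
    out = fire2(feat)
    print(out.shape)  # [1, 128, 54, 54]: 64 (expand1x1) + 64 (expand3x3)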
ppcls/modeling/architectures/vgg.py
View file @ 0dfe15d2
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
# coding: utf-8
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math
import sys
import time

import numpy as np
import argparse
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid import framework

__all__ = ["VGG11", "VGG13", "VGG16", "VGG19"]


class Conv_Block(fluid.dygraph.Layer):
    def __init__(self, input_channels, output_channels, groups, name=None):
        super(Conv_Block, self).__init__()
        self.groups = groups
        self._conv_1 = Conv2D(
            num_channels=input_channels,
            num_filters=output_channels,
            filter_size=3, stride=1, padding=1, act="relu",
            param_attr=ParamAttr(name=name + "1_weights"),
            bias_attr=False)
        if groups == 2 or groups == 3 or groups == 4:
            self._conv_2 = Conv2D(
                num_channels=output_channels,
                num_filters=output_channels,
                filter_size=3, stride=1, padding=1, act="relu",
                param_attr=ParamAttr(name=name + "2_weights"),
                bias_attr=False)
        if groups == 3 or groups == 4:
            self._conv_3 = Conv2D(
                num_channels=output_channels,
                num_filters=output_channels,
                filter_size=3, stride=1, padding=1, act="relu",
                param_attr=ParamAttr(name=name + "3_weights"),
                bias_attr=False)
        if groups == 4:
            self._conv_4 = Conv2D(
                num_channels=output_channels,
                num_filters=output_channels,
                filter_size=3, stride=1, padding=1, act="relu",
                param_attr=ParamAttr(name=name + "4_weights"),
                bias_attr=False)
        self._pool = Pool2D(pool_size=2, pool_type="max", pool_stride=2)

    def forward(self, inputs):
        x = self._conv_1(inputs)
        if self.groups == 2 or self.groups == 3 or self.groups == 4:
            x = self._conv_2(x)
        if self.groups == 3 or self.groups == 4:
            x = self._conv_3(x)
        if self.groups == 4:
            x = self._conv_4(x)
        x = self._pool(x)
        return x


class VGGNet(fluid.dygraph.Layer):
    def __init__(self, layers=11, class_dim=1000):
        super(VGGNet, self).__init__()
        self.layers = layers
        self.vgg_configure = {
            11: [1, 1, 2, 2, 2],
            13: [2, 2, 2, 2, 2],
            16: [2, 2, 3, 3, 3],
            19: [2, 2, 4, 4, 4]
        }
        assert self.layers in self.vgg_configure.keys(), \
            "supported layers are {} but input layer is {}".format(
                self.vgg_configure.keys(), layers)
        self.groups = self.vgg_configure[self.layers]

        self._conv_block_1 = Conv_Block(3, 64, self.groups[0], name="conv1_")
        self._conv_block_2 = Conv_Block(64, 128, self.groups[1], name="conv2_")
        self._conv_block_3 = Conv_Block(128, 256, self.groups[2], name="conv3_")
        self._conv_block_4 = Conv_Block(256, 512, self.groups[3], name="conv4_")
        self._conv_block_5 = Conv_Block(512, 512, self.groups[4], name="conv5_")

        # self._drop = fluid.dygraph.nn.Dropout(p=0.5)
        self._fc1 = Linear(
            input_dim=7 * 7 * 512,
            output_dim=4096,
            act="relu",
            param_attr=ParamAttr(name="fc6_weights"),
            bias_attr=ParamAttr(name="fc6_offset"))
        self._fc2 = Linear(
            input_dim=4096,
            output_dim=4096,
            act="relu",
            param_attr=ParamAttr(name="fc7_weights"),
            bias_attr=ParamAttr(name="fc7_offset"))
        self._out = Linear(
            input_dim=4096,
            output_dim=class_dim,
            param_attr=ParamAttr(name="fc8_weights"),
            bias_attr=ParamAttr(name="fc8_offset"))

    def forward(self, inputs):
        x = self._conv_block_1(inputs)
        x = self._conv_block_2(x)
        x = self._conv_block_3(x)
        x = self._conv_block_4(x)
        x = self._conv_block_5(x)
        x = fluid.layers.flatten(x, axis=0)
        x = self._fc1(x)
        # x = self._drop(x)
        x = self._fc2(x)
        # x = self._drop(x)
        x = self._out(x)
        return x


def VGG11():
    model = VGGNet(layers=11)
    return model


def VGG13():
    model = VGGNet(layers=13)
    return model


def VGG16():
    model = VGGNet(layers=16)
    return model


def VGG19():
    model = VGGNet(layers=19)
    return model
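A minimal usage sketch for the dygraph VGG above, not part of the commit: the hard-coded fc6 input_dim of 7*7*512 assumes a 224x224 input, and the batch size of 1 used here matches the flatten(..., axis=0) call in forward. The [1, 1000] output shape assumes the default class_dim.

import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard():
    model = VGG16()
    model.eval()
    image = fluid.dygraph.to_variable(
        np.random.rand(1, 3, 224, 224).astype("float32"))
    out = model(image)
    print(out.shape)  # expected [1, 1000] with the default class_dim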
ppcls/modeling/architectures/xception.py
View file @ 0dfe15d2
This diff is collapsed.
ppcls/modeling/architectures/xception_deeplab.py
View file @ 0dfe15d2
This diff is collapsed.