Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
PaddleClas
提交
ea746480
P
PaddleClas
项目概览
PaddlePaddle
/
PaddleClas
大约 1 年 前同步成功
通知
115
Star
4999
Fork
1114
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
19
列表
看板
标记
里程碑
合并请求
6
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
PaddleClas
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
19
Issue
19
列表
看板
标记
里程碑
合并请求
6
合并请求
6
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
ea746480
编写于
9月 16, 2020
作者:
L
littletomatodonkey
提交者:
GitHub
9月 16, 2020
浏览文件
操作
浏览文件
下载
差异文件
Merge pull request #275 from littletomatodonkey/dyg/adp-2.0b
Fix clas api to paddle2.0.0b0
上级
5e092259
922f33fe
变更
43
展开全部
显示空白变更内容
内联
并排
Showing
43 changed file
with
1493 addition
and
2490 deletion
+1493
-2490
ppcls/data/reader.py
ppcls/data/reader.py
+1
-1
ppcls/modeling/architectures/__init__.py
ppcls/modeling/architectures/__init__.py
+10
-1
ppcls/modeling/architectures/alexnet.py
ppcls/modeling/architectures/alexnet.py
+92
-63
ppcls/modeling/architectures/csp_resnet.py
ppcls/modeling/architectures/csp_resnet.py
+0
-258
ppcls/modeling/architectures/darknet.py
ppcls/modeling/architectures/darknet.py
+22
-21
ppcls/modeling/architectures/darts_gs.py
ppcls/modeling/architectures/darts_gs.py
+0
-543
ppcls/modeling/architectures/densenet.py
ppcls/modeling/architectures/densenet.py
+29
-31
ppcls/modeling/architectures/distillation_models.py
ppcls/modeling/architectures/distillation_models.py
+3
-4
ppcls/modeling/architectures/dpn.py
ppcls/modeling/architectures/dpn.py
+32
-35
ppcls/modeling/architectures/efficientnet.py
ppcls/modeling/architectures/efficientnet.py
+39
-32
ppcls/modeling/architectures/googlenet.py
ppcls/modeling/architectures/googlenet.py
+41
-39
ppcls/modeling/architectures/hrnet.py
ppcls/modeling/architectures/hrnet.py
+38
-39
ppcls/modeling/architectures/inception_v4.py
ppcls/modeling/architectures/inception_v4.py
+54
-38
ppcls/modeling/architectures/layers.py
ppcls/modeling/architectures/layers.py
+0
-250
ppcls/modeling/architectures/mobilenet_v1.py
ppcls/modeling/architectures/mobilenet_v1.py
+17
-19
ppcls/modeling/architectures/mobilenet_v2.py
ppcls/modeling/architectures/mobilenet_v2.py
+19
-19
ppcls/modeling/architectures/mobilenet_v3.py
ppcls/modeling/architectures/mobilenet_v3.py
+58
-58
ppcls/modeling/architectures/model_libs.py
ppcls/modeling/architectures/model_libs.py
+0
-143
ppcls/modeling/architectures/res2net.py
ppcls/modeling/architectures/res2net.py
+23
-25
ppcls/modeling/architectures/res2net_vd.py
ppcls/modeling/architectures/res2net_vd.py
+26
-30
ppcls/modeling/architectures/resnest.py
ppcls/modeling/architectures/resnest.py
+42
-63
ppcls/modeling/architectures/resnet.py
ppcls/modeling/architectures/resnet.py
+21
-23
ppcls/modeling/architectures/resnet_vc.py
ppcls/modeling/architectures/resnet_vc.py
+21
-23
ppcls/modeling/architectures/resnet_vd.py
ppcls/modeling/architectures/resnet_vd.py
+23
-29
ppcls/modeling/architectures/resnext.py
ppcls/modeling/architectures/resnext.py
+19
-21
ppcls/modeling/architectures/resnext101_wsl.py
ppcls/modeling/architectures/resnext101_wsl.py
+296
-97
ppcls/modeling/architectures/resnext_vd.py
ppcls/modeling/architectures/resnext_vd.py
+21
-27
ppcls/modeling/architectures/se_resnet_vd.py
ppcls/modeling/architectures/se_resnet_vd.py
+35
-41
ppcls/modeling/architectures/shufflenet_v2.py
ppcls/modeling/architectures/shufflenet_v2.py
+28
-35
ppcls/modeling/architectures/squeezenet.py
ppcls/modeling/architectures/squeezenet.py
+77
-74
ppcls/modeling/architectures/vgg.py
ppcls/modeling/architectures/vgg.py
+83
-70
ppcls/modeling/architectures/xception.py
ppcls/modeling/architectures/xception.py
+49
-52
ppcls/modeling/architectures/xception_deeplab.py
ppcls/modeling/architectures/xception_deeplab.py
+30
-28
ppcls/modeling/loss.py
ppcls/modeling/loss.py
+20
-17
ppcls/optimizer/learning_rate.py
ppcls/optimizer/learning_rate.py
+45
-83
ppcls/optimizer/optimizer.py
ppcls/optimizer/optimizer.py
+11
-13
ppcls/utils/check.py
ppcls/utils/check.py
+6
-5
ppcls/utils/logger.py
ppcls/utils/logger.py
+1
-1
ppcls/utils/save_load.py
ppcls/utils/save_load.py
+7
-6
tools/eval.py
tools/eval.py
+24
-23
tools/infer/infer.py
tools/infer/infer.py
+34
-28
tools/program.py
tools/program.py
+32
-18
tools/train.py
tools/train.py
+64
-64
未找到文件。
ppcls/data/reader.py
浏览文件 @
ea746480
...
...
@@ -17,7 +17,7 @@ import imghdr
import
os
import
signal
from
paddle.
fluid.io
import
multiprocess_reader
from
paddle.
reader
import
multiprocess_reader
from
.
import
imaug
from
.imaug
import
transform
...
...
ppcls/modeling/architectures/__init__.py
浏览文件 @
ea746480
...
...
@@ -23,7 +23,7 @@ from .se_resnet_vd import SE_ResNet18_vd, SE_ResNet34_vd, SE_ResNet50_vd, SE_Res
from
.se_resnext_vd
import
SE_ResNeXt50_vd_32x4d
,
SE_ResNeXt50_vd_32x4d
,
SENet154_vd
from
.dpn
import
DPN68
from
.densenet
import
DenseNet121
from
.hrnet
import
HRNet_W18_C
from
.hrnet
import
HRNet_W18_C
,
HRNet_W30_C
,
HRNet_W32_C
,
HRNet_W40_C
,
HRNet_W44_C
,
HRNet_W48_C
,
HRNet_W60_C
,
HRNet_W64_C
,
SE_HRNet_W18_C
,
SE_HRNet_W30_C
,
SE_HRNet_W32_C
,
SE_HRNet_W40_C
,
SE_HRNet_W44_C
,
SE_HRNet_W48_C
,
SE_HRNet_W60_C
,
SE_HRNet_W64_C
from
.efficientnet
import
EfficientNetB0
from
.resnest
import
ResNeSt50_fast_1s1x64d
,
ResNeSt50
from
.googlenet
import
GoogLeNet
...
...
@@ -31,5 +31,14 @@ from .mobilenet_v1 import MobileNetV1_x0_25, MobileNetV1_x0_5, MobileNetV1_x0_75
from
.mobilenet_v2
import
MobileNetV2_x0_25
,
MobileNetV2_x0_5
,
MobileNetV2_x0_75
,
MobileNetV2
,
MobileNetV2_x1_5
,
MobileNetV2_x2_0
from
.mobilenet_v3
import
MobileNetV3_small_x0_35
,
MobileNetV3_small_x0_5
,
MobileNetV3_small_x0_75
,
MobileNetV3_small_x1_0
,
MobileNetV3_small_x1_25
,
MobileNetV3_large_x0_35
,
MobileNetV3_large_x0_5
,
MobileNetV3_large_x0_75
,
MobileNetV3_large_x1_0
,
MobileNetV3_large_x1_25
from
.shufflenet_v2
import
ShuffleNetV2_x0_25
,
ShuffleNetV2_x0_33
,
ShuffleNetV2_x0_5
,
ShuffleNetV2
,
ShuffleNetV2_x1_5
,
ShuffleNetV2_x2_0
,
ShuffleNetV2_swish
from
.alexnet
import
AlexNet
from
.inception_v4
import
InceptionV4
from
.xception
import
Xception41
,
Xception65
,
Xception71
from
.xception_deeplab
import
Xception41_deeplab
,
Xception65_deeplab
,
Xception71_deeplab
from
.resnext101_wsl
import
ResNeXt101_32x8d_wsl
,
ResNeXt101_32x16d_wsl
,
ResNeXt101_32x32d_wsl
,
ResNeXt101_32x48d_wsl
from
.shufflenet_v2
import
ShuffleNetV2_x0_25
,
ShuffleNetV2_x0_33
,
ShuffleNetV2_x0_5
,
ShuffleNetV2
,
ShuffleNetV2_x1_5
,
ShuffleNetV2_x2_0
,
ShuffleNetV2_swish
from
.squeezenet
import
SqueezeNet1_0
,
SqueezeNet1_1
from
.vgg
import
VGG11
,
VGG13
,
VGG16
,
VGG19
from
.darknet
import
DarkNet53
from
.distillation_models
import
ResNet50_vd_distill_MobileNetV3_large_x1_0
ppcls/modeling/architectures/alexnet.py
浏览文件 @
ea746480
import
paddle
import
paddle.fluid
as
fluid
from
paddle.fluid.param_attr
import
ParamAttr
from
paddle.fluid.dygraph.nn
import
Conv2D
,
Pool2D
,
BatchNorm
,
Linear
,
Dropout
from
paddle
import
ParamAttr
import
paddle.nn
as
nn
import
paddle.nn.functional
as
F
from
paddle.nn
import
Conv2d
,
BatchNorm
,
Linear
,
Dropout
,
ReLU
from
paddle.nn
import
AdaptiveAvgPool2d
,
MaxPool2d
,
AvgPool2d
from
paddle.nn.initializer
import
Uniform
import
math
__all__
=
[
"AlexNet"
]
class
ConvPoolLayer
(
fluid
.
dygraph
.
Layer
):
class
ConvPoolLayer
(
nn
.
Layer
):
def
__init__
(
self
,
inputc
_channels
,
input
_channels
,
output_channels
,
filter_size
,
stride
,
...
...
@@ -19,85 +23,110 @@ class ConvPoolLayer(fluid.dygraph.Layer):
name
=
None
):
super
(
ConvPoolLayer
,
self
).
__init__
()
self
.
_conv
=
Conv2D
(
num_channels
=
inputc_channels
,
num_filters
=
output_channels
,
filter_size
=
filter_size
,
self
.
relu
=
ReLU
()
if
act
==
"relu"
else
None
self
.
_conv
=
Conv2d
(
in_channels
=
input_channels
,
out_channels
=
output_channels
,
kernel_size
=
filter_size
,
stride
=
stride
,
padding
=
padding
,
groups
=
groups
,
param_attr
=
ParamAttr
(
name
=
name
+
"_weights"
,
initializer
=
fluid
.
initializer
.
Uniform
(
-
stdv
,
stdv
)),
bias_attr
=
ParamAttr
(
name
=
name
+
"_offset"
,
initializer
=
fluid
.
initializer
.
Uniform
(
-
stdv
,
stdv
)),
act
=
act
)
self
.
_pool
=
Pool2D
(
pool_size
=
3
,
pool_stride
=
2
,
pool_padding
=
0
,
pool_type
=
"max"
)
weight_attr
=
ParamAttr
(
name
=
name
+
"_weights"
,
initializer
=
Uniform
(
-
stdv
,
stdv
)),
bias_attr
=
ParamAttr
(
name
=
name
+
"_offset"
,
initializer
=
Uniform
(
-
stdv
,
stdv
)))
self
.
_pool
=
MaxPool2d
(
kernel_size
=
3
,
stride
=
2
,
padding
=
0
)
def
forward
(
self
,
inputs
):
x
=
self
.
_conv
(
inputs
)
if
self
.
relu
is
not
None
:
x
=
self
.
relu
(
x
)
x
=
self
.
_pool
(
x
)
return
x
class
AlexNetDY
(
fluid
.
dygraph
.
Layer
):
class
AlexNetDY
(
nn
.
Layer
):
def
__init__
(
self
,
class_dim
=
1000
):
super
(
AlexNetDY
,
self
).
__init__
()
stdv
=
1.0
/
math
.
sqrt
(
3
*
11
*
11
)
stdv
=
1.0
/
math
.
sqrt
(
3
*
11
*
11
)
self
.
_conv1
=
ConvPoolLayer
(
3
,
64
,
11
,
4
,
2
,
stdv
,
act
=
"relu"
,
name
=
"conv1"
)
stdv
=
1.0
/
math
.
sqrt
(
64
*
5
*
5
)
stdv
=
1.0
/
math
.
sqrt
(
64
*
5
*
5
)
self
.
_conv2
=
ConvPoolLayer
(
64
,
192
,
5
,
1
,
2
,
stdv
,
act
=
"relu"
,
name
=
"conv2"
)
stdv
=
1.0
/
math
.
sqrt
(
192
*
3
*
3
)
self
.
_conv3
=
Conv2D
(
192
,
384
,
3
,
stride
=
1
,
padding
=
1
,
param_attr
=
ParamAttr
(
name
=
"conv3_weights"
,
initializer
=
fluid
.
initializer
.
Uniform
(
-
stdv
,
stdv
)),
bias_attr
=
ParamAttr
(
name
=
"conv3_offset"
,
initializer
=
fluid
.
initializer
.
Uniform
(
-
stdv
,
stdv
)),
act
=
"relu"
)
stdv
=
1.0
/
math
.
sqrt
(
384
*
3
*
3
)
self
.
_conv4
=
Conv2D
(
384
,
256
,
3
,
stride
=
1
,
padding
=
1
,
param_attr
=
ParamAttr
(
name
=
"conv4_weights"
,
initializer
=
fluid
.
initializer
.
Uniform
(
-
stdv
,
stdv
)),
bias_attr
=
ParamAttr
(
name
=
"conv4_offset"
,
initializer
=
fluid
.
initializer
.
Uniform
(
-
stdv
,
stdv
)),
act
=
"relu"
)
stdv
=
1.0
/
math
.
sqrt
(
256
*
3
*
3
)
stdv
=
1.0
/
math
.
sqrt
(
192
*
3
*
3
)
self
.
_conv3
=
Conv2d
(
192
,
384
,
3
,
stride
=
1
,
padding
=
1
,
weight_attr
=
ParamAttr
(
name
=
"conv3_weights"
,
initializer
=
Uniform
(
-
stdv
,
stdv
)),
bias_attr
=
ParamAttr
(
name
=
"conv3_offset"
,
initializer
=
Uniform
(
-
stdv
,
stdv
)))
stdv
=
1.0
/
math
.
sqrt
(
384
*
3
*
3
)
self
.
_conv4
=
Conv2d
(
384
,
256
,
3
,
stride
=
1
,
padding
=
1
,
weight_attr
=
ParamAttr
(
name
=
"conv4_weights"
,
initializer
=
Uniform
(
-
stdv
,
stdv
)),
bias_attr
=
ParamAttr
(
name
=
"conv4_offset"
,
initializer
=
Uniform
(
-
stdv
,
stdv
)))
stdv
=
1.0
/
math
.
sqrt
(
256
*
3
*
3
)
self
.
_conv5
=
ConvPoolLayer
(
256
,
256
,
3
,
1
,
1
,
stdv
,
act
=
"relu"
,
name
=
"conv5"
)
stdv
=
1.0
/
math
.
sqrt
(
256
*
6
*
6
)
stdv
=
1.0
/
math
.
sqrt
(
256
*
6
*
6
)
self
.
_drop1
=
Dropout
(
p
=
0.5
)
self
.
_fc6
=
Linear
(
input_dim
=
256
*
6
*
6
,
output_dim
=
4096
,
param_attr
=
ParamAttr
(
name
=
"fc6_weights"
,
initializer
=
fluid
.
initializer
.
Uniform
(
-
stdv
,
stdv
)),
bias_attr
=
ParamAttr
(
name
=
"fc6_offset"
,
initializer
=
fluid
.
initializer
.
Uniform
(
-
stdv
,
stdv
)),
act
=
"relu"
)
self
.
_drop1
=
Dropout
(
p
=
0.5
,
mode
=
"downscale_in_infer"
)
self
.
_fc6
=
Linear
(
in_features
=
256
*
6
*
6
,
out_features
=
4096
,
weight_attr
=
ParamAttr
(
name
=
"fc6_weights"
,
initializer
=
Uniform
(
-
stdv
,
stdv
)),
bias_attr
=
ParamAttr
(
name
=
"fc6_offset"
,
initializer
=
Uniform
(
-
stdv
,
stdv
)))
self
.
_drop2
=
Dropout
(
p
=
0.5
)
self
.
_fc7
=
Linear
(
input_dim
=
4096
,
output_dim
=
4096
,
param_attr
=
ParamAttr
(
name
=
"fc7_weights"
,
initializer
=
fluid
.
initializer
.
Uniform
(
-
stdv
,
stdv
)),
bias_attr
=
ParamAttr
(
name
=
"fc7_offset"
,
initializer
=
fluid
.
initializer
.
Uniform
(
-
stdv
,
stdv
)),
act
=
"relu"
)
self
.
_fc8
=
Linear
(
input_dim
=
4096
,
output_dim
=
class_dim
,
param_attr
=
ParamAttr
(
name
=
"fc8_weights"
,
initializer
=
fluid
.
initializer
.
Uniform
(
-
stdv
,
stdv
)),
bias_attr
=
ParamAttr
(
name
=
"fc8_offset"
,
initializer
=
fluid
.
initializer
.
Uniform
(
-
stdv
,
stdv
)))
self
.
_drop2
=
Dropout
(
p
=
0.5
,
mode
=
"downscale_in_infer"
)
self
.
_fc7
=
Linear
(
in_features
=
4096
,
out_features
=
4096
,
weight_attr
=
ParamAttr
(
name
=
"fc7_weights"
,
initializer
=
Uniform
(
-
stdv
,
stdv
)),
bias_attr
=
ParamAttr
(
name
=
"fc7_offset"
,
initializer
=
Uniform
(
-
stdv
,
stdv
)))
self
.
_fc8
=
Linear
(
in_features
=
4096
,
out_features
=
class_dim
,
weight_attr
=
ParamAttr
(
name
=
"fc8_weights"
,
initializer
=
Uniform
(
-
stdv
,
stdv
)),
bias_attr
=
ParamAttr
(
name
=
"fc8_offset"
,
initializer
=
Uniform
(
-
stdv
,
stdv
)))
def
forward
(
self
,
inputs
):
x
=
self
.
_conv1
(
inputs
)
x
=
self
.
_conv2
(
x
)
x
=
self
.
_conv3
(
x
)
x
=
F
.
relu
(
x
)
x
=
self
.
_conv4
(
x
)
x
=
F
.
relu
(
x
)
x
=
self
.
_conv5
(
x
)
x
=
fluid
.
layers
.
flatten
(
x
,
axis
=
0
)
x
=
paddle
.
flatten
(
x
,
start_axis
=
1
,
stop_axis
=-
1
)
x
=
self
.
_drop1
(
x
)
x
=
self
.
_fc6
(
x
)
x
=
F
.
relu
(
x
)
x
=
self
.
_drop2
(
x
)
x
=
self
.
_fc7
(
x
)
x
=
F
.
relu
(
x
)
x
=
self
.
_fc8
(
x
)
return
x
def
AlexNet
(
**
args
):
model
=
AlexNetDY
(
**
args
)
return
model
ppcls/modeling/architectures/csp_resnet.py
已删除
100644 → 0
浏览文件 @
5e092259
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from
__future__
import
absolute_import
from
__future__
import
division
from
__future__
import
print_function
import
math
import
paddle.fluid
as
fluid
from
paddle.fluid.param_attr
import
ParamAttr
__all__
=
[
"CSPResNet50_leaky"
,
"CSPResNet50_mish"
,
"CSPResNet101_leaky"
,
"CSPResNet101_mish"
]
class
CSPResNet
():
def
__init__
(
self
,
layers
=
50
,
act
=
"leaky_relu"
):
self
.
layers
=
layers
self
.
act
=
act
def
net
(
self
,
input
,
class_dim
=
1000
,
data_format
=
"NCHW"
):
layers
=
self
.
layers
supported_layers
=
[
50
,
101
]
assert
layers
in
supported_layers
,
\
"supported layers are {} but input layer is {}"
.
format
(
supported_layers
,
layers
)
if
layers
==
50
:
depth
=
[
3
,
3
,
5
,
2
]
elif
layers
==
101
:
depth
=
[
3
,
3
,
22
,
2
]
num_filters
=
[
64
,
128
,
256
,
512
]
conv
=
self
.
conv_bn_layer
(
input
=
input
,
num_filters
=
64
,
filter_size
=
7
,
stride
=
2
,
act
=
self
.
act
,
name
=
"conv1"
,
data_format
=
data_format
)
conv
=
fluid
.
layers
.
pool2d
(
input
=
conv
,
pool_size
=
2
,
pool_stride
=
2
,
pool_padding
=
0
,
pool_type
=
'max'
,
data_format
=
data_format
)
for
block
in
range
(
len
(
depth
)):
conv_name
=
"res"
+
str
(
block
+
2
)
+
chr
(
97
)
if
block
!=
0
:
conv
=
self
.
conv_bn_layer
(
input
=
conv
,
num_filters
=
num_filters
[
block
],
filter_size
=
3
,
stride
=
2
,
act
=
self
.
act
,
name
=
conv_name
+
"_downsample"
,
data_format
=
data_format
)
# split
left
=
conv
right
=
conv
if
block
==
0
:
ch
=
num_filters
[
block
]
else
:
ch
=
num_filters
[
block
]
*
2
right
=
self
.
conv_bn_layer
(
input
=
right
,
num_filters
=
ch
,
filter_size
=
1
,
act
=
self
.
act
,
name
=
conv_name
+
"_right_first_route"
,
data_format
=
data_format
)
for
i
in
range
(
depth
[
block
]):
conv_name
=
"res"
+
str
(
block
+
2
)
+
chr
(
97
+
i
)
right
=
self
.
bottleneck_block
(
input
=
right
,
num_filters
=
num_filters
[
block
],
stride
=
1
,
name
=
conv_name
,
data_format
=
data_format
)
# route
left
=
self
.
conv_bn_layer
(
input
=
left
,
num_filters
=
num_filters
[
block
]
*
2
,
filter_size
=
1
,
act
=
self
.
act
,
name
=
conv_name
+
"_left_route"
,
data_format
=
data_format
)
right
=
self
.
conv_bn_layer
(
input
=
right
,
num_filters
=
num_filters
[
block
]
*
2
,
filter_size
=
1
,
act
=
self
.
act
,
name
=
conv_name
+
"_right_route"
,
data_format
=
data_format
)
conv
=
fluid
.
layers
.
concat
([
left
,
right
],
axis
=
1
)
conv
=
self
.
conv_bn_layer
(
input
=
conv
,
num_filters
=
num_filters
[
block
]
*
2
,
filter_size
=
1
,
stride
=
1
,
act
=
self
.
act
,
name
=
conv_name
+
"_merged_transition"
,
data_format
=
data_format
)
pool
=
fluid
.
layers
.
pool2d
(
input
=
conv
,
pool_type
=
'avg'
,
global_pooling
=
True
,
data_format
=
data_format
)
stdv
=
1.0
/
math
.
sqrt
(
pool
.
shape
[
1
]
*
1.0
)
out
=
fluid
.
layers
.
fc
(
input
=
pool
,
size
=
class_dim
,
param_attr
=
fluid
.
param_attr
.
ParamAttr
(
name
=
"fc_0.w_0"
,
initializer
=
fluid
.
initializer
.
Uniform
(
-
stdv
,
stdv
)),
bias_attr
=
ParamAttr
(
name
=
"fc_0.b_0"
))
return
out
def
conv_bn_layer
(
self
,
input
,
num_filters
,
filter_size
,
stride
=
1
,
groups
=
1
,
act
=
None
,
name
=
None
,
data_format
=
'NCHW'
):
conv
=
fluid
.
layers
.
conv2d
(
input
=
input
,
num_filters
=
num_filters
,
filter_size
=
filter_size
,
stride
=
stride
,
padding
=
(
filter_size
-
1
)
//
2
,
groups
=
groups
,
act
=
None
,
param_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
bias_attr
=
False
,
name
=
name
+
'.conv2d.output.1'
,
data_format
=
data_format
)
if
name
==
"conv1"
:
bn_name
=
"bn_"
+
name
else
:
bn_name
=
"bn"
+
name
[
3
:]
bn
=
fluid
.
layers
.
batch_norm
(
input
=
conv
,
act
=
None
,
name
=
bn_name
+
'.output.1'
,
param_attr
=
ParamAttr
(
name
=
bn_name
+
'_scale'
),
bias_attr
=
ParamAttr
(
bn_name
+
'_offset'
),
moving_mean_name
=
bn_name
+
'_mean'
,
moving_variance_name
=
bn_name
+
'_variance'
,
data_layout
=
data_format
)
if
act
==
"relu"
:
bn
=
fluid
.
layers
.
relu
(
bn
)
elif
act
==
"leaky_relu"
:
bn
=
fluid
.
layers
.
leaky_relu
(
bn
)
elif
act
==
"mish"
:
bn
=
self
.
_mish
(
bn
)
return
bn
def
_mish
(
self
,
input
):
return
input
*
fluid
.
layers
.
tanh
(
self
.
_softplus
(
input
))
def
_softplus
(
self
,
input
):
expf
=
fluid
.
layers
.
exp
(
fluid
.
layers
.
clip
(
input
,
-
200
,
50
))
return
fluid
.
layers
.
log
(
1
+
expf
)
def
shortcut
(
self
,
input
,
ch_out
,
stride
,
is_first
,
name
,
data_format
):
if
data_format
==
'NCHW'
:
ch_in
=
input
.
shape
[
1
]
else
:
ch_in
=
input
.
shape
[
-
1
]
if
ch_in
!=
ch_out
or
stride
!=
1
or
is_first
is
True
:
return
self
.
conv_bn_layer
(
input
,
ch_out
,
1
,
stride
,
name
=
name
,
data_format
=
data_format
)
else
:
return
input
def
bottleneck_block
(
self
,
input
,
num_filters
,
stride
,
name
,
data_format
):
conv0
=
self
.
conv_bn_layer
(
input
=
input
,
num_filters
=
num_filters
,
filter_size
=
1
,
act
=
"leaky_relu"
,
name
=
name
+
"_branch2a"
,
data_format
=
data_format
)
conv1
=
self
.
conv_bn_layer
(
input
=
conv0
,
num_filters
=
num_filters
,
filter_size
=
3
,
stride
=
stride
,
act
=
"leaky_relu"
,
name
=
name
+
"_branch2b"
,
data_format
=
data_format
)
conv2
=
self
.
conv_bn_layer
(
input
=
conv1
,
num_filters
=
num_filters
*
2
,
filter_size
=
1
,
act
=
None
,
name
=
name
+
"_branch2c"
,
data_format
=
data_format
)
short
=
self
.
shortcut
(
input
,
num_filters
*
2
,
stride
,
is_first
=
False
,
name
=
name
+
"_branch1"
,
data_format
=
data_format
)
ret
=
short
+
conv2
ret
=
fluid
.
layers
.
leaky_relu
(
ret
,
alpha
=
0.1
)
return
ret
def
CSPResNet50_leaky
():
model
=
CSPResNet
(
layers
=
50
,
act
=
"leaky_relu"
)
return
model
def
CSPResNet50_mish
():
model
=
CSPResNet
(
layers
=
50
,
act
=
"mish"
)
return
model
def
CSPResNet101_leaky
():
model
=
CSPResNet
(
layers
=
101
,
act
=
"leaky_relu"
)
return
model
def
CSPResNet101_mish
():
model
=
CSPResNet
(
layers
=
101
,
act
=
"mish"
)
return
model
ppcls/modeling/architectures/darknet.py
浏览文件 @
ea746480
import
paddle
import
paddle.fluid
as
fluid
from
paddle.fluid.param_attr
import
ParamAttr
from
paddle.fluid.dygraph.nn
import
Conv2D
,
Pool2D
,
BatchNorm
,
Linear
from
paddle
import
ParamAttr
import
paddle.nn
as
nn
import
paddle.nn.functional
as
F
from
paddle.nn
import
Conv2d
,
BatchNorm
,
Linear
,
Dropout
from
paddle.nn
import
AdaptiveAvgPool2d
,
MaxPool2d
,
AvgPool2d
from
paddle.nn.initializer
import
Uniform
import
math
__all__
=
[
"DarkNet53"
]
class
ConvBNLayer
(
fluid
.
dygraph
.
Layer
):
class
ConvBNLayer
(
nn
.
Layer
):
def
__init__
(
self
,
input_channels
,
output_channels
,
...
...
@@ -17,14 +20,13 @@ class ConvBNLayer(fluid.dygraph.Layer):
name
=
None
):
super
(
ConvBNLayer
,
self
).
__init__
()
self
.
_conv
=
Conv2
D
(
num
_channels
=
input_channels
,
num_filter
s
=
output_channels
,
filter
_size
=
filter_size
,
self
.
_conv
=
Conv2
d
(
in
_channels
=
input_channels
,
out_channel
s
=
output_channels
,
kernel
_size
=
filter_size
,
stride
=
stride
,
padding
=
padding
,
act
=
None
,
param_attr
=
ParamAttr
(
name
=
name
+
".conv.weights"
),
weight_attr
=
ParamAttr
(
name
=
name
+
".conv.weights"
),
bias_attr
=
False
)
bn_name
=
name
+
".bn"
...
...
@@ -42,7 +44,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
return
x
class
BasicBlock
(
fluid
.
dygraph
.
Layer
):
class
BasicBlock
(
nn
.
Layer
):
def
__init__
(
self
,
input_channels
,
output_channels
,
name
=
None
):
super
(
BasicBlock
,
self
).
__init__
()
...
...
@@ -54,10 +56,10 @@ class BasicBlock(fluid.dygraph.Layer):
def
forward
(
self
,
inputs
):
x
=
self
.
_conv1
(
inputs
)
x
=
self
.
_conv2
(
x
)
return
fluid
.
layers
.
elementwise_add
(
x
=
inputs
,
y
=
x
)
return
paddle
.
elementwise_add
(
x
=
inputs
,
y
=
x
)
class
DarkNet
(
fluid
.
dygraph
.
Layer
):
class
DarkNet
(
nn
.
Layer
):
def
__init__
(
self
,
class_dim
=
1000
):
super
(
DarkNet
,
self
).
__init__
()
...
...
@@ -102,15 +104,14 @@ class DarkNet(fluid.dygraph.Layer):
self
.
_basic_block_43
=
BasicBlock
(
1024
,
512
,
name
=
"stage.4.2"
)
self
.
_basic_block_44
=
BasicBlock
(
1024
,
512
,
name
=
"stage.4.3"
)
self
.
_pool
=
Pool2D
(
pool_type
=
"avg"
,
global_pooling
=
True
)
self
.
_pool
=
AdaptiveAvgPool2d
(
1
)
stdv
=
1.0
/
math
.
sqrt
(
1024.0
)
self
.
_out
=
Linear
(
input_dim
=
1024
,
output_dim
=
class_dim
,
param_attr
=
ParamAttr
(
name
=
"fc_weights"
,
initializer
=
fluid
.
initializer
.
Uniform
(
-
stdv
,
stdv
)),
1024
,
class_dim
,
weight_attr
=
ParamAttr
(
name
=
"fc_weights"
,
initializer
=
Uniform
(
-
stdv
,
stdv
)),
bias_attr
=
ParamAttr
(
name
=
"fc_offset"
))
def
forward
(
self
,
inputs
):
...
...
@@ -150,7 +151,7 @@ class DarkNet(fluid.dygraph.Layer):
x
=
self
.
_basic_block_44
(
x
)
x
=
self
.
_pool
(
x
)
x
=
fluid
.
layers
.
squeeze
(
x
,
axe
s
=
[
2
,
3
])
x
=
paddle
.
squeeze
(
x
,
axi
s
=
[
2
,
3
])
x
=
self
.
_out
(
x
)
return
x
...
...
ppcls/modeling/architectures/darts_gs.py
已删除
100644 → 0
浏览文件 @
5e092259
此差异已折叠。
点击以展开。
ppcls/modeling/architectures/densenet.py
浏览文件 @
ea746480
...
...
@@ -18,9 +18,11 @@ from __future__ import print_function
import
numpy
as
np
import
paddle
import
paddle.fluid
as
fluid
from
paddle.fluid.param_attr
import
ParamAttr
from
paddle.fluid.dygraph.nn
import
Conv2D
,
Pool2D
,
BatchNorm
,
Linear
,
Dropout
from
paddle
import
ParamAttr
import
paddle.nn
as
nn
from
paddle.nn
import
Conv2d
,
BatchNorm
,
Linear
,
Dropout
from
paddle.nn
import
AdaptiveAvgPool2d
,
MaxPool2d
,
AvgPool2d
from
paddle.nn.initializer
import
Uniform
import
math
...
...
@@ -29,7 +31,7 @@ __all__ = [
]
class
BNACConvLayer
(
fluid
.
dygraph
.
Layer
):
class
BNACConvLayer
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
num_filters
,
...
...
@@ -49,15 +51,14 @@ class BNACConvLayer(fluid.dygraph.Layer):
moving_mean_name
=
name
+
'_bn_mean'
,
moving_variance_name
=
name
+
'_bn_variance'
)
self
.
_conv
=
Conv2
D
(
num
_channels
=
num_channels
,
num_filter
s
=
num_filters
,
filter
_size
=
filter_size
,
self
.
_conv
=
Conv2
d
(
in
_channels
=
num_channels
,
out_channel
s
=
num_filters
,
kernel
_size
=
filter_size
,
stride
=
stride
,
padding
=
pad
,
groups
=
groups
,
act
=
None
,
param_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
weight_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
bias_attr
=
False
)
def
forward
(
self
,
input
):
...
...
@@ -66,7 +67,7 @@ class BNACConvLayer(fluid.dygraph.Layer):
return
y
class
DenseLayer
(
fluid
.
dygraph
.
Layer
):
class
DenseLayer
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
growth_rate
,
bn_size
,
dropout
,
name
=
None
):
super
(
DenseLayer
,
self
).
__init__
()
self
.
dropout
=
dropout
...
...
@@ -88,18 +89,18 @@ class DenseLayer(fluid.dygraph.Layer):
name
=
name
+
"_x2"
)
if
dropout
:
self
.
dropout_func
=
Dropout
(
p
=
dropout
)
self
.
dropout_func
=
Dropout
(
p
=
dropout
,
mode
=
"downscale_in_infer"
)
def
forward
(
self
,
input
):
conv
=
self
.
bn_ac_func1
(
input
)
conv
=
self
.
bn_ac_func2
(
conv
)
if
self
.
dropout
:
conv
=
self
.
dropout_func
(
conv
)
conv
=
fluid
.
layers
.
concat
([
input
,
conv
],
axis
=
1
)
conv
=
paddle
.
concat
([
input
,
conv
],
axis
=
1
)
return
conv
class
DenseBlock
(
fluid
.
dygraph
.
Layer
):
class
DenseBlock
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
num_layers
,
...
...
@@ -132,7 +133,7 @@ class DenseBlock(fluid.dygraph.Layer):
return
conv
class
TransitionLayer
(
fluid
.
dygraph
.
Layer
):
class
TransitionLayer
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
num_output_features
,
name
=
None
):
super
(
TransitionLayer
,
self
).
__init__
()
...
...
@@ -144,7 +145,7 @@ class TransitionLayer(fluid.dygraph.Layer):
stride
=
1
,
name
=
name
)
self
.
pool2d_avg
=
Pool2D
(
pool_size
=
2
,
pool_stride
=
2
,
pool_type
=
'avg'
)
self
.
pool2d_avg
=
AvgPool2d
(
kernel_size
=
2
,
stride
=
2
,
padding
=
0
)
def
forward
(
self
,
input
):
y
=
self
.
conv_ac_func
(
input
)
...
...
@@ -152,7 +153,7 @@ class TransitionLayer(fluid.dygraph.Layer):
return
y
class
ConvBNLayer
(
fluid
.
dygraph
.
Layer
):
class
ConvBNLayer
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
num_filters
,
...
...
@@ -164,15 +165,14 @@ class ConvBNLayer(fluid.dygraph.Layer):
name
=
None
):
super
(
ConvBNLayer
,
self
).
__init__
()
self
.
_conv
=
Conv2
D
(
num
_channels
=
num_channels
,
num_filter
s
=
num_filters
,
filter
_size
=
filter_size
,
self
.
_conv
=
Conv2
d
(
in
_channels
=
num_channels
,
out_channel
s
=
num_filters
,
kernel
_size
=
filter_size
,
stride
=
stride
,
padding
=
pad
,
groups
=
groups
,
act
=
None
,
param_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
weight_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
bias_attr
=
False
)
self
.
_batch_norm
=
BatchNorm
(
num_filters
,
...
...
@@ -188,7 +188,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
return
y
class
DenseNet
(
fluid
.
dygraph
.
Layer
):
class
DenseNet
(
nn
.
Layer
):
def
__init__
(
self
,
layers
=
60
,
bn_size
=
4
,
dropout
=
0
,
class_dim
=
1000
):
super
(
DenseNet
,
self
).
__init__
()
...
...
@@ -214,8 +214,7 @@ class DenseNet(fluid.dygraph.Layer):
act
=
'relu'
,
name
=
"conv1"
)
self
.
pool2d_max
=
Pool2D
(
pool_size
=
3
,
pool_stride
=
2
,
pool_padding
=
1
,
pool_type
=
'max'
)
self
.
pool2d_max
=
MaxPool2d
(
kernel_size
=
3
,
stride
=
2
,
padding
=
1
)
self
.
block_config
=
block_config
...
...
@@ -257,16 +256,15 @@ class DenseNet(fluid.dygraph.Layer):
moving_mean_name
=
'conv5_blk_bn_mean'
,
moving_variance_name
=
'conv5_blk_bn_variance'
)
self
.
pool2d_avg
=
Pool2D
(
pool_type
=
'avg'
,
global_pooling
=
True
)
self
.
pool2d_avg
=
AdaptiveAvgPool2d
(
1
)
stdv
=
1.0
/
math
.
sqrt
(
num_features
*
1.0
)
self
.
out
=
Linear
(
num_features
,
class_dim
,
param_attr
=
ParamAttr
(
initializer
=
fluid
.
initializer
.
Uniform
(
-
stdv
,
stdv
),
name
=
"fc_weights"
),
weight_attr
=
ParamAttr
(
initializer
=
Uniform
(
-
stdv
,
stdv
),
name
=
"fc_weights"
),
bias_attr
=
ParamAttr
(
name
=
"fc_offset"
))
def
forward
(
self
,
input
):
...
...
@@ -280,7 +278,7 @@ class DenseNet(fluid.dygraph.Layer):
conv
=
self
.
batch_norm
(
conv
)
y
=
self
.
pool2d_avg
(
conv
)
y
=
fluid
.
layers
.
reshape
(
y
,
shape
=
[
0
,
-
1
])
y
=
paddle
.
reshape
(
y
,
shape
=
[
0
,
-
1
])
y
=
self
.
out
(
y
)
return
y
...
...
ppcls/modeling/architectures/distillation_models.py
浏览文件 @
ea746480
...
...
@@ -19,8 +19,7 @@ from __future__ import print_function
import
math
import
paddle
import
paddle.fluid
as
fluid
from
paddle.fluid.param_attr
import
ParamAttr
import
paddle.nn
as
nn
from
.resnet_vd
import
ResNet50_vd
from
.mobilenet_v3
import
MobileNetV3_large_x1_0
...
...
@@ -32,7 +31,7 @@ __all__ = [
]
class
ResNet50_vd_distill_MobileNetV3_large_x1_0
(
fluid
.
dygraph
.
Layer
):
class
ResNet50_vd_distill_MobileNetV3_large_x1_0
(
nn
.
Layer
):
def
__init__
(
self
,
class_dim
=
1000
,
**
args
):
super
(
ResNet50_vd_distill_MobileNetV3_large_x1_0
,
self
).
__init__
()
...
...
@@ -49,7 +48,7 @@ class ResNet50_vd_distill_MobileNetV3_large_x1_0(fluid.dygraph.Layer):
return
teacher_label
,
student_label
class
ResNeXt101_32x16d_wsl_distill_ResNet50_vd
(
fluid
.
dygraph
.
Layer
):
class
ResNeXt101_32x16d_wsl_distill_ResNet50_vd
(
nn
.
Layer
):
def
__init__
(
self
,
class_dim
=
1000
,
**
args
):
super
(
ResNet50_vd_distill_MobileNetV3_large_x1_0
,
self
).
__init__
()
...
...
ppcls/modeling/architectures/dpn.py
浏览文件 @
ea746480
...
...
@@ -19,9 +19,11 @@ from __future__ import print_function
import
numpy
as
np
import
sys
import
paddle
import
paddle.fluid
as
fluid
from
paddle.fluid.param_attr
import
ParamAttr
from
paddle.fluid.dygraph.nn
import
Conv2D
,
Pool2D
,
BatchNorm
,
Linear
from
paddle
import
ParamAttr
import
paddle.nn
as
nn
from
paddle.nn
import
Conv2d
,
BatchNorm
,
Linear
from
paddle.nn
import
AdaptiveAvgPool2d
,
MaxPool2d
,
AvgPool2d
from
paddle.nn.initializer
import
Uniform
import
math
...
...
@@ -35,7 +37,7 @@ __all__ = [
]
class
ConvBNLayer
(
fluid
.
dygraph
.
Layer
):
class
ConvBNLayer
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
num_filters
,
...
...
@@ -47,15 +49,14 @@ class ConvBNLayer(fluid.dygraph.Layer):
name
=
None
):
super
(
ConvBNLayer
,
self
).
__init__
()
self
.
_conv
=
Conv2
D
(
num
_channels
=
num_channels
,
num_filter
s
=
num_filters
,
filter
_size
=
filter_size
,
self
.
_conv
=
Conv2
d
(
in
_channels
=
num_channels
,
out_channel
s
=
num_filters
,
kernel
_size
=
filter_size
,
stride
=
stride
,
padding
=
pad
,
groups
=
groups
,
act
=
None
,
param_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
weight_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
bias_attr
=
False
)
self
.
_batch_norm
=
BatchNorm
(
num_filters
,
...
...
@@ -71,7 +72,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
return
y
class
BNACConvLayer
(
fluid
.
dygraph
.
Layer
):
class
BNACConvLayer
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
num_filters
,
...
...
@@ -83,7 +84,6 @@ class BNACConvLayer(fluid.dygraph.Layer):
name
=
None
):
super
(
BNACConvLayer
,
self
).
__init__
()
self
.
num_channels
=
num_channels
self
.
name
=
name
self
.
_batch_norm
=
BatchNorm
(
num_channels
,
...
...
@@ -93,15 +93,14 @@ class BNACConvLayer(fluid.dygraph.Layer):
moving_mean_name
=
name
+
'_bn_mean'
,
moving_variance_name
=
name
+
'_bn_variance'
)
self
.
_conv
=
Conv2
D
(
num
_channels
=
num_channels
,
num_filter
s
=
num_filters
,
filter
_size
=
filter_size
,
self
.
_conv
=
Conv2
d
(
in
_channels
=
num_channels
,
out_channel
s
=
num_filters
,
kernel
_size
=
filter_size
,
stride
=
stride
,
padding
=
pad
,
groups
=
groups
,
act
=
None
,
param_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
weight_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
bias_attr
=
False
)
def
forward
(
self
,
input
):
...
...
@@ -110,7 +109,7 @@ class BNACConvLayer(fluid.dygraph.Layer):
return
y
class
DualPathFactory
(
fluid
.
dygraph
.
Layer
):
class
DualPathFactory
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
num_1x1_a
,
...
...
@@ -183,14 +182,14 @@ class DualPathFactory(fluid.dygraph.Layer):
def
forward
(
self
,
input
):
# PROJ
if
isinstance
(
input
,
list
):
data_in
=
fluid
.
layers
.
concat
([
input
[
0
],
input
[
1
]],
axis
=
1
)
data_in
=
paddle
.
concat
([
input
[
0
],
input
[
1
]],
axis
=
1
)
else
:
data_in
=
input
if
self
.
has_proj
:
c1x1_w
=
self
.
c1x1_w_func
(
data_in
)
data_o1
,
data_o2
=
fluid
.
layers
.
split
(
c1x1_w
,
num_or_sections
=
[
self
.
num_1x1_c
,
2
*
self
.
inc
],
dim
=
1
)
data_o1
,
data_o2
=
paddle
.
split
(
c1x1_w
,
num_or_sections
=
[
self
.
num_1x1_c
,
2
*
self
.
inc
],
axis
=
1
)
else
:
data_o1
=
input
[
0
]
data_o2
=
input
[
1
]
...
...
@@ -199,17 +198,17 @@ class DualPathFactory(fluid.dygraph.Layer):
c3x3_b
=
self
.
c3x3_b_func
(
c1x1_a
)
c1x1_c
=
self
.
c1x1_c_func
(
c3x3_b
)
c1x1_c1
,
c1x1_c2
=
fluid
.
layers
.
split
(
c1x1_c
,
num_or_sections
=
[
self
.
num_1x1_c
,
self
.
inc
],
dim
=
1
)
c1x1_c1
,
c1x1_c2
=
paddle
.
split
(
c1x1_c
,
num_or_sections
=
[
self
.
num_1x1_c
,
self
.
inc
],
axis
=
1
)
# OUTPUTS
summ
=
fluid
.
layers
.
elementwise_add
(
x
=
data_o1
,
y
=
c1x1_c1
)
dense
=
fluid
.
layers
.
concat
([
data_o2
,
c1x1_c2
],
axis
=
1
)
summ
=
paddle
.
elementwise_add
(
x
=
data_o1
,
y
=
c1x1_c1
)
dense
=
paddle
.
concat
([
data_o2
,
c1x1_c2
],
axis
=
1
)
# tensor, channels
return
[
summ
,
dense
]
class
DPN
(
fluid
.
dygraph
.
Layer
):
class
DPN
(
nn
.
Layer
):
def
__init__
(
self
,
layers
=
60
,
class_dim
=
1000
):
super
(
DPN
,
self
).
__init__
()
...
...
@@ -237,8 +236,7 @@ class DPN(fluid.dygraph.Layer):
act
=
'relu'
,
name
=
"conv1"
)
self
.
pool2d_max
=
Pool2D
(
pool_size
=
3
,
pool_stride
=
2
,
pool_padding
=
1
,
pool_type
=
'max'
)
self
.
pool2d_max
=
MaxPool2d
(
kernel_size
=
3
,
stride
=
2
,
padding
=
1
)
num_channel_dpn
=
init_num_filter
...
...
@@ -303,16 +301,15 @@ class DPN(fluid.dygraph.Layer):
moving_mean_name
=
'final_concat_bn_mean'
,
moving_variance_name
=
'final_concat_bn_variance'
)
self
.
pool2d_avg
=
Pool2D
(
pool_type
=
'avg'
,
global_pooling
=
True
)
self
.
pool2d_avg
=
AdaptiveAvgPool2d
(
1
)
stdv
=
0.01
self
.
out
=
Linear
(
out_channel
,
class_dim
,
param_attr
=
ParamAttr
(
initializer
=
fluid
.
initializer
.
Uniform
(
-
stdv
,
stdv
),
name
=
"fc_weights"
),
weight_attr
=
ParamAttr
(
initializer
=
Uniform
(
-
stdv
,
stdv
),
name
=
"fc_weights"
),
bias_attr
=
ParamAttr
(
name
=
"fc_offset"
))
def
forward
(
self
,
input
):
...
...
@@ -327,11 +324,11 @@ class DPN(fluid.dygraph.Layer):
convX_x_x
=
self
.
dpn_func_list
[
dpn_idx
](
convX_x_x
)
dpn_idx
+=
1
conv5_x_x
=
fluid
.
layers
.
concat
(
convX_x_x
,
axis
=
1
)
conv5_x_x
=
paddle
.
concat
(
convX_x_x
,
axis
=
1
)
conv5_x_x
=
self
.
conv5_x_x_bn
(
conv5_x_x
)
y
=
self
.
pool2d_avg
(
conv5_x_x
)
y
=
fluid
.
layers
.
reshape
(
y
,
shape
=
[
0
,
-
1
])
y
=
paddle
.
reshape
(
y
,
shape
=
[
0
,
-
1
])
y
=
self
.
out
(
y
)
return
y
...
...
ppcls/modeling/architectures/efficientnet.py
浏览文件 @
ea746480
import
paddle
import
paddle.fluid
as
fluid
from
paddle.fluid.param_attr
import
ParamAttr
from
paddle.fluid.dygraph.nn
import
Conv2D
,
Pool2D
,
BatchNorm
,
Linear
,
Dropout
from
paddle
import
ParamAttr
import
paddle.nn
as
nn
import
paddle.nn.functional
as
F
from
paddle.nn
import
Conv2d
,
BatchNorm
,
Linear
,
Dropout
from
paddle.nn
import
AdaptiveAvgPool2d
,
MaxPool2d
,
AvgPool2d
import
math
import
collections
import
re
...
...
@@ -242,15 +244,14 @@ def _drop_connect(inputs, prob, is_test):
if
is_test
:
return
inputs
keep_prob
=
1.0
-
prob
inputs_shape
=
fluid
.
layers
.
shape
(
inputs
)
random_tensor
=
keep_prob
+
fluid
.
layers
.
uniform_random
(
shape
=
[
inputs_shape
[
0
],
1
,
1
,
1
],
min
=
0.
,
max
=
1.
)
binary_tensor
=
fluid
.
layers
.
floor
(
random_tensor
)
inputs_shape
=
paddle
.
shape
(
inputs
)
random_tensor
=
keep_prob
+
paddle
.
rand
(
shape
=
[
inputs_shape
[
0
],
1
,
1
,
1
])
binary_tensor
=
paddle
.
floor
(
random_tensor
)
output
=
inputs
/
keep_prob
*
binary_tensor
return
output
class
Conv2ds
(
fluid
.
dygraph
.
Layer
):
class
Conv2ds
(
nn
.
Layer
):
def
__init__
(
self
,
input_channels
,
output_channels
,
...
...
@@ -265,6 +266,8 @@ class Conv2ds(fluid.dygraph.Layer):
model_name
=
None
,
cur_stage
=
None
):
super
(
Conv2ds
,
self
).
__init__
()
assert
act
in
[
None
,
"swish"
,
"sigmoid"
]
self
.
act
=
act
param_attr
,
bias_attr
=
initial_type
(
name
=
name
,
use_bias
=
use_bias
)
...
...
@@ -296,25 +299,31 @@ class Conv2ds(fluid.dygraph.Layer):
else
:
padding
=
padding_type
self
.
_conv
=
Conv2D
(
groups
=
1
if
groups
is
None
else
groups
self
.
_conv
=
Conv2d
(
input_channels
,
output_channels
,
filter_size
,
groups
=
groups
,
stride
=
stride
,
act
=
act
,
#
act=act,
padding
=
padding
,
param
_attr
=
param_attr
,
weight
_attr
=
param_attr
,
bias_attr
=
bias_attr
)
def
forward
(
self
,
inputs
):
x
=
self
.
_conv
(
inputs
)
if
self
.
act
==
"swish"
:
x
=
F
.
swish
(
x
)
elif
self
.
act
==
"sigmoid"
:
x
=
F
.
sigmoid
(
x
)
if
self
.
need_crop
:
x
=
x
[:,
:,
1
:,
1
:]
return
x
class
ConvBNLayer
(
fluid
.
dygraph
.
Layer
):
class
ConvBNLayer
(
nn
.
Layer
):
def
__init__
(
self
,
input_channels
,
filter_size
,
...
...
@@ -369,7 +378,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
return
self
.
_conv
(
inputs
)
class
ExpandConvNorm
(
fluid
.
dygraph
.
Layer
):
class
ExpandConvNorm
(
nn
.
Layer
):
def
__init__
(
self
,
input_channels
,
block_args
,
...
...
@@ -402,7 +411,7 @@ class ExpandConvNorm(fluid.dygraph.Layer):
return
inputs
class
DepthwiseConvNorm
(
fluid
.
dygraph
.
Layer
):
class
DepthwiseConvNorm
(
nn
.
Layer
):
def
__init__
(
self
,
input_channels
,
block_args
,
...
...
@@ -436,7 +445,7 @@ class DepthwiseConvNorm(fluid.dygraph.Layer):
return
self
.
_conv
(
inputs
)
class
ProjectConvNorm
(
fluid
.
dygraph
.
Layer
):
class
ProjectConvNorm
(
nn
.
Layer
):
def
__init__
(
self
,
input_channels
,
block_args
,
...
...
@@ -464,7 +473,7 @@ class ProjectConvNorm(fluid.dygraph.Layer):
return
self
.
_conv
(
inputs
)
class
SEBlock
(
fluid
.
dygraph
.
Layer
):
class
SEBlock
(
nn
.
Layer
):
def
__init__
(
self
,
input_channels
,
num_squeezed_channels
,
...
...
@@ -475,8 +484,7 @@ class SEBlock(fluid.dygraph.Layer):
cur_stage
=
None
):
super
(
SEBlock
,
self
).
__init__
()
self
.
_pool
=
Pool2D
(
pool_type
=
"avg"
,
global_pooling
=
True
,
use_cudnn
=
False
)
self
.
_pool
=
AdaptiveAvgPool2d
(
1
)
self
.
_conv1
=
Conv2ds
(
input_channels
,
num_squeezed_channels
,
...
...
@@ -499,10 +507,10 @@ class SEBlock(fluid.dygraph.Layer):
x
=
self
.
_pool
(
inputs
)
x
=
self
.
_conv1
(
x
)
x
=
self
.
_conv2
(
x
)
return
fluid
.
layers
.
elementwise_mul
(
inputs
,
x
)
return
paddle
.
multiply
(
inputs
,
x
)
class
MbConvBlock
(
fluid
.
dygraph
.
Layer
):
class
MbConvBlock
(
nn
.
Layer
):
def
__init__
(
self
,
input_channels
,
block_args
,
...
...
@@ -565,9 +573,9 @@ class MbConvBlock(fluid.dygraph.Layer):
x
=
inputs
if
self
.
expand_ratio
!=
1
:
x
=
self
.
_ecn
(
x
)
x
=
fluid
.
layers
.
swish
(
x
)
x
=
F
.
swish
(
x
)
x
=
self
.
_dcn
(
x
)
x
=
fluid
.
layers
.
swish
(
x
)
x
=
F
.
swish
(
x
)
if
self
.
has_se
:
x
=
self
.
_se
(
x
)
x
=
self
.
_pcn
(
x
)
...
...
@@ -576,11 +584,11 @@ class MbConvBlock(fluid.dygraph.Layer):
self
.
block_args
.
input_filters
==
self
.
block_args
.
output_filters
:
if
self
.
drop_connect_rate
:
x
=
_drop_connect
(
x
,
self
.
drop_connect_rate
,
self
.
is_test
)
x
=
fluid
.
layers
.
elementwise_add
(
x
,
inputs
)
x
=
paddle
.
elementwise_add
(
x
,
inputs
)
return
x
class
ConvStemNorm
(
fluid
.
dygraph
.
Layer
):
class
ConvStemNorm
(
nn
.
Layer
):
def
__init__
(
self
,
input_channels
,
padding_type
,
...
...
@@ -608,7 +616,7 @@ class ConvStemNorm(fluid.dygraph.Layer):
return
self
.
_conv
(
inputs
)
class
ExtractFeatures
(
fluid
.
dygraph
.
Layer
):
class
ExtractFeatures
(
nn
.
Layer
):
def
__init__
(
self
,
input_channels
,
_block_args
,
...
...
@@ -694,13 +702,13 @@ class ExtractFeatures(fluid.dygraph.Layer):
def
forward
(
self
,
inputs
):
x
=
self
.
_conv_stem
(
inputs
)
x
=
fluid
.
layers
.
swish
(
x
)
x
=
F
.
swish
(
x
)
for
_mc_block
in
self
.
conv_seq
:
x
=
_mc_block
(
x
)
return
x
class
EfficientNet
(
fluid
.
dygraph
.
Layer
):
class
EfficientNet
(
nn
.
Layer
):
def
__init__
(
self
,
name
=
"b0"
,
is_test
=
True
,
...
...
@@ -753,18 +761,17 @@ class EfficientNet(fluid.dygraph.Layer):
bn_name
=
"_bn1"
,
model_name
=
self
.
name
,
cur_stage
=
7
)
self
.
_pool
=
Pool2D
(
pool_type
=
"avg"
,
global_pooling
=
True
)
self
.
_pool
=
AdaptiveAvgPool2d
(
1
)
if
self
.
_global_params
.
dropout_rate
:
self
.
_drop
=
Dropout
(
p
=
self
.
_global_params
.
dropout_rate
,
dropout_implementation
=
"upscale_in_train"
)
p
=
self
.
_global_params
.
dropout_rate
,
mode
=
"upscale_in_train"
)
param_attr
,
bias_attr
=
init_fc_layer
(
"_fc"
)
self
.
_fc
=
Linear
(
output_channels
,
class_dim
,
param
_attr
=
param_attr
,
weight
_attr
=
param_attr
,
bias_attr
=
bias_attr
)
def
forward
(
self
,
inputs
):
...
...
@@ -773,7 +780,7 @@ class EfficientNet(fluid.dygraph.Layer):
x
=
self
.
_pool
(
x
)
if
self
.
_global_params
.
dropout_rate
:
x
=
self
.
_drop
(
x
)
x
=
fluid
.
layers
.
squeeze
(
x
,
axe
s
=
[
2
,
3
])
x
=
paddle
.
squeeze
(
x
,
axi
s
=
[
2
,
3
])
x
=
self
.
_fc
(
x
)
return
x
...
...
ppcls/modeling/architectures/googlenet.py
浏览文件 @
ea746480
import
paddle
import
paddle.fluid
as
fluid
from
paddle.fluid.param_attr
import
ParamAttr
from
paddle.fluid.dygraph.nn
import
Conv2D
,
Pool2D
,
BatchNorm
,
Linear
from
paddle
import
ParamAttr
import
paddle.nn
as
nn
import
paddle.nn.functional
as
F
from
paddle.nn
import
Conv2d
,
BatchNorm
,
Linear
,
Dropout
from
paddle.nn
import
AdaptiveAvgPool2d
,
MaxPool2d
,
AvgPool2d
from
paddle.nn.initializer
import
Uniform
import
math
__all__
=
[
'GoogLeNet'
]
...
...
@@ -10,12 +14,11 @@ __all__ = ['GoogLeNet']
def
xavier
(
channels
,
filter_size
,
name
):
stdv
=
(
3.0
/
(
filter_size
**
2
*
channels
))
**
0.5
param_attr
=
ParamAttr
(
initializer
=
fluid
.
initializer
.
Uniform
(
-
stdv
,
stdv
),
name
=
name
+
"_weights"
)
initializer
=
Uniform
(
-
stdv
,
stdv
),
name
=
name
+
"_weights"
)
return
param_attr
class
ConvLayer
(
fluid
.
dygraph
.
Layer
):
class
ConvLayer
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
num_filters
,
...
...
@@ -26,15 +29,14 @@ class ConvLayer(fluid.dygraph.Layer):
name
=
None
):
super
(
ConvLayer
,
self
).
__init__
()
self
.
_conv
=
Conv2
D
(
num
_channels
=
num_channels
,
num_filter
s
=
num_filters
,
filter
_size
=
filter_size
,
self
.
_conv
=
Conv2
d
(
in
_channels
=
num_channels
,
out_channel
s
=
num_filters
,
kernel
_size
=
filter_size
,
stride
=
stride
,
padding
=
(
filter_size
-
1
)
//
2
,
groups
=
groups
,
act
=
None
,
param_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
weight_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
bias_attr
=
False
)
def
forward
(
self
,
inputs
):
...
...
@@ -42,7 +44,7 @@ class ConvLayer(fluid.dygraph.Layer):
return
y
class
Inception
(
fluid
.
dygraph
.
Layer
):
class
Inception
(
nn
.
Layer
):
def
__init__
(
self
,
input_channels
,
output_channels
,
...
...
@@ -71,8 +73,8 @@ class Inception(fluid.dygraph.Layer):
name
=
"inception_"
+
name
+
"_5x5_reduce"
)
self
.
_conv5
=
ConvLayer
(
filter5R
,
filter5
,
5
,
name
=
"inception_"
+
name
+
"_5x5"
)
self
.
_pool
=
Pool2D
(
pool_size
=
3
,
pool_type
=
"max"
,
pool_stride
=
1
,
pool_padding
=
1
)
self
.
_pool
=
MaxPool2d
(
kernel_size
=
3
,
stride
=
1
,
padding
=
1
)
self
.
_convprj
=
ConvLayer
(
input_channels
,
proj
,
1
,
name
=
"inception_"
+
name
+
"_3x3_proj"
)
...
...
@@ -88,16 +90,16 @@ class Inception(fluid.dygraph.Layer):
pool
=
self
.
_pool
(
inputs
)
convprj
=
self
.
_convprj
(
pool
)
cat
=
fluid
.
layers
.
concat
([
conv1
,
conv3
,
conv5
,
convprj
],
axis
=
1
)
cat
=
fluid
.
layers
.
relu
(
cat
)
cat
=
paddle
.
concat
([
conv1
,
conv3
,
conv5
,
convprj
],
axis
=
1
)
cat
=
F
.
relu
(
cat
)
return
cat
class
GoogleNetDY
(
fluid
.
dygraph
.
Layer
):
class
GoogleNetDY
(
nn
.
Layer
):
def
__init__
(
self
,
class_dim
=
1000
):
super
(
GoogleNetDY
,
self
).
__init__
()
self
.
_conv
=
ConvLayer
(
3
,
64
,
7
,
2
,
name
=
"conv1"
)
self
.
_pool
=
Pool2D
(
pool_size
=
3
,
pool_type
=
"max"
,
pool_
stride
=
2
)
self
.
_pool
=
MaxPool2d
(
kernel_size
=
3
,
stride
=
2
)
self
.
_conv_1
=
ConvLayer
(
64
,
64
,
1
,
name
=
"conv2_1x1"
)
self
.
_conv_2
=
ConvLayer
(
64
,
192
,
3
,
name
=
"conv2_3x3"
)
...
...
@@ -122,42 +124,39 @@ class GoogleNetDY(fluid.dygraph.Layer):
self
.
_ince5b
=
Inception
(
832
,
832
,
384
,
192
,
384
,
48
,
128
,
128
,
name
=
"ince5b"
)
self
.
_pool_5
=
Pool2D
(
pool_size
=
7
,
pool_type
=
'avg'
,
pool_
stride
=
7
)
self
.
_pool_5
=
AvgPool2d
(
kernel_size
=
7
,
stride
=
7
)
self
.
_drop
=
fluid
.
dygraph
.
Dropout
(
p
=
0.4
)
self
.
_drop
=
Dropout
(
p
=
0.4
,
mode
=
"downscale_in_infer"
)
self
.
_fc_out
=
Linear
(
1024
,
class_dim
,
param_attr
=
xavier
(
1024
,
1
,
"out"
),
bias_attr
=
ParamAttr
(
name
=
"out_offset"
),
act
=
"softmax"
)
self
.
_pool_o1
=
Pool2D
(
pool_size
=
5
,
pool_stride
=
3
,
pool_type
=
"avg"
)
weight_attr
=
xavier
(
1024
,
1
,
"out"
),
bias_attr
=
ParamAttr
(
name
=
"out_offset"
))
self
.
_pool_o1
=
AvgPool2d
(
kernel_size
=
5
,
stride
=
3
)
self
.
_conv_o1
=
ConvLayer
(
512
,
128
,
1
,
name
=
"conv_o1"
)
self
.
_fc_o1
=
Linear
(
1152
,
1024
,
param_attr
=
xavier
(
2048
,
1
,
"fc_o1"
),
bias_attr
=
ParamAttr
(
name
=
"fc_o1_offset"
),
act
=
"relu"
)
self
.
_drop_o1
=
fluid
.
dygraph
.
Dropout
(
p
=
0.7
)
weight_attr
=
xavier
(
2048
,
1
,
"fc_o1"
),
bias_attr
=
ParamAttr
(
name
=
"fc_o1_offset"
))
self
.
_drop_o1
=
Dropout
(
p
=
0.7
,
mode
=
"downscale_in_infer"
)
self
.
_out1
=
Linear
(
1024
,
class_dim
,
param_attr
=
xavier
(
1024
,
1
,
"out1"
),
bias_attr
=
ParamAttr
(
name
=
"out1_offset"
),
act
=
"softmax"
)
self
.
_pool_o2
=
Pool2D
(
pool_size
=
5
,
pool_stride
=
3
,
pool_type
=
'avg'
)
weight_attr
=
xavier
(
1024
,
1
,
"out1"
),
bias_attr
=
ParamAttr
(
name
=
"out1_offset"
))
self
.
_pool_o2
=
AvgPool2d
(
kernel_size
=
5
,
stride
=
3
)
self
.
_conv_o2
=
ConvLayer
(
528
,
128
,
1
,
name
=
"conv_o2"
)
self
.
_fc_o2
=
Linear
(
1152
,
1024
,
param
_attr
=
xavier
(
2048
,
1
,
"fc_o2"
),
weight
_attr
=
xavier
(
2048
,
1
,
"fc_o2"
),
bias_attr
=
ParamAttr
(
name
=
"fc_o2_offset"
))
self
.
_drop_o2
=
fluid
.
dygraph
.
Dropout
(
p
=
0.7
)
self
.
_drop_o2
=
Dropout
(
p
=
0.7
,
mode
=
"downscale_in_infer"
)
self
.
_out2
=
Linear
(
1024
,
class_dim
,
param
_attr
=
xavier
(
1024
,
1
,
"out2"
),
weight
_attr
=
xavier
(
1024
,
1
,
"out2"
),
bias_attr
=
ParamAttr
(
name
=
"out2_offset"
))
def
forward
(
self
,
inputs
):
...
...
@@ -183,19 +182,22 @@ class GoogleNetDY(fluid.dygraph.Layer):
x
=
self
.
_pool_5
(
ince5b
)
x
=
self
.
_drop
(
x
)
x
=
fluid
.
layers
.
squeeze
(
x
,
axe
s
=
[
2
,
3
])
x
=
paddle
.
squeeze
(
x
,
axi
s
=
[
2
,
3
])
out
=
self
.
_fc_out
(
x
)
out
=
F
.
softmax
(
out
)
x
=
self
.
_pool_o1
(
ince4a
)
x
=
self
.
_conv_o1
(
x
)
x
=
fluid
.
layers
.
flatten
(
x
)
x
=
paddle
.
flatten
(
x
,
start_axis
=
1
,
stop_axis
=-
1
)
x
=
self
.
_fc_o1
(
x
)
x
=
F
.
relu
(
x
)
x
=
self
.
_drop_o1
(
x
)
out1
=
self
.
_out1
(
x
)
out1
=
F
.
softmax
(
out1
)
x
=
self
.
_pool_o2
(
ince4d
)
x
=
self
.
_conv_o2
(
x
)
x
=
fluid
.
layers
.
flatten
(
x
)
x
=
paddle
.
flatten
(
x
,
start_axis
=
1
,
stop_axis
=-
1
)
x
=
self
.
_fc_o2
(
x
)
x
=
self
.
_drop_o2
(
x
)
out2
=
self
.
_out2
(
x
)
...
...
ppcls/modeling/architectures/hrnet.py
浏览文件 @
ea746480
...
...
@@ -18,9 +18,12 @@ from __future__ import print_function
import
numpy
as
np
import
paddle
import
paddle.fluid
as
fluid
from
paddle.fluid.param_attr
import
ParamAttr
from
paddle.fluid.dygraph.nn
import
Conv2D
,
Pool2D
,
BatchNorm
,
Linear
from
paddle
import
ParamAttr
import
paddle.nn
as
nn
import
paddle.nn.functional
as
F
from
paddle.nn
import
Conv2d
,
BatchNorm
,
Linear
from
paddle.nn
import
AdaptiveAvgPool2d
,
MaxPool2d
,
AvgPool2d
from
paddle.nn.initializer
import
Uniform
import
math
...
...
@@ -44,7 +47,7 @@ __all__ = [
]
class
ConvBNLayer
(
fluid
.
dygraph
.
Layer
):
class
ConvBNLayer
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
num_filters
,
...
...
@@ -55,15 +58,14 @@ class ConvBNLayer(fluid.dygraph.Layer):
name
=
None
):
super
(
ConvBNLayer
,
self
).
__init__
()
self
.
_conv
=
Conv2
D
(
num
_channels
=
num_channels
,
num_filter
s
=
num_filters
,
filter
_size
=
filter_size
,
self
.
_conv
=
Conv2
d
(
in
_channels
=
num_channels
,
out_channel
s
=
num_filters
,
kernel
_size
=
filter_size
,
stride
=
stride
,
padding
=
(
filter_size
-
1
)
//
2
,
groups
=
groups
,
act
=
None
,
param_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
weight_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
bias_attr
=
False
)
bn_name
=
name
+
'_bn'
self
.
_batch_norm
=
BatchNorm
(
...
...
@@ -80,7 +82,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
return
y
class
Layer1
(
fluid
.
dygraph
.
Layer
):
class
Layer1
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
has_se
=
False
,
name
=
None
):
super
(
Layer1
,
self
).
__init__
()
...
...
@@ -105,7 +107,7 @@ class Layer1(fluid.dygraph.Layer):
return
conv
class
TransitionLayer
(
fluid
.
dygraph
.
Layer
):
class
TransitionLayer
(
nn
.
Layer
):
def
__init__
(
self
,
in_channels
,
out_channels
,
name
=
None
):
super
(
TransitionLayer
,
self
).
__init__
()
...
...
@@ -148,7 +150,7 @@ class TransitionLayer(fluid.dygraph.Layer):
return
outs
class
Branches
(
fluid
.
dygraph
.
Layer
):
class
Branches
(
nn
.
Layer
):
def
__init__
(
self
,
block_num
,
in_channels
,
...
...
@@ -183,7 +185,7 @@ class Branches(fluid.dygraph.Layer):
return
outs
class
BottleneckBlock
(
fluid
.
dygraph
.
Layer
):
class
BottleneckBlock
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
num_filters
,
...
...
@@ -243,11 +245,11 @@ class BottleneckBlock(fluid.dygraph.Layer):
if
self
.
has_se
:
conv3
=
self
.
se
(
conv3
)
y
=
fluid
.
layers
.
elementwise_add
(
x
=
conv3
,
y
=
residual
,
act
=
"relu"
)
y
=
paddle
.
elementwise_add
(
x
=
conv3
,
y
=
residual
,
act
=
"relu"
)
return
y
class
BasicBlock
(
fluid
.
dygraph
.
Layer
):
class
BasicBlock
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
num_filters
,
...
...
@@ -301,15 +303,15 @@ class BasicBlock(fluid.dygraph.Layer):
if
self
.
has_se
:
conv2
=
self
.
se
(
conv2
)
y
=
fluid
.
layers
.
elementwise_add
(
x
=
conv2
,
y
=
residual
,
act
=
"relu"
)
y
=
paddle
.
elementwise_add
(
x
=
conv2
,
y
=
residual
,
act
=
"relu"
)
return
y
class
SELayer
(
fluid
.
dygraph
.
Layer
):
class
SELayer
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
num_filters
,
reduction_ratio
,
name
=
None
):
super
(
SELayer
,
self
).
__init__
()
self
.
pool2d_gap
=
Pool2D
(
pool_type
=
'avg'
,
global_pooling
=
True
)
self
.
pool2d_gap
=
AdaptiveAvgPool2d
(
1
)
self
.
_num_channels
=
num_channels
...
...
@@ -320,8 +322,7 @@ class SELayer(fluid.dygraph.Layer):
med_ch
,
act
=
"relu"
,
param_attr
=
ParamAttr
(
initializer
=
fluid
.
initializer
.
Uniform
(
-
stdv
,
stdv
),
name
=
name
+
"_sqz_weights"
),
initializer
=
Uniform
(
-
stdv
,
stdv
),
name
=
name
+
"_sqz_weights"
),
bias_attr
=
ParamAttr
(
name
=
name
+
'_sqz_offset'
))
stdv
=
1.0
/
math
.
sqrt
(
med_ch
*
1.0
)
...
...
@@ -330,22 +331,21 @@ class SELayer(fluid.dygraph.Layer):
num_filters
,
act
=
"sigmoid"
,
param_attr
=
ParamAttr
(
initializer
=
fluid
.
initializer
.
Uniform
(
-
stdv
,
stdv
),
name
=
name
+
"_exc_weights"
),
initializer
=
Uniform
(
-
stdv
,
stdv
),
name
=
name
+
"_exc_weights"
),
bias_attr
=
ParamAttr
(
name
=
name
+
'_exc_offset'
))
def
forward
(
self
,
input
):
pool
=
self
.
pool2d_gap
(
input
)
pool
=
fluid
.
layers
.
reshape
(
pool
,
shape
=
[
-
1
,
self
.
_num_channels
])
pool
=
paddle
.
reshape
(
pool
,
shape
=
[
-
1
,
self
.
_num_channels
])
squeeze
=
self
.
squeeze
(
pool
)
excitation
=
self
.
excitation
(
squeeze
)
excitation
=
fluid
.
layers
.
reshape
(
excitation
=
paddle
.
reshape
(
excitation
,
shape
=
[
-
1
,
self
.
_num_channels
,
1
,
1
])
out
=
input
*
excitation
return
out
class
Stage
(
fluid
.
dygraph
.
Layer
):
class
Stage
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
num_modules
,
...
...
@@ -386,7 +386,7 @@ class Stage(fluid.dygraph.Layer):
return
out
class
HighResolutionModule
(
fluid
.
dygraph
.
Layer
):
class
HighResolutionModule
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
num_filters
,
...
...
@@ -414,7 +414,7 @@ class HighResolutionModule(fluid.dygraph.Layer):
return
out
class
FuseLayers
(
fluid
.
dygraph
.
Layer
):
class
FuseLayers
(
nn
.
Layer
):
def
__init__
(
self
,
in_channels
,
out_channels
,
...
...
@@ -482,8 +482,8 @@ class FuseLayers(fluid.dygraph.Layer):
y
=
self
.
residual_func_list
[
residual_func_idx
](
input
[
j
])
residual_func_idx
+=
1
y
=
fluid
.
layers
.
resize_nearest
(
input
=
y
,
scale
=
2
**
(
j
-
i
))
residual
=
fluid
.
layers
.
elementwise_add
(
y
=
F
.
resize_nearest
(
input
=
y
,
scale
=
2
**
(
j
-
i
))
residual
=
paddle
.
elementwise_add
(
x
=
residual
,
y
=
y
,
act
=
None
)
elif
j
<
i
:
y
=
input
[
j
]
...
...
@@ -491,16 +491,16 @@ class FuseLayers(fluid.dygraph.Layer):
y
=
self
.
residual_func_list
[
residual_func_idx
](
y
)
residual_func_idx
+=
1
residual
=
fluid
.
layers
.
elementwise_add
(
residual
=
paddle
.
elementwise_add
(
x
=
residual
,
y
=
y
,
act
=
None
)
residual
=
fluid
.
layers
.
relu
(
residual
)
residual
=
F
.
relu
(
residual
)
outs
.
append
(
residual
)
return
outs
class
LastClsOut
(
fluid
.
dygraph
.
Layer
):
class
LastClsOut
(
nn
.
Layer
):
def
__init__
(
self
,
num_channel_list
,
has_se
,
...
...
@@ -528,7 +528,7 @@ class LastClsOut(fluid.dygraph.Layer):
return
outs
class
HRNet
(
fluid
.
dygraph
.
Layer
):
class
HRNet
(
nn
.
Layer
):
def
__init__
(
self
,
width
=
18
,
has_se
=
False
,
class_dim
=
1000
):
super
(
HRNet
,
self
).
__init__
()
...
...
@@ -623,16 +623,15 @@ class HRNet(fluid.dygraph.Layer):
stride
=
1
,
name
=
"cls_head_last_conv"
)
self
.
pool2d_avg
=
Pool2D
(
pool_type
=
'avg'
,
global_pooling
=
True
)
self
.
pool2d_avg
=
AdaptiveAvgPool2d
(
1
)
stdv
=
1.0
/
math
.
sqrt
(
2048
*
1.0
)
self
.
out
=
Linear
(
2048
,
class_dim
,
param_attr
=
ParamAttr
(
initializer
=
fluid
.
initializer
.
Uniform
(
-
stdv
,
stdv
),
name
=
"fc_weights"
),
weight_attr
=
ParamAttr
(
initializer
=
Uniform
(
-
stdv
,
stdv
),
name
=
"fc_weights"
),
bias_attr
=
ParamAttr
(
name
=
"fc_offset"
))
def
forward
(
self
,
input
):
...
...
@@ -658,7 +657,7 @@ class HRNet(fluid.dygraph.Layer):
y
=
self
.
conv_last
(
y
)
y
=
self
.
pool2d_avg
(
y
)
y
=
fluid
.
layers
.
reshape
(
y
,
shape
=
[
0
,
-
1
])
y
=
paddle
.
reshape
(
y
,
shape
=
[
0
,
-
1
])
y
=
self
.
out
(
y
)
return
y
...
...
ppcls/modeling/architectures/inception_v4.py
浏览文件 @
ea746480
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import
paddle
import
paddle.fluid
as
fluid
from
paddle.fluid.param_attr
import
ParamAttr
from
paddle.fluid.dygraph.nn
import
Conv2D
,
Pool2D
,
BatchNorm
,
Linear
,
Dropout
from
paddle
import
ParamAttr
import
paddle.nn
as
nn
import
paddle.nn.functional
as
F
from
paddle.nn
import
Conv2d
,
BatchNorm
,
Linear
,
Dropout
from
paddle.nn
import
AdaptiveAvgPool2d
,
MaxPool2d
,
AvgPool2d
from
paddle.nn.initializer
import
Uniform
import
math
__all__
=
[
"InceptionV4"
]
class
ConvBNLayer
(
fluid
.
dygraph
.
Layer
):
class
ConvBNLayer
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
num_filters
,
...
...
@@ -18,15 +36,14 @@ class ConvBNLayer(fluid.dygraph.Layer):
name
=
None
):
super
(
ConvBNLayer
,
self
).
__init__
()
self
.
_conv
=
Conv2
D
(
num
_channels
=
num_channels
,
num_filter
s
=
num_filters
,
filter
_size
=
filter_size
,
self
.
_conv
=
Conv2
d
(
in
_channels
=
num_channels
,
out_channel
s
=
num_filters
,
kernel
_size
=
filter_size
,
stride
=
stride
,
padding
=
padding
,
groups
=
groups
,
act
=
None
,
param_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
weight_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
bias_attr
=
False
)
bn_name
=
name
+
"_bn"
self
.
_batch_norm
=
BatchNorm
(
...
...
@@ -43,7 +60,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
return
y
class
InceptionStem
(
fluid
.
dygraph
.
Layer
):
class
InceptionStem
(
nn
.
Layer
):
def
__init__
(
self
):
super
(
InceptionStem
,
self
).
__init__
()
self
.
_conv_1
=
ConvBNLayer
(
...
...
@@ -51,7 +68,7 @@ class InceptionStem(fluid.dygraph.Layer):
self
.
_conv_2
=
ConvBNLayer
(
32
,
32
,
3
,
act
=
"relu"
,
name
=
"conv2_3x3_s1"
)
self
.
_conv_3
=
ConvBNLayer
(
32
,
64
,
3
,
padding
=
1
,
act
=
"relu"
,
name
=
"conv3_3x3_s1"
)
self
.
_pool
=
Pool2D
(
pool_size
=
3
,
pool_type
=
"max"
,
pool_stride
=
2
)
self
.
_pool
=
MaxPool2d
(
kernel_size
=
3
,
stride
=
2
,
padding
=
0
)
self
.
_conv2
=
ConvBNLayer
(
64
,
96
,
3
,
stride
=
2
,
act
=
"relu"
,
name
=
"inception_stem1_3x3_s2"
)
self
.
_conv1_1
=
ConvBNLayer
(
...
...
@@ -84,7 +101,7 @@ class InceptionStem(fluid.dygraph.Layer):
pool1
=
self
.
_pool
(
conv
)
conv2
=
self
.
_conv2
(
conv
)
concat
=
fluid
.
layers
.
concat
([
pool1
,
conv2
],
axis
=
1
)
concat
=
paddle
.
concat
([
pool1
,
conv2
],
axis
=
1
)
conv1
=
self
.
_conv1_1
(
concat
)
conv1
=
self
.
_conv1_2
(
conv1
)
...
...
@@ -94,19 +111,19 @@ class InceptionStem(fluid.dygraph.Layer):
conv2
=
self
.
_conv2_3
(
conv2
)
conv2
=
self
.
_conv2_4
(
conv2
)
concat
=
fluid
.
layers
.
concat
([
conv1
,
conv2
],
axis
=
1
)
concat
=
paddle
.
concat
([
conv1
,
conv2
],
axis
=
1
)
conv1
=
self
.
_conv3
(
concat
)
pool1
=
self
.
_pool
(
concat
)
concat
=
fluid
.
layers
.
concat
([
conv1
,
pool1
],
axis
=
1
)
concat
=
paddle
.
concat
([
conv1
,
pool1
],
axis
=
1
)
return
concat
class
InceptionA
(
fluid
.
dygraph
.
Layer
):
class
InceptionA
(
nn
.
Layer
):
def
__init__
(
self
,
name
):
super
(
InceptionA
,
self
).
__init__
()
self
.
_pool
=
Pool2D
(
pool_size
=
3
,
pool_type
=
"avg"
,
pool_
padding
=
1
)
self
.
_pool
=
AvgPool2d
(
kernel_size
=
3
,
stride
=
1
,
padding
=
1
)
self
.
_conv1
=
ConvBNLayer
(
384
,
96
,
1
,
act
=
"relu"
,
name
=
"inception_a"
+
name
+
"_1x1"
)
self
.
_conv2
=
ConvBNLayer
(
...
...
@@ -154,14 +171,14 @@ class InceptionA(fluid.dygraph.Layer):
conv4
=
self
.
_conv4_2
(
conv4
)
conv4
=
self
.
_conv4_3
(
conv4
)
concat
=
fluid
.
layers
.
concat
([
conv1
,
conv2
,
conv3
,
conv4
],
axis
=
1
)
concat
=
paddle
.
concat
([
conv1
,
conv2
,
conv3
,
conv4
],
axis
=
1
)
return
concat
class
ReductionA
(
fluid
.
dygraph
.
Layer
):
class
ReductionA
(
nn
.
Layer
):
def
__init__
(
self
):
super
(
ReductionA
,
self
).
__init__
()
self
.
_pool
=
Pool2D
(
pool_size
=
3
,
pool_type
=
"max"
,
pool_stride
=
2
)
self
.
_pool
=
MaxPool2d
(
kernel_size
=
3
,
stride
=
2
,
padding
=
0
)
self
.
_conv2
=
ConvBNLayer
(
384
,
384
,
3
,
stride
=
2
,
act
=
"relu"
,
name
=
"reduction_a_3x3"
)
self
.
_conv3_1
=
ConvBNLayer
(
...
...
@@ -177,14 +194,14 @@ class ReductionA(fluid.dygraph.Layer):
conv3
=
self
.
_conv3_1
(
inputs
)
conv3
=
self
.
_conv3_2
(
conv3
)
conv3
=
self
.
_conv3_3
(
conv3
)
concat
=
fluid
.
layers
.
concat
([
pool1
,
conv2
,
conv3
],
axis
=
1
)
concat
=
paddle
.
concat
([
pool1
,
conv2
,
conv3
],
axis
=
1
)
return
concat
class
InceptionB
(
fluid
.
dygraph
.
Layer
):
class
InceptionB
(
nn
.
Layer
):
def
__init__
(
self
,
name
=
None
):
super
(
InceptionB
,
self
).
__init__
()
self
.
_pool
=
Pool2D
(
pool_size
=
3
,
pool_type
=
"avg"
,
pool_
padding
=
1
)
self
.
_pool
=
AvgPool2d
(
kernel_size
=
3
,
stride
=
1
,
padding
=
1
)
self
.
_conv1
=
ConvBNLayer
(
1024
,
128
,
1
,
act
=
"relu"
,
name
=
"inception_b"
+
name
+
"_1x1"
)
self
.
_conv2
=
ConvBNLayer
(
...
...
@@ -254,14 +271,14 @@ class InceptionB(fluid.dygraph.Layer):
conv4
=
self
.
_conv4_4
(
conv4
)
conv4
=
self
.
_conv4_5
(
conv4
)
concat
=
fluid
.
layers
.
concat
([
conv1
,
conv2
,
conv3
,
conv4
],
axis
=
1
)
concat
=
paddle
.
concat
([
conv1
,
conv2
,
conv3
,
conv4
],
axis
=
1
)
return
concat
class
ReductionB
(
fluid
.
dygraph
.
Layer
):
class
ReductionB
(
nn
.
Layer
):
def
__init__
(
self
):
super
(
ReductionB
,
self
).
__init__
()
self
.
_pool
=
Pool2D
(
pool_size
=
3
,
pool_type
=
"max"
,
pool_stride
=
2
)
self
.
_pool
=
MaxPool2d
(
kernel_size
=
3
,
stride
=
2
,
padding
=
0
)
self
.
_conv2_1
=
ConvBNLayer
(
1024
,
192
,
1
,
act
=
"relu"
,
name
=
"reduction_b_3x3_reduce"
)
self
.
_conv2_2
=
ConvBNLayer
(
...
...
@@ -294,15 +311,15 @@ class ReductionB(fluid.dygraph.Layer):
conv3
=
self
.
_conv3_3
(
conv3
)
conv3
=
self
.
_conv3_4
(
conv3
)
concat
=
fluid
.
layers
.
concat
([
pool1
,
conv2
,
conv3
],
axis
=
1
)
concat
=
paddle
.
concat
([
pool1
,
conv2
,
conv3
],
axis
=
1
)
return
concat
class
InceptionC
(
fluid
.
dygraph
.
Layer
):
class
InceptionC
(
nn
.
Layer
):
def
__init__
(
self
,
name
=
None
):
super
(
InceptionC
,
self
).
__init__
()
self
.
_pool
=
Pool2D
(
pool_size
=
3
,
pool_type
=
"avg"
,
pool_
padding
=
1
)
self
.
_pool
=
AvgPool2d
(
kernel_size
=
3
,
stride
=
1
,
padding
=
1
)
self
.
_conv1
=
ConvBNLayer
(
1536
,
256
,
1
,
act
=
"relu"
,
name
=
"inception_c"
+
name
+
"_1x1"
)
self
.
_conv2
=
ConvBNLayer
(
...
...
@@ -364,13 +381,13 @@ class InceptionC(fluid.dygraph.Layer):
conv4_1
=
self
.
_conv4_1
(
conv4
)
conv4_2
=
self
.
_conv4_2
(
conv4
)
concat
=
fluid
.
layers
.
concat
(
concat
=
paddle
.
concat
(
[
conv1
,
conv2
,
conv3_1
,
conv3_2
,
conv4_1
,
conv4_2
],
axis
=
1
)
return
concat
class
InceptionV4DY
(
fluid
.
dygraph
.
Layer
):
class
InceptionV4DY
(
nn
.
Layer
):
def
__init__
(
self
,
class_dim
=
1000
):
super
(
InceptionV4DY
,
self
).
__init__
()
self
.
_inception_stem
=
InceptionStem
()
...
...
@@ -394,15 +411,14 @@ class InceptionV4DY(fluid.dygraph.Layer):
self
.
_inceptionC_2
=
InceptionC
(
name
=
"2"
)
self
.
_inceptionC_3
=
InceptionC
(
name
=
"3"
)
self
.
avg_pool
=
Pool2D
(
pool_type
=
'avg'
,
global_pooling
=
True
)
self
.
_drop
=
Dropout
(
p
=
0.2
)
self
.
avg_pool
=
AdaptiveAvgPool2d
(
1
)
self
.
_drop
=
Dropout
(
p
=
0.2
,
mode
=
"downscale_in_infer"
)
stdv
=
1.0
/
math
.
sqrt
(
1536
*
1.0
)
self
.
out
=
Linear
(
1536
,
class_dim
,
param_attr
=
ParamAttr
(
initializer
=
fluid
.
initializer
.
Uniform
(
-
stdv
,
stdv
),
name
=
"final_fc_weights"
),
weight_attr
=
ParamAttr
(
initializer
=
Uniform
(
-
stdv
,
stdv
),
name
=
"final_fc_weights"
),
bias_attr
=
ParamAttr
(
name
=
"final_fc_offset"
))
def
forward
(
self
,
inputs
):
...
...
@@ -428,7 +444,7 @@ class InceptionV4DY(fluid.dygraph.Layer):
x
=
self
.
_inceptionC_3
(
x
)
x
=
self
.
avg_pool
(
x
)
x
=
fluid
.
layers
.
squeeze
(
x
,
axe
s
=
[
2
,
3
])
x
=
paddle
.
squeeze
(
x
,
axi
s
=
[
2
,
3
])
x
=
self
.
_drop
(
x
)
x
=
self
.
out
(
x
)
return
x
...
...
ppcls/modeling/architectures/layers.py
已删除
100644 → 0
浏览文件 @
5e092259
#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from
__future__
import
absolute_import
from
__future__
import
division
from
__future__
import
print_function
import
math
import
warnings
import
paddle.fluid
as
fluid
def
initial_type
(
name
,
input
,
op_type
,
fan_out
,
init
=
"google"
,
use_bias
=
False
,
filter_size
=
0
,
stddev
=
0.02
):
if
init
==
"kaiming"
:
if
op_type
==
'conv'
:
fan_in
=
input
.
shape
[
1
]
*
filter_size
*
filter_size
elif
op_type
==
'deconv'
:
fan_in
=
fan_out
*
filter_size
*
filter_size
else
:
if
len
(
input
.
shape
)
>
2
:
fan_in
=
input
.
shape
[
1
]
*
input
.
shape
[
2
]
*
input
.
shape
[
3
]
else
:
fan_in
=
input
.
shape
[
1
]
bound
=
1
/
math
.
sqrt
(
fan_in
)
param_attr
=
fluid
.
ParamAttr
(
name
=
name
+
"_weights"
,
initializer
=
fluid
.
initializer
.
Uniform
(
low
=-
bound
,
high
=
bound
))
if
use_bias
==
True
:
bias_attr
=
fluid
.
ParamAttr
(
name
=
name
+
'_offset'
,
initializer
=
fluid
.
initializer
.
Uniform
(
low
=-
bound
,
high
=
bound
))
else
:
bias_attr
=
False
elif
init
==
'google'
:
n
=
filter_size
*
filter_size
*
fan_out
param_attr
=
fluid
.
ParamAttr
(
name
=
name
+
"_weights"
,
initializer
=
fluid
.
initializer
.
NormalInitializer
(
loc
=
0.0
,
scale
=
math
.
sqrt
(
2.0
/
n
)))
if
use_bias
==
True
:
bias_attr
=
fluid
.
ParamAttr
(
name
=
name
+
"_offset"
,
initializer
=
fluid
.
initializer
.
Constant
(
0.0
))
else
:
bias_attr
=
False
else
:
param_attr
=
fluid
.
ParamAttr
(
name
=
name
+
"_weights"
,
initializer
=
fluid
.
initializer
.
NormalInitializer
(
loc
=
0.0
,
scale
=
stddev
))
if
use_bias
==
True
:
bias_attr
=
fluid
.
ParamAttr
(
name
=
name
+
"_offset"
,
initializer
=
fluid
.
initializer
.
Constant
(
0.0
))
else
:
bias_attr
=
False
return
param_attr
,
bias_attr
def cal_padding(img_size, stride, filter_size, dilation=1):
    """Calculate TF-style "SAME" padding for one spatial axis.

    Args:
        img_size (int): input size along this axis.
        stride (int): convolution stride.
        filter_size (int): kernel size.
        dilation (int): kernel dilation. The original implementation
            accepted this parameter but ignored it; it is now folded
            into the effective kernel size (dilation=1 is unchanged).

    Returns:
        (pad_before, pad_after) tuple; ``pad_after`` takes the extra
        pixel when the total padding is odd.
    """
    # Effective extent of a dilated kernel.
    effective_filter = dilation * (filter_size - 1) + 1
    if img_size % stride == 0:
        total_pad = max(effective_filter - stride, 0)
    else:
        total_pad = max(effective_filter - (img_size % stride), 0)
    return total_pad // 2, total_pad - total_pad // 2
def init_batch_norm_layer(name="batch_norm"):
    """Build default scale/offset ParamAttr for a batch-norm layer.

    The scale is initialized to 1.0 and the offset to 0.0, named
    ``name + '_scale'`` and ``name + '_offset'`` respectively.

    Returns:
        (scale_attr, offset_attr) tuple of fluid.ParamAttr objects.
    """
    scale_attr = fluid.ParamAttr(
        name=name + '_scale',
        initializer=fluid.initializer.Constant(1.0))
    offset_attr = fluid.ParamAttr(
        name=name + '_offset',
        initializer=fluid.initializer.Constant(value=0.0))
    return scale_attr, offset_attr
def init_fc_layer(fout, name='fc'):
    """Build weight/bias ParamAttr for a fully-connected layer.

    Weights are drawn uniformly from +/- 1/sqrt(fout) (fan-out based
    bound); the bias starts at zero.

    Args:
        fout (int): output dimension (fan-out) of the fc layer.
        name (str): parameter name prefix.

    Returns:
        (weight_attr, bias_attr) tuple of fluid.ParamAttr objects.
    """
    bound = 1.0 / math.sqrt(fout)
    weight_attr = fluid.ParamAttr(
        name=name + '_weights',
        initializer=fluid.initializer.UniformInitializer(
            low=-bound, high=bound))
    bias_attr = fluid.ParamAttr(
        name=name + '_offset',
        initializer=fluid.initializer.Constant(value=0.0))
    return weight_attr, bias_attr
def norm_layer(input, norm_type='batch_norm', name=None):
    """Apply batch or instance normalization to ``input``.

    Args:
        input: 4-D NCHW tensor (instance norm reduces over dims 2 and 3).
        norm_type (str): 'batch_norm' or 'instance_norm'.
        name (str): parameter name prefix. For instance norm a None name
            now falls back to a generic prefix instead of crashing.

    Returns:
        The normalized tensor.

    Raises:
        NotImplementedError: for any other ``norm_type``.
    """
    if norm_type == 'batch_norm':
        param_attr = fluid.ParamAttr(
            name=name + '_weights',
            initializer=fluid.initializer.Constant(1.0))
        bias_attr = fluid.ParamAttr(
            name=name + '_offset',
            initializer=fluid.initializer.Constant(value=0.0))
        return fluid.layers.batch_norm(
            input,
            param_attr=param_attr,
            bias_attr=bias_attr,
            moving_mean_name=name + '_mean',
            moving_variance_name=name + '_variance')
    elif norm_type == 'instance_norm':
        helper = fluid.layer_helper.LayerHelper("instance_norm", **locals())
        dtype = helper.input_dtype()
        epsilon = 1e-5
        # Per-sample, per-channel statistics over the spatial axes.
        mean = fluid.layers.reduce_mean(input, dim=[2, 3], keep_dim=True)
        var = fluid.layers.reduce_mean(
            fluid.layers.square(input - mean), dim=[2, 3], keep_dim=True)
        if name is not None:
            scale_name = name + "_scale"
            offset_name = name + "_offset"
        else:
            # Fix: the original left scale_name/offset_name unbound when
            # ``name`` was None, raising NameError below.
            scale_name = "instance_norm_scale"
            offset_name = "instance_norm_offset"
        scale_param = fluid.ParamAttr(
            name=scale_name,
            initializer=fluid.initializer.Constant(1.0),
            trainable=True)
        offset_param = fluid.ParamAttr(
            name=offset_name,
            initializer=fluid.initializer.Constant(0.0),
            trainable=True)
        # One learnable scale/offset per channel (shape [C]).
        scale = helper.create_parameter(
            attr=scale_param, shape=input.shape[1:2], dtype=dtype)
        offset = helper.create_parameter(
            attr=offset_param, shape=input.shape[1:2], dtype=dtype)
        tmp = fluid.layers.elementwise_mul(x=(input - mean), y=scale, axis=1)
        tmp = tmp / fluid.layers.sqrt(var + epsilon)
        tmp = fluid.layers.elementwise_add(tmp, offset, axis=1)
        return tmp
    else:
        # Fix: typo in the original message ("norm tyoe ... is not support").
        raise NotImplementedError("norm type: [%s] is not supported" %
                                  norm_type)
def conv2d(input,
           num_filters=64,
           filter_size=7,
           stride=1,
           stddev=0.02,
           padding=0,
           groups=None,
           name="conv2d",
           norm=None,
           act=None,
           relufactor=0.0,
           use_bias=False,
           padding_type=None,
           initial="normal",
           use_cudnn=True):
    """2-D convolution with optional padding policy, norm and activation.

    Args:
        input: 4-D NCHW tensor.
        num_filters (int): number of output channels.
        filter_size (int): square kernel size.
        stride (int): convolution stride.
        stddev (float): std-dev for the fallback weight initializer.
        padding (int): explicit padding, used when ``padding_type`` is None.
        groups (int): grouped-convolution group count.
        name (str): layer/parameter name prefix.
        norm (str): None, 'batch_norm' or 'instance_norm'.
        act (str): None, 'relu', 'leaky_relu', 'tanh', 'sigmoid' or 'swish'.
        relufactor (float): negative slope for leaky_relu.
        use_bias (bool): whether the conv has a bias parameter.
        padding_type (str): None, 'SAME', 'VALID' or 'DYNAMIC'; overrides
            ``padding`` when set.
        initial (str): weight-init scheme passed to ``initial_type``.
        use_cudnn (bool): whether to use the cuDNN kernel.

    Returns:
        The output tensor after conv (+ optional norm/activation).

    Raises:
        NotImplementedError: for an unrecognized ``act``.
    """
    if padding != 0 and padding_type is not None:
        warnings.warn(
            'padding value and padding type are set in the same time, and the final padding width and padding height are computed by padding_type'
        )

    param_attr, bias_attr = initial_type(
        name=name,
        input=input,
        op_type='conv',
        fan_out=num_filters,
        init=initial,
        use_bias=use_bias,
        filter_size=filter_size,
        stddev=stddev)

    def get_padding(filter_size, stride=1, dilation=1):
        # "DYNAMIC": symmetric padding that keeps spatial size at stride 1.
        return ((stride - 1) + dilation * (filter_size - 1)) // 2

    need_crop = False
    if padding_type == "SAME":
        top_padding, bottom_padding = cal_padding(input.shape[2], stride,
                                                  filter_size)
        # Fix: width padding must come from the width axis (shape[3]);
        # the original used shape[2] here, which was wrong for
        # non-square inputs (identical for square ones).
        left_padding, right_padding = cal_padding(input.shape[3], stride,
                                                  filter_size)
        height_padding = bottom_padding
        width_padding = right_padding
        if top_padding != bottom_padding or left_padding != right_padding:
            # Asymmetric SAME padding: over-pad symmetrically, then crop
            # the leading row/column after the convolution.
            height_padding = top_padding + stride
            width_padding = left_padding + stride
            need_crop = True
        padding = [height_padding, width_padding]
    elif padding_type == "VALID":
        padding = [0, 0]
    elif padding_type == "DYNAMIC":
        padding = get_padding(filter_size, stride)
    # else: keep the explicit ``padding`` argument as-is.

    conv = fluid.layers.conv2d(
        input,
        num_filters,
        filter_size,
        groups=groups,
        name=name,
        stride=stride,
        padding=padding,
        use_cudnn=use_cudnn,
        param_attr=param_attr,
        bias_attr=bias_attr)
    if need_crop:
        conv = conv[:, :, 1:, 1:]

    if norm is not None:
        conv = norm_layer(input=conv, norm_type=norm, name=name + "_norm")

    if act == 'relu':
        conv = fluid.layers.relu(conv, name=name + '_relu')
    elif act == 'leaky_relu':
        conv = fluid.layers.leaky_relu(
            conv, alpha=relufactor, name=name + '_leaky_relu')
    elif act == 'tanh':
        conv = fluid.layers.tanh(conv, name=name + '_tanh')
    elif act == 'sigmoid':
        conv = fluid.layers.sigmoid(conv, name=name + '_sigmoid')
    elif act == 'swish':
        conv = fluid.layers.swish(conv, name=name + '_swish')
    elif act is None:
        pass
    else:
        # Fix: typo in the original message ("... is not support").
        raise NotImplementedError("activation: [%s] is not supported" % act)
    return conv
ppcls/modeling/architectures/mobilenet_v1.py
浏览文件 @
ea746480
...
...
@@ -18,10 +18,12 @@ from __future__ import print_function
import
numpy
as
np
import
paddle
import
paddle.fluid
as
fluid
from
paddle.fluid.param_attr
import
ParamAttr
from
paddle.fluid.dygraph.nn
import
Conv2D
,
Pool2D
,
BatchNorm
,
Linear
,
Dropout
from
paddle.fluid.initializer
import
MSRA
from
paddle
import
ParamAttr
import
paddle.nn
as
nn
import
paddle.nn.functional
as
F
from
paddle.nn
import
Conv2d
,
BatchNorm
,
Linear
,
Dropout
from
paddle.nn
import
AdaptiveAvgPool2d
,
MaxPool2d
,
AvgPool2d
from
paddle.nn.initializer
import
MSRA
import
math
__all__
=
[
...
...
@@ -29,7 +31,7 @@ __all__ = [
]
class
ConvBNLayer
(
fluid
.
dygraph
.
Layer
):
class
ConvBNLayer
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
filter_size
,
...
...
@@ -39,20 +41,17 @@ class ConvBNLayer(fluid.dygraph.Layer):
channels
=
None
,
num_groups
=
1
,
act
=
'relu'
,
use_cudnn
=
True
,
name
=
None
):
super
(
ConvBNLayer
,
self
).
__init__
()
self
.
_conv
=
Conv2
D
(
num
_channels
=
num_channels
,
num_filter
s
=
num_filters
,
filter
_size
=
filter_size
,
self
.
_conv
=
Conv2
d
(
in
_channels
=
num_channels
,
out_channel
s
=
num_filters
,
kernel
_size
=
filter_size
,
stride
=
stride
,
padding
=
padding
,
groups
=
num_groups
,
act
=
None
,
use_cudnn
=
use_cudnn
,
param_attr
=
ParamAttr
(
weight_attr
=
ParamAttr
(
initializer
=
MSRA
(),
name
=
name
+
"_weights"
),
bias_attr
=
False
)
...
...
@@ -70,7 +69,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
return
y
class
DepthwiseSeparable
(
fluid
.
dygraph
.
Layer
):
class
DepthwiseSeparable
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
num_filters1
,
...
...
@@ -88,7 +87,6 @@ class DepthwiseSeparable(fluid.dygraph.Layer):
stride
=
stride
,
padding
=
1
,
num_groups
=
int
(
num_groups
*
scale
),
use_cudnn
=
False
,
name
=
name
+
"_dw"
)
self
.
_pointwise_conv
=
ConvBNLayer
(
...
...
@@ -105,7 +103,7 @@ class DepthwiseSeparable(fluid.dygraph.Layer):
return
y
class
MobileNet
(
fluid
.
dygraph
.
Layer
):
class
MobileNet
(
nn
.
Layer
):
def
__init__
(
self
,
scale
=
1.0
,
class_dim
=
1000
):
super
(
MobileNet
,
self
).
__init__
()
self
.
scale
=
scale
...
...
@@ -229,12 +227,12 @@ class MobileNet(fluid.dygraph.Layer):
name
=
"conv6"
))
self
.
block_list
.
append
(
conv6
)
self
.
pool2d_avg
=
Pool2D
(
pool_type
=
'avg'
,
global_pooling
=
True
)
self
.
pool2d_avg
=
AdaptiveAvgPool2d
(
1
)
self
.
out
=
Linear
(
int
(
1024
*
scale
),
class_dim
,
param
_attr
=
ParamAttr
(
weight
_attr
=
ParamAttr
(
initializer
=
MSRA
(),
name
=
"fc7_weights"
),
bias_attr
=
ParamAttr
(
name
=
"fc7_offset"
))
...
...
@@ -243,7 +241,7 @@ class MobileNet(fluid.dygraph.Layer):
for
block
in
self
.
block_list
:
y
=
block
(
y
)
y
=
self
.
pool2d_avg
(
y
)
y
=
fluid
.
layers
.
reshape
(
y
,
shape
=
[
-
1
,
int
(
1024
*
self
.
scale
)])
y
=
paddle
.
reshape
(
y
,
shape
=
[
-
1
,
int
(
1024
*
self
.
scale
)])
y
=
self
.
out
(
y
)
return
y
...
...
ppcls/modeling/architectures/mobilenet_v2.py
浏览文件 @
ea746480
...
...
@@ -18,9 +18,11 @@ from __future__ import print_function
import
numpy
as
np
import
paddle
import
paddle.fluid
as
fluid
from
paddle.fluid.param_attr
import
ParamAttr
from
paddle.fluid.dygraph.nn
import
Conv2D
,
Pool2D
,
BatchNorm
,
Linear
,
Dropout
from
paddle
import
ParamAttr
import
paddle.nn
as
nn
import
paddle.nn.functional
as
F
from
paddle.nn
import
Conv2d
,
BatchNorm
,
Linear
,
Dropout
from
paddle.nn
import
AdaptiveAvgPool2d
,
MaxPool2d
,
AvgPool2d
import
math
...
...
@@ -30,7 +32,7 @@ __all__ = [
]
class
ConvBNLayer
(
fluid
.
dygraph
.
Layer
):
class
ConvBNLayer
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
filter_size
,
...
...
@@ -43,16 +45,14 @@ class ConvBNLayer(fluid.dygraph.Layer):
use_cudnn
=
True
):
super
(
ConvBNLayer
,
self
).
__init__
()
self
.
_conv
=
Conv2
D
(
num
_channels
=
num_channels
,
num_filter
s
=
num_filters
,
filter
_size
=
filter_size
,
self
.
_conv
=
Conv2
d
(
in
_channels
=
num_channels
,
out_channel
s
=
num_filters
,
kernel
_size
=
filter_size
,
stride
=
stride
,
padding
=
padding
,
groups
=
num_groups
,
act
=
None
,
use_cudnn
=
use_cudnn
,
param_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
weight_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
bias_attr
=
False
)
self
.
_batch_norm
=
BatchNorm
(
...
...
@@ -66,11 +66,11 @@ class ConvBNLayer(fluid.dygraph.Layer):
y
=
self
.
_conv
(
inputs
)
y
=
self
.
_batch_norm
(
y
)
if
if_act
:
y
=
fluid
.
layers
.
relu6
(
y
)
y
=
F
.
relu6
(
y
)
return
y
class
InvertedResidualUnit
(
fluid
.
dygraph
.
Layer
):
class
InvertedResidualUnit
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
num_in_filter
,
num_filters
,
stride
,
filter_size
,
padding
,
expansion_factor
,
name
):
super
(
InvertedResidualUnit
,
self
).
__init__
()
...
...
@@ -108,11 +108,11 @@ class InvertedResidualUnit(fluid.dygraph.Layer):
y
=
self
.
_bottleneck_conv
(
y
,
if_act
=
True
)
y
=
self
.
_linear_conv
(
y
,
if_act
=
False
)
if
ifshortcut
:
y
=
fluid
.
layers
.
elementwise_add
(
inputs
,
y
)
y
=
paddle
.
elementwise_add
(
inputs
,
y
)
return
y
class
InvresiBlocks
(
fluid
.
dygraph
.
Layer
):
class
InvresiBlocks
(
nn
.
Layer
):
def
__init__
(
self
,
in_c
,
t
,
c
,
n
,
s
,
name
):
super
(
InvresiBlocks
,
self
).
__init__
()
...
...
@@ -148,7 +148,7 @@ class InvresiBlocks(fluid.dygraph.Layer):
return
y
class
MobileNet
(
fluid
.
dygraph
.
Layer
):
class
MobileNet
(
nn
.
Layer
):
def
__init__
(
self
,
class_dim
=
1000
,
scale
=
1.0
):
super
(
MobileNet
,
self
).
__init__
()
self
.
scale
=
scale
...
...
@@ -199,12 +199,12 @@ class MobileNet(fluid.dygraph.Layer):
padding
=
0
,
name
=
"conv9"
)
self
.
pool2d_avg
=
Pool2D
(
pool_type
=
"avg"
,
global_pooling
=
True
)
self
.
pool2d_avg
=
AdaptiveAvgPool2d
(
1
)
self
.
out
=
Linear
(
self
.
out_c
,
class_dim
,
param
_attr
=
ParamAttr
(
name
=
"fc10_weights"
),
weight
_attr
=
ParamAttr
(
name
=
"fc10_weights"
),
bias_attr
=
ParamAttr
(
name
=
"fc10_offset"
))
def
forward
(
self
,
inputs
):
...
...
@@ -213,7 +213,7 @@ class MobileNet(fluid.dygraph.Layer):
y
=
block
(
y
)
y
=
self
.
conv9
(
y
,
if_act
=
True
)
y
=
self
.
pool2d_avg
(
y
)
y
=
fluid
.
layers
.
reshape
(
y
,
shape
=
[
-
1
,
self
.
out_c
])
y
=
paddle
.
reshape
(
y
,
shape
=
[
-
1
,
self
.
out_c
])
y
=
self
.
out
(
y
)
return
y
...
...
ppcls/modeling/architectures/mobilenet_v3.py
浏览文件 @
ea746480
...
...
@@ -18,9 +18,12 @@ from __future__ import print_function
import
numpy
as
np
import
paddle
import
paddle.fluid
as
fluid
from
paddle.fluid.param_attr
import
ParamAttr
from
paddle.fluid.dygraph.nn
import
Conv2D
,
Pool2D
,
BatchNorm
,
Linear
,
Dropout
from
paddle
import
ParamAttr
import
paddle.nn
as
nn
import
paddle.nn.functional
as
F
from
paddle.nn
import
Conv2d
,
BatchNorm
,
Linear
,
Dropout
from
paddle.nn
import
AdaptiveAvgPool2d
,
MaxPool2d
,
AvgPool2d
from
paddle.regularizer
import
L2Decay
import
math
...
...
@@ -42,8 +45,12 @@ def make_divisible(v, divisor=8, min_value=None):
return
new_v
class
MobileNetV3
(
fluid
.
dygraph
.
Layer
):
def
__init__
(
self
,
scale
=
1.0
,
model_name
=
"small"
,
class_dim
=
1000
):
class
MobileNetV3
(
nn
.
Layer
):
def
__init__
(
self
,
scale
=
1.0
,
model_name
=
"small"
,
dropout_prob
=
0.2
,
class_dim
=
1000
):
super
(
MobileNetV3
,
self
).
__init__
()
inplanes
=
16
...
...
@@ -130,41 +137,42 @@ class MobileNetV3(fluid.dygraph.Layer):
act
=
"hard_swish"
,
name
=
"conv_last"
)
self
.
pool
=
Pool2D
(
pool_type
=
"avg"
,
global_pooling
=
True
,
use_cudnn
=
False
)
self
.
pool
=
AdaptiveAvgPool2d
(
1
)
self
.
last_conv
=
Conv2
D
(
num
_channels
=
make_divisible
(
scale
*
self
.
cls_ch_squeeze
),
num_filter
s
=
self
.
cls_ch_expand
,
filter
_size
=
1
,
self
.
last_conv
=
Conv2
d
(
in
_channels
=
make_divisible
(
scale
*
self
.
cls_ch_squeeze
),
out_channel
s
=
self
.
cls_ch_expand
,
kernel
_size
=
1
,
stride
=
1
,
padding
=
0
,
act
=
None
,
param_attr
=
ParamAttr
(
name
=
"last_1x1_conv_weights"
),
weight_attr
=
ParamAttr
(
name
=
"last_1x1_conv_weights"
),
bias_attr
=
False
)
self
.
dropout
=
Dropout
(
p
=
dropout_prob
,
mode
=
"downscale_in_infer"
)
self
.
out
=
Linear
(
input_dim
=
self
.
cls_ch_expand
,
output_dim
=
class_dim
,
param
_attr
=
ParamAttr
(
"fc_weights"
),
self
.
cls_ch_expand
,
class_dim
,
weight
_attr
=
ParamAttr
(
"fc_weights"
),
bias_attr
=
ParamAttr
(
name
=
"fc_offset"
))
def
forward
(
self
,
inputs
,
label
=
None
,
dropout_prob
=
0.2
):
def
forward
(
self
,
inputs
,
label
=
None
):
x
=
self
.
conv1
(
inputs
)
for
block
in
self
.
block_list
:
x
=
block
(
x
)
x
=
self
.
last_second_conv
(
x
)
x
=
self
.
pool
(
x
)
x
=
self
.
last_conv
(
x
)
x
=
fluid
.
layers
.
hard_swish
(
x
)
x
=
fluid
.
layers
.
dropout
(
x
=
x
,
dropout_prob
=
dropout_prob
)
x
=
fluid
.
layers
.
reshape
(
x
,
shape
=
[
x
.
shape
[
0
],
x
.
shape
[
1
]])
x
=
F
.
hard_swish
(
x
)
x
=
self
.
dropout
(
x
)
x
=
paddle
.
reshape
(
x
,
shape
=
[
x
.
shape
[
0
],
x
.
shape
[
1
]])
x
=
self
.
out
(
x
)
return
x
class
ConvBNLayer
(
fluid
.
dygraph
.
Layer
):
class
ConvBNLayer
(
nn
.
Layer
):
def
__init__
(
self
,
in_c
,
out_c
,
...
...
@@ -179,28 +187,22 @@ class ConvBNLayer(fluid.dygraph.Layer):
super
(
ConvBNLayer
,
self
).
__init__
()
self
.
if_act
=
if_act
self
.
act
=
act
self
.
conv
=
fluid
.
dygraph
.
Conv2D
(
num
_channels
=
in_c
,
num_filter
s
=
out_c
,
filter
_size
=
filter_size
,
self
.
conv
=
Conv2d
(
in
_channels
=
in_c
,
out_channel
s
=
out_c
,
kernel
_size
=
filter_size
,
stride
=
stride
,
padding
=
padding
,
groups
=
num_groups
,
param_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
bias_attr
=
False
,
use_cudnn
=
use_cudnn
,
act
=
None
)
self
.
bn
=
fluid
.
dygraph
.
BatchNorm
(
weight_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
bias_attr
=
False
)
self
.
bn
=
BatchNorm
(
num_channels
=
out_c
,
act
=
None
,
param_attr
=
ParamAttr
(
name
=
name
+
"_bn_scale"
,
regularizer
=
fluid
.
regularizer
.
L2DecayRegularizer
(
regularization_coeff
=
0.0
)),
name
=
name
+
"_bn_scale"
,
regularizer
=
L2Decay
(
0.0
)),
bias_attr
=
ParamAttr
(
name
=
name
+
"_bn_offset"
,
regularizer
=
fluid
.
regularizer
.
L2DecayRegularizer
(
regularization_coeff
=
0.0
)),
name
=
name
+
"_bn_offset"
,
regularizer
=
L2Decay
(
0.0
)),
moving_mean_name
=
name
+
"_bn_mean"
,
moving_variance_name
=
name
+
"_bn_variance"
)
...
...
@@ -209,16 +211,16 @@ class ConvBNLayer(fluid.dygraph.Layer):
x
=
self
.
bn
(
x
)
if
self
.
if_act
:
if
self
.
act
==
"relu"
:
x
=
fluid
.
layers
.
relu
(
x
)
x
=
F
.
relu
(
x
)
elif
self
.
act
==
"hard_swish"
:
x
=
fluid
.
layers
.
hard_swish
(
x
)
x
=
F
.
hard_swish
(
x
)
else
:
print
(
"The activation function is selected incorrectly."
)
exit
()
return
x
class
ResidualUnit
(
fluid
.
dygraph
.
Layer
):
class
ResidualUnit
(
nn
.
Layer
):
def
__init__
(
self
,
in_c
,
mid_c
,
...
...
@@ -270,40 +272,38 @@ class ResidualUnit(fluid.dygraph.Layer):
x
=
self
.
mid_se
(
x
)
x
=
self
.
linear_conv
(
x
)
if
self
.
if_shortcut
:
x
=
fluid
.
layers
.
elementwise_add
(
inputs
,
x
)
x
=
paddle
.
elementwise_add
(
inputs
,
x
)
return
x
class
SEModule
(
fluid
.
dygraph
.
Layer
):
class
SEModule
(
nn
.
Layer
):
def
__init__
(
self
,
channel
,
reduction
=
4
,
name
=
""
):
super
(
SEModule
,
self
).
__init__
()
self
.
avg_pool
=
fluid
.
dygraph
.
Pool2D
(
pool_type
=
"avg"
,
global_pooling
=
True
,
use_cudnn
=
False
)
self
.
conv1
=
fluid
.
dygraph
.
Conv2D
(
num_channels
=
channel
,
num_filters
=
channel
//
reduction
,
filter_size
=
1
,
self
.
avg_pool
=
AdaptiveAvgPool2d
(
1
)
self
.
conv1
=
Conv2d
(
in_channels
=
channel
,
out_channels
=
channel
//
reduction
,
kernel_size
=
1
,
stride
=
1
,
padding
=
0
,
act
=
"relu"
,
param_attr
=
ParamAttr
(
name
=
name
+
"_1_weights"
),
weight_attr
=
ParamAttr
(
name
=
name
+
"_1_weights"
),
bias_attr
=
ParamAttr
(
name
=
name
+
"_1_offset"
))
self
.
conv2
=
fluid
.
dygraph
.
Conv2D
(
num
_channels
=
channel
//
reduction
,
num_filter
s
=
channel
,
filter
_size
=
1
,
self
.
conv2
=
Conv2d
(
in
_channels
=
channel
//
reduction
,
out_channel
s
=
channel
,
kernel
_size
=
1
,
stride
=
1
,
padding
=
0
,
act
=
None
,
param_attr
=
ParamAttr
(
name
+
"_2_weights"
),
weight_attr
=
ParamAttr
(
name
+
"_2_weights"
),
bias_attr
=
ParamAttr
(
name
=
name
+
"_2_offset"
))
def
forward
(
self
,
inputs
):
outputs
=
self
.
avg_pool
(
inputs
)
outputs
=
self
.
conv1
(
outputs
)
outputs
=
F
.
relu
(
outputs
)
outputs
=
self
.
conv2
(
outputs
)
outputs
=
fluid
.
layers
.
hard_sigmoid
(
outputs
)
return
fluid
.
layers
.
elementwise_mul
(
x
=
inputs
,
y
=
outputs
,
axis
=
0
)
outputs
=
F
.
hard_sigmoid
(
outputs
)
return
paddle
.
multiply
(
x
=
inputs
,
y
=
outputs
,
axis
=
0
)
def
MobileNetV3_small_x0_35
(
**
args
):
...
...
ppcls/modeling/architectures/model_libs.py
已删除
100644 → 0
浏览文件 @
5e092259
#copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from
__future__
import
absolute_import
from
__future__
import
division
from
__future__
import
print_function
import
paddle
import
paddle.fluid
as
fluid
import
contextlib
bn_regularizer
=
fluid
.
regularizer
.
L2DecayRegularizer
(
regularization_coeff
=
0.0
)
name_scope
=
""
@contextlib.contextmanager
def scope(name):
    """Temporarily append ``name + '/'`` to the global ``name_scope``.

    Fix: the previous scope is now restored in a ``finally`` block, so an
    exception raised inside the ``with`` body no longer leaves
    ``name_scope`` permanently extended.
    """
    global name_scope
    bk = name_scope
    name_scope = name_scope + name + '/'
    try:
        yield
    finally:
        name_scope = bk
def max_pool(input, kernel, stride, padding):
    """Run 2-D max pooling over ``input`` with the given geometry.

    Args:
        input: 4-D NCHW tensor.
        kernel: pooling window size.
        stride: pooling stride.
        padding: pooling padding.

    Returns:
        The pooled tensor.
    """
    return fluid.layers.pool2d(
        input,
        pool_size=kernel,
        pool_type='max',
        pool_stride=stride,
        pool_padding=padding)
def group_norm(input, G, eps=1e-5, param_attr=None, bias_attr=None):
    """Apply GroupNorm to ``input``, adjusting ``G`` to divide the channels.

    Args:
        input: 4-D NCHW tensor.
        G (int): desired group count; if it does not divide C, the nearest
            divisor of C found by searching G, G+1, G-1, G+2, ... is used.
        eps (float): accepted but unused here; ``fluid.layers.group_norm``
            applies its own default epsilon.
        param_attr: ParamAttr for the scale parameter.
        bias_attr: ParamAttr for the offset parameter.

    Returns:
        The group-normalized tensor, named under the current ``name_scope``.
    """
    N, C, H, W = input.shape
    if C % G != 0:
        # print "group can not divide channle:", C, G
        # Search outward from G (offsets 0, +1, -1, +2, -2, ... up to 9)
        # for the nearest positive group count that divides C.
        for d in range(10):
            for t in [d, -d]:
                if G + t <= 0:
                    continue
                if C % (G + t) == 0:
                    G = G + t
                    break
            if C % G == 0:
                # print "use group size:", G
                break
    assert C % G == 0
    # ``name_scope`` is the module-level prefix maintained by ``scope()``.
    x = fluid.layers.group_norm(
        input,
        groups=G,
        param_attr=param_attr,
        bias_attr=bias_attr,
        name=name_scope + 'group_norm')
    return x
def bn(*args, **kargs):
    """Apply batch normalization under a 'BatchNorm' name scope.

    Positional/keyword arguments are forwarded to
    ``fluid.layers.batch_norm``; epsilon, momentum, parameter names and
    moving-statistic names are filled in from the current ``name_scope``.
    """
    with scope('BatchNorm'):
        gamma_attr = fluid.ParamAttr(
            name=name_scope + 'gamma', regularizer=bn_regularizer)
        beta_attr = fluid.ParamAttr(
            name=name_scope + 'beta', regularizer=bn_regularizer)
        return fluid.layers.batch_norm(
            *args,
            epsilon=1e-3,
            momentum=0.99,
            param_attr=gamma_attr,
            bias_attr=beta_attr,
            moving_mean_name=name_scope + 'moving_mean',
            moving_variance_name=name_scope + 'moving_variance',
            **kargs)
def bn_relu(data):
    """Batch-normalize ``data`` and apply ReLU."""
    normalized = bn(data)
    return fluid.layers.relu(normalized)
def relu(data):
    """Apply ReLU to ``data``."""
    activated = fluid.layers.relu(data)
    return activated
def conv(*args, **kargs):
    """Wrap ``fluid.layers.conv2d``, naming parameters from ``name_scope``.

    The weight parameter is always named ``name_scope + 'weights'``
    (overriding any ``param_attr`` the caller passed). A truthy
    ``bias_attr`` is replaced by a zero-initialized, unregularized bias
    named ``name_scope + 'biases'``; otherwise the bias is disabled.
    """
    kargs['param_attr'] = name_scope + 'weights'
    if kargs.get('bias_attr'):
        kargs['bias_attr'] = fluid.ParamAttr(
            name=name_scope + 'biases',
            regularizer=None,
            initializer=fluid.initializer.ConstantInitializer(value=0.0))
    else:
        kargs['bias_attr'] = False
    return fluid.layers.conv2d(*args, **kargs)
def deconv(*args, **kargs):
    """Wrap ``fluid.layers.conv2d_transpose`` with scope-derived names.

    The weight parameter is always named ``name_scope + 'weights'``; a
    truthy ``bias_attr`` is renamed to ``name_scope + 'biases'``,
    otherwise the bias is disabled.
    """
    kargs['param_attr'] = name_scope + 'weights'
    if kargs.get('bias_attr'):
        kargs['bias_attr'] = name_scope + 'biases'
    else:
        kargs['bias_attr'] = False
    return fluid.layers.conv2d_transpose(*args, **kargs)
def seperate_conv(input, channel, stride, filter, dilation=1, act=None):
    """Depthwise-separable convolution: depthwise conv then pointwise 1x1.

    Each stage is conv + bn (+ optional activation) under its own scope.

    Args:
        input: 4-D NCHW tensor.
        channel (int): output channels of the pointwise convolution.
        stride (int): stride of the depthwise convolution.
        filter (int): depthwise kernel size.
        dilation (int): dilation of the depthwise convolution; the
            padding ``(filter // 2) * dilation`` keeps the spatial size
            at stride 1.
        act: optional callable activation applied after each bn.

    Returns:
        The output tensor.

    NOTE(review): ``conv()`` unconditionally overwrites ``param_attr``
    with ``name_scope + 'weights'``, so the regularizer/initializer set
    in the ParamAttr objects below appears to be discarded — confirm
    intended behavior.
    """
    param_attr = fluid.ParamAttr(
        name=name_scope + 'weights',
        regularizer=fluid.regularizer.L2DecayRegularizer(
            regularization_coeff=0.0),
        initializer=fluid.initializer.TruncatedNormal(
            loc=0.0, scale=0.33))
    with scope('depthwise'):
        # Depthwise: one group per input channel, channel count preserved.
        input = conv(
            input,
            input.shape[1],
            filter,
            stride,
            groups=input.shape[1],
            padding=(filter // 2) * dilation,
            dilation=dilation,
            use_cudnn=False,
            param_attr=param_attr)
        input = bn(input)
        if act:
            input = act(input)
    param_attr = fluid.ParamAttr(
        name=name_scope + 'weights',
        regularizer=None,
        initializer=fluid.initializer.TruncatedNormal(
            loc=0.0, scale=0.06))
    with scope('pointwise'):
        # Pointwise: 1x1 convolution mixing channels up to ``channel``.
        input = conv(
            input, channel, 1, 1, groups=1, padding=0, param_attr=param_attr)
        input = bn(input)
        if act:
            input = act(input)
    return input
ppcls/modeling/architectures/res2net.py
浏览文件 @
ea746480
...
...
@@ -18,9 +18,12 @@ from __future__ import print_function
import
numpy
as
np
import
paddle
import
paddle.fluid
as
fluid
from
paddle.fluid.param_attr
import
ParamAttr
from
paddle.fluid.dygraph.nn
import
Conv2D
,
Pool2D
,
BatchNorm
,
Linear
,
Dropout
from
paddle
import
ParamAttr
import
paddle.nn
as
nn
import
paddle.nn.functional
as
F
from
paddle.nn
import
Conv2d
,
BatchNorm
,
Linear
,
Dropout
from
paddle.nn
import
AdaptiveAvgPool2d
,
MaxPool2d
,
AvgPool2d
from
paddle.nn.initializer
import
Uniform
import
math
...
...
@@ -31,7 +34,7 @@ __all__ = [
]
class
ConvBNLayer
(
fluid
.
dygraph
.
Layer
):
class
ConvBNLayer
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
...
...
@@ -43,15 +46,14 @@ class ConvBNLayer(fluid.dygraph.Layer):
name
=
None
,
):
super
(
ConvBNLayer
,
self
).
__init__
()
self
.
_conv
=
Conv2
D
(
num
_channels
=
num_channels
,
num_filter
s
=
num_filters
,
filter
_size
=
filter_size
,
self
.
_conv
=
Conv2
d
(
in
_channels
=
num_channels
,
out_channel
s
=
num_filters
,
kernel
_size
=
filter_size
,
stride
=
stride
,
padding
=
(
filter_size
-
1
)
//
2
,
groups
=
groups
,
act
=
None
,
param_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
weight_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
bias_attr
=
False
)
if
name
==
"conv1"
:
bn_name
=
"bn_"
+
name
...
...
@@ -71,7 +73,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
return
y
class
BottleneckBlock
(
fluid
.
dygraph
.
Layer
):
class
BottleneckBlock
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels1
,
num_channels2
,
...
...
@@ -102,8 +104,7 @@ class BottleneckBlock(fluid.dygraph.Layer):
act
=
'relu'
,
name
=
name
+
'_branch2b_'
+
str
(
s
+
1
)))
self
.
conv1_list
.
append
(
conv1
)
self
.
pool2d_avg
=
Pool2D
(
pool_size
=
3
,
pool_stride
=
stride
,
pool_padding
=
1
,
pool_type
=
'avg'
)
self
.
pool2d_avg
=
AvgPool2d
(
kernel_size
=
3
,
stride
=
stride
,
padding
=
1
)
self
.
conv2
=
ConvBNLayer
(
num_channels
=
num_filters
,
...
...
@@ -124,7 +125,7 @@ class BottleneckBlock(fluid.dygraph.Layer):
def
forward
(
self
,
inputs
):
y
=
self
.
conv0
(
inputs
)
xs
=
fluid
.
layers
.
split
(
y
,
self
.
scales
,
1
)
xs
=
paddle
.
split
(
y
,
self
.
scales
,
1
)
ys
=
[]
for
s
,
conv1
in
enumerate
(
self
.
conv1_list
):
if
s
==
0
or
self
.
stride
==
2
:
...
...
@@ -135,18 +136,18 @@ class BottleneckBlock(fluid.dygraph.Layer):
ys
.
append
(
xs
[
-
1
])
else
:
ys
.
append
(
self
.
pool2d_avg
(
xs
[
-
1
]))
conv1
=
fluid
.
layers
.
concat
(
ys
,
axis
=
1
)
conv1
=
paddle
.
concat
(
ys
,
axis
=
1
)
conv2
=
self
.
conv2
(
conv1
)
if
self
.
shortcut
:
short
=
inputs
else
:
short
=
self
.
short
(
inputs
)
y
=
fluid
.
layers
.
elementwise_add
(
x
=
short
,
y
=
conv2
,
act
=
'relu'
)
y
=
paddle
.
elementwise_add
(
x
=
short
,
y
=
conv2
,
act
=
'relu'
)
return
y
class
Res2Net
(
fluid
.
dygraph
.
Layer
):
class
Res2Net
(
nn
.
Layer
):
def
__init__
(
self
,
layers
=
50
,
scales
=
4
,
width
=
26
,
class_dim
=
1000
):
super
(
Res2Net
,
self
).
__init__
()
...
...
@@ -178,8 +179,7 @@ class Res2Net(fluid.dygraph.Layer):
stride
=
2
,
act
=
'relu'
,
name
=
"conv1"
)
self
.
pool2d_max
=
Pool2D
(
pool_size
=
3
,
pool_stride
=
2
,
pool_padding
=
1
,
pool_type
=
'max'
)
self
.
pool2d_max
=
MaxPool2d
(
kernel_size
=
3
,
stride
=
2
,
padding
=
1
)
self
.
block_list
=
[]
for
block
in
range
(
len
(
depth
)):
...
...
@@ -207,8 +207,7 @@ class Res2Net(fluid.dygraph.Layer):
self
.
block_list
.
append
(
bottleneck_block
)
shortcut
=
True
self
.
pool2d_avg
=
Pool2D
(
pool_size
=
7
,
pool_type
=
'avg'
,
global_pooling
=
True
)
self
.
pool2d_avg
=
AdaptiveAvgPool2d
(
1
)
self
.
pool2d_avg_channels
=
num_channels
[
-
1
]
*
2
...
...
@@ -217,9 +216,8 @@ class Res2Net(fluid.dygraph.Layer):
self
.
out
=
Linear
(
self
.
pool2d_avg_channels
,
class_dim
,
param_attr
=
ParamAttr
(
initializer
=
fluid
.
initializer
.
Uniform
(
-
stdv
,
stdv
),
name
=
"fc_weights"
),
weight_attr
=
ParamAttr
(
initializer
=
Uniform
(
-
stdv
,
stdv
),
name
=
"fc_weights"
),
bias_attr
=
ParamAttr
(
name
=
"fc_offset"
))
def
forward
(
self
,
inputs
):
...
...
@@ -228,7 +226,7 @@ class Res2Net(fluid.dygraph.Layer):
for
block
in
self
.
block_list
:
y
=
block
(
y
)
y
=
self
.
pool2d_avg
(
y
)
y
=
fluid
.
layers
.
reshape
(
y
,
shape
=
[
-
1
,
self
.
pool2d_avg_channels
])
y
=
paddle
.
reshape
(
y
,
shape
=
[
-
1
,
self
.
pool2d_avg_channels
])
y
=
self
.
out
(
y
)
return
y
...
...
ppcls/modeling/architectures/res2net_vd.py
浏览文件 @
ea746480
...
...
@@ -18,9 +18,12 @@ from __future__ import print_function
import
numpy
as
np
import
paddle
import
paddle.fluid
as
fluid
from
paddle.fluid.param_attr
import
ParamAttr
from
paddle.fluid.dygraph.nn
import
Conv2D
,
Pool2D
,
BatchNorm
,
Linear
,
Dropout
from
paddle
import
ParamAttr
import
paddle.nn
as
nn
import
paddle.nn.functional
as
F
from
paddle.nn
import
Conv2d
,
BatchNorm
,
Linear
,
Dropout
from
paddle.nn
import
AdaptiveAvgPool2d
,
MaxPool2d
,
AvgPool2d
from
paddle.nn.initializer
import
Uniform
import
math
...
...
@@ -31,7 +34,7 @@ __all__ = [
]
class
ConvBNLayer
(
fluid
.
dygraph
.
Layer
):
class
ConvBNLayer
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
...
...
@@ -45,21 +48,17 @@ class ConvBNLayer(fluid.dygraph.Layer):
super
(
ConvBNLayer
,
self
).
__init__
()
self
.
is_vd_mode
=
is_vd_mode
self
.
_pool2d_avg
=
Pool2D
(
pool_size
=
2
,
pool_stride
=
2
,
pool_padding
=
0
,
pool_type
=
'avg'
,
ceil_mode
=
True
)
self
.
_conv
=
Conv2D
(
num_channels
=
num_channels
,
num_filters
=
num_filters
,
filter_size
=
filter_size
,
self
.
_pool2d_avg
=
AvgPool2d
(
kernel_size
=
2
,
stride
=
2
,
padding
=
0
,
ceil_mode
=
True
)
self
.
_conv
=
Conv2d
(
in_channels
=
num_channels
,
out_channels
=
num_filters
,
kernel_size
=
filter_size
,
stride
=
stride
,
padding
=
(
filter_size
-
1
)
//
2
,
groups
=
groups
,
act
=
None
,
param
_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
weight
_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
bias_attr
=
False
)
if
name
==
"conv1"
:
bn_name
=
"bn_"
+
name
...
...
@@ -81,7 +80,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
return
y
class
BottleneckBlock
(
fluid
.
dygraph
.
Layer
):
class
BottleneckBlock
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels1
,
num_channels2
,
...
...
@@ -112,8 +111,8 @@ class BottleneckBlock(fluid.dygraph.Layer):
act
=
'relu'
,
name
=
name
+
'_branch2b_'
+
str
(
s
+
1
)))
self
.
conv1_list
.
append
(
conv1
)
self
.
pool2d_avg
=
Pool2D
(
pool_size
=
3
,
pool_stride
=
stride
,
pool_padding
=
1
,
pool_type
=
'avg'
)
self
.
pool2d_avg
=
AvgPool2d
(
kernel_size
=
3
,
stride
=
stride
,
padding
=
1
,
ceil_mode
=
True
)
self
.
conv2
=
ConvBNLayer
(
num_channels
=
num_filters
,
...
...
@@ -135,7 +134,7 @@ class BottleneckBlock(fluid.dygraph.Layer):
def
forward
(
self
,
inputs
):
y
=
self
.
conv0
(
inputs
)
xs
=
fluid
.
layers
.
split
(
y
,
self
.
scales
,
1
)
xs
=
paddle
.
split
(
y
,
self
.
scales
,
1
)
ys
=
[]
for
s
,
conv1
in
enumerate
(
self
.
conv1_list
):
if
s
==
0
or
self
.
stride
==
2
:
...
...
@@ -146,18 +145,18 @@ class BottleneckBlock(fluid.dygraph.Layer):
ys
.
append
(
xs
[
-
1
])
else
:
ys
.
append
(
self
.
pool2d_avg
(
xs
[
-
1
]))
conv1
=
fluid
.
layers
.
concat
(
ys
,
axis
=
1
)
conv1
=
paddle
.
concat
(
ys
,
axis
=
1
)
conv2
=
self
.
conv2
(
conv1
)
if
self
.
shortcut
:
short
=
inputs
else
:
short
=
self
.
short
(
inputs
)
y
=
fluid
.
layers
.
elementwise_add
(
x
=
short
,
y
=
conv2
,
act
=
'relu'
)
y
=
paddle
.
elementwise_add
(
x
=
short
,
y
=
conv2
,
act
=
'relu'
)
return
y
class
Res2Net_vd
(
fluid
.
dygraph
.
Layer
):
class
Res2Net_vd
(
nn
.
Layer
):
def
__init__
(
self
,
layers
=
50
,
scales
=
4
,
width
=
26
,
class_dim
=
1000
):
super
(
Res2Net_vd
,
self
).
__init__
()
...
...
@@ -203,8 +202,7 @@ class Res2Net_vd(fluid.dygraph.Layer):
stride
=
1
,
act
=
'relu'
,
name
=
"conv1_3"
)
self
.
pool2d_max
=
Pool2D
(
pool_size
=
3
,
pool_stride
=
2
,
pool_padding
=
1
,
pool_type
=
'max'
)
self
.
pool2d_max
=
MaxPool2d
(
kernel_size
=
3
,
stride
=
2
,
padding
=
1
)
self
.
block_list
=
[]
for
block
in
range
(
len
(
depth
)):
...
...
@@ -232,8 +230,7 @@ class Res2Net_vd(fluid.dygraph.Layer):
self
.
block_list
.
append
(
bottleneck_block
)
shortcut
=
True
self
.
pool2d_avg
=
Pool2D
(
pool_size
=
7
,
pool_type
=
'avg'
,
global_pooling
=
True
)
self
.
pool2d_avg
=
AdaptiveAvgPool2d
(
1
)
self
.
pool2d_avg_channels
=
num_channels
[
-
1
]
*
2
...
...
@@ -242,9 +239,8 @@ class Res2Net_vd(fluid.dygraph.Layer):
self
.
out
=
Linear
(
self
.
pool2d_avg_channels
,
class_dim
,
param_attr
=
ParamAttr
(
initializer
=
fluid
.
initializer
.
Uniform
(
-
stdv
,
stdv
),
name
=
"fc_weights"
),
weight_attr
=
ParamAttr
(
initializer
=
Uniform
(
-
stdv
,
stdv
),
name
=
"fc_weights"
),
bias_attr
=
ParamAttr
(
name
=
"fc_offset"
))
def
forward
(
self
,
inputs
):
...
...
@@ -255,7 +251,7 @@ class Res2Net_vd(fluid.dygraph.Layer):
for
block
in
self
.
block_list
:
y
=
block
(
y
)
y
=
self
.
pool2d_avg
(
y
)
y
=
fluid
.
layers
.
reshape
(
y
,
shape
=
[
-
1
,
self
.
pool2d_avg_channels
])
y
=
paddle
.
reshape
(
y
,
shape
=
[
-
1
,
self
.
pool2d_avg_channels
])
y
=
self
.
out
(
y
)
return
y
...
...
ppcls/modeling/architectures/resnest.py
浏览文件 @
ea746480
...
...
@@ -20,11 +20,11 @@ import numpy as np
import
paddle
import
math
import
paddle.nn
as
nn
import
paddle.fluid
as
fluid
from
paddle.
fluid.param_attr
import
ParamAttr
from
paddle.
fluid.regularizer
import
L2DecayRegularizer
from
paddle.
fluid.initializer
import
MSRA
,
ConstantInitializer
from
paddle.
fluid.dygraph.nn
import
Conv2D
,
Pool2D
,
BatchNorm
,
Linear
,
Dropout
from
paddle
import
ParamAttr
from
paddle.
nn.initializer
import
MSRA
from
paddle.
nn
import
Conv2d
,
BatchNorm
,
Linear
,
Dropout
from
paddle.
nn
import
AdaptiveAvgPool2d
,
MaxPool2d
,
AvgPool2d
from
paddle.
regularizer
import
L2Decay
__all__
=
[
"ResNeSt50_fast_1s1x64d"
,
"ResNeSt50"
]
...
...
@@ -43,26 +43,23 @@ class ConvBNLayer(nn.Layer):
bn_decay
=
0.0
self
.
_conv
=
Conv2
D
(
num
_channels
=
num_channels
,
num_filter
s
=
num_filters
,
filter
_size
=
filter_size
,
self
.
_conv
=
Conv2
d
(
in
_channels
=
num_channels
,
out_channel
s
=
num_filters
,
kernel
_size
=
filter_size
,
stride
=
stride
,
padding
=
(
filter_size
-
1
)
//
2
,
dilation
=
dilation
,
groups
=
groups
,
act
=
None
,
param_attr
=
ParamAttr
(
name
=
name
+
"_weight"
),
weight_attr
=
ParamAttr
(
name
=
name
+
"_weight"
),
bias_attr
=
False
)
self
.
_batch_norm
=
BatchNorm
(
num_filters
,
act
=
act
,
param_attr
=
ParamAttr
(
name
=
name
+
"_scale"
,
regularizer
=
L2DecayRegularizer
(
regularization_coeff
=
bn_decay
)),
name
=
name
+
"_scale"
,
regularizer
=
L2Decay
(
bn_decay
)),
bias_attr
=
ParamAttr
(
name
+
"_offset"
,
regularizer
=
L2DecayRegularizer
(
regularization_coeff
=
bn_decay
)),
name
+
"_offset"
,
regularizer
=
L2Decay
(
bn_decay
)),
moving_mean_name
=
name
+
"_mean"
,
moving_variance_name
=
name
+
"_variance"
)
...
...
@@ -124,7 +121,7 @@ class SplatConv(nn.Layer):
act
=
"relu"
,
name
=
name
+
"_splat1"
)
self
.
avg_pool2d
=
Pool2D
(
pool_type
=
'avg'
,
global_pooling
=
True
)
self
.
avg_pool2d
=
AdaptiveAvgPool2d
(
1
)
inter_channels
=
int
(
max
(
in_channels
*
radix
//
reduction_factor
,
32
))
...
...
@@ -139,15 +136,14 @@ class SplatConv(nn.Layer):
name
=
name
+
"_splat2"
)
# to calc atten
self
.
conv3
=
Conv2
D
(
num
_channels
=
inter_channels
,
num_filter
s
=
channels
*
radix
,
filter
_size
=
1
,
self
.
conv3
=
Conv2
d
(
in
_channels
=
inter_channels
,
out_channel
s
=
channels
*
radix
,
kernel
_size
=
1
,
stride
=
1
,
padding
=
0
,
groups
=
groups
,
act
=
None
,
param_attr
=
ParamAttr
(
weight_attr
=
ParamAttr
(
name
=
name
+
"_splat_weights"
,
initializer
=
MSRA
()),
bias_attr
=
False
)
...
...
@@ -221,11 +217,8 @@ class BottleneckBlock(nn.Layer):
name
=
name
+
"_conv1"
)
if
avd
and
avd_first
and
(
stride
>
1
or
is_first
):
self
.
avg_pool2d_1
=
Pool2D
(
pool_size
=
3
,
pool_stride
=
stride
,
pool_padding
=
1
,
pool_type
=
"avg"
)
self
.
avg_pool2d_1
=
AvgPool2d
(
kernel_size
=
3
,
stride
=
stride
,
padding
=
1
)
if
radix
>=
1
:
self
.
conv2
=
SplatConv
(
...
...
@@ -252,11 +245,8 @@ class BottleneckBlock(nn.Layer):
name
=
name
+
"_conv2"
)
if
avd
and
avd_first
==
False
and
(
stride
>
1
or
is_first
):
self
.
avg_pool2d_2
=
Pool2D
(
pool_size
=
3
,
pool_stride
=
stride
,
pool_padding
=
1
,
pool_type
=
"avg"
)
self
.
avg_pool2d_2
=
AvgPool2d
(
kernel_size
=
3
,
stride
=
stride
,
padding
=
1
)
self
.
conv3
=
ConvBNLayer
(
num_channels
=
group_width
,
...
...
@@ -270,39 +260,31 @@ class BottleneckBlock(nn.Layer):
if
stride
!=
1
or
self
.
inplanes
!=
self
.
planes
*
4
:
if
avg_down
:
if
dilation
==
1
:
self
.
avg_pool2d_3
=
Pool2D
(
pool_size
=
stride
,
pool_stride
=
stride
,
pool_type
=
"avg"
,
ceil_mode
=
True
)
self
.
avg_pool2d_3
=
AvgPool2d
(
kernel_size
=
stride
,
stride
=
stride
,
padding
=
0
)
else
:
self
.
avg_pool2d_3
=
Pool2D
(
pool_size
=
1
,
pool_stride
=
1
,
pool_type
=
"avg"
,
ceil_mode
=
True
)
self
.
avg_pool2d_3
=
AvgPool2d
(
kernel_size
=
1
,
stride
=
1
,
padding
=
0
,
ceil_mode
=
True
)
self
.
conv4
=
Conv2
D
(
num
_channels
=
self
.
inplanes
,
num_filter
s
=
planes
*
4
,
filter
_size
=
1
,
self
.
conv4
=
Conv2
d
(
in
_channels
=
self
.
inplanes
,
out_channel
s
=
planes
*
4
,
kernel
_size
=
1
,
stride
=
1
,
padding
=
0
,
groups
=
1
,
act
=
None
,
param_attr
=
ParamAttr
(
weight_attr
=
ParamAttr
(
name
=
name
+
"_weights"
,
initializer
=
MSRA
()),
bias_attr
=
False
)
else
:
self
.
conv4
=
Conv2
D
(
num
_channels
=
self
.
inplanes
,
num_filter
s
=
planes
*
4
,
filter
_size
=
1
,
self
.
conv4
=
Conv2
d
(
in
_channels
=
self
.
inplanes
,
out_channel
s
=
planes
*
4
,
kernel
_size
=
1
,
stride
=
stride
,
padding
=
0
,
groups
=
1
,
act
=
None
,
param_attr
=
ParamAttr
(
weight_attr
=
ParamAttr
(
name
=
name
+
"_shortcut_weights"
,
initializer
=
MSRA
()),
bias_attr
=
False
)
...
...
@@ -312,12 +294,10 @@ class BottleneckBlock(nn.Layer):
act
=
None
,
param_attr
=
ParamAttr
(
name
=
name
+
"_shortcut_scale"
,
regularizer
=
L2DecayRegularizer
(
regularization_coeff
=
bn_decay
)),
regularizer
=
L2Decay
(
regularization_coeff
=
bn_decay
)),
bias_attr
=
ParamAttr
(
name
+
"_shortcut_offset"
,
regularizer
=
L2DecayRegularizer
(
regularization_coeff
=
bn_decay
)),
regularizer
=
L2Decay
(
regularization_coeff
=
bn_decay
)),
moving_mean_name
=
name
+
"_shortcut_mean"
,
moving_variance_name
=
name
+
"_shortcut_variance"
)
...
...
@@ -515,8 +495,7 @@ class ResNeSt(nn.Layer):
act
=
"relu"
,
name
=
"conv1"
)
self
.
max_pool2d
=
Pool2D
(
pool_size
=
3
,
pool_stride
=
2
,
pool_padding
=
1
,
pool_type
=
"max"
)
self
.
max_pool2d
=
MaxPool2d
(
kernel_size
=
3
,
stride
=
2
,
padding
=
1
)
self
.
layer1
=
ResNeStLayer
(
inplanes
=
self
.
stem_width
*
2
...
...
@@ -645,7 +624,7 @@ class ResNeSt(nn.Layer):
stride
=
2
,
name
=
"layer4"
)
self
.
pool2d_avg
=
Pool2D
(
pool_type
=
'avg'
,
global_pooling
=
True
)
self
.
pool2d_avg
=
AdaptiveAvgPool2d
(
1
)
self
.
out_channels
=
2048
...
...
@@ -654,7 +633,7 @@ class ResNeSt(nn.Layer):
self
.
out
=
Linear
(
self
.
out_channels
,
class_dim
,
param
_attr
=
ParamAttr
(
weight
_attr
=
ParamAttr
(
initializer
=
nn
.
initializer
.
Uniform
(
-
stdv
,
stdv
),
name
=
"fc_weights"
),
bias_attr
=
ParamAttr
(
name
=
"fc_offset"
))
...
...
ppcls/modeling/architectures/resnet.py
浏览文件 @
ea746480
...
...
@@ -18,16 +18,18 @@ from __future__ import print_function
import
numpy
as
np
import
paddle
import
paddle.fluid
as
fluid
from
paddle.fluid.param_attr
import
ParamAttr
from
paddle.fluid.dygraph.nn
import
Conv2D
,
Pool2D
,
BatchNorm
,
Linear
,
Dropout
from
paddle
import
ParamAttr
import
paddle.nn
as
nn
from
paddle.nn
import
Conv2d
,
BatchNorm
,
Linear
,
Dropout
from
paddle.nn
import
AdaptiveAvgPool2d
,
MaxPool2d
,
AvgPool2d
from
paddle.nn.initializer
import
Uniform
import
math
__all__
=
[
"ResNet18"
,
"ResNet34"
,
"ResNet50"
,
"ResNet101"
,
"ResNet152"
]
class
ConvBNLayer
(
fluid
.
dygraph
.
Layer
):
class
ConvBNLayer
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
num_filters
,
...
...
@@ -38,15 +40,14 @@ class ConvBNLayer(fluid.dygraph.Layer):
name
=
None
):
super
(
ConvBNLayer
,
self
).
__init__
()
self
.
_conv
=
Conv2
D
(
num
_channels
=
num_channels
,
num_filter
s
=
num_filters
,
filter
_size
=
filter_size
,
self
.
_conv
=
Conv2
d
(
in
_channels
=
num_channels
,
out_channel
s
=
num_filters
,
kernel
_size
=
filter_size
,
stride
=
stride
,
padding
=
(
filter_size
-
1
)
//
2
,
groups
=
groups
,
act
=
None
,
param_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
weight_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
bias_attr
=
False
)
if
name
==
"conv1"
:
bn_name
=
"bn_"
+
name
...
...
@@ -66,7 +67,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
return
y
class
BottleneckBlock
(
fluid
.
dygraph
.
Layer
):
class
BottleneckBlock
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
num_filters
,
...
...
@@ -117,11 +118,11 @@ class BottleneckBlock(fluid.dygraph.Layer):
else
:
short
=
self
.
short
(
inputs
)
y
=
fluid
.
layers
.
elementwise_add
(
x
=
short
,
y
=
conv2
,
act
=
"relu"
)
y
=
paddle
.
elementwise_add
(
x
=
short
,
y
=
conv2
,
act
=
"relu"
)
return
y
class
BasicBlock
(
fluid
.
dygraph
.
Layer
):
class
BasicBlock
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
num_filters
,
...
...
@@ -162,11 +163,11 @@ class BasicBlock(fluid.dygraph.Layer):
short
=
inputs
else
:
short
=
self
.
short
(
inputs
)
y
=
fluid
.
layers
.
elementwise_add
(
x
=
short
,
y
=
conv1
,
act
=
"relu"
)
y
=
paddle
.
elementwise_add
(
x
=
short
,
y
=
conv1
,
act
=
"relu"
)
return
y
class
ResNet
(
fluid
.
dygraph
.
Layer
):
class
ResNet
(
nn
.
Layer
):
def
__init__
(
self
,
layers
=
50
,
class_dim
=
1000
):
super
(
ResNet
,
self
).
__init__
()
...
...
@@ -195,8 +196,7 @@ class ResNet(fluid.dygraph.Layer):
stride
=
2
,
act
=
"relu"
,
name
=
"conv1"
)
self
.
pool2d_max
=
Pool2D
(
pool_size
=
3
,
pool_stride
=
2
,
pool_padding
=
1
,
pool_type
=
"max"
)
self
.
pool2d_max
=
MaxPool2d
(
kernel_size
=
3
,
stride
=
2
,
padding
=
1
)
self
.
block_list
=
[]
if
layers
>=
50
:
...
...
@@ -238,8 +238,7 @@ class ResNet(fluid.dygraph.Layer):
self
.
block_list
.
append
(
basic_block
)
shortcut
=
True
self
.
pool2d_avg
=
Pool2D
(
pool_size
=
7
,
pool_type
=
'avg'
,
global_pooling
=
True
)
self
.
pool2d_avg
=
AdaptiveAvgPool2d
(
1
)
self
.
pool2d_avg_channels
=
num_channels
[
-
1
]
*
2
...
...
@@ -248,9 +247,8 @@ class ResNet(fluid.dygraph.Layer):
self
.
out
=
Linear
(
self
.
pool2d_avg_channels
,
class_dim
,
param_attr
=
ParamAttr
(
initializer
=
fluid
.
initializer
.
Uniform
(
-
stdv
,
stdv
),
name
=
"fc_0.w_0"
),
weight_attr
=
ParamAttr
(
initializer
=
Uniform
(
-
stdv
,
stdv
),
name
=
"fc_0.w_0"
),
bias_attr
=
ParamAttr
(
name
=
"fc_0.b_0"
))
def
forward
(
self
,
inputs
):
...
...
@@ -259,7 +257,7 @@ class ResNet(fluid.dygraph.Layer):
for
block
in
self
.
block_list
:
y
=
block
(
y
)
y
=
self
.
pool2d_avg
(
y
)
y
=
fluid
.
layers
.
reshape
(
y
,
shape
=
[
-
1
,
self
.
pool2d_avg_channels
])
y
=
paddle
.
reshape
(
y
,
shape
=
[
-
1
,
self
.
pool2d_avg_channels
])
y
=
self
.
out
(
y
)
return
y
...
...
ppcls/modeling/architectures/resnet_vc.py
浏览文件 @
ea746480
...
...
@@ -18,9 +18,11 @@ from __future__ import print_function
import
numpy
as
np
import
paddle
import
paddle.fluid
as
fluid
from
paddle.fluid.param_attr
import
ParamAttr
from
paddle.fluid.dygraph.nn
import
Conv2D
,
Pool2D
,
BatchNorm
,
Linear
,
Dropout
from
paddle
import
ParamAttr
import
paddle.nn
as
nn
from
paddle.nn
import
Conv2d
,
BatchNorm
,
Linear
,
Dropout
from
paddle.nn
import
AdaptiveAvgPool2d
,
MaxPool2d
,
AvgPool2d
from
paddle.nn.initializer
import
Uniform
import
math
...
...
@@ -29,7 +31,7 @@ __all__ = [
]
class
ConvBNLayer
(
fluid
.
dygraph
.
Layer
):
class
ConvBNLayer
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
num_filters
,
...
...
@@ -40,15 +42,14 @@ class ConvBNLayer(fluid.dygraph.Layer):
name
=
None
):
super
(
ConvBNLayer
,
self
).
__init__
()
self
.
_conv
=
Conv2
D
(
num
_channels
=
num_channels
,
num_filter
s
=
num_filters
,
filter
_size
=
filter_size
,
self
.
_conv
=
Conv2
d
(
in
_channels
=
num_channels
,
out_channel
s
=
num_filters
,
kernel
_size
=
filter_size
,
stride
=
stride
,
padding
=
(
filter_size
-
1
)
//
2
,
groups
=
groups
,
act
=
None
,
param_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
weight_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
bias_attr
=
False
)
if
name
==
"conv1"
:
bn_name
=
"bn_"
+
name
...
...
@@ -68,7 +69,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
return
y
class
BottleneckBlock
(
fluid
.
dygraph
.
Layer
):
class
BottleneckBlock
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
num_filters
,
...
...
@@ -119,11 +120,11 @@ class BottleneckBlock(fluid.dygraph.Layer):
else
:
short
=
self
.
short
(
inputs
)
y
=
fluid
.
layers
.
elementwise_add
(
x
=
short
,
y
=
conv2
,
act
=
'relu'
)
y
=
paddle
.
elementwise_add
(
x
=
short
,
y
=
conv2
,
act
=
'relu'
)
return
y
class
BasicBlock
(
fluid
.
dygraph
.
Layer
):
class
BasicBlock
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
num_filters
,
...
...
@@ -164,11 +165,11 @@ class BasicBlock(fluid.dygraph.Layer):
short
=
inputs
else
:
short
=
self
.
short
(
inputs
)
y
=
fluid
.
layers
.
elementwise_add
(
x
=
short
,
y
=
conv1
,
act
=
'relu'
)
y
=
paddle
.
elementwise_add
(
x
=
short
,
y
=
conv1
,
act
=
'relu'
)
return
y
class
ResNet_vc
(
fluid
.
dygraph
.
Layer
):
class
ResNet_vc
(
nn
.
Layer
):
def
__init__
(
self
,
layers
=
50
,
class_dim
=
1000
):
super
(
ResNet_vc
,
self
).
__init__
()
...
...
@@ -212,8 +213,7 @@ class ResNet_vc(fluid.dygraph.Layer):
act
=
'relu'
,
name
=
"conv1_3"
)
self
.
pool2d_max
=
Pool2D
(
pool_size
=
3
,
pool_stride
=
2
,
pool_padding
=
1
,
pool_type
=
'max'
)
self
.
pool2d_max
=
MaxPool2d
(
kernel_size
=
3
,
stride
=
2
,
padding
=
1
)
self
.
block_list
=
[]
if
layers
>=
50
:
...
...
@@ -255,8 +255,7 @@ class ResNet_vc(fluid.dygraph.Layer):
self
.
block_list
.
append
(
basic_block
)
shortcut
=
True
self
.
pool2d_avg
=
Pool2D
(
pool_size
=
7
,
pool_type
=
'avg'
,
global_pooling
=
True
)
self
.
pool2d_avg
=
AdaptiveAvgPool2d
(
1
)
self
.
pool2d_avg_channels
=
num_channels
[
-
1
]
*
2
...
...
@@ -265,9 +264,8 @@ class ResNet_vc(fluid.dygraph.Layer):
self
.
out
=
Linear
(
self
.
pool2d_avg_channels
,
class_dim
,
param_attr
=
ParamAttr
(
initializer
=
fluid
.
initializer
.
Uniform
(
-
stdv
,
stdv
),
name
=
"fc_0.w_0"
),
weight_attr
=
ParamAttr
(
initializer
=
Uniform
(
-
stdv
,
stdv
),
name
=
"fc_0.w_0"
),
bias_attr
=
ParamAttr
(
name
=
"fc_0.b_0"
))
def
forward
(
self
,
inputs
):
...
...
@@ -278,7 +276,7 @@ class ResNet_vc(fluid.dygraph.Layer):
for
block
in
self
.
block_list
:
y
=
block
(
y
)
y
=
self
.
pool2d_avg
(
y
)
y
=
fluid
.
layers
.
reshape
(
y
,
shape
=
[
-
1
,
self
.
pool2d_avg_channels
])
y
=
paddle
.
reshape
(
y
,
shape
=
[
-
1
,
self
.
pool2d_avg_channels
])
y
=
self
.
out
(
y
)
return
y
...
...
ppcls/modeling/architectures/resnet_vd.py
浏览文件 @
ea746480
...
...
@@ -18,9 +18,11 @@ from __future__ import print_function
import
numpy
as
np
import
paddle
import
paddle.fluid
as
fluid
from
paddle.fluid.param_attr
import
ParamAttr
from
paddle.fluid.dygraph.nn
import
Conv2D
,
Pool2D
,
BatchNorm
,
Linear
,
Dropout
from
paddle
import
ParamAttr
import
paddle.nn
as
nn
from
paddle.nn
import
Conv2d
,
BatchNorm
,
Linear
,
Dropout
from
paddle.nn
import
AdaptiveAvgPool2d
,
MaxPool2d
,
AvgPool2d
from
paddle.nn.initializer
import
Uniform
import
math
...
...
@@ -29,7 +31,7 @@ __all__ = [
]
class
ConvBNLayer
(
fluid
.
dygraph
.
Layer
):
class
ConvBNLayer
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
...
...
@@ -43,21 +45,16 @@ class ConvBNLayer(fluid.dygraph.Layer):
super
(
ConvBNLayer
,
self
).
__init__
()
self
.
is_vd_mode
=
is_vd_mode
self
.
_pool2d_avg
=
Pool2D
(
pool_size
=
2
,
pool_stride
=
2
,
pool_padding
=
0
,
pool_type
=
'avg'
,
ceil_mode
=
True
)
self
.
_conv
=
Conv2D
(
num_channels
=
num_channels
,
num_filters
=
num_filters
,
filter_size
=
filter_size
,
self
.
_pool2d_avg
=
AvgPool2d
(
kernel_size
=
2
,
stride
=
2
,
padding
=
0
,
ceil_mode
=
True
)
self
.
_conv
=
Conv2d
(
in_channels
=
num_channels
,
out_channels
=
num_filters
,
kernel_size
=
filter_size
,
stride
=
stride
,
padding
=
(
filter_size
-
1
)
//
2
,
groups
=
groups
,
act
=
None
,
param_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
weight_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
bias_attr
=
False
)
if
name
==
"conv1"
:
bn_name
=
"bn_"
+
name
...
...
@@ -79,7 +76,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
return
y
class
BottleneckBlock
(
fluid
.
dygraph
.
Layer
):
class
BottleneckBlock
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
num_filters
,
...
...
@@ -129,11 +126,11 @@ class BottleneckBlock(fluid.dygraph.Layer):
short
=
inputs
else
:
short
=
self
.
short
(
inputs
)
y
=
fluid
.
layers
.
elementwise_add
(
x
=
short
,
y
=
conv2
,
act
=
'relu'
)
y
=
paddle
.
elementwise_add
(
x
=
short
,
y
=
conv2
,
act
=
'relu'
)
return
y
class
BasicBlock
(
fluid
.
dygraph
.
Layer
):
class
BasicBlock
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
num_filters
,
...
...
@@ -176,11 +173,11 @@ class BasicBlock(fluid.dygraph.Layer):
short
=
inputs
else
:
short
=
self
.
short
(
inputs
)
y
=
fluid
.
layers
.
elementwise_add
(
x
=
short
,
y
=
conv1
,
act
=
'relu'
)
y
=
paddle
.
elementwise_add
(
x
=
short
,
y
=
conv1
,
act
=
'relu'
)
return
y
class
ResNet_vd
(
fluid
.
dygraph
.
Layer
):
class
ResNet_vd
(
nn
.
Layer
):
def
__init__
(
self
,
layers
=
50
,
class_dim
=
1000
):
super
(
ResNet_vd
,
self
).
__init__
()
...
...
@@ -225,8 +222,7 @@ class ResNet_vd(fluid.dygraph.Layer):
stride
=
1
,
act
=
'relu'
,
name
=
"conv1_3"
)
self
.
pool2d_max
=
Pool2D
(
pool_size
=
3
,
pool_stride
=
2
,
pool_padding
=
1
,
pool_type
=
'max'
)
self
.
pool2d_max
=
MaxPool2d
(
kernel_size
=
3
,
stride
=
2
,
padding
=
1
)
self
.
block_list
=
[]
if
layers
>=
50
:
...
...
@@ -270,8 +266,7 @@ class ResNet_vd(fluid.dygraph.Layer):
self
.
block_list
.
append
(
basic_block
)
shortcut
=
True
self
.
pool2d_avg
=
Pool2D
(
pool_size
=
7
,
pool_type
=
'avg'
,
global_pooling
=
True
)
self
.
pool2d_avg
=
AdaptiveAvgPool2d
(
1
)
self
.
pool2d_avg_channels
=
num_channels
[
-
1
]
*
2
...
...
@@ -280,9 +275,8 @@ class ResNet_vd(fluid.dygraph.Layer):
self
.
out
=
Linear
(
self
.
pool2d_avg_channels
,
class_dim
,
param_attr
=
ParamAttr
(
initializer
=
fluid
.
initializer
.
Uniform
(
-
stdv
,
stdv
),
name
=
"fc_0.w_0"
),
weight_attr
=
ParamAttr
(
initializer
=
Uniform
(
-
stdv
,
stdv
),
name
=
"fc_0.w_0"
),
bias_attr
=
ParamAttr
(
name
=
"fc_0.b_0"
))
def
forward
(
self
,
inputs
):
...
...
@@ -293,7 +287,7 @@ class ResNet_vd(fluid.dygraph.Layer):
for
block
in
self
.
block_list
:
y
=
block
(
y
)
y
=
self
.
pool2d_avg
(
y
)
y
=
fluid
.
layers
.
reshape
(
y
,
shape
=
[
-
1
,
self
.
pool2d_avg_channels
])
y
=
paddle
.
reshape
(
y
,
shape
=
[
-
1
,
self
.
pool2d_avg_channels
])
y
=
self
.
out
(
y
)
return
y
...
...
ppcls/modeling/architectures/resnext.py
浏览文件 @
ea746480
...
...
@@ -18,9 +18,11 @@ from __future__ import print_function
import
numpy
as
np
import
paddle
import
paddle.fluid
as
fluid
from
paddle.fluid.param_attr
import
ParamAttr
from
paddle.fluid.dygraph.nn
import
Conv2D
,
Pool2D
,
BatchNorm
,
Linear
,
Dropout
from
paddle
import
ParamAttr
import
paddle.nn
as
nn
from
paddle.nn
import
Conv2d
,
BatchNorm
,
Linear
,
Dropout
from
paddle.nn
import
AdaptiveAvgPool2d
,
MaxPool2d
,
AvgPool2d
from
paddle.nn.initializer
import
Uniform
import
math
...
...
@@ -30,7 +32,7 @@ __all__ = [
]
class
ConvBNLayer
(
fluid
.
dygraph
.
Layer
):
class
ConvBNLayer
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
num_filters
,
...
...
@@ -41,15 +43,14 @@ class ConvBNLayer(fluid.dygraph.Layer):
name
=
None
):
super
(
ConvBNLayer
,
self
).
__init__
()
self
.
_conv
=
Conv2
D
(
num
_channels
=
num_channels
,
num_filter
s
=
num_filters
,
filter
_size
=
filter_size
,
self
.
_conv
=
Conv2
d
(
in
_channels
=
num_channels
,
out_channel
s
=
num_filters
,
kernel
_size
=
filter_size
,
stride
=
stride
,
padding
=
(
filter_size
-
1
)
//
2
,
groups
=
groups
,
act
=
None
,
param_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
weight_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
bias_attr
=
False
)
if
name
==
"conv1"
:
bn_name
=
"bn_"
+
name
...
...
@@ -69,7 +70,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
return
y
class
BottleneckBlock
(
fluid
.
dygraph
.
Layer
):
class
BottleneckBlock
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
num_filters
,
...
...
@@ -121,11 +122,11 @@ class BottleneckBlock(fluid.dygraph.Layer):
else
:
short
=
self
.
short
(
inputs
)
y
=
fluid
.
layers
.
elementwise_add
(
x
=
short
,
y
=
conv2
,
act
=
'relu'
)
y
=
paddle
.
elementwise_add
(
x
=
short
,
y
=
conv2
,
act
=
'relu'
)
return
y
class
ResNeXt
(
fluid
.
dygraph
.
Layer
):
class
ResNeXt
(
nn
.
Layer
):
def
__init__
(
self
,
layers
=
50
,
class_dim
=
1000
,
cardinality
=
32
):
super
(
ResNeXt
,
self
).
__init__
()
...
...
@@ -156,8 +157,7 @@ class ResNeXt(fluid.dygraph.Layer):
stride
=
2
,
act
=
'relu'
,
name
=
"res_conv1"
)
self
.
pool2d_max
=
Pool2D
(
pool_size
=
3
,
pool_stride
=
2
,
pool_padding
=
1
,
pool_type
=
'max'
)
self
.
pool2d_max
=
MaxPool2d
(
kernel_size
=
3
,
stride
=
2
,
padding
=
1
)
self
.
block_list
=
[]
for
block
in
range
(
len
(
depth
)):
...
...
@@ -183,8 +183,7 @@ class ResNeXt(fluid.dygraph.Layer):
self
.
block_list
.
append
(
bottleneck_block
)
shortcut
=
True
self
.
pool2d_avg
=
Pool2D
(
pool_size
=
7
,
pool_type
=
'avg'
,
global_pooling
=
True
)
self
.
pool2d_avg
=
AdaptiveAvgPool2d
(
1
)
self
.
pool2d_avg_channels
=
num_channels
[
-
1
]
*
2
...
...
@@ -193,9 +192,8 @@ class ResNeXt(fluid.dygraph.Layer):
self
.
out
=
Linear
(
self
.
pool2d_avg_channels
,
class_dim
,
param_attr
=
ParamAttr
(
initializer
=
fluid
.
initializer
.
Uniform
(
-
stdv
,
stdv
),
name
=
"fc_weights"
),
weight_attr
=
ParamAttr
(
initializer
=
Uniform
(
-
stdv
,
stdv
),
name
=
"fc_weights"
),
bias_attr
=
ParamAttr
(
name
=
"fc_offset"
))
def
forward
(
self
,
inputs
):
...
...
@@ -204,7 +202,7 @@ class ResNeXt(fluid.dygraph.Layer):
for
block
in
self
.
block_list
:
y
=
block
(
y
)
y
=
self
.
pool2d_avg
(
y
)
y
=
fluid
.
layers
.
reshape
(
y
,
shape
=
[
-
1
,
self
.
pool2d_avg_channels
])
y
=
paddle
.
reshape
(
y
,
shape
=
[
-
1
,
self
.
pool2d_avg_channels
])
y
=
self
.
out
(
y
)
return
y
...
...
ppcls/modeling/architectures/resnext101_wsl.py
浏览文件 @
ea746480
import
paddle
import
paddle.fluid
as
fluid
from
paddle.fluid.param_attr
import
ParamAttr
from
paddle.fluid.dygraph.nn
import
Conv2D
,
Pool2D
,
BatchNorm
,
Linear
from
paddle
import
ParamAttr
import
paddle.nn
as
nn
import
paddle.nn.functional
as
F
from
paddle.nn
import
Conv2d
,
BatchNorm
,
Linear
,
Dropout
from
paddle.nn
import
AdaptiveAvgPool2d
,
MaxPool2d
,
AvgPool2d
from
paddle.nn.initializer
import
Uniform
__all__
=
[
"ResNeXt101_32x8d_wsl"
,
"ResNeXt101_wsl_32x16
d_wsl"
,
"ResNeXt101_wsl_32x32d_wsl"
,
"ResNeXt101_wsl_32x48d_wsl"
]
__all__
=
[
"ResNeXt101_32x8d_wsl"
,
"ResNeXt101_32x16d_wsl"
,
"ResNeXt101_32x32
d_wsl"
,
"ResNeXt101_32x48d_wsl"
]
class
ConvBNLayer
(
fluid
.
dygraph
.
Layer
):
class
ConvBNLayer
(
nn
.
Layer
):
def
__init__
(
self
,
input_channels
,
output_channels
,
...
...
@@ -22,14 +26,14 @@ class ConvBNLayer(fluid.dygraph.Layer):
conv_name
=
name
+
".0"
else
:
conv_name
=
name
self
.
_conv
=
Conv2D
(
num_channels
=
input_channels
,
num_filters
=
output_channels
,
filter_size
=
filter_size
,
self
.
_conv
=
Conv2d
(
in_channels
=
input_channels
,
out_channels
=
output_channels
,
kernel_size
=
filter_size
,
stride
=
stride
,
padding
=
(
filter_size
-
1
)
//
2
,
padding
=
(
filter_size
-
1
)
//
2
,
groups
=
groups
,
act
=
None
,
param_attr
=
ParamAttr
(
name
=
conv_name
+
".weight"
),
weight_attr
=
ParamAttr
(
name
=
conv_name
+
".weight"
),
bias_attr
=
False
)
if
"downsample"
in
name
:
bn_name
=
name
[:
9
]
+
"downsample.1"
...
...
@@ -37,8 +41,10 @@ class ConvBNLayer(fluid.dygraph.Layer):
if
"conv1"
==
name
:
bn_name
=
"bn"
+
name
[
-
1
]
else
:
bn_name
=
(
name
[:
10
]
if
name
[
7
:
9
].
isdigit
()
else
name
[:
9
])
+
"bn"
+
name
[
-
1
]
self
.
_bn
=
BatchNorm
(
num_channels
=
output_channels
,
bn_name
=
(
name
[:
10
]
if
name
[
7
:
9
].
isdigit
()
else
name
[:
9
]
)
+
"bn"
+
name
[
-
1
]
self
.
_bn
=
BatchNorm
(
num_channels
=
output_channels
,
act
=
act
,
param_attr
=
ParamAttr
(
name
=
bn_name
+
".weight"
),
bias_attr
=
ParamAttr
(
name
=
bn_name
+
".bias"
),
...
...
@@ -50,43 +56,68 @@ class ConvBNLayer(fluid.dygraph.Layer):
x
=
self
.
_bn
(
x
)
return
x
class
ShortCut
(
fluid
.
dygraph
.
Layer
):
class
ShortCut
(
nn
.
Layer
):
def
__init__
(
self
,
input_channels
,
output_channels
,
stride
,
name
=
None
):
super
(
ShortCut
,
self
).
__init__
()
self
.
input_channels
=
input_channels
self
.
output_channels
=
output_channels
self
.
stride
=
stride
if
input_channels
!=
output_channels
or
stride
!=
1
:
if
input_channels
!=
output_channels
or
stride
!=
1
:
self
.
_conv
=
ConvBNLayer
(
input_channels
,
output_channels
,
filter_size
=
1
,
stride
=
stride
,
name
=
name
)
input_channels
,
output_channels
,
filter_size
=
1
,
stride
=
stride
,
name
=
name
)
def
forward
(
self
,
inputs
):
if
self
.
input_channels
!=
self
.
output_channels
or
self
.
stride
!=
1
:
if
self
.
input_channels
!=
self
.
output_channels
or
self
.
stride
!=
1
:
return
self
.
_conv
(
inputs
)
return
inputs
class
BottleneckBlock
(
fluid
.
dygraph
.
Layer
):
def
__init__
(
self
,
input_channels
,
output_channels
,
stride
,
cardinality
,
width
,
name
):
class
BottleneckBlock
(
nn
.
Layer
):
def
__init__
(
self
,
input_channels
,
output_channels
,
stride
,
cardinality
,
width
,
name
):
super
(
BottleneckBlock
,
self
).
__init__
()
self
.
_conv0
=
ConvBNLayer
(
input_channels
,
output_channels
,
filter_size
=
1
,
act
=
"relu"
,
name
=
name
+
".conv1"
)
input_channels
,
output_channels
,
filter_size
=
1
,
act
=
"relu"
,
name
=
name
+
".conv1"
)
self
.
_conv1
=
ConvBNLayer
(
output_channels
,
output_channels
,
filter_size
=
3
,
act
=
"relu"
,
stride
=
stride
,
groups
=
cardinality
,
name
=
name
+
".conv2"
)
output_channels
,
output_channels
,
filter_size
=
3
,
act
=
"relu"
,
stride
=
stride
,
groups
=
cardinality
,
name
=
name
+
".conv2"
)
self
.
_conv2
=
ConvBNLayer
(
output_channels
,
output_channels
//
(
width
//
8
),
filter_size
=
1
,
act
=
None
,
name
=
name
+
".conv3"
)
output_channels
,
output_channels
//
(
width
//
8
),
filter_size
=
1
,
act
=
None
,
name
=
name
+
".conv3"
)
self
.
_short
=
ShortCut
(
input_channels
,
output_channels
//
(
width
//
8
),
stride
=
stride
,
name
=
name
+
".downsample"
)
input_channels
,
output_channels
//
(
width
//
8
),
stride
=
stride
,
name
=
name
+
".downsample"
)
def
forward
(
self
,
inputs
):
x
=
self
.
_conv0
(
inputs
)
x
=
self
.
_conv1
(
x
)
x
=
self
.
_conv2
(
x
)
y
=
self
.
_short
(
inputs
)
return
fluid
.
layers
.
elementwise_add
(
x
,
y
,
act
=
"relu"
)
return
paddle
.
elementwise_add
(
x
,
y
,
act
=
"relu"
)
class
ResNeXt101WSL
(
fluid
.
dygraph
.
Layer
):
class
ResNeXt101WSL
(
nn
.
Layer
):
def
__init__
(
self
,
layers
=
101
,
cardinality
=
32
,
width
=
48
,
class_dim
=
1000
):
super
(
ResNeXt101WSL
,
self
).
__init__
()
...
...
@@ -95,92 +126,256 @@ class ResNeXt101WSL(fluid.dygraph.Layer):
self
.
layers
=
layers
self
.
cardinality
=
cardinality
self
.
width
=
width
self
.
scale
=
width
//
8
self
.
scale
=
width
//
8
self
.
depth
=
[
3
,
4
,
23
,
3
]
self
.
base_width
=
cardinality
*
width
num_filters
=
[
self
.
base_width
*
i
for
i
in
[
1
,
2
,
4
,
8
]]
#[256, 512, 1024, 2048]
num_filters
=
[
self
.
base_width
*
i
for
i
in
[
1
,
2
,
4
,
8
]]
# [256, 512, 1024, 2048]
self
.
_conv_stem
=
ConvBNLayer
(
3
,
64
,
7
,
stride
=
2
,
act
=
"relu"
,
name
=
"conv1"
)
self
.
_pool
=
Pool2D
(
pool_size
=
3
,
pool_stride
=
2
,
pool_padding
=
1
,
pool_type
=
"max"
)
self
.
_pool
=
MaxPool2d
(
kernel_size
=
3
,
stride
=
2
,
padding
=
1
)
self
.
_conv1_0
=
BottleneckBlock
(
64
,
num_filters
[
0
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer1.0"
)
64
,
num_filters
[
0
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer1.0"
)
self
.
_conv1_1
=
BottleneckBlock
(
num_filters
[
0
]
//
(
width
//
8
),
num_filters
[
0
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer1.1"
)
num_filters
[
0
]
//
(
width
//
8
),
num_filters
[
0
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer1.1"
)
self
.
_conv1_2
=
BottleneckBlock
(
num_filters
[
0
]
//
(
width
//
8
),
num_filters
[
0
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer1.2"
)
num_filters
[
0
]
//
(
width
//
8
),
num_filters
[
0
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer1.2"
)
self
.
_conv2_0
=
BottleneckBlock
(
num_filters
[
0
]
//
(
width
//
8
),
num_filters
[
1
],
stride
=
2
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer2.0"
)
num_filters
[
0
]
//
(
width
//
8
),
num_filters
[
1
],
stride
=
2
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer2.0"
)
self
.
_conv2_1
=
BottleneckBlock
(
num_filters
[
1
]
//
(
width
//
8
),
num_filters
[
1
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer2.1"
)
num_filters
[
1
]
//
(
width
//
8
),
num_filters
[
1
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer2.1"
)
self
.
_conv2_2
=
BottleneckBlock
(
num_filters
[
1
]
//
(
width
//
8
),
num_filters
[
1
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer2.2"
)
num_filters
[
1
]
//
(
width
//
8
),
num_filters
[
1
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer2.2"
)
self
.
_conv2_3
=
BottleneckBlock
(
num_filters
[
1
]
//
(
width
//
8
),
num_filters
[
1
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer2.3"
)
num_filters
[
1
]
//
(
width
//
8
),
num_filters
[
1
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer2.3"
)
self
.
_conv3_0
=
BottleneckBlock
(
num_filters
[
1
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
2
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.0"
)
num_filters
[
1
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
2
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.0"
)
self
.
_conv3_1
=
BottleneckBlock
(
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.1"
)
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.1"
)
self
.
_conv3_2
=
BottleneckBlock
(
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.2"
)
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.2"
)
self
.
_conv3_3
=
BottleneckBlock
(
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.3"
)
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.3"
)
self
.
_conv3_4
=
BottleneckBlock
(
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.4"
)
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.4"
)
self
.
_conv3_5
=
BottleneckBlock
(
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.5"
)
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.5"
)
self
.
_conv3_6
=
BottleneckBlock
(
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.6"
)
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.6"
)
self
.
_conv3_7
=
BottleneckBlock
(
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.7"
)
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.7"
)
self
.
_conv3_8
=
BottleneckBlock
(
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.8"
)
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.8"
)
self
.
_conv3_9
=
BottleneckBlock
(
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.9"
)
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.9"
)
self
.
_conv3_10
=
BottleneckBlock
(
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.10"
)
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.10"
)
self
.
_conv3_11
=
BottleneckBlock
(
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.11"
)
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.11"
)
self
.
_conv3_12
=
BottleneckBlock
(
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.12"
)
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.12"
)
self
.
_conv3_13
=
BottleneckBlock
(
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.13"
)
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.13"
)
self
.
_conv3_14
=
BottleneckBlock
(
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.14"
)
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.14"
)
self
.
_conv3_15
=
BottleneckBlock
(
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.15"
)
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.15"
)
self
.
_conv3_16
=
BottleneckBlock
(
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.16"
)
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.16"
)
self
.
_conv3_17
=
BottleneckBlock
(
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.17"
)
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.17"
)
self
.
_conv3_18
=
BottleneckBlock
(
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.18"
)
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.18"
)
self
.
_conv3_19
=
BottleneckBlock
(
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.19"
)
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.19"
)
self
.
_conv3_20
=
BottleneckBlock
(
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.20"
)
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.20"
)
self
.
_conv3_21
=
BottleneckBlock
(
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.21"
)
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.21"
)
self
.
_conv3_22
=
BottleneckBlock
(
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.22"
)
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
2
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer3.22"
)
self
.
_conv4_0
=
BottleneckBlock
(
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
3
],
stride
=
2
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer4.0"
)
num_filters
[
2
]
//
(
width
//
8
),
num_filters
[
3
],
stride
=
2
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer4.0"
)
self
.
_conv4_1
=
BottleneckBlock
(
num_filters
[
3
]
//
(
width
//
8
),
num_filters
[
3
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer4.1"
)
num_filters
[
3
]
//
(
width
//
8
),
num_filters
[
3
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer4.1"
)
self
.
_conv4_2
=
BottleneckBlock
(
num_filters
[
3
]
//
(
width
//
8
),
num_filters
[
3
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer4.2"
)
num_filters
[
3
]
//
(
width
//
8
),
num_filters
[
3
],
stride
=
1
,
cardinality
=
self
.
cardinality
,
width
=
self
.
width
,
name
=
"layer4.2"
)
self
.
_avg_pool
=
Pool2D
(
pool_type
=
"avg"
,
global_pooling
=
True
)
self
.
_out
=
Linear
(
input_dim
=
num_filters
[
3
]
//
(
width
//
8
),
output_dim
=
class_dim
,
param_attr
=
ParamAttr
(
name
=
"fc.weight"
),
self
.
_avg_pool
=
AdaptiveAvgPool2d
(
1
)
self
.
_out
=
Linear
(
num_filters
[
3
]
//
(
width
//
8
),
class_dim
,
weight_attr
=
ParamAttr
(
name
=
"fc.weight"
),
bias_attr
=
ParamAttr
(
name
=
"fc.bias"
))
def
forward
(
self
,
inputs
):
...
...
@@ -225,22 +420,26 @@ class ResNeXt101WSL(fluid.dygraph.Layer):
x
=
self
.
_conv4_2
(
x
)
x
=
self
.
_avg_pool
(
x
)
x
=
fluid
.
layers
.
squeeze
(
x
,
axe
s
=
[
2
,
3
])
x
=
paddle
.
squeeze
(
x
,
axi
s
=
[
2
,
3
])
x
=
self
.
_out
(
x
)
return
x
def
ResNeXt101_32x8d_wsl
(
**
args
):
model
=
ResNeXt101WSL
(
cardinality
=
32
,
width
=
8
,
**
args
)
return
model
def
ResNeXt101_32x16d_wsl
(
**
args
):
model
=
ResNeXt101WSL
(
cardinality
=
32
,
width
=
16
,
**
args
)
return
model
def
ResNeXt101_32x32d_wsl
(
**
args
):
model
=
ResNeXt101WSL
(
cardinality
=
32
,
width
=
32
,
**
args
)
return
model
def
ResNeXt101_32x48d_wsl
(
**
args
):
model
=
ResNeXt101WSL
(
cardinality
=
32
,
width
=
48
,
**
args
)
return
model
ppcls/modeling/architectures/resnext_vd.py
浏览文件 @
ea746480
...
...
@@ -18,9 +18,11 @@ from __future__ import print_function
import
numpy
as
np
import
paddle
import
paddle.fluid
as
fluid
from
paddle.fluid.param_attr
import
ParamAttr
from
paddle.fluid.dygraph.nn
import
Conv2D
,
Pool2D
,
BatchNorm
,
Linear
,
Dropout
from
paddle
import
ParamAttr
import
paddle.nn
as
nn
from
paddle.nn
import
Conv2d
,
BatchNorm
,
Linear
,
Dropout
from
paddle.nn
import
AdaptiveAvgPool2d
,
MaxPool2d
,
AvgPool2d
from
paddle.nn.initializer
import
Uniform
import
math
...
...
@@ -30,7 +32,7 @@ __all__ = [
]
class
ConvBNLayer
(
fluid
.
dygraph
.
Layer
):
class
ConvBNLayer
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
...
...
@@ -44,21 +46,16 @@ class ConvBNLayer(fluid.dygraph.Layer):
super
(
ConvBNLayer
,
self
).
__init__
()
self
.
is_vd_mode
=
is_vd_mode
self
.
_pool2d_avg
=
Pool2D
(
pool_size
=
2
,
pool_stride
=
2
,
pool_padding
=
0
,
pool_type
=
'avg'
,
ceil_mode
=
True
)
self
.
_conv
=
Conv2D
(
num_channels
=
num_channels
,
num_filters
=
num_filters
,
filter_size
=
filter_size
,
self
.
_pool2d_avg
=
AvgPool2d
(
kernel_size
=
2
,
stride
=
2
,
padding
=
0
,
ceil_mode
=
True
)
self
.
_conv
=
Conv2d
(
in_channels
=
num_channels
,
out_channels
=
num_filters
,
kernel_size
=
filter_size
,
stride
=
stride
,
padding
=
(
filter_size
-
1
)
//
2
,
groups
=
groups
,
act
=
None
,
param_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
weight_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
bias_attr
=
False
)
if
name
==
"conv1"
:
bn_name
=
"bn_"
+
name
...
...
@@ -80,7 +77,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
return
y
class
BottleneckBlock
(
fluid
.
dygraph
.
Layer
):
class
BottleneckBlock
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
num_filters
,
...
...
@@ -134,11 +131,11 @@ class BottleneckBlock(fluid.dygraph.Layer):
else
:
short
=
self
.
short
(
inputs
)
y
=
fluid
.
layers
.
elementwise_add
(
x
=
short
,
y
=
conv2
,
act
=
'relu'
)
y
=
paddle
.
elementwise_add
(
x
=
short
,
y
=
conv2
,
act
=
'relu'
)
return
y
class
ResNeXt
(
fluid
.
dygraph
.
Layer
):
class
ResNeXt
(
nn
.
Layer
):
def
__init__
(
self
,
layers
=
50
,
class_dim
=
1000
,
cardinality
=
32
):
super
(
ResNeXt
,
self
).
__init__
()
...
...
@@ -184,8 +181,7 @@ class ResNeXt(fluid.dygraph.Layer):
act
=
'relu'
,
name
=
"conv1_3"
)
self
.
pool2d_max
=
Pool2D
(
pool_size
=
3
,
pool_stride
=
2
,
pool_padding
=
1
,
pool_type
=
'max'
)
self
.
pool2d_max
=
MaxPool2d
(
kernel_size
=
3
,
stride
=
2
,
padding
=
1
)
self
.
block_list
=
[]
for
block
in
range
(
len
(
depth
)):
...
...
@@ -212,8 +208,7 @@ class ResNeXt(fluid.dygraph.Layer):
self
.
block_list
.
append
(
bottleneck_block
)
shortcut
=
True
self
.
pool2d_avg
=
Pool2D
(
pool_size
=
7
,
pool_type
=
'avg'
,
global_pooling
=
True
)
self
.
pool2d_avg
=
AdaptiveAvgPool2d
(
1
)
self
.
pool2d_avg_channels
=
num_channels
[
-
1
]
*
2
...
...
@@ -222,9 +217,8 @@ class ResNeXt(fluid.dygraph.Layer):
self
.
out
=
Linear
(
self
.
pool2d_avg_channels
,
class_dim
,
param_attr
=
ParamAttr
(
initializer
=
fluid
.
initializer
.
Uniform
(
-
stdv
,
stdv
),
name
=
"fc_weights"
),
weight_attr
=
ParamAttr
(
initializer
=
Uniform
(
-
stdv
,
stdv
),
name
=
"fc_weights"
),
bias_attr
=
ParamAttr
(
name
=
"fc_offset"
))
def
forward
(
self
,
inputs
):
...
...
@@ -235,7 +229,7 @@ class ResNeXt(fluid.dygraph.Layer):
for
block
in
self
.
block_list
:
y
=
block
(
y
)
y
=
self
.
pool2d_avg
(
y
)
y
=
fluid
.
layers
.
reshape
(
y
,
shape
=
[
-
1
,
self
.
pool2d_avg_channels
])
y
=
paddle
.
reshape
(
y
,
shape
=
[
-
1
,
self
.
pool2d_avg_channels
])
y
=
self
.
out
(
y
)
return
y
...
...
ppcls/modeling/architectures/se_resnet_vd.py
浏览文件 @
ea746480
...
...
@@ -17,9 +17,12 @@ from __future__ import print_function
import
numpy
as
np
import
paddle
import
paddle.fluid
as
fluid
from
paddle.fluid.param_attr
import
ParamAttr
from
paddle.fluid.dygraph.nn
import
Conv2D
,
Pool2D
,
BatchNorm
,
Linear
,
Dropout
from
paddle
import
ParamAttr
import
paddle.nn
as
nn
import
paddle.nn.functional
as
F
from
paddle.nn
import
Conv2d
,
BatchNorm
,
Linear
,
Dropout
from
paddle.nn
import
AdaptiveAvgPool2d
,
MaxPool2d
,
AvgPool2d
from
paddle.nn.initializer
import
Uniform
import
math
...
...
@@ -29,7 +32,7 @@ __all__ = [
]
class
ConvBNLayer
(
fluid
.
dygraph
.
Layer
):
class
ConvBNLayer
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
...
...
@@ -43,21 +46,17 @@ class ConvBNLayer(fluid.dygraph.Layer):
super
(
ConvBNLayer
,
self
).
__init__
()
self
.
is_vd_mode
=
is_vd_mode
self
.
_pool2d_avg
=
Pool2D
(
pool_size
=
2
,
pool_stride
=
2
,
pool_padding
=
0
,
pool_type
=
'avg'
,
ceil_mode
=
True
)
self
.
_conv
=
Conv2D
(
num_channels
=
num_channels
,
num_filters
=
num_filters
,
filter_size
=
filter_size
,
self
.
_pool2d_avg
=
AvgPool2d
(
kernel_size
=
2
,
stride
=
2
,
padding
=
0
,
ceil_mode
=
True
)
self
.
_conv
=
Conv2d
(
in_channels
=
num_channels
,
out_channels
=
num_filters
,
kernel_size
=
filter_size
,
stride
=
stride
,
padding
=
(
filter_size
-
1
)
//
2
,
groups
=
groups
,
act
=
None
,
param_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
weight_attr
=
ParamAttr
(
name
=
name
+
"_weights"
),
bias_attr
=
False
)
if
name
==
"conv1"
:
bn_name
=
"bn_"
+
name
...
...
@@ -79,7 +78,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
return
y
class
BottleneckBlock
(
fluid
.
dygraph
.
Layer
):
class
BottleneckBlock
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
num_filters
,
...
...
@@ -136,11 +135,11 @@ class BottleneckBlock(fluid.dygraph.Layer):
short
=
inputs
else
:
short
=
self
.
short
(
inputs
)
y
=
fluid
.
layers
.
elementwise_add
(
x
=
short
,
y
=
scale
,
act
=
'relu'
)
y
=
paddle
.
elementwise_add
(
x
=
short
,
y
=
scale
,
act
=
'relu'
)
return
y
class
BasicBlock
(
fluid
.
dygraph
.
Layer
):
class
BasicBlock
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
num_filters
,
...
...
@@ -191,15 +190,15 @@ class BasicBlock(fluid.dygraph.Layer):
short
=
inputs
else
:
short
=
self
.
short
(
inputs
)
y
=
fluid
.
layers
.
elementwise_add
(
x
=
short
,
y
=
scale
,
act
=
'relu'
)
y
=
paddle
.
elementwise_add
(
x
=
short
,
y
=
scale
,
act
=
'relu'
)
return
y
class
SELayer
(
fluid
.
dygraph
.
Layer
):
class
SELayer
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
num_filters
,
reduction_ratio
,
name
=
None
):
super
(
SELayer
,
self
).
__init__
()
self
.
pool2d_gap
=
Pool2D
(
pool_type
=
'avg'
,
global_pooling
=
True
)
self
.
pool2d_gap
=
AdaptiveAvgPool2d
(
1
)
self
.
_num_channels
=
num_channels
...
...
@@ -208,34 +207,32 @@ class SELayer(fluid.dygraph.Layer):
self
.
squeeze
=
Linear
(
num_channels
,
med_ch
,
act
=
"relu"
,
param_attr
=
ParamAttr
(
initializer
=
fluid
.
initializer
.
Uniform
(
-
stdv
,
stdv
),
name
=
name
+
"_sqz_weights"
),
weight_attr
=
ParamAttr
(
initializer
=
Uniform
(
-
stdv
,
stdv
),
name
=
name
+
"_sqz_weights"
),
bias_attr
=
ParamAttr
(
name
=
name
+
'_sqz_offset'
))
stdv
=
1.0
/
math
.
sqrt
(
med_ch
*
1.0
)
self
.
excitation
=
Linear
(
med_ch
,
num_filters
,
act
=
"sigmoid"
,
param_attr
=
ParamAttr
(
initializer
=
fluid
.
initializer
.
Uniform
(
-
stdv
,
stdv
),
name
=
name
+
"_exc_weights"
),
weight_attr
=
ParamAttr
(
initializer
=
Uniform
(
-
stdv
,
stdv
),
name
=
name
+
"_exc_weights"
),
bias_attr
=
ParamAttr
(
name
=
name
+
'_exc_offset'
))
def
forward
(
self
,
input
):
pool
=
self
.
pool2d_gap
(
input
)
pool
=
fluid
.
layers
.
reshape
(
pool
,
shape
=
[
-
1
,
self
.
_num_channels
])
pool
=
paddle
.
reshape
(
pool
,
shape
=
[
-
1
,
self
.
_num_channels
])
squeeze
=
self
.
squeeze
(
pool
)
squeeze
=
F
.
relu
(
squeeze
)
excitation
=
self
.
excitation
(
squeeze
)
excitation
=
fluid
.
layers
.
reshape
(
excitation
=
F
.
sigmoid
(
excitation
)
excitation
=
paddle
.
reshape
(
excitation
,
shape
=
[
-
1
,
self
.
_num_channels
,
1
,
1
])
out
=
input
*
excitation
return
out
class
SE_ResNet_vd
(
fluid
.
dygraph
.
Layer
):
class
SE_ResNet_vd
(
nn
.
Layer
):
def
__init__
(
self
,
layers
=
50
,
class_dim
=
1000
):
super
(
SE_ResNet_vd
,
self
).
__init__
()
...
...
@@ -280,8 +277,7 @@ class SE_ResNet_vd(fluid.dygraph.Layer):
stride
=
1
,
act
=
'relu'
,
name
=
"conv1_3"
)
self
.
pool2d_max
=
Pool2D
(
pool_size
=
3
,
pool_stride
=
2
,
pool_padding
=
1
,
pool_type
=
'max'
)
self
.
pool2d_max
=
MaxPool2d
(
kernel_size
=
3
,
stride
=
2
,
padding
=
1
)
self
.
block_list
=
[]
if
layers
>=
50
:
...
...
@@ -325,8 +321,7 @@ class SE_ResNet_vd(fluid.dygraph.Layer):
self
.
block_list
.
append
(
basic_block
)
shortcut
=
True
self
.
pool2d_avg
=
Pool2D
(
pool_size
=
7
,
pool_type
=
'avg'
,
global_pooling
=
True
)
self
.
pool2d_avg
=
AdaptiveAvgPool2d
(
1
)
self
.
pool2d_avg_channels
=
num_channels
[
-
1
]
*
2
...
...
@@ -335,9 +330,8 @@ class SE_ResNet_vd(fluid.dygraph.Layer):
self
.
out
=
Linear
(
self
.
pool2d_avg_channels
,
class_dim
,
param_attr
=
ParamAttr
(
initializer
=
fluid
.
initializer
.
Uniform
(
-
stdv
,
stdv
),
name
=
"fc6_weights"
),
weight_attr
=
ParamAttr
(
initializer
=
Uniform
(
-
stdv
,
stdv
),
name
=
"fc6_weights"
),
bias_attr
=
ParamAttr
(
name
=
"fc6_offset"
))
def
forward
(
self
,
inputs
):
...
...
@@ -348,7 +342,7 @@ class SE_ResNet_vd(fluid.dygraph.Layer):
for
block
in
self
.
block_list
:
y
=
block
(
y
)
y
=
self
.
pool2d_avg
(
y
)
y
=
fluid
.
layers
.
reshape
(
y
,
shape
=
[
-
1
,
self
.
pool2d_avg_channels
])
y
=
paddle
.
reshape
(
y
,
shape
=
[
-
1
,
self
.
pool2d_avg_channels
])
y
=
self
.
out
(
y
)
return
y
...
...
ppcls/modeling/architectures/shufflenet_v2.py
浏览文件 @
ea746480
...
...
@@ -18,15 +18,17 @@ from __future__ import print_function
import
numpy
as
np
import
paddle
import
paddle.fluid
as
fluid
from
paddle.fluid.param_attr
import
ParamAttr
from
paddle.fluid.dygraph.nn
import
Conv2D
,
Pool2D
,
BatchNorm
,
Linear
,
Dropout
from
paddle.fluid.initializer
import
MSRA
from
paddle
import
ParamAttr
import
paddle.nn
as
nn
import
paddle.nn.functional
as
F
from
paddle.nn
import
Conv2d
,
BatchNorm
,
Linear
,
Dropout
from
paddle.nn
import
AdaptiveAvgPool2d
,
MaxPool2d
,
AvgPool2d
from
paddle.nn.initializer
import
MSRA
import
math
__all__
=
[
"ShuffleNetV2_x0_25"
,
"ShuffleNetV2_x0_33"
,
"ShuffleNetV2_x0_5"
,
"ShuffleNetV2
_x1_0
"
,
"ShuffleNetV2_x1_5"
,
"ShuffleNetV2_x2_0"
,
"ShuffleNetV2"
,
"ShuffleNetV2_x1_5"
,
"ShuffleNetV2_x2_0"
,
"ShuffleNetV2_swish"
]
...
...
@@ -37,17 +39,16 @@ def channel_shuffle(x, groups):
channels_per_group
=
num_channels
//
groups
# reshape
x
=
fluid
.
layers
.
reshape
(
x
=
paddle
.
reshape
(
x
=
x
,
shape
=
[
batchsize
,
groups
,
channels_per_group
,
height
,
width
])
x
=
fluid
.
layers
.
transpose
(
x
=
x
,
perm
=
[
0
,
2
,
1
,
3
,
4
])
x
=
paddle
.
transpose
(
x
=
x
,
perm
=
[
0
,
2
,
1
,
3
,
4
])
# flatten
x
=
fluid
.
layers
.
reshape
(
x
=
x
,
shape
=
[
batchsize
,
num_channels
,
height
,
width
])
x
=
paddle
.
reshape
(
x
=
x
,
shape
=
[
batchsize
,
num_channels
,
height
,
width
])
return
x
class
ConvBNLayer
(
fluid
.
dygraph
.
Layer
):
class
ConvBNLayer
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
filter_size
,
...
...
@@ -58,24 +59,21 @@ class ConvBNLayer(fluid.dygraph.Layer):
num_groups
=
1
,
if_act
=
True
,
act
=
'relu'
,
name
=
None
,
use_cudnn
=
True
):
name
=
None
):
super
(
ConvBNLayer
,
self
).
__init__
()
self
.
_if_act
=
if_act
assert
act
in
[
'relu'
,
'swish'
],
\
"supported act are {} but your act is {}"
.
format
(
[
'relu'
,
'swish'
],
act
)
self
.
_act
=
act
self
.
_conv
=
Conv2
D
(
num
_channels
=
num_channels
,
num_filter
s
=
num_filters
,
filter
_size
=
filter_size
,
self
.
_conv
=
Conv2
d
(
in
_channels
=
num_channels
,
out_channel
s
=
num_filters
,
kernel
_size
=
filter_size
,
stride
=
stride
,
padding
=
padding
,
groups
=
num_groups
,
act
=
None
,
use_cudnn
=
use_cudnn
,
param_attr
=
ParamAttr
(
weight_attr
=
ParamAttr
(
initializer
=
MSRA
(),
name
=
name
+
"_weights"
),
bias_attr
=
False
)
...
...
@@ -90,12 +88,11 @@ class ConvBNLayer(fluid.dygraph.Layer):
y
=
self
.
_conv
(
inputs
)
y
=
self
.
_batch_norm
(
y
)
if
self
.
_if_act
:
y
=
fluid
.
layers
.
relu
(
y
)
if
self
.
_act
==
'relu'
else
fluid
.
layers
.
swish
(
y
)
y
=
F
.
relu
(
y
)
if
self
.
_act
==
'relu'
else
F
.
swish
(
y
)
return
y
class
InvertedResidualUnit
(
fluid
.
dygraph
.
Layer
):
class
InvertedResidualUnit
(
nn
.
Layer
):
def
__init__
(
self
,
num_channels
,
num_filters
,
...
...
@@ -130,7 +127,6 @@ class InvertedResidualUnit(fluid.dygraph.Layer):
num_groups
=
oup_inc
,
if_act
=
False
,
act
=
act
,
use_cudnn
=
False
,
name
=
'stage_'
+
name
+
'_conv2'
)
self
.
_conv_linear
=
ConvBNLayer
(
num_channels
=
oup_inc
,
...
...
@@ -153,7 +149,6 @@ class InvertedResidualUnit(fluid.dygraph.Layer):
num_groups
=
inp
,
if_act
=
False
,
act
=
act
,
use_cudnn
=
False
,
name
=
'stage_'
+
name
+
'_conv4'
)
self
.
_conv_linear_1
=
ConvBNLayer
(
num_channels
=
inp
,
...
...
@@ -185,7 +180,6 @@ class InvertedResidualUnit(fluid.dygraph.Layer):
num_groups
=
oup_inc
,
if_act
=
False
,
act
=
act
,
use_cudnn
=
False
,
name
=
'stage_'
+
name
+
'_conv2'
)
self
.
_conv_linear_2
=
ConvBNLayer
(
num_channels
=
oup_inc
,
...
...
@@ -200,14 +194,14 @@ class InvertedResidualUnit(fluid.dygraph.Layer):
def
forward
(
self
,
inputs
):
if
self
.
benchmodel
==
1
:
x1
,
x2
=
fluid
.
layers
.
split
(
x1
,
x2
=
paddle
.
split
(
inputs
,
num_or_sections
=
[
inputs
.
shape
[
1
]
//
2
,
inputs
.
shape
[
1
]
//
2
],
dim
=
1
)
axis
=
1
)
x2
=
self
.
_conv_pw
(
x2
)
x2
=
self
.
_conv_dw
(
x2
)
x2
=
self
.
_conv_linear
(
x2
)
out
=
fluid
.
layers
.
concat
([
x1
,
x2
],
axis
=
1
)
out
=
paddle
.
concat
([
x1
,
x2
],
axis
=
1
)
else
:
x1
=
self
.
_conv_dw_1
(
inputs
)
x1
=
self
.
_conv_linear_1
(
x1
)
...
...
@@ -215,12 +209,12 @@ class InvertedResidualUnit(fluid.dygraph.Layer):
x2
=
self
.
_conv_pw_2
(
inputs
)
x2
=
self
.
_conv_dw_2
(
x2
)
x2
=
self
.
_conv_linear_2
(
x2
)
out
=
fluid
.
layers
.
concat
([
x1
,
x2
],
axis
=
1
)
out
=
paddle
.
concat
([
x1
,
x2
],
axis
=
1
)
return
channel_shuffle
(
out
,
2
)
class
ShuffleNet
(
fluid
.
dygraph
.
Layer
):
class
ShuffleNet
(
nn
.
Layer
):
def
__init__
(
self
,
class_dim
=
1000
,
scale
=
1.0
,
act
=
'relu'
):
super
(
ShuffleNet
,
self
).
__init__
()
self
.
scale
=
scale
...
...
@@ -252,8 +246,7 @@ class ShuffleNet(fluid.dygraph.Layer):
if_act
=
True
,
act
=
act
,
name
=
'stage1_conv'
)
self
.
_max_pool
=
Pool2D
(
pool_type
=
'max'
,
pool_size
=
3
,
pool_stride
=
2
,
pool_padding
=
1
)
self
.
_max_pool
=
MaxPool2d
(
kernel_size
=
3
,
stride
=
2
,
padding
=
1
)
# 2. bottleneck sequences
self
.
_block_list
=
[]
...
...
@@ -298,13 +291,13 @@ class ShuffleNet(fluid.dygraph.Layer):
name
=
'conv5'
)
# 4. pool
self
.
_pool2d_avg
=
Pool2D
(
pool_type
=
'avg'
,
global_pooling
=
True
)
self
.
_pool2d_avg
=
AdaptiveAvgPool2d
(
1
)
self
.
_out_c
=
stage_out_channels
[
-
1
]
# 5. fc
self
.
_fc
=
Linear
(
stage_out_channels
[
-
1
],
class_dim
,
param
_attr
=
ParamAttr
(
name
=
'fc6_weights'
),
weight
_attr
=
ParamAttr
(
name
=
'fc6_weights'
),
bias_attr
=
ParamAttr
(
name
=
'fc6_offset'
))
def
forward
(
self
,
inputs
):
...
...
@@ -314,7 +307,7 @@ class ShuffleNet(fluid.dygraph.Layer):
y
=
inv
(
y
)
y
=
self
.
_last_conv
(
y
)
y
=
self
.
_pool2d_avg
(
y
)
y
=
fluid
.
layers
.
reshape
(
y
,
shape
=
[
-
1
,
self
.
_out_c
])
y
=
paddle
.
reshape
(
y
,
shape
=
[
-
1
,
self
.
_out_c
])
y
=
self
.
_fc
(
y
)
return
y
...
...
ppcls/modeling/architectures/squeezenet.py
浏览文件 @
ea746480
此差异已折叠。
点击以展开。
ppcls/modeling/architectures/vgg.py
浏览文件 @
ea746480
此差异已折叠。
点击以展开。
ppcls/modeling/architectures/xception.py
浏览文件 @
ea746480
此差异已折叠。
点击以展开。
ppcls/modeling/architectures/xception_deeplab.py
浏览文件 @
ea746480
import
paddle
import
paddle.fluid
as
fluid
from
paddle.fluid.param_attr
import
ParamAttr
from
paddle.fluid.dygraph.nn
import
Conv2D
,
Pool2D
,
BatchNorm
,
Linear
,
Dropout
from
paddle
import
ParamAttr
import
paddle.nn
as
nn
import
paddle.nn.functional
as
F
from
paddle.nn
import
Conv2d
,
BatchNorm
,
Linear
,
Dropout
from
paddle.nn
import
AdaptiveAvgPool2d
,
MaxPool2d
,
AvgPool2d
__all__
=
[
"Xception41_deeplab"
,
"Xception65_deeplab"
,
"Xception71_deeplab"
]
...
...
@@ -56,7 +58,7 @@ def gen_bottleneck_params(backbone='xception_65'):
return
bottleneck_params
class
ConvBNLayer
(
fluid
.
dygraph
.
Layer
):
class
ConvBNLayer
(
nn
.
Layer
):
def
__init__
(
self
,
input_channels
,
output_channels
,
...
...
@@ -67,13 +69,13 @@ class ConvBNLayer(fluid.dygraph.Layer):
name
=
None
):
super
(
ConvBNLayer
,
self
).
__init__
()
self
.
_conv
=
Conv2
D
(
num
_channels
=
input_channels
,
num_filter
s
=
output_channels
,
filter
_size
=
filter_size
,
self
.
_conv
=
Conv2
d
(
in
_channels
=
input_channels
,
out_channel
s
=
output_channels
,
kernel
_size
=
filter_size
,
stride
=
stride
,
padding
=
padding
,
param
_attr
=
ParamAttr
(
name
=
name
+
"/weights"
),
weight
_attr
=
ParamAttr
(
name
=
name
+
"/weights"
),
bias_attr
=
False
)
self
.
_bn
=
BatchNorm
(
num_channels
=
output_channels
,
...
...
@@ -89,7 +91,7 @@ class ConvBNLayer(fluid.dygraph.Layer):
return
self
.
_bn
(
self
.
_conv
(
inputs
))
class
Seperate_Conv
(
fluid
.
dygraph
.
Layer
):
class
Seperate_Conv
(
nn
.
Layer
):
def
__init__
(
self
,
input_channels
,
output_channels
,
...
...
@@ -100,15 +102,15 @@ class Seperate_Conv(fluid.dygraph.Layer):
name
=
None
):
super
(
Seperate_Conv
,
self
).
__init__
()
self
.
_conv1
=
Conv2
D
(
num
_channels
=
input_channels
,
num_filter
s
=
input_channels
,
filter
_size
=
filter
,
self
.
_conv1
=
Conv2
d
(
in
_channels
=
input_channels
,
out_channel
s
=
input_channels
,
kernel
_size
=
filter
,
stride
=
stride
,
groups
=
input_channels
,
padding
=
(
filter
)
//
2
*
dilation
,
dilation
=
dilation
,
param
_attr
=
ParamAttr
(
name
=
name
+
"/depthwise/weights"
),
weight
_attr
=
ParamAttr
(
name
=
name
+
"/depthwise/weights"
),
bias_attr
=
False
)
self
.
_bn1
=
BatchNorm
(
input_channels
,
...
...
@@ -119,14 +121,14 @@ class Seperate_Conv(fluid.dygraph.Layer):
bias_attr
=
ParamAttr
(
name
=
name
+
"/depthwise/BatchNorm/beta"
),
moving_mean_name
=
name
+
"/depthwise/BatchNorm/moving_mean"
,
moving_variance_name
=
name
+
"/depthwise/BatchNorm/moving_variance"
)
self
.
_conv2
=
Conv2
D
(
self
.
_conv2
=
Conv2
d
(
input_channels
,
output_channels
,
1
,
stride
=
1
,
groups
=
1
,
padding
=
0
,
param
_attr
=
ParamAttr
(
name
=
name
+
"/pointwise/weights"
),
weight
_attr
=
ParamAttr
(
name
=
name
+
"/pointwise/weights"
),
bias_attr
=
False
)
self
.
_bn2
=
BatchNorm
(
output_channels
,
...
...
@@ -146,7 +148,7 @@ class Seperate_Conv(fluid.dygraph.Layer):
return
x
class
Xception_Block
(
fluid
.
dygraph
.
Layer
):
class
Xception_Block
(
nn
.
Layer
):
def
__init__
(
self
,
input_channels
,
output_channels
,
...
...
@@ -226,11 +228,11 @@ class Xception_Block(fluid.dygraph.Layer):
def
forward
(
self
,
inputs
):
if
not
self
.
activation_fn_in_separable_conv
:
x
=
fluid
.
layers
.
relu
(
inputs
)
x
=
F
.
relu
(
inputs
)
x
=
self
.
_conv1
(
x
)
x
=
fluid
.
layers
.
relu
(
x
)
x
=
F
.
relu
(
x
)
x
=
self
.
_conv2
(
x
)
x
=
fluid
.
layers
.
relu
(
x
)
x
=
F
.
relu
(
x
)
x
=
self
.
_conv3
(
x
)
else
:
x
=
self
.
_conv1
(
inputs
)
...
...
@@ -242,10 +244,10 @@ class Xception_Block(fluid.dygraph.Layer):
skip
=
self
.
_short
(
inputs
)
else
:
skip
=
inputs
return
fluid
.
layers
.
elementwise_add
(
x
,
skip
)
return
paddle
.
elementwise_add
(
x
,
skip
)
class
XceptionDeeplab
(
fluid
.
dygraph
.
Layer
):
class
XceptionDeeplab
(
nn
.
Layer
):
def
__init__
(
self
,
backbone
,
class_dim
=
1000
):
super
(
XceptionDeeplab
,
self
).
__init__
()
...
...
@@ -344,12 +346,12 @@ class XceptionDeeplab(fluid.dygraph.Layer):
self
.
stride
=
s
self
.
_drop
=
Dropout
(
p
=
0.5
)
self
.
_pool
=
Pool2D
(
pool_type
=
"avg"
,
global_pooling
=
True
)
self
.
_drop
=
Dropout
(
p
=
0.5
,
mode
=
"downscale_in_infer"
)
self
.
_pool
=
AdaptiveAvgPool2d
(
1
)
self
.
_fc
=
Linear
(
self
.
chns
[
1
][
-
1
],
class_dim
,
param
_attr
=
ParamAttr
(
name
=
"fc_weights"
),
weight
_attr
=
ParamAttr
(
name
=
"fc_weights"
),
bias_attr
=
ParamAttr
(
name
=
"fc_bias"
))
def
forward
(
self
,
inputs
):
...
...
@@ -363,7 +365,7 @@ class XceptionDeeplab(fluid.dygraph.Layer):
x
=
self
.
_exit_flow_2
(
x
)
x
=
self
.
_drop
(
x
)
x
=
self
.
_pool
(
x
)
x
=
fluid
.
layers
.
squeeze
(
x
,
axe
s
=
[
2
,
3
])
x
=
paddle
.
squeeze
(
x
,
axi
s
=
[
2
,
3
])
x
=
self
.
_fc
(
x
)
return
x
...
...
ppcls/modeling/loss.py
浏览文件 @
ea746480
此差异已折叠。
点击以展开。
ppcls/optimizer/learning_rate.py
浏览文件 @
ea746480
此差异已折叠。
点击以展开。
ppcls/optimizer/optimizer.py
浏览文件 @
ea746480
此差异已折叠。
点击以展开。
ppcls/utils/check.py
浏览文件 @
ea746480
此差异已折叠。
点击以展开。
ppcls/utils/logger.py
浏览文件 @
ea746480
...
...
@@ -23,7 +23,7 @@ logging.basicConfig(
def
time_zone
(
sec
,
fmt
):
real_time
=
datetime
.
datetime
.
now
()
+
datetime
.
timedelta
(
hours
=
8
)
real_time
=
datetime
.
datetime
.
now
()
return
real_time
.
timetuple
()
...
...
ppcls/utils/save_load.py
浏览文件 @
ea746480
此差异已折叠。
点击以展开。
tools/eval.py
浏览文件 @
ea746480
此差异已折叠。
点击以展开。
tools/infer/infer.py
浏览文件 @
ea746480
此差异已折叠。
点击以展开。
tools/program.py
浏览文件 @
ea746480
此差异已折叠。
点击以展开。
tools/train.py
浏览文件 @
ea746480
此差异已折叠。
点击以展开。
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录