PaddlePaddle / PaddleClas

Commit 8ffd18b2 (unverified)
Authored by Walter on May 31, 2021; committed via GitHub on May 31, 2021
Parents: 38813434, 8ede57a4

Merge pull request #748 from weisy11/develop_reg

modify hrnet
Showing 2 changed files with 239 additions and 385 deletions:

  docs/zh_CN/feature_visiualization/get_started.md   (+1, -1)
  ppcls/arch/backbone/legendary_models/hrnet.py      (+238, -384)
docs/zh_CN/feature_visiualization/get_started.md

@@ -37,7 +37,7 @@ def forward(self, inputs):
         y = self.pool2d_max(y)
         for bottleneck_block in self.bottleneck_block_list:
             y = bottleneck_block(y)
-        y = self.pool2d_avg(y)
+        y = self.avg_pool(y)
         y = fluid.layers.reshape(y, shape=[-1, self.pool2d_avg_output])
         y = self.out(y)
         return y, self.fm
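The only change to the tutorial is renaming the average-pooling attribute from pool2d_avg to avg_pool, keeping the snippet in sync with the refactored backbone below. A minimal, self-contained sketch of the renamed attribute (the layer sizes here are illustrative, not from the repository):

import paddle
from paddle import nn

# Minimal sketch: the backbone now exposes its global pooling layer as
# self.avg_pool (previously self.pool2d_avg); the tutorial line follows suit.
class TinyHead(nn.Layer):
    def __init__(self):
        super(TinyHead, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2D(1)

    def forward(self, x):
        return self.avg_pool(x)

print(TinyHead()(paddle.randn([1, 8, 7, 7])).shape)  # [1, 8, 1, 1]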
ppcls/arch/backbone/legendary_models/hrnet.py

@@ -17,34 +17,34 @@ from __future__ import division
 from __future__ import print_function

 import math
-import numpy as np
 import paddle
+from paddle import nn
 from paddle import ParamAttr
-import paddle.nn as nn
-import paddle.nn.functional as F
-from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D
+from paddle.nn.functional import upsample
 from paddle.nn.initializer import Uniform

-from ppcls.arch.backbone.base.theseus_layer import TheseusLayer
+from ppcls.arch.backbone.base.theseus_layer import TheseusLayer, Identity

-__all__ = [
-    "HRNet_W18_C",
-    "HRNet_W30_C",
-    "HRNet_W32_C",
-    "HRNet_W40_C",
-    "HRNet_W44_C",
-    "HRNet_W48_C",
-    "HRNet_W60_C",
-    "HRNet_W64_C",
-    "SE_HRNet_W18_C",
-    "SE_HRNet_W30_C",
-    "SE_HRNet_W32_C",
-    "SE_HRNet_W40_C",
-    "SE_HRNet_W44_C",
-    "SE_HRNet_W48_C",
-    "SE_HRNet_W60_C",
-    "SE_HRNet_W64_C",
-]
+MODEL_URLS = {
+    "HRNet_W18_C": "",
+    "HRNet_W30_C": "",
+    "HRNet_W32_C": "",
+    "HRNet_W40_C": "",
+    "HRNet_W44_C": "",
+    "HRNet_W48_C": "",
+    "HRNet_W60_C": "",
+    "HRNet_W64_C": "",
+    "SE_HRNet_W18_C": "",
+    "SE_HRNet_W30_C": "",
+    "SE_HRNet_W32_C": "",
+    "SE_HRNet_W40_C": "",
+    "SE_HRNet_W44_C": "",
+    "SE_HRNet_W48_C": "",
+    "SE_HRNet_W60_C": "",
+    "SE_HRNet_W64_C": "",
+}
+
+__all__ = list(MODEL_URLS.keys())


 class ConvBNLayer(TheseusLayer):
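The module now derives its public symbols from the pretrained-weight table instead of a hand-maintained list, so adding a model variant means adding one dict entry. A runnable re-statement of that pattern, with two entries copied from the diff (the URLs are empty at this commit):

MODEL_URLS = {
    "HRNet_W18_C": "",
    "SE_HRNet_W64_C": "",
}

# The export list is derived from the table, keeping the two in sync.
__all__ = list(MODEL_URLS.keys())

print(__all__)  # ['HRNet_W18_C', 'SE_HRNet_W64_C']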
@@ -54,136 +54,39 @@ class ConvBNLayer(TheseusLayer):
                  filter_size,
                  stride=1,
                  groups=1,
-                 act="relu",
-                 name=None):
+                 act="relu"):
         super(ConvBNLayer, self).__init__()

-        self._conv = nn.Conv2D(
+        self.conv = nn.Conv2D(
             in_channels=num_channels,
             out_channels=num_filters,
             kernel_size=filter_size,
             stride=stride,
             padding=(filter_size - 1) // 2,
             groups=groups,
-            weight_attr=ParamAttr(name=name + "_weights"),
             bias_attr=False)
-        bn_name = name + '_bn'
-        self._batch_norm = nn.BatchNorm(
+        self.bn = nn.BatchNorm(
             num_filters,
-            act=act,
-            param_attr=ParamAttr(name=bn_name + '_scale'),
-            bias_attr=ParamAttr(bn_name + '_offset'),
-            moving_mean_name=bn_name + '_mean',
-            moving_variance_name=bn_name + '_variance')
+            act=None)
+        self.act = create_act(act)

-    def forward(self, x, res_dict=None):
-        y = self._conv(x)
-        y = self._batch_norm(y)
-        return y
+    def forward(self, x):
+        x = self.conv(x)
+        x = self.bn(x)
+        x = self.act(x)
+        return x


-class Layer1(TheseusLayer):
-    def __init__(self, num_channels, has_se=False, name=None):
-        super(Layer1, self).__init__()
-
-        self.bottleneck_block_list = []
-
-        for i in range(4):
-            bottleneck_block = self.add_sublayer(
-                "bb_{}_{}".format(name, i + 1),
-                BottleneckBlock(
-                    num_channels=num_channels if i == 0 else 256,
-                    num_filters=64,
-                    has_se=has_se,
-                    stride=1,
-                    downsample=True if i == 0 else False,
-                    name=name + '_' + str(i + 1)))
-            self.bottleneck_block_list.append(bottleneck_block)
-
-    def forward(self, x, res_dict=None):
-        y = x
-        for block_func in self.bottleneck_block_list:
-            y = block_func(y)
-        return y
-
-
-class TransitionLayer(TheseusLayer):
-    def __init__(self, in_channels, out_channels, name=None):
-        super(TransitionLayer, self).__init__()
-
-        num_in = len(in_channels)
-        num_out = len(out_channels)
-        out = []
-        self.conv_bn_func_list = []
-        for i in range(num_out):
-            residual = None
-            if i < num_in:
-                if in_channels[i] != out_channels[i]:
-                    residual = self.add_sublayer(
-                        "transition_{}_layer_{}".format(name, i + 1),
-                        ConvBNLayer(
-                            num_channels=in_channels[i],
-                            num_filters=out_channels[i],
-                            filter_size=3,
-                            name=name + '_layer_' + str(i + 1)))
-            else:
-                residual = self.add_sublayer(
-                    "transition_{}_layer_{}".format(name, i + 1),
-                    ConvBNLayer(
-                        num_channels=in_channels[-1],
-                        num_filters=out_channels[i],
-                        filter_size=3,
-                        stride=2,
-                        name=name + '_layer_' + str(i + 1)))
-            self.conv_bn_func_list.append(residual)
-
-    def forward(self, x, res_dict=None):
-        outs = []
-        for idx, conv_bn_func in enumerate(self.conv_bn_func_list):
-            if conv_bn_func is None:
-                outs.append(x[idx])
-            else:
-                if idx < len(x):
-                    outs.append(conv_bn_func(x[idx]))
-                else:
-                    outs.append(conv_bn_func(x[-1]))
-        return outs
-
-
-class Branches(TheseusLayer):
-    def __init__(self,
-                 block_num,
-                 in_channels,
-                 out_channels,
-                 has_se=False,
-                 name=None):
-        super(Branches, self).__init__()
-
-        self.basic_block_list = []
-
-        for i in range(len(out_channels)):
-            self.basic_block_list.append([])
-            for j in range(block_num):
-                in_ch = in_channels[i] if j == 0 else out_channels[i]
-                basic_block_func = self.add_sublayer(
-                    "bb_{}_branch_layer_{}_{}".format(name, i + 1, j + 1),
-                    BasicBlock(
-                        num_channels=in_ch,
-                        num_filters=out_channels[i],
-                        has_se=has_se,
-                        name=name + '_branch_layer_' + str(i + 1) + '_' +
-                        str(j + 1)))
-                self.basic_block_list[i].append(basic_block_func)
-
-    def forward(self, x, res_dict=None):
-        outs = []
-        for idx, xi in enumerate(x):
-            conv = xi
-            basic_block_list = self.basic_block_list[idx]
-            for basic_block_func in basic_block_list:
-                conv = basic_block_func(conv)
-            outs.append(conv)
-        return outs
+def create_act(act):
+    if act == 'hardswish':
+        return nn.Hardswish()
+    elif act == 'relu':
+        return nn.ReLU()
+    elif act is None:
+        return Identity()
+    else:
+        raise RuntimeError(
+            'The activation function is not supported: {}'.format(act))


 class BottleneckBlock(TheseusLayer):
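The new create_act helper centralizes activation construction so that ConvBNLayer can hold batch norm and activation as separate sublayers instead of folding the activation into BatchNorm. A self-contained sketch of the helper as it appears in the diff; the project-local Identity imported from theseus_layer is replaced by a stand-in here (an assumption: any no-op layer behaves the same):

import paddle
import paddle.nn as nn

# Stand-in for the Identity layer imported from theseus_layer in the diff.
class Identity(nn.Layer):
    def forward(self, x):
        return x

def create_act(act):
    if act == 'hardswish':
        return nn.Hardswish()
    elif act == 'relu':
        return nn.ReLU()
    elif act is None:
        return Identity()
    else:
        raise RuntimeError(
            'The activation function is not supported: {}'.format(act))

x = paddle.randn([1, 8, 14, 14])
print(create_act('relu')(x).shape)  # [1, 8, 14, 14]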
@@ -192,8 +95,7 @@ class BottleneckBlock(TheseusLayer):
                  num_filters,
                  has_se,
                  stride=1,
-                 downsample=False,
-                 name=None):
+                 downsample=False):
         super(BottleneckBlock, self).__init__()

         self.has_se = has_se
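Dropping the name argument here (and throughout the file) leans on dygraph assigning parameter names automatically, so the explicit ParamAttr name plumbing adds nothing. A small sketch of that behavior; the printed name is an example of the generated format, not a guaranteed value:

from paddle import nn

# Parameters receive generated names without any ParamAttr(name=...) plumbing.
conv = nn.Conv2D(3, 8, 3)
print(conv.weight.name)  # e.g. 'conv2d_0.w_0'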
@@ -203,215 +105,175 @@ class BottleneckBlock(TheseusLayer):
             num_channels=num_channels,
             num_filters=num_filters,
             filter_size=1,
-            act="relu",
-            name=name + "_conv1")
+            act="relu")
         self.conv2 = ConvBNLayer(
             num_channels=num_filters,
             num_filters=num_filters,
             filter_size=3,
             stride=stride,
-            act="relu",
-            name=name + "_conv2")
+            act="relu")
         self.conv3 = ConvBNLayer(
             num_channels=num_filters,
             num_filters=num_filters * 4,
             filter_size=1,
-            act=None,
-            name=name + "_conv3")
+            act=None)

         if self.downsample:
             self.conv_down = ConvBNLayer(
                 num_channels=num_channels,
                 num_filters=num_filters * 4,
                 filter_size=1,
-                act=None,
-                name=name + "_downsample")
+                act=None)

         if self.has_se:
             self.se = SELayer(
                 num_channels=num_filters * 4,
                 num_filters=num_filters * 4,
-                reduction_ratio=16,
-                name='fc' + name)
+                reduction_ratio=16)
+        self.relu = nn.ReLU()

     def forward(self, x, res_dict=None):
         residual = x
-        conv1 = self.conv1(x)
-        conv2 = self.conv2(conv1)
-        conv3 = self.conv3(conv2)
+        x = self.conv1(x)
+        x = self.conv2(x)
+        x = self.conv3(x)

         if self.downsample:
-            residual = self.conv_down(x)
+            residual = self.conv_down(residual)

         if self.has_se:
-            conv3 = self.se(conv3)
-        y = paddle.add(x=residual, y=conv3)
-        y = F.relu(y)
-        return y
+            x = self.se(x)
+        x = paddle.add(x=residual, y=x)
+        x = self.relu(x)
+        return x


-class BasicBlock(TheseusLayer):
+class BasicBlock(nn.Layer):
     def __init__(self,
                  num_channels,
                  num_filters,
-                 stride=1,
-                 has_se=False,
-                 downsample=False,
-                 name=None):
+                 has_se=False):
         super(BasicBlock, self).__init__()

         self.has_se = has_se
-        self.downsample = downsample

         self.conv1 = ConvBNLayer(
             num_channels=num_channels,
             num_filters=num_filters,
             filter_size=3,
-            stride=stride,
-            act="relu",
-            name=name + "_conv1")
+            stride=1,
+            act="relu")
         self.conv2 = ConvBNLayer(
             num_channels=num_filters,
             num_filters=num_filters,
             filter_size=3,
             stride=1,
-            act=None,
-            name=name + "_conv2")
+            act=None)

-        if self.downsample:
-            self.conv_down = ConvBNLayer(
-                num_channels=num_channels,
-                num_filters=num_filters * 4,
-                filter_size=1,
-                act="relu",
-                name=name + "_downsample")
-
         if self.has_se:
             self.se = SELayer(
                 num_channels=num_filters,
                 num_filters=num_filters,
-                reduction_ratio=16,
-                name='fc' + name)
+                reduction_ratio=16)
+        self.relu = nn.ReLU()

-    def forward(self, input, res_dict=None):
-        residual = input
-        conv1 = self.conv1(input)
-        conv2 = self.conv2(conv1)
-
-        if self.downsample:
-            residual = self.conv_down(input)
+    def forward(self, x):
+        residual = x
+        x = self.conv1(x)
+        x = self.conv2(x)

         if self.has_se:
-            conv2 = self.se(conv2)
-
-        y = paddle.add(x=residual, y=conv2)
-        y = F.relu(y)
-        return y
+            x = self.se(x)
+        x = paddle.add(x=residual, y=x)
+        x = self.relu(x)
+        return x


 class SELayer(TheseusLayer):
-    def __init__(self, num_channels, num_filters, reduction_ratio, name=None):
+    def __init__(self, num_channels, num_filters, reduction_ratio):
         super(SELayer, self).__init__()

-        self.pool2d_gap = AdaptiveAvgPool2D(1)
+        self.pool2d_gap = nn.AdaptiveAvgPool2D(1)

         self._num_channels = num_channels

         med_ch = int(num_channels / reduction_ratio)
         stdv = 1.0 / math.sqrt(num_channels * 1.0)
-        self.squeeze = nn.Linear(
+        self.fc_squeeze = nn.Linear(
             num_channels,
             med_ch,
-            weight_attr=ParamAttr(
-                initializer=Uniform(-stdv, stdv), name=name + "_sqz_weights"),
-            bias_attr=ParamAttr(name=name + '_sqz_offset'))
+            weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)))
+        self.relu = nn.ReLU()

         stdv = 1.0 / math.sqrt(med_ch * 1.0)
-        self.excitation = nn.Linear(
+        self.fc_excitation = nn.Linear(
             med_ch,
             num_filters,
-            weight_attr=ParamAttr(
-                initializer=Uniform(-stdv, stdv), name=name + "_exc_weights"),
-            bias_attr=ParamAttr(name=name + '_exc_offset'))
+            weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)))
+        self.sigmoid = nn.Sigmoid()

-    def forward(self, input, res_dict=None):
-        pool = self.pool2d_gap(input)
-        pool = paddle.squeeze(pool, axis=[2, 3])
-        squeeze = self.squeeze(pool)
-        squeeze = F.relu(squeeze)
-        excitation = self.excitation(squeeze)
-        excitation = F.sigmoid(excitation)
-        excitation = paddle.unsqueeze(excitation, axis=[2, 3])
-        out = input * excitation
-        return out
+    def forward(self, x, res_dict=None):
+        residual = x
+        x = self.pool2d_gap(x)
+        x = paddle.squeeze(x, axis=[2, 3])
+        x = self.fc_squeeze(x)
+        x = self.relu(x)
+        x = self.fc_excitation(x)
+        x = self.sigmoid(x)
+        x = paddle.unsqueeze(x, axis=[2, 3])
+        x = residual * x
+        return x


 class Stage(TheseusLayer):
     def __init__(self,
-                 num_channels,
                  num_modules,
                  num_filters,
-                 has_se=False,
-                 multi_scale_output=True,
-                 name=None):
+                 has_se=False):
         super(Stage, self).__init__()

         self._num_modules = num_modules

-        self.stage_func_list = []
+        self.stage_func_list = nn.LayerList()
         for i in range(num_modules):
-            if i == num_modules - 1 and not multi_scale_output:
-                stage_func = self.add_sublayer(
-                    "stage_{}_{}".format(name, i + 1),
-                    HighResolutionModule(
-                        num_channels=num_channels,
-                        num_filters=num_filters,
-                        has_se=has_se,
-                        multi_scale_output=False,
-                        name=name + '_' + str(i + 1)))
-            else:
-                stage_func = self.add_sublayer(
-                    "stage_{}_{}".format(name, i + 1),
-                    HighResolutionModule(
-                        num_channels=num_channels,
-                        num_filters=num_filters,
-                        has_se=has_se,
-                        name=name + '_' + str(i + 1)))
-            self.stage_func_list.append(stage_func)
+            self.stage_func_list.append(
+                HighResolutionModule(
+                    num_filters=num_filters,
+                    has_se=has_se))

-    def forward(self, input, res_dict=None):
-        out = input
+    def forward(self, x, res_dict=None):
+        x = x
         for idx in range(self._num_modules):
-            out = self.stage_func_list[idx](out)
-        return out
+            x = self.stage_func_list[idx](x)
+        return x


 class HighResolutionModule(TheseusLayer):
     def __init__(self,
-                 num_channels,
                  num_filters,
-                 has_se=False,
-                 multi_scale_output=True,
-                 name=None):
+                 has_se=False):
         super(HighResolutionModule, self).__init__()

-        self.branches_func = Branches(
-            block_num=4,
-            in_channels=num_channels,
-            out_channels=num_filters,
-            has_se=has_se,
-            name=name)
+        self.basic_block_list = nn.LayerList()
+
+        for i in range(len(num_filters)):
+            self.basic_block_list.append(
+                nn.Sequential(*[
+                    BasicBlock(
+                        num_channels=num_filters[i],
+                        num_filters=num_filters[i],
+                        has_se=has_se) for j in range(4)
+                ]))

         self.fuse_func = FuseLayers(
             in_channels=num_filters,
-            out_channels=num_filters,
-            multi_scale_output=multi_scale_output,
-            name=name)
+            out_channels=num_filters)

-    def forward(self, input, res_dict=None):
-        out = self.branches_func(input)
+    def forward(self, x, res_dict=None):
+        out = []
+        for idx, xi in enumerate(x):
+            basic_block_list = self.basic_block_list[idx]
+            for basic_block_func in basic_block_list:
+                xi = basic_block_func(xi)
+            out.append(xi)
         out = self.fuse_func(out)
         return out
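Several containers above switch from a plain Python list plus add_sublayer to nn.LayerList, which registers each appended sublayer (and its parameters) automatically and anonymously. A minimal sketch of the difference, with illustrative layer sizes; a bare Python list would leave the parameters invisible to .parameters():

import paddle
from paddle import nn

class Blocks(nn.Layer):
    def __init__(self, n=3):
        super(Blocks, self).__init__()
        # nn.LayerList registers children; no generated names are needed.
        self.blocks = nn.LayerList([nn.Linear(4, 4) for _ in range(n)])

    def forward(self, x):
        for block in self.blocks:
            x = block(x)
        return x

model = Blocks()
print(len(model.parameters()))                # 6: weight + bias per Linear
print(model(paddle.randn([2, 4])).shape)      # [2, 4]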
@@ -419,246 +281,238 @@ class HighResolutionModule(TheseusLayer):
 class FuseLayers(TheseusLayer):
     def __init__(self,
                  in_channels,
-                 out_channels,
-                 multi_scale_output=True,
-                 name=None):
+                 out_channels):
         super(FuseLayers, self).__init__()

-        self._actual_ch = len(in_channels) if multi_scale_output else 1
+        self._actual_ch = len(in_channels)
         self._in_channels = in_channels

-        self.residual_func_list = []
-        for i in range(self._actual_ch):
+        self.residual_func_list = nn.LayerList()
+        self.relu = nn.ReLU()
+        for i in range(len(in_channels)):
             for j in range(len(in_channels)):
-                residual_func = None
                 if j > i:
-                    residual_func = self.add_sublayer(
-                        "residual_{}_layer_{}_{}".format(name, i + 1, j + 1),
+                    self.residual_func_list.append(
                         ConvBNLayer(
                             num_channels=in_channels[j],
                             num_filters=out_channels[i],
                             filter_size=1,
                             stride=1,
-                            act=None,
-                            name=name + '_layer_' + str(i + 1) + '_' +
-                            str(j + 1)))
-                    self.residual_func_list.append(residual_func)
+                            act=None))
                 elif j < i:
                     pre_num_filters = in_channels[j]
                     for k in range(i - j):
                         if k == i - j - 1:
-                            residual_func = self.add_sublayer(
-                                "residual_{}_layer_{}_{}_{}".format(
-                                    name, i + 1, j + 1, k + 1),
+                            self.residual_func_list.append(
                                 ConvBNLayer(
                                     num_channels=pre_num_filters,
                                     num_filters=out_channels[i],
                                     filter_size=3,
                                     stride=2,
-                                    act=None,
-                                    name=name + '_layer_' + str(i + 1) + '_' +
-                                    str(j + 1) + '_' + str(k + 1)))
+                                    act=None))
                             pre_num_filters = out_channels[i]
                         else:
-                            residual_func = self.add_sublayer(
-                                "residual_{}_layer_{}_{}_{}".format(
-                                    name, i + 1, j + 1, k + 1),
+                            self.residual_func_list.append(
                                 ConvBNLayer(
                                     num_channels=pre_num_filters,
                                     num_filters=out_channels[j],
                                     filter_size=3,
                                     stride=2,
-                                    act="relu",
-                                    name=name + '_layer_' + str(i + 1) + '_' +
-                                    str(j + 1) + '_' + str(k + 1)))
+                                    act="relu"))
                             pre_num_filters = out_channels[j]
-                        self.residual_func_list.append(residual_func)

-    def forward(self, input, res_dict=None):
-        outs = []
+    def forward(self, x, res_dict=None):
+        out = []
         residual_func_idx = 0
-        for i in range(self._actual_ch):
-            residual = input[i]
+        for i in range(len(self._in_channels)):
+            residual = x[i]
             for j in range(len(self._in_channels)):
                 if j > i:
-                    y = self.residual_func_list[residual_func_idx](input[j])
+                    xj = self.residual_func_list[residual_func_idx](x[j])
                     residual_func_idx += 1

-                    y = F.upsample(y, scale_factor=2**(j - i), mode="nearest")
-                    residual = paddle.add(x=residual, y=y)
+                    xj = upsample(xj, scale_factor=2**(j - i), mode="nearest")
+                    residual = paddle.add(x=residual, y=xj)
                 elif j < i:
-                    y = input[j]
+                    xj = x[j]
                     for k in range(i - j):
-                        y = self.residual_func_list[residual_func_idx](y)
+                        xj = self.residual_func_list[residual_func_idx](xj)
                         residual_func_idx += 1

-                    residual = paddle.add(x=residual, y=y)
+                    residual = paddle.add(x=residual, y=xj)

-            residual = F.relu(residual)
-            outs.append(residual)
+            residual = self.relu(residual)
+            out.append(residual)

-        return outs
+        return out


 class LastClsOut(TheseusLayer):
     def __init__(self,
                  num_channel_list,
                  has_se,
-                 num_filters_list=[32, 64, 128, 256],
-                 name=None):
+                 num_filters_list=[32, 64, 128, 256]):
         super(LastClsOut, self).__init__()

-        self.func_list = []
+        self.func_list = nn.LayerList()
         for idx in range(len(num_channel_list)):
-            func = self.add_sublayer(
-                "conv_{}_conv_{}".format(name, idx + 1),
+            self.func_list.append(
                 BottleneckBlock(
                     num_channels=num_channel_list[idx],
                     num_filters=num_filters_list[idx],
                     has_se=has_se,
-                    downsample=True,
-                    name=name + 'conv_' + str(idx + 1)))
-            self.func_list.append(func)
+                    downsample=True))

-    def forward(self, inputs, res_dict=None):
-        outs = []
-        for idx, input in enumerate(inputs):
-            out = self.func_list[idx](input)
-            outs.append(out)
-        return outs
+    def forward(self, x, res_dict=None):
+        out = []
+        for idx, xi in enumerate(x):
+            xi = self.func_list[idx](xi)
+            out.append(xi)
+        return out


 class HRNet(TheseusLayer):
-    def __init__(self, width=18, has_se=False, class_dim=1000):
+    """
+    HRNet
+    Args:
+        width: int=18. Base channel number of HRNet.
+        has_se: bool=False. If 'True', add se module to HRNet.
+        class_num: int=1000. Output num of last fc layer.
+    """
+
+    def __init__(self, width=18, has_se=False, class_num=1000):
         super(HRNet, self).__init__()

         self.width = width
         self.has_se = has_se
-        self.channels = {
-            18: [[18, 36], [18, 36, 72], [18, 36, 72, 144]],
-            30: [[30, 60], [30, 60, 120], [30, 60, 120, 240]],
-            32: [[32, 64], [32, 64, 128], [32, 64, 128, 256]],
-            40: [[40, 80], [40, 80, 160], [40, 80, 160, 320]],
-            44: [[44, 88], [44, 88, 176], [44, 88, 176, 352]],
-            48: [[48, 96], [48, 96, 192], [48, 96, 192, 384]],
-            60: [[60, 120], [60, 120, 240], [60, 120, 240, 480]],
-            64: [[64, 128], [64, 128, 256], [64, 128, 256, 512]]
-        }
-        self._class_dim = class_dim
+        self._class_num = class_num

-        channels_2, channels_3, channels_4 = self.channels[width]
-        num_modules_2, num_modules_3, num_modules_4 = 1, 4, 3
+        channels_2 = [self.width, self.width * 2]
+        channels_3 = [self.width, self.width * 2, self.width * 4]
+        channels_4 = [
+            self.width, self.width * 2, self.width * 4, self.width * 8
+        ]

         self.conv_layer1_1 = ConvBNLayer(
             num_channels=3,
             num_filters=64,
             filter_size=3,
             stride=2,
-            act='relu',
-            name="layer1_1")
+            act='relu')

         self.conv_layer1_2 = ConvBNLayer(
             num_channels=64,
             num_filters=64,
             filter_size=3,
             stride=2,
-            act='relu',
-            name="layer1_2")
+            act='relu')

-        self.la1 = Layer1(num_channels=64, has_se=has_se, name="layer2")
+        self.layer1 = nn.Sequential(*[
+            BottleneckBlock(
+                num_channels=64 if i == 0 else 256,
+                num_filters=64,
+                has_se=has_se,
+                stride=1,
+                downsample=True if i == 0 else False) for i in range(4)
+        ])

-        self.tr1 = TransitionLayer(
-            in_channels=[256], out_channels=channels_2, name="tr1")
+        self.conv_tr1_1 = ConvBNLayer(
+            num_channels=256, num_filters=width, filter_size=3)
+        self.conv_tr1_2 = ConvBNLayer(
+            num_channels=256,
+            num_filters=width * 2,
+            filter_size=3,
+            stride=2)

         self.st2 = Stage(
-            num_channels=channels_2,
-            num_modules=num_modules_2,
+            num_modules=1,
             num_filters=channels_2,
-            has_se=self.has_se,
-            name="st2")
+            has_se=self.has_se)

-        self.tr2 = TransitionLayer(
-            in_channels=channels_2, out_channels=channels_3, name="tr2")
+        self.conv_tr2 = ConvBNLayer(
+            num_channels=width * 2,
+            num_filters=width * 4,
+            filter_size=3,
+            stride=2)

         self.st3 = Stage(
-            num_channels=channels_3,
-            num_modules=num_modules_3,
+            num_modules=4,
             num_filters=channels_3,
-            has_se=self.has_se,
-            name="st3")
+            has_se=self.has_se)

-        self.tr3 = TransitionLayer(
-            in_channels=channels_3, out_channels=channels_4, name="tr3")
+        self.conv_tr3 = ConvBNLayer(
+            num_channels=width * 4,
+            num_filters=width * 8,
+            filter_size=3,
+            stride=2)

         self.st4 = Stage(
-            num_channels=channels_4,
-            num_modules=num_modules_4,
+            num_modules=3,
             num_filters=channels_4,
-            has_se=self.has_se,
-            name="st4")
+            has_se=self.has_se)

         # classification
         num_filters_list = [32, 64, 128, 256]
         self.last_cls = LastClsOut(
             num_channel_list=channels_4,
             has_se=self.has_se,
-            num_filters_list=num_filters_list,
-            name="cls_head", )
+            num_filters_list=num_filters_list)

         last_num_filters = [256, 512, 1024]
-        self.cls_head_conv_list = []
+        self.cls_head_conv_list = nn.LayerList()
         for idx in range(3):
             self.cls_head_conv_list.append(
-                self.add_sublayer(
-                    "cls_head_add{}".format(idx + 1),
-                    ConvBNLayer(
-                        num_channels=num_filters_list[idx] * 4,
-                        num_filters=last_num_filters[idx],
-                        filter_size=3,
-                        stride=2,
-                        name="cls_head_add" + str(idx + 1))))
+                ConvBNLayer(
+                    num_channels=num_filters_list[idx] * 4,
+                    num_filters=last_num_filters[idx],
+                    filter_size=3,
+                    stride=2))

         self.conv_last = ConvBNLayer(
             num_channels=1024,
             num_filters=2048,
             filter_size=1,
-            stride=1,
-            name="cls_head_last_conv")
+            stride=1)

-        self.pool2d_avg = AdaptiveAvgPool2D(1)
+        self.avg_pool = nn.AdaptiveAvgPool2D(1)

         stdv = 1.0 / math.sqrt(2048 * 1.0)

-        self.out = nn.Linear(
+        self.fc = nn.Linear(
             2048,
-            class_dim,
-            weight_attr=ParamAttr(
-                initializer=Uniform(-stdv, stdv), name="fc_weights"),
-            bias_attr=ParamAttr(name="fc_offset"))
+            class_num,
+            weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)))

-    def forward(self, input, res_dict=None):
-        conv1 = self.conv_layer1_1(input)
-        conv2 = self.conv_layer1_2(conv1)
+    def forward(self, x, res_dict=None):
+        x = self.conv_layer1_1(x)
+        x = self.conv_layer1_2(x)

-        la1 = self.la1(conv2)
+        x = self.layer1(x)

-        tr1 = self.tr1([la1])
-        st2 = self.st2(tr1)
+        tr1_1 = self.conv_tr1_1(x)
+        tr1_2 = self.conv_tr1_2(x)
+        x = self.st2([tr1_1, tr1_2])

-        tr2 = self.tr2(st2)
-        st3 = self.st3(tr2)
+        tr2 = self.conv_tr2(x[-1])
+        x.append(tr2)
+        x = self.st3(x)

-        tr3 = self.tr3(st3)
-        st4 = self.st4(tr3)
+        tr3 = self.conv_tr3(x[-1])
+        x.append(tr3)
+        x = self.st4(x)

-        last_cls = self.last_cls(st4)
+        x = self.last_cls(x)

-        y = last_cls[0]
+        y = x[0]
         for idx in range(3):
-            y = paddle.add(last_cls[idx + 1], self.cls_head_conv_list[idx](y))
+            y = paddle.add(x[idx + 1], self.cls_head_conv_list[idx](y))

         y = self.conv_last(y)
-        y = self.pool2d_avg(y)
+        y = self.avg_pool(y)
         y = paddle.reshape(y, shape=[-1, y.shape[1]])
-        y = self.out(y)
+        y = self.fc(y)
         return y
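A smoke test of the refactored backbone as it stands after this commit might look like the following; it assumes PaddleClas is importable from the working directory, and the width and class_num arguments come from the new HRNet signature in the diff above:

import paddle
from ppcls.arch.backbone.legendary_models.hrnet import HRNet

# Build the smallest variant with a reduced head for a quick shape check.
model = HRNet(width=18, class_num=10)
logits = model(paddle.randn([1, 3, 224, 224]))
print(logits.shape)  # [1, 10]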