Commit fad18563
Authored Sep 22, 2020 by chenguowei01
Parent: a960fe3e

update api to 2.0beta

Showing 2 changed files with 121 additions and 213 deletions:
- dygraph/paddleseg/models/backbones/hrnet.py (+119, -184)
- dygraph/paddleseg/models/fcn.py (+2, -29)

dygraph/paddleseg/models/backbones/hrnet.py
@@ -16,16 +16,17 @@ import math
 import os

 import paddle
-import paddle.fluid as fluid
-from paddle.fluid.param_attr import ParamAttr
-from paddle.fluid.layer_helper import LayerHelper
-from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear
-from paddle.fluid.initializer import Normal
+from paddle import ParamAttr
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle.nn import SyncBatchNorm as BatchNorm
+from paddle.nn import Conv2d, Linear
+from paddle.nn import AdaptiveAvgPool2d, MaxPool2d, AvgPool2d

 from paddleseg.cvlibs import manager
 from paddleseg.utils import utils
 from paddleseg.cvlibs import param_init
 from paddleseg.models.common import layer_libs

 __all__ = [
     "HRNet_W18_Small_V1", "HRNet_W18_Small_V2", "HRNet_W18", "HRNet_W30",
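Note (not part of the commit): the import swap above sets the pattern for the whole file; fluid-era layers are replaced by paddle.nn modules and fluid.layers ops by paddle / paddle.nn.functional calls. Below is a minimal sketch of how the newly imported pieces compose into the conv + batch-norm + relu blocks used throughout, assuming the 2.0-beta names imported above. layer_libs.ConvBNReLU itself lives in paddleseg.models.common.layer_libs and its real implementation is not shown in this diff; the class below is only a rough stand-in with illustrative parameters.

# Illustrative sketch only; not the real layer_libs.ConvBNReLU.
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn import SyncBatchNorm as BatchNorm


class ConvBNReLUSketch(nn.Layer):
    """Rough stand-in for the conv + batch-norm + relu blocks used in this file."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1):
        super(ConvBNReLUSketch, self).__init__()
        # nn.Conv2d replaces fluid.dygraph.nn.Conv2D; an integer padding is used
        # here instead of the 'same' string to keep the sketch conservative.
        self._conv = nn.Conv2d(in_channels, out_channels, kernel_size,
                               stride=stride, padding=kernel_size // 2,
                               bias_attr=False)
        self._batch_norm = BatchNorm(out_channels)

    def forward(self, x):
        # F.relu replaces fluid.layers.relu.
        return F.relu(self._batch_norm(self._conv(x)))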
@@ -33,7 +34,7 @@ __all__ = [
 ]


-class HRNet(fluid.dygraph.Layer):
+class HRNet(nn.Layer):
     """
     HRNet:Deep High-Resolution Representation Learning for Visual Recognition
     https://arxiv.org/pdf/1908.07919.pdf.
@@ -85,21 +86,21 @@ class HRNet(fluid.dygraph.Layer):
         self.stage4_num_channels = stage4_num_channels
         self.has_se = has_se

-        self.conv_layer1_1 = ConvBNLayer(
-            num_channels=3,
-            num_filters=64,
-            filter_size=3,
+        self.conv_layer1_1 = layer_libs.ConvBNReLU(
+            in_channels=3,
+            out_channels=64,
+            kernel_size=3,
             stride=2,
-            act='relu',
-            name="layer1_1")
+            padding='same',
+            bias_attr=False)

-        self.conv_layer1_2 = ConvBNLayer(
-            num_channels=64,
-            num_filters=64,
-            filter_size=3,
+        self.conv_layer1_2 = layer_libs.ConvBNReLU(
+            in_channels=64,
+            out_channels=64,
+            kernel_size=3,
             stride=2,
-            act='relu',
-            name="layer1_2")
+            padding='same',
+            bias_attr=False)

         self.la1 = Layer1(
             num_channels=64,
@@ -162,45 +163,15 @@ class HRNet(fluid.dygraph.Layer):
         st4 = self.st4(tr3)

         x0_h, x0_w = st4[0].shape[2:]
-        x1 = fluid.layers.resize_bilinear(st4[1], out_shape=(x0_h, x0_w))
-        x2 = fluid.layers.resize_bilinear(st4[2], out_shape=(x0_h, x0_w))
-        x3 = fluid.layers.resize_bilinear(st4[3], out_shape=(x0_h, x0_w))
-        x = fluid.layers.concat([st4[0], x1, x2, x3], axis=1)
+        x1 = F.resize_bilinear(st4[1], out_shape=(x0_h, x0_w))
+        x2 = F.resize_bilinear(st4[2], out_shape=(x0_h, x0_w))
+        x3 = F.resize_bilinear(st4[3], out_shape=(x0_h, x0_w))
+        x = paddle.concat([st4[0], x1, x2, x3], axis=1)

         return [x]


-class ConvBNLayer(fluid.dygraph.Layer):
-    def __init__(self,
-                 num_channels,
-                 num_filters,
-                 filter_size,
-                 stride=1,
-                 groups=1,
-                 act="relu",
-                 name=None):
-        super(ConvBNLayer, self).__init__()
-
-        self._conv = Conv2D(
-            num_channels=num_channels,
-            num_filters=num_filters,
-            filter_size=filter_size,
-            stride=stride,
-            padding=(filter_size - 1) // 2,
-            groups=groups,
-            bias_attr=False)
-        self._batch_norm = BatchNorm(num_filters)
-        self.act = act
-
-    def forward(self, input):
-        y = self._conv(input)
-        y = self._batch_norm(y)
-        if self.act == 'relu':
-            y = fluid.layers.relu(y)
-        return y
-
-
-class Layer1(fluid.dygraph.Layer):
+class Layer1(nn.Layer):
     def __init__(self,
                  num_channels,
                  num_filters,
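Note (not part of the commit): the rewritten forward pass above upsamples every lower-resolution branch to the first branch's spatial size and fuses them along the channel axis, now with F.resize_bilinear and paddle.concat instead of their fluid.layers counterparts. A small standalone sketch of that fusion step, following the same calls used in the new code, with made-up batch size and channel counts:

# Illustrative only; mirrors the calls in the new forward() above.
import paddle
import paddle.nn.functional as F

# Four branch outputs at decreasing resolution (shapes are illustrative).
st4 = [paddle.ones([1, 18, 64, 64]), paddle.ones([1, 36, 32, 32]),
       paddle.ones([1, 72, 16, 16]), paddle.ones([1, 144, 8, 8])]

x0_h, x0_w = st4[0].shape[2:]
# Upsample every lower-resolution branch to the first branch's size,
# then fuse along the channel axis.
ups = [F.resize_bilinear(t, out_shape=(x0_h, x0_w)) for t in st4[1:]]
x = paddle.concat([st4[0]] + ups, axis=1)  # shape [1, 18 + 36 + 72 + 144, 64, 64]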
@@ -230,7 +201,7 @@ class Layer1(fluid.dygraph.Layer):
         return conv


-class TransitionLayer(fluid.dygraph.Layer):
+class TransitionLayer(nn.Layer):
     def __init__(self, in_channels, out_channels, name=None):
         super(TransitionLayer, self).__init__()
@@ -243,20 +214,22 @@ class TransitionLayer(fluid.dygraph.Layer):
                 if in_channels[i] != out_channels[i]:
                     residual = self.add_sublayer(
                         "transition_{}_layer_{}".format(name, i + 1),
-                        ConvBNLayer(
-                            num_channels=in_channels[i],
-                            num_filters=out_channels[i],
-                            filter_size=3,
-                            name=name + '_layer_' + str(i + 1)))
+                        layer_libs.ConvBNReLU(
+                            in_channels=in_channels[i],
+                            out_channels=out_channels[i],
+                            kernel_size=3,
+                            padding='same',
+                            bias_attr=False))
                 else:
                     residual = self.add_sublayer(
                         "transition_{}_layer_{}".format(name, i + 1),
-                        ConvBNLayer(
-                            num_channels=in_channels[-1],
-                            num_filters=out_channels[i],
-                            filter_size=3,
+                        layer_libs.ConvBNReLU(
+                            in_channels=in_channels[-1],
+                            out_channels=out_channels[i],
+                            kernel_size=3,
                             stride=2,
-                            name=name + '_layer_' + str(i + 1)))
+                            padding='same',
+                            bias_attr=False))
             self.conv_bn_func_list.append(residual)

     def forward(self, input):
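Note (not part of the commit): TransitionLayer keeps the add_sublayer pattern after the migration, registering each conditionally built conv block under a generated name so its parameters are tracked even though the blocks are stored in a plain Python list. A minimal sketch of that pattern, with made-up layer names and sizes:

# Illustrative only; shows the dynamic-registration pattern, not PaddleSeg code.
import paddle.nn as nn


class BranchListSketch(nn.Layer):
    def __init__(self, widths):
        super(BranchListSketch, self).__init__()
        self.ops = []
        for i, w in enumerate(widths):
            # add_sublayer registers the layer under a name so its parameters
            # are tracked even though the handle sits in a plain list.
            op = self.add_sublayer("branch_{}".format(i + 1), nn.Linear(w, w))
            self.ops.append(op)

    def forward(self, xs):
        return [op(x) for op, x in zip(self.ops, xs)]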
@@ -272,7 +245,7 @@ class TransitionLayer(fluid.dygraph.Layer):
         return outs


-class Branches(fluid.dygraph.Layer):
+class Branches(nn.Layer):
     def __init__(self,
                  num_blocks,
                  in_channels,
@@ -307,7 +280,7 @@ class Branches(fluid.dygraph.Layer):
         return outs


-class BottleneckBlock(fluid.dygraph.Layer):
+class BottleneckBlock(nn.Layer):
     def __init__(self,
                  num_channels,
                  num_filters,
@@ -320,34 +293,35 @@ class BottleneckBlock(fluid.dygraph.Layer):
         self.has_se = has_se
         self.downsample = downsample

-        self.conv1 = ConvBNLayer(
-            num_channels=num_channels,
-            num_filters=num_filters,
-            filter_size=1,
-            act="relu",
-            name=name + "_conv1",
-        )
-        self.conv2 = ConvBNLayer(
-            num_channels=num_filters,
-            num_filters=num_filters,
-            filter_size=3,
+        self.conv1 = layer_libs.ConvBNReLU(
+            in_channels=num_channels,
+            out_channels=num_filters,
+            kernel_size=1,
+            padding='same',
+            bias_attr=False)
+        self.conv2 = layer_libs.ConvBNReLU(
+            in_channels=num_filters,
+            out_channels=num_filters,
+            kernel_size=3,
             stride=stride,
-            act="relu",
-            name=name + "_conv2")
-        self.conv3 = ConvBNLayer(
-            num_channels=num_filters,
-            num_filters=num_filters * 4,
-            filter_size=1,
-            act=None,
-            name=name + "_conv3")
+            padding='same',
+            bias_attr=False)
+        self.conv3 = layer_libs.ConvBN(
+            in_channels=num_filters,
+            out_channels=num_filters * 4,
+            kernel_size=1,
+            padding='same',
+            bias_attr=False)

         if self.downsample:
-            self.conv_down = ConvBNLayer(
-                num_channels=num_channels,
-                num_filters=num_filters * 4,
-                filter_size=1,
-                act=None,
-                name=name + "_downsample")
+            self.conv_down = layer_libs.ConvBN(
+                in_channels=num_channels,
+                out_channels=num_filters * 4,
+                kernel_size=1,
+                padding='same',
+                bias_attr=False)

         if self.has_se:
             self.se = SELayer(
@@ -368,11 +342,12 @@ class BottleneckBlock(fluid.dygraph.Layer):
         if self.has_se:
             conv3 = self.se(conv3)

-        y = fluid.layers.elementwise_add(x=conv3, y=residual, act="relu")
+        y = conv3 + residual
+        y = F.relu(y)
         return y


-class BasicBlock(fluid.dygraph.Layer):
+class BasicBlock(nn.Layer):
     def __init__(self,
                  num_channels,
                  num_filters,
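Note (not part of the commit): the fused fluid op that added the residual and applied ReLU in one call is replaced by plain tensor arithmetic plus an explicit activation, as shown above for BottleneckBlock and again below for BasicBlock. A tiny standalone sketch of the replacement, with illustrative shapes:

# Illustrative only; mirrors the residual fusion rewrite in this commit.
import paddle
import paddle.nn.functional as F

conv_out = paddle.ones([1, 256, 32, 32])
residual = paddle.ones([1, 256, 32, 32])

# Before: y = fluid.layers.elementwise_add(x=conv_out, y=residual, act="relu")
y = conv_out + residual
y = F.relu(y)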
@@ -385,28 +360,27 @@ class BasicBlock(fluid.dygraph.Layer):
         self.has_se = has_se
         self.downsample = downsample

-        self.conv1 = ConvBNLayer(
-            num_channels=num_channels,
-            num_filters=num_filters,
-            filter_size=3,
+        self.conv1 = layer_libs.ConvBNReLU(
+            in_channels=num_channels,
+            out_channels=num_filters,
+            kernel_size=3,
             stride=stride,
-            act="relu",
-            name=name + "_conv1")
-        self.conv2 = ConvBNLayer(
-            num_channels=num_filters,
-            num_filters=num_filters,
-            filter_size=3,
-            stride=1,
-            act=None,
-            name=name + "_conv2")
+            padding='same',
+            bias_attr=False)
+        self.conv2 = layer_libs.ConvBN(
+            in_channels=num_filters,
+            out_channels=num_filters,
+            kernel_size=3,
+            padding='same',
+            bias_attr=False)

         if self.downsample:
-            self.conv_down = ConvBNLayer(
-                num_channels=num_channels,
-                num_filters=num_filters * 4,
-                filter_size=1,
-                act="relu",
-                name=name + "_downsample")
+            self.conv_down = layer_libs.ConvBNReLU(
+                in_channels=num_channels,
+                out_channels=num_filters,
+                kernel_size=1,
+                padding='same',
+                bias_attr=False)

         if self.has_se:
             self.se = SELayer(
@@ -426,15 +400,16 @@ class BasicBlock(fluid.dygraph.Layer):
         if self.has_se:
             conv2 = self.se(conv2)

-        y = fluid.layers.elementwise_add(x=conv2, y=residual, act="relu")
+        y = conv2 + residual
+        y = F.relu(y)
         return y


-class SELayer(fluid.dygraph.Layer):
+class SELayer(nn.Layer):
     def __init__(self, num_channels, num_filters, reduction_ratio, name=None):
         super(SELayer, self).__init__()

-        self.pool2d_gap = Pool2D(pool_type='avg', global_pooling=True)
+        self.pool2d_gap = AdaptiveAvgPool2d(1)

         self._num_channels = num_channels
@@ -445,9 +420,7 @@ class SELayer(fluid.dygraph.Layer):
             med_ch,
             act="relu",
             param_attr=ParamAttr(
-                initializer=fluid.initializer.Uniform(-stdv, stdv),
-                name=name + "_sqz_weights"),
-            bias_attr=ParamAttr(name=name + '_sqz_offset'))
+                initializer=nn.initializer.Uniform(-stdv, stdv)))

         stdv = 1.0 / math.sqrt(med_ch * 1.0)
         self.excitation = Linear(
@@ -455,22 +428,20 @@ class SELayer(fluid.dygraph.Layer):
             num_filters,
             act="sigmoid",
             param_attr=ParamAttr(
-                initializer=fluid.initializer.Uniform(-stdv, stdv),
-                name=name + "_exc_weights"),
-            bias_attr=ParamAttr(name=name + '_exc_offset'))
+                initializer=nn.initializer.Uniform(-stdv, stdv)))

     def forward(self, input):
         pool = self.pool2d_gap(input)
-        pool = fluid.layers.reshape(pool, shape=[-1, self._num_channels])
+        pool = paddle.reshape(pool, shape=[-1, self._num_channels])
         squeeze = self.squeeze(pool)
         excitation = self.excitation(squeeze)
-        excitation = fluid.layers.reshape(
+        excitation = paddle.reshape(
             excitation, shape=[-1, self._num_channels, 1, 1])
         out = input * excitation
         return out


-class Stage(fluid.dygraph.Layer):
+class Stage(nn.Layer):
     def __init__(self,
                  num_channels,
                  num_modules,
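Note (not part of the commit): in the rewritten SELayer, AdaptiveAvgPool2d(1) takes over global average pooling from Pool2D(global_pooling=True), and paddle.reshape replaces fluid.layers.reshape around the channel gate. A short standalone sketch of that squeeze-and-excitation data flow, with the two Linear layers elided and a placeholder gate standing in for their output (shapes and channel counts are illustrative):

# Illustrative only; mirrors the reshaping and gating in the new SELayer.forward.
import paddle
from paddle.nn import AdaptiveAvgPool2d

num_channels = 64
x = paddle.ones([2, num_channels, 16, 16])

gap = AdaptiveAvgPool2d(1)                                  # global average pool
pool = paddle.reshape(gap(x), shape=[-1, num_channels])     # [2, 64]

# The squeeze/excitation Linear layers are omitted here; a gate of the right
# shape stands in for their sigmoid output.
gate = paddle.ones([2, num_channels])
gate = paddle.reshape(gate, shape=[-1, num_channels, 1, 1])
out = x * gate                                              # channel-wise re-weighting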
@@ -514,7 +485,7 @@ class Stage(fluid.dygraph.Layer):
         return out


-class HighResolutionModule(fluid.dygraph.Layer):
+class HighResolutionModule(nn.Layer):
     def __init__(self,
                  num_channels,
                  num_blocks,
@@ -543,7 +514,7 @@ class HighResolutionModule(fluid.dygraph.Layer):
         return out


-class FuseLayers(fluid.dygraph.Layer):
+class FuseLayers(nn.Layer):
     def __init__(self,
                  in_channels,
                  out_channels,
@@ -561,14 +532,12 @@ class FuseLayers(fluid.dygraph.Layer):
                 if j > i:
                     residual_func = self.add_sublayer(
                         "residual_{}_layer_{}_{}".format(name, i + 1, j + 1),
-                        ConvBNLayer(
-                            num_channels=in_channels[j],
-                            num_filters=out_channels[i],
-                            filter_size=1,
-                            stride=1,
-                            act=None,
-                            name=name + '_layer_' + str(i + 1) + '_' + str(j + 1)))
+                        layer_libs.ConvBN(
+                            in_channels=in_channels[j],
+                            out_channels=out_channels[i],
+                            kernel_size=1,
+                            padding='same',
+                            bias_attr=False))
                     self.residual_func_list.append(residual_func)
                 elif j < i:
                     pre_num_filters = in_channels[j]
@@ -577,27 +546,25 @@ class FuseLayers(fluid.dygraph.Layer):
                         if k == i - j - 1:
                             residual_func = self.add_sublayer(
                                 "residual_{}_layer_{}_{}_{}".format(
                                     name, i + 1, j + 1, k + 1),
-                                ConvBNLayer(
-                                    num_channels=pre_num_filters,
-                                    num_filters=out_channels[i],
-                                    filter_size=3,
+                                layer_libs.ConvBN(
+                                    in_channels=pre_num_filters,
+                                    out_channels=out_channels[i],
+                                    kernel_size=3,
                                     stride=2,
-                                    act=None,
-                                    name=name + '_layer_' + str(i + 1) + '_' + str(j + 1) + '_' + str(k + 1)))
+                                    padding='same',
+                                    bias_attr=False))
                             pre_num_filters = out_channels[i]
                         else:
                             residual_func = self.add_sublayer(
                                 "residual_{}_layer_{}_{}_{}".format(
                                     name, i + 1, j + 1, k + 1),
-                                ConvBNLayer(
-                                    num_channels=pre_num_filters,
-                                    num_filters=out_channels[j],
-                                    filter_size=3,
+                                layer_libs.ConvBNReLU(
+                                    in_channels=pre_num_filters,
+                                    out_channels=out_channels[j],
+                                    kernel_size=3,
                                     stride=2,
-                                    act="relu",
-                                    name=name + '_layer_' + str(i + 1) + '_' + str(j + 1) + '_' + str(k + 1)))
+                                    padding='same',
+                                    bias_attr=False))
                             pre_num_filters = out_channels[j]
                         self.residual_func_list.append(residual_func)
@@ -612,54 +579,22 @@ class FuseLayers(fluid.dygraph.Layer):
                     y = self.residual_func_list[residual_func_idx](input[j])
                     residual_func_idx += 1

-                    y = fluid.layers.resize_bilinear(input=y, out_shape=residual_shape)
-                    residual = fluid.layers.elementwise_add(x=residual, y=y, act=None)
+                    y = F.resize_bilinear(input=y, out_shape=residual_shape)
+                    residual = residual + y
                 elif j < i:
                     y = input[j]
                     for k in range(i - j):
                         y = self.residual_func_list[residual_func_idx](y)
                         residual_func_idx += 1

-                    residual = fluid.layers.elementwise_add(x=residual, y=y, act=None)
-
-            layer_helper = LayerHelper(self.full_name(), act='relu')
-            residual = layer_helper.append_activation(residual)
+                    residual = residual + y
+
+            residual = F.relu(residual)
             outs.append(residual)

         return outs


-class LastClsOut(fluid.dygraph.Layer):
-    def __init__(self,
-                 num_channel_list,
-                 has_se,
-                 num_filters_list=[32, 64, 128, 256],
-                 name=None):
-        super(LastClsOut, self).__init__()
-
-        self.func_list = []
-        for idx in range(len(num_channel_list)):
-            func = self.add_sublayer(
-                "conv_{}_conv_{}".format(name, idx + 1),
-                BottleneckBlock(
-                    num_channels=num_channel_list[idx],
-                    num_filters=num_filters_list[idx],
-                    has_se=has_se,
-                    downsample=True,
-                    name=name + 'conv_' + str(idx + 1)))
-            self.func_list.append(func)
-
-    def forward(self, inputs):
-        outs = []
-        for idx, input in enumerate(inputs):
-            out = self.func_list[idx](input)
-            outs.append(out)
-        return outs
-
-
 @manager.BACKBONES.add_component
 def HRNet_W18_Small_V1(**kwargs):
     model = HRNet(
dygraph/paddleseg/models/fcn.py
@@ -72,10 +72,11 @@ class FCN(nn.Layer):
             channels = backbone_channels[0]

         self.backbone = backbone
-        self.conv_last_2 = ConvBNLayer(
+        self.conv_last_2 = layer_libs.ConvBNReLU(
             in_channels=backbone_channels[0],
             out_channels=channels,
             kernel_size=1,
+            padding='same',
             stride=1)
         self.conv_last_1 = Conv2d(
             in_channels=channels,
@@ -124,34 +125,6 @@ class FCN(nn.Layer):
             logger.warning('No pretrained model to load, train from scratch')


-class ConvBNLayer(nn.Layer):
-    def __init__(self,
-                 in_channels,
-                 out_channels,
-                 kernel_size,
-                 stride=1,
-                 groups=1,
-                 act="relu"):
-
-        super(ConvBNLayer, self).__init__()
-
-        self._conv = Conv2d(
-            in_channels=in_channels,
-            out_channels=out_channels,
-            kernel_size=kernel_size,
-            stride=stride,
-            padding=(kernel_size - 1) // 2,
-            groups=groups,
-            bias_attr=False)
-
-        self._batch_norm = BatchNorm(out_channels)
-        self.act = activation.Activation(act=act)
-
-    def forward(self, input):
-        y = self._conv(input)
-        y = self._batch_norm(y)
-        y = self.act(y)
-        return y
-
-
 @manager.MODELS.add_component
 def fcn_hrnet_w18_small_v1(*args, **kwargs):
     return FCN(
         backbone='HRNet_W18_Small_V1',
         backbone_channels=(240),
         **kwargs)
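Note (not part of the commit): with fcn.py's local ConvBNLayer deleted, both files now rely on the shared blocks in paddleseg.models.common.layer_libs, and both register their factories through the component manager, with FCN referring to the backbone by the string 'HRNet_W18_Small_V1'. Below is a hypothetical sketch of that registry pattern; the class and variable names here (ComponentManagerSketch, components) are made up for illustration and are not the actual implementation of paddleseg.cvlibs.manager.

# Hypothetical illustration of a component registry; not PaddleSeg code.
class ComponentManagerSketch:
    def __init__(self):
        self.components = {}

    def add_component(self, func):
        # Register the factory under its function name and return it unchanged,
        # so it can still be called directly.
        self.components[func.__name__] = func
        return func


BACKBONES = ComponentManagerSketch()


@BACKBONES.add_component
def HRNet_W18_Small_V1_sketch(**kwargs):
    return "backbone built with {}".format(kwargs)


# A model definition can then look the backbone up by its registered name:
builder = BACKBONES.components["HRNet_W18_Small_V1_sketch"]
print(builder(has_se=False))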