PaddlePaddle / PaddleSeg, commit ce9ec972 (unverified)

Merge pull request #304 from wuyefeilin/develop

Authored by wuzewu on Jun 29, 2020; committed via GitHub on Jun 29, 2020.
Parents: 08232bbb, 0d9aa902
Showing 5 changed files with 461 additions and 3 deletions:

  docs/configs/model_deeplabv3p_group.md    +1    -1
  pdseg/models/backbone/resnet_vd.py        +407  -0
  pdseg/models/modeling/deeplab.py          +46   -1
  pdseg/reader.py                           +2    -0
  pdseg/utils/config.py                     +5    -1
docs/configs/model_deeplabv3p_group.md (+1, -1)

@@ -4,7 +4,7 @@ The MODEL.DEEPLAB sub-group stores all configuration related to the DeepLabv3+ model
 ...
 ## `BACKBONE`
-Backbone network used by DeepLabV3+; two options are supported: `mobilenetv2` and `xception65`.
+Backbone network used by DeepLabV3+; supported options: `mobilenetv2`, `xception65`, `xception41`, `resnet50_vd`, `resnet101_vd`.

 ### Default value
 ...
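In practice the documented option maps to a single key on pdseg's global config object. A minimal sketch, assuming the `cfg` object defined in pdseg/utils/config.py and imported the way the pdseg modules import it:

from utils.config import cfg

# Any value listed above is accepted; resnet50_vd / resnet101_vd are the new options.
cfg.MODEL.DEEPLAB.BACKBONE = "resnet50_vd"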
pdseg/models/backbone/resnet_vd.py (new file, mode 100644, +407 -0)

# coding: utf8
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math

import numpy as np
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr

__all__ = [
    "ResNet", "ResNet18", "ResNet34", "ResNet50", "ResNet101", "ResNet152"
]

train_parameters = {
    "input_size": [3, 224, 224],
    "input_mean": [0.485, 0.456, 0.406],
    "input_std": [0.229, 0.224, 0.225],
    "learning_strategy": {
        "name": "piecewise_decay",
        "batch_size": 256,
        "epochs": [30, 60, 90],
        "steps": [0.1, 0.01, 0.001, 0.0001]
    }
}


class ResNet():
    def __init__(self,
                 layers=50,
                 scale=1.0,
                 stem=None,
                 lr_mult_list=[1.0, 1.0, 1.0, 1.0, 1.0]):
        self.params = train_parameters
        self.layers = layers
        self.scale = scale
        self.stem = stem
        self.lr_mult_list = lr_mult_list
        assert len(self.lr_mult_list) == 5, \
            "lr_mult_list length in ResNet must be 5 but got {}!!".format(
                len(self.lr_mult_list))
        self.curr_stage = 0

    def net(self,
            input,
            class_dim=1000,
            end_points=None,
            decode_points=None,
            resize_points=None,
            dilation_dict=None):
        layers = self.layers
        supported_layers = [18, 34, 50, 101, 152]
        mult_grid = [1, 2, 4]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(
                supported_layers, layers)
        decode_ends = dict()

        def check_points(count, points):
            if points is None:
                return False
            else:
                if isinstance(points, list):
                    return (True if count in points else False)
                else:
                    return (True if count == points else False)

        def get_dilated_rate(dilation_dict, idx):
            if dilation_dict is None or idx not in dilation_dict:
                return 1
            else:
                return dilation_dict[idx]

        if layers == 18:
            depth = [2, 2, 2, 2]
        elif layers == 34 or layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]
        num_filters = [64, 128, 256, 512]

        if self.stem == 'icnet' or self.stem == 'pspnet' or self.stem == 'deeplab':
            conv = self.conv_bn_layer(
                input=input,
                num_filters=int(32 * self.scale),
                filter_size=3,
                stride=2,
                act='relu',
                name="conv1_1")
            conv = self.conv_bn_layer(
                input=conv,
                num_filters=int(32 * self.scale),
                filter_size=3,
                stride=1,
                act='relu',
                name="conv1_2")
            conv = self.conv_bn_layer(
                input=conv,
                num_filters=int(64 * self.scale),
                filter_size=3,
                stride=1,
                act='relu',
                name="conv1_3")
        else:
            conv = self.conv_bn_layer(
                input=input,
                num_filters=int(64 * self.scale),
                filter_size=7,
                stride=2,
                act='relu',
                name="conv1")

        conv = fluid.layers.pool2d(
            input=conv,
            pool_size=3,
            pool_stride=2,
            pool_padding=1,
            pool_type='max')

        layer_count = 1
        if check_points(layer_count, decode_points):
            decode_ends[layer_count] = conv

        if check_points(layer_count, end_points):
            return conv, decode_ends

        if layers >= 50:
            for block in range(len(depth)):
                self.curr_stage += 1
                for i in range(depth[block]):
                    if layers in [101, 152] and block == 2:
                        if i == 0:
                            conv_name = "res" + str(block + 2) + "a"
                        else:
                            conv_name = "res" + str(block + 2) + "b" + str(i)
                    else:
                        conv_name = "res" + str(block + 2) + chr(97 + i)
                    dilation_rate = get_dilated_rate(dilation_dict, block)
                    if block == 3:
                        dilation_rate = dilation_rate * mult_grid[i]

                    conv = self.bottleneck_block(
                        input=conv,
                        num_filters=int(num_filters[block] * self.scale),
                        stride=2
                        if i == 0 and block != 0 and dilation_rate == 1 else 1,
                        name=conv_name,
                        is_first=block == i == 0,
                        dilation=dilation_rate)
                    layer_count += 3

                    if check_points(layer_count, decode_points):
                        decode_ends[layer_count] = conv

                    if check_points(layer_count, end_points):
                        return conv, decode_ends

                    if check_points(layer_count, resize_points):
                        conv = self.interp(
                            conv,
                            np.ceil(
                                np.array(conv.shape[2:]).astype('int32') / 2))

            pool = fluid.layers.pool2d(
                input=conv, pool_size=7, pool_type='avg', global_pooling=True)
            stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
            out = fluid.layers.fc(
                input=pool,
                size=class_dim,
                param_attr=fluid.param_attr.ParamAttr(
                    initializer=fluid.initializer.Uniform(-stdv, stdv)))
        else:
            for block in range(len(depth)):
                self.curr_stage += 1
                for i in range(depth[block]):
                    conv_name = "res" + str(block + 2) + chr(97 + i)
                    conv = self.basic_block(
                        input=conv,
                        num_filters=num_filters[block],
                        stride=2 if i == 0 and block != 0 else 1,
                        is_first=block == i == 0,
                        name=conv_name)
                    layer_count += 2
                    if check_points(layer_count, decode_points):
                        decode_ends[layer_count] = conv

                    if check_points(layer_count, end_points):
                        return conv, decode_ends

            pool = fluid.layers.pool2d(
                input=conv, pool_size=7, pool_type='avg', global_pooling=True)
            stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
            out = fluid.layers.fc(
                input=pool,
                size=class_dim,
                param_attr=fluid.param_attr.ParamAttr(
                    initializer=fluid.initializer.Uniform(-stdv, stdv)))
        return out

    def zero_padding(self, input, padding):
        return fluid.layers.pad(
            input, [0, 0, 0, 0, padding, padding, padding, padding])

    def interp(self, input, out_shape):
        out_shape = list(out_shape.astype("int32"))
        return fluid.layers.resize_bilinear(input, out_shape=out_shape)

    def conv_bn_layer(self,
                      input,
                      num_filters,
                      filter_size,
                      stride=1,
                      dilation=1,
                      groups=1,
                      act=None,
                      name=None):
        lr_mult = self.lr_mult_list[self.curr_stage]
        if self.stem == 'pspnet':
            bias_attr = ParamAttr(name=name + "_biases")
        else:
            bias_attr = False

        conv = fluid.layers.conv2d(
            input=input,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2 if dilation == 1 else 0,
            dilation=dilation,
            groups=groups,
            act=None,
            param_attr=ParamAttr(
                name=name + "_weights", learning_rate=lr_mult),
            bias_attr=bias_attr,
            name=name + '.conv2d.output.1')

        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]
        return fluid.layers.batch_norm(
            input=conv,
            act=act,
            name=bn_name + '.output.1',
            param_attr=ParamAttr(
                name=bn_name + '_scale', learning_rate=lr_mult),
            bias_attr=ParamAttr(bn_name + '_offset', learning_rate=lr_mult),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance', )

    def conv_bn_layer_new(self,
                          input,
                          num_filters,
                          filter_size,
                          stride=1,
                          groups=1,
                          act=None,
                          name=None):
        lr_mult = self.lr_mult_list[self.curr_stage]
        pool = fluid.layers.pool2d(
            input=input,
            pool_size=2,
            pool_stride=2,
            pool_padding=0,
            pool_type='avg',
            ceil_mode=True)

        conv = fluid.layers.conv2d(
            input=pool,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=1,
            padding=(filter_size - 1) // 2,
            groups=groups,
            act=None,
            param_attr=ParamAttr(
                name=name + "_weights", learning_rate=lr_mult),
            bias_attr=False)
        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]
        return fluid.layers.batch_norm(
            input=conv,
            act=act,
            param_attr=ParamAttr(
                name=bn_name + '_scale', learning_rate=lr_mult),
            bias_attr=ParamAttr(bn_name + '_offset', learning_rate=lr_mult),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')

    def shortcut(self, input, ch_out, stride, is_first, name):
        ch_in = input.shape[1]
        if ch_in != ch_out or stride != 1:
            if is_first or stride == 1:
                return self.conv_bn_layer(input, ch_out, 1, stride, name=name)
            else:
                return self.conv_bn_layer_new(
                    input, ch_out, 1, stride, name=name)
        elif is_first:
            return self.conv_bn_layer(input, ch_out, 1, stride, name=name)
        else:
            return input

    def bottleneck_block(self,
                         input,
                         num_filters,
                         stride,
                         name,
                         is_first=False,
                         dilation=1):
        conv0 = self.conv_bn_layer(
            input=input,
            num_filters=num_filters,
            filter_size=1,
            dilation=1,
            stride=1,
            act='relu',
            name=name + "_branch2a")
        if dilation > 1:
            conv0 = self.zero_padding(conv0, dilation)
        conv1 = self.conv_bn_layer(
            input=conv0,
            num_filters=num_filters,
            filter_size=3,
            dilation=dilation,
            stride=stride,
            act='relu',
            name=name + "_branch2b")
        conv2 = self.conv_bn_layer(
            input=conv1,
            num_filters=num_filters * 4,
            dilation=1,
            filter_size=1,
            act=None,
            name=name + "_branch2c")
        short = self.shortcut(
            input,
            num_filters * 4,
            stride,
            is_first=is_first,
            name=name + "_branch1")
        return fluid.layers.elementwise_add(
            x=short, y=conv2, act='relu', name=name + ".add.output.5")

    def basic_block(self, input, num_filters, stride, is_first, name):
        conv0 = self.conv_bn_layer(
            input=input,
            num_filters=num_filters,
            filter_size=3,
            act='relu',
            stride=stride,
            name=name + "_branch2a")
        conv1 = self.conv_bn_layer(
            input=conv0,
            num_filters=num_filters,
            filter_size=3,
            act=None,
            name=name + "_branch2b")
        short = self.shortcut(
            input, num_filters, stride, is_first, name=name + "_branch1")
        return fluid.layers.elementwise_add(x=short, y=conv1, act='relu')


def ResNet18():
    model = ResNet(layers=18)
    return model


def ResNet34():
    model = ResNet(layers=34)
    return model


def ResNet50():
    model = ResNet(layers=50)
    return model


def ResNet101():
    model = ResNet(layers=101)
    return model


def ResNet152():
    model = ResNet(layers=152)
    return model
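For orientation, the new backbone is meant to be driven the way `resnet_vd()` in deeplab.py (next file) drives it: construct `ResNet` with a stem and per-stage LR multipliers, then call `net()` with `end_points`, `decode_points`, and a `dilation_dict`. A standalone sketch under those assumptions, with the `ResNet` class above in scope; shapes and variable names are illustrative, not part of the commit:

import paddle.fluid as fluid

# Hypothetical direct use of the backbone: ResNet50-vd with output_stride 16.
image = fluid.data(name='image', shape=[-1, 3, 513, 513], dtype='float32')
backbone = ResNet(layers=50, stem='deeplab',
                  lr_mult_list=[1.0, 1.0, 1.0, 1.0, 1.0])
# Stop at layer 49 (= layers - 1) and keep layer 10 as the low-level feature;
# dilating block 3 keeps the effective stride at 16, as deeplab.py does.
feats, decode_ends = backbone.net(
    image, end_points=49, decode_points=10, dilation_dict={3: 2})
low_level_feat = decode_ends[10]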
pdseg/models/modeling/deeplab.py (+46, -1)

@@ -26,6 +26,7 @@ from models.libs.model_libs import conv
 from models.libs.model_libs import separate_conv
 from models.backbone.mobilenet_v2 import MobileNetV2 as mobilenet_backbone
 from models.backbone.xception import Xception as xception_backbone
+from models.backbone.resnet_vd import ResNet as resnet_vd_backbone


 def encoder(input):
 ...

@@ -227,14 +228,58 @@ def xception(input):
     return data, decode_shortcut


+def resnet_vd(input):
+    # backbone: resnet_vd, options are resnet50_vd and resnet101_vd
+    # end_points: the resnet layer index at which the backbone stops
+    # dilation_dict: resnet block indices and their dilated-convolution rates
+    backbone = cfg.MODEL.DEEPLAB.BACKBONE
+    if '50' in backbone:
+        layers = 50
+    elif '101' in backbone:
+        layers = 101
+    else:
+        raise Exception("resnet_vd backbone only support layers 50 or 101")
+    output_stride = cfg.MODEL.DEEPLAB.OUTPUT_STRIDE
+    end_points = layers - 1
+    decode_point = 10
+    if output_stride == 8:
+        dilation_dict = {2: 2, 3: 4}
+    elif output_stride == 16:
+        dilation_dict = {3: 2}
+    else:
+        raise Exception("deeplab only support stride 8 or 16")
+    lr_mult_list = cfg.MODEL.DEEPLAB.BACKBONE_LR_MULT_LIST
+    model = resnet_vd_backbone(layers, stem='deeplab', lr_mult_list=lr_mult_list)
+    data, decode_shortcuts = model.net(
+        input,
+        end_points=end_points,
+        decode_points=decode_point,
+        dilation_dict=dilation_dict)
+    decode_shortcut = decode_shortcuts[decode_point]
+    return data, decode_shortcut
+
+
 def deeplabv3p(img, num_classes):
     # Backbone setting: xception or mobilenetv2
     if 'xception' in cfg.MODEL.DEEPLAB.BACKBONE:
         data, decode_shortcut = xception(img)
+        if cfg.MODEL.DEEPLAB.BACKBONE_LR_MULT_LIST is not None:
+            print('xception backbone do not support BACKBONE_LR_MULT_LIST setting')
     elif 'mobilenet' in cfg.MODEL.DEEPLAB.BACKBONE:
         data, decode_shortcut = mobilenetv2(img)
+        if cfg.MODEL.DEEPLAB.BACKBONE_LR_MULT_LIST is not None:
+            print('mobilenetv2 backbone do not support BACKBONE_LR_MULT_LIST setting')
+    elif 'resnet' in cfg.MODEL.DEEPLAB.BACKBONE:
+        data, decode_shortcut = resnet_vd(img)
     else:
-        raise Exception("deeplab only support xception and mobilenet backbone")
+        raise Exception(
+            "deeplab only support xception, mobilenet, and resnet_vd backbone")
     # Encoder/decoder settings
     cfg.MODEL.DEFAULT_EPSILON = 1e-5
 ...
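Taken together with the config changes below, the new branch would be exercised roughly as follows; the import paths and the handling of the returned value are assumptions about pdseg's layout rather than part of this diff:

import paddle.fluid as fluid
from utils.config import cfg
from models.modeling.deeplab import deeplabv3p

# Sketch: route DeepLabv3+ onto the resnet_vd backbone.
cfg.MODEL.DEEPLAB.BACKBONE = "resnet101_vd"
cfg.MODEL.DEEPLAB.OUTPUT_STRIDE = 8   # resnet_vd() maps this to dilation_dict={2: 2, 3: 4}
cfg.MODEL.DEEPLAB.BACKBONE_LR_MULT_LIST = [1.0, 1.0, 1.0, 1.0, 1.0]

img = fluid.data(name='image', shape=[-1, 3, 769, 769], dtype='float32')
logit = deeplabv3p(img, num_classes=19)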
pdseg/reader.py (+2, -0)

@@ -318,6 +318,8 @@ class SegDataset(object):
             raise ValueError("Dataset mode={} Error!".format(mode))

         # Normalize image
+        if cfg.AUG.TO_RGB:
+            img = img[..., ::-1]
         img = self.normalize_image(img)
         if ModelPhase.is_train(mode) or ModelPhase.is_eval(mode):
 ...
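The two added lines flip the channel axis from OpenCV's BGR layout to RGB whenever `cfg.AUG.TO_RGB` is enabled. The `[..., ::-1]` idiom simply reverses the last axis, as this small standalone check illustrates (not part of the commit):

import numpy as np

bgr = np.zeros((4, 4, 3), dtype=np.uint8)
bgr[..., 0] = 255                # fill the blue channel (index 0 in BGR)
rgb = bgr[..., ::-1]             # reverse the channel axis: BGR -> RGB
assert rgb[..., 2].max() == 255  # blue now sits at index 2, the B position of RGB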
pdseg/utils/config.py (+5, -1)

@@ -117,6 +117,8 @@ cfg.AUG.RICH_CROP.CONTRAST_JITTER_RATIO = 0.5
 cfg.AUG.RICH_CROP.BLUR = False
 # Fraction (0-1) of images to which blur is applied
 cfg.AUG.RICH_CROP.BLUR_RATIO = 0.1
+# Whether to convert images to RGB
+cfg.AUG.TO_RGB = True

 ########################### Training configuration ##########################################
 # Model save path
 ...

@@ -192,7 +194,7 @@ cfg.MODEL.FP16 = False
 cfg.MODEL.SCALE_LOSS = "DYNAMIC"

 ########################## DeepLab model configuration ####################################
-# DeepLab backbone options: xception_65, mobilenetv2
+# DeepLab backbone options: xception_65, xception_41, xception_71, mobilenetv2, resnet50_vd, resnet101_vd
 cfg.MODEL.DEEPLAB.BACKBONE = "xception_65"
 # DeepLab output stride
 cfg.MODEL.DEEPLAB.OUTPUT_STRIDE = 16
 ...

@@ -206,6 +208,8 @@ cfg.MODEL.DEEPLAB.ENABLE_DECODER = True
 cfg.MODEL.DEEPLAB.ASPP_WITH_SEP_CONV = True
 # Whether the decoder uses separable convolution
 cfg.MODEL.DEEPLAB.DECODER_USE_SEP_CONV = True
+# Per-stage learning-rate multipliers for the resnet_vd backbone
+cfg.MODEL.DEEPLAB.BACKBONE_LR_MULT_LIST = None

 ########################## UNET model configuration #######################################
 # Upsampling method, defaults to bilinear interpolation
 ...
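`BACKBONE_LR_MULT_LIST` defaults to `None`; when a list is supplied, resnet_vd applies one multiplier per stage (the stem plus the four residual stages, matching the length-5 assertion in resnet_vd.py). The values below are purely illustrative:

from utils.config import cfg

# Hypothetical schedule: damp updates in the early stages, full LR in the last stage.
cfg.MODEL.DEEPLAB.BACKBONE = "resnet50_vd"
cfg.MODEL.DEEPLAB.BACKBONE_LR_MULT_LIST = [0.1, 0.1, 0.2, 0.5, 1.0]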