PaddlePaddle / PaddleSeg
Commit 11737433
Authored May 18, 2020 by chenguowei01
change fluid.layes.one_hot to fluid.one_hot
Parent: 5df30f99
Changes: 3 changed files, with 35 additions and 18 deletions (+35 −18)

contrib/HumanSeg/nets/seg_modules.py    +1 −1
contrib/RemoteSensing/nets/loss.py      +1 −1
pdseg/loss.py                           +33 −16
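The commit is a mechanical API migration: each call site swaps the fluid.layers.one_hot op for fluid.one_hot with the same input/depth arguments. A minimal sketch of the swap, assuming a Paddle 1.x fluid program (the fluid.data placeholder and the num_classes value are illustrative, not from the diff):

import paddle.fluid as fluid

num_classes = 2
label = fluid.data(name='label', shape=[None, 1], dtype='int64')

# Before:
# label_one_hot = fluid.layers.one_hot(input=label, depth=num_classes)
# After (the form this commit switches to):
label_one_hot = fluid.one_hot(input=label, depth=num_classes)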
contrib/HumanSeg/nets/seg_modules.py

@@ -34,7 +34,7 @@ def softmax_with_loss(logit,
         loss, probs = fluid.layers.softmax_with_cross_entropy(
             logit, label, ignore_index=ignore_index, return_softmax=True)
     else:
-        label_one_hot = fluid.layers.one_hot(input=label, depth=num_classes)
+        label_one_hot = fluid.one_hot(input=label, depth=num_classes)
         if isinstance(weight, list):
             assert len(
                 weight
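Both HumanSeg and RemoteSensing make this same one-line swap. For reference, what the call computes, as a NumPy sketch (the Paddle op runs in-graph; the one_hot helper below is a local illustrative stand-in):

import numpy as np

# Mirrors one_hot(input=label, depth=num_classes): each integer class id
# becomes a row with a single 1 at that index.
def one_hot(label, depth):
    out = np.zeros((label.size, depth), dtype='float32')
    out[np.arange(label.size), label.ravel()] = 1.0
    return out

print(one_hot(np.array([0, 1, 1]), depth=2))
# [[1. 0.]
#  [0. 1.]
#  [0. 1.]]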
contrib/RemoteSensing/nets/loss.py

@@ -34,7 +34,7 @@ def softmax_with_loss(logit,
         loss, probs = fluid.layers.softmax_with_cross_entropy(
             logit, label, ignore_index=ignore_index, return_softmax=True)
     else:
-        label_one_hot = fluid.layers.one_hot(input=label, depth=num_classes)
+        label_one_hot = fluid.one_hot(input=label, depth=num_classes)
         if isinstance(weight, list):
             assert len(
                 weight
pdseg/loss.py

@@ -20,7 +20,11 @@ import importlib
 from utils.config import cfg


 def softmax_with_loss(logit, label, ignore_mask=None, num_classes=2, weight=None):
     ignore_mask = fluid.layers.cast(ignore_mask, 'float32')
+    label = fluid.layers.elementwise_min(
+        label, fluid.layers.assign(np.array([num_classes - 1], dtype=np.int32)))
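The new elementwise_min line clamps label ids into the valid [0, num_classes − 1] range before one-hot encoding, presumably so stray ids (such as an ignore value) cannot fall outside the one-hot depth. In NumPy terms, a sketch with a hypothetical out-of-range id of 255:

import numpy as np

num_classes = 2
label = np.array([0, 1, 255])             # 255: a hypothetical ignore id
clamped = np.minimum(label, num_classes - 1)
print(clamped)                            # [0 1 1]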
@@ -36,14 +40,18 @@ def softmax_with_loss(logit, label, ignore_mask=None, num_classes=2, weight=None
             ignore_index=cfg.DATASET.IGNORE_INDEX,
             return_softmax=True)
     else:
-        label_one_hot = fluid.layers.one_hot(input=label, depth=num_classes)
+        label_one_hot = fluid.one_hot(input=label, depth=num_classes)
         if isinstance(weight, list):
             assert len(
                 weight
             ) == num_classes, "weight length must equal num of classes"
             weight = fluid.layers.assign(np.array([weight], dtype='float32'))
         elif isinstance(weight, str):
             assert weight.lower() == 'dynamic', 'if weight is string, must be dynamic!'
             tmp = []
             total_num = fluid.layers.cast(fluid.layers.shape(label)[0], 'float32')
             for i in range(num_classes):
                 cls_pixel_num = fluid.layers.reduce_sum(label_one_hot[:, i])
                 ratio = total_num / (cls_pixel_num + 1)
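The 'dynamic' branch derives per-class weights from inverse class frequency: classes with fewer pixels get larger weights, and the +1 guards against division by zero for absent classes. A NumPy sketch of the ratio it computes, with a tiny hypothetical batch:

import numpy as np

# label_one_hot: [num_pixels, num_classes]
label_one_hot = np.array([[1, 0], [1, 0], [1, 0], [0, 1]], dtype='float32')
total_num = float(label_one_hot.shape[0])

for i in range(label_one_hot.shape[1]):
    cls_pixel_num = label_one_hot[:, i].sum()
    ratio = total_num / (cls_pixel_num + 1)   # rarer class -> larger ratio
    print(i, ratio)                           # 0 -> 1.0, 1 -> 2.0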
@@ -53,9 +61,12 @@ def softmax_with_loss(logit, label, ignore_mask=None, num_classes=2, weight=None
         elif isinstance(weight, fluid.layers.Variable):
             pass
         else:
             raise ValueError(
                 'Expect weight is a list, string or Variable, but receive {}'
                 .format(type(weight)))
         weight = fluid.layers.reshape(weight, [1, num_classes])
         weighted_label_one_hot = fluid.layers.elementwise_mul(label_one_hot, weight)
         probs = fluid.layers.softmax(logit)
         loss = fluid.layers.cross_entropy(
             probs,
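Multiplying the one-hot labels by the per-class weight vector turns the standard cross-entropy into a class-weighted one: loss = −Σ_c w_c · y_c · log(p_c). A NumPy sketch of the math, with hypothetical values:

import numpy as np

probs = np.array([[0.7, 0.3]])            # softmax output for one pixel
weight = np.array([[1.0, 2.0]])           # per-class weights
label_one_hot = np.array([[0.0, 1.0]])    # ground truth: class 1

weighted_label_one_hot = label_one_hot * weight
loss = -(weighted_label_one_hot * np.log(probs)).sum()
print(loss)                               # 2 * -log(0.3) ≈ 2.408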
@@ -75,10 +86,11 @@ def softmax_with_loss(logit, label, ignore_mask=None, num_classes=2, weight=None
 # to change, how to appicate ignore index and ignore mask
 def dice_loss(logit, label, ignore_mask=None, epsilon=0.00001):
     if logit.shape[1] != 1 or label.shape[1] != 1 or ignore_mask.shape[1] != 1:
         raise Exception(
             "dice loss is only applicable to one channel classfication")
     ignore_mask = fluid.layers.cast(ignore_mask, 'float32')
     logit = fluid.layers.transpose(logit, [0, 2, 3, 1])
     label = fluid.layers.transpose(label, [0, 2, 3, 1])
+    label = fluid.layers.cast(label, 'int64')
     ignore_mask = fluid.layers.transpose(ignore_mask, [0, 2, 3, 1])
     logit = fluid.layers.sigmoid(logit)
@@ -88,7 +100,7 @@ def dice_loss(logit, label, ignore_mask=None, epsilon=0.00001):
     inse = fluid.layers.reduce_sum(logit * label, dim=reduce_dim)
     dice_denominator = fluid.layers.reduce_sum(
         logit, dim=reduce_dim) + fluid.layers.reduce_sum(
             label, dim=reduce_dim)
     dice_score = 1 - inse * 2 / (dice_denominator + epsilon)
     label.stop_gradient = True
     ignore_mask.stop_gradient = True
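dice_loss implements the standard Dice formulation: score = 1 − 2·|X∩Y| / (|X| + |Y| + ε). A NumPy sketch with hypothetical predictions and masks:

import numpy as np

epsilon = 0.00001
logit = np.array([0.9, 0.8, 0.1])   # sigmoid outputs (predicted foreground)
label = np.array([1.0, 1.0, 0.0])   # binary ground truth

inse = (logit * label).sum()                    # intersection: 1.7
dice_denominator = logit.sum() + label.sum()    # 1.8 + 2.0 = 3.8
dice_score = 1 - inse * 2 / (dice_denominator + epsilon)
print(dice_score)                               # ≈ 0.1053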
@@ -103,26 +115,31 @@ def bce_loss(logit, label, ignore_mask=None):
         x=logit,
         label=label,
         ignore_index=cfg.DATASET.IGNORE_INDEX,
         normalize=True)  # or False
     loss = fluid.layers.reduce_sum(loss)
     label.stop_gradient = True
     ignore_mask.stop_gradient = True
     return loss


 def multi_softmax_with_loss(logits, label, ignore_mask=None, num_classes=2, weight=None):
     if isinstance(logits, tuple):
         avg_loss = 0
         for i, logit in enumerate(logits):
             if label.shape[2] != logit.shape[2] or label.shape[3] != logit.shape[3]:
                 label = fluid.layers.resize_nearest(label, logit.shape[2:])
             logit_mask = (label.astype('int32') !=
                           cfg.DATASET.IGNORE_INDEX).astype('int32')
             loss = softmax_with_loss(logit, label, logit_mask,
                                      num_classes)
             avg_loss += cfg.MODEL.MULTI_LOSS_WEIGHT[i] * loss
     else:
         avg_loss = softmax_with_loss(
             logits, label, ignore_mask, num_classes, weight=weight)
     return avg_loss
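For tuple outputs (for example, auxiliary heads at several scales), the total loss is a weighted sum: avg_loss = Σ_i MULTI_LOSS_WEIGHT[i] · loss_i. A sketch of the accumulation, with hypothetical weights and per-scale losses (cfg.MODEL.MULTI_LOSS_WEIGHT plays the role of multi_loss_weight here):

multi_loss_weight = [1.0, 0.4, 0.16]
losses = [0.8, 1.1, 1.5]                  # per-scale losses, e.g. from 3 heads

avg_loss = 0
for i, loss in enumerate(losses):
    avg_loss += multi_loss_weight[i] * loss
print(avg_loss)                           # 0.8 + 0.44 + 0.24 = 1.48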