PaddlePaddle / PaddleSeg
Unverified commit 57f5ef69
Authored May 18, 2020 by wuyefeilin; committed via GitHub on May 18, 2020

update one_hot

Parents: 5df30f99, 2ce4a558
Showing 3 changed files with 38 additions and 18 deletions (+38, -18):
- contrib/HumanSeg/nets/seg_modules.py (+2, -1)
- contrib/RemoteSensing/nets/loss.py (+2, -1)
- pdseg/loss.py (+34, -16)
contrib/HumanSeg/nets/seg_modules.py (view file @ 57f5ef69)
```diff
...
@@ -34,7 +34,8 @@ def softmax_with_loss(logit,
         loss, probs = fluid.layers.softmax_with_cross_entropy(
             logit,
             label,
             ignore_index=ignore_index,
             return_softmax=True)
     else:
-        label_one_hot = fluid.layers.one_hot(input=label, depth=num_classes)
+        label = fluid.layers.squeeze(label, axes=[-1])
+        label_one_hot = fluid.one_hot(input=label, depth=num_classes)
         if isinstance(weight, list):
             assert len(weight
...
```
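The squeeze-then-one_hot pattern above replaces the single `fluid.layers.one_hot` call, presumably because the two ops treat shapes differently (a hedged reading based on the Paddle 1.x documentation, not stated in the commit message): `fluid.layers.one_hot` consumed a trailing axis of size 1 and replaced it with `depth`, while the newer `fluid.one_hot` appends a `depth` axis to the input shape. A minimal NumPy sketch of those shape semantics; `one_hot_append` is a hypothetical stand-in, not a Paddle API:

```python
import numpy as np

def one_hot_append(label, depth):
    # Stand-in for fluid.one_hot: output shape = label.shape + (depth,).
    out = np.zeros(label.shape + (depth,), dtype=np.float32)
    np.put_along_axis(out, label[..., None], 1.0, axis=-1)
    return out

label = np.random.randint(0, 2, size=(4, 64, 64, 1))  # NHW1 label map
squeezed = label.squeeze(-1)                          # NHW after squeeze
print(one_hot_append(squeezed, 2).shape)              # (4, 64, 64, 2)
# Without the squeeze the result would be (4, 64, 64, 1, 2): that stray
# axis is why the commit inserts fluid.layers.squeeze first.
```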
contrib/RemoteSensing/nets/loss.py (view file @ 57f5ef69)
```diff
...
@@ -34,7 +34,8 @@ def softmax_with_loss(logit,
         loss, probs = fluid.layers.softmax_with_cross_entropy(
             logit,
             label,
             ignore_index=ignore_index,
             return_softmax=True)
     else:
-        label_one_hot = fluid.layers.one_hot(input=label, depth=num_classes)
+        label = fluid.layers.squeeze(label, axes=[-1])
+        label_one_hot = fluid.one_hot(input=label, depth=num_classes)
         if isinstance(weight, list):
             assert len(weight
...
```
pdseg/loss.py (view file @ 57f5ef69)
```diff
...
@@ -20,7 +20,11 @@ import importlib
 from utils.config import cfg
 
 
-def softmax_with_loss(logit, label, ignore_mask=None, num_classes=2, weight=None):
+def softmax_with_loss(logit,
+                      label,
+                      ignore_mask=None,
+                      num_classes=2,
+                      weight=None):
     ignore_mask = fluid.layers.cast(ignore_mask, 'float32')
     label = fluid.layers.elementwise_min(
         label, fluid.layers.assign(np.array([num_classes - 1], dtype=np.int32)))
...
```
```diff
@@ -36,14 +40,19 @@ def softmax_with_loss(logit, label, ignore_mask=None, num_classes=2, weight=None
             ignore_index=cfg.DATASET.IGNORE_INDEX,
             return_softmax=True)
     else:
-        label_one_hot = fluid.layers.one_hot(input=label, depth=num_classes)
+        label = fluid.layers.squeeze(label, axes=[-1])
+        label_one_hot = fluid.one_hot(input=label, depth=num_classes)
         if isinstance(weight, list):
-            assert len(weight) == num_classes, "weight length must equal num of classes"
+            assert len(
+                weight
+            ) == num_classes, "weight length must equal num of classes"
             weight = fluid.layers.assign(np.array([weight], dtype='float32'))
         elif isinstance(weight, str):
-            assert weight.lower() == 'dynamic', 'if weight is string, must be dynamic!'
+            assert weight.lower(
+            ) == 'dynamic', 'if weight is string, must be dynamic!'
             tmp = []
-            total_num = fluid.layers.cast(fluid.layers.shape(label)[0], 'float32')
+            total_num = fluid.layers.cast(
+                fluid.layers.shape(label)[0], 'float32')
             for i in range(num_classes):
                 cls_pixel_num = fluid.layers.reduce_sum(label_one_hot[:, i])
                 ratio = total_num / (cls_pixel_num + 1)
...
```
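In the `'dynamic'` branch above, each class weight is the ratio of the total pixel count to that class's pixel count, with +1 guarding against empty classes, so under-represented classes contribute more to the loss. A minimal sketch with NumPy standing in for the fluid ops; it assumes `label_one_hot` has been flattened to `[num_pixels, num_classes]`, and the diff truncates before showing how the per-class ratios collected in `tmp` are combined:

```python
import numpy as np

def dynamic_class_weights(label_one_hot):
    # Mirrors the loop above: total_num / (cls_pixel_num + 1) per class.
    total_num = float(label_one_hot.shape[0])          # all labelled pixels
    weights = []
    for i in range(label_one_hot.shape[1]):
        cls_pixel_num = label_one_hot[:, i].sum()      # pixels of class i
        weights.append(total_num / (cls_pixel_num + 1))
    return np.array(weights, dtype='float32')          # rare class -> large weight
```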
```diff
@@ -53,9 +62,12 @@ def softmax_with_loss(logit, label, ignore_mask=None, num_classes=2, weight=None
         elif isinstance(weight, fluid.layers.Variable):
             pass
         else:
-            raise ValueError('Expect weight is a list, string or Variable, but receive {}'.format(type(weight)))
+            raise ValueError(
+                'Expect weight is a list, string or Variable, but receive {}'.
+                format(type(weight)))
         weight = fluid.layers.reshape(weight, [1, num_classes])
-        weighted_label_one_hot = fluid.layers.elementwise_mul(label_one_hot, weight)
+        weighted_label_one_hot = fluid.layers.elementwise_mul(
+            label_one_hot, weight)
         probs = fluid.layers.softmax(logit)
         loss = fluid.layers.cross_entropy(probs,
...
```
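Once validated, the weight vector is reshaped to `[1, num_classes]` and multiplied into the one-hot labels, so the subsequent cross entropy scales each pixel's loss by the weight of its ground-truth class. A NumPy sketch of that path; treating the truncated `fluid.layers.cross_entropy` call as soft-label cross entropy is an assumption:

```python
import numpy as np

def weighted_softmax_loss(logit, label_one_hot, weight):
    # softmax over the class axis
    probs = np.exp(logit) / np.exp(logit).sum(axis=-1, keepdims=True)
    # broadcast the [1, num_classes] weights over the one-hot labels
    weighted = label_one_hot * weight.reshape(1, -1)
    # per-pixel loss, scaled by the ground-truth class weight
    return -(weighted * np.log(probs)).sum(axis=-1)
```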
```diff
@@ -75,10 +87,11 @@ def softmax_with_loss(logit, label, ignore_mask=None, num_classes=2, weight=None
 # to change, how to appicate ignore index and ignore mask
 def dice_loss(logit, label, ignore_mask=None, epsilon=0.00001):
     if logit.shape[1] != 1 or label.shape[1] != 1 or ignore_mask.shape[1] != 1:
-        raise Exception("dice loss is only applicable to one channel classfication")
+        raise Exception(
+            "dice loss is only applicable to one channel classfication")
     ignore_mask = fluid.layers.cast(ignore_mask, 'float32')
     logit = fluid.layers.transpose(logit, [0, 2, 3, 1])
     label = fluid.layers.transpose(label, [0, 2, 3, 1])
+    label = fluid.layers.cast(label, 'int64')
     ignore_mask = fluid.layers.transpose(ignore_mask, [0, 2, 3, 1])
     logit = fluid.layers.sigmoid(logit)
...
```
```diff
@@ -88,7 +101,7 @@ def dice_loss(logit, label, ignore_mask=None, epsilon=0.00001):
     inse = fluid.layers.reduce_sum(logit * label, dim=reduce_dim)
-    dice_denominator = fluid.layers.reduce_sum(logit, dim=reduce_dim) + fluid.layers.reduce_sum(
-        label, dim=reduce_dim)
+    dice_denominator = fluid.layers.reduce_sum(
+        logit, dim=reduce_dim) + fluid.layers.reduce_sum(label, dim=reduce_dim)
     dice_score = 1 - inse * 2 / (dice_denominator + epsilon)
     label.stop_gradient = True
     ignore_mask.stop_gradient = True
...
```
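The hunk above only rewraps the denominator, but it is the core of the dice computation: with `inse` as the soft intersection and `dice_denominator` as the sum of the two areas, the loss is `1 - 2 * inse / (dice_denominator + epsilon)`. A NumPy restatement for reference:

```python
import numpy as np

def dice_loss_np(probs, label, epsilon=0.00001):
    inse = (probs * label).sum()           # soft intersection of prediction and label
    denom = probs.sum() + label.sum()      # sum of the two areas
    return 1 - 2 * inse / (denom + epsilon)
```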
```diff
@@ -103,26 +116,31 @@ def bce_loss(logit, label, ignore_mask=None):
         x=logit,
         label=label,
         ignore_index=cfg.DATASET.IGNORE_INDEX,
-        normalize=True)  # or False
+        normalize=True)  # or False
     loss = fluid.layers.reduce_sum(loss)
     label.stop_gradient = True
     ignore_mask.stop_gradient = True
     return loss


-def multi_softmax_with_loss(logits, label, ignore_mask=None, num_classes=2, weight=None):
+def multi_softmax_with_loss(logits,
+                            label,
+                            ignore_mask=None,
+                            num_classes=2,
+                            weight=None):
     if isinstance(logits, tuple):
         avg_loss = 0
         for i, logit in enumerate(logits):
-            if label.shape[2] != logit.shape[2] or label.shape[3] != logit.shape[3]:
+            if label.shape[2] != logit.shape[2] or label.shape[
+                    3] != logit.shape[3]:
                 label = fluid.layers.resize_nearest(label, logit.shape[2:])
             logit_mask = (label.astype('int32') !=
                           cfg.DATASET.IGNORE_INDEX).astype('int32')
-            loss = softmax_with_loss(logit, label, logit_mask, num_classes)
+            loss = softmax_with_loss(logit, label, logit_mask,
+                                     num_classes)
             avg_loss += cfg.MODEL.MULTI_LOSS_WEIGHT[i] * loss
     else:
-        avg_loss = softmax_with_loss(logits, label, ignore_mask, num_classes, weight=weight)
+        avg_loss = softmax_with_loss(
+            logits, label, ignore_mask, num_classes, weight=weight)
     return avg_loss
...
```
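For tuple logits (auxiliary heads at multiple scales), `multi_softmax_with_loss` resizes the label to match each head, masks ignored pixels, and blends the per-head losses with `cfg.MODEL.MULTI_LOSS_WEIGHT`. A toy illustration of that blending; the weights and losses below are made-up values, not defaults from the repo:

```python
# Hypothetical per-head losses and MULTI_LOSS_WEIGHT entries.
losses = [1.20, 0.85, 0.60]
multi_loss_weight = [1.0, 0.4, 0.16]
avg_loss = sum(w * l for w, l in zip(multi_loss_weight, losses))
print(avg_loss)  # 1.636
```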