PaddlePaddle / PaddleSeg · Commit 57f5ef69
Unverified commit 57f5ef69, authored on May 18, 2020 by wuyefeilin, committed via GitHub on May 18, 2020.

update one_hot

Parents: 5df30f99, 2ce4a558

Showing 3 changed files with 38 additions and 18 deletions (+38 / -18):
contrib/HumanSeg/nets/seg_modules.py    +2 / -1
contrib/RemoteSensing/nets/loss.py      +2 / -1
pdseg/loss.py                           +34 / -16
contrib/HumanSeg/nets/seg_modules.py @ 57f5ef69

@@ -34,7 +34,8 @@ def softmax_with_loss(logit,
         loss, probs = fluid.layers.softmax_with_cross_entropy(
             logit,
             label,
             ignore_index=ignore_index,
             return_softmax=True)
     else:
-        label_one_hot = fluid.layers.one_hot(input=label, depth=num_classes)
+        label = fluid.layers.squeeze(label, axes=[-1])
+        label_one_hot = fluid.one_hot(input=label, depth=num_classes)
         if isinstance(weight, list):
             assert len(
                 weight
...
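All three files make the same substitution: the one-hot encoding moves from `fluid.layers.one_hot` to `fluid.one_hot`. The older op consumes the trailing singleton dimension of the label (`[N, 1] -> [N, depth]`), while `fluid.one_hot` appends `depth` as a new last dimension, so the label is squeezed first to keep the output shape unchanged. A minimal sketch of the shape semantics, assuming the PaddlePaddle 1.x static-graph (fluid) API this repo targets:

```python
import paddle.fluid as fluid

num_classes = 19  # illustrative value, e.g. a Cityscapes-sized label set
label = fluid.data(name='label', shape=[None, 1], dtype='int64')

# Old API: the trailing singleton dim is consumed, [N, 1] -> [N, depth].
old_one_hot = fluid.layers.one_hot(input=label, depth=num_classes)

# New API: depth is appended as an extra dim, so [N, 1] would become
# [N, 1, depth]; squeezing first keeps the result at [N, depth].
squeezed = fluid.layers.squeeze(label, axes=[-1])  # [N, 1] -> [N]
new_one_hot = fluid.one_hot(input=squeezed, depth=num_classes)
```

The same two-line replacement appears verbatim in contrib/RemoteSensing/nets/loss.py below and in the weighted branch of pdseg/loss.py.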
contrib/RemoteSensing/nets/loss.py @ 57f5ef69

@@ -34,7 +34,8 @@ def softmax_with_loss(logit,
         loss, probs = fluid.layers.softmax_with_cross_entropy(
             logit,
             label,
             ignore_index=ignore_index,
             return_softmax=True)
     else:
-        label_one_hot = fluid.layers.one_hot(input=label, depth=num_classes)
+        label = fluid.layers.squeeze(label, axes=[-1])
+        label_one_hot = fluid.one_hot(input=label, depth=num_classes)
         if isinstance(weight, list):
             assert len(
                 weight
...
pdseg/loss.py @ 57f5ef69

@@ -20,7 +20,11 @@ import importlib
 from utils.config import cfg


-def softmax_with_loss(logit, label, ignore_mask=None, num_classes=2, weight=None):
+def softmax_with_loss(logit,
+                      label,
+                      ignore_mask=None,
+                      num_classes=2,
+                      weight=None):
     ignore_mask = fluid.layers.cast(ignore_mask, 'float32')
     label = fluid.layers.elementwise_min(
         label, fluid.layers.assign(np.array([num_classes - 1], dtype=np.int32)))
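In the context above, `fluid.layers.elementwise_min` clamps every label id to at most `num_classes - 1` before the loss graph is built. A numpy sketch of that guard (illustrative, not repo code):

```python
import numpy as np

num_classes = 2
label = np.array([0, 1, 3, 7])              # stray ids beyond the range
label = np.minimum(label, num_classes - 1)  # -> array([0, 1, 1, 1])
```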
...
@@ -36,14 +40,19 @@ def softmax_with_loss(logit, label, ignore_mask=None, num_classes=2, weight=None
             ignore_index=cfg.DATASET.IGNORE_INDEX,
             return_softmax=True)
     else:
-        label_one_hot = fluid.layers.one_hot(input=label, depth=num_classes)
+        label = fluid.layers.squeeze(label, axes=[-1])
+        label_one_hot = fluid.one_hot(input=label, depth=num_classes)
         if isinstance(weight, list):
-            assert len(weight) == num_classes, "weight length must equal num of classes"
+            assert len(
+                weight
+            ) == num_classes, "weight length must equal num of classes"
             weight = fluid.layers.assign(np.array([weight], dtype='float32'))
         elif isinstance(weight, str):
-            assert weight.lower() == 'dynamic', 'if weight is string, must be dynamic!'
+            assert weight.lower(
+            ) == 'dynamic', 'if weight is string, must be dynamic!'
             tmp = []
-            total_num = fluid.layers.cast(fluid.layers.shape(label)[0], 'float32')
+            total_num = fluid.layers.cast(
+                fluid.layers.shape(label)[0], 'float32')
             for i in range(num_classes):
                 cls_pixel_num = fluid.layers.reduce_sum(label_one_hot[:, i])
                 ratio = total_num / (cls_pixel_num + 1)
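The `'dynamic'` branch derives inverse-frequency class weights from the one-hot labels: each class is weighted by `total_num / (cls_pixel_num + 1)`, so under-represented classes contribute more to the loss, and the `+ 1` keeps the ratio finite for classes absent from the batch. A numpy sketch of the same arithmetic (names are illustrative):

```python
import numpy as np

def dynamic_class_weights(label_one_hot):
    """label_one_hot: [num_pixels, num_classes] array of 0/1 values."""
    total_num = float(label_one_hot.shape[0])
    cls_pixel_num = label_one_hot.sum(axis=0)  # pixel count per class
    return total_num / (cls_pixel_num + 1)     # rare class -> large weight

labels = np.array([0, 0, 0, 0, 1])               # class 1 is rare
print(dynamic_class_weights(np.eye(2)[labels]))  # [1.0, 2.5]
```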
...
@@ -53,9 +62,12 @@ def softmax_with_loss(logit, label, ignore_mask=None, num_classes=2, weight=None
         elif isinstance(weight, fluid.layers.Variable):
             pass
         else:
-            raise ValueError('Expect weight is a list, string or Variable, but receive {}'.format(type(weight)))
+            raise ValueError(
+                'Expect weight is a list, string or Variable, but receive {}'.
+                format(type(weight)))
         weight = fluid.layers.reshape(weight, [1, num_classes])
-        weighted_label_one_hot = fluid.layers.elementwise_mul(label_one_hot, weight)
+        weighted_label_one_hot = fluid.layers.elementwise_mul(
+            label_one_hot, weight)
         probs = fluid.layers.softmax(logit)
         loss = fluid.layers.cross_entropy(
             probs,
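After the chain above normalizes `weight` to a `[1, num_classes]` tensor, the one-hot targets are scaled by it and fed to a soft-label cross-entropy, which per pixel amounts to -sum_c w_c * y_c * log(p_c). A numpy sketch of that computation (assumed flattened shapes; not repo code):

```python
import numpy as np

def weighted_softmax_ce(logit, label_one_hot, weight, eps=1e-8):
    """logit, label_one_hot: [N, C]; weight: [C]."""
    shifted = logit - logit.max(axis=1, keepdims=True)  # stable softmax
    probs = np.exp(shifted) / np.exp(shifted).sum(axis=1, keepdims=True)
    weighted = label_one_hot * weight                   # broadcasts over N
    return -(weighted * np.log(probs + eps)).sum(axis=1)

logit = np.array([[2.0, 0.5], [0.2, 1.0]])
onehot = np.eye(2)[[0, 1]]
print(weighted_softmax_ce(logit, onehot, np.array([1.0, 2.5])))
```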
...
@@ -75,7 +87,8 @@ def softmax_with_loss(logit, label, ignore_mask=None, num_classes=2, weight=None
 # to change, how to appicate ignore index and ignore mask
 def dice_loss(logit, label, ignore_mask=None, epsilon=0.00001):
-    if logit.shape[1] != 1 or label.shape[1] != 1 or ignore_mask.shape[1] != 1:
+    if logit.shape[1] != 1 or label.shape[1] != 1 or ignore_mask.shape[
+            1] != 1:
         raise Exception("dice loss is only applicable to one channel classfication")
     ignore_mask = fluid.layers.cast(ignore_mask, 'float32')
     logit = fluid.layers.transpose(logit, [0, 2, 3, 1])
     label = fluid.layers.transpose(label, [0, 2, 3, 1])
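For reference, the dice loss this hunk reformats is 1 - 2|P∩G| / (|P| + |G|) over a single channel, with `epsilon` keeping the ratio defined for empty masks. A numpy sketch under those assumptions (the repo version operates on fluid tensors and the ignore mask):

```python
import numpy as np

def dice_loss(prob, label, epsilon=1e-5):
    """prob, label: flattened single-channel arrays with values in [0, 1]."""
    inter = (prob * label).sum()
    union = prob.sum() + label.sum()
    return 1.0 - (2.0 * inter + epsilon) / (union + epsilon)

print(dice_loss(np.array([0.9, 0.8, 0.1]), np.array([1.0, 1.0, 0.0])))
```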
...
@@ -110,19 +123,24 @@ def bce_loss(logit, label, ignore_mask=None):
     return loss


-def multi_softmax_with_loss(logits, label, ignore_mask=None, num_classes=2, weight=None):
+def multi_softmax_with_loss(logits,
+                            label,
+                            ignore_mask=None,
+                            num_classes=2,
+                            weight=None):
     if isinstance(logits, tuple):
         avg_loss = 0
         for i, logit in enumerate(logits):
-            if label.shape[2] != logit.shape[2] or label.shape[3] != logit.shape[3]:
+            if label.shape[2] != logit.shape[2] or label.shape[
+                    3] != logit.shape[3]:
                 label = fluid.layers.resize_nearest(label, logit.shape[2:])
             logit_mask = (label.astype('int32') !=
                           cfg.DATASET.IGNORE_INDEX).astype('int32')
             loss = softmax_with_loss(logit, label, logit_mask,
                                      num_classes)
             avg_loss += cfg.MODEL.MULTI_LOSS_WEIGHT[i] * loss
     else:
-        avg_loss = softmax_with_loss(logits, label, ignore_mask, num_classes, weight=weight)
+        avg_loss = softmax_with_loss(
+            logits, label, ignore_mask, num_classes, weight=weight)
     return avg_loss
...
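`multi_softmax_with_loss` handles models that emit several logits at different scales: the label is resized to each logit's resolution with nearest-neighbor interpolation, a per-scale mask re-derives the ignored pixels, and the per-scale losses are blended with `cfg.MODEL.MULTI_LOSS_WEIGHT`. A pure-Python sketch of the blending step (the loss and weight values are hypothetical):

```python
# Mirrors: avg_loss += cfg.MODEL.MULTI_LOSS_WEIGHT[i] * loss
scale_losses = [0.42, 0.55, 0.61]      # one loss per logit scale
multi_loss_weight = [1.0, 0.4, 0.16]   # e.g. an ICNet-style setting
avg_loss = sum(w * l for w, l in zip(multi_loss_weight, scale_losses))
print(avg_loss)  # 0.42 + 0.22 + 0.0976 = 0.7376
```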