Unverified  Commit 57f5ef69  authored by wuyefeilin, committed by GitHub

update one_hot
@@ -34,7 +34,8 @@ def softmax_with_loss(logit,
         loss, probs = fluid.layers.softmax_with_cross_entropy(
             logit, label, ignore_index=ignore_index, return_softmax=True)
     else:
-        label_one_hot = fluid.layers.one_hot(input=label, depth=num_classes)
+        label = fluid.layers.squeeze(label, axes=[-1])
+        label_one_hot = fluid.one_hot(input=label, depth=num_classes)
         if isinstance(weight, list):
             assert len(
                 weight
@@ -34,7 +34,8 @@ def softmax_with_loss(logit,
         loss, probs = fluid.layers.softmax_with_cross_entropy(
             logit, label, ignore_index=ignore_index, return_softmax=True)
     else:
-        label_one_hot = fluid.layers.one_hot(input=label, depth=num_classes)
+        label = fluid.layers.squeeze(label, axes=[-1])
+        label_one_hot = fluid.one_hot(input=label, depth=num_classes)
         if isinstance(weight, list):
             assert len(
                 weight
@@ -20,7 +20,11 @@ import importlib
 from utils.config import cfg
 
 
-def softmax_with_loss(logit, label, ignore_mask=None, num_classes=2, weight=None):
+def softmax_with_loss(logit,
+                      label,
+                      ignore_mask=None,
+                      num_classes=2,
+                      weight=None):
     ignore_mask = fluid.layers.cast(ignore_mask, 'float32')
     label = fluid.layers.elementwise_min(
         label, fluid.layers.assign(np.array([num_classes - 1], dtype=np.int32)))
@@ -36,14 +40,19 @@ def softmax_with_loss(logit, label, ignore_mask=None, num_classes=2, weight=None
             ignore_index=cfg.DATASET.IGNORE_INDEX,
             return_softmax=True)
     else:
-        label_one_hot = fluid.layers.one_hot(input=label, depth=num_classes)
+        label = fluid.layers.squeeze(label, axes=[-1])
+        label_one_hot = fluid.one_hot(input=label, depth=num_classes)
         if isinstance(weight, list):
-            assert len(weight) == num_classes, "weight length must equal num of classes"
+            assert len(
+                weight
+            ) == num_classes, "weight length must equal num of classes"
             weight = fluid.layers.assign(np.array([weight], dtype='float32'))
         elif isinstance(weight, str):
-            assert weight.lower() == 'dynamic', 'if weight is string, must be dynamic!'
+            assert weight.lower(
+            ) == 'dynamic', 'if weight is string, must be dynamic!'
             tmp = []
-            total_num = fluid.layers.cast(fluid.layers.shape(label)[0], 'float32')
+            total_num = fluid.layers.cast(
+                fluid.layers.shape(label)[0], 'float32')
             for i in range(num_classes):
                 cls_pixel_num = fluid.layers.reduce_sum(label_one_hot[:, i])
                 ratio = total_num / (cls_pixel_num + 1)
@@ -53,9 +62,12 @@ def softmax_with_loss(logit, label, ignore_mask=None, num_classes=2, weight=None
         elif isinstance(weight, fluid.layers.Variable):
             pass
         else:
-            raise ValueError('Expect weight is a list, string or Variable, but receive {}'.format(type(weight)))
+            raise ValueError(
+                'Expect weight is a list, string or Variable, but receive {}'.
+                format(type(weight)))
         weight = fluid.layers.reshape(weight, [1, num_classes])
-        weighted_label_one_hot = fluid.layers.elementwise_mul(label_one_hot, weight)
+        weighted_label_one_hot = fluid.layers.elementwise_mul(
+            label_one_hot, weight)
         probs = fluid.layers.softmax(logit)
         loss = fluid.layers.cross_entropy(
             probs,
@@ -75,10 +87,11 @@ def softmax_with_loss(logit, label, ignore_mask=None, num_classes=2, weight=None
 # to change, how to appicate ignore index and ignore mask
 def dice_loss(logit, label, ignore_mask=None, epsilon=0.00001):
     if logit.shape[1] != 1 or label.shape[1] != 1 or ignore_mask.shape[1] != 1:
-        raise Exception("dice loss is only applicable to one channel classfication")
+        raise Exception(
+            "dice loss is only applicable to one channel classfication")
     ignore_mask = fluid.layers.cast(ignore_mask, 'float32')
     logit = fluid.layers.transpose(logit, [0, 2, 3, 1])
     label = fluid.layers.transpose(label, [0, 2, 3, 1])
     label = fluid.layers.cast(label, 'int64')
     ignore_mask = fluid.layers.transpose(ignore_mask, [0, 2, 3, 1])
     logit = fluid.layers.sigmoid(logit)
@@ -88,7 +101,7 @@ def dice_loss(logit, label, ignore_mask=None, epsilon=0.00001):
     inse = fluid.layers.reduce_sum(logit * label, dim=reduce_dim)
     dice_denominator = fluid.layers.reduce_sum(
         logit, dim=reduce_dim) + fluid.layers.reduce_sum(
-        label, dim=reduce_dim)
+            label, dim=reduce_dim)
     dice_score = 1 - inse * 2 / (dice_denominator + epsilon)
     label.stop_gradient = True
     ignore_mask.stop_gradient = True
@@ -103,26 +116,31 @@ def bce_loss(logit, label, ignore_mask=None):
         x=logit,
         label=label,
         ignore_index=cfg.DATASET.IGNORE_INDEX,
-        normalize=True) # or False
+        normalize=True)  # or False
     loss = fluid.layers.reduce_sum(loss)
     label.stop_gradient = True
     ignore_mask.stop_gradient = True
     return loss
 
 
-def multi_softmax_with_loss(logits, label, ignore_mask=None, num_classes=2, weight=None):
+def multi_softmax_with_loss(logits,
+                            label,
+                            ignore_mask=None,
+                            num_classes=2,
+                            weight=None):
     if isinstance(logits, tuple):
         avg_loss = 0
         for i, logit in enumerate(logits):
-            if label.shape[2] != logit.shape[2] or label.shape[3] != logit.shape[3]:
+            if label.shape[2] != logit.shape[2] or label.shape[
+                    3] != logit.shape[3]:
                 label = fluid.layers.resize_nearest(label, logit.shape[2:])
             logit_mask = (label.astype('int32') !=
                           cfg.DATASET.IGNORE_INDEX).astype('int32')
-            loss = softmax_with_loss(logit, label, logit_mask,
-                                     num_classes)
+            loss = softmax_with_loss(logit, label, logit_mask, num_classes)
             avg_loss += cfg.MODEL.MULTI_LOSS_WEIGHT[i] * loss
     else:
-        avg_loss = softmax_with_loss(logits, label, ignore_mask, num_classes, weight=weight)
+        avg_loss = softmax_with_loss(
+            logits, label, ignore_mask, num_classes, weight=weight)
     return avg_loss