Commit 11737433 authored by chenguowei01

change fluid.layers.one_hot to fluid.one_hot

Parent 5df30f99
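For context, a minimal sketch of the call-site change this commit applies throughout the loss functions below. Only the fluid.one_hot call is taken verbatim from the diff; the surrounding setup (the label placeholder and the num_classes value) is an assumption for illustration.

import paddle.fluid as fluid

num_classes = 2  # assumed value; matches the functions' default below

# label carries one class id per pixel; this placeholder shape is an assumption
label = fluid.layers.data(name='label', shape=[1], dtype='int64')

# before this commit: label_one_hot = fluid.layers.one_hot(input=label, depth=num_classes)
# after this commit:
label_one_hot = fluid.one_hot(input=label, depth=num_classes)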
@@ -34,7 +34,7 @@ def softmax_with_loss(logit,
         loss, probs = fluid.layers.softmax_with_cross_entropy(
             logit, label, ignore_index=ignore_index, return_softmax=True)
     else:
-        label_one_hot = fluid.layers.one_hot(input=label, depth=num_classes)
+        label_one_hot = fluid.one_hot(input=label, depth=num_classes)
         if isinstance(weight, list):
             assert len(
                 weight
@@ -20,7 +20,11 @@ import importlib
 from utils.config import cfg


-def softmax_with_loss(logit, label, ignore_mask=None, num_classes=2, weight=None):
+def softmax_with_loss(logit,
+                      label,
+                      ignore_mask=None,
+                      num_classes=2,
+                      weight=None):
     ignore_mask = fluid.layers.cast(ignore_mask, 'float32')
     label = fluid.layers.elementwise_min(
         label, fluid.layers.assign(np.array([num_classes - 1], dtype=np.int32)))
@@ -36,14 +40,18 @@ def softmax_with_loss(logit, label, ignore_mask=None, num_classes=2, weight=None
             ignore_index=cfg.DATASET.IGNORE_INDEX,
             return_softmax=True)
     else:
-        label_one_hot = fluid.layers.one_hot(input=label, depth=num_classes)
+        label_one_hot = fluid.one_hot(input=label, depth=num_classes)
         if isinstance(weight, list):
-            assert len(weight) == num_classes, "weight length must equal num of classes"
+            assert len(
+                weight
+            ) == num_classes, "weight length must equal num of classes"
             weight = fluid.layers.assign(np.array([weight], dtype='float32'))
         elif isinstance(weight, str):
-            assert weight.lower() == 'dynamic', 'if weight is string, must be dynamic!'
+            assert weight.lower(
+            ) == 'dynamic', 'if weight is string, must be dynamic!'
             tmp = []
-            total_num = fluid.layers.cast(fluid.layers.shape(label)[0], 'float32')
+            total_num = fluid.layers.cast(
+                fluid.layers.shape(label)[0], 'float32')
             for i in range(num_classes):
                 cls_pixel_num = fluid.layers.reduce_sum(label_one_hot[:, i])
                 ratio = total_num / (cls_pixel_num + 1)
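An aside on the 'dynamic' branch reformatted above: each class weight is the total pixel count of the batch divided by that class's own pixel count plus one (the +1 guards against division by zero), so rarer classes get larger weights. A toy numpy sketch with assumed values:

import numpy as np

# Toy illustration (assumed values) of the dynamic weights computed above:
#   weight_i = total_pixels / (pixels_of_class_i + 1)
label_one_hot = np.array([[1, 0], [1, 0], [0, 1], [1, 0]], dtype='float32')
total_num = float(label_one_hot.shape[0])                # 4 pixels
weights = [total_num / (label_one_hot[:, i].sum() + 1)   # -> [1.0, 2.0]
           for i in range(label_one_hot.shape[1])]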
@@ -53,9 +61,12 @@ def softmax_with_loss(logit, label, ignore_mask=None, num_classes=2, weight=None
         elif isinstance(weight, fluid.layers.Variable):
             pass
         else:
-            raise ValueError('Expect weight is a list, string or Variable, but receive {}'.format(type(weight)))
+            raise ValueError(
+                'Expect weight is a list, string or Variable, but receive {}'.
+                format(type(weight)))
         weight = fluid.layers.reshape(weight, [1, num_classes])
-        weighted_label_one_hot = fluid.layers.elementwise_mul(label_one_hot, weight)
+        weighted_label_one_hot = fluid.layers.elementwise_mul(
+            label_one_hot, weight)
         probs = fluid.layers.softmax(logit)
         loss = fluid.layers.cross_entropy(
             probs,
@@ -75,10 +86,11 @@ def softmax_with_loss(logit, label, ignore_mask=None, num_classes=2, weight=None
 # to change, how to appicate ignore index and ignore mask
 def dice_loss(logit, label, ignore_mask=None, epsilon=0.00001):
     if logit.shape[1] != 1 or label.shape[1] != 1 or ignore_mask.shape[1] != 1:
-        raise Exception("dice loss is only applicable to one channel classfication")
+        raise Exception(
+            "dice loss is only applicable to one channel classfication")
     ignore_mask = fluid.layers.cast(ignore_mask, 'float32')
     logit = fluid.layers.transpose(logit, [0, 2, 3, 1])
-    label = fluid.layers.transpose(label, [0, 2, 3, 1]) 
+    label = fluid.layers.transpose(label, [0, 2, 3, 1])
     label = fluid.layers.cast(label, 'int64')
     ignore_mask = fluid.layers.transpose(ignore_mask, [0, 2, 3, 1])
     logit = fluid.layers.sigmoid(logit)
@@ -88,7 +100,7 @@ def dice_loss(logit, label, ignore_mask=None, epsilon=0.00001):
     inse = fluid.layers.reduce_sum(logit * label, dim=reduce_dim)
     dice_denominator = fluid.layers.reduce_sum(
         logit, dim=reduce_dim) + fluid.layers.reduce_sum(
-        label, dim=reduce_dim)
+            label, dim=reduce_dim)
     dice_score = 1 - inse * 2 / (dice_denominator + epsilon)
     label.stop_gradient = True
     ignore_mask.stop_gradient = True
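For reference, the dice score assembled across the two hunks above reduces to dice = 1 - 2 * sum(p * g) / (sum(p) + sum(g) + epsilon). A toy numpy sketch with assumed values:

import numpy as np

probs = np.array([0.9, 0.8, 0.2, 0.1])  # sigmoid outputs, hypothetical
label = np.array([1.0, 1.0, 0.0, 0.0])  # binary ground truth, hypothetical
epsilon = 0.00001

inse = (probs * label).sum()                              # 1.7
dice_denominator = probs.sum() + label.sum()              # 4.0
dice_score = 1 - 2 * inse / (dice_denominator + epsilon)  # ~0.15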
@@ -103,26 +115,31 @@ def bce_loss(logit, label, ignore_mask=None):
         x=logit,
         label=label,
         ignore_index=cfg.DATASET.IGNORE_INDEX,
-        normalize=True) # or False
+        normalize=True)  # or False
     loss = fluid.layers.reduce_sum(loss)
     label.stop_gradient = True
     ignore_mask.stop_gradient = True
     return loss


-def multi_softmax_with_loss(logits, label, ignore_mask=None, num_classes=2, weight=None):
+def multi_softmax_with_loss(logits,
+                            label,
+                            ignore_mask=None,
+                            num_classes=2,
+                            weight=None):
     if isinstance(logits, tuple):
         avg_loss = 0
         for i, logit in enumerate(logits):
-            if label.shape[2] != logit.shape[2] or label.shape[3] != logit.shape[3]:
+            if label.shape[2] != logit.shape[2] or label.shape[
+                    3] != logit.shape[3]:
                 label = fluid.layers.resize_nearest(label, logit.shape[2:])
             logit_mask = (label.astype('int32') !=
                           cfg.DATASET.IGNORE_INDEX).astype('int32')
-            loss = softmax_with_loss(logit, label, logit_mask,
-                                     num_classes)
+            loss = softmax_with_loss(logit, label, logit_mask, num_classes)
             avg_loss += cfg.MODEL.MULTI_LOSS_WEIGHT[i] * loss
     else:
-        avg_loss = softmax_with_loss(logits, label, ignore_mask, num_classes, weight=weight)
+        avg_loss = softmax_with_loss(
+            logits, label, ignore_mask, num_classes, weight=weight)
     return avg_loss
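Finally, a note on the multi-output branch above: when the network returns a tuple of logits (for example, auxiliary heads), each head's loss is scaled by its entry in cfg.MODEL.MULTI_LOSS_WEIGHT and the results are summed. The numbers below are assumptions for illustration, not values from the repository.

# Hypothetical values illustrating the weighted sum in multi_softmax_with_loss:
multi_loss_weight = [1.0, 0.4]  # stands in for cfg.MODEL.MULTI_LOSS_WEIGHT
head_losses = [0.85, 1.20]      # assumed per-head softmax losses

avg_loss = sum(w * l for w, l in zip(multi_loss_weight, head_losses))  # 1.33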