Unverified · Commit 57f5ef69 · authored by wuyefeilin, committed by GitHub

update one_hot

......@@ -34,7 +34,8 @@ def softmax_with_loss(logit,
         loss, probs = fluid.layers.softmax_with_cross_entropy(
             logit, label, ignore_index=ignore_index, return_softmax=True)
     else:
-        label_one_hot = fluid.layers.one_hot(input=label, depth=num_classes)
+        label = fluid.layers.squeeze(label, axes=[-1])
+        label_one_hot = fluid.one_hot(input=label, depth=num_classes)
         if isinstance(weight, list):
             assert len(
                 weight
......
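
This change tracks Paddle's one-hot API update: fluid.layers.one_hot expects a label with a trailing dimension of 1 (shape [N, 1]), while the newer fluid.one_hot (available since Paddle 1.6) takes the label without that axis and appends the depth dimension itself, hence the extra squeeze. A minimal sketch of the equivalence, assuming Paddle 1.6+; the toy shapes and variable names are mine, not the repo's:

    import numpy as np
    import paddle.fluid as fluid

    main_prog, startup_prog = fluid.Program(), fluid.Program()
    with fluid.program_guard(main_prog, startup_prog):
        # Toy stand-in for the flattened [H*W, 1] segmentation labels.
        label = fluid.data(name='label', shape=[6, 1], dtype='int64')
        # Old API: the trailing dimension of the input must be exactly 1.
        old_oh = fluid.layers.one_hot(input=label, depth=4)
        # New API: squeeze the trailing axis first; one_hot appends depth.
        squeezed = fluid.layers.squeeze(label, axes=[-1])
        new_oh = fluid.one_hot(input=squeezed, depth=4)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(startup_prog)
    feed = {'label': np.array([[0], [1], [3], [2], [1], [0]], dtype='int64')}
    old_v, new_v = exe.run(main_prog, feed=feed, fetch_list=[old_oh, new_oh])
    print(np.allclose(old_v, new_v))  # True: both yield a [6, 4] one-hot matrix
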
......@@ -20,7 +20,11 @@ import importlib
 from utils.config import cfg
 
 
-def softmax_with_loss(logit, label, ignore_mask=None, num_classes=2, weight=None):
+def softmax_with_loss(logit,
+                      label,
+                      ignore_mask=None,
+                      num_classes=2,
+                      weight=None):
     ignore_mask = fluid.layers.cast(ignore_mask, 'float32')
     label = fluid.layers.elementwise_min(
         label, fluid.layers.assign(np.array([num_classes - 1], dtype=np.int32)))
......@@ -36,14 +40,19 @@ def softmax_with_loss(logit, label, ignore_mask=None, num_classes=2, weight=None
             ignore_index=cfg.DATASET.IGNORE_INDEX,
             return_softmax=True)
     else:
-        label_one_hot = fluid.layers.one_hot(input=label, depth=num_classes)
+        label = fluid.layers.squeeze(label, axes=[-1])
+        label_one_hot = fluid.one_hot(input=label, depth=num_classes)
         if isinstance(weight, list):
-            assert len(weight) == num_classes, "weight length must equal num of classes"
+            assert len(
+                weight
+            ) == num_classes, "weight length must equal num of classes"
             weight = fluid.layers.assign(np.array([weight], dtype='float32'))
         elif isinstance(weight, str):
-            assert weight.lower() == 'dynamic', 'if weight is string, must be dynamic!'
+            assert weight.lower(
+            ) == 'dynamic', 'if weight is string, must be dynamic!'
             tmp = []
-            total_num = fluid.layers.cast(fluid.layers.shape(label)[0], 'float32')
+            total_num = fluid.layers.cast(
+                fluid.layers.shape(label)[0], 'float32')
             for i in range(num_classes):
                 cls_pixel_num = fluid.layers.reduce_sum(label_one_hot[:, i])
                 ratio = total_num / (cls_pixel_num + 1)
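
The 'dynamic' branch builds inverse-frequency class weights on the fly: total_num is the number of labeled pixels, and each class's weight is total_num / (cls_pixel_num + 1), the +1 guarding against classes absent from the batch. The hunk cuts off inside the loop; the NumPy sketch below reproduces just the arithmetic shown (the function name and toy data are my illustration, not the committed code):

    import numpy as np

    def dynamic_class_weights(label_one_hot):
        # label_one_hot: float array of shape [num_pixels, num_classes].
        total_num = float(label_one_hot.shape[0])
        cls_pixel_num = label_one_hot.sum(axis=0)  # pixels per class
        return total_num / (cls_pixel_num + 1)     # rarer class => larger weight

    one_hot = np.eye(3)[[0, 0, 0, 1, 1, 2]].astype('float32')  # 6 toy pixels
    print(dynamic_class_weights(one_hot))  # [1.5 2.  3. ]
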
......@@ -53,9 +62,12 @@ def softmax_with_loss(logit, label, ignore_mask=None, num_classes=2, weight=None
         elif isinstance(weight, fluid.layers.Variable):
             pass
         else:
-            raise ValueError('Expect weight is a list, string or Variable, but receive {}'.format(type(weight)))
+            raise ValueError(
+                'Expect weight is a list, string or Variable, but receive {}'.
+                format(type(weight)))
         weight = fluid.layers.reshape(weight, [1, num_classes])
-        weighted_label_one_hot = fluid.layers.elementwise_mul(label_one_hot, weight)
+        weighted_label_one_hot = fluid.layers.elementwise_mul(
+            label_one_hot, weight)
         probs = fluid.layers.softmax(logit)
         loss = fluid.layers.cross_entropy(
             probs,
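
Per-class weighting is applied by scaling the one-hot targets rather than the loss itself: with soft labels, cross entropy is -sum_c t_c * log(p_c), so multiplying t_c by w_c multiplies each pixel's loss by the weight of its true class. The cross_entropy call is truncated in the hunk; a NumPy sketch of the underlying arithmetic, with toy numbers and naming of my own:

    import numpy as np

    probs = np.array([[0.7, 0.2, 0.1],
                      [0.1, 0.8, 0.1]], dtype='float32')
    one_hot = np.array([[1., 0., 0.],
                        [0., 1., 0.]], dtype='float32')
    weight = np.array([[0.5, 2.0, 1.0]], dtype='float32')   # shape [1, C]

    weighted_one_hot = one_hot * weight                     # elementwise_mul
    loss = -(weighted_one_hot * np.log(probs)).sum(axis=1)  # soft-label CE
    print(loss)  # [0.5 * -log(0.7), 2.0 * -log(0.8)] ~= [0.178, 0.446]
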
......@@ -75,7 +87,8 @@ def softmax_with_loss(logit, label, ignore_mask=None, num_classes=2, weight=None
 # to change, how to apply ignore index and ignore mask
 def dice_loss(logit, label, ignore_mask=None, epsilon=0.00001):
     if logit.shape[1] != 1 or label.shape[1] != 1 or ignore_mask.shape[1] != 1:
-        raise Exception("dice loss is only applicable to one channel classification")
+        raise Exception(
+            "dice loss is only applicable to one channel classification")
     ignore_mask = fluid.layers.cast(ignore_mask, 'float32')
     logit = fluid.layers.transpose(logit, [0, 2, 3, 1])
     label = fluid.layers.transpose(label, [0, 2, 3, 1])
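
dice_loss deliberately rejects multi-channel inputs: soft Dice compares a single foreground probability map against a binary label, loss = 1 - 2|P∩L| / (|P| + |L| + epsilon). A self-contained sketch of that formula; the flattened [N, H*W] layout and masking are my simplification, and as the in-file comment notes, how the ignore mask should enter is still marked "to change":

    import numpy as np

    def dice_loss_np(probs, label, mask, epsilon=1e-5):
        # probs, label, mask: float arrays of shape [N, H*W].
        probs, label = probs * mask, label * mask  # drop ignored pixels
        inter = (probs * label).sum(axis=1)
        union = probs.sum(axis=1) + label.sum(axis=1)
        return (1.0 - 2.0 * inter / (union + epsilon)).mean()

    probs = np.array([[0.9, 0.1, 0.8]], dtype='float32')
    label = np.array([[1.0, 0.0, 1.0]], dtype='float32')
    mask = np.ones_like(label)
    print(dice_loss_np(probs, label, mask))  # ~0.105: close match, small loss
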
......@@ -110,19 +123,24 @@ def bce_loss(logit, label, ignore_mask=None):
     return loss
 
 
-def multi_softmax_with_loss(logits, label, ignore_mask=None, num_classes=2, weight=None):
+def multi_softmax_with_loss(logits,
+                            label,
+                            ignore_mask=None,
+                            num_classes=2,
+                            weight=None):
     if isinstance(logits, tuple):
         avg_loss = 0
         for i, logit in enumerate(logits):
-            if label.shape[2] != logit.shape[2] or label.shape[3] != logit.shape[3]:
+            if label.shape[2] != logit.shape[2] or label.shape[
+                    3] != logit.shape[3]:
                 label = fluid.layers.resize_nearest(label, logit.shape[2:])
             logit_mask = (label.astype('int32') !=
                           cfg.DATASET.IGNORE_INDEX).astype('int32')
-            loss = softmax_with_loss(logit, label, logit_mask,
-                                     num_classes)
+            loss = softmax_with_loss(logit, label, logit_mask, num_classes)
             avg_loss += cfg.MODEL.MULTI_LOSS_WEIGHT[i] * loss
     else:
-        avg_loss = softmax_with_loss(logits, label, ignore_mask, num_classes, weight=weight)
+        avg_loss = softmax_with_loss(
+            logits, label, ignore_mask, num_classes, weight=weight)
     return avg_loss
......
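
multi_softmax_with_loss handles deep supervision: when the network emits a tuple of logits at different scales, the label is resized to each logit's spatial size (nearest-neighbor, so class ids stay intact) and the per-scale losses are combined with the weights in cfg.MODEL.MULTI_LOSS_WEIGHT. The combination itself is a plain weighted sum; the numbers below are hypothetical, for illustration only:

    # Hypothetical weights and per-scale losses, not values from the repo.
    multi_loss_weight = [1.0, 0.4, 0.16]
    per_scale_losses = [0.70, 0.90, 1.20]  # auxiliary heads usually lose more
    avg_loss = sum(w * l for w, l in zip(multi_loss_weight, per_scale_losses))
    print(avg_loss)  # 0.7 + 0.36 + 0.192 = 1.252
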