From 11737433be74d26ea1c220018aa716188a973c08 Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Mon, 18 May 2020 11:15:45 +0800
Subject: [PATCH] change fluid.layers.one_hot to fluid.one_hot

---
 contrib/HumanSeg/nets/seg_modules.py |  2 +-
 contrib/RemoteSensing/nets/loss.py   |  2 +-
 pdseg/loss.py                        | 49 +++++++++++++++++++---
 3 files changed, 35 insertions(+), 18 deletions(-)

diff --git a/contrib/HumanSeg/nets/seg_modules.py b/contrib/HumanSeg/nets/seg_modules.py
index 83449e21..fb59dce4 100644
--- a/contrib/HumanSeg/nets/seg_modules.py
+++ b/contrib/HumanSeg/nets/seg_modules.py
@@ -34,7 +34,7 @@ def softmax_with_loss(logit,
         loss, probs = fluid.layers.softmax_with_cross_entropy(
             logit, label, ignore_index=ignore_index, return_softmax=True)
     else:
-        label_one_hot = fluid.layers.one_hot(input=label, depth=num_classes)
+        label_one_hot = fluid.one_hot(input=label, depth=num_classes)
         if isinstance(weight, list):
             assert len(
                 weight
diff --git a/contrib/RemoteSensing/nets/loss.py b/contrib/RemoteSensing/nets/loss.py
index 83449e21..fb59dce4 100644
--- a/contrib/RemoteSensing/nets/loss.py
+++ b/contrib/RemoteSensing/nets/loss.py
@@ -34,7 +34,7 @@ def softmax_with_loss(logit,
         loss, probs = fluid.layers.softmax_with_cross_entropy(
             logit, label, ignore_index=ignore_index, return_softmax=True)
     else:
-        label_one_hot = fluid.layers.one_hot(input=label, depth=num_classes)
+        label_one_hot = fluid.one_hot(input=label, depth=num_classes)
         if isinstance(weight, list):
             assert len(
                 weight
diff --git a/pdseg/loss.py b/pdseg/loss.py
index 14f1b379..92638a9c 100644
--- a/pdseg/loss.py
+++ b/pdseg/loss.py
@@ -20,7 +20,11 @@ import importlib
 from utils.config import cfg
 
 
-def softmax_with_loss(logit, label, ignore_mask=None, num_classes=2, weight=None):
+def softmax_with_loss(logit,
+                      label,
+                      ignore_mask=None,
+                      num_classes=2,
+                      weight=None):
     ignore_mask = fluid.layers.cast(ignore_mask, 'float32')
     label = fluid.layers.elementwise_min(
         label, fluid.layers.assign(np.array([num_classes - 1], dtype=np.int32)))
@@ -36,14 +40,18 @@ def softmax_with_loss(logit, label, ignore_mask=None, num_classes=2, weight=None
             ignore_index=cfg.DATASET.IGNORE_INDEX,
             return_softmax=True)
     else:
-        label_one_hot = fluid.layers.one_hot(input=label, depth=num_classes)
+        label_one_hot = fluid.one_hot(input=label, depth=num_classes)
         if isinstance(weight, list):
-            assert len(weight) == num_classes, "weight length must equal num of classes"
+            assert len(
+                weight
+            ) == num_classes, "weight length must equal num of classes"
             weight = fluid.layers.assign(np.array([weight], dtype='float32'))
         elif isinstance(weight, str):
-            assert weight.lower() == 'dynamic', 'if weight is string, must be dynamic!'
+            assert weight.lower(
+            ) == 'dynamic', 'if weight is string, must be dynamic!'
             tmp = []
-            total_num = fluid.layers.cast(fluid.layers.shape(label)[0], 'float32')
+            total_num = fluid.layers.cast(
+                fluid.layers.shape(label)[0], 'float32')
             for i in range(num_classes):
                 cls_pixel_num = fluid.layers.reduce_sum(label_one_hot[:, i])
                 ratio = total_num / (cls_pixel_num + 1)
@@ -53,9 +61,12 @@ def softmax_with_loss(logit, label, ignore_mask=None, num_classes=2, weight=None
         elif isinstance(weight, fluid.layers.Variable):
             pass
         else:
-            raise ValueError('Expect weight is a list, string or Variable, but receive {}'.format(type(weight)))
+            raise ValueError(
+                'Expect weight is a list, string or Variable, but receive {}'.
+                format(type(weight)))
         weight = fluid.layers.reshape(weight, [1, num_classes])
-        weighted_label_one_hot = fluid.layers.elementwise_mul(label_one_hot, weight)
+        weighted_label_one_hot = fluid.layers.elementwise_mul(
+            label_one_hot, weight)
         probs = fluid.layers.softmax(logit)
         loss = fluid.layers.cross_entropy(
             probs,
@@ -75,10 +86,11 @@ def softmax_with_loss(logit, label, ignore_mask=None, num_classes=2, weight=None
 # to change, how to appicate ignore index and ignore mask
 def dice_loss(logit, label, ignore_mask=None, epsilon=0.00001):
     if logit.shape[1] != 1 or label.shape[1] != 1 or ignore_mask.shape[1] != 1:
-        raise Exception("dice loss is only applicable to one channel classfication")
+        raise Exception(
+            "dice loss is only applicable to one channel classfication")
     ignore_mask = fluid.layers.cast(ignore_mask, 'float32')
     logit = fluid.layers.transpose(logit, [0, 2, 3, 1])
-    label = fluid.layers.transpose(label, [0, 2, 3, 1]) 
+    label = fluid.layers.transpose(label, [0, 2, 3, 1])
     label = fluid.layers.cast(label, 'int64')
     ignore_mask = fluid.layers.transpose(ignore_mask, [0, 2, 3, 1])
     logit = fluid.layers.sigmoid(logit)
@@ -88,7 +100,7 @@ def dice_loss(logit, label, ignore_mask=None, epsilon=0.00001):
     inse = fluid.layers.reduce_sum(logit * label, dim=reduce_dim)
     dice_denominator = fluid.layers.reduce_sum(
         logit, dim=reduce_dim) + fluid.layers.reduce_sum(
-            label, dim=reduce_dim) 
+            label, dim=reduce_dim)
     dice_score = 1 - inse * 2 / (dice_denominator + epsilon)
     label.stop_gradient = True
     ignore_mask.stop_gradient = True
@@ -103,26 +115,31 @@ def bce_loss(logit, label, ignore_mask=None):
         x=logit,
         label=label,
         ignore_index=cfg.DATASET.IGNORE_INDEX,
-        normalize=True) # or False
+        normalize=True)  # or False
     loss = fluid.layers.reduce_sum(loss)
     label.stop_gradient = True
     ignore_mask.stop_gradient = True
     return loss
 
 
-def multi_softmax_with_loss(logits, label, ignore_mask=None, num_classes=2, weight=None):
+def multi_softmax_with_loss(logits,
+                            label,
+                            ignore_mask=None,
+                            num_classes=2,
+                            weight=None):
     if isinstance(logits, tuple):
         avg_loss = 0
         for i, logit in enumerate(logits):
-            if label.shape[2] != logit.shape[2] or label.shape[3] != logit.shape[3]:
+            if label.shape[2] != logit.shape[2] or label.shape[
+                    3] != logit.shape[3]:
                 label = fluid.layers.resize_nearest(label, logit.shape[2:])
             logit_mask = (label.astype('int32') !=
                           cfg.DATASET.IGNORE_INDEX).astype('int32')
-            loss = softmax_with_loss(logit, label, logit_mask,
-                                     num_classes)
+            loss = softmax_with_loss(logit, label, logit_mask, num_classes)
             avg_loss += cfg.MODEL.MULTI_LOSS_WEIGHT[i] * loss
     else:
-        avg_loss = softmax_with_loss(logits, label, ignore_mask, num_classes, weight=weight)
+        avg_loss = softmax_with_loss(
+            logits, label, ignore_mask, num_classes, weight=weight)
     return avg_loss
 
 
-- 
GitLab