Handling class imbalance in multi-class segmentation: custom loss function
Created by: julianadobetter
I previously implemented the custom loss below in TensorFlow. It works and copes well with severe multi-class imbalance:

```python
def loss(self, logits, annotation):
    # Flatten to [N, C]; annotation is expected to already be one-hot.
    logits = tf.reshape(logits, (-1, num_of_classes))
    labels = tf.reshape(annotation, (-1, num_of_classes))
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)
    # Fixed small weight for the background class; dynamic weights for the rest.
    weights = [0.1]
    for jj in range(1, num_of_classes):
        labels_temp = labels[..., jj]
        positive_count = tf.count_nonzero(labels_temp, dtype=tf.float32)
        total_num = tf.constant(IMG_SIZE * IMG_SIZE, dtype=tf.float32)
        negative_count = tf.subtract(total_num, positive_count)
        # The rarer the class, the larger its weight; gama is a global scale factor.
        alpha = tf.div(tf.multiply(negative_count, 1.0), positive_count * gama + 1)
        weights = weights + [alpha]
    weights = tf.to_float(tf.convert_to_tensor(weights))
    labels = tf.to_float(labels)
    # Per-pixel weight map: select each pixel's class weight via its one-hot label.
    weight_map = tf.reduce_sum(tf.multiply(labels, weights), -1)
    weight_map = tf.stop_gradient(weight_map, name='stop_gradient')
    weighted_cross_entropy = tf.multiply(weight_map, cross_entropy, name='apply_weights')
    loss_wc = tf.reduce_mean(weighted_cross_entropy, name='loss')
    return loss_wc
```
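To show what the dynamic weights actually look like, here is the same per-class formula in plain Python. IMG_SIZE and gama are globals that the snippet above does not define, so the values used here (512 and 1.0) are just illustrative assumptions:

```python
IMG_SIZE = 512   # assumed; defined elsewhere in the original code
gama = 1.0       # assumed; defined elsewhere in the original code

def class_weight(positive_count):
    """negative_count / (positive_count * gama + 1), as in the loop above."""
    total = IMG_SIZE * IMG_SIZE
    negative_count = total - positive_count
    return negative_count / (positive_count * gama + 1)

# A class covering ~0.4% of the pixels gets a weight hundreds of times
# larger than the fixed background weight of 0.1:
print(class_weight(1000.0))    # ~260.9
print(class_weight(100000.0))  # ~1.6
```

So rare classes dominate the weight map, which is what counters the imbalance.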
Then I tried to port it to Paddle, as follows:

```python
def weighted_softmax_cross_entropy(logit, label, num_classes=2):
    logit = fluid.layers.reshape(logit, [-1, num_classes])
    label = fluid.layers.reshape(label, [-1, 1])
    logit = fluid.layers.softmax(logit)
    # fluid.one_hot on a [N, 1] label gives [N, 1, C]; squeeze to [N, C].
    one_hot_label_ori = fluid.one_hot(label, depth=logit.shape[-1])
    one_hot_label = fluid.layers.squeeze(one_hot_label_ori, axes=[-2])
    log_prob = fluid.layers.log(fluid.layers.clip(logit, min=1e-7, max=1.0))
    ce_loss = -1 * log_prob * one_hot_label
    cross_entropy = fluid.layers.reduce_sum(ce_loss)
    # Fixed per-class weights instead of the dynamic weights of the TF version.
    weights = fluid.layers.assign(np.array([0.01, 1.0, 2.0, 3.0, 5.0], dtype='float32'))
    weights.stop_gradient = True
    weight_map = fluid.layers.reduce_sum(fluid.layers.elementwise_mul(one_hot_label_ori, weights))
    weight_map.stop_gradient = True
    weighted_cross_entropy = fluid.layers.elementwise_mul(weight_map, cross_entropy)
    loss_wc = fluid.layers.reduce_mean(weighted_cross_entropy)
    return loss_wc
```
I can't figure out where I went wrong: the training loss does not decrease and the model does not converge. Could anyone take a look? Thanks.
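While writing this up, one difference I did notice: in the TensorFlow version, both the cross entropy and the weight map stay per-pixel vectors, and only the final reduce_mean collapses them. In my Paddle port, the two reduce_sum calls have no dim argument, so cross_entropy and weight_map each collapse to a single scalar before they are multiplied (and the weight map is also built from the unsqueezed [N, 1, C] one-hot tensor, while the five hardcoded weights do not match the num_classes=2 default). Is this the cause? Below is the per-pixel variant I am considering, written against the PaddlePaddle 1.x static-graph (fluid) API; the function name weighted_softmax_cross_entropy_pp and the class_weights parameter are just names I picked for illustration:

```python
import numpy as np
import paddle.fluid as fluid

def weighted_softmax_cross_entropy_pp(logit, label, num_classes, class_weights):
    """Per-pixel weighted CE; class_weights must have num_classes entries."""
    logit = fluid.layers.reshape(logit, [-1, num_classes])
    label = fluid.layers.reshape(label, [-1, 1])   # int32/int64 class ids
    prob = fluid.layers.softmax(logit)
    # fluid.one_hot on [N, 1] gives [N, 1, C]; squeeze to [N, C].
    one_hot = fluid.layers.squeeze(fluid.one_hot(label, depth=num_classes), axes=[1])
    log_prob = fluid.layers.log(fluid.layers.clip(prob, min=1e-7, max=1.0))
    # dim=-1 reduces over the class axis only: shape [N], one CE value per pixel.
    cross_entropy = fluid.layers.reduce_sum(-1.0 * log_prob * one_hot, dim=-1)
    weights = fluid.layers.assign(np.asarray(class_weights, dtype='float32'))
    weights.stop_gradient = True
    # Select each pixel's class weight through the one-hot mask: shape [N].
    weight_map = fluid.layers.reduce_sum(
        fluid.layers.elementwise_mul(one_hot, weights), dim=-1)
    weight_map.stop_gradient = True
    # Mean of the per-pixel weighted CE, matching the TF version's reduce_mean.
    return fluid.layers.reduce_mean(
        fluid.layers.elementwise_mul(weight_map, cross_entropy))
```

With the per-pixel shapes restored, reduce_mean averages an [N] vector exactly as in the TF version, instead of multiplying two scalars where the summed weights rescale the summed cross entropy by a different factor every step.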