diff --git a/PaddleNLP/models/dialogue_model_toolkit/dialogue_general_understanding/define_paradigm.py b/PaddleNLP/models/dialogue_model_toolkit/dialogue_general_understanding/define_paradigm.py
index 07581b59b6c04641a73a80b51f6d4f8b651cfa25..63d1952efec3879ea4350219fdc40eadacf2e0b0 100644
--- a/PaddleNLP/models/dialogue_model_toolkit/dialogue_general_understanding/define_paradigm.py
+++ b/PaddleNLP/models/dialogue_model_toolkit/dialogue_general_understanding/define_paradigm.py
@@ -62,7 +62,7 @@ class Paradigm(object):
 
         ce_loss, probs = fluid.layers.softmax_with_cross_entropy(
             logits=logits, label=params['labels'], return_softmax=True)
-        loss = fluid.layers.mean(input=ce_loss)
+        loss = fluid.layers.mean(x=ce_loss)
         num_seqs = fluid.layers.create_tensor(dtype='int64')
         accuracy = fluid.layers.accuracy(
             input=probs, label=params['labels'], total=num_seqs)
@@ -102,7 +102,7 @@ class Paradigm(object):
         ce_loss = fluid.layers.reduce_sum(
             fluid.layers.sigmoid_cross_entropy_with_logits(
                 x=logits, label=labels_onehot))
-        loss = fluid.layers.mean(input=ce_loss)
+        loss = fluid.layers.mean(x=ce_loss)
         probs = fluid.layers.sigmoid(logits)
 
         if params['is_prediction']:
@@ -158,7 +158,7 @@ class Paradigm(object):
             correct_prediction, dtype='float32'))
         ce_loss = fluid.layers.softmax_with_cross_entropy(logits=logits, \
             label=fluid.layers.reshape(params['labels'], [-1, 1]))
-        loss = fluid.layers.mean(input=ce_loss)
+        loss = fluid.layers.mean(x=ce_loss)
         loss.persistable = True
         probs.persistable = True