diff --git a/text_classification/text_classification_cnn.py b/text_classification/text_classification_cnn.py
index 21995d5caa8692aed20d179f53d56d2a85a97f8e..87baccc57f8a8f924497f2be22497b57aaf1a399 100644
--- a/text_classification/text_classification_cnn.py
+++ b/text_classification/text_classification_cnn.py
@@ -38,7 +38,7 @@ def convolution_net(input_dim, class_dim=2, emb_dim=128, hid_dim=128):
 
     cost = paddle.layer.classification_cost(input=output, label=lbl)
 
-    return cost, output
+    return cost, output, lbl
 
 
 def train_cnn_model(num_pass):
@@ -57,7 +57,7 @@
         lambda: paddle.dataset.imdb.test(word_dict), batch_size=100)
 
     # network config
-    [cost, _] = convolution_net(dict_dim, class_dim=class_dim)
+    [cost, output, label] = convolution_net(dict_dim, class_dim=class_dim)
     # create parameters
     parameters = paddle.parameters.create(cost)
     # create optimizer
@@ -66,6 +66,9 @@
         regularization=paddle.optimizer.L2Regularization(rate=8e-4),
         model_average=paddle.optimizer.ModelAverage(average_window=0.5))
 
+    # add auc evaluator
+    paddle.evaluator.auc(input=output, label=label)
+
     # create trainer
     trainer = paddle.trainer.SGD(
         cost=cost, parameters=parameters, update_equation=adam_optimizer)
@@ -104,7 +107,7 @@
     dict_dim = len(word_dict)
     class_dim = 2
 
-    [_, output] = convolution_net(dict_dim, class_dim=class_dim)
+    [_, output, _] = convolution_net(dict_dim, class_dim=class_dim)
     parameters = paddle.parameters.Parameters.from_tar(gzip.open(file_name))
 
     infer_data = []
diff --git a/text_classification/text_classification_dnn.py b/text_classification/text_classification_dnn.py
index 192deaa893388b00e9cbd9dbca7ce2d45151a9e2..af5bb67e9d32cdd8b292fb53064a0747eaf042d0 100644
--- a/text_classification/text_classification_dnn.py
+++ b/text_classification/text_classification_dnn.py
@@ -54,7 +54,7 @@
 
     cost = paddle.layer.classification_cost(input=output, label=lbl)
 
-    return cost, output
+    return cost, output, lbl
 
 
 def train_dnn_model(num_pass):
@@ -73,7 +73,8 @@
         lambda: paddle.dataset.imdb.test(word_dict), batch_size=100)
 
     # network config
-    [cost, _] = fc_net(dict_dim, class_dim=class_dim)
+    [cost, output, label] = fc_net(dict_dim, class_dim=class_dim)
+
     # create parameters
     parameters = paddle.parameters.create(cost)
     # create optimizer
@@ -82,6 +83,9 @@
         regularization=paddle.optimizer.L2Regularization(rate=8e-4),
         model_average=paddle.optimizer.ModelAverage(average_window=0.5))
 
+    # add auc evaluator
+    paddle.evaluator.auc(input=output, label=label)
+
     # create trainer
     trainer = paddle.trainer.SGD(
         cost=cost, parameters=parameters, update_equation=adam_optimizer)
@@ -120,7 +124,7 @@
     dict_dim = len(word_dict)
     class_dim = 2
 
-    [_, output] = fc_net(dict_dim, class_dim=class_dim)
+    [_, output, _] = fc_net(dict_dim, class_dim=class_dim)
     parameters = paddle.parameters.Parameters.from_tar(gzip.open(file_name))
 
     infer_data = []