diff --git a/python/paddle/fluid/tests/unittests/test_adam_op.py b/python/paddle/fluid/tests/unittests/test_adam_op.py index 6298c923a27e1d35fcf441bb07c0fd10660a8b83..5514d3aa25cf4cda081c66f986319545a64f2788 100644 --- a/python/paddle/fluid/tests/unittests/test_adam_op.py +++ b/python/paddle/fluid/tests/unittests/test_adam_op.py @@ -806,7 +806,12 @@ class TestAdamOptimizer(unittest.TestCase): input=fc_1, size=2, param_attr=weight_attr2, act='softmax' ) - cost = fluid.layers.cross_entropy(input=prediction, label=label) + cost = paddle.nn.functional.cross_entropy( + input=prediction, + label=label, + reduction='none', + use_softmax=False, + ) loss = paddle.mean(cost) beta1_init = 0.9 beta2_init = 0.999 @@ -966,7 +971,9 @@ class TestAdamOptimizer(unittest.TestCase): fc_1 = fluid.layers.fc(input=z, size=128) prediction = fluid.layers.fc(input=fc_1, size=2, act='softmax') - cost = fluid.layers.cross_entropy(input=prediction, label=label) + cost = paddle.nn.functional.cross_entropy( + input=prediction, label=label, reduction='none', use_softmax=False + ) loss = paddle.mean(cost) adam = fluid.optimizer.Adam(use_global_beta_pow=True) adam.minimize(loss) diff --git a/python/paddle/fluid/tests/unittests/test_async_ssa_graph_executor_mnist.py b/python/paddle/fluid/tests/unittests/test_async_ssa_graph_executor_mnist.py index 54e74ade09aef476668bfb7cf1461a9bf2f567cb..b5f3ada246371e38b461d507547817a63150e054 100644 --- a/python/paddle/fluid/tests/unittests/test_async_ssa_graph_executor_mnist.py +++ b/python/paddle/fluid/tests/unittests/test_async_ssa_graph_executor_mnist.py @@ -57,7 +57,9 @@ def convolutional_neural_network(use_py_reader): ) prediction = fluid.layers.fc(input=conv_pool_2, size=10, act='softmax') - loss = fluid.layers.cross_entropy(input=prediction, label=label) + loss = paddle.nn.functional.cross_entropy( + input=prediction, label=label, reduction='none', use_softmax=False + ) avg_loss = paddle.mean(loss) acc = paddle.static.accuracy(input=prediction, 
label=label) i = fluid.layers.zeros(shape=[1], dtype='int64') diff --git a/python/paddle/fluid/tests/unittests/test_compiled_program.py b/python/paddle/fluid/tests/unittests/test_compiled_program.py index 70edcc5358afb249ff0e1a32d6c3562fc4f10fdf..729a7e3e10a56cc89b0092018349e20f8733c4fa 100644 --- a/python/paddle/fluid/tests/unittests/test_compiled_program.py +++ b/python/paddle/fluid/tests/unittests/test_compiled_program.py @@ -107,7 +107,9 @@ class TestCompiledProgramError(unittest.TestCase): ) label = fluid.layers.data(name='label', shape=[1], dtype='int64') prediction = fluid.layers.fc(input=img, size=10, act='softmax') - loss = fluid.layers.cross_entropy(input=prediction, label=label) + loss = paddle.nn.functional.cross_entropy( + input=prediction, label=label, reduction='none', use_softmax=False + ) avg_loss = paddle.mean(loss) def compile_program_not_compiled(self): diff --git a/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py b/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py index 4e139b1bef8f99acb5eb73eaaa4e7c19845b05bc..a1f650dc63172de2dbad98d3362426a1b27f3366 100644 --- a/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py +++ b/python/paddle/fluid/tests/unittests/test_cross_entropy_op.py @@ -17,6 +17,7 @@ import unittest import numpy as np from op_test import OpTest, randomize_probability +import paddle import paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid import Program, program_guard @@ -419,7 +420,9 @@ class TestCrossEntropyOpError(unittest.TestCase): lab1 = fluid.create_lod_tensor( np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace() ) - fluid.layers.cross_entropy(x1, lab1) + paddle.nn.functional.cross_entropy( + x1, lab1, reduction='none', use_softmax=False + ) self.assertRaises(TypeError, test_Variable) @@ -432,7 +435,9 @@ class TestCrossEntropyOpError(unittest.TestCase): lab2 = fluid.layers.data( name='lab2', shape=[3, 4, 5, 6], dtype="int32" ) - fluid.layers.cross_entropy(x2, lab2) 
+ paddle.nn.functional.cross_entropy( + x2, lab2, reduction='none', use_softmax=False + ) self.assertRaises(TypeError, test_dtype) diff --git a/python/paddle/fluid/tests/unittests/test_decoupled_py_reader.py b/python/paddle/fluid/tests/unittests/test_decoupled_py_reader.py index 72ec58aa2d1ea6f3ff2b11a6ab2daa148e9264c0..f3913b7f3a2a0a38164b8965d1f9a22d0e7661ac 100644 --- a/python/paddle/fluid/tests/unittests/test_decoupled_py_reader.py +++ b/python/paddle/fluid/tests/unittests/test_decoupled_py_reader.py @@ -67,7 +67,12 @@ def simple_fc_net(places, use_legacy_py_reader, use_double_buffer): hidden, size=CLASS_NUM, act='softmax' ) loss = paddle.mean( - fluid.layers.cross_entropy(input=predict_label, label=label) + paddle.nn.functional.cross_entropy( + input=predict_label, + label=label, + reduction='none', + use_softmax=False, + ) ) optimizer = fluid.optimizer.Adam() diff --git a/python/paddle/fluid/tests/unittests/test_desc_clone.py b/python/paddle/fluid/tests/unittests/test_desc_clone.py index 477910f53d59d414deb3ee779287ad3dca4cb58f..52ee114ae8383d1d61f11b3fd006c0e16ed26a0b 100644 --- a/python/paddle/fluid/tests/unittests/test_desc_clone.py +++ b/python/paddle/fluid/tests/unittests/test_desc_clone.py @@ -73,7 +73,9 @@ def get_model(batch_size): # Train program predict = cnn_model(images) - cost = fluid.layers.cross_entropy(input=predict, label=label) + cost = paddle.nn.functional.cross_entropy( + input=predict, label=label, reduction='none', use_softmax=False + ) avg_cost = paddle.mean(x=cost) # Evaluator @@ -188,9 +190,11 @@ class TestCloneWithStopGradient(unittest.TestCase): hidden1 = fluid.layers.fc(input=img, size=200, act='relu') hidden1.stop_gradient = True hidden2 = fluid.layers.dropout(hidden1, dropout_prob=0.5) - loss = fluid.layers.cross_entropy( + loss = paddle.nn.functional.cross_entropy( input=fluid.layers.fc(hidden2, size=10, act='softmax'), label=fluid.layers.data(name='label', shape=[1], dtype='int64'), + reduction='none', + use_softmax=False, ) 
avg_loss = paddle.mean(loss) test_program = train_program.clone(for_test=False) @@ -226,9 +230,11 @@ class TestCloneWithStopGradientInSubBlock(unittest.TestCase): hidden2 = fluid.layers.cond(cond, true_fn, false_fn) - loss = fluid.layers.cross_entropy( + loss = paddle.nn.functional.cross_entropy( input=fluid.layers.fc(hidden2, size=10, act='softmax'), label=fluid.layers.data(name='label', shape=[1], dtype='int64'), + reduction='none', + use_softmax=False, ) avg_loss = paddle.mean(loss) test_program = train_program.clone(for_test=False) @@ -266,9 +272,11 @@ class TestCloneWithRaise(unittest.TestCase): return hidden2 hidden2 = fluid.layers.cond(cond, true_fn, false_fn) - loss = fluid.layers.cross_entropy( + loss = paddle.nn.functional.cross_entropy( input=fluid.layers.fc(hidden2, size=10, act='softmax'), label=fluid.layers.data(name='label', shape=[1], dtype='int64'), + reduction='none', + use_softmax=False, ) avg_loss = paddle.mean(loss) test_program = train_program.clone(for_test=False) diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto.py index e2f08591d705e79fcff54f01dd520c5c2519ee19..967028f02d20327a5007c1117516161f71acf807 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto.py @@ -52,8 +52,8 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh') fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh') prediction = paddle.fluid.layers.fc(input=[fc_2], size=2, act='softmax') - cost = paddle.fluid.layers.cross_entropy( - input=prediction, label=input_y + cost = paddle.nn.functional.cross_entropy( + input=prediction, label=input_y, reduction='none', use_softmax=False ) avg_cost = paddle.mean(x=cost) diff --git 
a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto_async.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto_async.py index aa46502b4d234a098d47efb5ab1c0bdcf9bd63a5..6ffea5df324b7b870bb15740cd08180c4c5f249d 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto_async.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto_async.py @@ -68,8 +68,8 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): fc_1 = paddle.fluid.layers.fc(input=x_embedding, size=64, act='tanh') fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh') prediction = paddle.fluid.layers.fc(input=[fc_2], size=2, act='softmax') - cost = paddle.fluid.layers.cross_entropy( - input=prediction, label=input_y + cost = paddle.nn.functional.cross_entropy( + input=prediction, label=input_y, reduction='none', use_softmax=False ) avg_cost = paddle.mean(x=cost) diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto_geo.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto_geo.py index 88f3769c83efd40e7d0de932e31e105fa8f88666..3bb5b669c8d1541f813538e80f2e40299efac714 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto_geo.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_auto_geo.py @@ -56,8 +56,8 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): fc_1 = paddle.fluid.layers.fc(input=emb, size=64, act='tanh') fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh') prediction = paddle.fluid.layers.fc(input=[fc_2], size=2, act='softmax') - cost = paddle.fluid.layers.cross_entropy( - input=prediction, label=input_y + cost = paddle.nn.functional.cross_entropy( + input=prediction, label=input_y, reduction='none', use_softmax=False ) avg_cost = paddle.mean(x=cost) os.environ["FLAGS_LAUNCH_BARRIER"] = "0" diff --git 
a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_geo.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_geo.py index 789f7ee8637855edfde47abe4f8f51985cc0e33e..6556e19079ce58a87d8d26bb53de8255a031cd72 100755 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_geo.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_a_sync_optimizer_geo.py @@ -51,8 +51,8 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh') fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh') prediction = paddle.fluid.layers.fc(input=[fc_2], size=2, act='softmax') - cost = paddle.fluid.layers.cross_entropy( - input=prediction, label=input_y + cost = paddle.nn.functional.cross_entropy( + input=prediction, label=input_y, reduction='none', use_softmax=False ) avg_cost = paddle.mean(x=cost) @@ -84,8 +84,8 @@ class TestFleetGradientMergeMetaOptimizer(unittest.TestCase): fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh') fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh') prediction = paddle.fluid.layers.fc(input=[fc_2], size=2, act='softmax') - cost = paddle.fluid.layers.cross_entropy( - input=prediction, label=input_y + cost = paddle.nn.functional.cross_entropy( + input=prediction, label=input_y, reduction='none', use_softmax=False ) avg_cost = paddle.mean(x=cost) diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_heter_program.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_heter_program.py index 477d9091d7a51bd30edb4a8389ab28f31d375d28..dc77545cc196b664b0cd812e6d7e4a10587a7e37 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_heter_program.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_heter_program.py @@ -150,7 +150,9 @@ class TestDistFleetHeterProgram(unittest.TestCase): with fluid.device_guard("gpu"): labels = fluid.layers.cast(inputs[-1], dtype="int64") - cost = 
fluid.layers.cross_entropy(input=predict, label=labels) + cost = paddle.nn.functional.cross_entropy( + input=predict, label=labels, reduction='none', use_softmax=False + ) avg_cost = paddle.sum(cost) return avg_cost diff --git a/python/paddle/fluid/tests/unittests/test_dygraph_mnist_fp16.py b/python/paddle/fluid/tests/unittests/test_dygraph_mnist_fp16.py index ef7059887b91bc4a40c4f7ca8eef00d45097a73c..862b3868023ca502b24a8b91b75540336683c48d 100644 --- a/python/paddle/fluid/tests/unittests/test_dygraph_mnist_fp16.py +++ b/python/paddle/fluid/tests/unittests/test_dygraph_mnist_fp16.py @@ -112,7 +112,9 @@ class MNIST(fluid.dygraph.Layer): x = paddle.reshape(x, shape=[-1, self.pool_2_shape]) cost = self._linear(x) cost = paddle.nn.functional.softmax(cost) - loss = fluid.layers.cross_entropy(cost, label) + loss = paddle.nn.functional.cross_entropy( + cost, label, reduction='none', use_softmax=False + ) avg_loss = paddle.mean(loss) return avg_loss diff --git a/python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py b/python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py index e4fd9766a26225a24a9fdc4047e6e7c1bbd12ad5..e79773e8d41f46805d7b29bc09b5a8320644e42b 100644 --- a/python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py +++ b/python/paddle/fluid/tests/unittests/test_dygraph_multi_forward.py @@ -138,7 +138,9 @@ class TestDygraphMultiForward(unittest.TestCase): label.stop_gradient = True cost = mnist(img) - loss = fluid.layers.cross_entropy(cost, label) + loss = paddle.nn.functional.cross_entropy( + cost, label, reduction='none', use_softmax=False + ) avg_loss = paddle.mean(loss) dy_out = avg_loss.numpy() @@ -167,7 +169,9 @@ class TestDygraphMultiForward(unittest.TestCase): ) label = fluid.layers.data(name='label', shape=[1], dtype='int64') cost = mnist(img) - loss = fluid.layers.cross_entropy(cost, label) + loss = paddle.nn.functional.cross_entropy( + cost, label, reduction='none', use_softmax=False + ) avg_loss = paddle.mean(loss) # 
initialize params and fetch them diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_delete_vars.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_delete_vars.py index d3ea6fd4f4f5f61a88f2441ddb21bf46ba946e8b..9895f5b2488bf276df08e42de0867676140e919f 100644 --- a/python/paddle/fluid/tests/unittests/test_eager_deletion_delete_vars.py +++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_delete_vars.py @@ -45,7 +45,9 @@ def simple_fc_net(): ), ) prediction = fluid.layers.fc(hidden, size=10, act='softmax') - loss = fluid.layers.cross_entropy(input=prediction, label=label) + loss = paddle.nn.functional.cross_entropy( + input=prediction, label=label, reduction='none', use_softmax=False + ) loss = paddle.mean(loss) optimizer = fluid.optimizer.Adam(learning_rate=1e-3) optimizer.minimize(loss) diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_gru_net.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_gru_net.py index b5dff40cb3d2681d9689e2cd1a2cdf7a40fa3d58..1ac689d8df05acd00d0141f8d4fe743cea61043e 100644 --- a/python/paddle/fluid/tests/unittests/test_eager_deletion_gru_net.py +++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_gru_net.py @@ -43,7 +43,9 @@ def gru_net( gru_max_tanh = paddle.tanh(gru_max) fc1 = fluid.layers.fc(input=gru_max_tanh, size=hid_dim2, act='tanh') prediction = fluid.layers.fc(input=fc1, size=class_dim, act='softmax') - cost = fluid.layers.cross_entropy(input=prediction, label=label) + cost = paddle.nn.functional.cross_entropy( + input=prediction, label=label, reduction='none', use_softmax=False + ) avg_cost = paddle.mean(x=cost) return avg_cost diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_lstm_net.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_lstm_net.py index 3cd7c681adc72c22f7f3adf9ae2ab956c4baec09..09fc60e6b120c32dd133f9333b2c21a282854797 100644 --- a/python/paddle/fluid/tests/unittests/test_eager_deletion_lstm_net.py +++ 
b/python/paddle/fluid/tests/unittests/test_eager_deletion_lstm_net.py @@ -45,7 +45,9 @@ def lstm_net( lstm_max_tanh = paddle.tanh(lstm_max) fc1 = fluid.layers.fc(input=lstm_max_tanh, size=hid_dim2, act='tanh') prediction = fluid.layers.fc(input=fc1, size=class_dim, act='softmax') - cost = fluid.layers.cross_entropy(input=prediction, label=label) + cost = paddle.nn.functional.cross_entropy( + input=prediction, label=label, reduction='none', use_softmax=False + ) avg_cost = paddle.mean(x=cost) return avg_cost diff --git a/python/paddle/fluid/tests/unittests/test_feed_data_check_shape_type.py b/python/paddle/fluid/tests/unittests/test_feed_data_check_shape_type.py index f1a1fb5f3b513078feec53717d6374043f15e3de..0e08b14e32f9583fb53962158a6bb885ab293143 100644 --- a/python/paddle/fluid/tests/unittests/test_feed_data_check_shape_type.py +++ b/python/paddle/fluid/tests/unittests/test_feed_data_check_shape_type.py @@ -67,7 +67,12 @@ class TestFeedData(unittest.TestCase): predict_label = fluid.layers.fc(hidden, size=class_num, act='softmax') loss = paddle.mean( - fluid.layers.cross_entropy(input=predict_label, label=label) + paddle.nn.functional.cross_entropy( + input=predict_label, + label=label, + reduction='none', + use_softmax=False, + ) ) optimizer = fluid.optimizer.Adam() diff --git a/python/paddle/fluid/tests/unittests/test_fetch_unmerged.py b/python/paddle/fluid/tests/unittests/test_fetch_unmerged.py index 028954d22ffdc95c4366817415da3139e01ca76a..c1b8046c6976ba3ecbd5e14d88d1e3119bcd6b4c 100644 --- a/python/paddle/fluid/tests/unittests/test_fetch_unmerged.py +++ b/python/paddle/fluid/tests/unittests/test_fetch_unmerged.py @@ -46,7 +46,9 @@ class TestFetchUnmerged(unittest.TestCase): ) hidden = fluid.layers.fc(input=conv_pool_2, size=32, act='relu') prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') - loss = fluid.layers.cross_entropy(input=prediction, label=label) + loss = paddle.nn.functional.cross_entropy( + input=prediction, label=label, 
reduction='none', use_softmax=False + ) avg_loss = paddle.mean(loss) return avg_loss, prediction diff --git a/python/paddle/fluid/tests/unittests/test_fleet_auto.py b/python/paddle/fluid/tests/unittests/test_fleet_auto.py index 22abaaf4d18c3662468933246e505feb5fb770e1..28f05a6e03f6fb6a1a0a717dde50a95616be40de 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_auto.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_auto.py @@ -40,8 +40,8 @@ class TestDistributedStrategyAuto(unittest.TestCase): fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh') fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh') prediction = paddle.fluid.layers.fc(input=[fc_2], size=2, act='softmax') - cost = paddle.fluid.layers.cross_entropy( - input=prediction, label=input_y + cost = paddle.nn.functional.cross_entropy( + input=prediction, label=input_y, reduction='none', use_softmax=False ) avg_cost = paddle.mean(x=cost) diff --git a/python/paddle/fluid/tests/unittests/test_fleet_base.py b/python/paddle/fluid/tests/unittests/test_fleet_base.py index 74886d9a9c12307716e741fa55c264879eb9cc79..1f5ea942965fab182e9a626ca97ec2180ca080e6 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_base.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_base.py @@ -203,7 +203,12 @@ class TestFleetBaseSingleError(unittest.TestCase): fc_1 = fluid.layers.fc(input=input_x, size=64, act='tanh') prediction = fluid.layers.fc(input=fc_1, size=2, act='softmax') - cost = fluid.layers.cross_entropy(input=prediction, label=input_y) + cost = paddle.nn.functional.cross_entropy( + input=prediction, + label=input_y, + reduction='none', + use_softmax=False, + ) avg_cost = paddle.mean(x=cost) fleet.init(is_collective=True) diff --git a/python/paddle/fluid/tests/unittests/test_fleet_base_2.py b/python/paddle/fluid/tests/unittests/test_fleet_base_2.py index 64cd2df9037290c9ee2dce09ebcb4bfa4ea81039..fdffc388eacea6fd18b837b142a36f435860238a 100644 --- 
a/python/paddle/fluid/tests/unittests/test_fleet_base_2.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_base_2.py @@ -54,8 +54,8 @@ class TestFleetBase(unittest.TestCase): fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh') fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh') prediction = paddle.fluid.layers.fc(input=[fc_2], size=2, act='softmax') - cost = paddle.fluid.layers.cross_entropy( - input=prediction, label=input_y + cost = paddle.nn.functional.cross_entropy( + input=prediction, label=input_y, reduction='none', use_softmax=False ) avg_cost = paddle.mean(x=cost) diff --git a/python/paddle/fluid/tests/unittests/test_fleet_base_3.py b/python/paddle/fluid/tests/unittests/test_fleet_base_3.py index 7d1672064d1156661fe3ecb841009ef88416baef..33d970f109d78a7dfccab9978aeffef3627117a6 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_base_3.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_base_3.py @@ -40,8 +40,8 @@ class TestFleetBase_1(unittest.TestCase): fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh') fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh') prediction = paddle.fluid.layers.fc(input=[fc_2], size=2, act='softmax') - cost = paddle.fluid.layers.cross_entropy( - input=prediction, label=input_y + cost = paddle.nn.functional.cross_entropy( + input=prediction, label=input_y, reduction='none', use_softmax=False ) avg_cost = paddle.mean(x=cost) @@ -71,8 +71,8 @@ class TestFleetBase(unittest.TestCase): fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh') fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh') prediction = paddle.fluid.layers.fc(input=[fc_2], size=2, act='softmax') - cost = paddle.fluid.layers.cross_entropy( - input=prediction, label=input_y + cost = paddle.nn.functional.cross_entropy( + input=prediction, label=input_y, reduction='none', use_softmax=False ) avg_cost = paddle.mean(x=cost) diff --git 
a/python/paddle/fluid/tests/unittests/test_fleet_base_single.py b/python/paddle/fluid/tests/unittests/test_fleet_base_single.py index 39825686b371130b5a438d2442ffee11342864ed..4040e10594e9a15ccc5f452d83e63f92b9a12fdd 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_base_single.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_base_single.py @@ -85,7 +85,9 @@ class TestFleetBaseSingleRunCollective(unittest.TestCase): fc_1 = fluid.layers.fc(input=input_x, size=64, act='tanh') prediction = fluid.layers.fc(input=fc_1, size=2, act='softmax') - cost = fluid.layers.cross_entropy(input=prediction, label=input_y) + cost = paddle.nn.functional.cross_entropy( + input=prediction, label=input_y, reduction='none', use_softmax=False + ) avg_cost = paddle.mean(x=cost) fleet.init(is_collective=True) @@ -124,7 +126,9 @@ class TestFleetBaseSingleRunPS(unittest.TestCase): fc_1 = fluid.layers.fc(input=input_x, size=64, act='tanh') prediction = fluid.layers.fc(input=fc_1, size=2, act='softmax') - cost = fluid.layers.cross_entropy(input=prediction, label=input_y) + cost = paddle.nn.functional.cross_entropy( + input=prediction, label=input_y, reduction='none', use_softmax=False + ) avg_cost = paddle.mean(x=cost) fleet.init() diff --git a/python/paddle/fluid/tests/unittests/test_fuse_bn_act_pass.py b/python/paddle/fluid/tests/unittests/test_fuse_bn_act_pass.py index 9a7a907321089a8b9f9f9e6139828af86924a48c..066443ebcf83b1ea3f869231f33d724302309b8b 100644 --- a/python/paddle/fluid/tests/unittests/test_fuse_bn_act_pass.py +++ b/python/paddle/fluid/tests/unittests/test_fuse_bn_act_pass.py @@ -53,7 +53,9 @@ class TestFuseBatchNormActPass(unittest.TestCase): input=hidden3, act='relu', data_layout='NHWC' ) prediction = fluid.layers.fc(input=hidden4, size=10, act='softmax') - loss = fluid.layers.cross_entropy(input=prediction, label=y) + loss = paddle.nn.functional.cross_entropy( + input=prediction, label=y, reduction='none', use_softmax=False + ) loss = paddle.mean(loss) sgd = 
fluid.optimizer.SGD(learning_rate=0.001) if use_cuda: diff --git a/python/paddle/fluid/tests/unittests/test_fuse_bn_add_act_pass.py b/python/paddle/fluid/tests/unittests/test_fuse_bn_add_act_pass.py index 1b83dfa2b010dbe58a2257698735d8ee41c47dca..c644391eeea8a69f4cd41e31d973f486cfc39818 100644 --- a/python/paddle/fluid/tests/unittests/test_fuse_bn_add_act_pass.py +++ b/python/paddle/fluid/tests/unittests/test_fuse_bn_add_act_pass.py @@ -106,7 +106,9 @@ class TestFusedBnAddActAPI(unittest.TestCase): act='softmax', param_attr=self.fc_param_attr, ) - loss = fluid.layers.cross_entropy(input=prediction, label=y) + loss = paddle.nn.functional.cross_entropy( + input=prediction, label=y, reduction='none', use_softmax=False + ) loss = paddle.mean(loss) sgd = fluid.optimizer.SGD(learning_rate=0.001) sgd = fluid.contrib.mixed_precision.decorate( @@ -162,7 +164,9 @@ class TestFusedBnAddActAPI(unittest.TestCase): prediction = fluid.layers.fc( input=out, size=10, act='softmax', param_attr=self.fc_param_attr ) - loss = fluid.layers.cross_entropy(input=prediction, label=y) + loss = paddle.nn.functional.cross_entropy( + input=prediction, label=y, reduction='none', use_softmax=False + ) loss = paddle.mean(loss) sgd = fluid.optimizer.SGD(learning_rate=0.001) sgd = fluid.contrib.mixed_precision.decorate( diff --git a/python/paddle/fluid/tests/unittests/test_fuse_relu_depthwise_conv_pass.py b/python/paddle/fluid/tests/unittests/test_fuse_relu_depthwise_conv_pass.py index 025e12c02c611be8795a1380299011f06b091410..bb2b22ff18e5dd62054352509073935578d631c2 100644 --- a/python/paddle/fluid/tests/unittests/test_fuse_relu_depthwise_conv_pass.py +++ b/python/paddle/fluid/tests/unittests/test_fuse_relu_depthwise_conv_pass.py @@ -61,7 +61,9 @@ def simple_depthwise_net(use_feed): hidden = sep_conv(hidden, channel=200, stride=2, filter=5) hidden = fluid.layers.relu(hidden) prediction = fluid.layers.fc(hidden, size=10, act='softmax') - loss = fluid.layers.cross_entropy(input=prediction, 
label=label) + loss = paddle.nn.functional.cross_entropy( + input=prediction, label=label, reduction='none', use_softmax=False + ) loss = paddle.mean(loss) return loss diff --git a/python/paddle/fluid/tests/unittests/test_generator_dataloader.py b/python/paddle/fluid/tests/unittests/test_generator_dataloader.py index 22114853e9e86c59d682c22067f73eac5b9a8f25..02a34401e79f08a15d2babb03a5cc95739b44fc0 100644 --- a/python/paddle/fluid/tests/unittests/test_generator_dataloader.py +++ b/python/paddle/fluid/tests/unittests/test_generator_dataloader.py @@ -68,7 +68,12 @@ def simple_fc_net(places, use_legacy_py_reader, use_double_buffer): hidden, size=CLASS_NUM, act='softmax' ) loss = paddle.mean( - fluid.layers.cross_entropy(input=predict_label, label=label) + paddle.nn.functional.cross_entropy( + input=predict_label, + label=label, + reduction='none', + use_softmax=False, + ) ) optimizer = fluid.optimizer.Adam() diff --git a/python/paddle/fluid/tests/unittests/test_gradient_clip.py b/python/paddle/fluid/tests/unittests/test_gradient_clip.py index 0c89e000538d6e5fd8cd28d7806dc80e1edc9934..2243ae8c45602a694e1ce79e72cbc033abaf1636 100644 --- a/python/paddle/fluid/tests/unittests/test_gradient_clip.py +++ b/python/paddle/fluid/tests/unittests/test_gradient_clip.py @@ -41,7 +41,9 @@ def bow_net( fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh") fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act="tanh") prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act="softmax") - cost = fluid.layers.cross_entropy(input=prediction, label=label) + cost = paddle.nn.functional.cross_entropy( + input=prediction, label=label, reduction='none', use_softmax=False + ) avg_cost = paddle.mean(x=cost) return avg_cost @@ -83,7 +85,9 @@ class TestGradientClip(unittest.TestCase): hidden = fluid.layers.fc(input=image, size=32, act='relu') predict = fluid.layers.fc(input=hidden, size=10, act='softmax') - cost = fluid.layers.cross_entropy(input=predict, label=label) + cost = 
paddle.nn.functional.cross_entropy( + input=predict, label=label, reduction='none', use_softmax=False + ) avg_cost = paddle.mean(cost) prog_clip = prog.clone() diff --git a/python/paddle/fluid/tests/unittests/test_imperative_auto_prune.py b/python/paddle/fluid/tests/unittests/test_imperative_auto_prune.py index 5dbb1ac0a2974bfe1158d97324dfd889e389cb1d..5a301d3f0a5adc4688ac53da7b25b0bf201c8a98 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_auto_prune.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_auto_prune.py @@ -90,8 +90,10 @@ class AutoPruneLayer2(fluid.Layer): label = self.linear2(label) label = fluid.layers.cast(label, dtype="float32") label = fluid.layers.cast(label, dtype='int64') - # Note that the label is not persistable in fluid.layers.cross_entropy. - loss = fluid.layers.cross_entropy(input=feature, label=label) + # Note that the label is not persistable in paddle.nn.functional.cross_entropy. + loss = paddle.nn.functional.cross_entropy( + input=feature, label=label, reduction='none', use_softmax=False + ) loss = paddle.mean(loss) return loss @@ -107,7 +109,9 @@ class AutoPruneLayer3(fluid.Layer): feature, num_or_sections=[10, 10], dim=1 ) # Note that: part2 is not used. 
- loss = fluid.layers.cross_entropy(input=part1, label=label) + loss = paddle.nn.functional.cross_entropy( + input=part1, label=label, reduction='none', use_softmax=False + ) loss = paddle.mean(loss) if test_num == 1: return loss, part2 diff --git a/python/paddle/fluid/tests/unittests/test_imperative_mnist.py b/python/paddle/fluid/tests/unittests/test_imperative_mnist.py index 7c8977ee1815e891cad9772db72ed8e6cdcb4601..8ee54bab6b0fbf557b58e5f06346bddbcdceea81 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_mnist.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_mnist.py @@ -159,7 +159,9 @@ class TestImperativeMnist(unittest.TestCase): cost_static = traced_layer([img]) helper.assertEachVar(cost, cost_static) - loss = fluid.layers.cross_entropy(cost, label) + loss = paddle.nn.functional.cross_entropy( + cost, label, reduction='none', use_softmax=False + ) avg_loss = paddle.mean(loss) dy_out = avg_loss.numpy() @@ -199,7 +201,9 @@ class TestImperativeMnist(unittest.TestCase): ) label = fluid.layers.data(name='label', shape=[1], dtype='int64') cost = mnist(img) - loss = fluid.layers.cross_entropy(cost, label) + loss = paddle.nn.functional.cross_entropy( + cost, label, reduction='none', use_softmax=False + ) avg_loss = paddle.mean(loss) sgd.minimize(avg_loss) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_mnist_sorted_gradient.py b/python/paddle/fluid/tests/unittests/test_imperative_mnist_sorted_gradient.py index 2ed75cb777a93e7359408f982dd23dcb329c4b0e..80f7162a640f757ed7fca4ecccf0c811f7f74249 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_mnist_sorted_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_mnist_sorted_gradient.py @@ -62,7 +62,9 @@ class TestImperativeMnistSortGradient(unittest.TestCase): label2.stop_gradient = True cost2 = mnist2(img2) - loss2 = fluid.layers.cross_entropy(cost2, label2) + loss2 = paddle.nn.functional.cross_entropy( + cost2, label2, reduction='none', 
use_softmax=False + ) avg_loss2 = paddle.mean(loss2) dy_out2 = avg_loss2.numpy() @@ -102,7 +104,9 @@ class TestImperativeMnistSortGradient(unittest.TestCase): ) label = fluid.layers.data(name='label', shape=[1], dtype='int64') cost = mnist(img) - loss = fluid.layers.cross_entropy(cost, label) + loss = paddle.nn.functional.cross_entropy( + cost, label, reduction='none', use_softmax=False + ) avg_loss = paddle.mean(loss) sgd.minimize(avg_loss) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py b/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py index 4e4c8aa4351e09db83ea6e149a0e38dabec7a7c3..1df0a4148c9c637aa77b3a8c6e587515b66bbb6d 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py @@ -477,8 +477,11 @@ class TestDygraphOCRAttention(unittest.TestCase): dy_prediction = paddle.reshape( dy_prediction, [label_out.shape[0], -1] ) - loss = fluid.layers.cross_entropy( - input=dy_prediction, label=label_out + loss = paddle.nn.functional.cross_entropy( + input=dy_prediction, + label=label_out, + reduction='none', + use_softmax=False, ) avg_loss = paddle.sum(loss) @@ -555,8 +558,11 @@ class TestDygraphOCRAttention(unittest.TestCase): static_prediction, shape=[-1, Config.num_classes + 2] ) - cost = fluid.layers.cross_entropy( - input=static_prediction, label=static_label_out + cost = paddle.nn.functional.cross_entropy( + input=static_prediction, + label=static_label_out, + reduction='none', + use_softmax=False, ) static_avg_loss = paddle.sum(cost) # param_grad_list = fluid.backward.append_backward(static_avg_loss) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py index 559ea6ff71e86ef478e4c9b2aff5c61881f4450e..16951a8743c4ff514aa2cb6af098ae7432a61338 100644 --- 
a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py @@ -311,7 +311,9 @@ class TestDygraphResnet(unittest.TestCase): helper.assertEachVar(out_dygraph, out_static) resnet.train() - loss = fluid.layers.cross_entropy(input=out, label=label) + loss = paddle.nn.functional.cross_entropy( + input=out, label=label, reduction='none', use_softmax=False + ) avg_loss = paddle.mean(x=loss) dy_out = avg_loss.numpy() @@ -364,7 +366,9 @@ class TestDygraphResnet(unittest.TestCase): ) label = fluid.layers.data(name='label', shape=[1], dtype='int64') out = resnet(img) - loss = fluid.layers.cross_entropy(input=out, label=label) + loss = paddle.nn.functional.cross_entropy( + input=out, label=label, reduction='none', use_softmax=False + ) avg_loss = paddle.mean(x=loss) optimizer.minimize(avg_loss) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py b/python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py index 50afad1b5cf0eda65c5293f787b3c92eadf49a56..f28631d0adab21e8e9293cdfca8f2ddb1e841f15 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py @@ -118,7 +118,9 @@ class TestDygraphResnetSortGradient(unittest.TestCase): label.stop_gradient = True out = resnet(img) - loss = fluid.layers.cross_entropy(input=out, label=label) + loss = paddle.nn.functional.cross_entropy( + input=out, label=label, reduction='none', use_softmax=False + ) avg_loss = paddle.mean(x=loss) dy_out = avg_loss.numpy() @@ -174,7 +176,9 @@ class TestDygraphResnetSortGradient(unittest.TestCase): ) label = fluid.layers.data(name='label', shape=[1], dtype='int64') out = resnet(img) - loss = fluid.layers.cross_entropy(input=out, label=label) + loss = paddle.nn.functional.cross_entropy( + input=out, label=label, reduction='none', use_softmax=False + ) avg_loss = 
paddle.mean(x=loss) optimizer.minimize(avg_loss) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py b/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py index f2c8d285a0439bb407bb7d3f14a98712bd0580e6..6180d1c66494b28c6f9c29fc9ac951434ce4d593 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py @@ -373,8 +373,11 @@ class TestImperativeResneXt(unittest.TestCase): out = se_resnext(img) softmax_out = paddle.nn.functional.softmax(out) - loss = fluid.layers.cross_entropy( - input=softmax_out, label=label + loss = paddle.nn.functional.cross_entropy( + input=softmax_out, + label=label, + reduction='none', + use_softmax=False, ) avg_loss = paddle.mean(x=loss) @@ -453,7 +456,13 @@ class TestImperativeResneXt(unittest.TestCase): label = fluid.layers.data(name='label', shape=[1], dtype='int64') out = se_resnext(img) - softmax_out = paddle.nn.function.softmax(out) + softmax_out = paddle.nn.functional.softmax(out) - loss = fluid.layers.cross_entropy(input=softmax_out, label=label) + loss = paddle.nn.functional.cross_entropy( + input=softmax_out, + label=label, + reduction='none', + use_softmax=False, + ) avg_loss = paddle.mean(x=loss) optimizer.minimize(avg_loss) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_static_runner_mnist.py b/python/paddle/fluid/tests/unittests/test_imperative_static_runner_mnist.py index ee2cc13d6a8c0134e523e39e2888cb272eaa9d61..6963be33790497581d855dbf91fcfc97ccdaf6a0 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_static_runner_mnist.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_static_runner_mnist.py @@ -49,7 +49,9 @@ def convolutional_neural_network(img): def static_train_net(img, label): prediction = convolutional_neural_network(img) - loss = fluid.layers.cross_entropy(input=prediction, label=label) + loss = paddle.nn.functional.cross_entropy( + input=prediction, label=label, reduction='none', use_softmax=False
+ ) avg_loss = paddle.mean(loss) optimizer = fluid.optimizer.SGD(learning_rate=0.001) @@ -172,7 +174,9 @@ class TestImperativeStaticModelRunnerMnist(unittest.TestCase): cost = mnist(img) - loss = fluid.layers.cross_entropy(cost, label) + loss = paddle.nn.functional.cross_entropy( + cost, label, reduction='none', use_softmax=False + ) avg_loss = paddle.mean(loss) avg_loss.backward() diff --git a/python/paddle/fluid/tests/unittests/test_imperative_static_runner_while.py b/python/paddle/fluid/tests/unittests/test_imperative_static_runner_while.py index aac9152195be55fe4c7db204ee3479685ec20358..ff7644e61d80696840dd095e394bebec91041044 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_static_runner_while.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_static_runner_while.py @@ -78,7 +78,9 @@ class TestImperativeStaticModelRunnerWhile(unittest.TestCase): pred = while_softmax_regression(img) - loss = fluid.layers.cross_entropy(input=pred, label=label) + loss = paddle.nn.functional.cross_entropy( + input=pred, label=label, reduction='none', use_softmax=False + ) avg_loss = paddle.mean(loss) optimizer = fluid.optimizer.SGD(learning_rate=0.001) @@ -149,7 +151,9 @@ class TestImperativeStaticModelRunnerWhile(unittest.TestCase): cost = while_net(img) - loss = fluid.layers.cross_entropy(cost, label) + loss = paddle.nn.functional.cross_entropy( + cost, label, reduction='none', use_softmax=False + ) avg_loss = paddle.mean(loss) avg_loss.backward() @@ -174,7 +178,9 @@ class TestImperativeStaticModelRunnerWhile(unittest.TestCase): pred = while_softmax_regression(img) - loss = fluid.layers.cross_entropy(input=pred, label=label) + loss = paddle.nn.functional.cross_entropy( + input=pred, label=label, reduction='none', use_softmax=False + ) avg_loss = paddle.mean(loss) optimizer = fluid.optimizer.SGD(learning_rate=0.001) diff --git a/python/paddle/fluid/tests/unittests/test_inference_model_io.py b/python/paddle/fluid/tests/unittests/test_inference_model_io.py 
index daeae8e472fe2653a4eb77365edd45c20ef4269d..aad4a7264895d484dd32e6665f0f0a5045e1a9d2 100644 --- a/python/paddle/fluid/tests/unittests/test_inference_model_io.py +++ b/python/paddle/fluid/tests/unittests/test_inference_model_io.py @@ -182,7 +182,9 @@ class TestSaveInferenceModel(unittest.TestCase): auc_var, batch_auc_var, auc_states = paddle.static.auc( input=predict, label=y ) - cost = fluid.layers.cross_entropy(input=predict, label=y) + cost = paddle.nn.functional.cross_entropy( + input=predict, label=y, reduction='none', use_softmax=False + ) avg_cost = paddle.mean(x=cost) place = core.CPUPlace() diff --git a/python/paddle/fluid/tests/unittests/test_ir_inplace_pass.py b/python/paddle/fluid/tests/unittests/test_ir_inplace_pass.py index 3c5f2edc4f53bad3145ee7bb9ce42289690822a1..5d3efb8230ae22e2c0a5dd828651a79a3191ce2e 100644 --- a/python/paddle/fluid/tests/unittests/test_ir_inplace_pass.py +++ b/python/paddle/fluid/tests/unittests/test_ir_inplace_pass.py @@ -40,7 +40,9 @@ def fc_with_batchnorm(use_feed): hidden = paddle.static.nn.batch_norm(input=hidden) prediction = fluid.layers.fc(hidden, size=10, act='softmax') - loss = fluid.layers.cross_entropy(input=prediction, label=label) + loss = paddle.nn.functional.cross_entropy( + input=prediction, label=label, reduction='none', use_softmax=False + ) loss = paddle.mean(loss) return loss diff --git a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_nlp.py b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_nlp.py index 53d01d9df693356e13c66b0a08add1eed0f00f10..04afd37c26d3ee2d3d56b6191d6fa8f3075ef137 100644 --- a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_nlp.py +++ b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_nlp.py @@ -46,7 +46,9 @@ def lstm_net( lstm_max_tanh = paddle.tanh(lstm_max) fc1 = fluid.layers.fc(input=lstm_max_tanh, size=hid_dim2, act='tanh') prediction = fluid.layers.fc(input=fc1, size=class_dim, act='softmax') - cost = 
fluid.layers.cross_entropy(input=prediction, label=label) + cost = paddle.nn.functional.cross_entropy( + input=prediction, label=label, reduction='none', use_softmax=False + ) avg_cost = paddle.mean(x=cost) return avg_cost diff --git a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_pass.py b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_pass.py index 6cf56915832d9904e2f6450dc8a4176923738375..f2de46ea465a93507f889daf3c1f9952f9124ff0 100644 --- a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_pass.py +++ b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_pass.py @@ -35,7 +35,9 @@ def simple_fc_net(use_feed): for _ in range(hidden_layer): x = fluid.layers.fc(input=x, size=20, act='relu') y_predict = fluid.layers.fc(input=x, size=10, act='softmax') - cost = fluid.layers.cross_entropy(input=y_predict, label=y) + cost = paddle.nn.functional.cross_entropy( + input=y_predict, label=y, reduction='none', use_softmax=False + ) avg_cost = paddle.mean(cost) return avg_cost @@ -48,7 +50,9 @@ def fc_with_inplace_net(use_feed): reshape = paddle.reshape(x=fc, shape=[-1, 2, 5]) reshape = paddle.reshape(x=reshape, shape=[-1, 5, 2]) y_predict = fluid.layers.fc(input=reshape, size=10, act='softmax') - cost = fluid.layers.cross_entropy(input=y_predict, label=y) + cost = paddle.nn.functional.cross_entropy( + input=y_predict, label=y, reduction='none', use_softmax=False + ) avg_cost = paddle.mean(cost) return avg_cost diff --git a/python/paddle/fluid/tests/unittests/test_jit_save_load.py b/python/paddle/fluid/tests/unittests/test_jit_save_load.py index acb8d0859835f5673b5e66c13adf9b045593587b..65138482a87e81a1cfbf6f0e5aa43973a7bd455a 100644 --- a/python/paddle/fluid/tests/unittests/test_jit_save_load.py +++ b/python/paddle/fluid/tests/unittests/test_jit_save_load.py @@ -94,7 +94,9 @@ class LinerNetWithLabel(paddle.nn.Layer): ) def forward(self, x, label): out = self._linear(x) - loss = fluid.layers.cross_entropy(out, label) + loss = 
paddle.nn.functional.cross_entropy( + out, label, reduction='none', use_softmax=False + ) avg_loss = paddle.mean(loss) return out, avg_loss @@ -112,7 +114,9 @@ class LinerNetWithPruneInput(paddle.nn.Layer): ) def forward(self, x, label): out = self._linear(x) - loss = fluid.layers.cross_entropy(out, label) + loss = paddle.nn.functional.cross_entropy( + out, label, reduction='none', use_softmax=False + ) avg_loss = paddle.mean(loss) return out @@ -312,7 +316,9 @@ def train(layer, input_size=784, label_size=1): cost = layer(img) - loss = fluid.layers.cross_entropy(cost, label) + loss = paddle.nn.functional.cross_entropy( + cost, label, reduction='none', use_softmax=False + ) avg_loss = paddle.mean(loss) avg_loss.backward() diff --git a/python/paddle/fluid/tests/unittests/test_load_state_dict_from_old_format.py b/python/paddle/fluid/tests/unittests/test_load_state_dict_from_old_format.py index db4af74fc35bbea8a37b3b3382655be91da92d5c..16ba749c9b4ae980fc7015189ccb23aab4b2ee71 100644 --- a/python/paddle/fluid/tests/unittests/test_load_state_dict_from_old_format.py +++ b/python/paddle/fluid/tests/unittests/test_load_state_dict_from_old_format.py @@ -49,7 +49,9 @@ def convolutional_neural_network(img): def static_train_net(img, label): prediction = convolutional_neural_network(img) - loss = fluid.layers.cross_entropy(input=prediction, label=label) + loss = paddle.nn.functional.cross_entropy( + input=prediction, label=label, reduction='none', use_softmax=False + ) avg_loss = paddle.mean(loss) optimizer = fluid.optimizer.SGD(learning_rate=0.001) diff --git a/python/paddle/fluid/tests/unittests/test_mix_precision_all_reduce_fuse.py b/python/paddle/fluid/tests/unittests/test_mix_precision_all_reduce_fuse.py index e024917a30682ecf04ca52eb4b80b24855ab34c2..08bc72ffd7415b408bf98083b1a694b1e9660707 100644 --- a/python/paddle/fluid/tests/unittests/test_mix_precision_all_reduce_fuse.py +++ b/python/paddle/fluid/tests/unittests/test_mix_precision_all_reduce_fuse.py @@ -28,7 +28,9 @@ 
img_shape = [1, 28, 28] def loss_net(hidden, label): prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') - loss = fluid.layers.cross_entropy(input=prediction, label=label) + loss = paddle.nn.functional.cross_entropy( + input=prediction, label=label, reduction='none', use_softmax=False + ) avg_loss = paddle.mean(loss) return avg_loss diff --git a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_dynamic.py b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_dynamic.py index 34d89ec89b47cc29a9b0aa9e6a86677d2d58b2e1..46ad749d02796adce842ce8ff89675dfcd354f74 100644 --- a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_dynamic.py +++ b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_dynamic.py @@ -99,7 +99,9 @@ class TestDygraphDataLoader(unittest.TestCase): step = 0 for image, label in dataloader(): out = fc_net(image) - loss = fluid.layers.cross_entropy(out, label) + loss = paddle.nn.functional.cross_entropy( + out, label, reduction='none', use_softmax=False + ) avg_loss = paddle.mean(loss) avg_loss.backward() optimizer.minimize(avg_loss) @@ -169,7 +171,9 @@ class TestDygraphDataLoaderWithBatchedDataset(TestDygraphDataLoader): step = 0 for image, label in dataloader(): out = fc_net(image) - loss = fluid.layers.cross_entropy(out, label) + loss = paddle.nn.functional.cross_entropy( + out, label, reduction='none', use_softmax=False + ) avg_loss = paddle.mean(loss) avg_loss.backward() optimizer.minimize(avg_loss) diff --git a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_iterable_dataset_dynamic.py b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_iterable_dataset_dynamic.py index c4b59ef96eea798ea37b2fa7e09a1a31320f6d7d..29e209856768adad339aa5f04fe9bd888c04d76e 100644 --- a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_iterable_dataset_dynamic.py +++ 
b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_iterable_dataset_dynamic.py @@ -99,7 +99,9 @@ class TestDygraphDataLoader(unittest.TestCase): step = 0 for image, label in dataloader(): out = fc_net(image) - loss = fluid.layers.cross_entropy(out, label) + loss = paddle.nn.functional.cross_entropy( + out, label, reduction='none', use_softmax=False + ) avg_loss = paddle.mean(loss) avg_loss.backward() optimizer.minimize(avg_loss) @@ -167,7 +169,9 @@ class TestDygraphDataLoaderWithBatchedDataset(TestDygraphDataLoader): step = 0 for image, label in dataloader(): out = fc_net(image) - loss = fluid.layers.cross_entropy(out, label) + loss = paddle.nn.functional.cross_entropy( + out, label, reduction='none', use_softmax=False + ) avg_loss = paddle.mean(loss) avg_loss.backward() optimizer.minimize(avg_loss) diff --git a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_iterable_dataset_static.py b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_iterable_dataset_static.py index f9fcb6f77d8f3eef61a07c902389fabce2a6687e..beca81e7047101e708a800b851d000d2f63d0e20 100644 --- a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_iterable_dataset_static.py +++ b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_iterable_dataset_static.py @@ -80,7 +80,12 @@ def simple_fc_net_static(): bias_attr=bias_attr, ) loss = paddle.mean( - fluid.layers.cross_entropy(input=predict_label, label=label) + paddle.nn.functional.cross_entropy( + input=predict_label, + label=label, + reduction='none', + use_softmax=False, + ) ) optimizer = fluid.optimizer.Adam() diff --git a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_static.py b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_static.py index 7321e4d137442af00c84f441631276f62d7d9e4b..e63cf6694af266248a6dfc2b1915553afbcdc5ef 100644 --- a/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_static.py +++ 
b/python/paddle/fluid/tests/unittests/test_multiprocess_dataloader_static.py @@ -80,7 +80,12 @@ def simple_fc_net_static(): bias_attr=bias_attr, ) loss = paddle.mean( - fluid.layers.cross_entropy(input=predict_label, label=label) + paddle.nn.functional.cross_entropy( + input=predict_label, + label=label, + reduction='none', + use_softmax=False, + ) ) optimizer = fluid.optimizer.Adam()