Unverified commit d341ce9d, authored by kangguangli, committed by GitHub

[remove fluid.layers.cross_entropy] remove unit tests (part 3) (#48918)

* replace cross_entropy in python/paddle/fluid/tests/unittests/test_[o-z]*.py plus test_dist_transpiler.py

* fix test_prune
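
Every replacement in this commit follows the same mechanical pattern: `fluid.layers.cross_entropy` computed a per-sample loss directly on probabilities (typically the output of an fc layer with a softmax activation), so the drop-in `paddle.nn.functional.cross_entropy` call must disable both its built-in softmax and its default mean reduction. A minimal before/after sketch, using an illustrative two-class network that does not come from any one of the touched tests:

```python
import paddle
import paddle.nn.functional as F

paddle.enable_static()

x = paddle.static.data(name='x', shape=[-1, 4], dtype='float32')
label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64')
# The old API expected probabilities, so the fc output goes through softmax.
prob = F.softmax(paddle.static.nn.fc(x, size=2))

# Before (removed API):
#   cost = fluid.layers.cross_entropy(input=prob, label=label)
# After: reduction='none' keeps the per-sample loss shape, and
# use_softmax=False stops the new API from applying softmax a second time.
cost = F.cross_entropy(prob, label, reduction='none', use_softmax=False)
avg_cost = paddle.mean(cost)
```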
Parent e83f5f33
@@ -747,7 +747,9 @@ class TestDistLookupTableBase(TranspilerTest):
         )
         label = fluid.layers.data(name='label', shape=[1], dtype='int64')
-        cost = fluid.layers.cross_entropy(input=predict, label=label)
+        cost = paddle.nn.functional.cross_entropy(
+            input=predict, label=label, reduction='none', use_softmax=False
+        )
         avg_cost = paddle.mean(cost)
         optimizer = fluid.optimizer.Adam(learning_rate=0.003)
         optimizer.minimize(avg_cost)
@@ -1168,7 +1168,12 @@ class TestRecomputeOptimizer(unittest.TestCase):
             prediction = fluid.layers.fc(
                 input=[drop_res], size=2, act='softmax'
             )
-            cost = fluid.layers.cross_entropy(input=prediction, label=input_y)
+            cost = paddle.nn.functional.cross_entropy(
+                input=prediction,
+                label=input_y,
+                reduction='none',
+                use_softmax=False,
+            )
             sum_cost = paddle.mean(cost)
             return drop_res, prediction, sum_cost
@@ -1225,7 +1230,12 @@ class TestRecomputeOptimizerCUDA(unittest.TestCase):
             prediction = fluid.layers.fc(
                 input=[drop_res], size=2, act='softmax'
             )
-            cost = fluid.layers.cross_entropy(input=prediction, label=input_y)
+            cost = paddle.nn.functional.cross_entropy(
+                input=prediction,
+                label=input_y,
+                reduction='none',
+                use_softmax=False,
+            )
             sum_cost = paddle.mean(cost)
             return drop_res, prediction, sum_cost
@@ -75,7 +75,9 @@ def static(
     def fn_1(opt, avg_loss=None, pred=None, label=None):
         if avg_loss is None:
-            loss = layers.cross_entropy(input=pred, label=label)
+            loss = paddle.nn.functional.cross_entropy(
+                input=pred, label=label, reduction='none', use_softmax=False
+            )
             avg_loss = paddle.mean(loss, name='mean_cross_entropy_loss')
         opt.minimize(avg_loss)
         return avg_loss
@@ -106,7 +108,12 @@ def static(
                 lambda: fn_2(sgd, None, prediction, label),
             )
         else:
-            loss_1 = layers.cross_entropy(input=prediction, label=label)
+            loss_1 = paddle.nn.functional.cross_entropy(
+                input=prediction,
+                label=label,
+                reduction='none',
+                use_softmax=False,
+            )
             avg_loss_1 = paddle.mean(loss_1)
             loss_2 = paddle.nn.functional.softmax_with_cross_entropy(
                 logits=prediction, label=label
@@ -188,7 +195,9 @@ def dynamic(train_data, use_cuda=False, use_parallel_exe=False):
         hidden, prediction = dy_layer(var_input)
         if epoch % 2 == 0:
-            cross_entropy_loss = layers.cross_entropy(prediction, var_label)
+            cross_entropy_loss = paddle.nn.functional.cross_entropy(
+                prediction, var_label, reduction='none', use_softmax=False
+            )
             loss = paddle.mean(cross_entropy_loss)
             loss.backward()
             adam.minimize(loss)
@@ -82,7 +82,9 @@ class TestMNISTDryRun(TestBase):
         for _ in range(10):
             hidden = fluid.layers.fc(input=img, size=200, act='tanh')
         prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
-        loss = fluid.layers.cross_entropy(input=prediction, label=label)
+        loss = paddle.nn.functional.cross_entropy(
+            input=prediction, label=label, reduction='none', use_softmax=False
+        )
         avg_loss = paddle.mean(loss)
         fluid.optimizer.Adam().minimize(avg_loss)
         return avg_loss
@@ -60,7 +60,9 @@ class TestFetchAndFeed(unittest.TestCase):
         )
         label = fluid.layers.data(name='label', shape=[1], dtype='int64')
         out = Lenet(data, class_dim=102)
-        loss = fluid.layers.cross_entropy(input=out, label=label)
+        loss = paddle.nn.functional.cross_entropy(
+            input=out, label=label, reduction='none', use_softmax=False
+        )
         loss = paddle.mean(loss)
         opt = fluid.optimizer.Momentum(
             learning_rate=0.1,
@@ -37,7 +37,9 @@ def simple_fc_net(use_feed):
         ),
     )
     prediction = fluid.layers.fc(hidden, size=10, act='softmax')
-    loss = fluid.layers.cross_entropy(input=prediction, label=label)
+    loss = paddle.nn.functional.cross_entropy(
+        input=prediction, label=label, reduction='none', use_softmax=False
+    )
     loss = paddle.mean(loss)
     return loss
@@ -62,7 +64,9 @@ def fc_with_batchnorm(use_feed):
     with fluid.name_scope("fc_layer"):
         prediction = fluid.layers.fc(hidden, size=10, act='softmax')
     with fluid.name_scope("loss"):
-        loss = fluid.layers.cross_entropy(input=prediction, label=label)
+        loss = paddle.nn.functional.cross_entropy(
+            input=prediction, label=label, reduction='none', use_softmax=False
+        )
         loss = paddle.mean(loss)
     return loss
@@ -57,7 +57,9 @@ class TestProfiler(unittest.TestCase):
         hidden2 = fluid.layers.fc(input=hidden_n, size=64, act='relu')
         predict = fluid.layers.fc(input=hidden2, size=10, act='softmax')
         label = fluid.layers.data(name='y', shape=[1], dtype='int64')
-        cost = fluid.layers.cross_entropy(input=predict, label=label)
+        cost = paddle.nn.functional.cross_entropy(
+            input=predict, label=label, reduction='none', use_softmax=False
+        )
         avg_cost = paddle.mean(cost)
         batch_size = paddle.tensor.create_tensor(dtype='int64')
         batch_acc = paddle.static.accuracy(
@@ -54,7 +54,9 @@ def lstm_net(use_feed):
     lstm_max_tanh = paddle.tanh(lstm_max)
     fc1 = fluid.layers.fc(input=lstm_max_tanh, size=hid_dim2, act='tanh')
     prediction = fluid.layers.fc(input=fc1, size=class_dim, act='softmax')
-    cost = fluid.layers.cross_entropy(input=prediction, label=label)
+    cost = paddle.nn.functional.cross_entropy(
+        input=prediction, label=label, reduction='none', use_softmax=False
+    )
     avg_cost = paddle.mean(x=cost)
     return avg_cost
@@ -74,7 +76,9 @@ def simple_fc_net_with_accuracy(use_feed):
         ),
     )
     prediction = fluid.layers.fc(hidden, size=10, act='softmax')
-    loss = fluid.layers.cross_entropy(input=prediction, label=label)
+    loss = paddle.nn.functional.cross_entropy(
+        input=prediction, label=label, reduction='none', use_softmax=False
+    )
     loss = paddle.mean(loss)
     accuracy_out = paddle.static.accuracy(input=prediction, label=label, k=5)
     return loss
@@ -87,7 +91,9 @@ def cond_net(use_feed=None):
     def loss1(pred, label):
         x = fluid.layers.data(name="x", shape=[4], dtype='float32')
-        loss = fluid.layers.cross_entropy(input=pred, label=label)
+        loss = paddle.nn.functional.cross_entropy(
+            input=pred, label=label, reduction='none', use_softmax=False
+        )
         avg_loss = paddle.mean(loss, name='mean_cross_entropy_loss')
         return avg_loss
@@ -114,7 +120,9 @@ def optimization_in_cond_net(with_optimize=False):
     def loss1(opt, pred, label, with_optimize):
         x = fluid.layers.data(name="x", shape=[4], dtype='float32')
-        loss = fluid.layers.cross_entropy(input=pred, label=label)
+        loss = paddle.nn.functional.cross_entropy(
+            input=pred, label=label, reduction='none', use_softmax=False
+        )
         avg_loss = paddle.mean(loss, name='mean_cross_entropy_loss')
         if with_optimize:
             opt.minimize(avg_loss)
@@ -28,7 +28,9 @@ class TestPrune(unittest.TestCase):
         x = fluid.layers.data(name='x', shape=[2], dtype='float32')
         label = fluid.layers.data(name="label", shape=[1], dtype="int64")
         y = fluid.layers.fc(input=[x], size=2, act="softmax")
-        loss = fluid.layers.cross_entropy(input=y, label=label)
+        loss = paddle.nn.functional.cross_entropy(
+            input=y, label=label, reduction='none', use_softmax=False
+        )
         loss = paddle.mean(x=loss)
         return x, y, label, loss
@@ -45,7 +47,7 @@ class TestPrune(unittest.TestCase):
                 "mul",
                 "elementwise_add",
                 "softmax",
-                "cross_entropy2",
+                "softmax_with_cross_entropy",
                 "reduce_mean",
             ],
         )
@@ -55,7 +57,7 @@ class TestPrune(unittest.TestCase):
         self.assertEqual(len(pruned_program.global_block().ops), 2)
         self.assertEqual(
             [op.type for op in pruned_program.global_block().ops],
-            ["cross_entropy2", "reduce_mean"],
+            ["softmax_with_cross_entropy", "reduce_mean"],
         )

     def test_prune(self):
@@ -71,7 +73,7 @@ class TestPrune(unittest.TestCase):
                 "mul",
                 "elementwise_add",
                 "softmax",
-                "cross_entropy2",
+                "softmax_with_cross_entropy",
                 "reduce_mean",
             ],
         )
@@ -83,7 +85,7 @@ class TestPrune(unittest.TestCase):
                 "mul",
                 "elementwise_add",
                 "softmax",
-                "cross_entropy2",
+                "softmax_with_cross_entropy",
                 "reduce_mean",
             ],
         )
@@ -101,7 +103,7 @@ class TestPrune(unittest.TestCase):
                 "mul",
                 "elementwise_add",
                 "softmax",
-                "cross_entropy2",
+                "softmax_with_cross_entropy",
                 "reduce_mean",
             ],
         )
@@ -113,7 +115,7 @@ class TestPrune(unittest.TestCase):
                 "mul",
                 "elementwise_add",
                 "softmax",
-                "cross_entropy2",
+                "softmax_with_cross_entropy",
                 "reduce_mean",
             ],
         )
@@ -131,7 +133,7 @@ class TestPrune(unittest.TestCase):
                 "mul",
                 "elementwise_add",
                 "softmax",
-                "cross_entropy2",
+                "softmax_with_cross_entropy",
                 "reduce_mean",
             ],
         )
@@ -170,9 +172,13 @@ class TestExecutorRunAutoPrune(unittest.TestCase):
         y = fluid.layers.fc(
             input=[x], size=2, act="softmax", param_attr=w_param_attrs
         )
-        loss1 = fluid.layers.cross_entropy(input=y, label=label)
+        loss1 = paddle.nn.functional.cross_entropy(
+            input=y, label=label, reduction='none', use_softmax=False
+        )
         loss1 = paddle.mean(x=loss1)
-        loss2 = fluid.layers.cross_entropy(input=y, label=label)
+        loss2 = paddle.nn.functional.cross_entropy(
+            input=y, label=label, reduction='none', use_softmax=False
+        )
         loss2 = paddle.mean(x=loss2)
         loss1.persistable = True
         loss2.persistable = True
@@ -200,9 +206,13 @@ class TestExecutorRunAutoPrune(unittest.TestCase):
         y2 = fluid.layers.fc(
             input=[x2], size=2, act="softmax", param_attr=w2_param_attrs
         )
-        loss1 = fluid.layers.cross_entropy(input=y1, label=label)
+        loss1 = paddle.nn.functional.cross_entropy(
+            input=y1, label=label, reduction='none', use_softmax=False
+        )
         loss1 = paddle.mean(x=loss1)
-        loss2 = fluid.layers.cross_entropy(input=y2, label=label)
+        loss2 = paddle.nn.functional.cross_entropy(
+            input=y2, label=label, reduction='none', use_softmax=False
+        )
         loss2 = paddle.mean(x=loss2)
         return (
             x1,
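
The changed expectations in the test_prune hunks above reflect that the two Python APIs lower to different operators: the legacy `fluid.layers.cross_entropy` appended a `cross_entropy2` op to the program, while `paddle.nn.functional.cross_entropy` is lowered to `softmax_with_cross_entropy` even when `use_softmax=False`. A quick way to confirm which operators a program contains (a sketch assuming static mode; the network only mirrors the shape of the one in TestPrune):

```python
import paddle
import paddle.nn.functional as F

paddle.enable_static()

main = paddle.static.Program()
with paddle.static.program_guard(main):
    x = paddle.static.data(name='x', shape=[-1, 2], dtype='float32')
    label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64')
    y = F.softmax(paddle.static.nn.fc(x, size=2))
    loss = F.cross_entropy(y, label, reduction='none', use_softmax=False)
    loss = paddle.mean(loss)

# Per the updated assertions, this should print something like:
# ['mul', 'elementwise_add', 'softmax', 'softmax_with_cross_entropy', 'reduce_mean']
print([op.type for op in main.global_block().ops])
```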
@@ -104,7 +104,9 @@ def simple_fc_net(img, label, use_py_func_op):
     prediction = fluid.layers.fc(hidden, size=10, act='softmax')
     if not use_py_func_op:
-        loss = fluid.layers.cross_entropy(input=prediction, label=label)
+        loss = paddle.nn.functional.cross_entropy(
+            input=prediction, label=label, reduction='none', use_softmax=False
+        )
     else:
         loss = (
             fluid.default_main_program()
@@ -141,7 +141,9 @@ def bow_net(
     fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh")
     fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act="tanh")
     prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act="softmax")
-    cost = fluid.layers.cross_entropy(input=prediction, label=label)
+    cost = paddle.nn.functional.cross_entropy(
+        input=prediction, label=label, reduction='none', use_softmax=False
+    )
     avg_cost = paddle.mean(x=cost)
     return avg_cost
@@ -47,7 +47,9 @@ def bow_net(
     fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh")
     fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act="tanh")
     prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act="softmax")
-    cost = fluid.layers.cross_entropy(input=prediction, label=label)
+    cost = paddle.nn.functional.cross_entropy(
+        input=prediction, label=label, reduction='none', use_softmax=False
+    )
     avg_cost = paddle.mean(x=cost)
     return avg_cost
@@ -296,7 +296,9 @@ class PolicyGradient:
         self.reward = paddle.static.py_func(
             func=reward_func, x=[action, length], out=reward
         )
-        neg_log_prob = layers.cross_entropy(act_prob, action)
+        neg_log_prob = paddle.nn.functional.cross_entropy(
+            act_prob, action, reduction='none', use_softmax=False
+        )
         cost = neg_log_prob * reward
         cost = (
             (paddle.sum(cost) / paddle.sum(length))
@@ -383,7 +385,13 @@ class MLE:
         self.lr = lr

     def learn(self, probs, label, weight=None, length=None):
-        loss = layers.cross_entropy(input=probs, label=label, soft_label=False)
+        loss = paddle.nn.functional.cross_entropy(
+            input=probs,
+            label=label,
+            soft_label=False,
+            reduction='none',
+            use_softmax=False,
+        )
         max_seq_len = paddle.shape(probs)[1]
         mask = layers.sequence_mask(length, maxlen=max_seq_len, dtype="float32")
         loss = loss * mask
@@ -27,7 +27,9 @@ def test_trainable():
     feature = fluid.layers.fc(
         input=x, size=10, param_attr=fluid.ParamAttr(trainable=False)
     )
-    loss = fluid.layers.cross_entropy(input=feature, label=label)
+    loss = paddle.nn.functional.cross_entropy(
+        input=feature, label=label, reduction='none', use_softmax=False
+    )
     loss = paddle.mean(loss)
     return loss
@@ -63,7 +63,9 @@ def bow_net(
     fc_1 = fluid.layers.fc(input=bow_tanh, size=hid_dim, act="tanh")
     fc_2 = fluid.layers.fc(input=fc_1, size=hid_dim2, act="tanh")
     prediction = fluid.layers.fc(input=[fc_2], size=class_dim, act="softmax")
-    cost = fluid.layers.cross_entropy(input=prediction, label=label)
+    cost = paddle.nn.functional.cross_entropy(
+        input=prediction, label=label, reduction='none', use_softmax=False
+    )
     avg_cost = paddle.mean(x=cost)
     return avg_cost