Unverified commit 8035c6f2, authored by kangguangli, committed by GitHub

Remove the remaining references to `fluid.layers.cross_entropy` in unit tests (#49012)

Parent: acee3dd3
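Every hunk below applies the same mechanical substitution: the legacy op `fluid.layers.cross_entropy` returned per-sample losses and did not apply softmax to its input, so the drop-in replacement is `paddle.nn.functional.cross_entropy` with `reduction='none'` (keep per-sample losses) and `use_softmax=False` (the input is already a probability distribution). A minimal sketch of the pattern; the tensor shapes are illustrative, not taken from the tests:

    import paddle

    # Illustrative shapes (not from the tests): batch of 4, 2 classes.
    logits = paddle.uniform((4, 2))
    # The tests feed probabilities (fc layers with act='softmax'), so softmax
    # is applied before the loss, not inside it.
    prediction = paddle.nn.functional.softmax(logits)
    label = paddle.randint(0, 2, (4, 1), dtype='int64')

    # Legacy form removed by this commit:
    #   cost = fluid.layers.cross_entropy(input=prediction, label=label)
    # Replacement used throughout:
    cost = paddle.nn.functional.cross_entropy(
        prediction,
        label,
        reduction='none',    # per-sample losses, matching the legacy op's output
        use_softmax=False,   # input already holds probabilities
    )
    avg_cost = paddle.mean(cost)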
@@ -173,7 +173,9 @@ class TestImperativeQatMatmul(unittest.TestCase):
             label = fluid.dygraph.to_variable(y_data)
             out = lenet(img)
             acc = paddle.static.accuracy(out, label)
-            loss = fluid.layers.cross_entropy(out, label)
+            loss = paddle.nn.functional.cross_entropy(
+                out, label, reduction='none', use_softmax=False
+            )
             avg_loss = paddle.mean(loss)
             avg_loss.backward()
...
@@ -42,8 +42,11 @@ class TestFleetFP16CompressOptimizer(unittest.TestCase):
            prediction = paddle.fluid.layers.fc(
                input=[fc_2], size=2, act='softmax'
            )
-           cost = paddle.fluid.layers.cross_entropy(
-               input=prediction, label=input_y
+           cost = paddle.nn.functional.cross_entropy(
+               input=prediction,
+               label=input_y,
+               reduction='none',
+               use_softmax=False,
            )
            avg_cost = paddle.mean(x=cost)
...
@@ -72,8 +72,11 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase):
            prediction = paddle.fluid.layers.fc(
                input=[fc_2], size=2, act='softmax'
            )
-           cost = paddle.fluid.layers.cross_entropy(
-               input=prediction, label=input_y
+           cost = paddle.nn.functional.cross_entropy(
+               input=prediction,
+               label=input_y,
+               reduction='none',
+               use_softmax=False,
            )
            avg_cost = paddle.mean(x=cost)
@@ -135,8 +138,11 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase):
            prediction = paddle.fluid.layers.fc(
                input=[fc_2], size=2, act='softmax'
            )
-           cost = paddle.fluid.layers.cross_entropy(
-               input=prediction, label=input_y
+           cost = paddle.nn.functional.cross_entropy(
+               input=prediction,
+               label=input_y,
+               reduction='none',
+               use_softmax=False,
            )
            avg_cost = paddle.mean(x=cost)
@@ -210,8 +216,11 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase):
            prediction = paddle.fluid.layers.fc(
                input=[fc_2], size=2, act='softmax'
            )
-           cost = paddle.fluid.layers.cross_entropy(
-               input=prediction, label=input_y
+           cost = paddle.nn.functional.cross_entropy(
+               input=prediction,
+               label=input_y,
+               reduction='none',
+               use_softmax=False,
            )
            avg_cost = paddle.mean(x=cost)
@@ -272,8 +281,11 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase):
            prediction = paddle.fluid.layers.fc(
                input=[fc_2], size=2, act='softmax'
            )
-           cost = paddle.fluid.layers.cross_entropy(
-               input=prediction, label=input_y
+           cost = paddle.nn.functional.cross_entropy(
+               input=prediction,
+               label=input_y,
+               reduction='none',
+               use_softmax=False,
            )
            avg_cost = paddle.mean(x=cost)
...
@@ -59,8 +59,11 @@ class TestFleetGraphExecutionMetaOptimizer(unittest.TestCase):
            prediction = paddle.fluid.layers.fc(
                input=[fc_2], size=2, act='softmax'
            )
-           cost = paddle.fluid.layers.cross_entropy(
-               input=prediction, label=input_y
+           cost = paddle.nn.functional.cross_entropy(
+               input=prediction,
+               label=input_y,
+               reduction='none',
+               use_softmax=False,
            )
            avg_cost = paddle.mean(x=cost)
...
@@ -47,8 +47,11 @@ class TestFleetLambMetaOptimizer(unittest.TestCase):
            prediction = paddle.fluid.layers.fc(
                input=[fc_2], size=2, act='softmax'
            )
-           cost = paddle.fluid.layers.cross_entropy(
-               input=prediction, label=input_y
+           cost = paddle.nn.functional.cross_entropy(
+               input=prediction,
+               label=input_y,
+               reduction='none',
+               use_softmax=False,
            )
            avg_cost = paddle.mean(x=cost)
@@ -122,8 +125,8 @@ class TestFleetLambMetaOptimizer(unittest.TestCase):
        fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh')
        fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh')
        prediction = paddle.fluid.layers.fc(input=[fc_2], size=2, act='softmax')
-       cost = paddle.fluid.layers.cross_entropy(
-           input=prediction, label=input_y
+       cost = paddle.nn.functional.cross_entropy(
+           input=prediction, label=input_y, reduction='none', use_softmax=False
        )
        avg_cost = paddle.mean(x=cost)
...
@@ -47,8 +47,11 @@ class TestFleetLarsMetaOptimizer(unittest.TestCase):
            prediction = paddle.fluid.layers.fc(
                input=[fc_2], size=2, act='softmax'
            )
-           cost = paddle.fluid.layers.cross_entropy(
-               input=prediction, label=input_y
+           cost = paddle.nn.functional.cross_entropy(
+               input=prediction,
+               label=input_y,
+               reduction='none',
+               use_softmax=False,
            )
            avg_cost = paddle.mean(x=cost)
@@ -127,8 +130,8 @@ class TestFleetLarsMetaOptimizer(unittest.TestCase):
        fc_1 = paddle.fluid.layers.fc(input=input_x, size=64, act='tanh')
        fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh')
        prediction = paddle.fluid.layers.fc(input=[fc_2], size=2, act='softmax')
-       cost = paddle.fluid.layers.cross_entropy(
-           input=prediction, label=input_y
+       cost = paddle.nn.functional.cross_entropy(
+           input=prediction, label=input_y, reduction='none', use_softmax=False
        )
        avg_cost = paddle.mean(x=cost)
...
@@ -43,8 +43,11 @@ class TestFleetMetaOptimizerBase(unittest.TestCase):
            prediction = paddle.fluid.layers.fc(
                input=[fc_2], size=2, act='softmax'
            )
-           cost = paddle.fluid.layers.cross_entropy(
-               input=prediction, label=input_y
+           cost = paddle.nn.functional.cross_entropy(
+               input=prediction,
+               label=input_y,
+               reduction='none',
+               use_softmax=False,
            )
            avg_cost = paddle.mean(x=cost)
...
@@ -56,8 +56,11 @@ class TestFleetMetaOptimizer(unittest.TestCase):
            prediction = paddle.fluid.layers.fc(
                input=[fc_2], size=2, act='softmax'
            )
-           cost = paddle.fluid.layers.cross_entropy(
-               input=prediction, label=input_y
+           cost = paddle.nn.functional.cross_entropy(
+               input=prediction,
+               label=input_y,
+               reduction='none',
+               use_softmax=False,
            )
            avg_cost = paddle.mean(x=cost)
            return avg_cost
...
@@ -52,8 +52,11 @@ class TestFleetMetaOptimizer(unittest.TestCase):
            prediction = paddle.fluid.layers.fc(
                input=[fc_7], size=2, act='softmax'
            )
-           cost = paddle.fluid.layers.cross_entropy(
-               input=prediction, label=input_y
+           cost = paddle.nn.functional.cross_entropy(
+               input=prediction,
+               label=input_y,
+               reduction='none',
+               use_softmax=False,
            )
            avg_cost = paddle.mean(x=cost)
...
@@ -41,8 +41,8 @@ class TestFleetMetaOptimizer(unittest.TestCase):
        fc_2 = paddle.fluid.layers.fc(input=fc_1, size=64, act='tanh')
        prediction = paddle.fluid.layers.fc(input=[fc_2], size=2, act='softmax')
-       cost = paddle.fluid.layers.cross_entropy(
-           input=prediction, label=input_y
+       cost = paddle.nn.functional.cross_entropy(
+           input=prediction, label=input_y, reduction='none', use_softmax=False
        )
        avg_cost = paddle.mean(x=cost)
...
@@ -531,8 +531,11 @@ def train_mobilenet(args, to_static):
            t_end = time.time()
            softmax_out = paddle.nn.functional.softmax(out)
-           loss = fluid.layers.cross_entropy(
-               input=softmax_out, label=label
+           loss = paddle.nn.functional.cross_entropy(
+               input=softmax_out,
+               label=label,
+               reduction='none',
+               use_softmax=False,
            )
            avg_loss = paddle.mean(x=loss)
            acc_top1 = paddle.static.accuracy(input=out, label=label, k=1)
...
@@ -329,8 +329,12 @@ def train(args, fake_data_reader, to_static):
            labels = to_variable(y_data)
            labels.stop_gradient = True
            outputs = video_model(imgs)
-           loss = fluid.layers.cross_entropy(
-               input=outputs, label=labels, ignore_index=-1
+           loss = paddle.nn.functional.cross_entropy(
+               input=outputs,
+               label=labels,
+               ignore_index=-1,
+               reduction='none',
+               use_softmax=False,
            )
            avg_loss = paddle.mean(loss)
            acc_top1 = paddle.static.accuracy(
...
@@ -63,8 +63,12 @@ class TestBase(IPUOpTest):
        label = paddle.static.data(
            name=self.feed_list[1], shape=self.feed_shape[1], dtype='int64'
        )
-       out = paddle.fluid.layers.cross_entropy(
-           input=x, label=label, **self.attrs
+       out = paddle.nn.functional.cross_entropy(
+           input=x,
+           label=label,
+           reduction='none',
+           use_softmax=False,
+           **self.attrs
        )
        self.fetch_list = [out.name]
...
@@ -49,9 +49,6 @@ class SimpleLayer(paddle.nn.Layer):
        if target is not None:
            if self.use_softmax:
                x = paddle.nn.functional.softmax(x)
-           if self.loss_op:
-               loss = self.loss_op(x, target)
-           else:
-               loss = paddle.paddle.nn.functional.cross_entropy(
-                   x, target, reduction='none', use_softmax=False
-               )
+           loss = paddle.paddle.nn.functional.cross_entropy(
+               x, target, reduction='none', use_softmax=False
+           )
@@ -69,7 +66,7 @@ class TestBase(IPUD2STest):
        self.set_data_feed()

    def set_op_attrs(self):
-       self.loss_op = paddle.fluid.layers.cross_entropy
+       pass

    def set_data_feed(self):
        self.data = paddle.uniform((8, 3, 10, 10), dtype='float32')
...
@@ -2062,7 +2062,9 @@ class TestBook(LayerTest):
                act='softmax',
                param_attr=["sftmax.w1", "sftmax.w2"],
            )
-           cost = layers.cross_entropy(input=predict, label=label)
+           cost = paddle.nn.functional.cross_entropy(
+               input=predict, label=label, reduction='none', use_softmax=False
+           )
            avg_cost = paddle.mean(cost)
            return avg_cost
@@ -2101,7 +2103,9 @@ class TestBook(LayerTest):
            )
            predict = layers.fc(input=conv_pool_2, size=10, act="softmax")
-           cost = layers.cross_entropy(input=predict, label=label)
+           cost = paddle.nn.functional.cross_entropy(
+               input=predict, label=label, reduction='none', use_softmax=False
+           )
            avg_cost = paddle.mean(cost)
            return avg_cost
@@ -2154,7 +2158,12 @@ class TestBook(LayerTest):
            predict_word = layers.fc(
                input=hidden1, size=dict_size, act='softmax'
            )
-           cost = layers.cross_entropy(input=predict_word, label=next_word)
+           cost = paddle.nn.functional.cross_entropy(
+               input=predict_word,
+               label=next_word,
+               reduction='none',
+               use_softmax=False,
+           )
            avg_cost = paddle.mean(cost)
            return avg_cost
@@ -2366,7 +2375,14 @@ class TestBook(LayerTest):
        x = self._get_data(name="x", shape=[30, 10], dtype="float32")
        label = self._get_data(name="label", shape=[30, 1], dtype="int64")
        mode = 'channel'
-       out = layers.cross_entropy(x, label, False, 4)
+       out = paddle.nn.functional.cross_entropy(
+           x,
+           label,
+           soft_label=False,
+           ignore_index=4,
+           reduction='none',
+           use_softmax=False,
+       )
        return out

    def make_uniform_random_batch_size_like(self):
...