Unverified commit 2472d8f9, authored by kangguangli, committed by GitHub

replace cross_entropy in python/paddle/fluid/tests/unittests/*.py (#48975)

Parent 4d5a5533
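
The change applied throughout the diff below is the migration from the deprecated fluid.layers.relu to paddle.nn.functional.relu, imported as F. A minimal sketch of the pattern, assuming a Paddle 2.x install (the tensor values are illustrative only):

    import paddle
    import paddle.nn.functional as F

    x = paddle.to_tensor([-1.0, 0.0, 2.0], stop_gradient=False)

    # Before (deprecated): y = fluid.layers.relu(x)
    # After (this commit): y = F.relu(x)
    y = F.relu(x)       # same operator, new namespace
    y.sum().backward()  # gradients flow as before
    print(y.numpy())    # [0. 0. 2.]

The same spelling works in the static-graph tests; the only extra step in each file is adding "import paddle.nn.functional as F" alongside the existing fluid imports, as the hunks below show.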
@@ -186,7 +186,7 @@ class TestReluDoubleGradCheck(unittest.TestCase):
         x = layers.data('x', shape, False, dtype)
         x.persistable = True
-        y = layers.relu(x)
+        y = F.relu(x)
         x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
         x_arr[np.abs(x_arr) < 0.005] = 0.02
...
@@ -18,6 +18,7 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
+import paddle.nn.functional as F
 import paddle.static as static
@@ -285,7 +286,7 @@ class TestGradientsError(unittest.TestCase):
         x = fluid.data(name='x', shape=[None, 2, 8, 8], dtype='float32')
         x.stop_gradient = False
         conv = fluid.layers.conv2d(x, 4, 1, bias_attr=False)
-        y = fluid.layers.relu(conv)
+        y = F.relu(conv)
         with self.assertRaises(TypeError):
             x_grad = fluid.gradients(y.name, x)
...
@@ -18,6 +18,7 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
+import paddle.nn.functional as F
 from paddle.fluid import core

 paddle.enable_static()
@@ -160,7 +161,7 @@ class TestFusedBnAddActAPI(unittest.TestCase):
             data_layout='NHWC',
         )
         out = bn1 + bn2
-        out = fluid.layers.relu(out)
+        out = F.relu(out)
         prediction = fluid.layers.fc(
             input=out, size=10, act='softmax', param_attr=self.fc_param_attr
         )
...
@@ -22,6 +22,7 @@ from simple_nets import fc_with_batchnorm, init_data, simple_fc_net
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
+import paddle.nn.functional as F


 class TestMNIST(TestParallelExecutorBase):
@@ -97,7 +98,7 @@ class TestFuseActElewiseAddInplaceGradPass(unittest.TestCase):
         X = fluid.data(name="X", shape=[3, 3], dtype='float32')
         Y = fluid.data(name="Y", shape=[3, 3], dtype='float32')
         Out1 = X * 5
-        Out2 = fluid.layers.relu(Out1)
+        Out2 = F.relu(Out1)
         prediction = paddle.tensor.math._add_with_axis(Y, Out2, axis=1)
         loss = paddle.mean(prediction)
         sgd = fluid.optimizer.SGD(learning_rate=0.001)
...
@@ -20,6 +20,7 @@ from parallel_executor_test_base import DeviceType, TestParallelExecutorBase
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
+import paddle.nn.functional as F


 def norm(*args, **kargs):
@@ -59,7 +60,7 @@ def simple_depthwise_net(use_feed):
     hidden = paddle.reshape(img, (-1, 1, 28, 28))
     for _ in range(4):
         hidden = sep_conv(hidden, channel=200, stride=2, filter=5)
-        hidden = fluid.layers.relu(hidden)
+        hidden = F.relu(hidden)
     prediction = fluid.layers.fc(hidden, size=10, act='softmax')
     loss = paddle.nn.functional.cross_entropy(
         input=prediction, label=label, reduction='none', use_softmax=False
...
@@ -17,6 +17,7 @@ import unittest
 import numpy as np

 import paddle.fluid as fluid
+import paddle.nn.functional as F
 from paddle.fluid.reader import use_pinned_memory
@@ -45,7 +46,7 @@ class TestDygraphDataLoader(unittest.TestCase):
     def iter_loader_data(self, loader):
         for _ in range(self.epoch_num):
             for image, label in loader():
-                relu = fluid.layers.relu(image)
+                relu = F.relu(image)
                 self.assertEqual(image.shape, [self.batch_size, 784])
                 self.assertEqual(label.shape, [self.batch_size, 1])
                 self.assertEqual(relu.shape, [self.batch_size, 784])
...
@@ -18,6 +18,7 @@ import unittest
 import numpy as np

 import paddle.fluid as fluid
+import paddle.nn.functional as F
 from paddle.fluid import core
@@ -112,7 +113,7 @@ class TestDygraphDataLoaderWithException(unittest.TestCase):
         try:
             for _ in range(self.epoch_num):
                 for image, _ in loader():
-                    fluid.layers.relu(image)
+                    F.relu(image)
         except core.EnforceNotMet as ex:
             self.assertIn("Blocking queue is killed", str(ex))
             exception = ex
...
@@ -17,6 +17,7 @@ import unittest
 import numpy as np

 import paddle.fluid as fluid
+import paddle.nn.functional as F
 from paddle.io import DataLoader, Dataset
@@ -71,7 +72,7 @@ class TestDygraphDataLoaderMmapFdsClear(unittest.TestCase):
     def run_one_epoch_with_break(self, loader):
         for step_id, data in enumerate(loader()):
             image, label = data
-            relu = fluid.layers.relu(image)
+            relu = F.relu(image)
             self.assertEqual(image.shape, [self.batch_size, 784])
             self.assertEqual(label.shape, [self.batch_size, 1])
             self.assertEqual(relu.shape, [self.batch_size, 784])
...
@@ -19,6 +19,7 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
+import paddle.nn.functional as F
 from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.wrapped_decorator import wrap_decorator
 from paddle.vision.models import resnet50, resnet101
@@ -317,8 +318,8 @@ class TestDygraphDoubleGrad(TestCase):
         numel = x_np.size
         x.stop_gradient = False

-        y1 = fluid.layers.relu(x)
-        y2 = fluid.layers.relu(x)
+        y1 = F.relu(x)
+        y2 = F.relu(x)
         z = y1 + y2
         w = z * z
@@ -436,7 +437,7 @@ class TestDygraphDoubleGrad(TestCase):
         numel = x_np.size
         x.stop_gradient = False

-        y = fluid.layers.relu(x)
+        y = F.relu(x)
         z = y + 1
         w = z * z
@@ -489,8 +490,8 @@ class TestDygraphDoubleGrad(TestCase):
         numel = x_np.size
         x.stop_gradient = False

-        y1 = fluid.layers.relu(x)
-        y2 = fluid.layers.relu(x)
+        y1 = F.relu(x)
+        y2 = F.relu(x)
         z = y1 + y2
         w = z * z
@@ -540,7 +541,7 @@ class TestDygraphDoubleGrad(TestCase):
         numel = x_np.size
         x.stop_gradient = False

-        y = fluid.layers.relu(x)
+        y = F.relu(x)
         z = y + 1
         w = z * z
...
@@ -21,6 +21,7 @@ from test_imperative_base import new_program_scope
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
+import paddle.nn.functional as F
 from paddle.fluid.dygraph.base import to_variable
 from paddle.fluid.framework import _test_eager_guard
 from paddle.fluid.optimizer import AdamOptimizer
@@ -58,7 +59,7 @@ class GCN(fluid.Layer):
         self.gc2 = GraphConv(self.full_name(), 32, 10)

     def forward(self, x, adj):
-        x = fluid.layers.relu(self.gc(x, adj))
+        x = F.relu(self.gc(x, adj))
         return self.gc2(x, adj)
...
@@ -19,6 +19,7 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
+import paddle.nn.functional as F
 from paddle.fluid import core
 from paddle.fluid.dygraph.base import to_variable
 from paddle.fluid.dygraph.parallel import (
@@ -34,7 +35,7 @@ class MyLayer(fluid.Layer):
         super().__init__(name_scope)

     def forward(self, inputs):
-        x = fluid.layers.relu(inputs)
+        x = F.relu(inputs)
         x = paddle.multiply(x, x)
         x = paddle.sum(x)
         return [x]
...
@@ -19,6 +19,7 @@ from test_imperative_base import new_program_scope
 import paddle
 import paddle.fluid as fluid
+import paddle.nn.functional as F
 from paddle.fluid import core
 from paddle.fluid.optimizer import SGDOptimizer
@@ -38,7 +39,7 @@ class Policy(fluid.dygraph.Layer):
         x = paddle.reshape(inputs, shape=[-1, 4])
         x = self.affine1(x)
         x = paddle.nn.functional.dropout(x, self.dropout_ratio)
-        x = fluid.layers.relu(x)
+        x = F.relu(x)
         action_scores = self.affine2(x)
         return paddle.nn.functional.softmax(action_scores, axis=1)
...
...@@ -344,7 +344,7 @@ class TestLayer(LayerTest): ...@@ -344,7 +344,7 @@ class TestLayer(LayerTest):
def test_relu(self): def test_relu(self):
with self.static_graph(): with self.static_graph():
t = layers.data(name='t', shape=[3, 3], dtype='float32') t = layers.data(name='t', shape=[3, 3], dtype='float32')
ret = layers.relu(t) ret = F.relu(t)
static_ret = self.get_static_graph_result( static_ret = self.get_static_graph_result(
feed={'t': np.ones([3, 3], dtype='float32')}, fetch_list=[ret] feed={'t': np.ones([3, 3], dtype='float32')}, fetch_list=[ret]
)[0] )[0]
...@@ -352,11 +352,11 @@ class TestLayer(LayerTest): ...@@ -352,11 +352,11 @@ class TestLayer(LayerTest):
with self.dynamic_graph(): with self.dynamic_graph():
with _test_eager_guard(): with _test_eager_guard():
t = np.ones([3, 3], dtype='float32') t = np.ones([3, 3], dtype='float32')
dy_eager_ret = layers.relu(base.to_variable(t)) dy_eager_ret = F.relu(base.to_variable(t))
dy_eager_ret_value = dy_eager_ret.numpy() dy_eager_ret_value = dy_eager_ret.numpy()
t = np.ones([3, 3], dtype='float32') t = np.ones([3, 3], dtype='float32')
dy_ret = layers.relu(base.to_variable(t)) dy_ret = F.relu(base.to_variable(t))
dy_ret_value = dy_ret.numpy() dy_ret_value = dy_ret.numpy()
np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05) np.testing.assert_allclose(static_ret, dy_ret_value, rtol=1e-05)
......
@@ -18,6 +18,7 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
+import paddle.nn.functional as F


 class TestMemoryReuseExcludeFeedVar(unittest.TestCase):
@@ -29,7 +30,7 @@ class TestMemoryReuseExcludeFeedVar(unittest.TestCase):
         image = fluid.layers.data(
             name='image', shape=self.image_shape, dtype='float32'
         )
-        relu_image = fluid.layers.relu(image)
+        relu_image = F.relu(image)
         loss = paddle.mean(relu_image)

         build_strategy = fluid.BuildStrategy()
...
@@ -18,7 +18,7 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
-import paddle.fluid.layers as layers
+import paddle.nn.functional as F
 from paddle import _legacy_C_ops
@@ -66,7 +66,7 @@ class TestVariable(unittest.TestCase):
         a = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
         x = fluid.dygraph.to_variable(a)
-        res1 = layers.relu(x)
+        res1 = F.relu(x)
         res2 = _legacy_C_ops.relu(x)
         np.testing.assert_array_equal(res1.numpy(), res2.numpy())
...
@@ -19,6 +19,7 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
+import paddle.nn.functional as F
 from paddle.fluid.framework import _in_legacy_dygraph
 from paddle.fluid.wrapped_decorator import wrap_decorator
@@ -220,7 +221,7 @@ class TestDygraphDoubleGrad(TestCase):
         numel = x_np.size
         x.stop_gradient = False

-        y = fluid.layers.relu(x)
+        y = F.relu(x)
         z = y + 1
         w = z * z
@@ -261,8 +262,8 @@ class TestDygraphDoubleGrad(TestCase):
         numel = x_np.size
         x.stop_gradient = False

-        y1 = fluid.layers.relu(x)
-        y2 = fluid.layers.relu(x)
+        y1 = F.relu(x)
+        y2 = F.relu(x)
         z = y1 + y2
         w = z * z
@@ -308,7 +309,7 @@ class TestDygraphDoubleGrad(TestCase):
         numel = x_np.size
         x.stop_gradient = False

-        y = fluid.layers.relu(x)
+        y = F.relu(x)
         z = y + 1
         w = z * z
...
@@ -18,6 +18,7 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
+import paddle.nn.functional as F


 class TestInferencePartialFeed(unittest.TestCase):
@@ -38,9 +39,9 @@ class TestInferencePartialFeed(unittest.TestCase):
         else:
             lr = fluid.data(name='lr', shape=[None], dtype='float32')

-        relu_x = fluid.layers.relu(x)
-        relu_y = fluid.layers.relu(y)
-        relu_lr = fluid.layers.relu(lr)
+        relu_x = F.relu(x)
+        relu_y = F.relu(y)
+        relu_lr = F.relu(lr)

         exe = fluid.Executor(places[0])
         exe.run(startup_prog)
...
@@ -20,6 +20,7 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
+import paddle.nn.functional as F
 from paddle.fluid.framework import _in_legacy_dygraph, _test_eager_guard
@@ -653,7 +654,7 @@ class TestVarBase(unittest.TestCase):
         with fluid.dygraph.guard():
             var = fluid.dygraph.to_variable(self.array)
             var.stop_gradient = False
-            loss = fluid.layers.relu(var)
+            loss = F.relu(var)
             loss.backward()
             grad_var = var._grad_ivar()
             self.assertEqual(grad_var.shape, self.shape)
@@ -667,7 +668,7 @@ class TestVarBase(unittest.TestCase):
         with fluid.dygraph.guard():
             var = fluid.dygraph.to_variable(self.array)
             var.stop_gradient = False
-            loss = fluid.layers.relu(var)
+            loss = F.relu(var)
             loss.backward()
             grad_var = var.gradient()
             self.assertEqual(grad_var.shape, self.array.shape)
...
@@ -20,6 +20,7 @@ import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 import paddle.fluid.layers as layers
+import paddle.nn.functional as F
 from paddle.fluid.backward import append_backward
 from paddle.fluid.framework import Program, program_guard
@@ -96,7 +97,7 @@ class TestApiWhileLoop(unittest.TestCase):
             test_list[0] = paddle.reshape(test_list[0], [2, -1]) + 1
             test_list_dict[0]["test_key"] += 1
-            test_list_dict[0]["test_key"] = fluid.layers.relu(
+            test_list_dict[0]["test_key"] = F.relu(
                 test_list_dict[0]["test_key"]
             )
...