Unverified commit 265a54aa, authored by kangguangli, committed by GitHub

replace cross_entropy in test*.py except python/paddle/fluid/tests/unittests/*.py (#48978)

Parent 8035c6f2
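Every hunk below applies the same mechanical swap: the deprecated `fluid.layers.relu` call becomes the functional API `paddle.nn.functional.relu`, imported as `F`. A minimal standalone sketch of the equivalence, with illustrative values not taken from the diff:

```python
import paddle
import paddle.nn.functional as F

x = paddle.to_tensor([-1.5, 0.0, 2.0])

# Old-style call removed by this commit: fluid.layers.relu(x)
# Functional replacement used throughout the hunks below:
y = F.relu(x)
print(y.numpy())  # [0. 0. 2.]
```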
@@ -28,6 +28,7 @@ from paddle.fluid.contrib.slim.quantization import OutScaleForInferencePass
from paddle.fluid.contrib.slim.quantization import AddQuantDequantPass
from paddle.fluid import core
from paddle.fluid.layer_helper import LayerHelper
+import paddle.nn.functional as F
paddle.enable_static()
@@ -75,8 +76,8 @@ def pact(x, name=None):
learning_rate=1,
)
u_param = helper.create_parameter(attr=u_param_attr, shape=[1], dtype=dtype)
-x = paddle.subtract(x, fluid.layers.relu(paddle.subtract(x, u_param)))
-x = paddle.add(x, fluid.layers.relu(paddle.subtract(-u_param, x)))
+x = paddle.subtract(x, F.relu(paddle.subtract(x, u_param)))
+x = paddle.add(x, F.relu(paddle.subtract(-u_param, x)))
return x
......
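An aside on the `pact()` hunk above: the two rewritten lines implement PACT-style activation clipping. `F.relu(x - u)` measures the overshoot above the learnable bound `u` and is subtracted away, while `F.relu(-u - x)` measures the undershoot below `-u` and is added back, so the result equals `clip(x, -u, u)`. A quick standalone check of that identity (the tensor `u` stands in for the learnable `u_param`):

```python
import paddle
import paddle.nn.functional as F

x = paddle.to_tensor([-3.0, -0.5, 0.5, 3.0])
u = paddle.to_tensor([1.0])  # stand-in for the learnable u_param

# Same two lines as the patched pact(), applied to concrete values.
x = paddle.subtract(x, F.relu(paddle.subtract(x, u)))
x = paddle.add(x, F.relu(paddle.subtract(-u, x)))
print(x.numpy())  # [-1.  -0.5  0.5  1. ], i.e. clip(x, -u, u)
```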
@@ -25,6 +25,7 @@ from ifelse_simple_func import (
import paddle
import paddle.fluid as fluid
+import paddle.nn.functional as F
from paddle.jit.dy2static.utils import ast_to_func
from paddle.utils import gast
@@ -60,7 +61,7 @@ class TestAST2Func(unittest.TestCase):
def test_ast2func_static(self):
def func(x):
-y = fluid.layers.relu(x)
+y = F.relu(x)
loss = paddle.mean(y)
return loss
......
@@ -43,6 +43,7 @@ from ifelse_simple_func import (
import paddle
import paddle.fluid.core as core
+import paddle.nn.functional as F
from paddle.jit.api import declarative
from paddle.jit.dy2static.program_translator import ProgramTranslator
from paddle.jit.dy2static.utils import Dygraph2StaticException
@@ -269,7 +270,7 @@ class TestDygraphIfElseNet(unittest.TestCase):
# Test to call function ahead caller.
def relu(x):
-return fluid.layers.relu(x)
+return F.relu(x)
def call_external_func(x, label=None):
......
@@ -18,6 +18,7 @@ import numpy as np
import paddle
import paddle.fluid as fluid
+import paddle.nn.functional as F
from paddle.jit.api import declarative
@@ -48,7 +49,7 @@ def call_lambda_in_func(x):
add_func = lambda x: x + 1
-y = paddle.mean((lambda x: fluid.layers.relu(x))(x))
+y = paddle.mean((lambda x: F.relu(x))(x))
out = add_func(y) if y > 1 and y < 2 else (lambda x: x**2)(y)
return out
......
@@ -19,6 +19,7 @@ import numpy as np
import paddle
import paddle.fluid as fluid
+import paddle.nn.functional as F
from paddle.jit.api import declarative
from paddle.jit.dy2static.loop_transformer import NameVisitor
from paddle.utils import gast
@@ -51,7 +52,7 @@ def while_loop_dyfun_with_conflict_var(x):
def relu(y):
# 'y' is not visible outside the scope.
-return fluid.layers.relu(y)
+return F.relu(y)
while x < 10:
# If a tmp variable is created which has same name
......
@@ -21,6 +21,7 @@ import numpy as np
import paddle
import paddle.fluid as fluid
+import paddle.nn.functional as F
from paddle.fluid.dygraph import Layer, to_variable
from paddle.jit import ProgramTranslator
from paddle.jit.api import declarative
@@ -45,7 +46,7 @@ class Policy(Layer):
x = paddle.reshape(x, shape=[1, 4])
x = self.affine1(x)
x = fluid.layers.dropout(x, self.dropout_ratio)
-x = fluid.layers.relu(x)
+x = F.relu(x)
action_scores = self.affine2(x)
log_prob = paddle.nn.functional.softmax(action_scores, axis=1)
......
@@ -17,6 +17,7 @@ import unittest
import numpy as np
import paddle
+import paddle.nn.functional as F
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@@ -63,7 +64,7 @@ class TestBase(IPUOpTest):
conv3 = paddle.static.nn.conv2d(
add1, num_filters=8, filter_size=8, bias_attr=False
)
-out = paddle.fluid.layers.relu(conv3, **self.attrs)
+out = F.relu(conv3, **self.attrs)
self.fetch_list = [out.name]
def run_model(self, exec_mode):
......
@@ -17,6 +17,7 @@ import unittest
import numpy as np
import paddle
+import paddle.nn.functional as F
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest
@@ -147,7 +148,7 @@ class TestReciprocal(TestBase):
class TestRelu(TestBase):
def set_test_op(self):
-self.op = paddle.fluid.layers.relu
+self.op = F.relu
self.op_attrs = {}
......
@@ -19,6 +19,7 @@ from inference_pass_test import InferencePassTest
import paddle
import paddle.fluid as fluid
+import paddle.nn.functional as F
from paddle.fluid.core import PassVersionChecker
@@ -69,7 +70,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_Relu(
):
def set_params(self):
self.operand = paddle.add
-self.act = fluid.layers.relu
+self.act = F.relu
class ElementwiseActivationMkldnnFusePassTest_Add_Tanh(
@@ -169,7 +170,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_Relu(
):
def set_params(self):
self.operand = paddle.subtract
-self.act = fluid.layers.relu
+self.act = F.relu
class ElementwiseActivationMkldnnFusePassTest_Sub_Tanh(
@@ -261,7 +262,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_Relu(
):
def set_params(self):
self.operand = paddle.multiply
-self.act = fluid.layers.relu
+self.act = F.relu
class ElementwiseActivationMkldnnFusePassTest_Mul_Tanh(
......
@@ -19,6 +19,7 @@ from inference_pass_test import InferencePassTest
import paddle
import paddle.fluid as fluid
+import paddle.nn.functional as F
from paddle.fluid.core import PassVersionChecker
@@ -33,7 +34,7 @@ class MkldnnInplacePassTest(InferencePassTest):
data, num_filters=3, filter_size=3, bias_attr=False
)
softmax_out = paddle.nn.functional.softmax(conv_out_1)
-relu_out = fluid.layers.relu(conv_out_1)
+relu_out = F.relu(conv_out_1)
eltwise_out = paddle.add(softmax_out, relu_out)
self.pass_name = 'mkldnn_inplace_pass'
......
@@ -19,6 +19,7 @@ from inference_pass_test import InferencePassTest
import paddle
import paddle.fluid as fluid
+import paddle.nn.functional as F
class TestMKLDNNMatmulFuseOp(InferencePassTest):
@@ -41,7 +42,7 @@ class TestMKLDNNMatmulFuseOp(InferencePassTest):
out = paddle.transpose(out, perm=[0, 2, 1, 3])
out = paddle.reshape(out, [0, 0, self.shape_y[0] * self.shape_y[2]])
-out = fluid.layers.relu(out)
+out = F.relu(out)
return out
def setUp(self):
@@ -107,7 +108,7 @@ class TestMKLDNNMatmulOpNotFusedBreakPattern(TestMKLDNNMatmulFuseOp):
out = paddle.transpose(out, perm=[0, 1, 2, 3]) # breaks pattern
out = paddle.reshape(out, [0, 0, self.shape_y[0] * self.shape_y[2]])
-out = fluid.layers.relu(out)
+out = F.relu(out)
return out
......
@@ -22,6 +22,7 @@ from inference_pass_test import InferencePassTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
+import paddle.nn.functional as F
import paddle.static.nn as nn
from paddle.fluid.core import AnalysisConfig, PassVersionChecker
@@ -47,7 +48,7 @@ class TensorRTSubgraphPassActivationTest(InferencePassTest):
self.fetch_list = [out]
def append_act(self, x):
-return fluid.layers.relu(x)
+return F.relu(x)
def test_check_output(self):
if core.is_compiled_with_cuda():
......
@@ -20,6 +20,7 @@ from quant_dequant_test import QuantDequantTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
+import paddle.nn.functional as F
from paddle.fluid.core import AnalysisConfig, PassVersionChecker
@@ -52,7 +53,7 @@ class QuantDequantTensorRTSubgraphPassConvTest(QuantDequantTest):
cout = paddle.reshape(conv_out, shape=[1, 1, 12544])
elif self.conv_groups == 4:
cout = paddle.reshape(conv_out, shape=[1, 1, 10816])
-result = fluid.layers.relu(cout)
+result = F.relu(cout)
loss = paddle.nn.functional.cross_entropy(
input=result,
label=label_shape,
@@ -160,7 +161,7 @@ class DynamicShapeQuantDequantTensorRTSubgraphPassConvTest(QuantDequantTest):
act=None,
)
cout = paddle.reshape(conv_out, shape=[1, 1, 10816])
-result = fluid.layers.relu(cout)
+result = F.relu(cout)
loss = paddle.nn.functional.cross_entropy(
input=result,
label=label_shape,
@@ -266,7 +267,7 @@ class QuantDequantTensorRTSubgraphPassConvTransposeTest(QuantDequantTest):
cout = paddle.reshape(conv_out, shape=[1, 1, 12544])
elif self.conv_groups == 4:
cout = paddle.reshape(conv_out, shape=[1, 1, 10816])
-result = fluid.layers.relu(cout)
+result = F.relu(cout)
loss = paddle.nn.functional.cross_entropy(
input=result,
label=label_shape,
......
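The quant/dequant tests above feed the ReLU output straight into `paddle.nn.functional.cross_entropy`, the replacement named in the commit title, which folds softmax and negative log-likelihood into one call. A minimal standalone sketch of that API with made-up shapes (the tests themselves pass extra arguments that are elided in this view):

```python
import paddle
import paddle.nn.functional as F

logits = paddle.randn([4, 10])             # hypothetical batch of 4, 10 classes
labels = paddle.randint(0, 10, shape=[4])  # integer class ids
loss = F.cross_entropy(logits, labels)     # softmax + NLL, mean-reduced by default
print(float(loss))
```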
@@ -20,6 +20,7 @@ from quant_dequant_test import QuantDequantTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
+import paddle.nn.functional as F
from paddle.fluid.core import AnalysisConfig, PassVersionChecker
@@ -37,7 +38,7 @@ class FCQuantDequantFusePassTRTDims3Cols1Test(QuantDequantTest):
bias_attr=False,
act="relu",
)
-result = fluid.layers.relu(fc_out)
+result = F.relu(fc_out)
loss = paddle.nn.functional.cross_entropy(
input=result,
label=self.label,
@@ -109,7 +110,7 @@ class FCQuantDequantFusePassTRTDims3Cols2Test(QuantDequantTest):
act=None,
)
c_out = paddle.reshape(fc_out, shape=[0, 784])
-result = fluid.layers.relu(c_out)
+result = F.relu(c_out)
loss = paddle.nn.functional.cross_entropy(
input=result,
label=self.label,
@@ -183,7 +184,7 @@ class FCQuantDequantFusePassTRTDims3Cols3Test(QuantDequantTest):
act=None,
)
c_out = paddle.reshape(fc_out, shape=[1, 1, 2744])
-result = fluid.layers.relu(c_out)
+result = F.relu(c_out)
loss = paddle.nn.functional.cross_entropy(
input=result,
label=label_shape,
......
@@ -20,6 +20,7 @@ from quant_dequant_test import QuantDequantTest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
+import paddle.nn.functional as F
from paddle.fluid.core import AnalysisConfig, PassVersionChecker
@@ -46,7 +47,7 @@ class TensorRTMatMulQuantDequantDims3Test(QuantDequantTest):
bias_attr=False,
act=None,
)
-result = fluid.layers.relu(fc_out)
+result = F.relu(fc_out)
loss = paddle.nn.functional.cross_entropy(
input=result,
label=self.label,
@@ -148,7 +149,7 @@ class TensorRTMatMulQuantDequantDims4Test(QuantDequantTest):
bias_attr=False,
act=None,
)
-result = fluid.layers.relu(fc_out)
+result = F.relu(fc_out)
loss = paddle.nn.functional.cross_entropy(
input=result,
label=self.label,
@@ -249,7 +250,7 @@ class TensorRTMatMulQuantDequantDims3DynamicTest(QuantDequantTest):
bias_attr=False,
act=None,
)
-result = fluid.layers.relu(fc_out)
+result = F.relu(fc_out)
loss = paddle.nn.functional.cross_entropy(
input=result,
label=self.label,
......