Unverified commit e6ca78c2, authored by W wanghuancoder, committed by GitHub

Del old dygraph optest2 (#51458)

* delete old dygraph op test
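
The same migration pattern repeats in every touched file: tests import `OpTest` (and `paddle_static_guard`) from `eager_op_test` instead of `op_test`, drop the `check_eager` bookkeeping because dygraph checking is now implied by the new test base, set `self.python_api` to the corresponding dygraph API, and replace global `paddle.enable_static()` / `paddle.disable_static()` toggles with a scoped `with paddle_static_guard():` block. A minimal sketch of the pattern is shown below; it is illustrative only (the class names are hypothetical) and is not part of the diff itself:

```python
# Sketch of the op-test migration pattern applied throughout this PR
# (assumes the eager_op_test helper module from the Paddle test suite).
import unittest

import numpy as np
import paddle
from eager_op_test import OpTest, paddle_static_guard


class TestExpMigrated(OpTest):  # hypothetical example test
    def setUp(self):
        self.op_type = "exp"
        self.python_api = paddle.exp  # dygraph API used by the new OpTest
        x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
        self.inputs = {'X': x}
        self.outputs = {'Out': np.exp(x)}

    def test_check_output(self):
        # dygraph checking is the default; no check_eager argument anymore
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')


class TestExpError(unittest.TestCase):  # hypothetical example test
    def test_errors(self):
        # static-graph-only checks are scoped with paddle_static_guard()
        # instead of toggling paddle.enable_static() globally
        with paddle_static_guard():
            with paddle.static.program_guard(paddle.static.Program()):
                self.assertRaises(TypeError, paddle.exp, 1)


if __name__ == "__main__":
    unittest.main()
```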
Parent 48090c72
......@@ -437,6 +437,7 @@ class TestMKLDNNExpOp(TestActivation):
# Check if primitives already exist in backward
class TestMKLDNNAbsPrimitivesAlreadyExist(unittest.TestCase):
def setUp(self):
paddle.enable_static()
super().setUp()
np.random.seed(123)
......
......@@ -17,7 +17,7 @@ import unittest
import warnings
import numpy as np
from op_test import OpTest, convert_float_to_uint16
from eager_op_test import OpTest, convert_float_to_uint16, paddle_static_guard
from scipy.special import erf, expit
import paddle
......@@ -28,11 +28,10 @@ import paddle.static as static
from paddle.fluid import Program, program_guard
from paddle.fluid.layer_helper import LayerHelper
paddle.enable_static()
class TestSqrtOpError(unittest.TestCase):
def test_errors(self):
with paddle_static_guard():
with program_guard(Program(), Program()):
# The input type of sqrt op must be Variable or numpy.ndarray.
in1 = 1
......@@ -56,7 +55,6 @@ class TestActivation(OpTest):
self.init_dtype()
self.init_shape()
self.init_kernel_type()
self.check_eager = True
self.python_api = paddle.exp
np.random.seed(2049)
......@@ -67,18 +65,15 @@ class TestActivation(OpTest):
self.outputs = {'Out': out}
def test_check_output(self):
check_eager = False
if hasattr(self, 'check_eager'):
check_eager = self.check_eager
self.check_output(check_eager=check_eager)
self.check_output()
def test_check_grad(self):
if self.dtype == np.float16:
return
check_eager = False
if hasattr(self, 'check_eager'):
check_eager = self.check_eager
self.check_grad(['X'], 'Out', check_eager=check_eager)
self.check_grad(
['X'],
'Out',
)
def init_dtype(self):
self.dtype = np.float64
......@@ -155,10 +150,10 @@ class TestExpm1(TestActivation):
self.outputs = {'Out': out}
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
class TestExpm1_ZeroDim(TestExpm1):
......@@ -181,9 +176,8 @@ class TestExpm1API(unittest.TestCase):
self.place.append(paddle.CUDAPlace(0))
def test_static_api(self):
paddle.enable_static()
def run(place):
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
X = paddle.fluid.data('X', self.shape, dtype=self.dtype)
out = paddle.expm1(X)
......@@ -197,17 +191,15 @@ class TestExpm1API(unittest.TestCase):
def test_dygraph_api(self):
def run(place):
paddle.disable_static(place)
X = paddle.to_tensor(self.x)
out = paddle.expm1(X)
np.testing.assert_allclose(self.out_ref, out.numpy(), rtol=1e-05)
paddle.enable_static()
for place in self.place:
run(place)
def test_errors(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
X = paddle.fluid.data('X', self.shape, dtype='int32')
self.assertRaises(TypeError, paddle.expm1, X)
......@@ -216,11 +208,12 @@ class TestExpm1API(unittest.TestCase):
class TestParameter:
def test_out_name(self):
with paddle_static_guard():
with fluid.program_guard(fluid.Program()):
if paddle.fluid.framework.in_dygraph_mode():
paddle.enable_static()
np_x = np.array([0.1]).astype('float32').reshape((-1, 1))
data = paddle.static.data(name="X", shape=[-1, 1], dtype="float32")
data = paddle.static.data(
name="X", shape=[-1, 1], dtype="float32"
)
out = eval("paddle.%s(data, name='Y')" % self.op_type)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
......@@ -357,7 +350,7 @@ class TestSiluAPI(unittest.TestCase):
)
def test_static_api(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data('X', [11, 17])
out1 = F.silu(x)
......@@ -370,7 +363,6 @@ class TestSiluAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, r, rtol=1e-05)
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out1 = F.silu(x)
m = paddle.nn.Silu()
......@@ -378,9 +370,9 @@ class TestSiluAPI(unittest.TestCase):
out_ref = self.x_np / (1 + np.exp(-self.x_np))
for r in [out1, out2]:
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
paddle.enable_static()
def test_errors(self):
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
# The input type must be Variable.
self.assertRaises(TypeError, F.silu, 1)
......@@ -399,6 +391,7 @@ class TestSiluAPI(unittest.TestCase):
class TestLogSigmoid(TestActivation):
def setUp(self):
self.op_type = "logsigmoid"
self.python_api = paddle.nn.functional.log_sigmoid
self.init_dtype()
self.init_shape()
......@@ -432,7 +425,7 @@ class TestLogSigmoidAPI(unittest.TestCase):
)
def test_static_api(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data('X', [11, 17])
out1 = F.log_sigmoid(x)
......@@ -445,7 +438,6 @@ class TestLogSigmoidAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, r, rtol=1e-05)
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out1 = F.log_sigmoid(x)
m = paddle.nn.LogSigmoid()
......@@ -453,10 +445,9 @@ class TestLogSigmoidAPI(unittest.TestCase):
out_ref = np.log(1 / (1 + np.exp(-self.x_np)))
for r in [out1, out2]:
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
paddle.enable_static()
def test_errors(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
# The input type must be Variable.
self.assertRaises(TypeError, F.log_sigmoid, 1)
......@@ -475,6 +466,7 @@ class TestLogSigmoidAPI(unittest.TestCase):
class TestTanh(TestActivation, TestParameter):
def setUp(self):
self.op_type = "tanh"
self.python_api = paddle.tanh
self.init_dtype()
self.init_shape()
......@@ -519,7 +511,7 @@ class TestTanhAPI(unittest.TestCase):
self.tanh = F.tanh
def test_static_api(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data('X', [10, 12], self.dtype)
out1 = self.tanh(x)
......@@ -532,7 +524,6 @@ class TestTanhAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, r, rtol=1e-05)
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out1 = F.tanh(x)
out2 = paddle.tanh(x)
......@@ -541,10 +532,9 @@ class TestTanhAPI(unittest.TestCase):
out_ref = np.tanh(self.x_np)
for r in [out1, out2, out3]:
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
paddle.enable_static()
def test_errors(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
# The input type must be Variable.
self.assertRaises(TypeError, self.tanh, 1)
......@@ -569,6 +559,7 @@ class TestTanhInplaceAPI(TestTanhAPI):
class TestAtan(TestActivation, TestParameter):
def setUp(self):
self.op_type = "atan"
self.python_api = paddle.atan
self.init_dtype()
self.init_shape()
......@@ -585,9 +576,12 @@ class TestAtan(TestActivation, TestParameter):
self.check_grad(['X'], 'Out')
def test_out_name(self):
with paddle_static_guard():
with fluid.program_guard(fluid.Program()):
np_x = np.array([0.1]).astype('float32').reshape((-1, 1))
data = paddle.static.data(name="X", shape=[-1, 1], dtype="float32")
data = paddle.static.data(
name="X", shape=[-1, 1], dtype="float32"
)
out = paddle.atan(data, name='Y')
place = fluid.CPUPlace()
exe = fluid.Executor(place)
......@@ -612,6 +606,7 @@ class TestAtan_ZeroDim(TestTanh):
class TestSinh(TestActivation):
def setUp(self):
self.op_type = "sinh"
self.python_api = paddle.sinh
self.init_dtype()
self.init_shape()
......@@ -643,6 +638,7 @@ class TestSinhAPI(unittest.TestCase):
np.testing.assert_allclose(z, z_expected, rtol=1e-05)
def test_api(self):
with paddle_static_guard():
test_data_shape = [11, 17]
with fluid.program_guard(fluid.Program(), fluid.Program()):
input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
......@@ -682,20 +678,26 @@ class TestSinhAPI(unittest.TestCase):
class TestSinhOpError(unittest.TestCase):
def test_errors(self):
with paddle_static_guard():
with program_guard(Program()):
# The input type must be Variable.
self.assertRaises(TypeError, paddle.sinh, 1)
# The input dtype must be float16, float32, float64.
x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
x_int32 = fluid.data(
name='x_int32', shape=[12, 10], dtype='int32'
)
self.assertRaises(TypeError, paddle.sinh, x_int32)
# support the input dtype is float16
x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
x_fp16 = fluid.data(
name='x_fp16', shape=[12, 10], dtype='float16'
)
paddle.sinh(x_fp16)
class TestCosh(TestActivation):
def setUp(self):
self.op_type = "cosh"
self.python_api = paddle.cosh
self.init_dtype()
self.init_shape()
......@@ -727,6 +729,7 @@ class TestCoshAPI(unittest.TestCase):
np.testing.assert_allclose(z, z_expected, rtol=1e-05)
def test_api(self):
with paddle_static_guard():
test_data_shape = [11, 17]
with fluid.program_guard(fluid.Program(), fluid.Program()):
input_x = np.random.uniform(0.1, 1, test_data_shape).astype(
......@@ -766,14 +769,19 @@ class TestCoshAPI(unittest.TestCase):
class TestCoshOpError(unittest.TestCase):
def test_errors(self):
with paddle_static_guard():
with program_guard(Program()):
# The input type must be Variable.
self.assertRaises(TypeError, paddle.cosh, 1)
# The input dtype must be float16, float32, float64.
x_int32 = fluid.data(name='x_int32', shape=[12, 10], dtype='int32')
x_int32 = fluid.data(
name='x_int32', shape=[12, 10], dtype='int32'
)
self.assertRaises(TypeError, paddle.cosh, x_int32)
# support the input dtype is float16
x_fp16 = fluid.data(name='x_fp16', shape=[12, 10], dtype='float16')
x_fp16 = fluid.data(
name='x_fp16', shape=[12, 10], dtype='float16'
)
paddle.cosh(x_fp16)
......@@ -785,6 +793,7 @@ def ref_tanhshrink(x):
class TestTanhshrink(TestActivation):
def setUp(self):
self.op_type = "tanh_shrink"
self.python_api = paddle.nn.functional.tanhshrink
self.init_dtype()
self.init_shape()
......@@ -818,7 +827,7 @@ class TestTanhshrinkAPI(unittest.TestCase):
)
def test_static_api(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
out1 = F.tanhshrink(x)
......@@ -831,7 +840,6 @@ class TestTanhshrinkAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, r, rtol=1e-05)
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out1 = F.tanhshrink(x)
tanhshrink = paddle.nn.Tanhshrink()
......@@ -839,10 +847,9 @@ class TestTanhshrinkAPI(unittest.TestCase):
out_ref = ref_tanhshrink(self.x_np)
for r in [out1, out2]:
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
paddle.enable_static()
def test_errors(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
# The input type must be Variable.
self.assertRaises(TypeError, F.tanhshrink, 1)
......@@ -867,6 +874,7 @@ def ref_hardshrink(x, threshold):
class TestHardShrink(TestActivation):
def setUp(self):
self.op_type = "hard_shrink"
self.python_api = paddle.nn.functional.hardshrink
self.init_dtype()
self.init_shape()
......@@ -917,7 +925,7 @@ class TestHardShrinkAPI(unittest.TestCase):
)
def test_static_api(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data('X', [10, 12])
out1 = F.hardshrink(x)
......@@ -930,7 +938,6 @@ class TestHardShrinkAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, r, rtol=1e-05)
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out1 = F.hardshrink(x)
hd = paddle.nn.Hardshrink()
......@@ -945,10 +952,9 @@ class TestHardShrinkAPI(unittest.TestCase):
out_ref = ref_hardshrink(self.x_np, 0.6)
for r in [out1, out2]:
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
paddle.enable_static()
def test_errors(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
# The input type must be Variable.
self.assertRaises(TypeError, F.hardshrink, 1)
......@@ -984,7 +990,7 @@ class TestHardtanhAPI(unittest.TestCase):
)
def test_static_api(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data('X', [10, 12])
out1 = F.hardtanh(x)
......@@ -997,7 +1003,6 @@ class TestHardtanhAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, r, rtol=1e-05)
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out1 = F.hardtanh(x)
m = paddle.nn.Hardtanh()
......@@ -1012,10 +1017,9 @@ class TestHardtanhAPI(unittest.TestCase):
out_ref = ref_hardtanh(self.x_np, -2.0, 2.0)
for r in [out1, out2]:
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
paddle.enable_static()
def test_errors(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
# The input type must be Variable.
self.assertRaises(TypeError, F.hardtanh, 1)
......@@ -1042,7 +1046,6 @@ def ref_softshrink(x, threshold=0.5):
class TestSoftshrink(TestActivation):
def setUp(self):
self.op_type = "softshrink"
self.check_eager = True
self.python_api = paddle.nn.functional.softshrink
self.init_dtype()
self.init_shape()
......@@ -1059,7 +1062,7 @@ class TestSoftshrink(TestActivation):
def test_check_grad(self):
if self.dtype == np.float16:
return
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
class TestSoftshrink_ZeroDim(TestSoftshrink):
......@@ -1080,7 +1083,7 @@ class TestSoftshrinkAPI(unittest.TestCase):
)
def test_static_api(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
out1 = F.softshrink(x, self.threshold)
......@@ -1093,7 +1096,6 @@ class TestSoftshrinkAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, r, rtol=1e-05)
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out1 = F.softshrink(x, self.threshold)
softshrink = paddle.nn.Softshrink(self.threshold)
......@@ -1101,10 +1103,9 @@ class TestSoftshrinkAPI(unittest.TestCase):
out_ref = ref_softshrink(self.x_np, self.threshold)
for r in [out1, out2]:
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
paddle.enable_static()
def test_errors(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
# The input type must be Variable.
self.assertRaises(TypeError, F.softshrink, 1)
......@@ -1145,10 +1146,10 @@ class TestSqrt(TestActivation, TestParameter):
def test_check_grad(self):
if self.dtype == np.float16:
return
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
class TestSqrtPrimFp32(TestActivation):
......@@ -1169,10 +1170,10 @@ class TestSqrtPrimFp32(TestActivation):
def test_check_grad(self):
if self.dtype == np.float16:
return
self.check_grad(['X'], 'Out', check_eager=True, check_prim=True)
self.check_grad(['X'], 'Out', check_prim=True)
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def init_dtype(self):
self.dtype = np.float32
......@@ -1227,13 +1228,11 @@ class TestSqrtBF16(OpTest):
def test_check_output(self):
place = core.CUDAPlace(0)
self.check_output_with_place(place, check_eager=True)
self.check_output_with_place(place)
def test_check_grad(self):
place = core.CUDAPlace(0)
self.check_grad_with_place(
place, ['X'], 'Out', check_eager=True, check_prim=True
)
self.check_grad_with_place(place, ['X'], 'Out', check_prim=True)
class TestRsqrt(TestActivation):
......@@ -1256,9 +1255,7 @@ class TestRsqrt(TestActivation):
def test_check_grad(self):
if self.dtype == np.float16:
return
self.check_grad(
['X'], 'Out', max_relative_error=0.0005, check_eager=True
)
self.check_grad(['X'], 'Out', max_relative_error=0.0005)
'''
......@@ -1296,7 +1293,7 @@ class TestAbs(TestActivation):
def test_check_grad(self):
if self.dtype == np.float16:
return
self.check_grad(['X'], 'Out', check_eager=False, check_prim=True)
self.check_grad(['X'], 'Out', check_prim=True)
class TestAbs_ZeroDim(TestAbs):
......@@ -1307,7 +1304,6 @@ class TestAbs_ZeroDim(TestAbs):
class TestCeil(TestActivation):
def setUp(self):
self.op_type = "ceil"
self.check_eager = True
self.python_api = paddle.ceil
self.init_dtype()
self.init_shape()
......@@ -1336,7 +1332,6 @@ class TestFloor(TestActivation):
def setUp(self):
self.op_type = "floor"
self.prim_op_type = "prim"
self.check_eager = True
self.python_api = paddle.floor
self.init_dtype()
self.init_shape()
......@@ -1367,7 +1362,6 @@ class TestFloor_Prim(TestActivation):
def setUp(self):
self.op_type = "floor"
self.prim_op_type = "prim"
self.check_eager = True
self.python_api = paddle.floor
self.init_dtype()
self.init_shape()
......@@ -1407,6 +1401,7 @@ class TestFloorFp16_Prim(TestFloor_Prim):
class TestCos(TestActivation):
def setUp(self):
self.op_type = "cos"
self.python_api = paddle.cos
self.init_dtype()
self.init_shape()
......@@ -1435,6 +1430,7 @@ class TestTan(TestActivation):
def setUp(self):
np.random.seed(1024)
self.op_type = "tan"
self.python_api = paddle.tan
self.init_dtype()
self.init_shape()
......@@ -1477,15 +1473,13 @@ class TestTanAPI(unittest.TestCase):
)
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out_test = paddle.tan(x)
out_ref = np.tan(self.x_np)
np.testing.assert_allclose(out_ref, out_test.numpy(), rtol=1e-05)
paddle.enable_static()
def test_static_api(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.static.data('X', [11, 17], self.dtype)
out = paddle.tan(x)
......@@ -1511,6 +1505,7 @@ class TestTanAPI(unittest.TestCase):
class TestAcos(TestActivation):
def setUp(self):
self.op_type = "acos"
self.python_api = paddle.acos
self.init_dtype()
self.init_shape()
......@@ -1538,6 +1533,7 @@ class TestAcos_ZeroDim(TestAcos):
class TestSin(TestActivation, TestParameter):
def setUp(self):
self.op_type = "sin"
self.python_api = paddle.sin
self.init_dtype()
self.init_shape()
# prim not support now
......@@ -1567,6 +1563,7 @@ class TestSin_ZeroDim(TestSin):
class TestAsin(TestActivation):
def setUp(self):
self.op_type = "asin"
self.python_api = paddle.asin
self.init_dtype()
self.init_shape()
......@@ -1594,6 +1591,7 @@ class TestAsin_ZeroDim(TestAsin):
class TestAcosh(TestActivation):
def setUp(self):
self.op_type = "acosh"
self.python_api = paddle.acosh
self.init_dtype()
self.init_shape()
......@@ -1621,6 +1619,7 @@ class TestAcosh_ZeroDim(TestAcosh):
class TestAsinh(TestActivation):
def setUp(self):
self.op_type = "asinh"
self.python_api = paddle.asinh
self.init_dtype()
self.init_shape()
......@@ -1648,6 +1647,7 @@ class TestAsinh_ZeroDim(TestAsinh):
class TestAtanh(TestActivation):
def setUp(self):
self.op_type = "atanh"
self.python_api = paddle.atanh
self.init_dtype()
self.init_shape()
......@@ -1675,7 +1675,6 @@ class TestAtanh_ZeroDim(TestAtanh):
class TestRound(TestActivation):
def setUp(self):
self.op_type = "round"
self.check_eager = True
self.python_api = paddle.round
self.init_dtype()
self.init_shape()
......@@ -1760,7 +1759,7 @@ class TestReluAPI(unittest.TestCase):
self.relu = F.relu
def test_static_api(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data('X', [10, 12])
out1 = self.relu(x)
......@@ -1773,7 +1772,6 @@ class TestReluAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, r, rtol=1e-05)
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
m = paddle.nn.ReLU()
out1 = m(x)
......@@ -1781,10 +1779,10 @@ class TestReluAPI(unittest.TestCase):
out_ref = np.maximum(self.x_np, 0)
for r in [out1, out2]:
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
paddle.enable_static()
def test_errors(self):
paddle.enable_static()
with paddle_static_guard():
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
# The input type must be Variable.
self.assertRaises(TypeError, self.relu, 1)
......@@ -1818,6 +1816,7 @@ class TestLeakyRelu(TestActivation):
def setUp(self):
self.op_type = "leaky_relu"
self.python_api = paddle.nn.functional.leaky_relu
self.init_dtype()
self.init_shape()
alpha = self.get_alpha()
......@@ -1870,7 +1869,7 @@ class TestLeakyReluAPI(unittest.TestCase):
)
def test_static_api(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data('X', [10, 12])
out1 = F.leaky_relu(x)
......@@ -1883,7 +1882,6 @@ class TestLeakyReluAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, r, rtol=1e-05)
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out1 = F.leaky_relu(x)
m = paddle.nn.LeakyReLU()
......@@ -1898,10 +1896,9 @@ class TestLeakyReluAPI(unittest.TestCase):
out_ref = ref_leaky_relu(self.x_np, 0.6)
for r in [out1, out2]:
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
paddle.enable_static()
def test_errors(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
# The input type must be Variable.
self.assertRaises(TypeError, F.leaky_relu, 1)
......@@ -2007,7 +2004,7 @@ class TestGELUAPI(unittest.TestCase):
)
def test_static_api(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data('X', [11, 17])
out1 = F.gelu(x)
......@@ -2020,7 +2017,6 @@ class TestGELUAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, r, rtol=1e-05)
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out1 = F.gelu(x)
m = paddle.nn.GELU()
......@@ -2035,10 +2031,9 @@ class TestGELUAPI(unittest.TestCase):
out_ref = gelu(self.x_np, True)
for r in [out1, out2]:
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
paddle.enable_static()
def test_errors(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
# The input type must be Variable.
self.assertRaises(TypeError, F.gelu, 1)
......@@ -2057,6 +2052,7 @@ class TestGELUAPI(unittest.TestCase):
class TestBRelu(TestActivation):
def setUp(self):
self.op_type = "brelu"
self.python_api = paddle.nn.functional.hardtanh
self.init_dtype()
np.random.seed(1024)
......@@ -2109,7 +2105,7 @@ class TestRelu6(TestActivation):
def test_check_grad(self):
if self.dtype == np.float16:
return
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
class TestRelu6_ZeroDim(TestRelu6):
......@@ -2130,7 +2126,7 @@ class TestRelu6API(unittest.TestCase):
)
def test_static_api(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
out1 = F.relu6(x)
......@@ -2143,7 +2139,6 @@ class TestRelu6API(unittest.TestCase):
np.testing.assert_allclose(out_ref, r, rtol=1e-05)
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out1 = F.relu6(x)
relu6 = paddle.nn.ReLU6()
......@@ -2151,10 +2146,9 @@ class TestRelu6API(unittest.TestCase):
out_ref = ref_relu6(self.x_np)
for r in [out1, out2]:
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
paddle.enable_static()
def test_fluid_api(self):
paddle.enable_static()
with paddle_static_guard():
with fluid.program_guard(fluid.Program()):
x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
out = paddle.nn.functional.relu6(x)
......@@ -2164,7 +2158,7 @@ class TestRelu6API(unittest.TestCase):
np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
def test_errors(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
# The input type must be Variable.
self.assertRaises(TypeError, F.relu6, 1)
......@@ -2182,15 +2176,17 @@ class TestRelu6API(unittest.TestCase):
class TestRelu6APIWarnings(unittest.TestCase):
def test_warnings(self):
with paddle_static_guard():
with warnings.catch_warnings(record=True) as context:
warnings.simplefilter("always")
paddle.enable_static()
helper = LayerHelper("relu6")
data = paddle.static.data(
name='data', shape=[None, 3, 32, 32], dtype='float32'
)
out = helper.create_variable_for_type_inference(dtype=data.dtype)
out = helper.create_variable_for_type_inference(
dtype=data.dtype
)
os.environ['FLAGS_print_extra_attrs'] = "1"
helper.append_op(
type="relu6",
......@@ -2199,7 +2195,8 @@ class TestRelu6APIWarnings(unittest.TestCase):
attrs={'threshold': 6.0},
)
self.assertTrue(
"op relu6 use extra_attr: threshold" in str(context[-1].message)
"op relu6 use extra_attr: threshold"
in str(context[-1].message)
)
os.environ['FLAGS_print_extra_attrs'] = "0"
......@@ -2247,13 +2244,12 @@ class TestHardSwish(TestActivation):
self.check_grad(
['X'],
'Out',
check_eager=True,
check_prim=True,
only_check_prim=self.if_only_check_prim(),
)
def test_check_output(self):
self.check_output(check_eager=True, check_prim=True)
self.check_output(check_prim=True)
class TestHardSwish_ZeroDim(TestHardSwish):
......@@ -2276,6 +2272,7 @@ class TestHardswishAPI(unittest.TestCase):
)
def test_static_api(self):
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
out1 = F.hardswish(x)
......@@ -2288,7 +2285,6 @@ class TestHardswishAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, r, rtol=1e-05)
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor([11648.0, 11448.0])
out1 = F.hardswish(x)
m = paddle.nn.Hardswish()
......@@ -2296,9 +2292,9 @@ class TestHardswishAPI(unittest.TestCase):
out_ref = [11648.0, 11448.0]
for r in [out1, out2]:
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
paddle.enable_static()
def test_fluid_api(self):
with paddle_static_guard():
with fluid.program_guard(fluid.Program()):
x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
out = paddle.nn.functional.hardswish(x)
......@@ -2307,13 +2303,12 @@ class TestHardswishAPI(unittest.TestCase):
out_ref = ref_hardswish(self.x_np)
np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out = paddle.nn.functional.hardswish(x)
np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
paddle.enable_static()
def test_errors(self):
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
# The input type must be Variable.
self.assertRaises(TypeError, F.hardswish, 1)
......@@ -2365,6 +2360,7 @@ class TestELU(TestActivation):
self.op_type = "elu"
self.init_dtype()
self.init_shape()
self.python_api = paddle.nn.functional.elu
np.random.seed(1024)
x = np.random.uniform(-3, 3, self.shape).astype(self.dtype)
......@@ -2414,7 +2410,7 @@ class TestELUAPI(unittest.TestCase):
self.elu = F.elu
def test_static_api(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data('X', [10, 12])
out1 = self.elu(x)
......@@ -2427,7 +2423,6 @@ class TestELUAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, r, rtol=1e-05)
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out1 = self.elu(x)
x = paddle.to_tensor(self.x_np)
......@@ -2444,10 +2439,9 @@ class TestELUAPI(unittest.TestCase):
out_ref = elu(self.x_np, 0.2)
for r in [out1, out2]:
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
paddle.enable_static()
def test_errors(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
# The input type must be Variable.
self.assertRaises(TypeError, self.elu, 1)
......@@ -2469,10 +2463,8 @@ class TestELUInplaceAPI(TestELUAPI):
self.elu = F.elu_
def test_alpha_error(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
self.assertRaises(Exception, F.elu_, x, -0.2)
paddle.enable_static()
def celu(x, alpha):
......@@ -2501,7 +2493,7 @@ class TestCELU(TestActivation):
def test_check_grad(self):
if self.dtype == np.float16:
return
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
class TestCELU_ZeroDim(TestCELU):
......@@ -2525,7 +2517,7 @@ class TestCELUAPI(unittest.TestCase):
self.celu = F.celu
def test_static_api(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data('X', [10, 12])
out1 = self.celu(x, 1.5)
......@@ -2538,7 +2530,6 @@ class TestCELUAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, r, rtol=1e-05)
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out1 = self.celu(x, 1.5)
x = paddle.to_tensor(self.x_np)
......@@ -2555,10 +2546,9 @@ class TestCELUAPI(unittest.TestCase):
out_ref = celu(self.x_np, 0.2)
for r in [out1, out2]:
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
paddle.enable_static()
def test_errors(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
# The input type must be Variable.
self.assertRaises(TypeError, self.celu, 1)
......@@ -2596,10 +2586,10 @@ class TestReciprocal(TestActivation):
def test_check_grad(self):
if self.dtype == np.float16:
return
self.check_grad(['X'], 'Out', max_relative_error=0.01, check_eager=True)
self.check_grad(['X'], 'Out', max_relative_error=0.01)
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
class TestReciprocal_ZeroDim(TestReciprocal):
......@@ -2610,7 +2600,6 @@ class TestReciprocal_ZeroDim(TestReciprocal):
class TestLog(TestActivation):
def setUp(self):
self.op_type = "log"
self.check_eager = True
self.prim_op_type = "prim"
self.python_api = paddle.log
self.init_dtype()
......@@ -2630,11 +2619,17 @@ class TestLog(TestActivation):
def test_check_grad(self):
if self.dtype == np.float16:
return
self.check_grad(['X'], 'Out', check_eager=True, check_prim=True)
self.check_grad(['X'], 'Out', check_prim=True)
def test_error(self):
in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")
with paddle_static_guard():
with paddle_static_guard():
in1 = paddle.static.data(
name="in1", shape=[11, 17], dtype="int32"
)
in2 = paddle.static.data(
name="in2", shape=[11, 17], dtype="int64"
)
self.assertRaises(TypeError, paddle.log, in1)
self.assertRaises(TypeError, paddle.log, in2)
......@@ -2642,7 +2637,7 @@ class TestLog(TestActivation):
class Test_Log_Op_Fp16(unittest.TestCase):
def test_api_fp16(self):
paddle.enable_static()
with paddle_static_guard():
with static.program_guard(
paddle.static.Program(), paddle.static.Program()
):
......@@ -2663,7 +2658,6 @@ class TestLog_ZeroDim(TestLog):
class TestLog2(TestActivation):
def setUp(self):
self.op_type = "log2"
self.check_eager = True
self.python_api = paddle.log2
self.init_dtype()
self.init_shape()
......@@ -2677,9 +2671,10 @@ class TestLog2(TestActivation):
def test_check_grad(self):
if self.dtype == np.float16:
return
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
def test_error(self):
with paddle_static_guard():
in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")
......@@ -2687,6 +2682,7 @@ class TestLog2(TestActivation):
self.assertRaises(TypeError, paddle.log2, in2)
def test_api(self):
with paddle_static_guard():
with paddle.static.program_guard(
paddle.static.Program(), paddle.static.Program()
):
......@@ -2724,7 +2720,6 @@ class TestLog2_ZeroDim(TestLog2):
class TestLog10(TestActivation):
def setUp(self):
self.op_type = "log10"
self.check_eager = True
self.python_api = paddle.log10
self.init_dtype()
self.init_shape()
......@@ -2738,7 +2733,7 @@ class TestLog10(TestActivation):
def test_check_grad(self):
if self.dtype == np.float16:
return
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
class TestLog10_ZeroDim(TestLog10):
......@@ -2748,6 +2743,7 @@ class TestLog10_ZeroDim(TestLog10):
class TestLog10API(unittest.TestCase):
def test_error(self):
with paddle_static_guard():
in1 = paddle.static.data(name="in1", shape=[11, 17], dtype="int32")
in2 = paddle.static.data(name="in2", shape=[11, 17], dtype="int64")
......@@ -2755,6 +2751,7 @@ class TestLog10API(unittest.TestCase):
self.assertRaises(TypeError, paddle.log10, in2)
def test_api(self):
with paddle_static_guard():
with paddle.static.program_guard(
paddle.static.Program(), paddle.static.Program()
):
......@@ -2787,7 +2784,6 @@ class TestLog10API(unittest.TestCase):
class TestLog1p(TestActivation):
def setUp(self):
self.op_type = "log1p"
self.check_eager = True
self.python_api = paddle.log1p
self.init_dtype()
self.init_shape()
......@@ -2802,12 +2798,12 @@ class TestLog1p(TestActivation):
def test_check_grad(self):
if self.dtype == np.float16:
return
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
class Test_Log1p_Op_Fp16(unittest.TestCase):
def test_api_fp16(self):
paddle.enable_static()
with paddle_static_guard():
with static.program_guard(
paddle.static.Program(), paddle.static.Program()
):
......@@ -2827,6 +2823,7 @@ class TestLog1p_ZeroDim(TestLog1p):
class TestLog1pAPI(unittest.TestCase):
def test_api(self):
with paddle_static_guard():
with fluid.program_guard(fluid.Program(), fluid.Program()):
input_x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
data_x = paddle.static.data(
......@@ -2873,12 +2870,10 @@ class TestSquare(TestActivation):
def test_check_grad(self):
if self.dtype == np.float16:
return
self.check_grad(
['X'], 'Out', max_relative_error=0.007, check_eager=True
)
self.check_grad(['X'], 'Out', max_relative_error=0.007)
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
class TestSquare_ZeroDim(TestSquare):
......@@ -2909,20 +2904,17 @@ class TestSquareBF16(OpTest):
def test_check_output(self):
place = core.CUDAPlace(0)
self.check_output_with_place(place, check_eager=True)
self.check_output_with_place(place)
def test_check_grad(self):
place = core.CUDAPlace(0)
self.check_grad_with_place(
place, ['X'], 'Out', numeric_grad_delta=0.5, check_eager=True
)
self.check_grad_with_place(place, ['X'], 'Out', numeric_grad_delta=0.5)
class TestPow(TestActivation):
def setUp(self):
self.op_type = "pow"
self.python_api = paddle.pow
self.check_eager = True
self.init_dtype()
self.init_shape()
......@@ -2935,12 +2927,12 @@ class TestPow(TestActivation):
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output(check_eager=self.check_eager)
self.check_output()
def test_check_grad(self):
if self.dtype == np.float16:
return
self.check_grad(['X'], 'Out', check_eager=self.check_eager)
self.check_grad(['X'], 'Out')
class TestPow_ZeroDim(TestPow):
......@@ -2951,7 +2943,6 @@ class TestPow_ZeroDim(TestPow):
class TestPow_factor_tensor(TestActivation):
def setUp(self):
self.op_type = "pow"
self.check_eager = False
self.python_api = paddle.pow
self.init_dtype()
......@@ -2961,24 +2952,27 @@ class TestPow_factor_tensor(TestActivation):
self.inputs = {
'X': OpTest.np_dtype_to_fluid_dtype(x),
'FactorTensor': np.array([3.0]).astype("float32"),
'FactorTensor': np.array([3.0]).astype(self.dtype),
}
self.attrs = {}
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output(check_eager=self.check_eager)
self.check_output()
def test_check_grad(self):
if self.dtype == np.float16:
return
self.check_grad(['X'], 'Out', check_eager=self.check_eager)
self.check_grad(['X'], 'Out')
def test_api(self):
with paddle_static_guard():
input = np.random.uniform(1, 2, [11, 17]).astype("float32")
x = paddle.static.data(name="x", shape=[11, 17], dtype="float32")
res = paddle.static.data(name="res", shape=[11, 17], dtype="float32")
res = paddle.static.data(
name="res", shape=[11, 17], dtype="float32"
)
factor_1 = 2.0
factor_2 = fluid.layers.fill_constant([1], "float32", 3.0)
......@@ -3014,6 +3008,7 @@ class TestSTanh(TestActivation):
def setUp(self):
self.op_type = "stanh"
self.python_api = paddle.stanh
self.init_dtype()
self.init_shape()
......@@ -3070,7 +3065,7 @@ class TestSTanhAPI(unittest.TestCase):
)
def test_static_api(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data('X', [10, 12])
out = paddle.stanh(x, self.scale_a, self.scale_b)
......@@ -3081,16 +3076,14 @@ class TestSTanhAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, r, rtol=1e-05)
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out = paddle.stanh(x, self.scale_a, self.scale_b)
out_ref = ref_stanh(self.x_np, self.scale_a, self.scale_b)
for r in [out]:
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
paddle.enable_static()
def test_fluid_api(self):
paddle.enable_static()
with paddle_static_guard():
with fluid.program_guard(fluid.Program()):
x = fluid.data('X', [10, 12])
out = paddle.stanh(x, self.scale_a, self.scale_b)
......@@ -3100,7 +3093,7 @@ class TestSTanhAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
def test_errors(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
# The input type must be Variable.
self.assertRaises(TypeError, paddle.stanh, 1)
......@@ -3152,17 +3145,13 @@ class TestSoftplus(TestActivation):
self.attrs = {'beta': beta, "threshold": threshold}
self.outputs = {'Out': out}
self.check_eager = True
def init_shape(self):
self.shape = [10, 12]
def test_check_grad(self):
if self.dtype == np.float16:
return
if hasattr(self, 'check_eager'):
check_eager = self.check_eager
self.check_grad(['X'], 'Out', check_eager=check_eager)
self.check_grad(['X'], 'Out')
class TestSoftplus_ZeroDim(TestSoftplus):
......@@ -3177,6 +3166,7 @@ class TestSoftplusBF16(OpTest):
def setUp(self):
self.op_type = "softplus"
self.init_dtype()
self.python_api = paddle.nn.functional.softplus
beta = 2
threshold = 15
......@@ -3214,7 +3204,7 @@ class TestSoftplusAPI(unittest.TestCase):
)
def test_static_api(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
out1 = F.softplus(x, self.beta, self.threshold)
......@@ -3227,7 +3217,6 @@ class TestSoftplusAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, r, rtol=1e-05)
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out1 = F.softplus(x, self.beta, self.threshold)
softplus = paddle.nn.Softplus(self.beta, self.threshold)
......@@ -3235,10 +3224,9 @@ class TestSoftplusAPI(unittest.TestCase):
out_ref = ref_softplus(self.x_np, self.beta, self.threshold)
for r in [out1, out2]:
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
paddle.enable_static()
def test_errors(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
# The input type must be Variable.
self.assertRaises(TypeError, F.softplus, 1)
......@@ -3279,7 +3267,7 @@ class TestSoftsign(TestActivation):
def test_check_grad(self):
if self.dtype == np.float16:
return
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
class TestSoftsign_ZeroDim(TestSoftsign):
......@@ -3299,7 +3287,7 @@ class TestSoftsignAPI(unittest.TestCase):
)
def test_static_api(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
out1 = F.softsign(x)
......@@ -3312,7 +3300,6 @@ class TestSoftsignAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, r, rtol=1e-05)
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out1 = F.softsign(x)
softsign = paddle.nn.Softsign()
......@@ -3320,10 +3307,9 @@ class TestSoftsignAPI(unittest.TestCase):
out_ref = ref_softsign(self.x_np)
for r in [out1, out2]:
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
paddle.enable_static()
def test_errors(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
# The input type must be Variable.
self.assertRaises(TypeError, F.softsign, 1)
......@@ -3349,6 +3335,7 @@ class TestThresholdedRelu(TestActivation):
self.op_type = "thresholded_relu"
self.init_dtype()
self.init_shape()
self.python_api = paddle.nn.functional.thresholded_relu
threshold = 15
......@@ -3388,7 +3375,7 @@ class TestThresholdedReluAPI(unittest.TestCase):
)
def test_static_api(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
out1 = F.thresholded_relu(x, self.threshold)
......@@ -3401,7 +3388,6 @@ class TestThresholdedReluAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, r, rtol=1e-05)
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out1 = F.thresholded_relu(x, self.threshold)
thresholded_relu = paddle.nn.ThresholdedReLU(self.threshold)
......@@ -3409,10 +3395,9 @@ class TestThresholdedReluAPI(unittest.TestCase):
out_ref = ref_thresholded_relu(self.x_np, self.threshold)
for r in [out1, out2]:
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
paddle.enable_static()
def test_errors(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
# The input type must be Variable.
self.assertRaises(TypeError, F.thresholded_relu, 1)
......@@ -3440,6 +3425,7 @@ class TestHardSigmoid(TestActivation):
self.offset = 0.5
self.set_attrs()
self.init_shape()
self.python_api = paddle.nn.functional.hardsigmoid
x = np.random.uniform(-5, 5, self.shape).astype(self.dtype)
lower_threshold = -self.offset / self.slope
......@@ -3490,6 +3476,7 @@ class TestHardsigmoidAPI(unittest.TestCase):
)
def test_static_api(self):
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
out1 = F.hardsigmoid(x)
......@@ -3502,7 +3489,6 @@ class TestHardsigmoidAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, r, rtol=1e-05)
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out1 = F.hardsigmoid(x)
m = paddle.nn.Hardsigmoid()
......@@ -3510,9 +3496,9 @@ class TestHardsigmoidAPI(unittest.TestCase):
out_ref = ref_hardsigmoid(self.x_np)
for r in [out1, out2]:
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
paddle.enable_static()
def test_fluid_api(self):
with paddle_static_guard():
with fluid.program_guard(fluid.Program()):
x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
......@@ -3521,13 +3507,12 @@ class TestHardsigmoidAPI(unittest.TestCase):
out_ref = ref_hardsigmoid(self.x_np, 0.2, 0.5)
np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out = paddle.nn.functional.hardsigmoid(x, slope=0.2)
np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
paddle.enable_static()
def test_errors(self):
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
# The input type must be Variable.
self.assertRaises(TypeError, F.hardsigmoid, 1)
......@@ -3555,8 +3540,6 @@ class TestSwish(TestActivation):
self.init_dtype()
self.init_shape()
self.check_eager = True
np.random.seed(1024)
x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
out = ref_swish(x)
......@@ -3570,10 +3553,10 @@ class TestSwish(TestActivation):
def test_check_grad(self):
if self.dtype == np.float16:
return
check_eager = False
if hasattr(self, 'check_eager'):
check_eager = self.check_eager
self.check_grad(['X'], 'Out', check_eager=check_eager)
self.check_grad(
['X'],
'Out',
)
class TestSwish_ZeroDim(TestSwish):
......@@ -3593,7 +3576,7 @@ class TestSwishAPI(unittest.TestCase):
)
def test_static_api(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
out1 = F.swish(x)
......@@ -3606,7 +3589,6 @@ class TestSwishAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, r, rtol=1e-05)
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out1 = F.swish(x)
swish = paddle.nn.Swish()
......@@ -3614,10 +3596,9 @@ class TestSwishAPI(unittest.TestCase):
out_ref = ref_swish(self.x_np)
for r in [out1, out2]:
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
paddle.enable_static()
def test_fluid_api(self):
paddle.enable_static()
with paddle_static_guard():
with fluid.program_guard(fluid.Program()):
x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
out = paddle.nn.functional.swish(x)
......@@ -3627,7 +3608,7 @@ class TestSwishAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
def test_errors(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
# The input type must be Variable.
self.assertRaises(TypeError, F.swish, 1)
......@@ -3667,12 +3648,12 @@ class TestMish(TestActivation):
self.shape = [10, 12]
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
if self.dtype == np.float16:
return
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
class TestMish_ZeroDim(TestMish):
......@@ -3692,7 +3673,7 @@ class TestMishAPI(unittest.TestCase):
)
def test_static_api(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.static.data('X', self.x_np.shape, self.x_np.dtype)
out1 = F.mish(x)
......@@ -3705,7 +3686,6 @@ class TestMishAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, r, rtol=1e-05)
def test_dygraph_api(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out1 = F.mish(x)
mish = paddle.nn.Mish()
......@@ -3713,10 +3693,9 @@ class TestMishAPI(unittest.TestCase):
out_ref = ref_mish(self.x_np)
for r in [out1, out2]:
np.testing.assert_allclose(out_ref, r.numpy(), rtol=1e-05)
paddle.enable_static()
def test_fluid_api(self):
paddle.enable_static()
with paddle_static_guard():
with fluid.program_guard(fluid.Program()):
x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
out = paddle.nn.functional.mish(x)
......@@ -3726,7 +3705,7 @@ class TestMishAPI(unittest.TestCase):
np.testing.assert_allclose(out_ref, res[0], rtol=1e-05)
def test_errors(self):
paddle.enable_static()
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
# The input type must be Variable.
self.assertRaises(TypeError, F.mish, 1)
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid.core as core
......@@ -44,10 +44,10 @@ class TestAtan2(OpTest):
self.outputs = {'Out': out}
def test_check_grad(self):
self.check_grad(['X1', 'X2'], 'Out', check_eager=True)
self.check_grad(['X1', 'X2'], 'Out')
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def init_dtype(self):
self.dtype = np.float64
......@@ -67,7 +67,6 @@ class TestAtan2_float(TestAtan2):
self.inputs['X2'],
1 / self.inputs['X1'].size,
),
check_eager=True,
)
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
from test_fusion_lstm_op import ACTIVATION, fc
from test_softmax_op import stable_softmax
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......@@ -65,7 +65,7 @@ class TestAucOp(OpTest):
}
def test_check_output(self):
self.check_output()
self.check_output(check_dygraph=False)
class TestGlobalAucOp(OpTest):
......@@ -105,7 +105,7 @@ class TestGlobalAucOp(OpTest):
}
def test_check_output(self):
self.check_output()
self.check_output(check_dygraph=False)
class TestAucAPI(unittest.TestCase):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
from paddle.fluid import metrics
......@@ -66,7 +66,7 @@ class TestAucSinglePredOp(OpTest):
}
def test_check_output(self):
self.check_output()
self.check_output(check_dygraph=False)
class TestAucGlobalSinglePredOp(OpTest):
......@@ -109,7 +109,7 @@ class TestAucGlobalSinglePredOp(OpTest):
}
def test_check_output(self):
self.check_output()
self.check_output(check_dygraph=False)
if __name__ == "__main__":
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle.fluid.core as core
......
......@@ -16,7 +16,7 @@ import os
import unittest
import numpy as np
from op_test import OpTest, _set_use_system_allocator
from eager_op_test import OpTest, _set_use_system_allocator
import paddle
import paddle.fluid as fluid
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......@@ -135,8 +135,6 @@ class TestBicubicInterpOp(OpTest):
self.init_test_case()
self.op_type = "bicubic_interp"
# NOTE(dev): some AsDispensible input is not used under imperative mode.
# Skip check_eager while found them in Inputs.
self.check_eager = True
input_np = np.random.random(self.input_shape).astype("float64")
if self.data_layout == "NCHW":
......@@ -165,10 +163,8 @@ class TestBicubicInterpOp(OpTest):
self.inputs = {'X': input_np}
if self.out_size is not None:
self.inputs['OutSize'] = self.out_size
self.check_eager = False
if self.actual_shape is not None:
self.inputs['OutSize'] = self.actual_shape
self.check_eager = False
self.attrs = {
'out_h': self.out_h,
......@@ -181,12 +177,10 @@ class TestBicubicInterpOp(OpTest):
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output(check_eager=self.check_eager)
self.check_output()
def test_check_grad(self):
self.check_grad(
['X'], 'Out', in_place=True, check_eager=self.check_eager
)
self.check_grad(['X'], 'Out', in_place=True)
def init_test_case(self):
self.interp_method = 'bicubic'
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......@@ -186,8 +186,6 @@ class TestBicubicInterpOp(OpTest):
self.init_test_case()
self.op_type = "bicubic_interp_v2"
# NOTE(dev): some AsDispensible input is not used under imperative mode.
# Skip check_eager while found them in Inputs.
self.check_eager = True
input_np = np.random.random(self.input_shape).astype("float64")
scale_h = 0
scale_w = 0
......@@ -227,10 +225,8 @@ class TestBicubicInterpOp(OpTest):
self.inputs = {'X': input_np}
if self.out_size is not None:
self.inputs['OutSize'] = self.out_size
self.check_eager = False
if self.actual_shape is not None:
self.inputs['OutSize'] = self.actual_shape
self.check_eager = False
self.attrs = {
'out_h': self.out_h,
......@@ -249,12 +245,10 @@ class TestBicubicInterpOp(OpTest):
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output(check_eager=self.check_eager)
self.check_output()
def test_check_grad(self):
self.check_grad(
['X'], 'Out', in_place=True, check_eager=self.check_eager
)
self.check_grad(['X'], 'Out', in_place=True)
def init_test_case(self):
self.interp_method = 'bicubic'
......
......@@ -16,7 +16,7 @@ import math
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest, paddle_static_guard
import paddle
......@@ -192,6 +192,7 @@ class TestBilateralSliceOp1(TestBilateralSliceOp):
class TestBilateralSliceApi(unittest.TestCase):
def test_api(self):
with paddle_static_guard():
x = paddle.fluid.data(
name='x', shape=[None, 3, 25, 15], dtype='float32'
)
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......@@ -219,10 +219,10 @@ class TestBilinearInterpOp(OpTest):
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', in_place=True, check_eager=True)
self.check_grad(['X'], 'Out', in_place=True)
def init_test_case(self):
self.interp_method = 'bilinear'
......@@ -409,9 +409,7 @@ class TestBilinearInterpOpUint8(OpTest):
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output_with_place(
place=core.CPUPlace(), atol=1, check_eager=True
)
self.check_output_with_place(place=core.CPUPlace(), atol=1)
def init_test_case(self):
self.interp_method = 'bilinear'
......@@ -585,10 +583,10 @@ class TestBilinearInterpOp_attr_tensor(OpTest):
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', in_place=True, check_eager=True)
self.check_grad(['X'], 'Out', in_place=True)
def init_test_case(self):
self.interp_method = 'bilinear'
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest, paddle_static_guard
import paddle
import paddle.fluid as fluid
......@@ -23,6 +23,7 @@ import paddle.fluid as fluid
class TestDygraphBilinearTensorProductAPIError(unittest.TestCase):
def test_errors(self):
with paddle_static_guard():
with fluid.program_guard(fluid.Program(), fluid.Program()):
layer = paddle.nn.Bilinear(5, 4, 1000)
# the input must be Variable.
......@@ -73,10 +74,10 @@ class TestBilinearTensorProductOp(OpTest):
self.outputs = {'Out': output + bias}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad_normal(self):
self.check_grad(['X', 'Y', 'Weight', 'Bias'], 'Out', check_eager=True)
self.check_grad(['X', 'Y', 'Weight', 'Bias'], 'Out')
if __name__ == "__main__":
......
......@@ -17,7 +17,7 @@ import tempfile
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......@@ -150,7 +150,7 @@ class TestBincountOp(OpTest):
self.Out = np.bincount(self.np_input, minlength=self.minlength)
def test_check_output(self):
self.check_output(check_eager=False)
self.check_output()
class TestCase1(TestBincountOp):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
def bipartite_match(distance, match_indices, match_dist):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest, paddle_static_guard
import paddle
import paddle.fluid as fluid
......@@ -32,14 +32,15 @@ class TestBmmOp(OpTest):
self.outputs = {'Out': Out}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_checkout_grad(self):
self.check_grad(['X', 'Y'], 'Out', check_eager=True)
self.check_grad(['X', 'Y'], 'Out')
class API_TestBmm(unittest.TestCase):
def test_out(self):
with paddle_static_guard():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = paddle.static.data(
'data1', shape=[-1, 3, 4], dtype='float64'
......@@ -53,7 +54,8 @@ class API_TestBmm(unittest.TestCase):
input1 = np.random.random([10, 3, 4]).astype('float64')
input2 = np.random.random([10, 4, 5]).astype('float64')
(result,) = exe.run(
feed={"data1": input1, "data2": input2}, fetch_list=[result_bmm]
feed={"data1": input1, "data2": input2},
fetch_list=[result_bmm],
)
expected_result = np.matmul(input1, input2)
np.testing.assert_allclose(expected_result, result, rtol=1e-05)
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
def box_clip(input_box, im_info, output_box):
......