Unverified commit 6261076c, authored by wanghuancoder, committed by GitHub

Del old dygraph optest5 (#51686)

* delete old dygraph op test
Parent 3d78e759
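Note: every hunk below applies the same mechanical migration to PaddlePaddle's op unit tests. The import switches from the legacy op_test module to eager_op_test; the per-call check_eager=True (or check_eager=False) argument to check_output, check_grad, and their _with_place variants is dropped, since eager-mode checking is evidently the default behavior of the new harness; the collective tests likewise drop eager_mode=True from run_mnist_2gpu; and static-graph API code gets wrapped in paddle_static_guard(). A minimal sketch of the core before/after pattern, using a hypothetical TestFooOp for illustration (class bodies abbreviated):

# Before the change: the legacy harness, with eager-mode checking
# opted into on every call.
from op_test import OpTest

class TestFooOp(OpTest):  # hypothetical op test
    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)

# After the change: the eager harness is imported instead, and the
# flag disappears from every call site.
from eager_op_test import OpTest

class TestFooOp(OpTest):  # same test, migrated
    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')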
......@@ -19,7 +19,7 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus
class TestCollectiveAllToAllSingle(TestMultipleGpus):
def test_collective_alltoall_single(self):
self.run_mnist_2gpu('collective_alltoall_single.py', eager_mode=True)
self.run_mnist_2gpu('collective_alltoall_single.py')
if __name__ == "__main__":
......
......@@ -19,7 +19,7 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus
class TestCollectiveBatchIsendIrecv(TestMultipleGpus):
def test_collective_batch_isend_irecv(self):
self.run_mnist_2gpu('collective_batch_isend_irecv.py', eager_mode=True)
self.run_mnist_2gpu('collective_batch_isend_irecv.py')
if __name__ == "__main__":
......
......@@ -19,7 +19,7 @@ from test_parallel_dygraph_dataparallel import TestMultipleGpus
class TestCollectiveReduceScatter(TestMultipleGpus):
def test_collective_reduce_scatter(self):
self.run_mnist_2gpu('collective_reduce_scatter.py', eager_mode=True)
self.run_mnist_2gpu('collective_reduce_scatter.py')
if __name__ == "__main__":
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest, convert_float_to_uint16
from eager_op_test import OpTest, convert_float_to_uint16
import paddle
import paddle.fluid as fluid
......@@ -45,10 +45,10 @@ class TestGatherOp(OpTest):
self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True, check_prim=True)
self.check_grad(['X'], 'Out', check_prim=True)
def config(self):
"""
......@@ -194,10 +194,10 @@ class TestGatherBF16Op(OpTest):
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', numeric_grad_delta=0.5, check_eager=True)
self.check_grad(['X'], 'Out', numeric_grad_delta=0.5)
def config(self):
"""
......@@ -223,10 +223,10 @@ class TestGatherOp1(OpTest):
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
def config(self):
"""
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
from paddle.fluid.framework import Program, program_guard
......@@ -36,7 +36,7 @@ class TestGatherTreeOp(OpTest):
self.outputs = {'Out': self.backtrace(ids, parents)}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
@staticmethod
def backtrace(ids, parents):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
......@@ -49,12 +49,10 @@ class TestGraphSendRecvMaxOp(OpTest):
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(
['X'], 'Out', user_defined_grads=[self.gradient], check_eager=True
)
self.check_grad(['X'], 'Out', user_defined_grads=[self.gradient])
class TestGraphSendRecvMinOp(OpTest):
......@@ -79,12 +77,10 @@ class TestGraphSendRecvMinOp(OpTest):
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(
['X'], 'Out', user_defined_grads=[self.gradient], check_eager=True
)
self.check_grad(['X'], 'Out', user_defined_grads=[self.gradient])
class TestGraphSendRecvSumOp(OpTest):
......@@ -107,10 +103,10 @@ class TestGraphSendRecvSumOp(OpTest):
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
class TestGraphSendRecvMeanOp(OpTest):
......@@ -135,10 +131,10 @@ class TestGraphSendRecvMeanOp(OpTest):
self.outputs = {'Out': out, 'Dst_count': dst_count}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
def compute_graph_send_recv_for_sum_mean(inputs, attributes):
......
......@@ -16,7 +16,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid.core as core
......@@ -314,10 +314,10 @@ class TestGraphSendUERecvSumOp(OpTest):
self.message_op = 'ADD'
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X', 'Y'], 'Out', check_eager=True)
self.check_grad(['X', 'Y'], 'Out')
class TestSumCase1(TestGraphSendUERecvSumOp):
......@@ -420,10 +420,10 @@ class TestGraphSendUERecvMeanOp(OpTest):
self.message_op = 'ADD'
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X', 'Y'], 'Out', check_eager=True)
self.check_grad(['X', 'Y'], 'Out')
class TestMeanCase1(TestGraphSendUERecvMeanOp):
......@@ -526,14 +526,13 @@ class TestGraphSendUERecvMaxOp(OpTest):
self.message_op = 'ADD'
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(
['X', 'Y'],
'Out',
user_defined_grads=self.gradients,
check_eager=True,
)
......@@ -637,14 +636,13 @@ class TestGraphSendUERecvMinOp(OpTest):
self.message_op = 'ADD'
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(
['X', 'Y'],
'Out',
user_defined_grads=self.gradients,
check_eager=True,
)
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
......@@ -63,10 +63,10 @@ class TestGraphSendUVOp(OpTest):
self.outputs = {'out': out}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['x', 'y'], 'out', check_eager=True)
self.check_grad(['x', 'y'], 'out')
def set_config(self):
self.x = np.random.random((10, 20)).astype("float64")
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest, skip_check_grad_ci
from eager_op_test import OpTest, skip_check_grad_ci
import paddle
import paddle.fluid.core as core
......@@ -379,7 +379,7 @@ class TestGridSamplerOp(OpTest):
}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad_normal(self):
self.check_grad(
......@@ -387,7 +387,6 @@ class TestGridSamplerOp(OpTest):
'Output',
max_relative_error=0.01,
numeric_grad_delta=self.numeric_grad_delta,
check_eager=True,
)
def initTestCase(self):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
class TestHingeLossOp(OpTest):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......@@ -154,7 +154,7 @@ class TestHistogramOp(OpTest):
self.attrs = {"bins": self.bins, "min": self.min, "max": self.max}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
class TestHistogramOp_ZeroDim(TestHistogramOp):
......
......@@ -16,7 +16,7 @@ import math
import unittest
import numpy as np
from op_test import OpTest, skip_check_grad_ci
from eager_op_test import OpTest, skip_check_grad_ci
import paddle
import paddle.fluid as fluid
......@@ -219,14 +219,13 @@ class TestHSigmoidOp(OpTest):
self.user_grads = hsigmoid_grad(x, w, label, bias, num_classes)
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(
['X', 'W', 'Bias'],
['Out'],
user_defined_grads=self.user_grads,
check_eager=True,
)
......@@ -280,7 +279,7 @@ class TestHSigmoidOpSparse(OpTest):
self.outputs = {'PreOut': pre_output, 'Out': out}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
class TestHSigmoidOpWithSparseGrad(unittest.TestCase):
......@@ -416,14 +415,13 @@ class TestHSigmoidOpWithCostumTree(OpTest):
self.outputs = {'PreOut': pre_output, 'Out': out}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(
['Bias', 'X', 'W'],
['Out'],
no_grad_set=set('Label'),
check_eager=True,
)
......@@ -482,12 +480,10 @@ class TestHSigmoidOpWithCostumTreeWithoutBias(OpTest):
self.outputs = {'PreOut': pre_output, 'Out': out}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(
['X', 'W'], ['Out'], no_grad_set=set('Label'), check_eager=True
)
self.check_grad(['X', 'W'], ['Out'], no_grad_set=set('Label'))
class TestHSigmoidLossAPI(unittest.TestCase):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......@@ -48,12 +48,12 @@ class TestIdentityLossOp(OpTest):
def test_check_output(self):
paddle.enable_static()
self.check_output(check_eager=True)
self.check_output()
paddle.disable_static()
def test_check_grad_normal(self):
paddle.enable_static()
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
paddle.disable_static()
def initTestCase(self):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
from paddle.fluid import Program
......@@ -93,10 +93,10 @@ class TestIndexAddOp(OpTest):
self.add_value_shape = (3, 3)
def test_check_output(self):
self.check_output(check_eager=True, atol=1e-2)
self.check_output(atol=1e-2)
def test_check_grad_normal(self):
self.check_grad(['X', 'AddValue'], 'Out', check_eager=True)
self.check_grad(['X', 'AddValue'], 'Out')
class TestIndexAddAPI(unittest.TestCase):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......@@ -40,10 +40,10 @@ class TestIndexSampleOp(OpTest):
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
def config(self):
"""
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......@@ -40,10 +40,10 @@ class TestInverseOp(OpTest):
self.outputs = {'Output': inverse}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_grad(self):
self.check_grad(['Input'], 'Output', check_eager=True)
self.check_grad(['Input'], 'Output')
class TestInverseOpBatched(TestInverseOp):
......@@ -60,9 +60,7 @@ class TestInverseOpLarge(TestInverseOp):
self.python_api = paddle.tensor.math.inverse
def test_grad(self):
self.check_grad(
['Input'], 'Output', max_relative_error=1e-6, check_eager=True
)
self.check_grad(['Input'], 'Output', max_relative_error=1e-6)
class TestInverseOpFP32(TestInverseOp):
......@@ -72,9 +70,7 @@ class TestInverseOpFP32(TestInverseOp):
self.python_api = paddle.tensor.math.inverse
def test_grad(self):
self.check_grad(
['Input'], 'Output', max_relative_error=1e-2, check_eager=True
)
self.check_grad(['Input'], 'Output', max_relative_error=1e-2)
class TestInverseOpBatchedFP32(TestInverseOpFP32):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy.random as random
from op_test import OpTest
from eager_op_test import OpTest
class TestIOUSimilarityOp(OpTest):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid.core as core
......@@ -56,7 +56,7 @@ class TestIscloseOp(OpTest):
}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
class TestIscloseOpException(TestIscloseOp):
......@@ -64,28 +64,28 @@ class TestIscloseOpException(TestIscloseOp):
def test_rtol_num():
self.inputs['Rtol'] = np.array([1e-05, 1e-05]).astype("float64")
self.inputs['Atol'] = np.array([1e-08]).astype("float64")
self.check_output(check_eager=True)
self.check_output()
self.assertRaises(ValueError, test_rtol_num)
def test_rtol_type():
self.inputs['Rtol'] = np.array([5]).astype("int32")
self.inputs['Atol'] = np.array([1e-08]).astype("float64")
self.check_output(check_eager=True)
self.check_output()
self.assertRaises(ValueError, test_rtol_type)
def test_atol_num():
self.inputs['Rtol'] = np.array([1e-05]).astype("float64")
self.inputs['Atol'] = np.array([1e-08, 1e-08]).astype("float64")
self.check_output(check_eager=True)
self.check_output()
self.assertRaises(ValueError, test_atol_num)
def test_atol_type():
self.inputs['Rtol'] = np.array([1e-05]).astype("float64")
self.inputs['Atol'] = np.array([8]).astype("int32")
self.check_output(check_eager=True)
self.check_output()
self.assertRaises(ValueError, test_atol_type)
......@@ -239,7 +239,7 @@ class TestIscloseOpFloat16(TestIscloseOp):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, check_eager=True)
self.check_output_with_place(place)
class TestIscloseOpFloat32(TestIscloseOp):
......@@ -260,7 +260,7 @@ class TestIscloseOpFloat64(TestIscloseOp):
self.equal_nan = False
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
class TestIscloseOpLargeDimInput(TestIscloseOp):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle.fluid.core as core
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest, paddle_static_guard
import paddle
from paddle.nn.functional import kl_div
......@@ -55,12 +55,10 @@ class TestKLDivLossOp(OpTest):
self.outputs = {'Loss': loss.astype('float64')}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(
['X'], 'Loss', no_grad_set=set(["Target"]), check_eager=True
)
self.check_grad(['X'], 'Loss', no_grad_set=set(["Target"]))
def initTestCase(self):
self.x_shape = (4, 5, 5)
......@@ -114,12 +112,13 @@ class TestKLDivLossDygraph(unittest.TestCase):
self.run_kl_loss('none')
def test_kl_loss_static_api(self):
input = paddle.static.data(name='input', shape=[5, 20])
label = paddle.static.data(name='label', shape=[5, 20])
with paddle_static_guard():
input = paddle.static.data(name='input', shape=[5, 20])
label = paddle.static.data(name='label', shape=[5, 20])
paddle.nn.functional.kl_div(input, label)
paddle.nn.functional.kl_div(input, label, 'sum')
paddle.nn.functional.kl_div(input, label, 'batchmean')
paddle.nn.functional.kl_div(input, label)
paddle.nn.functional.kl_div(input, label, 'sum')
paddle.nn.functional.kl_div(input, label, 'batchmean')
class TestKLDivLossTypePromotion(unittest.TestCase):
......
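Several of the migrated files (test_kldiv_loss_op above, and the linspace, local_response_norm, margin_cross_entropy, margin_rank_loss, and mm tests further down) also wrap their static-graph code in paddle_static_guard(), newly imported from eager_op_test. Judging purely from how it is used in these hunks, the guard is presumably a context manager along these lines (an assumption, not the actual implementation):

import contextlib

import paddle

@contextlib.contextmanager
def paddle_static_guard():
    # Assumed behavior: run the enclosed block in static-graph mode,
    # then restore dynamic (eager) mode however the block exits.
    try:
        paddle.enable_static()
        yield
    finally:
        paddle.disable_static()

This would keep the new eager-mode test harness in dynamic mode by default while still letting individual tests exercise paddle.static APIs.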
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......@@ -37,16 +37,16 @@ class TestKronOp(OpTest):
return "float64"
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X', 'Y'], 'Out', check_eager=True)
self.check_grad(['X', 'Y'], 'Out')
def test_check_grad_ignore_x(self):
self.check_grad(['Y'], 'Out', no_grad_set=set('X'), check_eager=True)
self.check_grad(['Y'], 'Out', no_grad_set=set('X'))
def test_check_grad_ignore_y(self):
self.check_grad(['X'], 'Out', no_grad_set=set('Y'), check_eager=True)
self.check_grad(['X'], 'Out', no_grad_set=set('Y'))
class TestKronOp2(TestKronOp):
......@@ -168,7 +168,7 @@ class TestComplexKronOp(OpTest):
return grad_y
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad_normal(self):
self.check_grad(
......@@ -176,7 +176,6 @@ class TestComplexKronOp(OpTest):
'Out',
user_defined_grads=[self.grad_x, self.grad_y],
user_defined_grad_outputs=[self.grad_out],
check_eager=True,
)
def test_check_grad_ingore_x(self):
......@@ -186,7 +185,6 @@ class TestComplexKronOp(OpTest):
no_grad_set=set("X"),
user_defined_grads=[self.grad_y],
user_defined_grad_outputs=[self.grad_out],
check_eager=True,
)
def test_check_grad_ingore_y(self):
......@@ -196,7 +194,6 @@ class TestComplexKronOp(OpTest):
no_grad_set=set('Y'),
user_defined_grads=[self.grad_x],
user_defined_grad_outputs=[self.grad_out],
check_eager=True,
)
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......@@ -54,11 +54,11 @@ class TestKthvalueOp(OpTest):
def test_check_output(self):
paddle.enable_static()
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
paddle.enable_static()
self.check_grad(set(['X']), 'Out', check_eager=True)
self.check_grad(set(['X']), 'Out')
class TestKthvalueOpWithKeepdim(OpTest):
......@@ -81,11 +81,11 @@ class TestKthvalueOpWithKeepdim(OpTest):
def test_check_output(self):
paddle.enable_static()
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
paddle.enable_static()
self.check_grad(set(['X']), 'Out', check_eager=True)
self.check_grad(set(['X']), 'Out')
class TestKthvalueOpKernels(unittest.TestCase):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
class TestL1NormOp(OpTest):
......
......@@ -17,7 +17,7 @@ from functools import reduce
from operator import mul
import numpy as np
from op_test import _set_use_system_allocator
from eager_op_test import _set_use_system_allocator
import paddle
import paddle.fluid as fluid
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid.core as core
......@@ -43,10 +43,10 @@ class TestLerp(OpTest):
self.shape = [100]
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X', 'Y'], 'Out', check_eager=True)
self.check_grad(['X', 'Y'], 'Out')
class TestLerpWithDim2(TestLerp):
......
......@@ -16,7 +16,7 @@ import math
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
from scipy import special
import paddle
......@@ -42,10 +42,10 @@ class TestLgammaOp(OpTest):
self.dtype = np.float64
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad_normal(self):
self.check_grad(['X'], 'Out', numeric_grad_delta=1e-7, check_eager=True)
self.check_grad(['X'], 'Out', numeric_grad_delta=1e-7)
class TestLgammaOpFp32(TestLgammaOp):
......@@ -53,9 +53,7 @@ class TestLgammaOpFp32(TestLgammaOp):
self.dtype = np.float32
def test_check_grad_normal(self):
self.check_grad(
['X'], 'Out', numeric_grad_delta=0.005, check_eager=True
)
self.check_grad(['X'], 'Out', numeric_grad_delta=0.005)
class TestLgammaOpApi(unittest.TestCase):
......
......@@ -16,7 +16,7 @@ import random
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
class LinearChainCrfForward:
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest, paddle_static_guard
import paddle
import paddle.fluid as fluid
......@@ -37,7 +37,7 @@ class TestLinspaceOpCommonCase(OpTest):
self.outputs = {'Out': np.arange(0, 11).astype(dtype)}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
class TestLinspaceOpReverseCase(OpTest):
......@@ -55,7 +55,7 @@ class TestLinspaceOpReverseCase(OpTest):
self.outputs = {'Out': np.arange(10, -1, -1).astype(dtype)}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
class TestLinspaceOpNumOneCase(OpTest):
......@@ -73,56 +73,55 @@ class TestLinspaceOpNumOneCase(OpTest):
self.outputs = {'Out': np.array(10, dtype=dtype)}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
class TestLinspaceAPI(unittest.TestCase):
def test_variable_input1(self):
start = paddle.full(shape=[1], fill_value=0, dtype='float32')
stop = paddle.full(shape=[1], fill_value=10, dtype='float32')
num = paddle.full(shape=[1], fill_value=5, dtype='int32')
out = paddle.linspace(start, stop, num, dtype='float32')
exe = fluid.Executor(place=fluid.CPUPlace())
res = exe.run(fluid.default_main_program(), fetch_list=[out])
np_res = np.linspace(0, 10, 5, dtype='float32')
self.assertEqual((res == np_res).all(), True)
with paddle_static_guard():
start = paddle.full(shape=[1], fill_value=0, dtype='float32')
stop = paddle.full(shape=[1], fill_value=10, dtype='float32')
num = paddle.full(shape=[1], fill_value=5, dtype='int32')
out = paddle.linspace(start, stop, num, dtype='float32')
exe = fluid.Executor(place=fluid.CPUPlace())
res = exe.run(fluid.default_main_program(), fetch_list=[out])
np_res = np.linspace(0, 10, 5, dtype='float32')
self.assertEqual((res == np_res).all(), True)
def test_variable_input2(self):
paddle.disable_static()
start = paddle.full(shape=[1], fill_value=0, dtype='float32')
stop = paddle.full(shape=[1], fill_value=10, dtype='float32')
num = paddle.full(shape=[1], fill_value=5, dtype='int32')
out = paddle.linspace(start, stop, num, dtype='float32')
np_res = np.linspace(0, 10, 5, dtype='float32')
self.assertEqual((out.numpy() == np_res).all(), True)
paddle.enable_static()
def test_dtype(self):
out_1 = paddle.linspace(0, 10, 5, dtype='float32')
out_2 = paddle.linspace(0, 10, 5, dtype=np.float32)
out_3 = paddle.linspace(0, 10, 5, dtype=core.VarDesc.VarType.FP32)
exe = fluid.Executor(place=fluid.CPUPlace())
res_1, res_2, res_3 = exe.run(
fluid.default_main_program(), fetch_list=[out_1, out_2, out_3]
)
assert np.array_equal(res_1, res_2)
with paddle_static_guard():
out_1 = paddle.linspace(0, 10, 5, dtype='float32')
out_2 = paddle.linspace(0, 10, 5, dtype=np.float32)
out_3 = paddle.linspace(0, 10, 5, dtype=core.VarDesc.VarType.FP32)
exe = fluid.Executor(place=fluid.CPUPlace())
res_1, res_2, res_3 = exe.run(
fluid.default_main_program(), fetch_list=[out_1, out_2, out_3]
)
assert np.array_equal(res_1, res_2)
def test_name(self):
with paddle.static.program_guard(paddle.static.Program()):
out = paddle.linspace(
0, 10, 5, dtype='float32', name='linspace_res'
)
assert 'linspace_res' in out.name
with paddle_static_guard():
with paddle.static.program_guard(paddle.static.Program()):
out = paddle.linspace(
0, 10, 5, dtype='float32', name='linspace_res'
)
assert 'linspace_res' in out.name
def test_imperative(self):
paddle.disable_static()
out1 = paddle.linspace(0, 10, 5, dtype='float32')
np_out1 = np.linspace(0, 10, 5, dtype='float32')
out2 = paddle.linspace(0, 10, 5, dtype='int32')
np_out2 = np.linspace(0, 10, 5, dtype='int32')
out3 = paddle.linspace(0, 10, 200, dtype='int32')
np_out3 = np.linspace(0, 10, 200, dtype='int32')
paddle.enable_static()
self.assertEqual((out1.numpy() == np_out1).all(), True)
self.assertEqual((out2.numpy() == np_out2).all(), True)
self.assertEqual((out3.numpy() == np_out3).all(), True)
......@@ -130,52 +129,57 @@ class TestLinspaceAPI(unittest.TestCase):
class TestLinspaceOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
with paddle_static_guard():
with program_guard(Program(), Program()):
def test_dtype():
paddle.linspace(0, 10, 1, dtype="int8")
def test_dtype():
paddle.linspace(0, 10, 1, dtype="int8")
self.assertRaises(TypeError, test_dtype)
self.assertRaises(TypeError, test_dtype)
def test_dtype1():
paddle.linspace(0, 10, 1.33, dtype="int32")
def test_dtype1():
paddle.linspace(0, 10, 1.33, dtype="int32")
self.assertRaises(TypeError, test_dtype1)
self.assertRaises(TypeError, test_dtype1)
def test_start_type():
paddle.linspace([0], 10, 1, dtype="float32")
def test_start_type():
paddle.linspace([0], 10, 1, dtype="float32")
self.assertRaises(TypeError, test_start_type)
self.assertRaises(TypeError, test_start_type)
def test_end_type():
paddle.linspace(0, [10], 1, dtype="float32")
def test_end_type():
paddle.linspace(0, [10], 1, dtype="float32")
self.assertRaises(TypeError, test_end_type)
self.assertRaises(TypeError, test_end_type)
def test_step_dtype():
paddle.linspace(0, 10, [0], dtype="float32")
def test_step_dtype():
paddle.linspace(0, 10, [0], dtype="float32")
self.assertRaises(TypeError, test_step_dtype)
self.assertRaises(TypeError, test_step_dtype)
def test_start_dtype():
start = paddle.static.data(
shape=[1], dtype="float64", name="start"
)
paddle.linspace(start, 10, 1, dtype="float32")
def test_start_dtype():
start = paddle.static.data(
shape=[1], dtype="float64", name="start"
)
paddle.linspace(start, 10, 1, dtype="float32")
self.assertRaises(ValueError, test_start_dtype)
self.assertRaises(ValueError, test_start_dtype)
def test_end_dtype():
end = paddle.static.data(shape=[1], dtype="float64", name="end")
paddle.linspace(0, end, 1, dtype="float32")
def test_end_dtype():
end = paddle.static.data(
shape=[1], dtype="float64", name="end"
)
paddle.linspace(0, end, 1, dtype="float32")
self.assertRaises(ValueError, test_end_dtype)
self.assertRaises(ValueError, test_end_dtype)
def test_num_dtype():
num = paddle.static.data(shape=[1], dtype="int32", name="step")
paddle.linspace(0, 10, num, dtype="float32")
def test_num_dtype():
num = paddle.static.data(
shape=[1], dtype="int32", name="step"
)
paddle.linspace(0, 10, num, dtype="float32")
self.assertRaises(TypeError, test_step_dtype)
self.assertRaises(TypeError, test_step_dtype)
if __name__ == "__main__":
......
......@@ -16,7 +16,7 @@ import copy
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
from test_multiclass_nms_op import iou
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
class TestLodResetOpByAttr(OpTest):
......
......@@ -63,12 +63,10 @@ class TestLogSoftmaxOp(OpTest):
pass
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(
['X'], ['Out'], user_defined_grads=[self.x_grad], check_eager=True
)
self.check_grad(['X'], ['Out'], user_defined_grads=[self.x_grad])
class TestLogSoftmaxOp_ZeroDim(TestLogSoftmaxOp):
......@@ -85,10 +83,10 @@ class TestLogSoftmaxOp_ZeroDim(TestLogSoftmaxOp):
self.attrs = {'axis': -1}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], ['Out'], check_eager=True)
self.check_grad(['X'], ['Out'])
class TestLogSoftmaxShape(TestLogSoftmaxOp):
......@@ -122,7 +120,7 @@ class TestLogSoftmaxBF16Op(OpTest):
def test_check_output(self):
place = core.CUDAPlace(0)
self.check_output_with_place(place, check_eager=True)
self.check_output_with_place(place)
def test_check_grad(self):
place = core.CUDAPlace(0)
......@@ -131,7 +129,6 @@ class TestLogSoftmaxBF16Op(OpTest):
['X'],
['Out'],
user_defined_grads=[self.x_grad],
check_eager=True,
)
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
from paddle.fluid import core
......@@ -59,12 +59,10 @@ class TestLogitOp(OpTest):
self.eps = 1e-8
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(
['X'], ['Out'], user_defined_grads=[self.x_grad], check_eager=True
)
self.check_grad(['X'], ['Out'], user_defined_grads=[self.x_grad])
class TestLogitOpFp32(TestLogitOp):
......@@ -74,12 +72,10 @@ class TestLogitOpFp32(TestLogitOp):
self.eps = 1e-8
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(
['X'], ['Out'], user_defined_grads=[self.x_grad], check_eager=True
)
self.check_grad(['X'], ['Out'], user_defined_grads=[self.x_grad])
class TestLogitOpFp16(TestLogitOp):
......@@ -89,12 +85,10 @@ class TestLogitOpFp16(TestLogitOp):
self.eps = 1e-8
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(
['X'], ['Out'], user_defined_grads=[self.x_grad], check_eager=True
)
self.check_grad(['X'], ['Out'], user_defined_grads=[self.x_grad])
@unittest.skipIf(
......@@ -122,7 +116,7 @@ class TestLogitOpBf16(OpTest):
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_output_with_place(place, check_eager=True)
self.check_output_with_place(place)
def test_check_grad(self):
if core.is_compiled_with_cuda():
......@@ -132,7 +126,6 @@ class TestLogitOpBf16(OpTest):
['X'],
['Out'],
user_defined_grads=[self.x_grad],
check_eager=True,
)
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid.core as core
......@@ -87,7 +87,7 @@ class TestLogsumexp(OpTest):
pass
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(
......@@ -95,7 +95,6 @@ class TestLogsumexp(OpTest):
['Out'],
user_defined_grads=self.user_defined_grads,
user_defined_grad_outputs=self.user_defined_grad_outputs,
check_eager=True,
)
def calc_grad(self):
......
......@@ -16,7 +16,7 @@ import struct
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
class TestLookupTableDequantOp(OpTest):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest, paddle_static_guard
import paddle
import paddle.fluid as fluid
......@@ -116,94 +116,104 @@ class TestLocalResponseNormFAPI(unittest.TestCase):
self.places.append(fluid.CUDAPlace(0))
def check_static_3d_input(self, place):
with fluid.program_guard(fluid.Program(), fluid.Program()):
in_np1 = np.random.random([3, 40, 40]).astype("float32")
in_np2 = np.transpose(in_np1, (0, 2, 1))
with paddle_static_guard():
with fluid.program_guard(fluid.Program(), fluid.Program()):
in_np1 = np.random.random([3, 40, 40]).astype("float32")
in_np2 = np.transpose(in_np1, (0, 2, 1))
input1 = paddle.static.data(
name="input1", shape=[3, 40, 40], dtype="float32"
)
input2 = paddle.static.data(
name="input2", shape=[3, 40, 40], dtype="float32"
)
res1 = paddle.nn.functional.local_response_norm(
x=input1, size=5, data_format='NCL'
)
res2 = paddle.nn.functional.local_response_norm(
x=input2, size=5, data_format='NLC'
)
exe = fluid.Executor(place)
fetches = exe.run(
fluid.default_main_program(),
feed={"input1": in_np1, "input2": in_np2},
fetch_list=[res1, res2],
)
input1 = paddle.static.data(
name="input1", shape=[3, 40, 40], dtype="float32"
)
input2 = paddle.static.data(
name="input2", shape=[3, 40, 40], dtype="float32"
)
res1 = paddle.nn.functional.local_response_norm(
x=input1, size=5, data_format='NCL'
)
res2 = paddle.nn.functional.local_response_norm(
x=input2, size=5, data_format='NLC'
)
exe = fluid.Executor(place)
fetches = exe.run(
fluid.default_main_program(),
feed={"input1": in_np1, "input2": in_np2},
fetch_list=[res1, res2],
)
fetches1_tran = np.transpose(fetches[1], (0, 2, 1))
np.testing.assert_allclose(fetches[0], fetches1_tran, rtol=1e-05)
fetches1_tran = np.transpose(fetches[1], (0, 2, 1))
np.testing.assert_allclose(
fetches[0], fetches1_tran, rtol=1e-05
)
def check_static_4d_input(self, place):
with fluid.program_guard(fluid.Program(), fluid.Program()):
input1 = paddle.static.data(
name="input1", shape=[3, 3, 40, 40], dtype="float32"
)
input2 = paddle.static.data(
name="input2", shape=[3, 40, 40, 3], dtype="float32"
)
with paddle_static_guard():
with fluid.program_guard(fluid.Program(), fluid.Program()):
input1 = paddle.static.data(
name="input1", shape=[3, 3, 40, 40], dtype="float32"
)
input2 = paddle.static.data(
name="input2", shape=[3, 40, 40, 3], dtype="float32"
)
res1 = paddle.nn.functional.local_response_norm(
x=input1, size=5, data_format='NCHW'
)
res2 = paddle.nn.functional.local_response_norm(
x=input2, size=5, data_format='NHWC'
)
res1 = paddle.nn.functional.local_response_norm(
x=input1, size=5, data_format='NCHW'
)
res2 = paddle.nn.functional.local_response_norm(
x=input2, size=5, data_format='NHWC'
)
in_np1 = np.random.random([3, 3, 40, 40]).astype("float32")
in_np2 = np.transpose(in_np1, (0, 2, 3, 1))
in_np1 = np.random.random([3, 3, 40, 40]).astype("float32")
in_np2 = np.transpose(in_np1, (0, 2, 3, 1))
exe = fluid.Executor(place)
fetches = exe.run(
fluid.default_main_program(),
feed={"input1": in_np1, "input2": in_np2},
fetch_list=[res1, res2],
)
exe = fluid.Executor(place)
fetches = exe.run(
fluid.default_main_program(),
feed={"input1": in_np1, "input2": in_np2},
fetch_list=[res1, res2],
)
fetches1_tran = np.transpose(fetches[1], (0, 3, 1, 2))
np.testing.assert_allclose(fetches[0], fetches1_tran, rtol=1e-05)
fetches1_tran = np.transpose(fetches[1], (0, 3, 1, 2))
np.testing.assert_allclose(
fetches[0], fetches1_tran, rtol=1e-05
)
def check_static_5d_input(self, place):
with fluid.program_guard(fluid.Program(), fluid.Program()):
input1 = paddle.static.data(
name="input1", shape=[3, 3, 3, 40, 40], dtype="float32"
)
input2 = paddle.static.data(
name="input2", shape=[3, 3, 40, 40, 3], dtype="float32"
)
res1 = paddle.nn.functional.local_response_norm(
x=input1, size=5, data_format='NCDHW'
)
res2 = paddle.nn.functional.local_response_norm(
x=input2, size=5, data_format='NDHWC'
)
with paddle_static_guard():
with fluid.program_guard(fluid.Program(), fluid.Program()):
input1 = paddle.static.data(
name="input1", shape=[3, 3, 3, 40, 40], dtype="float32"
)
input2 = paddle.static.data(
name="input2", shape=[3, 3, 40, 40, 3], dtype="float32"
)
res1 = paddle.nn.functional.local_response_norm(
x=input1, size=5, data_format='NCDHW'
)
res2 = paddle.nn.functional.local_response_norm(
x=input2, size=5, data_format='NDHWC'
)
in_np1 = np.random.random([3, 3, 3, 40, 40]).astype("float32")
in_np2 = np.transpose(in_np1, (0, 2, 3, 4, 1))
in_np1 = np.random.random([3, 3, 3, 40, 40]).astype("float32")
in_np2 = np.transpose(in_np1, (0, 2, 3, 4, 1))
exe = fluid.Executor(place)
fetches = exe.run(
fluid.default_main_program(),
feed={"input1": in_np1, "input2": in_np2},
fetch_list=[res1, res2],
)
exe = fluid.Executor(place)
fetches = exe.run(
fluid.default_main_program(),
feed={"input1": in_np1, "input2": in_np2},
fetch_list=[res1, res2],
)
fetches1_tran = np.transpose(fetches[1], (0, 4, 1, 2, 3))
np.testing.assert_allclose(fetches[0], fetches1_tran, rtol=1e-05)
fetches1_tran = np.transpose(fetches[1], (0, 4, 1, 2, 3))
np.testing.assert_allclose(
fetches[0], fetches1_tran, rtol=1e-05
)
def test_static(self):
for place in self.places:
self.check_static_3d_input(place=place)
self.check_static_4d_input(place=place)
self.check_static_5d_input(place=place)
with paddle_static_guard():
for place in self.places:
self.check_static_3d_input(place=place)
self.check_static_4d_input(place=place)
self.check_static_5d_input(place=place)
def check_dygraph_3d_input(self, place):
with fluid.dygraph.guard(place):
......@@ -268,46 +278,51 @@ class TestLocalResponseNormFAPI(unittest.TestCase):
class TestLocalResponseNormFAPIError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
with paddle_static_guard():
with program_guard(Program(), Program()):
def test_Variable():
# the input of lrn must be Variable.
x1 = fluid.create_lod_tensor(
np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace()
)
paddle.nn.functional.local_response_norm(x1, size=5)
def test_Variable():
# the input of lrn must be Variable.
x1 = fluid.create_lod_tensor(
np.array([-1, 3, 5, 5]),
[[1, 1, 1, 1]],
fluid.CPUPlace(),
)
paddle.nn.functional.local_response_norm(x1, size=5)
self.assertRaises(TypeError, test_Variable)
self.assertRaises(TypeError, test_Variable)
def test_datatype():
x = paddle.static.data(
name='x', shape=[3, 4, 5, 6], dtype="int32"
)
paddle.nn.functional.local_response_norm(x, size=5)
def test_datatype():
x = paddle.static.data(
name='x', shape=[3, 4, 5, 6], dtype="int32"
)
paddle.nn.functional.local_response_norm(x, size=5)
self.assertRaises(TypeError, test_datatype)
self.assertRaises(TypeError, test_datatype)
def test_dataformat():
x = paddle.static.data(
name='x', shape=[3, 4, 5, 6], dtype="float32"
)
paddle.nn.functional.local_response_norm(
x, size=5, data_format="NCTHW"
)
def test_dataformat():
x = paddle.static.data(
name='x', shape=[3, 4, 5, 6], dtype="float32"
)
paddle.nn.functional.local_response_norm(
x, size=5, data_format="NCTHW"
)
self.assertRaises(ValueError, test_dataformat)
self.assertRaises(ValueError, test_dataformat)
def test_dim():
x = paddle.static.data(name='x', shape=[3, 4], dtype="float32")
paddle.nn.functional.local_response_norm(x, size=5)
def test_dim():
x = paddle.static.data(
name='x', shape=[3, 4], dtype="float32"
)
paddle.nn.functional.local_response_norm(x, size=5)
self.assertRaises(ValueError, test_dim)
self.assertRaises(ValueError, test_dim)
def test_shape():
x = paddle.rand(shape=[0, 0, 2, 3], dtype="float32")
paddle.nn.functional.local_response_norm(x, size=5)
def test_shape():
x = paddle.rand(shape=[0, 0, 2, 3], dtype="float32")
paddle.nn.functional.local_response_norm(x, size=5)
self.assertRaises(ValueError, test_shape)
self.assertRaises(ValueError, test_shape)
class TestLocalResponseNormCAPI(unittest.TestCase):
......@@ -335,28 +350,29 @@ class TestLocalResponseNormCAPI(unittest.TestCase):
def test_static_fp16_gpu(self):
if paddle.fluid.core.is_compiled_with_cuda():
place = paddle.CUDAPlace(0)
with paddle.static.program_guard(
paddle.static.Program(), paddle.static.Program()
):
input = np.random.random([3, 3, 112, 112]).astype("float16")
x = paddle.static.data(
name="x", shape=[3, 3, 112, 112], dtype="float16"
)
m = paddle.nn.LocalResponseNorm(size=5)
y = m(x)
exe = paddle.static.Executor(place)
res = exe.run(
paddle.static.default_main_program(),
feed={
"x": input,
},
fetch_list=[y],
)
assert np.array_equal(res[0].shape, input.shape)
with paddle_static_guard():
with paddle.static.program_guard(
paddle.static.Program(), paddle.static.Program()
):
input = np.random.random([3, 3, 112, 112]).astype("float16")
x = paddle.static.data(
name="x", shape=[3, 3, 112, 112], dtype="float16"
)
m = paddle.nn.LocalResponseNorm(size=5)
y = m(x)
exe = paddle.static.Executor(place)
res = exe.run(
paddle.static.default_main_program(),
feed={
"x": input,
},
fetch_list=[y],
)
assert np.array_equal(res[0].shape, input.shape)
if __name__ == "__main__":
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
SIGMOID_THRESHOLD_MIN = -40.0
SIGMOID_THRESHOLD_MAX = 13.0
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
def sigmoid_np(x):
......
......@@ -19,7 +19,7 @@ import unittest
import numpy as np
import scipy
import scipy.linalg
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......@@ -156,10 +156,10 @@ class TestLUOp(OpTest):
}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], ['Out'], check_eager=True)
self.check_grad(['X'], ['Out'])
# m = n 2D
......
......@@ -19,7 +19,7 @@ import unittest
import numpy as np
import scipy
import scipy.linalg
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......@@ -168,10 +168,10 @@ class TestLU_UnpackOp(OpTest):
}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], ['L', 'U'], check_eager=True)
self.check_grad(['X'], ['L', 'U'])
# m = n
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest, paddle_static_guard
import paddle
from paddle.fluid import Program, core, program_guard
......@@ -148,14 +148,10 @@ class TestMarginCrossEntropyOp(OpTest):
}
def test_check_output(self):
self.check_output_with_place(
core.CUDAPlace(0), atol=1e-5, check_eager=True
)
self.check_output_with_place(core.CUDAPlace(0), atol=1e-5)
def test_check_grad(self):
self.check_grad_with_place(
core.CUDAPlace(0), ["Logits"], "Loss", check_eager=True
)
self.check_grad_with_place(core.CUDAPlace(0), ["Logits"], "Loss")
@unittest.skipIf(
......@@ -172,7 +168,6 @@ class TestMarginCrossEntropyOpFP32(TestMarginCrossEntropyOp):
"Loss",
numeric_grad_delta=5e-2,
max_relative_error=5e-2,
check_eager=True,
)
......@@ -184,9 +179,7 @@ class TestMarginCrossEntropyOpFP16(TestMarginCrossEntropyOp):
self.dtype = np.float16
def test_check_output(self):
self.check_output_with_place(
core.CUDAPlace(0), atol=5e-2, check_eager=True
)
self.check_output_with_place(core.CUDAPlace(0), atol=5e-2)
def test_check_grad(self):
self.check_grad_with_place(
......@@ -195,7 +188,6 @@ class TestMarginCrossEntropyOpFP16(TestMarginCrossEntropyOp):
"Loss",
numeric_grad_delta=6e-1,
max_relative_error=6e-1,
check_eager=True,
)
......@@ -224,17 +216,13 @@ class TestMarginCrossEntropyOpSphereFace(TestMarginCrossEntropyOp):
class TestMarginCrossEntropyOpCPU(TestMarginCrossEntropyOp):
def test_check_output(self):
try:
self.check_output_with_place(
core.CPUPlace(), atol=1e-5, check_eager=True
)
self.check_output_with_place(core.CPUPlace(), atol=1e-5)
except RuntimeError:
pass
def test_check_grad(self):
try:
self.check_grad_with_place(
core.CPUPlace(), ["Logits"], "Loss", check_eager=True
)
self.check_grad_with_place(core.CPUPlace(), ["Logits"], "Loss")
except RuntimeError:
pass
......@@ -279,63 +267,64 @@ class TestMarginCrossEntropyOpV2(unittest.TestCase):
self.check_static_result(place=place)
def check_static_result(self, place):
with program_guard(Program(), Program()):
datas = np.random.uniform(
-0.99, 0.99, [self.batch_dim, self.feat_dim]
).astype(self.dtype)
datas = datas / np.sqrt(
np.sum(np.square(datas), axis=1, keepdims=True)
)
weights = np.random.uniform(
-0.99, 0.99, [self.feat_dim, self.num_class]
).astype(self.dtype)
weights = weights / np.sqrt(
np.sum(np.square(weights), axis=0, keepdims=True)
)
logits_np = np.matmul(datas, weights)
labels_np = np.random.randint(
0, self.num_class, (self.batch_dim,), dtype="int64"
)
loss_np, softmax_np = margin_cross_entropy(
logits_np,
labels_np,
self.axis,
self.margin1,
self.margin2,
self.margin3,
self.scale,
self.reduction,
)
logits = paddle.static.data(
name='logits',
shape=[self.batch_dim, self.num_class],
dtype=self.dtype,
)
label = paddle.static.data(
name='label', shape=[self.batch_dim], dtype="int64"
)
loss, softmax = paddle.nn.functional.margin_cross_entropy(
logits,
label,
margin1=self.margin1,
margin2=self.margin2,
margin3=self.margin3,
scale=self.scale,
return_softmax=True,
reduction=self.reduction,
)
exe = paddle.fluid.Executor(place)
[loss_res, softmax_res] = exe.run(
paddle.fluid.default_main_program(),
feed={'logits': logits_np, 'label': labels_np},
fetch_list=[loss, softmax],
)
np.testing.assert_allclose(loss_res, loss_np)
np.testing.assert_allclose(softmax_res, softmax_np)
with paddle_static_guard():
with program_guard(Program(), Program()):
datas = np.random.uniform(
-0.99, 0.99, [self.batch_dim, self.feat_dim]
).astype(self.dtype)
datas = datas / np.sqrt(
np.sum(np.square(datas), axis=1, keepdims=True)
)
weights = np.random.uniform(
-0.99, 0.99, [self.feat_dim, self.num_class]
).astype(self.dtype)
weights = weights / np.sqrt(
np.sum(np.square(weights), axis=0, keepdims=True)
)
logits_np = np.matmul(datas, weights)
labels_np = np.random.randint(
0, self.num_class, (self.batch_dim,), dtype="int64"
)
loss_np, softmax_np = margin_cross_entropy(
logits_np,
labels_np,
self.axis,
self.margin1,
self.margin2,
self.margin3,
self.scale,
self.reduction,
)
logits = paddle.static.data(
name='logits',
shape=[self.batch_dim, self.num_class],
dtype=self.dtype,
)
label = paddle.static.data(
name='label', shape=[self.batch_dim], dtype="int64"
)
loss, softmax = paddle.nn.functional.margin_cross_entropy(
logits,
label,
margin1=self.margin1,
margin2=self.margin2,
margin3=self.margin3,
scale=self.scale,
return_softmax=True,
reduction=self.reduction,
)
exe = paddle.fluid.Executor(place)
[loss_res, softmax_res] = exe.run(
paddle.fluid.default_main_program(),
feed={'logits': logits_np, 'label': labels_np},
fetch_list=[loss, softmax],
)
np.testing.assert_allclose(loss_res, loss_np)
np.testing.assert_allclose(softmax_res, softmax_np)
def test_dynamic(self):
for place in self.places:
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest, paddle_static_guard
import paddle
from paddle import fluid
......@@ -83,27 +83,32 @@ class TestMarginRankLossLayer(unittest.TestCase):
self.check_identity(place)
def check_identity(self, place):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
label = paddle.static.data(
"label", (self.batch_size, 1), "float32"
)
x1 = paddle.static.data("x1", (self.batch_size, 1), "float32")
x2 = paddle.static.data("x2", (self.batch_size, 1), "float32")
out = paddle.nn.functional.margin_ranking_loss(
x1, x2, label, self.margin, 'none'
)
exe = fluid.Executor(place)
exe.run(start)
(out_np,) = exe.run(
main,
feed={"label": self.label, "x1": self.x1, "x2": self.x2},
fetch_list=[out],
)
np.testing.assert_allclose(out_np, self.loss)
with paddle_static_guard():
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
label = paddle.static.data(
"label", (self.batch_size, 1), "float32"
)
x1 = paddle.static.data(
"x1", (self.batch_size, 1), "float32"
)
x2 = paddle.static.data(
"x2", (self.batch_size, 1), "float32"
)
out = paddle.nn.functional.margin_ranking_loss(
x1, x2, label, self.margin, 'none'
)
exe = fluid.Executor(place)
exe.run(start)
(out_np,) = exe.run(
main,
feed={"label": self.label, "x1": self.x1, "x2": self.x2},
fetch_list=[out],
)
np.testing.assert_allclose(out_np, self.loss)
if __name__ == '__main__':
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
......@@ -40,10 +40,10 @@ class TestMaskedSelectOp(OpTest):
self.outputs = {'Y': out}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Y', check_eager=True)
self.check_grad(['X'], 'Y')
def init(self):
self.shape = (50, 3)
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
class TestMatchMatrixTensorOp(OpTest):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest, paddle_static_guard
import paddle
import paddle.fluid as fluid
......@@ -163,28 +163,33 @@ for dim in [4]:
class API_TestMm(unittest.TestCase):
def test_out(self):
with fluid.program_guard(fluid.Program()):
x = paddle.static.data(name="x", shape=[2], dtype="float64")
y = paddle.static.data(name='y', shape=[2], dtype='float64')
res = paddle.static.data(name="output", shape=[1], dtype="float64")
result = paddle.mm(x, y)
exe = fluid.Executor(fluid.CPUPlace())
data1 = np.random.rand(2)
data2 = np.random.rand(2)
np_res = exe.run(feed={'x': data1, 'y': data2}, fetch_list=[result])
expected_result = np.matmul(
data1.reshape(1, 2), data2.reshape(2, 1)
)
with paddle_static_guard():
with fluid.program_guard(fluid.Program()):
x = paddle.static.data(name="x", shape=[2], dtype="float64")
y = paddle.static.data(name='y', shape=[2], dtype='float64')
res = paddle.static.data(
name="output", shape=[1], dtype="float64"
)
result = paddle.mm(x, y)
exe = fluid.Executor(fluid.CPUPlace())
data1 = np.random.rand(2)
data2 = np.random.rand(2)
np_res = exe.run(
feed={'x': data1, 'y': data2}, fetch_list=[result]
)
expected_result = np.matmul(
data1.reshape(1, 2), data2.reshape(2, 1)
)
np.testing.assert_allclose(
np_res,
expected_result,
rtol=1e-05,
atol=1e-05,
err_msg='two value is {}\n{}, check diff!'.format(
np_res, expected_result
),
)
np.testing.assert_allclose(
np_res,
expected_result,
rtol=1e-05,
atol=1e-05,
err_msg='two value is {}\n{}, check diff!'.format(
np_res, expected_result
),
)
def test_dygraph_without_out(self):
device = fluid.CPUPlace()
......@@ -213,41 +218,43 @@ class Test_API_Matmul(unittest.TestCase):
class API_TestMmError(unittest.TestCase):
def test_errors(self):
def test_error1():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = paddle.static.data(
name="data1", shape=[10, 2], dtype="float32"
)
data2 = paddle.static.data(
name="data2", shape=[3, 10], dtype="float32"
)
paddle.mm(data1, data2)
self.assertRaises(ValueError, test_error1)
def test_error2():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = paddle.static.data(
name="data1", shape=[-1, 10, 2], dtype="float32"
)
data2 = paddle.static.data(
name="data2", shape=[-1, 2, 10], dtype="float32"
)
paddle.mm(data1, data2)
test_error2()
def test_error3():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = paddle.static.data(
name="data1", shape=[10, 10, 2], dtype="float32"
)
data2 = paddle.static.data(
name="data2", shape=[3, 2, 10], dtype="float32"
)
paddle.mm(data1, data2)
self.assertRaises(ValueError, test_error3)
with paddle_static_guard():
def test_error1():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = paddle.static.data(
name="data1", shape=[10, 2], dtype="float32"
)
data2 = paddle.static.data(
name="data2", shape=[3, 10], dtype="float32"
)
paddle.mm(data1, data2)
self.assertRaises(ValueError, test_error1)
def test_error2():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = paddle.static.data(
name="data1", shape=[-1, 10, 2], dtype="float32"
)
data2 = paddle.static.data(
name="data2", shape=[-1, 2, 10], dtype="float32"
)
paddle.mm(data1, data2)
test_error2()
def test_error3():
with fluid.program_guard(fluid.Program(), fluid.Program()):
data1 = paddle.static.data(
name="data1", shape=[10, 10, 2], dtype="float32"
)
data2 = paddle.static.data(
name="data2", shape=[3, 2, 10], dtype="float32"
)
paddle.mm(data1, data2)
self.assertRaises(ValueError, test_error3)
if __name__ == "__main__":
......
......@@ -16,7 +16,7 @@ import copy
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
from paddle.fluid import Program, program_guard
......@@ -296,7 +296,7 @@ class TestMatrixNMSOp(OpTest):
}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
class TestMatrixNMSOpNoOutput(TestMatrixNMSOp):
......
......@@ -45,7 +45,7 @@ class TestMatrixRankOP(OpTest):
self.outputs = {'Out': self.out}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def init_data(self):
self.x = np.eye(3, dtype=np.float32)
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import check_out_dtype
from eager_op_test import check_out_dtype
from test_sum_op import TestReduceOPTensorAxisBase
import paddle
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid.core as core
......@@ -57,10 +57,10 @@ class TestMaxOutOp(OpTest):
pass
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
class TestMaxOutOpAxis0(TestMaxOutOp):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
def compute_mean_iou(
......@@ -137,7 +137,7 @@ class TestCase1(TestMeanIOUOp):
# NOTE(dev): Skip check_dygraph because Python API doesn't expose
# in_wrong_num/in_correct_num/in_mean_iou_num argument
def test_check_output(self):
self.check_output(check_dygraph=False, check_eager=False)
self.check_output(check_dygraph=False)
if __name__ == '__main__':
......
......@@ -17,7 +17,7 @@ import unittest
import gradient_checker
import numpy as np
from decorator_helper import prog_scope
from op_test import OpTest, OpTestTool, convert_float_to_uint16
from eager_op_test import OpTest, OpTestTool, convert_float_to_uint16
from test_sum_op import TestReduceOPTensorAxisBase
import paddle
......@@ -53,10 +53,10 @@ class TestMeanOp(OpTest):
pass
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_checkout_grad(self):
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
class TestMeanOp_ZeroDim(OpTest):
......@@ -68,10 +68,10 @@ class TestMeanOp_ZeroDim(OpTest):
self.outputs = {'Out': np.mean(self.inputs["X"])}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_checkout_grad(self):
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
class TestMeanOpError(unittest.TestCase):
......@@ -102,7 +102,7 @@ class TestFP16MeanOp(TestMeanOp):
def test_check_output(self):
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, check_eager=True)
self.check_output_with_place(place)
def test_checkout_grad(self):
place = core.CUDAPlace(0)
......@@ -126,11 +126,11 @@ class TestBF16MeanOp(TestMeanOp):
def test_check_output(self):
paddle.enable_static()
self.check_output_with_place(core.CPUPlace(), check_eager=True)
self.check_output_with_place(core.CPUPlace())
def test_checkout_grad(self):
place = core.CPUPlace()
self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)
self.check_grad_with_place(place, ['X'], 'Out')
def ref_reduce_mean(x, axis=None, keepdim=False, reduce_all=False):
......@@ -181,14 +181,14 @@ class TestReduceMeanOp(OpTest):
def test_check_output(self):
if self.dtype != 'float16':
self.check_output(check_eager=True, check_prim=True)
self.check_output(check_prim=True)
else:
place = paddle.CUDAPlace(0)
self.check_output_with_place(place=place, check_prim=True)
def test_check_grad(self):
if self.dtype != 'float16':
self.check_grad(['X'], ['Out'], check_eager=True, check_prim=True)
self.check_grad(['X'], ['Out'], check_prim=True)
else:
place = paddle.CUDAPlace(0)
self.check_grad_with_place(
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import check_out_dtype
from eager_op_test import check_out_dtype
from test_sum_op import TestReduceOPTensorAxisBase
import paddle
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......@@ -74,11 +74,11 @@ class TestModeOp(OpTest):
def test_check_output(self):
paddle.enable_static()
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
paddle.enable_static()
self.check_grad(set(['X']), 'Out', check_eager=True)
self.check_grad(set(['X']), 'Out')
class TestModeOpLastdim(OpTest):
......@@ -99,11 +99,11 @@ class TestModeOpLastdim(OpTest):
def test_check_output(self):
paddle.enable_static()
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
paddle.enable_static()
self.check_grad(set(['X']), 'Out', check_eager=True)
self.check_grad(set(['X']), 'Out')
class TestModeOpKernels(unittest.TestCase):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
def modified_huber_loss_forward(val):
......
......@@ -15,8 +15,8 @@
import unittest
import numpy as np
from eager_op_test import OpTest
from numpy.linalg import multi_dot
from op_test import OpTest
import paddle
......@@ -42,11 +42,11 @@ class TestMultiDotOp(OpTest):
self.outputs = {'Out': multi_dot([self.A, self.B])}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['x0'], 'Out', check_eager=True)
self.check_grad(['x1'], 'Out', check_eager=True)
self.check_grad(['x0'], 'Out')
self.check_grad(['x1'], 'Out')
# (A*B)*C
......@@ -59,9 +59,9 @@ class TestMultiDotOp3Mat(TestMultiDotOp):
self.outputs = {'Out': multi_dot([self.A, self.B, self.C])}
def test_check_grad(self):
self.check_grad(['x0'], 'Out', check_eager=True)
self.check_grad(['x1'], 'Out', check_eager=True)
self.check_grad(['x2'], 'Out', check_eager=True)
self.check_grad(['x0'], 'Out')
self.check_grad(['x1'], 'Out')
self.check_grad(['x2'], 'Out')
# A*(B*C)
......@@ -74,9 +74,9 @@ class TestMultiDotOp3Mat2(TestMultiDotOp):
self.outputs = {'Out': multi_dot([self.A, self.B, self.C])}
def test_check_grad(self):
self.check_grad(['x0'], 'Out', check_eager=True)
self.check_grad(['x1'], 'Out', check_eager=True)
self.check_grad(['x2'], 'Out', check_eager=True)
self.check_grad(['x0'], 'Out')
self.check_grad(['x1'], 'Out')
self.check_grad(['x2'], 'Out')
class TestMultiDotOp4Mat(TestMultiDotOp):
......@@ -96,10 +96,10 @@ class TestMultiDotOp4Mat(TestMultiDotOp):
self.outputs = {'Out': multi_dot([self.A, self.B, self.C, self.D])}
def test_check_grad(self):
self.check_grad(['x0'], 'Out', check_eager=True)
self.check_grad(['x1'], 'Out', check_eager=True)
self.check_grad(['x2'], 'Out', check_eager=True)
self.check_grad(['x3'], 'Out', check_eager=True)
self.check_grad(['x0'], 'Out')
self.check_grad(['x1'], 'Out')
self.check_grad(['x2'], 'Out')
self.check_grad(['x3'], 'Out')
class TestMultiDotOpFirst1D(TestMultiDotOp):
......@@ -153,9 +153,9 @@ class TestMultiDotOp3MatLast1D(TestMultiDotOp3Mat):
self.outputs = {'Out': multi_dot([self.A, self.B, self.C])}
def test_check_grad(self):
self.check_grad(['x0'], 'Out', check_eager=True)
self.check_grad(['x1'], 'Out', check_eager=True)
self.check_grad(['x2'], 'Out', check_eager=True)
self.check_grad(['x0'], 'Out')
self.check_grad(['x1'], 'Out')
self.check_grad(['x2'], 'Out')
class TestMultiDotOp4MatLast1D(TestMultiDotOp4Mat):
......
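Each x0/x1/x2 above gets its own check_grad call because multi_dot takes a variable-length list of inputs, and every list element is a separate gradient target. The numpy reference these tests compare against behaves as follows (shapes chosen arbitrarily for illustration):

    import numpy as np
    from numpy.linalg import multi_dot

    A = np.random.random((4, 8))
    B = np.random.random((8, 3))
    C = np.random.random((3, 6))
    out = multi_dot([A, B, C])  # same value as A @ B @ C, cheapest ordering
    np.testing.assert_allclose(out, A @ B @ C, rtol=1e-10)
    assert out.shape == (4, 6)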
......@@ -16,7 +16,7 @@ import copy
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
from paddle import _C_ops, _legacy_C_ops
......@@ -797,7 +797,7 @@ class TestMulticlassNMS3Op(TestMulticlassNMS2Op):
}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
class TestMulticlassNMS3OpNoOutput(TestMulticlassNMS3Op):
......
......@@ -16,7 +16,7 @@ import os
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
from test_attribute_var import UnittestBase
import paddle
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
from paddle.static import Program, program_guard
......@@ -30,10 +30,10 @@ class TestMVOp(OpTest):
self.outputs = {'Out': np.dot(self.x, self.vec)}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X', 'Vec'], 'Out', check_eager=True)
self.check_grad(['X', 'Vec'], 'Out')
def init_config(self):
self.x = np.random.random((2, 100)).astype("float64")
......
......@@ -20,7 +20,7 @@ import numpy as np
import paddle
import paddle.fluid.core as core
# from op_test import OpTest
# from eager_op_test import OpTest
def np_nan_to_num(
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......@@ -314,10 +314,10 @@ class TestNearestInterpOp(OpTest):
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', in_place=True, check_eager=True)
self.check_grad(['X'], 'Out', in_place=True)
def init_test_case(self):
self.interp_method = 'nearest'
......@@ -481,9 +481,7 @@ class TestNearestInterpOpUint8(OpTest):
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output_with_place(
place=core.CPUPlace(), atol=1, check_eager=True
)
self.check_output_with_place(place=core.CPUPlace(), atol=1)
def init_test_case(self):
self.interp_method = 'nearest'
......@@ -631,10 +629,10 @@ class TestNearestInterpOp_attr_tensor(OpTest):
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', in_place=True, check_eager=True)
self.check_grad(['X'], 'Out', in_place=True)
def init_test_case(self):
self.interp_method = 'nearest'
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......@@ -958,19 +958,19 @@ class TestNLLLossOp1DWithReduce(OpTest):
self.attrs = {'reduction': 'mean', 'ignore_index': -100}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_output_with_weight(self):
self.with_weight = True
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.with_weight = True
place = fluid.CPUPlace()
self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)
self.check_grad_with_place(place, ['X'], 'Out')
if fluid.core.is_compiled_with_cuda():
place = fluid.CUDAPlace(0)
self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)
self.check_grad_with_place(place, ['X'], 'Out')
def init_test_case(self):
self.input_shape = [10, 10]
......@@ -1009,19 +1009,19 @@ class TestNLLLossOp1DNoReduce(OpTest):
self.attrs = {'reduction': 'none', 'ignore_index': -100}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_output_with_weight(self):
self.with_weight = True
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.with_weight = True
place = fluid.CPUPlace()
self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)
self.check_grad_with_place(place, ['X'], 'Out')
if fluid.core.is_compiled_with_cuda():
place = fluid.CUDAPlace(0)
self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)
self.check_grad_with_place(place, ['X'], 'Out')
def init_test_case(self):
self.input_shape = [10, 10]
......@@ -1059,19 +1059,19 @@ class TestNLLLossOp2DWithReduce(OpTest):
self.attrs = {'reduction': 'mean', 'ignore_index': -100}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_output_with_weight(self):
self.with_weight = True
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.with_weight = True
place = fluid.CPUPlace()
self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)
self.check_grad_with_place(place, ['X'], 'Out')
if fluid.core.is_compiled_with_cuda():
place = fluid.CUDAPlace(0)
self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)
self.check_grad_with_place(place, ['X'], 'Out')
def init_test_case(self):
self.input_shape = [2, 3, 5, 5]
......@@ -1110,19 +1110,19 @@ class TestNLLLossOp2DNoReduce(OpTest):
self.attrs = {'reduction': 'none', 'ignore_index': -100}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_output_with_weight(self):
self.with_weight = True
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.with_weight = True
place = fluid.CPUPlace()
self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)
self.check_grad_with_place(place, ['X'], 'Out')
if fluid.core.is_compiled_with_cuda():
place = fluid.CUDAPlace(0)
self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)
self.check_grad_with_place(place, ['X'], 'Out')
def init_test_case(self):
self.input_shape = [5, 3, 5, 5]
......
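The with_weight toggle above reruns the same check once per branch of the reference computation. Ignoring weights and ignore_index handling, the 1D mean-reduced case covered by these tests reduces to this numpy sketch (the function name is hypothetical):

    import numpy as np

    def nll_loss_1d_mean(logs, targets):
        # logs: (N, C) log-probabilities, targets: (N,) class indices
        picked = -logs[np.arange(logs.shape[0]), targets]
        return picked.mean()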
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
......@@ -90,7 +90,7 @@ class TestNMSOp(OpTest):
pass
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
if __name__ == "__main__":
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest, convert_float_to_uint16
from eager_op_test import OpTest, convert_float_to_uint16
import paddle
import paddle.fluid as fluid
......@@ -103,10 +103,10 @@ class TestFrobeniusNormOp(OpTest):
self.outputs = {'Out': norm}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
def init_test_case(self):
self.shape = [2, 3, 4, 5]
......@@ -127,7 +127,7 @@ class TestFrobeniusNormOp2(TestFrobeniusNormOp):
self.dtype = "float32"
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
class TestPnormOp(OpTest):
......@@ -150,10 +150,10 @@ class TestPnormOp(OpTest):
self.gradient = self.calc_gradient()
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
def init_test_case(self):
self.shape = [2, 3, 4, 5]
......@@ -349,7 +349,7 @@ class TestPnormBF16Op(OpTest):
def test_check_output(self):
place = core.CUDAPlace(0)
self.check_output_with_place(place, atol=1e-3, check_eager=True)
self.check_output_with_place(place, atol=1e-3)
def test_check_grad(self):
place = core.CUDAPlace(0)
......@@ -358,7 +358,6 @@ class TestPnormBF16Op(OpTest):
['X'],
'Out',
user_defined_grads=self.gradient,
check_eager=True,
)
def init_test_case(self):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
......@@ -92,12 +92,12 @@ class TestOverlapAddOp(OpTest):
def test_check_output(self):
paddle.enable_static()
self.check_output(check_eager=True)
self.check_output()
paddle.disable_static()
def test_check_grad_normal(self):
paddle.enable_static()
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
paddle.disable_static()
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid.core as core
......@@ -77,10 +77,10 @@ class TestPad3dOp(OpTest):
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad_normal(self):
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
def initTestCase(self):
self.shape = (2, 3, 4, 5, 6)
......
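For the constant-mode pad3d case above, the expected output can be sketched with np.pad over the three spatial dims of an NCDHW tensor (padding widths chosen for illustration):

    import numpy as np

    x = np.random.random((2, 3, 4, 5, 6))  # NCDHW, as in initTestCase
    out = np.pad(x, ((0, 0), (0, 0), (1, 1), (1, 1), (1, 1)),
                 mode='constant', constant_values=0.0)
    assert out.shape == (2, 3, 6, 7, 8)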
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
class TestPadConstantLikeOp(OpTest):
......
......@@ -102,7 +102,6 @@ def start_local_trainers(
pod,
training_script,
training_script_args,
eager_mode=True,
allocator_strategy="auto_growth",
log_dir=None,
):
......@@ -158,7 +157,6 @@ class TestMultipleGpus(unittest.TestCase):
def run_mnist_2gpu(
self,
target_file_name,
eager_mode=True,
allocator_strategy="auto_growth",
):
if (
......@@ -176,7 +174,6 @@ class TestMultipleGpus(unittest.TestCase):
procs = start_local_trainers(
cluster,
pod,
eager_mode=eager_mode,
allocator_strategy=allocator_strategy,
training_script=target_file_name,
training_script_args=[],
......
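With eager_mode removed from both the trainer launcher and run_mnist_2gpu above, call sites shrink to the script name plus an optional allocator strategy. A sketch of a post-change caller (test class and script name are hypothetical):

    from test_parallel_dygraph_dataparallel import TestMultipleGpus

    class TestSomeCollectiveOp(TestMultipleGpus):  # hypothetical test
        def test_collective(self):
            # eager is now the only mode, so no eager_mode argument
            self.run_mnist_2gpu('collective_some_op.py')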
......@@ -16,7 +16,7 @@ import random
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
def np_partial_concat(inputs, start, length):
......
......@@ -16,7 +16,7 @@ import random
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
class TestPartialSumOp(OpTest):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
def PolygonBoxRestore(input):
......
......@@ -16,7 +16,7 @@ import math
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
......@@ -76,7 +76,7 @@ class TestPriorBoxOp(OpTest):
self.outputs = {'Boxes': self.out_boxes, 'Variances': self.out_var}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def setUp(self):
self.op_type = "prior_box"
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
class TestProximalAdagradOp(OpTest):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
class TestProximalGDOp(OpTest):
......
......@@ -16,7 +16,7 @@ import math
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
......@@ -174,10 +174,10 @@ class TestPSROIPoolOp(OpTest):
self.set_data()
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
class TestPSROIPoolDynamicFunctionAPI(unittest.TestCase):
......
......@@ -16,7 +16,7 @@ import itertools
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......@@ -71,13 +71,12 @@ class TestQrOp(OpTest):
return a, q, r
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad_normal(self):
self.check_grad(
['X'],
['Q', 'R'],
check_eager=True,
numeric_grad_delta=1e-5,
max_relative_error=1e-6,
)
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
class TestRandomCropOp(OpTest):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid.core as core
......
......@@ -16,7 +16,7 @@ import unittest
from functools import partial
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
......@@ -47,7 +47,7 @@ class TestRangeOp(OpTest):
self.case = (0, 1, 0.2)
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
class TestFloatRangeOpCase0(TestRangeOp):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......@@ -58,7 +58,7 @@ class TestRealOp(OpTest):
)
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(
......@@ -66,7 +66,6 @@ class TestRealOp(OpTest):
'Out',
user_defined_grads=[self.grad_x],
user_defined_grad_outputs=[self.grad_out],
check_eager=True,
)
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......@@ -58,10 +58,10 @@ class TestRepeatInterleaveOp(OpTest):
self.index_size = self.x_shape[self.dim]
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad_normal(self):
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
class TestRepeatInterleaveOp2(OpTest):
......@@ -96,10 +96,10 @@ class TestRepeatInterleaveOp2(OpTest):
self.index_size = self.x_shape[self.dim]
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad_normal(self):
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
class TestIndexSelectAPI(unittest.TestCase):
......
......@@ -16,7 +16,7 @@ import math
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
from test_anchor_generator_op import anchor_generator_in_python
from test_multiclass_nms_op import nms
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
......@@ -37,10 +37,10 @@ class TestReverseOp(OpTest):
self.outputs = {'Out': out}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
class TestCase0(TestReverseOp):
......
......@@ -17,7 +17,7 @@ import sys
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid.core as core
......@@ -31,6 +31,38 @@ np.set_printoptions(threshold=np.inf)
paddle.enable_static()
def rnn_warpper(
Input,
PreState,
WeightList=None,
SequenceLength=None,
dropout_prob=0.0,
is_bidirec=False,
input_size=10,
hidden_size=100,
num_layers=1,
mode="LSTM",
seed=0,
is_test=False,
):
dropout_state_in = paddle.Tensor()
return paddle._C_ops.rnn(
Input,
PreState,
WeightList,
SequenceLength,
dropout_state_in,
dropout_prob,
is_bidirec,
input_size,
hidden_size,
num_layers,
mode,
seed,
is_test,
)
class TestRNNOp(OpTest):
def get_weight_names(self):
weight_names = []
......@@ -44,6 +76,9 @@ class TestRNNOp(OpTest):
def setUp(self):
self.op_type = "rnn"
self.python_api = rnn_warpper
self.python_out_sig = ["Out", "DropoutState", "State"]
self.python_out_sig_sub_name = {"State": ["last_hidden", "last_cell"]}
self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64
self.sequence_length = (
None
......
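The rnn_warpper added above exists so OpTest can drive the op through an eager Python entry point; python_out_sig names the op outputs, and python_out_sig_sub_name tells the harness how to unpack the State slot, which differs by cell type. An illustrative mapping only, with the mode strings assumed from the op's convention:

    # how "State" unpacks per mode in these rnn tests (illustrative)
    state_sub_outputs = {
        "LSTM": ["last_hidden", "last_cell"],  # LSTM also carries a cell state
        "RNN_TANH": ["last_hidden"],           # SimpleRNN keeps hidden only
    }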
......@@ -16,7 +16,7 @@ import math
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
......@@ -233,10 +233,10 @@ class TestROIAlignOp(OpTest):
self.set_data()
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
class TestROIAlignInLodOp(TestROIAlignOp):
......
......@@ -18,7 +18,7 @@ import unittest
from decimal import ROUND_HALF_UP, Decimal
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
......@@ -170,10 +170,10 @@ class TestROIPoolOp(OpTest):
self.set_data()
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
class TestROIPoolInLodOp(TestROIPoolOp):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest, convert_float_to_uint16
from eager_op_test import OpTest, convert_float_to_uint16
import paddle
import paddle.fluid as fluid
......@@ -47,10 +47,10 @@ class TestRollOp(OpTest):
self.axis = [0, -2]
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad_normal(self):
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
class TestRollOpCase2(TestRollOp):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
from paddle import fluid
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
from test_anchor_generator_op import anchor_generator_in_python
from test_generate_proposal_labels_op import (
_bbox_overlaps,
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......@@ -345,10 +345,10 @@ class RReluTest(OpTest):
}
def test_check_output(self):
self.check_output(no_check_set=['Noise'], check_eager=True)
self.check_output(no_check_set=['Noise'])
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
class RReluTrainingTest(RReluTest):
......
......@@ -16,7 +16,7 @@ import collections
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
class TestSampleLogitsOp(OpTest):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest, convert_float_to_uint16
from eager_op_test import OpTest, convert_float_to_uint16
import paddle
import paddle.fluid as fluid
......@@ -91,10 +91,10 @@ class TestScatterNdAddSimpleOp(OpTest):
self.dtype = np.float64
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X', 'Updates'], 'Out', check_eager=True)
self.check_grad(['X', 'Updates'], 'Out')
class TestScatterNdAddSimpleFP16Op(TestScatterNdAddSimpleOp):
......@@ -122,14 +122,12 @@ class TestScatterNdAddSimpleBF16Op(TestScatterNdAddSimpleOp):
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_output_with_place(place, check_eager=True)
self.check_output_with_place(place)
def test_check_grad(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_grad_with_place(
place, ['X', 'Updates'], 'Out', check_eager=True
)
self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
class TestScatterNdAddWithEmptyIndex(OpTest):
......@@ -165,10 +163,10 @@ class TestScatterNdAddWithEmptyIndex(OpTest):
self.dtype = np.float64
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X', 'Updates'], 'Out', check_eager=True)
self.check_grad(['X', 'Updates'], 'Out')
class TestScatterNdAddWithEmptyIndexFP16(TestScatterNdAddWithEmptyIndex):
......@@ -196,14 +194,12 @@ class TestScatterNdAddWithEmptyIndexBF16(TestScatterNdAddWithEmptyIndex):
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_output_with_place(place, check_eager=True)
self.check_output_with_place(place)
def test_check_grad(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_grad_with_place(
place, ['X', 'Updates'], 'Out', check_eager=True
)
self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
class TestScatterNdAddWithHighRankSame(OpTest):
......@@ -242,10 +238,10 @@ class TestScatterNdAddWithHighRankSame(OpTest):
self.dtype = np.float64
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X', 'Updates'], 'Out', check_eager=True)
self.check_grad(['X', 'Updates'], 'Out')
class TestScatterNdAddWithHighRankSameFP16(TestScatterNdAddWithHighRankSame):
......@@ -273,14 +269,12 @@ class TestScatterNdAddWithHighRankSameBF16(TestScatterNdAddWithHighRankSame):
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_output_with_place(place, check_eager=True)
self.check_output_with_place(place)
def test_check_grad(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_grad_with_place(
place, ['X', 'Updates'], 'Out', check_eager=True
)
self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
class TestScatterNdAddWithHighRankDiff(OpTest):
......@@ -303,10 +297,10 @@ class TestScatterNdAddWithHighRankDiff(OpTest):
self.outputs = {'Out': expect_np}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X', 'Updates'], 'Out', check_eager=True)
self.check_grad(['X', 'Updates'], 'Out')
# Test Python API
......
......@@ -16,7 +16,7 @@ import os
import unittest
import numpy as np
from op_test import OpTest, convert_float_to_uint16
from eager_op_test import OpTest, convert_float_to_uint16
import paddle
import paddle.fluid as fluid
......@@ -46,10 +46,10 @@ class TestScatterOp(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output(check_eager=False)
self.check_output()
def test_check_grad(self):
self.check_grad(["X", "Updates"], "Out", check_eager=False)
self.check_grad(["X", "Updates"], "Out")
class TestScatterFP16Op(TestScatterOp):
......@@ -69,14 +69,12 @@ class TestScatterBF16Op(TestScatterOp):
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_output_with_place(place, check_eager=False)
self.check_output_with_place(place)
def test_check_grad(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_grad_with_place(
place, ['X', 'Updates'], 'Out', check_eager=False
)
self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
class TestScatterOp0(OpTest):
......@@ -102,10 +100,10 @@ class TestScatterOp0(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output(check_eager=False)
self.check_output()
def test_check_grad(self):
self.check_grad(["X", "Updates"], "Out", check_eager=False)
self.check_grad(["X", "Updates"], "Out")
class TestScatterFP16Op0(TestScatterOp0):
......@@ -125,14 +123,12 @@ class TestScatterBF16Op0(TestScatterOp0):
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_output_with_place(place, check_eager=False)
self.check_output_with_place(place)
def test_check_grad(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_grad_with_place(
place, ['X', 'Updates'], 'Out', check_eager=False
)
self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
class TestScatterOp1(OpTest):
......@@ -161,10 +157,10 @@ class TestScatterOp1(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output(check_eager=False)
self.check_output()
def test_check_grad(self):
self.check_grad(["X", "Updates"], "Out", check_eager=False)
self.check_grad(["X", "Updates"], "Out")
class TestScatterFP16Op1(TestScatterOp1):
......@@ -184,14 +180,12 @@ class TestScatterBF16Op1(TestScatterOp1):
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_output_with_place(place, check_eager=False)
self.check_output_with_place(place)
def test_check_grad(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_grad_with_place(
place, ['X', 'Updates'], 'Out', check_eager=False
)
self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
@unittest.skipIf(
......@@ -221,14 +215,12 @@ class TestScatterOp2(OpTest):
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_output_with_place(place, atol=1e-3, check_eager=False)
self.check_output_with_place(place, atol=1e-3)
def test_check_grad(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_grad_with_place(
place, ['X', 'Updates'], 'Out', check_eager=False
)
self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
@unittest.skipIf(
......@@ -280,14 +272,12 @@ class TestScatterOp3(OpTest):
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_output_with_place(place, atol=1e-3, check_eager=False)
self.check_output_with_place(place, atol=1e-3)
def test_check_grad(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_grad_with_place(
place, ['X', 'Updates'], 'Out', check_eager=False
)
self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
@unittest.skipIf(
......@@ -330,10 +320,10 @@ class TestScatterOp4(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output(check_eager=False)
self.check_output()
def test_check_grad(self):
self.check_grad(['X', 'Updates'], 'Out', check_eager=False)
self.check_grad(['X', 'Updates'], 'Out')
class TestScatterFP16Op4(TestScatterOp4):
......@@ -353,14 +343,12 @@ class TestScatterBF16Op4(TestScatterOp4):
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_output_with_place(place, check_eager=False)
self.check_output_with_place(place)
def test_check_grad(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_grad_with_place(
place, ['X', 'Updates'], 'Out', check_eager=False
)
self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
@unittest.skipIf(
......@@ -390,14 +378,12 @@ class TestScatterOp5(OpTest):
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_output_with_place(place, atol=1e-3, check_eager=False)
self.check_output_with_place(place, atol=1e-3)
def test_check_grad(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_grad_with_place(
place, ['X', 'Updates'], 'Out', check_eager=False
)
self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
@unittest.skipIf(
......@@ -440,10 +426,10 @@ class TestScatterOp6(OpTest):
self.dtype = np.float32
def test_check_output(self):
self.check_output(check_eager=False)
self.check_output()
def test_check_grad(self):
self.check_grad(["X", "Updates"], "Out", check_eager=False)
self.check_grad(["X", "Updates"], "Out")
class TestScatterFP16Op6(TestScatterOp6):
......@@ -463,14 +449,12 @@ class TestScatterBF16Op6(TestScatterOp6):
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_output_with_place(place, check_eager=False)
self.check_output_with_place(place)
def test_check_grad(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
self.check_grad_with_place(
place, ['X', 'Updates'], 'Out', check_eager=False
)
self.check_grad_with_place(place, ['X', 'Updates'], 'Out')
class TestScatterAPI(unittest.TestCase):
......
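The scatter tests above passed check_eager=False to begin with, so this migration only deletes a dead flag. The overwrite semantics they verify amount to plain fancy-indexed assignment; a numpy sketch with hypothetical values:

    import numpy as np

    x = np.ones((3, 2), dtype=np.float64)
    index = np.array([1, 2], dtype=np.int64)
    updates = np.array([[9., 9.], [7., 7.]])
    out = x.copy()
    out[index] = updates  # rows 1 and 2 replaced, row 0 untouched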
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid.core as core
......@@ -42,7 +42,7 @@ class TestSearchSorted(OpTest):
}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def init_test_case(self):
self.sorted_sequence = np.array([1, 3, 5, 7, 9]).astype("float32")
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.static as static
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid.core as core
......@@ -116,10 +116,10 @@ class TestSegmentOps(OpTest):
self.outputs = {'Out': result.astype(self.dtype)}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(["X"], "Out", check_eager=True)
self.check_grad(["X"], "Out")
class TestSegmentSum2(TestSegmentOps):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......
......@@ -18,7 +18,7 @@ import unittest
from functools import reduce
import numpy as np
from op_test import OpTest, convert_float_to_uint16
from eager_op_test import OpTest, convert_float_to_uint16
import paddle
import paddle.fluid.core as core
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest, convert_float_to_uint16
from eager_op_test import OpTest, convert_float_to_uint16
import paddle
from paddle.fluid import core
......@@ -36,7 +36,7 @@ class TestShapeOp(OpTest):
self.shape = [2, 3]
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
class case1(TestShapeOp):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
from paddle.fluid import core
from paddle.fluid.op import Operator
......
......@@ -17,7 +17,7 @@ import os
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle.fluid as fluid
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
class TestShuffleChannelOp(OpTest):
......
......@@ -17,7 +17,7 @@ import math
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
......
......@@ -17,7 +17,7 @@ import unittest
import gradient_checker
import numpy as np
from decorator_helper import prog_scope
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
class TestSimilarityFocusOp(OpTest):
......
......@@ -17,20 +17,52 @@ import sys
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid.core as core
sys.path.append("./rnn")
from convert import get_params_for_net
from rnn_numpy import SimpleRNN
from rnn.convert import get_params_for_net
from rnn.rnn_numpy import SimpleRNN
random.seed(2)
np.set_printoptions(threshold=np.inf)
paddle.enable_static()
def rnn_warpper(
Input,
PreState,
WeightList=None,
SequenceLength=None,
dropout_prob=0.0,
is_bidirec=False,
input_size=10,
hidden_size=100,
num_layers=1,
mode="LSTM",
seed=0,
is_test=False,
):
dropout_state_in = paddle.Tensor()
return paddle._C_ops.rnn(
Input,
[PreState],
WeightList,
SequenceLength,
dropout_state_in,
dropout_prob,
is_bidirec,
input_size,
hidden_size,
num_layers,
mode,
seed,
is_test,
)
class TestSimpleRNNOp(OpTest):
def get_weight_names(self):
weight_names = []
......@@ -44,6 +76,10 @@ class TestSimpleRNNOp(OpTest):
def setUp(self):
self.op_type = "rnn"
self.python_api = rnn_warpper
self.python_out_sig = ["Out", "DropoutState", "State"]
self.python_out_sig_sub_name = {"State": ["last_hidden"]}
self.dtype = "float32" if core.is_compiled_with_rocm() else "float64"
self.sequence_length = (
None
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
def smooth_l1_loss_forward(val, sigma2):
......@@ -46,12 +46,10 @@ class TestSmoothL1LossOp1(OpTest):
}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad_normal(self):
self.check_grad(
['X', 'Y'], 'Out', max_relative_error=0.02, check_eager=True
)
self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.02)
def test_check_grad_ingore_x(self):
self.check_grad(
......@@ -59,7 +57,6 @@ class TestSmoothL1LossOp1(OpTest):
'Out',
max_relative_error=0.03,
no_grad_set=set("X"),
check_eager=True,
)
def test_check_grad_ingore_y(self):
......@@ -68,7 +65,6 @@ class TestSmoothL1LossOp1(OpTest):
'Out',
max_relative_error=0.03,
no_grad_set=set('Y'),
check_eager=True,
)
......@@ -96,12 +92,10 @@ class TestSmoothL1LossOp2(OpTest):
}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad_normal(self):
self.check_grad(
['X', 'Y'], 'Out', max_relative_error=0.03, check_eager=True
)
self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.03)
def test_check_grad_ingore_x(self):
self.check_grad(
......@@ -109,7 +103,6 @@ class TestSmoothL1LossOp2(OpTest):
'Out',
max_relative_error=0.03,
no_grad_set=set(['X', 'InsideWeight', 'OutsideWeight']),
check_eager=True,
)
def test_check_grad_ingore_y(self):
......@@ -118,7 +111,6 @@ class TestSmoothL1LossOp2(OpTest):
'Out',
max_relative_error=0.03,
no_grad_set=set(['Y', 'InsideWeight', 'OutsideWeight']),
check_eager=True,
)
......
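The smooth_l1_loss_forward helper referenced above is elementwise: quadratic near zero, linear in the tails, with the crossover at 1/sigma2. A minimal version, assuming the standard definition:

    def smooth_l1_loss_forward(val, sigma2):
        abs_val = abs(val)
        if abs_val < 1.0 / sigma2:
            return 0.5 * val * val * sigma2  # quadratic region
        return abs_val - 0.5 / sigma2        # linear region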
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
from test_softmax_op import stable_softmax
import paddle
......@@ -153,7 +153,7 @@ class TestSoftmaxWithCrossEntropyOp(OpTest):
def test_check_output(self):
if self.python_api is not None:
self.check_output(check_eager=True)
self.check_output()
self.check_output()
def test_check_grad(self):
......@@ -163,7 +163,6 @@ class TestSoftmaxWithCrossEntropyOp(OpTest):
["Logits"],
"Loss",
max_relative_error=5e-1,
check_eager=True,
)
# HIP will have accuracy fail when using float32 in CPU place
self.check_grad(["Logits"], "Loss", max_relative_error=5e-1)
......@@ -173,7 +172,6 @@ class TestSoftmaxWithCrossEntropyOp(OpTest):
["Logits"],
"Loss",
numeric_grad_delta=0.001,
check_eager=True,
)
self.check_grad(["Logits"], "Loss", numeric_grad_delta=0.001)
......@@ -510,14 +508,12 @@ class TestSoftmaxWithCrossEntropyOpFp16(TestSoftmaxWithCrossEntropyOp):
def test_check_output(self):
if self.python_api is not None:
self.check_output(atol=1e-2, check_eager=True)
self.check_output(atol=1e-2)
self.check_output(atol=1e-2)
def test_check_grad(self):
if self.python_api is not None:
self.check_grad(
["Logits"], "Loss", max_relative_error=0.1, check_eager=True
)
self.check_grad(["Logits"], "Loss", max_relative_error=0.1)
self.check_grad(["Logits"], "Loss", max_relative_error=0.1)
......@@ -537,9 +533,7 @@ class TestSoftmaxWithCrossEntropyOpNoCudnnFp16(
def test_check_grad(self):
if self.python_api is not None:
self.check_grad(
["Logits"], "Loss", max_relative_error=0.1, check_eager=True
)
self.check_grad(["Logits"], "Loss", max_relative_error=0.1)
self.check_grad(["Logits"], "Loss", max_relative_error=0.1)
......@@ -562,20 +556,18 @@ class TestSoftmaxWithCrossEntropyOp2(TestSoftmaxWithCrossEntropyOp):
def test_check_output(self):
if self.python_api is not None:
self.check_output(check_eager=True)
self.check_output()
self.check_output()
def test_check_grad(self):
if core.is_compiled_with_rocm():
# HIP will have accuracy fail when using float32 in CPU place
if self.python_api is not None:
self.check_grad(
["Logits"], "Loss", max_relative_error=0.1, check_eager=True
)
self.check_grad(["Logits"], "Loss", max_relative_error=0.1)
self.check_grad(["Logits"], "Loss", max_relative_error=0.1)
else:
if self.python_api is not None:
self.check_grad(["Logits"], "Loss", check_eager=True)
self.check_grad(["Logits"], "Loss")
self.check_grad(["Logits"], "Loss")
......
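These tests build their expected losses on stable_softmax imported from test_softmax_op; the usual numerically safe formulation subtracts the max before exponentiating, e.g.:

    import numpy as np

    def stable_softmax(x):
        shifted = x - np.max(x)  # shifting cannot change the softmax value
        exps = np.exp(shifted)
        return exps / np.sum(exps)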
......@@ -21,7 +21,7 @@ import paddle
import paddle.fluid.core as core
sys.path.append("..")
from op_test import OpTest
from eager_op_test import OpTest
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
......@@ -50,10 +50,10 @@ class TestSolveOp(OpTest):
}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad_normal(self):
self.check_grad(['X', 'Y'], 'Out', check_eager=True)
self.check_grad(['X', 'Y'], 'Out')
# x broadcast + 3D batch case
......@@ -71,12 +71,10 @@ class TestSolveOpBatched_case0(OpTest):
self.outputs = {'Out': result}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad_normal(self):
self.check_grad(
['X', 'Y'], 'Out', max_relative_error=1e-1, check_eager=True
)
self.check_grad(['X', 'Y'], 'Out', max_relative_error=1e-1)
# 3D batch + y vector case
......@@ -94,12 +92,10 @@ class TestSolveOpBatched_case1(OpTest):
self.outputs = {'Out': result}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad_normal(self):
self.check_grad(
['X', 'Y'], 'Out', max_relative_error=0.04, check_eager=True
)
self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.04)
# 3D batch + y broadcast case
......@@ -117,12 +113,10 @@ class TestSolveOpBatched_case2(OpTest):
self.outputs = {'Out': result}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad_normal(self):
self.check_grad(
['X', 'Y'], 'Out', max_relative_error=0.02, check_eager=True
)
self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.02)
# x broadcast + 3D batch case
......@@ -140,12 +134,10 @@ class TestSolveOpBatched_case3(OpTest):
self.outputs = {'Out': result}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad_normal(self):
self.check_grad(
['X', 'Y'], 'Out', max_relative_error=0.02, check_eager=True
)
self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.02)
# 3D normal batch case
......@@ -163,10 +155,10 @@ class TestSolveOpBatched_case4(OpTest):
self.outputs = {'Out': result}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad_normal(self):
self.check_grad(['X', 'Y'], 'Out', check_eager=True)
self.check_grad(['X', 'Y'], 'Out')
# 4D normal batch case
......@@ -184,10 +176,10 @@ class TestSolveOpBatched_case5(OpTest):
self.outputs = {'Out': result}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad_normal(self):
self.check_grad(['X', 'Y'], 'Out', check_eager=True)
self.check_grad(['X', 'Y'], 'Out')
# 4D batch + y broadcast case
......@@ -205,10 +197,10 @@ class TestSolveOpBatched_case6(OpTest):
self.outputs = {'Out': result}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad_normal(self):
self.check_grad(['X', 'Y'], 'Out', check_eager=True)
self.check_grad(['X', 'Y'], 'Out')
# 5D normal batch case
......@@ -226,12 +218,10 @@ class TestSolveOpBatched_case7(OpTest):
self.outputs = {'Out': result}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad_normal(self):
self.check_grad(
['X', 'Y'], 'Out', max_relative_error=0.04, check_eager=True
)
self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.04)
# 5D batch + y broadcast case
......@@ -249,12 +239,10 @@ class TestSolveOpBatched_case8(OpTest):
self.outputs = {'Out': result}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad_normal(self):
self.check_grad(
['X', 'Y'], 'Out', max_relative_error=0.04, check_eager=True
)
self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.04)
class TestSolveOpError(unittest.TestCase):
......
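The batched solve cases above differ only in how X and Y broadcast; numpy applies the same rules, so the expected shapes can be checked directly (shapes are illustrative):

    import numpy as np

    x = np.random.random((2, 3, 4, 4))  # batch of square systems
    y = np.random.random((4, 6))        # broadcast across the batch dims
    out = np.linalg.solve(x, y)
    assert out.shape == (2, 3, 4, 6)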
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle.fluid as fluid
......
......@@ -18,7 +18,7 @@ import re
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
def calculate_sparse_momentum_by_numpy(
......@@ -175,9 +175,7 @@ class TestSparseMomentumOp(OpTest):
pass
def test_check_output(self):
self.check_output(
atol=5e-3 if self.multi_precision else 1e-5, check_eager=True
)
self.check_output(atol=5e-3 if self.multi_precision else 1e-5)
class TestSparseMomentumOpDtype1(TestSparseMomentumOp):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
from test_pool2d_op import avg_pool2D_forward_naive, max_pool2D_forward_naive
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
class TestSquaredL2DistanceOp_f0(OpTest):
......
......@@ -15,8 +15,8 @@
import unittest
import numpy as np
from eager_op_test import OpTest
from numpy import linalg as LA
from op_test import OpTest
import paddle
from paddle import _C_ops, _legacy_C_ops
......@@ -84,14 +84,13 @@ class TestL2LossOp(OpTest):
self.outputs = {'Out': np.square(LA.norm(X))}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(
['X'],
'Out',
max_relative_error=self.max_relative_error,
check_eager=True,
)
......
......@@ -16,7 +16,7 @@ import os
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
from test_attribute_var import UnittestBase
import paddle
......@@ -44,12 +44,10 @@ class TestSqueezeOp(OpTest):
}
def test_check_output(self):
self.check_output(
no_check_set=['XShape'], check_eager=True, check_prim=True
)
self.check_output(no_check_set=['XShape'], check_prim=True)
def test_check_grad(self):
self.check_grad(["X"], "Out", check_eager=True, check_prim=True)
self.check_grad(["X"], "Out", check_prim=True)
def init_test_case(self):
self.ori_shape = (1, 3, 1, 40)
......
......@@ -17,7 +17,7 @@ import unittest
import gradient_checker
import numpy as np
from decorator_helper import prog_scope
from op_test import OpTest, convert_float_to_uint16
from eager_op_test import OpTest, convert_float_to_uint16
import paddle
import paddle.fluid as fluid
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest, convert_float_to_uint16
from eager_op_test import OpTest, convert_float_to_uint16
import paddle
import paddle.fluid as fluid
......@@ -63,12 +63,10 @@ class TestStackOpBase(OpTest):
self.attrs = {'axis': self.axis}
def test_check_output(self):
self.check_output(check_eager=True, check_prim=True)
self.check_output(check_prim=True)
def test_check_grad(self):
self.check_grad(
self.get_x_names(), 'Y', check_eager=True, check_prim=True
)
self.check_grad(self.get_x_names(), 'Y', check_prim=True)
class TestStackOp1(TestStackOpBase):
......@@ -149,11 +147,11 @@ class TestStackBF16Op(OpTest):
self.attrs = {'axis': self.axis}
def test_check_output(self):
self.check_output(check_eager=True, check_prim=True)
self.check_output(check_prim=True)
def test_check_grad(self):
# concat_grad does not support bfloat16 dtype, skip check_prim
self.check_grad(self.get_x_names(), 'Y', check_eager=True)
self.check_grad(self.get_x_names(), 'Y')
class TestStackAPIWithLoDTensorArray(unittest.TestCase):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest, skip_check_grad_ci
from eager_op_test import OpTest, skip_check_grad_ci
import paddle
import paddle.fluid as fluid
......@@ -51,7 +51,7 @@ class TestSvdOp(OpTest):
self._output_data = np.linalg.svd(self._input_data)
def test_check_output(self):
self.check_output(no_check_set=['U', 'VH'], check_eager=True)
self.check_output(no_check_set=['U', 'VH'])
def test_svd_forward(self):
"""u matmul diag(s) matmul vt must become X"""
......@@ -71,19 +71,13 @@ class TestSvdOp(OpTest):
paddle.enable_static()
def check_S_grad(self):
self.check_grad(
['X'], ['S'], numeric_grad_delta=0.001, check_eager=True
)
self.check_grad(['X'], ['S'], numeric_grad_delta=0.001)
def check_U_grad(self):
self.check_grad(
['X'], ['U'], numeric_grad_delta=0.001, check_eager=True
)
self.check_grad(['X'], ['U'], numeric_grad_delta=0.001)
def check_V_grad(self):
self.check_grad(
['X'], ['VH'], numeric_grad_delta=0.001, check_eager=True
)
self.check_grad(['X'], ['VH'], numeric_grad_delta=0.001)
def test_check_grad(self):
"""
......
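The svd forward test above asserts the reconstruction property u matmul diag(s) matmul vh == X; with the thin decomposition that check is a one-liner (thin SVD chosen here for simplicity, whereas the test may use the full factorization):

    import numpy as np

    x = np.random.random((6, 4))
    u, s, vh = np.linalg.svd(x, full_matrices=False)
    np.testing.assert_allclose(u @ np.diag(s) @ vh, x, atol=1e-10)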
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest, paddle_static_guard
import paddle
import paddle.fluid as fluid
......@@ -140,31 +140,45 @@ class TestCase4(TestTDMChildOp):
class TestTDMChildShape(unittest.TestCase):
def test_shape(self):
x = paddle.static.data(
name='x', shape=[-1, 1], dtype='int32', lod_level=1
)
tdm_tree_info = create_tdm_tree()
tree_info_np = np.array(tdm_tree_info).astype('int32')
child, leaf_mask = fluid.contrib.layers.tdm_child(
x=x,
node_nums=26,
child_nums=2,
param_attr=fluid.ParamAttr(
initializer=paddle.nn.initializer.Assign(tree_info_np)
),
)
place = fluid.CPUPlace()
exe = fluid.Executor(place=place)
exe.run(fluid.default_startup_program())
feed = {
'x': np.array(
[[1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11], [12]]
).astype('int32')
}
exe.run(feed=feed)
with paddle_static_guard():
x = paddle.static.data(
name='x', shape=[-1, 1], dtype='int32', lod_level=1
)
tdm_tree_info = create_tdm_tree()
tree_info_np = np.array(tdm_tree_info).astype('int32')
child, leaf_mask = fluid.contrib.layers.tdm_child(
x=x,
node_nums=26,
child_nums=2,
param_attr=fluid.ParamAttr(
initializer=paddle.nn.initializer.Assign(tree_info_np)
),
)
place = fluid.CPUPlace()
exe = fluid.Executor(place=place)
exe.run(fluid.default_startup_program())
feed = {
'x': np.array(
[
[1],
[2],
[3],
[4],
[5],
[6],
[7],
[8],
[9],
[10],
[11],
[12],
]
).astype('int32')
}
exe.run(feed=feed)
if __name__ == "__main__":
......
......@@ -16,7 +16,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest, paddle_static_guard
import paddle
import paddle.fluid as fluid
......@@ -267,65 +267,66 @@ class TestCase7(TestTDMSamplerOp):
class TestTDMSamplerShape(unittest.TestCase):
def test_shape(self):
x = paddle.static.data(
name='x', shape=[-1, 1], dtype='int32', lod_level=1
)
tdm_tree_travel = create_tdm_travel()
tdm_tree_layer = create_tdm_layer()
layer_node_num_list = [len(i) for i in tdm_tree_layer]
tree_layer_flat = []
for layer_idx, layer_node in enumerate(layer_node_num_list):
tree_layer_flat += tdm_tree_layer[layer_idx]
travel_array = np.array(tdm_tree_travel).astype('int32')
layer_array = np.array(tree_layer_flat).astype('int32')
neg_samples_num_list = [1, 2, 3, 4]
leaf_node_num = 13
sample, label, mask = fluid.contrib.layers.tdm_sampler(
x,
neg_samples_num_list,
layer_node_num_list,
leaf_node_num,
tree_travel_attr=fluid.ParamAttr(
initializer=paddle.nn.initializer.Assign(travel_array)
),
tree_layer_attr=fluid.ParamAttr(
initializer=paddle.nn.initializer.Assign(layer_array)
),
output_positive=True,
output_list=True,
seed=0,
tree_dtype='int32',
dtype='int32',
)
with paddle_static_guard():
x = paddle.static.data(
name='x', shape=[-1, 1], dtype='int32', lod_level=1
)
tdm_tree_travel = create_tdm_travel()
tdm_tree_layer = create_tdm_layer()
layer_node_num_list = [len(i) for i in tdm_tree_layer]
tree_layer_flat = []
for layer_idx, layer_node in enumerate(layer_node_num_list):
tree_layer_flat += tdm_tree_layer[layer_idx]
travel_array = np.array(tdm_tree_travel).astype('int32')
layer_array = np.array(tree_layer_flat).astype('int32')
neg_samples_num_list = [1, 2, 3, 4]
leaf_node_num = 13
sample, label, mask = fluid.contrib.layers.tdm_sampler(
x,
neg_samples_num_list,
layer_node_num_list,
leaf_node_num,
tree_travel_attr=fluid.ParamAttr(
initializer=paddle.nn.initializer.Assign(travel_array)
),
tree_layer_attr=fluid.ParamAttr(
initializer=paddle.nn.initializer.Assign(layer_array)
),
output_positive=True,
output_list=True,
seed=0,
tree_dtype='int32',
dtype='int32',
)
place = fluid.CPUPlace()
exe = fluid.Executor(place=place)
exe.run(fluid.default_startup_program())
feed = {
'x': np.array(
[
[0],
[1],
[2],
[3],
[4],
[5],
[6],
[7],
[8],
[9],
[10],
[11],
[12],
]
).astype('int32')
}
exe.run(feed=feed)
place = fluid.CPUPlace()
exe = fluid.Executor(place=place)
exe.run(fluid.default_startup_program())
feed = {
'x': np.array(
[
[0],
[1],
[2],
[3],
[4],
[5],
[6],
[7],
[8],
[9],
[10],
[11],
[12],
]
).astype('int32')
}
exe.run(feed=feed)
if __name__ == "__main__":
......
......@@ -15,7 +15,7 @@
from math import exp, log
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
from scipy.special import logit
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest, convert_float_to_uint16
from eager_op_test import OpTest, convert_float_to_uint16
import paddle
from paddle.fluid import core
......@@ -69,10 +69,10 @@ class TestTemporalShift(OpTest):
self.dtype = 'float64'
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad_ignore_uv(self):
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
def initTestCase(self):
self.x_shape = (6, 4, 4, 4)
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid.core as core
......@@ -59,10 +59,10 @@ class TestTopkOp(OpTest):
self.outputs = {'Out': output, 'Indices': indices}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True, check_prim=True)
self.check_grad(['X'], 'Out', check_prim=True)
class TestTopkOp1(TestTopkOp):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......@@ -31,10 +31,10 @@ class TestTraceOp(OpTest):
self.outputs = {'Out': self.target}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['Input'], 'Out', check_eager=True)
self.check_grad(['Input'], 'Out')
def init_config(self):
self.case = np.random.randn(20, 6).astype('float64')
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
def collect_node_patch(og, max_depth):
......
......@@ -18,7 +18,7 @@ import unittest
import numpy as np
sys.path.append("..")
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......@@ -64,10 +64,10 @@ class TestTriangularSolveOp(OpTest):
self.outputs = {'Out': self.output}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad_normal(self):
self.check_grad(['X', 'Y'], 'Out', check_eager=True)
self.check_grad(['X', 'Y'], 'Out')
# 2D(broadcast) + 3D, test 'transpose'
......
......@@ -14,7 +14,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......@@ -45,10 +45,10 @@ class TrilTriuOpDefaultTest(OpTest):
}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad_normal(self):
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
def initTestCase(self):
self.real_op_type = np.random.choice(['triu', 'tril'])
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......@@ -204,8 +204,6 @@ class TestTrilinearInterpOp(OpTest):
self.init_test_case()
self.op_type = "trilinear_interp_v2"
# NOTE(dev): some AsDispensable inputs are not used under imperative mode.
# Skip check_eager when they are found in Inputs.
self.check_eager = True
input_np = np.random.random(self.input_shape).astype("float32")
scale_w = 0
......@@ -255,11 +253,9 @@ class TestTrilinearInterpOp(OpTest):
self.inputs = {'X': input_np}
if self.out_size is not None:
self.inputs['OutSize'] = self.out_size
self.check_eager = False
if self.actual_shape is not None:
self.inputs['OutSize'] = self.actual_shape
self.check_eager = False
# c++ end treats NCDHW the same way as NCHW
if self.actual_shape is not None:
self.inputs['OutSize'] = self.actual_shape
# c++ end treats NCDHW the same way as NCHW
if self.data_layout == 'NCDHW':
data_layout = 'NCHW'
else:
......@@ -283,12 +279,10 @@ class TestTrilinearInterpOp(OpTest):
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output(check_eager=self.check_eager)
self.check_output()
def test_check_grad(self):
self.check_grad(
['X'], 'Out', in_place=True, check_eager=self.check_eager
)
self.check_grad(['X'], 'Out', in_place=True)
def init_test_case(self):
self.interp_method = 'trilinear'
......@@ -435,7 +429,6 @@ class TestTrilinearInterpOpUint8(OpTest):
self.actual_shape = None
self.init_test_case()
self.op_type = "trilinear_interp_v2"
self.check_eager = True
input_np = np.random.randint(
low=0, high=256, size=self.input_shape
).astype("uint8")
......@@ -474,7 +467,6 @@ class TestTrilinearInterpOpUint8(OpTest):
self.inputs = {'X': input_np}
if self.out_size is not None:
self.inputs['OutSize'] = self.out_size
self.check_eager = False
self.attrs = {
'out_d': self.out_d,
......@@ -494,9 +486,7 @@ class TestTrilinearInterpOpUint8(OpTest):
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output_with_place(
place=core.CPUPlace(), atol=1, check_eager=self.check_eager
)
self.check_output_with_place(place=core.CPUPlace(), atol=1)
def init_test_case(self):
self.interp_method = 'trilinear'
......@@ -607,7 +597,6 @@ class TestTrilinearInterpOp_attr_tensor(OpTest):
self.actual_shape = None
self.init_test_case()
self.op_type = "trilinear_interp_v2"
self.check_eager = True
self.shape_by_1Dtensor = False
self.scale_by_1Dtensor = False
self.attrs = {
......@@ -641,7 +630,6 @@ class TestTrilinearInterpOp_attr_tensor(OpTest):
if self.shape_by_1Dtensor:
self.inputs['OutSize'] = self.out_size
self.check_eager = False
elif self.out_size is not None:
size_tensor = []
for index, ele in enumerate(self.out_size):
......@@ -649,7 +637,6 @@ class TestTrilinearInterpOp_attr_tensor(OpTest):
("x" + str(index), np.ones((1)).astype('int32') * ele)
)
self.inputs['SizeTensor'] = size_tensor
self.check_eager = False
self.attrs['out_d'] = self.out_d
self.attrs['out_h'] = self.out_h
......@@ -677,12 +664,10 @@ class TestTrilinearInterpOp_attr_tensor(OpTest):
self.outputs = {'Out': output_np}
def test_check_output(self):
self.check_output(check_eager=self.check_eager)
self.check_output()
def test_check_grad(self):
self.check_grad(
['X'], 'Out', in_place=True, check_eager=self.check_eager
)
self.check_grad(['X'], 'Out', in_place=True)
def init_test_case(self):
self.interp_method = 'trilinear'
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
......@@ -35,10 +35,10 @@ class TestTruncOp(OpTest):
self.dtype = np.float64
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', numeric_grad_delta=1e-5, check_eager=True)
self.check_grad(['X'], 'Out', numeric_grad_delta=1e-5)
class TestFloatTruncOp(TestTruncOp):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest, convert_uint16_to_float
from eager_op_test import OpTest, convert_uint16_to_float
import paddle
import paddle.fluid as fluid
......
......@@ -16,7 +16,7 @@ import os
import unittest
import numpy as np
from op_test import OpTest, convert_uint16_to_float
from eager_op_test import OpTest, convert_uint16_to_float
from test_attribute_var import UnittestBase
import paddle
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......@@ -99,7 +99,7 @@ class TestUniqueConsecutiveOp(OpTest):
}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
class TestUniqueConsecutiveOp2(TestUniqueConsecutiveOp):
......@@ -347,7 +347,7 @@ class TestUniqueConsecutiveEmptyInput(OpTest):
}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
if __name__ == "__main__":
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.nn.functional as F
......@@ -143,10 +143,10 @@ class TestUnpool3DOp(OpTest):
self.outputs = {'Out': output.astype('float64')}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
def init_test_case(self):
self.unpool3d_forward_naive = unpool3dmax_forward_naive
......
......@@ -16,7 +16,7 @@ import os
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
from test_attribute_var import UnittestBase
import paddle
......@@ -130,10 +130,10 @@ class TestUnpoolOp(OpTest):
self.outputs = {'Out': output.astype('float64')}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out')
def init_test_case(self):
self.unpool2d_forward_naive = unpool2dmax_forward_naive
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
......@@ -39,12 +39,10 @@ class TestUnsqueezeOp(OpTest):
self.prim_op_type = "comp"
def test_check_output(self):
self.check_output(
no_check_set=["XShape"], check_eager=True, check_prim=True
)
self.check_output(no_check_set=["XShape"], check_prim=True)
def test_check_grad(self):
self.check_grad(["X"], "Out", check_eager=True)
self.check_grad(["X"], "Out")
def init_test_case(self):
self.ori_shape = (3, 40)
......@@ -136,10 +134,10 @@ class TestUnsqueezeOp_AxesTensorList(OpTest):
}
def test_check_output(self):
self.check_output(no_check_set=["XShape"], check_eager=True)
self.check_output(no_check_set=["XShape"])
def test_check_grad(self):
self.check_grad(["X"], "Out", check_eager=True)
self.check_grad(["X"], "Out")
def init_test_case(self):
self.ori_shape = (20, 5)
......@@ -197,10 +195,10 @@ class TestUnsqueezeOp_AxesTensor(OpTest):
}
def test_check_output(self):
self.check_output(no_check_set=["XShape"], check_eager=True)
self.check_output(no_check_set=["XShape"])
def test_check_grad(self):
self.check_grad(["X"], "Out", check_eager=True)
self.check_grad(["X"], "Out")
def init_test_case(self):
self.ori_shape = (20, 5)
......
......@@ -17,7 +17,7 @@ import unittest
import gradient_checker
import numpy as np
from decorator_helper import prog_scope
from op_test import OpTest, convert_float_to_uint16
from eager_op_test import OpTest, convert_float_to_uint16
import paddle
import paddle.fluid as fluid
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
......@@ -58,10 +58,10 @@ class TestUnStackOpBase(OpTest):
self.attrs = {'axis': self.axis, 'num': self.input_dim[self.axis]}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.check_grad(['X'], self.get_y_names(), check_eager=True)
self.check_grad(['X'], self.get_y_names())
class TestStackOp3(TestUnStackOpBase):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest, paddle_static_guard
import paddle
import paddle.fluid as fluid
......@@ -95,7 +95,7 @@ class TestUpdateLossScalingOp(OpTest):
}
def test_check_output(self):
self.check_output(no_check_set=['Out'], check_eager=True)
self.check_output(no_check_set=['Out'])
class TestUpdateLossScalingOpBad(TestUpdateLossScalingOp):
......@@ -132,188 +132,199 @@ class TestUpdateLossScalingOpBad(TestUpdateLossScalingOp):
}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
class TestUpdateLossScalingLayer(unittest.TestCase):
def loss_scaling_check(self, use_cuda=True, scope=fluid.Scope()):
a = paddle.static.data(name="a", shape=[1024, 1024], dtype='float32')
b = paddle.static.data(name="b", shape=[512, 128], dtype='float32')
x = [a, b]
found_inf = paddle.static.data(
name="found_inf", shape=[1], dtype='bool'
)
prev_loss_scaling = paddle.static.data(
name="prev_loss_scaling", shape=[1], dtype='float32'
)
num_good_steps = paddle.static.data(
name="num_good_steps", shape=[1], dtype='int32'
)
num_bad_steps = paddle.static.data(
name="num_bad_steps", shape=[1], dtype='int32'
)
a_v = np.random.random([1024, 1024]).astype('float32')
b_v = np.random.random([512, 128]).astype('float32')
found_inf_v = np.array([False]).astype('bool')
prev_loss_scaling_v = np.array([2048]).astype('float32')
num_good_steps_v = np.array([999], dtype=np.int32)
num_bad_steps_v = np.array([1], dtype=np.int32)
incr_every_n_steps = 1000
decr_every_n_nan_or_inf = 2
incr_ratio = 2
decr_ratio = 0.8
result = amp_nn.update_loss_scaling(
x,
found_inf,
prev_loss_scaling,
num_good_steps,
num_bad_steps,
incr_every_n_steps,
decr_every_n_nan_or_inf,
incr_ratio,
decr_ratio,
name="update_loss_scaling",
)
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
with fluid.scope_guard(scope):
exe.run(fluid.default_startup_program())
result_v = exe.run(
feed={
'a': a_v,
'b': b_v,
'found_inf': found_inf_v,
'prev_loss_scaling': prev_loss_scaling_v,
'num_good_steps': num_good_steps_v,
'num_bad_steps': num_bad_steps_v,
},
fetch_list=[
result,
x,
found_inf,
prev_loss_scaling,
num_good_steps,
num_bad_steps,
],
with paddle_static_guard():
a = paddle.static.data(
name="a", shape=[1024, 1024], dtype='float32'
)
assert np.array_equal(result_v[0], a_v)
assert np.array_equal(result_v[1], b_v)
assert np.array_equal(result_v[0], result_v[2])
assert np.array_equal(result_v[1], result_v[3])
assert np.array_equal(result_v[4], found_inf_v)
assert np.array_equal(result_v[5], prev_loss_scaling_v * incr_ratio)
assert np.array_equal(result_v[6], np.zeros_like(num_good_steps_v))
assert np.array_equal(result_v[7], np.zeros_like(num_bad_steps_v))
b = paddle.static.data(name="b", shape=[512, 128], dtype='float32')
x = [a, b]
found_inf = paddle.static.data(
name="found_inf", shape=[1], dtype='bool'
)
prev_loss_scaling = paddle.static.data(
name="prev_loss_scaling", shape=[1], dtype='float32'
)
num_good_steps = paddle.static.data(
name="num_good_steps", shape=[1], dtype='int32'
)
num_bad_steps = paddle.static.data(
name="num_bad_steps", shape=[1], dtype='int32'
)
a_v = np.random.random([1024, 1024]).astype('float32')
b_v = np.random.random([512, 128]).astype('float32')
found_inf_v = np.array([False]).astype('bool')
prev_loss_scaling_v = np.array([2048]).astype('float32')
num_good_steps_v = np.array([999], dtype=np.int32)
num_bad_steps_v = np.array([1], dtype=np.int32)
incr_every_n_steps = 1000
decr_every_n_nan_or_inf = 2
incr_ratio = 2
decr_ratio = 0.8
result = amp_nn.update_loss_scaling(
x,
found_inf,
prev_loss_scaling,
num_good_steps,
num_bad_steps,
incr_every_n_steps,
decr_every_n_nan_or_inf,
incr_ratio,
decr_ratio,
name="update_loss_scaling",
)
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
with fluid.scope_guard(scope):
exe.run(fluid.default_startup_program())
result_v = exe.run(
feed={
'a': a_v,
'b': b_v,
'found_inf': found_inf_v,
'prev_loss_scaling': prev_loss_scaling_v,
'num_good_steps': num_good_steps_v,
'num_bad_steps': num_bad_steps_v,
},
fetch_list=[
result,
x,
found_inf,
prev_loss_scaling,
num_good_steps,
num_bad_steps,
],
)
assert np.array_equal(result_v[0], a_v)
assert np.array_equal(result_v[1], b_v)
assert np.array_equal(result_v[0], result_v[2])
assert np.array_equal(result_v[1], result_v[3])
assert np.array_equal(result_v[4], found_inf_v)
assert np.array_equal(result_v[5], prev_loss_scaling_v * incr_ratio)
assert np.array_equal(result_v[6], np.zeros_like(num_good_steps_v))
assert np.array_equal(result_v[7], np.zeros_like(num_bad_steps_v))
def loss_scaling_check_inf(self, use_cuda=True, scope=fluid.Scope()):
a = paddle.static.data(name="a", shape=[1024, 1024], dtype='float32')
b = paddle.static.data(name="b", shape=[512, 128], dtype='float32')
x = [a, b]
found_inf = paddle.static.data(
name="found_inf", shape=[1], dtype='bool'
)
prev_loss_scaling = paddle.static.data(
name="prev_loss_scaling", shape=[1], dtype='float32'
)
num_good_steps = paddle.static.data(
name="num_good_steps", shape=[1], dtype='int32'
)
num_bad_steps = paddle.static.data(
name="num_bad_steps", shape=[1], dtype='int32'
)
a_v = np.random.random([1024, 1024]).astype('float32')
b_v = np.random.random([512, 128]).astype('float32')
i = np.random.randint(0, 1024, 1)
j = np.random.randint(0, 1024, 1)
a_v[i[0]][j[0]] = np.inf
found_inf_v = np.array([True]).astype('bool')
prev_loss_scaling_v = np.array([2048]).astype('float32')
num_good_steps_v = np.array([999], dtype=np.int32)
num_bad_steps_v = np.array([1], dtype=np.int32)
incr_every_n_steps = 1000
decr_every_n_nan_or_inf = 2
incr_ratio = 2
decr_ratio = 0.8
result = amp_nn.update_loss_scaling(
x,
found_inf,
prev_loss_scaling,
num_good_steps,
num_bad_steps,
incr_every_n_steps,
decr_every_n_nan_or_inf,
incr_ratio,
decr_ratio,
name="update_loss_scaling",
)
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
with fluid.scope_guard(scope):
exe.run(fluid.default_startup_program())
result_v = exe.run(
feed={
'a': a_v,
'b': b_v,
'found_inf': found_inf_v,
'prev_loss_scaling': prev_loss_scaling_v,
'num_good_steps': num_good_steps_v,
'num_bad_steps': num_bad_steps_v,
},
fetch_list=[
result,
x,
found_inf,
prev_loss_scaling,
num_good_steps,
num_bad_steps,
],
with paddle_static_guard():
a = paddle.static.data(
name="a", shape=[1024, 1024], dtype='float32'
)
b = paddle.static.data(name="b", shape=[512, 128], dtype='float32')
x = [a, b]
found_inf = paddle.static.data(
name="found_inf", shape=[1], dtype='bool'
)
prev_loss_scaling = paddle.static.data(
name="prev_loss_scaling", shape=[1], dtype='float32'
)
num_good_steps = paddle.static.data(
name="num_good_steps", shape=[1], dtype='int32'
)
num_bad_steps = paddle.static.data(
name="num_bad_steps", shape=[1], dtype='int32'
)
assert np.array_equal(result_v[0], np.zeros_like(a_v))
assert np.array_equal(result_v[1], np.zeros_like(b_v))
assert np.array_equal(result_v[2], np.zeros_like(a_v))
assert np.array_equal(result_v[3], np.zeros_like(b_v))
assert np.array_equal(result_v[4], found_inf_v)
assert np.array_equal(result_v[5], prev_loss_scaling_v * decr_ratio)
assert np.array_equal(result_v[6], np.zeros_like(num_good_steps_v))
assert np.array_equal(result_v[7], np.zeros_like(num_bad_steps_v))
def test_loss_scaling_cpu(self):
main = fluid.Program()
startup = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, startup):
self.loss_scaling_check(use_cuda=False)
a_v = np.random.random([1024, 1024]).astype('float32')
b_v = np.random.random([512, 128]).astype('float32')
i = np.random.randint(0, 1024, 1)
j = np.random.randint(0, 1024, 1)
a_v[i[0]][j[0]] = np.inf
found_inf_v = np.array([True]).astype('bool')
prev_loss_scaling_v = np.array([2048]).astype('float32')
num_good_steps_v = np.array([999], dtype=np.int32)
num_bad_steps_v = np.array([1], dtype=np.int32)
incr_every_n_steps = 1000
decr_every_n_nan_or_inf = 2
incr_ratio = 2
decr_ratio = 0.8
result = amp_nn.update_loss_scaling(
x,
found_inf,
prev_loss_scaling,
num_good_steps,
num_bad_steps,
incr_every_n_steps,
decr_every_n_nan_or_inf,
incr_ratio,
decr_ratio,
name="update_loss_scaling",
)
def test_loss_scaling_cpu_inf(self):
main = fluid.Program()
startup = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, startup):
self.loss_scaling_check_inf(use_cuda=False)
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
with fluid.scope_guard(scope):
exe.run(fluid.default_startup_program())
result_v = exe.run(
feed={
'a': a_v,
'b': b_v,
'found_inf': found_inf_v,
'prev_loss_scaling': prev_loss_scaling_v,
'num_good_steps': num_good_steps_v,
'num_bad_steps': num_bad_steps_v,
},
fetch_list=[
result,
x,
found_inf,
prev_loss_scaling,
num_good_steps,
num_bad_steps,
],
)
assert np.array_equal(result_v[0], np.zeros_like(a_v))
assert np.array_equal(result_v[1], np.zeros_like(b_v))
assert np.array_equal(result_v[2], np.zeros_like(a_v))
assert np.array_equal(result_v[3], np.zeros_like(b_v))
assert np.array_equal(result_v[4], found_inf_v)
assert np.array_equal(result_v[5], prev_loss_scaling_v * decr_ratio)
assert np.array_equal(result_v[6], np.zeros_like(num_good_steps_v))
assert np.array_equal(result_v[7], np.zeros_like(num_bad_steps_v))
def test_loss_scaling_gpu(self):
if fluid.core.is_compiled_with_cuda():
def test_loss_scaling_cpu(self):
with paddle_static_guard():
main = fluid.Program()
startup = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, startup):
self.loss_scaling_check(use_cuda=True)
self.loss_scaling_check(use_cuda=False)
def test_loss_scaling_gpu_inf(self):
if fluid.core.is_compiled_with_cuda():
def test_loss_scaling_cpu_inf(self):
with paddle_static_guard():
main = fluid.Program()
startup = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, startup):
self.loss_scaling_check_inf(use_cuda=True)
self.loss_scaling_check_inf(use_cuda=False)
def test_loss_scaling_gpu(self):
if fluid.core.is_compiled_with_cuda():
with paddle_static_guard():
main = fluid.Program()
startup = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, startup):
self.loss_scaling_check(use_cuda=True)
def test_loss_scaling_gpu_inf(self):
if fluid.core.is_compiled_with_cuda():
with paddle_static_guard():
main = fluid.Program()
startup = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, startup):
self.loss_scaling_check_inf(use_cuda=True)
if __name__ == '__main__':
......
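For reference, a minimal sketch of the guard pattern the migrated loss-scaling layer tests above now follow. The helper name build_and_run_static is hypothetical, and this assumes paddle_static_guard from eager_op_test switches to static mode for the duration of the block and restores the previous mode on exit:

import numpy as np
import paddle
from eager_op_test import paddle_static_guard

def build_and_run_static():
    # Hypothetical helper: all static-graph construction and execution
    # stays inside the guard, so tests fall back to dygraph afterwards.
    with paddle_static_guard():
        x = paddle.static.data(name="x", shape=[4, 8], dtype="float32")
        y = paddle.mean(x)
        exe = paddle.static.Executor(paddle.CPUPlace())
        exe.run(paddle.static.default_startup_program())
        (out,) = exe.run(
            feed={"x": np.ones([4, 8], dtype="float32")},
            fetch_list=[y],
        )
    return out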
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest, skip_check_grad_ci
from eager_op_test import OpTest, skip_check_grad_ci
class TestVarConv2DOp(OpTest):
......
......@@ -11,7 +11,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid as fluid
......@@ -99,7 +99,7 @@ class TestViterbiOp(OpTest):
self.outputs = {'Scores': scores, 'Path': path}
def test_output(self):
self.check_output(check_eager=True)
self.check_output()
class TestViterbiAPI(unittest.TestCase):
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
import paddle.fluid.core as core
......@@ -228,7 +228,7 @@ class TestWarpRNNTOp(OpTest):
}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.outputs["warprnntgrad"] = self.gradient
......@@ -237,21 +237,19 @@ class TestWarpRNNTOp(OpTest):
["input"],
"loss",
numeric_grad_delta=0.009,
check_eager=True,
)
else:
self.check_grad(
["input"],
"loss",
numeric_grad_delta=0.009,
check_eager=True,
)
class TestWarpRNNTFP64Op(TestWarpRNNTOp):
def test_check_output(self):
self.acts.astype(np.float64)
self.check_output(check_eager=True)
self.check_output()
def test_check_grad(self):
self.acts.astype(np.float64)
......@@ -261,14 +259,12 @@ class TestWarpRNNTFP64Op(TestWarpRNNTOp):
["input"],
"loss",
numeric_grad_delta=0.009,
check_eager=True,
)
else:
self.check_grad(
["input"],
"loss",
numeric_grad_delta=0.009,
check_eager=True,
)
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
import paddle
......@@ -115,7 +115,7 @@ class TestYoloBoxOp(OpTest):
self.outputs = {'Boxes': boxes, 'Scores': scores}
def test_check_output(self):
self.check_output(check_eager=False)
self.check_output()
def initTestCase(self):
self.anchors = [10, 13, 16, 30, 33, 23]
......
......@@ -15,7 +15,7 @@
import unittest
import numpy as np
from op_test import OpTest
from eager_op_test import OpTest
from scipy.special import expit, logit
import paddle
......@@ -272,13 +272,11 @@ class TestYolov3LossOp(OpTest):
def test_check_output(self):
place = core.CPUPlace()
self.check_output_with_place(place, atol=2e-3, check_eager=True)
self.check_output_with_place(place, atol=2e-3)
def test_check_grad_ignore_gtbox(self):
place = core.CPUPlace()
self.check_grad_with_place(
place, ['X'], 'Loss', max_relative_error=0.2, check_eager=True
)
self.check_grad_with_place(place, ['X'], 'Loss', max_relative_error=0.2)
def initTestCase(self):
self.anchors = [
......
......@@ -56,7 +56,6 @@ def start_local_trainers(
cluster,
pod,
training_script,
eager_mode,
training_script_args,
log_dir=None,
):
......@@ -105,7 +104,7 @@ def start_local_trainers(
class TestMultipleGpus(unittest.TestCase):
def run_mnist_2gpu(self, target_file_name, eager_mode=True):
def run_mnist_2gpu(self, target_file_name):
if fluid.core.get_cuda_device_count() == 0:
return
......@@ -118,7 +117,6 @@ class TestMultipleGpus(unittest.TestCase):
procs = start_local_trainers(
cluster,
pod,
eager_mode=eager_mode,
training_script=target_file_name,
training_script_args=[],
)
......
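Taken together, the migrated tests share one shape; below is a minimal sketch of an op test after this cleanup, importing OpTest from eager_op_test and calling the check helpers without any check_eager flag. The test class is illustrative only; "relu" is a real op used here purely as an example:

import unittest
import numpy as np
from eager_op_test import OpTest

class TestExampleOp(OpTest):  # hypothetical test, not part of this commit
    def setUp(self):
        self.op_type = "relu"  # real op, chosen only for illustration
        x = np.random.uniform(-1, 1, [4, 8]).astype("float64")
        # keep values away from 0, where the relu gradient is not smooth
        x[np.abs(x) < 0.005] = 0.02
        self.inputs = {'X': x}
        self.outputs = {'Out': np.maximum(x, 0)}

    def test_check_output(self):
        # eager_op_test covers eager mode by default; no check_eager needed
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')

if __name__ == "__main__":
    unittest.main()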