Unverified commit df0ed4d6 authored by 姜永久, committed by GitHub

disable deprecated ops dygraph tests (#50521)

* disable unwanted dygraph tests

* mine_hard_exa
Parent f7f67b72
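The change applies one pattern across every touched test file: the import is switched from op_test to eager_op_test, and check_dygraph=False is passed to check_output / check_grad so the dygraph (imperative) path is skipped for ops slated for deprecation. The sketch below is not part of the commit; the class name TestDeprecatedMulOp and the tensor shapes are illustrative only, while the OpTest calls mirror the ones used throughout this diff.

# Minimal sketch of the pattern applied in this PR (illustrative, not from the commit).
import unittest

import numpy as np
from eager_op_test import OpTest


class TestDeprecatedMulOp(OpTest):  # hypothetical name, for illustration only
    def setUp(self):
        self.op_type = "mul"  # one of the ops covered by this PR
        x = np.random.random((2, 3)).astype("float32")
        y = np.random.random((3, 4)).astype("float32")
        self.inputs = {'X': x, 'Y': y}
        self.outputs = {'Out': np.dot(x, y)}

    def test_check_output(self):
        # The op will be deprecated, so skip the dygraph check.
        self.check_output(check_dygraph=False)

    def test_check_grad(self):
        self.check_grad(['X', 'Y'], 'Out', check_dygraph=False)


if __name__ == "__main__":
    unittest.main()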
@@ -21,7 +21,7 @@ import numpy as np
 import paddle

 sys.path.append("../")
-from op_test import OpTest
+from eager_op_test import OpTest


 def seqconv(
@@ -153,12 +153,16 @@ class TestSeqProject(OpTest):
         self.outputs = {'Out': out}

     def test_check_output(self):
-        self.check_output()
+        # NODE(yjjiang11): This op will be deprecated.
+        self.check_output(check_dygraph=False)

     def test_check_grad(self):
         if self.padding_trainable:
             self.check_grad(
-                set(self.inputs_val), 'Out', max_relative_error=0.05
+                set(self.inputs_val),
+                'Out',
+                max_relative_error=0.05,
+                check_dygraph=False,
             )

     def test_check_grad_input(self):
@@ -167,12 +171,16 @@ class TestSeqProject(OpTest):
             'Out',
             max_relative_error=0.05,
             no_grad_set=set(self.inputs_val_no_x),
+            check_dygraph=False,
         )

     def test_check_grad_padding_data(self):
         if self.padding_trainable:
             self.check_grad(
-                ['PaddingData'], 'Out', no_grad_set=set(['X', 'Filter'])
+                ['PaddingData'],
+                'Out',
+                no_grad_set=set(['X', 'Filter']),
+                check_dygraph=False,
             )

     def test_check_grad_Filter(self):
@@ -181,6 +189,7 @@ class TestSeqProject(OpTest):
             'Out',
             max_relative_error=0.05,
             no_grad_set=set(self.inputs_val_no_f),
+            check_dygraph=False,
         )

     def test_check_grad_input_filter(self):
@@ -190,6 +199,7 @@ class TestSeqProject(OpTest):
             'Out',
             max_relative_error=0.05,
             no_grad_set=set(['PaddingData']),
+            check_dygraph=False,
         )

     def test_check_grad_padding_input(self):
@@ -199,6 +209,7 @@ class TestSeqProject(OpTest):
             'Out',
             max_relative_error=0.05,
             no_grad_set=set(['Filter']),
+            check_dygraph=False,
         )

     def test_check_grad_padding_filter(self):
@@ -208,6 +219,7 @@ class TestSeqProject(OpTest):
             'Out',
             max_relative_error=0.05,
             no_grad_set=set(['X']),
+            check_dygraph=False,
         )

     def init_test_case(self):
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest


 def box_decoder_and_assign(deltas, weights, boxes, box_score, box_clip):
@@ -62,7 +62,8 @@ def box_decoder_and_assign(deltas, weights, boxes, box_score, box_clip):

 class TestBoxDecoderAndAssignOpWithLoD(OpTest):
     def test_check_output(self):
-        self.check_output()
+        # NODE(yjjiang11): This op will be deprecated.
+        self.check_output(check_dygraph=False)

     def setUp(self):
         self.op_type = "box_decoder_and_assign"
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest


 class Segment:
@@ -222,7 +222,8 @@ class TestChunkEvalOp(OpTest):
         self.set_data()

     def test_check_output(self):
-        self.check_output()
+        # NODE(yjjiang11): This op will be deprecated.
+        self.check_output(check_dygraph=False)


 class TestChunkEvalOpWithExclude(TestChunkEvalOp):
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
@@ -95,7 +95,8 @@ class TestCTCAlignOp(OpTest):
         }

     def test_check_output(self):
-        self.check_output()
+        # NODE(yjjiang11): This op will be deprecated.
+        self.check_output(check_dygraph=False)


 class TestCTCAlignOpCase1(TestCTCAlignOp):
@@ -161,7 +162,8 @@ class TestCTCAlignPaddingOp(OpTest):
         }

     def test_check_output(self):
-        self.check_output()
+        # NODE(yjjiang11): This op will be deprecated.
+        self.check_output(check_dygraph=False)


 class TestCTCAlignOpCase3(TestCTCAlignPaddingOp):
...
@@ -16,7 +16,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 import paddle.fluid.core as core
@@ -268,13 +268,15 @@ class TestDataNormOp(OpTest):
         """
         test check forward, check output
         """
-        self.check_output()
+        # NODE(yjjiang11): This op will be deprecated.
+        self.check_output(check_dygraph=False)

     def test_check_grad(self):
         """
         test check backward, check grad
         """
-        self.check_grad(['X'], 'Y', no_grad_set=set([]))
+        # NODE(yjjiang11): This op will be deprecated.
+        self.check_grad(['X'], 'Y', no_grad_set=set([]), check_dygraph=False)


 class TestDataNormOpWithEnableScaleAndShift(OpTest):
@@ -330,13 +332,15 @@ class TestDataNormOpWithEnableScaleAndShift(OpTest):
         """
         test check forward, check output
         """
-        self.check_output()
+        # NODE(yjjiang11): This op will be deprecated.
+        self.check_output(check_dygraph=False)

     def test_check_grad(self):
         """
         test check backward, check grad
         """
-        self.check_grad(['X'], 'Y', no_grad_set=set([]))
+        # NODE(yjjiang11): This op will be deprecated.
+        self.check_grad(['X'], 'Y', no_grad_set=set([]), check_dygraph=False)


 class TestDataNormOpWithoutEnableScaleAndShift(OpTest):
@@ -387,13 +391,15 @@ class TestDataNormOpWithoutEnableScaleAndShift(OpTest):
         """
         test check forward, check output
         """
-        self.check_output()
+        # NODE(yjjiang11): This op will be deprecated.
+        self.check_output(check_dygraph=False)

     def test_check_grad(self):
         """
         test check backward, check grad
         """
-        self.check_grad(['X'], 'Y', no_grad_set=set([]))
+        # NODE(yjjiang11): This op will be deprecated.
+        self.check_grad(['X'], 'Y', no_grad_set=set([]), check_dygraph=False)


 class TestDataNormOpWithEnableScaleAndShift_1(OpTest):
@@ -449,13 +455,15 @@ class TestDataNormOpWithEnableScaleAndShift_1(OpTest):
         """
         test check forward, check output
         """
-        self.check_output()
+        # NODE(yjjiang11): This op will be deprecated.
+        self.check_output(check_dygraph=False)

     def test_check_grad(self):
         """
         test check backward, check grad
         """
-        self.check_grad(['X'], 'Y', no_grad_set=set([]))
+        # NODE(yjjiang11): This op will be deprecated.
+        self.check_grad(['X'], 'Y', no_grad_set=set([]), check_dygraph=False)


 class TestDataNormOpWithSlotDim(OpTest):
@@ -505,13 +513,15 @@ class TestDataNormOpWithSlotDim(OpTest):
         """
         test check forward, check output
         """
-        self.check_output()
+        # NODE(yjjiang11): This op will be deprecated.
+        self.check_output(check_dygraph=False)

     def test_check_grad(self):
         """
         test check backward, check grad
         """
-        self.check_grad(['X'], 'Y', no_grad_set=set([]))
+        # NODE(yjjiang11): This op will be deprecated.
+        self.check_grad(['X'], 'Y', no_grad_set=set([]), check_dygraph=False)


 class TestDataNormOpErrorr(unittest.TestCase):
...
@@ -17,7 +17,7 @@ import math
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest


 class TestDetectionMAPOp(OpTest):
@@ -267,7 +267,8 @@ class TestDetectionMAPOp(OpTest):
         self.set_data()

     def test_check_output(self):
-        self.check_output()
+        # NODE(yjjiang11): This op will be deprecated.
+        self.check_output(check_dygraph=False)


 class TestDetectionMAPOpSkipDiff(TestDetectionMAPOp):
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 import paddle.fluid as fluid
@@ -109,10 +109,12 @@ class TestDropoutNdOp(OpTest):
         }

     def test_check_output(self):
-        self.check_output()
+        # NODE(yjjiang11): This op will be deprecated.
+        self.check_output(check_dygraph=False)

     def test_check_grad_normal(self):
-        self.check_grad(['X'], 'Out')
+        # NODE(yjjiang11): This op will be deprecated.
+        self.check_grad(['X'], 'Out', check_dygraph=False)


 class TestDropoutNdAPI(unittest.TestCase):
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest


 def generate_proposal_labels_in_python(
@@ -339,7 +339,8 @@ class TestGenerateProposalLabelsOp(OpTest):
         }

     def test_check_output(self):
-        self.check_output()
+        # NODE(yjjiang11): This op will be deprecated.
+        self.check_output(check_dygraph=False)

     def setUp(self):
         self.op_type = 'generate_proposal_labels'
...
@@ -17,7 +17,7 @@ import math
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 from test_anchor_generator_op import anchor_generator_in_python

 import paddle
@@ -351,7 +351,8 @@ class TestGenerateProposalsOp(OpTest):
         }

     def test_check_output(self):
-        self.check_output()
+        # NODE(yjjiang11): This op will be deprecated.
+        self.check_output(check_dygraph=False)

     def setUp(self):
         self.op_type = "generate_proposals"
...
@@ -16,7 +16,7 @@ import math
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle.fluid as fluid
@@ -124,10 +124,13 @@ class TestGRUUnitOp(OpTest):
         self.set_outputs()

     def test_check_output(self):
-        self.check_output()
+        # NODE(yjjiang11): This op will be deprecated.
+        self.check_output(check_dygraph=False)

     def test_check_grad(self):
-        self.check_grad(['Input', 'HiddenPrev', 'Weight'], ['Hidden'])
+        self.check_grad(
+            ['Input', 'HiddenPrev', 'Weight'], ['Hidden'], check_dygraph=False
+        )


 class TestGRUUnitOpOriginMode(TestGRUUnitOp):
@@ -154,13 +157,19 @@ class TestGRUUnitOpWithBias(TestGRUUnitOp):
         }

     def test_check_grad(self):
-        self.check_grad(['Input', 'HiddenPrev', 'Weight', 'Bias'], ['Hidden'])
+        # NODE(yjjiang11): This op will be deprecated.
+        self.check_grad(
+            ['Input', 'HiddenPrev', 'Weight', 'Bias'],
+            ['Hidden'],
+            check_dygraph=False,
+        )

     def test_check_grad_ingore_input(self):
         self.check_grad(
             ['HiddenPrev', 'Weight', 'Bias'],
             ['Hidden'],
             no_grad_set=set('Input'),
+            check_dygraph=False,
         )
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest, skip_check_grad_ci
+from eager_op_test import OpTest, skip_check_grad_ci


 def get_output_shape(attrs, in_shape, img_real_size):
@@ -207,10 +207,11 @@ class TestBlockExpandOp(OpTest):
         self.outputs = {'Out': out}

     def test_check_output(self):
-        self.check_output()
+        # NODE(yjjiang11): This op will be deprecated.
+        self.check_output(check_dygraph=False)

     def test_check_grad_normal(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_dygraph=False)


 class TestBlockExpandOpCase2(TestBlockExpandOp):
@@ -287,7 +288,8 @@ class TestBlockExpandOpCase5(OpTest):
         self.outputs = {'Out': out}

     def test_check_output(self):
-        self.check_output()
+        # NODE(yjjiang11): This op will be deprecated.
+        self.check_output(check_dygraph=False)


 class TestBlockExpandOpCase6(TestBlockExpandOpCase5):
...
@@ -13,7 +13,7 @@
 # limitations under the License.

 import unittest

-from op_test import OpTest
+from eager_op_test import OpTest

 from paddle.distributed.fleet.meta_optimizers.common import OpRole
@@ -30,7 +30,8 @@ class TestMarkerOp(OpTest):
         self.outputs = {}

     def test_check_output(self):
-        self.check_output()
+        # NODE(yjjiang11): This op will be deprecated.
+        self.check_output(check_dygraph=False)


 if __name__ == "__main__":
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest


 class TestMineHardExamplesOp(OpTest):
@@ -41,7 +41,8 @@ class TestMineHardExamplesOp(OpTest):
         }

     def test_check_output(self):
-        self.check_output()
+        # NODE(yjjiang11): This op will be deprecated.
+        self.check_output(check_dygraph=False)

     def test_check_grad(self):
         return
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
@@ -30,10 +30,11 @@ class TestMinusOp(OpTest):
         self.outputs = {'Out': (self.inputs['X'] - self.inputs['Y'])}

     def test_check_output(self):
-        self.check_output()
+        # NODE(yjjiang11): This op will be deprecated.
+        self.check_output(check_dygraph=False)

     def test_check_grad(self):
-        self.check_grad(['X', 'Y'], 'Out')
+        self.check_grad(['X', 'Y'], 'Out', check_dygraph=False)


 if __name__ == "__main__":
...
@@ -20,7 +20,7 @@ import numpy as np
 import paddle.fluid.core as core

 sys.path.append("..")
-from op_test import OpTest
+from eager_op_test import OpTest


 class TestMulOp(OpTest):
@@ -38,19 +38,28 @@ class TestMulOp(OpTest):
         pass

     def test_check_output(self):
-        self.check_output()
+        # NODE(yjjiang11): This op will be deprecated.
+        self.check_output(check_dygraph=False)

     def test_check_grad_normal(self):
-        self.check_grad(['X', 'Y'], 'Out')
+        self.check_grad(['X', 'Y'], 'Out', check_dygraph=False)

     def test_check_grad_ingore_x(self):
         self.check_grad(
-            ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set("X")
+            ['Y'],
+            'Out',
+            max_relative_error=0.5,
+            no_grad_set=set("X"),
+            check_dygraph=False,
         )

     def test_check_grad_ingore_y(self):
         self.check_grad(
-            ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y')
+            ['X'],
+            'Out',
+            max_relative_error=0.5,
+            no_grad_set=set('Y'),
+            check_dygraph=False,
         )
@@ -78,19 +87,27 @@ class TestMulOp2(OpTest):
         pass

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)

     def test_check_grad_normal(self):
-        self.check_grad(['X', 'Y'], 'Out')
+        self.check_grad(['X', 'Y'], 'Out', check_dygraph=False)

     def test_check_grad_ingore_x(self):
         self.check_grad(
-            ['Y'], 'Out', max_relative_error=0.5, no_grad_set=set('X')
+            ['Y'],
+            'Out',
+            max_relative_error=0.5,
+            no_grad_set=set('X'),
+            check_dygraph=False,
         )

     def test_check_grad_ignore_y(self):
         self.check_grad(
-            ['X'], 'Out', max_relative_error=0.5, no_grad_set=set('Y')
+            ['X'],
+            'Out',
+            max_relative_error=0.5,
+            no_grad_set=set('Y'),
+            check_dygraph=False,
         )
@@ -104,13 +121,17 @@ class TestFP16MulOp1(TestMulOp):
     def test_check_output(self):
         place = core.CUDAPlace(0)
         if core.is_float16_supported(place):
-            self.check_output_with_place(place, atol=1e-1)
+            self.check_output_with_place(place, atol=1e-1, check_dygraph=False)

     def test_check_grad_normal(self):
         place = core.CUDAPlace(0)
         if core.is_float16_supported(place):
             self.check_grad_with_place(
-                place, ['X', 'Y'], 'Out', max_relative_error=0.5
+                place,
+                ['X', 'Y'],
+                'Out',
+                max_relative_error=0.5,
+                check_dygraph=False,
             )

     def test_check_grad_ingore_x(self):
@@ -122,6 +143,7 @@ class TestFP16MulOp1(TestMulOp):
                 'Out',
                 max_relative_error=0.5,
                 no_grad_set=set("X"),
+                check_dygraph=False,
             )

     def test_check_grad_ingore_y(self):
@@ -133,6 +155,7 @@ class TestFP16MulOp1(TestMulOp):
                 'Out',
                 max_relative_error=0.5,
                 no_grad_set=set('Y'),
+                check_dygraph=False,
             )
@@ -146,13 +169,17 @@ class TestFP16MulOp2(TestMulOp2):
     def test_check_output(self):
         place = core.CUDAPlace(0)
         if core.is_float16_supported(place):
-            self.check_output_with_place(place, atol=2e-1)
+            self.check_output_with_place(place, atol=2e-1, check_dygraph=False)

     def test_check_grad_normal(self):
         place = core.CUDAPlace(0)
         if core.is_float16_supported(place):
             self.check_grad_with_place(
-                place, ['X', 'Y'], 'Out', max_relative_error=0.9
+                place,
+                ['X', 'Y'],
+                'Out',
+                max_relative_error=0.9,
+                check_dygraph=False,
             )

     def test_check_grad_ingore_x(self):
@@ -164,6 +191,7 @@ class TestFP16MulOp2(TestMulOp2):
                 'Out',
                 max_relative_error=0.5,
                 no_grad_set=set("X"),
+                check_dygraph=False,
             )

     def test_check_grad_ingore_y(self):
@@ -175,6 +203,7 @@ class TestFP16MulOp2(TestMulOp2):
                 'Out',
                 max_relative_error=0.9,
                 no_grad_set=set('Y'),
+                check_dygraph=False,
             )
...
@@ -16,7 +16,7 @@ import itertools
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest


 def py_pnpair_op(score, label, query, column=-1, weight=None):
@@ -75,7 +75,8 @@ class TestPositiveNegativePairOp(OpTest):
         }

     def test_check_output(self):
-        self.check_output()
+        # NODE(yjjiang11): This op will be deprecated.
+        self.check_output(check_dygraph=False)


 class TestPositiveNegativePairOpAccumulateWeight(OpTest):
@@ -123,7 +124,7 @@ class TestPositiveNegativePairOpAccumulateWeight(OpTest):
         }

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)


 if __name__ == '__main__':
...
@@ -16,7 +16,7 @@ import random
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest


 def gen_match_and_neg_indices(num_prior, gt_lod, neg_lod):
@@ -135,7 +135,8 @@ class TestTargetAssginFloatType(OpTest):
         }

     def test_check_output(self):
-        self.check_output()
+        # NODE(yjjiang11): This op will be deprecated.
+        self.check_output(check_dygraph=False)


 class TestTargetAssginIntType(OpTest):
@@ -182,7 +183,7 @@ class TestTargetAssginIntType(OpTest):
         }

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_dygraph=False)


 if __name__ == '__main__':
...
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle.fluid.core as core
@@ -144,8 +144,6 @@ class TestTrilinearInterpOp(OpTest):
         self.init_test_case()
         self.op_type = "trilinear_interp"
         # NOTE(dev): some AsDispensible input is not used under imperative mode.
-        # Skip check_eager while found them in Inputs.
-        self.check_eager = True
         input_np = np.random.random(self.input_shape).astype("float32")

         if self.data_layout == "NCDHW":
@@ -180,10 +178,8 @@ class TestTrilinearInterpOp(OpTest):
         self.inputs = {'X': input_np}
         if self.out_size is not None:
             self.inputs['OutSize'] = self.out_size
-            self.check_eager = False
         if self.actual_shape is not None:
             self.inputs['OutSize'] = self.actual_shape
-            self.check_eager = False
         # c++ end treat NCDHW the same way as NCHW
         if self.data_layout == 'NCDHW':
             data_layout = 'NCHW'
@@ -202,12 +198,11 @@ class TestTrilinearInterpOp(OpTest):
         self.outputs = {'Out': output_np}

     def test_check_output(self):
-        self.check_output(check_eager=self.check_eager)
+        # NODE(yjjiang11): This op will be deprecated.
+        self.check_output(check_dygraph=False)

     def test_check_grad(self):
-        self.check_grad(
-            ['X'], 'Out', in_place=True, check_eager=self.check_eager
-        )
+        self.check_grad(['X'], 'Out', in_place=True, check_dygraph=False)

     def init_test_case(self):
         self.interp_method = 'trilinear'
@@ -353,7 +348,6 @@ class TestTrilinearInterpOpUint8(OpTest):
         self.actual_shape = None
         self.init_test_case()
         self.op_type = "trilinear_interp"
-        self.check_eager = True
         input_np = np.random.randint(
             low=0, high=256, size=self.input_shape
         ).astype("uint8")
@@ -380,7 +374,6 @@ class TestTrilinearInterpOpUint8(OpTest):
         self.inputs = {'X': input_np}
         if self.out_size is not None:
             self.inputs['OutSize'] = self.out_size
-            self.check_eager = False

         self.attrs = {
             'out_d': self.out_d,
@@ -395,7 +388,7 @@ class TestTrilinearInterpOpUint8(OpTest):

     def test_check_output(self):
         self.check_output_with_place(
-            place=core.CPUPlace(), atol=1, check_eager=self.check_eager
+            place=core.CPUPlace(), atol=1, check_dygraph=False
         )

     def init_test_case(self):
@@ -506,7 +499,6 @@ class TestTrilinearInterpOp_attr_tensor(OpTest):
         self.actual_shape = None
         self.init_test_case()
         self.op_type = "trilinear_interp"
-        self.check_eager = True
         self.shape_by_1Dtensor = False
         self.scale_by_1Dtensor = False
         self.attrs = {
@@ -532,7 +524,6 @@ class TestTrilinearInterpOp_attr_tensor(OpTest):

         if self.shape_by_1Dtensor:
             self.inputs['OutSize'] = self.out_size
-            self.check_eager = False
         elif self.out_size is not None:
             size_tensor = []
             for index, ele in enumerate(self.out_size):
@@ -540,7 +531,6 @@ class TestTrilinearInterpOp_attr_tensor(OpTest):
                     ("x" + str(index), np.ones((1)).astype('int32') * ele)
                 )
             self.inputs['SizeTensor'] = size_tensor
-            self.check_eager = False

         self.attrs['out_d'] = self.out_d
         self.attrs['out_h'] = self.out_h
@@ -558,12 +548,10 @@ class TestTrilinearInterpOp_attr_tensor(OpTest):
         self.outputs = {'Out': output_np}

     def test_check_output(self):
-        self.check_output(check_eager=self.check_eager)
+        self.check_output(check_dygraph=False)

     def test_check_grad(self):
-        self.check_grad(
-            ['X'], 'Out', in_place=True, check_eager=self.check_eager
-        )
+        self.check_grad(['X'], 'Out', in_place=True, check_dygraph=False)

     def init_test_case(self):
         self.interp_method = 'trilinear'
...