Commit cf15da65 authored by sneaxiy

move gc test to op_test

test=develop
Parent 45ebafc3
......@@ -371,6 +371,20 @@ class OpTest(unittest.TestCase):
                                no_check_set=None,
                                equal_nan=False,
                                check_dygraph=False):
        self._check_output_with_place(place, atol, no_check_set, equal_nan,
                                      check_dygraph)
        if self._is_gc_test_enabled():
            self._enable_gc(True)
            self._check_output_with_place(place, atol, no_check_set, equal_nan,
                                          check_dygraph)
            self._enable_gc(False)

    def _check_output_with_place(self,
                                 place,
                                 atol,
                                 no_check_set=None,
                                 equal_nan=False,
                                 check_dygraph=False):
        if check_dygraph:
            dygraph_outs = self._calc_dygraph_output(
                place, no_check_set=no_check_set)
......@@ -492,6 +506,19 @@ class OpTest(unittest.TestCase):
            places.append(core.CUDAPlace(0))
        return places

    def _is_gc_test_enabled(self):
        if hasattr(self, "test_gc") and self.test_gc:
            # FIXME(zjl): if ngraph is enabled, some ops would be fused inside
            # the Executor. This may cause gc errors, so disable the gc
            # unittests when FLAGS_use_ngraph is True.
            is_ngraph_enabled = bool(os.environ.get('FLAGS_use_ngraph', False))
            return not is_ngraph_enabled
        else:
            return False

    def _enable_gc(self, enabled):
        # A threshold of 0.0 frees variables as soon as they are no longer
        # needed; a negative threshold turns eager deletion off again.
        threshold = 0.0 if enabled else -1.0
        fluid.core._set_eager_deletion_mode(threshold, 1.0, True)

    def check_output(self,
                     atol=1e-5,
                     no_check_set=None,
......@@ -503,6 +530,13 @@ class OpTest(unittest.TestCase):
                                         check_dygraph)

    def check_output_customized(self, checker):
        self._check_output_customized(checker)
        if self._is_gc_test_enabled():
            self._enable_gc(True)
            self._check_output_customized(checker)
            self._enable_gc(False)

    def _check_output_customized(self, checker):
        places = self._get_places()
        for place in places:
            outs = self.calc_output(place)
......@@ -553,6 +587,26 @@ class OpTest(unittest.TestCase):
                              in_place=False,
                              max_relative_error=0.005,
                              user_defined_grads=None):
        self._check_grad_with_place(place, inputs_to_check, output_names,
                                    no_grad_set, numeric_grad_delta, in_place,
                                    max_relative_error, user_defined_grads)
        if self._is_gc_test_enabled():
            self._enable_gc(True)
            self._check_grad_with_place(place, inputs_to_check, output_names,
                                        no_grad_set, numeric_grad_delta,
                                        in_place, max_relative_error,
                                        user_defined_grads)
            self._enable_gc(False)

    def _check_grad_with_place(self,
                               place,
                               inputs_to_check,
                               output_names,
                               no_grad_set=None,
                               numeric_grad_delta=0.005,
                               in_place=False,
                               max_relative_error=0.005,
                               user_defined_grads=None):
        self.scope = core.Scope()
        op_inputs = self.inputs if hasattr(self, "inputs") else dict()
        op_outputs = self.outputs if hasattr(self, "outputs") else dict()
......
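With the hooks above in place, an individual op test opts into the extra GC pass simply by setting self.test_gc = True in setUp, as each of the per-op changes below does. A minimal sketch modeled on the TestMeanOp change further down (the class name, tensors, and test methods here are illustrative):

import numpy as np
from op_test import OpTest


class TestMeanOpWithGC(OpTest):
    def setUp(self):
        self.test_gc = True  # ask OpTest to repeat every check with eager deletion enabled
        self.op_type = "mean"
        self.inputs = {'X': np.random.random((10, 10)).astype("float32")}
        self.outputs = {'Out': np.mean(self.inputs['X'])}

    def test_check_output(self):
        # Runs once with GC off, then once more with GC on (see check_output_with_place above).
        self.check_output()

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')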
......@@ -85,6 +85,7 @@ def bilinear_interp_np(input,
class TestBilinearInterpOp(OpTest):
    def setUp(self):
        self.test_gc = True
        self.out_size = None
        self.actual_shape = None
        self.init_test_case()
......@@ -217,6 +218,7 @@ class TestBilinearInterpActualShape(TestBilinearInterpOp):
class TestBilinearInterpOpUint8(OpTest):
    def setUp(self):
        self.test_gc = True
        self.out_size = None
        self.actual_shape = None
        self.init_test_case()
......
......@@ -21,6 +21,7 @@ from op_test import OpTest
class TestConcatOp(OpTest):
    def setUp(self):
        self.test_gc = True
        self.op_type = "concat"
        self.init_test_data()
        self.inputs = {'X': [('x0', self.x0), ('x1', self.x1), ('x2', self.x2)]}
......
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle.fluid as fluid
import importlib
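# Turn eager deletion (GC) on before the op test modules below are imported,
# so that every test case pulled in by the star imports runs with GC enabled.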
fluid.core._set_eager_deletion_mode(0.0, 1.0, True)
from test_bilinear_interp_op import *
from test_concat_op import *
from test_elementwise_add_op import *
from test_elementwise_sub_op import *
from test_fill_constant_batch_size_like_op import *
from test_fill_zeros_like2_op import *
from test_gather_op import *
from test_gaussian_random_batch_size_like_op import *
from test_linear_chain_crf_op import *
from test_lod_reset_op import *
from test_lookup_table_op import *
from test_mean_op import *
from test_nearest_interp_op import *
from test_pad2d_op import *
from test_scatter_op import *
from test_sequence_concat import *
from test_seq_conv import *
from test_seq_pool import *
from test_sequence_expand_as import *
from test_sequence_expand import *
from test_sequence_pad_op import *
from test_sequence_unpad_op import *
from test_sequence_scatter_op import *
from test_sequence_slice_op import *
from test_slice_op import *
from test_space_to_depth_op import *
from test_squared_l2_distance_op import *
from test_uniform_random_batch_size_like_op import *
if __name__ == '__main__':
    unittest.main()
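The same switch can also be flipped for a single suite; a small standalone sketch under the same assumptions (paddle.fluid installed and the op test modules on the path; test_mean_op / TestMeanOp is just one of the modules imported above):

import unittest

import paddle.fluid as fluid

# Enable eager deletion before the test classes are imported and run.
fluid.core._set_eager_deletion_mode(0.0, 1.0, True)

from test_mean_op import TestMeanOp

if __name__ == '__main__':
    suite = unittest.TestLoader().loadTestsFromTestCase(TestMeanOp)
    unittest.TextTestRunner(verbosity=2).run(suite)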
......@@ -24,6 +24,7 @@ class TestElementwiseAddOp(OpTest):
        self.use_mkldnn = False

    def setUp(self):
        self.test_gc = True
        self.op_type = "elementwise_add"
        self.dtype = np.float32
        self.axis = -1
......
......@@ -20,6 +20,7 @@ from op_test import OpTest
class TestElementwiseOp(OpTest):
    def setUp(self):
        self.test_gc = True
        self.op_type = "elementwise_sub"
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [2, 3]).astype("float32"),
......
......@@ -21,6 +21,7 @@ from op_test import OpTest
class TestFillConstantBatchSizeLikeWhenFirstDimIsBatchSize(OpTest):
    def setUp(self):
        self.test_gc = True
        self.op_type = "fill_constant_batch_size_like"
        self.inputs = {'Input': np.random.random((219, 232)).astype("float32")}
        self.attrs = {'value': 3.5, 'shape': [-1, 132, 7]}
......@@ -35,6 +36,7 @@ class TestFillConstantBatchSizeLikeWhenFirstDimIsBatchSize(OpTest):
class TestFillConstantBatchSizeLikeWhenSecondDimIsBatchSize(OpTest):
    def setUp(self):
        self.test_gc = True
        self.op_type = "fill_constant_batch_size_like"
        self.inputs = {'Input': np.random.random((219, 232)).astype("float32")}
        self.attrs = {
......@@ -54,6 +56,7 @@ class TestFillConstantBatchSizeLikeWhenSecondDimIsBatchSize(OpTest):
class TestFillConstantBatchSizeLikeWithLoDTensor(OpTest):
    def setUp(self):
        self.test_gc = True
        self.op_type = "fill_constant_batch_size_like"
        self.inputs = {
            'Input': (np.random.random((31, 28)).astype("float32"),
......
......@@ -22,6 +22,7 @@ from op_test import OpTest
class TestFillZerosLike2Op(OpTest):
    def setUp(self):
        self.test_gc = True
        self.op_type = "fill_zeros_like2"
        self.dtype = np.float32
        self.init_dtype()
......
......@@ -21,6 +21,7 @@ from op_test import OpTest
class TestGatherOp(OpTest):
    def setUp(self):
        self.test_gc = True
        self.op_type = "gather"
        self.config()
        xnp = np.random.random(self.x_shape).astype("float32")
......
......@@ -21,6 +21,7 @@ from op_test import OpTest
class TestGaussianRandomBatchSizeLike(OpTest):
    def setUp(self):
        self.test_gc = True
        self.op_type = "gaussian_random_batch_size_like"
        self.inputs = {'Input': np.zeros((500, 2000), dtype="float32")}
        self.attrs = {'mean': 1., 'std': 2., 'shape': [-1, 2000]}
......
......@@ -143,6 +143,7 @@ class TestLinearChainCrfOp(OpTest):
        }

    def setUp(self):
        self.test_gc = True
        self.op_type = "linear_chain_crf"
        self.set_test_data()
......
......@@ -21,6 +21,7 @@ from op_test import OpTest
class TestLodResetOpByAttr(OpTest):
    def setUp(self):
        self.test_gc = True
        self.op_type = "lod_reset"
        x = np.random.random((10, 20)).astype("float32")
        lod = [[3, 2, 5]]
......@@ -42,6 +43,7 @@ class TestLodResetOpByAttr(OpTest):
class TestLodResetOpByInput(OpTest):
    def setUp(self):
        self.test_gc = True
        self.op_type = "lod_reset"
        x = np.random.random((10, 20)).astype("float32")
        lod = [[3, 2, 5]]
......@@ -64,6 +66,7 @@ class TestLodResetOpByInput(OpTest):
class TestLodResetOpBoth(OpTest):
    def setUp(self):
        self.test_gc = True
        self.op_type = "lod_reset"
        x = np.random.random((10, 20)).astype("float32")
        lod = [[3, 2, 5]]
......@@ -86,6 +89,7 @@ class TestLodResetOpBoth(OpTest):
class TestLodResetOpYIsLoDTensor(OpTest):
    def setUp(self):
        self.test_gc = True
        self.op_type = "lod_reset"
        x = np.random.random((10, 20)).astype("float32")
        lod = [[3, 2, 5]]
......
......@@ -24,6 +24,7 @@ import paddle.compat as cpt
class TestLookupTableOp(OpTest):
    def setUp(self):
        self.test_gc = True
        self.op_type = "lookup_table"
        table = np.random.random((17, 31)).astype("float32")
        ids = np.random.randint(0, 17, 4).astype("int64")
......@@ -40,6 +41,7 @@ class TestLookupTableOp(OpTest):
class TestLookupTableOpWithTensorIds(OpTest):
    def setUp(self):
        self.test_gc = True
        self.op_type = "lookup_table"
        table = np.random.random((17, 31)).astype("float32")
        ids = np.random.randint(
......
......@@ -22,6 +22,7 @@ import paddle.fluid.core as core
class TestMeanOp(OpTest):
    def setUp(self):
        self.test_gc = True
        self.op_type = "mean"
        self.dtype = np.float32
        self.init_dtype_type()
......
......@@ -67,6 +67,7 @@ def nearest_neighbor_interp_np(X,
class TestNearestInterpOp(OpTest):
    def setUp(self):
        self.test_gc = True
        self.out_size = None
        self.actual_shape = None
        self.init_test_case()
......@@ -189,6 +190,7 @@ class TestNearestNeighborInterpActualShape(TestNearestInterpOp):
class TestNearestInterpOpUint8(OpTest):
    def setUp(self):
        self.test_gc = True
        self.out_size = None
        self.actual_shape = None
        self.init_test_case()
......
......@@ -19,6 +19,7 @@ from op_test import OpTest
class TestPad2dOp(OpTest):
    def setUp(self):
        self.test_gc = True
        self.pad_value = 0.0
        self.variable_paddings = False
        self.initTestCase()
......
......@@ -21,6 +21,7 @@ from op_test import OpTest
class TestScatterOp(OpTest):
    def setUp(self):
        self.test_gc = True
        self.op_type = "scatter"
        ref_np = np.ones((3, 3)).astype("float32")
        index_np = np.array([1, 2]).astype("int32")
......
......@@ -69,6 +69,7 @@ def seqconv(x,
class TestSeqProject(OpTest):
    def setUp(self):
        self.test_gc = True
        self.init_test_case()
        self.op_type = 'sequence_conv'
......
......@@ -56,6 +56,7 @@ class TestSeqAvgPool(OpTest):
        compute_seqpool_avg(x, offset, out)

    def setUp(self):
        self.test_gc = True
        x, offset, out = self.set_data()
        self.compute(x, offset, out)
......
......@@ -26,6 +26,7 @@ class TestSequenceConcat(OpTest):
        self.out_lod = [19, 11]

    def setUp(self):
        self.test_gc = True
        x1 = np.random.random(size=(10, 80))
        x2 = np.random.random(size=(20, 80))
        self.setLoD()
......
......@@ -67,6 +67,7 @@ class TestSequenceExpand(OpTest):
        self.outputs = {'Out': (out, out_lod)}

    def setUp(self):
        self.test_gc = True
        self.op_type = 'sequence_expand'
        self.set_data()
        self.compute()
......
......@@ -21,6 +21,7 @@ from op_test import OpTest
class TestSequenceExpandAs(OpTest):
    def setUp(self):
        self.test_gc = True
        self.op_type = 'sequence_expand_as'
        self.set_data()
        self.compute()
......
......@@ -66,6 +66,7 @@ class TestSequencePadOp(OpTest):
        self.outputs = {'Out': out_data, 'Length': length}

    def setUp(self):
        self.test_gc = True
        self.op_type = 'sequence_pad'
        self.set_attr()
        self.set_data()
......
......@@ -22,6 +22,7 @@ class TestSequenceScatterOp(OpTest):
        return [[3, 5, 4]]

    def setUp(self):
        self.test_gc = True
        self.op_type = "sequence_scatter"
        X_data = np.random.uniform(0.1, 1.0, [3, 6]).astype('float32')
......
......@@ -49,6 +49,7 @@ class TestSequenceSliceOp(OpTest):
        self.length = [[10], [8], [6], [4], [2]]

    def setUp(self):
        self.test_gc = True
        self.op_type = "sequence_slice"
        self.set_data()
......
......@@ -46,6 +46,7 @@ class TestSequenceUnpadOp(OpTest):
        self.outputs = {'Out': (out.reshape(out_shape), out_lod)}

    def setUp(self):
        self.test_gc = True
        self.op_type = 'sequence_unpad'
        self.init()
        self.compute()
......
......@@ -22,6 +22,7 @@ from op_test import OpTest
class TestSliceOp(OpTest):
    def setUp(self):
        self.test_gc = True
        self.op_type = "slice"
        self.config()
        self.inputs = {'Input': self.input}
......
......@@ -41,6 +41,7 @@ class TestSpaceToDepthOp(OpTest):
out_[in_index] = in_[out_index]
    def setUp(self):
        self.test_gc = True
        self.init_data()
        self.op_type = "space_to_depth"
......
......@@ -21,6 +21,7 @@ from op_test import OpTest
class TestSquaredL2DistanceOp_f0(OpTest):
    def setUp(self):
        self.test_gc = True
        self.op_type = "squared_l2_distance"
        self.inputs = {
            'X': np.random.uniform(0.1, 0.6, (2, 3)).astype("float32"),
......@@ -42,6 +43,7 @@ class TestSquaredL2DistanceOp_f0(OpTest):
class TestSquaredL2DistanceOp_f1(OpTest):
    def setUp(self):
        self.test_gc = True
        self.op_type = "squared_l2_distance"
        self.inputs = {
            'X': np.random.uniform(0.1, 0.6, (2, 3)).astype("float32"),
......@@ -63,6 +65,7 @@ class TestSquaredL2DistanceOp_f1(OpTest):
class TestSquaredL2DistanceOp_f2(OpTest):
    def setUp(self):
        self.test_gc = True
        self.op_type = "squared_l2_distance"
        self.inputs = {
            'X': np.random.uniform(0.1, 0.6, (2, 3, 4)).astype("float32"),
......
......@@ -21,6 +21,7 @@ from op_test import OpTest
class TestUniformRandomBatchSizeLike(OpTest):
    def setUp(self):
        self.test_gc = True
        self.op_type = "uniform_random_batch_size_like"
        self.inputs = {'Input': np.zeros((500, 2000), dtype="float32")}
        self.attrs = {'min': 1., 'max': 2., 'shape': [-1, 2000]}
......