diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py
index 8bb1317b032f8039c81a7262725d332bf5811cbf..666824a87d8b11fd86bdae4cedfcaca0d67e7491 100644
--- a/python/paddle/fluid/tests/unittests/op_test.py
+++ b/python/paddle/fluid/tests/unittests/op_test.py
@@ -33,7 +33,7 @@ from paddle.fluid.executor import Executor
 from paddle.fluid.framework import Program, OpProtoHolder, Variable
 from testsuite import create_op, set_input, append_input_output, append_loss_ops
 from paddle.fluid import unique_name
-from white_list import op_accuracy_white_list, op_check_grad_white_list
+from white_list import op_accuracy_white_list, op_check_grad_white_list, check_shape_white_list
 
 
 def _set_use_system_allocator(value=None):
@@ -69,6 +69,10 @@ def get_numeric_gradient(place,
 
     tensor_to_check = scope.find_var(input_to_check).get_tensor()
     tensor_size = product(tensor_to_check.shape())
+    if not hasattr(get_numeric_gradient, 'check_shape_time'):
+        get_numeric_gradient.check_shape_time = 0
+    if tensor_size >= 100:
+        get_numeric_gradient.check_shape_time += 1
     tensor_to_check_dtype = tensor_to_check._dtype()
     if tensor_to_check_dtype == core.VarDesc.VarType.FP32:
         tensor_to_check_dtype = np.float32
@@ -170,6 +174,13 @@ class OpTest(unittest.TestCase):
                 "This test do not have op_type in class attrs,"
                 " please set self.__class__.op_type=the_real_op_type manually.")
 
+        if hasattr(
+                get_numeric_gradient, 'check_shape_time'
+        ) and get_numeric_gradient.check_shape_time == 0 and OpTest.op_type not in check_shape_white_list.NOT_CHECK_OP_LIST and OpTest.op_type not in check_shape_white_list.NEED_TO_FIX_OP_LIST:
+            raise AssertionError(
+                "At least one input's size should be greater than or equal to 100 for "
+                + OpTest.op_type + " Op.")
+
         # cases and ops do no need check_grad
         if cls.__name__ in op_check_grad_white_list.NO_NEED_CHECK_GRAD_CASES \
             or cls.op_type in op_check_grad_white_list.EMPTY_GRAD_OP_LIST:
@@ -1141,6 +1152,7 @@ class OpTest(unittest.TestCase):
                               max_relative_error=0.005,
                               user_defined_grads=None,
                               check_dygraph=True):
+        OpTest.op_type = self.op_type
         self.scope = core.Scope()
         op_inputs = self.inputs if hasattr(self, "inputs") else dict()
         op_outputs = self.outputs if hasattr(self, "outputs") else dict()
diff --git a/python/paddle/fluid/tests/unittests/white_list/check_shape_white_list.py b/python/paddle/fluid/tests/unittests/white_list/check_shape_white_list.py
new file mode 100644
index 0000000000000000000000000000000000000000..62f2668a773a7812c7d5c5c912ebcb0dcb5ebbe4
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/white_list/check_shape_white_list.py
@@ -0,0 +1,25 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+NOT_CHECK_OP_LIST = [
+    # increment's input must be 1-d and hold only one element
+    'increment'
+]
+
+NEED_TO_FIX_OP_LIST = [
+    'sequence_scatter', 'log_loss', 'sequence_topk_avg_pooling', 'matmul',
+    'add_position_encoding', 'margin_rank_loss', 'elementwise_pow',
+    'fused_elemwise_activation', 'tree_conv', 'mul',
+    'teacher_student_sigmoid_loss'
+]
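
How the pieces fit together: get_numeric_gradient counts, in the function attribute check_shape_time, how many gradient-checked inputs contain at least 100 elements; check_grad_with_place records the op under test in OpTest.op_type; and tearDownClass raises an AssertionError when that counter is still zero for an op that appears in neither NOT_CHECK_OP_LIST nor NEED_TO_FIX_OP_LIST. A minimal sketch of a test that satisfies the new requirement follows; the "scale" op and the (13, 17) shape are illustrative choices, not part of this patch:

    import numpy as np
    from op_test import OpTest


    class TestScaleOp(OpTest):
        def setUp(self):
            self.op_type = "scale"
            # 13 * 17 = 221 elements >= 100, so get_numeric_gradient
            # increments check_shape_time and tearDownClass does not raise.
            self.inputs = {'X': np.random.random((13, 17)).astype("float64")}
            self.attrs = {'scale': -2.3}
            self.outputs = {'Out': self.inputs['X'] * self.attrs['scale']}

        def test_check_grad(self):
            self.check_grad(['X'], 'Out')

Ops whose inputs legitimately cannot reach 100 elements belong in NOT_CHECK_OP_LIST; ops whose existing tests still use undersized inputs are parked in NEED_TO_FIX_OP_LIST until those tests are enlarged.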