提交 9bb1e3ea 编写于 作者: Z zhupengyang 提交者: Tao Luo

add shape_check for op tests (#21391)

上级 63a5fb4c
...@@ -33,7 +33,7 @@ from paddle.fluid.executor import Executor ...@@ -33,7 +33,7 @@ from paddle.fluid.executor import Executor
from paddle.fluid.framework import Program, OpProtoHolder, Variable from paddle.fluid.framework import Program, OpProtoHolder, Variable
from testsuite import create_op, set_input, append_input_output, append_loss_ops from testsuite import create_op, set_input, append_input_output, append_loss_ops
from paddle.fluid import unique_name from paddle.fluid import unique_name
from white_list import op_accuracy_white_list, op_check_grad_white_list from white_list import op_accuracy_white_list, op_check_grad_white_list, check_shape_white_list
def _set_use_system_allocator(value=None): def _set_use_system_allocator(value=None):
...@@ -69,6 +69,10 @@ def get_numeric_gradient(place, ...@@ -69,6 +69,10 @@ def get_numeric_gradient(place,
tensor_to_check = scope.find_var(input_to_check).get_tensor() tensor_to_check = scope.find_var(input_to_check).get_tensor()
tensor_size = product(tensor_to_check.shape()) tensor_size = product(tensor_to_check.shape())
if not hasattr(get_numeric_gradient, 'check_shape_time'):
get_numeric_gradient.check_shape_time = 0
if tensor_size >= 100:
get_numeric_gradient.check_shape_time += 1
tensor_to_check_dtype = tensor_to_check._dtype() tensor_to_check_dtype = tensor_to_check._dtype()
if tensor_to_check_dtype == core.VarDesc.VarType.FP32: if tensor_to_check_dtype == core.VarDesc.VarType.FP32:
tensor_to_check_dtype = np.float32 tensor_to_check_dtype = np.float32
...@@ -170,6 +174,13 @@ class OpTest(unittest.TestCase): ...@@ -170,6 +174,13 @@ class OpTest(unittest.TestCase):
"This test do not have op_type in class attrs," "This test do not have op_type in class attrs,"
" please set self.__class__.op_type=the_real_op_type manually.") " please set self.__class__.op_type=the_real_op_type manually.")
if hasattr(
get_numeric_gradient, 'check_shape_time'
) and get_numeric_gradient.check_shape_time == 0 and OpTest.op_type not in check_shape_white_list.NOT_CHECK_OP_LIST and OpTest.op_type not in check_shape_white_list.NEED_TO_FIX_OP_LIST:
raise AssertionError(
"At least one input's shape should be large than or equal to 100 for "
+ OpTest.op_type + " Op.")
# cases and ops do no need check_grad # cases and ops do no need check_grad
if cls.__name__ in op_check_grad_white_list.NO_NEED_CHECK_GRAD_CASES \ if cls.__name__ in op_check_grad_white_list.NO_NEED_CHECK_GRAD_CASES \
or cls.op_type in op_check_grad_white_list.EMPTY_GRAD_OP_LIST: or cls.op_type in op_check_grad_white_list.EMPTY_GRAD_OP_LIST:
...@@ -1141,6 +1152,7 @@ class OpTest(unittest.TestCase): ...@@ -1141,6 +1152,7 @@ class OpTest(unittest.TestCase):
max_relative_error=0.005, max_relative_error=0.005,
user_defined_grads=None, user_defined_grads=None,
check_dygraph=True): check_dygraph=True):
OpTest.op_type = self.op_type
self.scope = core.Scope() self.scope = core.Scope()
op_inputs = self.inputs if hasattr(self, "inputs") else dict() op_inputs = self.inputs if hasattr(self, "inputs") else dict()
op_outputs = self.outputs if hasattr(self, "outputs") else dict() op_outputs = self.outputs if hasattr(self, "outputs") else dict()
......
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Ops exempted from the input-size (>= 100 elements) shape check.
# NOTE(review): presumably these ops cannot structurally accept large
# inputs, so the check is meaningless for them — confirm against op_test.py.
NOT_CHECK_OP_LIST = [
    # increment requires a 1-D input holding exactly one element.
    'increment',
]
# Ops whose existing unit tests do not yet satisfy the input-size
# (>= 100 elements) shape check. They are temporarily exempted here;
# NOTE(review): presumably each entry should be removed once its test
# is fixed — verify against the consumer in op_test.py.
NEED_TO_FIX_OP_LIST = [
    'sequence_scatter',
    'log_loss',
    'sequence_topk_avg_pooling',
    'matmul',
    'add_position_encoding',
    'margin_rank_loss',
    'elementwise_pow',
    'fused_elemwise_activation',
    'tree_conv',
    'mul',
    'teacher_student_sigmoid_loss',
]
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册