Unverified commit bde7ee97, authored by juncaipeng, committed by GitHub

Use unified threshold in all op test (#21814)

* modify test framework, test=develop

* fix test_fsp_op, test=develop

* up, test=develop

* up, test=develop

* up, test=develop

* add check for threshold, test=develop

* up, test=develop

* up, test=develop

* up, test=develop

* up, test=develop

* up, test=develop

* up, test=develop

* up, test=develop

* update, test=develop

* update, test=develop

* update, test=develop
Parent commit: d0406193
@@ -34,6 +34,7 @@ from paddle.fluid.framework import Program, OpProtoHolder, Variable
 from testsuite import create_op, set_input, append_input_output, append_loss_ops
 from paddle.fluid import unique_name
 from white_list import op_accuracy_white_list, check_shape_white_list, compile_vs_runtime_white_list, no_check_set_white_list
+from white_list import op_threshold_white_list


 def _set_use_system_allocator(value=None):
@@ -898,15 +899,21 @@ class OpTest(unittest.TestCase):

     def check_output_with_place(self,
                                 place,
-                                atol,
+                                atol=0,
                                 no_check_set=None,
                                 equal_nan=False,
                                 check_dygraph=True,
                                 inplace_atol=None):
+        self.infer_dtype_from_inputs_outputs(self.inputs, self.outputs)
+        if self.dtype == np.float64 and \
+                self.op_type not in op_threshold_white_list.NEED_FIX_FP64_CHECK_OUTPUT_THRESHOLD_OP_LIST:
+            atol = 0
+
         if no_check_set is not None:
             if self.op_type not in no_check_set_white_list.no_check_set_white_list:
                 raise AssertionError(
                     "no_check_set of op %s must be set to None." % self.op_type)
+
         if check_dygraph:
             dygraph_outs = self._calc_dygraph_output(
                 place, no_check_set=no_check_set)
@@ -1145,7 +1152,19 @@ class OpTest(unittest.TestCase):
                              max_relative_error, msg_prefix):

         for a, b, name in six.moves.zip(numeric_grads, analytic_grads, names):
+            # It asserts np.abs(a - b) / np.abs(a) < max_relative_error, in which
+            # max_relative_error is 1e-7. According to the value of np.abs(a), we
+            # change np.abs(a) to achieve dynamic threshold. For example, if
+            # the value of np.abs(a) is between 1e-10 and 1e-8, we set np.abs(a)*=1e4.
+            # Therefore, it asserts np.abs(a - b) / (np.abs(a)*1e4) < max_relative_error,
+            # which is the same as np.abs(a - b) / np.abs(a) < max_relative_error*1e4.
             abs_a = np.abs(a)
-            abs_a[abs_a < 1e-3] = 1
+            if self.dtype == np.float64 and \
+                    self.op_type not in op_threshold_white_list.NEED_FIX_FP64_CHECK_GRAD_THRESHOLD_OP_LIST:
+                abs_a[abs_a < 1e-10] = 1e-3
+                abs_a[np.logical_and(abs_a > 1e-10, abs_a <= 1e-8)] *= 1e4
+                abs_a[np.logical_and(abs_a > 1e-8, abs_a <= 1e-6)] *= 1e2
+            else:
+                abs_a[abs_a < 1e-3] = 1

             diff_mat = np.abs(a - b) / abs_a
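The comment block in the hunk above describes the dynamic threshold in prose. As a rough standalone illustration (not part of the patch; the helper name max_relative_diff is made up), the same scaling of the denominator can be written as:

import numpy as np

def max_relative_diff(numeric_grad, analytic_grad):
    # Sketch of the FP64 comparison above: scale small reference values up,
    # which loosens the effective max_relative_error for those elements.
    a, b = np.asarray(numeric_grad), np.asarray(analytic_grad)
    abs_a = np.abs(a)
    abs_a[abs_a < 1e-10] = 1e-3                                 # near-zero grads: fixed denominator
    abs_a[np.logical_and(abs_a > 1e-10, abs_a <= 1e-8)] *= 1e4  # effective limit: 1e-7 * 1e4 = 1e-3
    abs_a[np.logical_and(abs_a > 1e-8, abs_a <= 1e-6)] *= 1e2   # effective limit: 1e-7 * 1e2 = 1e-5
    return np.max(np.abs(a - b) / abs_a)

# A 1e-9 reference gradient with a 1e-13 absolute error passes the 1e-7 limit,
# because its denominator is scaled by 1e4; without scaling the ratio would be 1e-4.
assert max_relative_diff(np.array([1e-9]), np.array([1e-9 + 1e-13])) <= 1e-7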
@@ -1153,9 +1172,9 @@ class OpTest(unittest.TestCase):

             def err_msg():
                 offset = np.argmax(diff_mat > max_relative_error)
-                return ("%s Variable %s max gradient diff %f over limit %f, "
-                        "the first error element is %d, expected %f, but got %f"
-                        ) % (msg_prefix, name, max_diff, max_relative_error,
-                             offset, a.flatten()[offset], b.flatten()[offset])
+                return ("%s error, %s variable %s max gradient diff %f over limit %f, "
+                        "the first error element is %d, expected %f, but got %f.") \
+                    % (self.op_type, msg_prefix, name, max_diff, max_relative_error,
+                       offset, a.flatten()[offset], b.flatten()[offset])

             self.assertLessEqual(max_diff, max_relative_error, err_msg())
@@ -1201,6 +1220,10 @@ class OpTest(unittest.TestCase):
         op_attrs = self.attrs if hasattr(self, "attrs") else dict()
         self._check_grad_helper()
+        if self.dtype == np.float64 and \
+                self.op_type not in op_threshold_white_list.NEED_FIX_FP64_CHECK_GRAD_THRESHOLD_OP_LIST:
+            numeric_grad_delta = 1e-5
+            max_relative_error = 1e-7

         cache_list = None
         if hasattr(self, "cache_name_list"):
...
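Taken together, these hunks route every float64 op test through unified thresholds unless its op type appears in the new whitelists added below. A minimal self-contained sketch of that gating, with trimmed copies of the whitelists and made-up helper names, purely for illustration:

import numpy as np

# Trimmed copies of the whitelists below, just for this sketch.
OUTPUT_WHITELIST = ['bilinear_interp']
GRAD_WHITELIST = ['conv2d', 'conv3d', 'cross_entropy']

def resolve_output_atol(op_type, dtype, atol):
    # Non-whitelisted FP64 ops have atol forced to 0 before the output check.
    if dtype == np.float64 and op_type not in OUTPUT_WHITELIST:
        return 0
    return atol

def resolve_grad_thresholds(op_type, dtype, numeric_grad_delta, max_relative_error):
    # Non-whitelisted FP64 ops get the strict unified gradient thresholds.
    if dtype == np.float64 and op_type not in GRAD_WHITELIST:
        return 1e-5, 1e-7
    return numeric_grad_delta, max_relative_error

print(resolve_output_atol('mul', np.float64, atol=1e-5))            # 0
print(resolve_grad_thresholds('mul', np.float64, 0.005, 0.005))     # (1e-05, 1e-07)
print(resolve_grad_thresholds('conv2d', np.float64, 0.005, 0.005))  # (0.005, 0.005), whitelisted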
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
NEED_FIX_FP64_CHECK_GRAD_THRESHOLD_OP_LIST = [
'affine_channel', 'bilinear_interp', 'bilinear_tensor_product', 'conv2d',
'conv3d', 'cross_entropy', 'depthwise_conv2d_transpose', 'elementwise_pow',
'grid_sampler', 'group_norm', 'gru', 'gru_unit', 'kldiv_loss', 'lstm',
'lstmp', 'max_pool2d_with_index', 'max_pool3d_with_index', 'norm', 'pool3d',
'reduce_prod', 'selu', 'sigmoid_cross_entropy_with_logits', 'soft_relu',
'softmax_with_cross_entropy', 'spp', 'teacher_student_sigmoid_loss',
'unpool', 'yolov3_loss'
]
NEED_FIX_FP64_CHECK_OUTPUT_THRESHOLD_OP_LIST = ['bilinear_interp']
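With these defaults, a typical float64 op test no longer needs to pass atol or max_relative_error explicitly. A minimal sketch of such a test, assuming it sits next to op_test.py as the existing unit tests do (the op, shapes, and class name are only an example):

import unittest
import numpy as np
from op_test import OpTest

class TestElementwiseAddOp(OpTest):
    def setUp(self):
        self.op_type = "elementwise_add"
        x = np.random.random((13, 17)).astype(np.float64)
        y = np.random.random((13, 17)).astype(np.float64)
        self.inputs = {'X': x, 'Y': y}
        self.outputs = {'Out': x + y}

    def test_check_output(self):
        # No atol argument: the unified FP64 output threshold applies automatically.
        self.check_output()

    def test_check_grad(self):
        # No max_relative_error: the unified 1e-7 gradient limit applies automatically.
        self.check_grad(['X', 'Y'], 'Out')

if __name__ == '__main__':
    unittest.main()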