diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py
index 5006a8cf3b8cd45bcc7f56dbe3d2d5d9649c77ee..abf15778ea2fe724e2dba46fc25c9a41996fb563 100644
--- a/python/paddle/fluid/tests/unittests/op_test.py
+++ b/python/paddle/fluid/tests/unittests/op_test.py
@@ -34,6 +34,7 @@ from paddle.fluid.framework import Program, OpProtoHolder, Variable
 from testsuite import create_op, set_input, append_input_output, append_loss_ops
 from paddle.fluid import unique_name
 from white_list import op_accuracy_white_list, check_shape_white_list, compile_vs_runtime_white_list, no_check_set_white_list
+from white_list import op_threshold_white_list


 def _set_use_system_allocator(value=None):
@@ -898,15 +899,21 @@ class OpTest(unittest.TestCase):

     def check_output_with_place(self,
                                 place,
-                                atol,
+                                atol=0,
                                 no_check_set=None,
                                 equal_nan=False,
                                 check_dygraph=True,
                                 inplace_atol=None):
+        self.infer_dtype_from_inputs_outputs(self.inputs, self.outputs)
+        if self.dtype == np.float64 and \
+            self.op_type not in op_threshold_white_list.NEED_FIX_FP64_CHECK_OUTPUT_THRESHOLD_OP_LIST:
+            atol = 0
+
         if no_check_set is not None:
             if self.op_type not in no_check_set_white_list.no_check_set_white_list:
                 raise AssertionError(
                     "no_check_set of op %s must be set to None." % self.op_type)
+
         if check_dygraph:
             dygraph_outs = self._calc_dygraph_output(
                 place, no_check_set=no_check_set)
@@ -1145,18 +1152,30 @@ class OpTest(unittest.TestCase):
                          max_relative_error, msg_prefix):

         for a, b, name in six.moves.zip(numeric_grads, analytic_grads, names):
+            # It asserts np.abs(a - b) / np.abs(a) < max_relative_error, in which
+            # max_relative_error is 1e-7. According to the value of np.abs(a), we
+            # scale np.abs(a) to achieve a dynamic threshold. For example, if
+            # the value of np.abs(a) is between 1e-10 and 1e-8, we set np.abs(a)*=1e4.
+            # Therefore, it asserts np.abs(a - b) / (np.abs(a)*1e4) < max_relative_error,
+            # which is the same as np.abs(a - b) / np.abs(a) < max_relative_error*1e4.
             abs_a = np.abs(a)
-            abs_a[abs_a < 1e-3] = 1
+            if self.dtype == np.float64 and \
+                self.op_type not in op_threshold_white_list.NEED_FIX_FP64_CHECK_GRAD_THRESHOLD_OP_LIST:
+                abs_a[abs_a < 1e-10] = 1e-3
+                abs_a[np.logical_and(abs_a > 1e-10, abs_a <= 1e-8)] *= 1e4
+                abs_a[np.logical_and(abs_a > 1e-8, abs_a <= 1e-6)] *= 1e2
+            else:
+                abs_a[abs_a < 1e-3] = 1
             diff_mat = np.abs(a - b) / abs_a
             max_diff = np.max(diff_mat)

             def err_msg():
                 offset = np.argmax(diff_mat > max_relative_error)
-                return ("%s Variable %s max gradient diff %f over limit %f, "
-                        "the first error element is %d, expected %f, but got %f"
-                        ) % (msg_prefix, name, max_diff, max_relative_error,
-                             offset, a.flatten()[offset], b.flatten()[offset])
+                return ("%s error, %s variable %s max gradient diff %f over limit %f, "
+                        "the first error element is %d, expected %f, but got %f.") \
+                    % (self.op_type, msg_prefix, name, max_diff, max_relative_error,
+                       offset, a.flatten()[offset], b.flatten()[offset])

             self.assertLessEqual(max_diff, max_relative_error, err_msg())
@@ -1201,6 +1220,10 @@ class OpTest(unittest.TestCase):
         op_attrs = self.attrs if hasattr(self, "attrs") else dict()
         self._check_grad_helper()
+        if self.dtype == np.float64 and \
+            self.op_type not in op_threshold_white_list.NEED_FIX_FP64_CHECK_GRAD_THRESHOLD_OP_LIST:
+            numeric_grad_delta = 1e-5
+            max_relative_error = 1e-7

         cache_list = None
         if hasattr(self, "cache_name_list"):
diff --git a/python/paddle/fluid/tests/unittests/white_list/op_threshold_white_list.py b/python/paddle/fluid/tests/unittests/white_list/op_threshold_white_list.py
new file mode 100644
index 0000000000000000000000000000000000000000..1dea72d8300ab66c4f8e9e12db55cd759b516a9d
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/white_list/op_threshold_white_list.py
@@ -0,0 +1,25 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+NEED_FIX_FP64_CHECK_GRAD_THRESHOLD_OP_LIST = [
+    'affine_channel', 'bilinear_interp', 'bilinear_tensor_product', 'conv2d',
+    'conv3d', 'cross_entropy', 'depthwise_conv2d_transpose', 'elementwise_pow',
+    'grid_sampler', 'group_norm', 'gru', 'gru_unit', 'kldiv_loss', 'lstm',
+    'lstmp', 'max_pool2d_with_index', 'max_pool3d_with_index', 'norm', 'pool3d',
+    'reduce_prod', 'selu', 'sigmoid_cross_entropy_with_logits', 'soft_relu',
+    'softmax_with_cross_entropy', 'spp', 'teacher_student_sigmoid_loss',
+    'unpool', 'yolov3_loss'
+]
+
+NEED_FIX_FP64_CHECK_OUTPUT_THRESHOLD_OP_LIST = ['bilinear_interp']
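For context, the dynamic-threshold comparison added to _assert_is_close above can be read in isolation. Below is a minimal standalone sketch of that check for float64 gradients; the helper name relative_error_check and the sample arrays are illustrative placeholders, not part of the patch:

import numpy as np

def relative_error_check(a, b, max_relative_error=1e-7):
    # a: numeric gradient, b: analytic gradient (both float64 arrays).
    abs_a = np.abs(a)
    # Scale the denominator for small reference values, which relaxes the
    # effective threshold: e.g. for 1e-10 < |a| <= 1e-8 the assertion
    # becomes np.abs(a - b) / np.abs(a) < max_relative_error * 1e4.
    abs_a[abs_a < 1e-10] = 1e-3
    abs_a[np.logical_and(abs_a > 1e-10, abs_a <= 1e-8)] *= 1e4
    abs_a[np.logical_and(abs_a > 1e-8, abs_a <= 1e-6)] *= 1e2
    diff_mat = np.abs(a - b) / abs_a
    return np.max(diff_mat) <= max_relative_error

# Gradients of very different magnitudes all pass under the scaled threshold.
numeric = np.array([1e-9, 1e-7, 0.5])
analytic = np.array([1.0005e-9, 1.000001e-7, 0.5 + 1e-9])
assert relative_error_check(numeric, analytic)

Note that values with |a| > 1e-6 are left unscaled, so ordinary-magnitude gradients are still held to the strict 1e-7 relative error, while ops on the NEED_FIX_FP64_CHECK_GRAD_THRESHOLD_OP_LIST whitelist keep the old abs_a[abs_a < 1e-3] = 1 behavior until their kernels are fixed.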