From ae93d9c2e21461327753d9b7dc76f01ed5f76116 Mon Sep 17 00:00:00 2001
From: chentianyu03
Date: Wed, 2 Jun 2021 19:17:45 +0800
Subject: [PATCH] change '/' method from scale Op to elementwise_div Op
 (#33279)

* fix the bug that the result of dividing by a scalar via the scale method
  does not exactly equal the result of the elementwise_div method

* remove the __div__ and __rdiv__ methods, which are not defined in Python 3

* modify the note

* add test case

* add test case
---
 python/paddle/fluid/dygraph/math_op_patch.py      | 16 +++++-----------
 python/paddle/fluid/layers/math_op_patch.py       | 16 +++++-----------
 .../test_tensor_scalar_type_promotion_dynamic.py  |  7 +++++++
 .../test_tensor_scalar_type_promotion_static.py   |  6 ++++++
 4 files changed, 23 insertions(+), 22 deletions(-)

diff --git a/python/paddle/fluid/dygraph/math_op_patch.py b/python/paddle/fluid/dygraph/math_op_patch.py
index e39fc3e23fe..a014e0a722a 100644
--- a/python/paddle/fluid/dygraph/math_op_patch.py
+++ b/python/paddle/fluid/dygraph/math_op_patch.py
@@ -46,9 +46,7 @@ _supported_promote_complex_types_ = [
     '__rsub__',
     '__mul__',
     '__rmul__',
-    '__div__',
     '__truediv__',
-    '__rdiv__',
     '__rtruediv__',
     '__matmul__',
 ]
@@ -168,9 +166,6 @@ def monkey_patch_math_varbase():
     def _scalar_mul_(var, value):
         return _scalar_elementwise_op_(var, value, 0.0)
 
-    def _scalar_div_(var, value):
-        return _scalar_elementwise_op_(var, 1.0 / value, 0.0)
-
     # for binary operator such as elementwise, compare
     def _binary_creator_(method_name,
                          op_type,
@@ -201,7 +196,10 @@ def monkey_patch_math_varbase():
             if op_type == 'elementwise_div' and self.dtype in _supported_int_dtype_:
                 self = astype(self, 'float32')
             # here use `scale` replace `elementwise` to get better performance
-            # but only +, -, *, / can use this method
+            # but only +, -, * can use this method
+            # NOTE(chentianyu03): / can not use the `scale` method, because the
+            # result of the `scale` method (self * (1 / other_var)) does not
+            # exactly equal the result of the `elementwise_div` method.
             if scalar_method is not None:
                 return scalar_method(self, other_var)
             else:
@@ -288,12 +286,8 @@ def monkey_patch_math_varbase():
         ##  a*b == b*a. Do not need to reverse explicitly
         ('__rmul__', _binary_creator_('__rmul__', 'elementwise_mul', False,
                                       _scalar_mul_)),
-        ('__div__', _binary_creator_('__div__', 'elementwise_div', False,
-                                     _scalar_div_)),
         ('__truediv__', _binary_creator_('__truediv__', 'elementwise_div',
-                                         False, _scalar_div_)),
-        ('__rdiv__', _binary_creator_('__rdiv__', 'elementwise_div', True,
-                                      None)),
+                                         False, None)),
         ('__rtruediv__', _binary_creator_('rtruediv__', 'elementwise_div',
                                           True, None)),
         ('__pow__', _binary_creator_('__pow__', 'elementwise_pow', False,
diff --git a/python/paddle/fluid/layers/math_op_patch.py b/python/paddle/fluid/layers/math_op_patch.py
index a2dee91dbef..2a57c1a907a 100644
--- a/python/paddle/fluid/layers/math_op_patch.py
+++ b/python/paddle/fluid/layers/math_op_patch.py
@@ -39,9 +39,7 @@ EXPRESSION_MAP = {
     "__rsub__": "A -= B",
     "__mul__": "A * B",
     "__rmul__": "A *= B",
-    "__div__": "A / B",
     "__truediv__": "A / B",
-    "__rdiv__": "A /= B",
     "__rtruediv__": "A /= B",
     "__pow__": "A ** B",
     "__rpow__": "A **= B",
@@ -209,9 +207,6 @@ def monkey_patch_variable():
     def _scalar_mul_(var, value):
         return _scalar_op_(var, value, 0.0)
 
-    def _scalar_div_(var, value):
-        return _scalar_op_(var, 1.0 / value, 0.0)
-
     def _binary_creator_(method_name,
                          op_type,
                          reverse=False,
@@ -241,7 +236,10 @@ def monkey_patch_variable():
            if op_type == 'elementwise_div' and self.dtype in _supported_int_dtype_:
                 self = astype(self, 'float32')
             # here use `scale` replace `elementwise` to get better performance
-            # but only +, -, *, / can use this method
+            # but only +, -, * can use this method
+            # NOTE(chentianyu03): / can not use the `scale` method, because the
+            # result of the `scale` method (self * (1 / other_var)) does not
+            # exactly equal the result of the `elementwise_div` method.
             if scalar_method is not None:
                 return scalar_method(self, other_var)
             else:
@@ -337,12 +335,8 @@ def monkey_patch_variable():
         #  a*b == b*a. Do not need to reverse explicitly
         ('__rmul__', _binary_creator_('__rmul__', 'elementwise_mul', False,
                                       _scalar_mul_)),
-        ('__div__', _binary_creator_('__div__', 'elementwise_div', False,
-                                     _scalar_div_)),
         ('__truediv__', _binary_creator_('__truediv__', 'elementwise_div',
-                                         False, _scalar_div_)),
-        ('__rdiv__', _binary_creator_('__rdiv__', 'elementwise_div', True,
-                                      None)),
+                                         False, None)),
         ('__rtruediv__', _binary_creator_('__rtruediv__', 'elementwise_div',
                                           True, None)),
         ('__pow__', _binary_creator_('__pow__', 'elementwise_pow', False,
diff --git a/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_dynamic.py b/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_dynamic.py
index 5f2dfbdd99e..ba375f8b3c8 100644
--- a/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_dynamic.py
+++ b/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_dynamic.py
@@ -187,6 +187,13 @@ class TestTensorScalarTypePromotionDynamic(unittest.TestCase):
         c = paddle.full([2, 2, 2], 0.5, dtype="float32")
         self.check_operation(a, b, c, '/')
 
+        # tensor(float32) / scalar(int)
+        # this behavior should be equal to elementwise_div Op
+        a = paddle.to_tensor([99, 99, 99], dtype='float32')
+        b = 100
+        c = a / paddle.to_tensor([100, 100, 100], dtype='float32')
+        self.check_operation(a, b, c, '/')
+
         # tensor(int64) / scalar(float, .0)
         a = paddle.ones([2, 2, 2], dtype='int64')
         b = 2.0
diff --git a/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_static.py b/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_static.py
index d697666e12d..aa241616870 100644
--- a/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_static.py
+++ b/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_static.py
@@ -218,6 +218,12 @@ class TestTensorScalarTypePromotionStatic(unittest.TestCase):
             c = paddle.full([2, 2, 2], 0.5, dtype="float32")
             self.check_operation(a, b, c, '/')
 
+            # this behavior should be equal to elementwise_div Op
+            a = paddle.full([2, 2, 2], 99, dtype="float32")
+            b = 100
+            c = a / paddle.full([2, 2, 2], 100, dtype="float32")
+            self.check_operation(a, b, c, '/')
+
         # tensor(int64) / scalar(float, .0)
         with program_guard(Program()):
             a = paddle.ones([2, 2, 2], dtype='int64')
-- 
GitLab
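Why the scale shortcut is unsafe for division: the scale path rewrites x / v as
x * (1 / v), and in float32 the reciprocal 1 / v is itself already rounded, so
the subsequent multiply can round to a different float than the correctly
rounded quotient that elementwise_div produces. The sketch below reproduces the
effect with plain NumPy standing in for the float32 arithmetic of the two ops
(NumPy is not part of this patch), using the same 99 / 100 values as the new
test cases:

    # Minimal sketch, assuming only NumPy: float32 multiply-by-reciprocal
    # versus float32 true division. Not part of the patch itself.
    import numpy as np

    a = np.float32(99.0)
    b = np.float32(100.0)

    via_scale = a * (np.float32(1.0) / b)  # old path: self * (1 / other_var)
    via_div = a / b                        # new path: elementwise_div

    print(via_scale)             # 0.98999995
    print(via_div)               # 0.99
    print(via_scale == via_div)  # False

This one-ulp gap is exactly what the new 99 / 100 test cases pin down, and why
the patch keeps the scale shortcut only for +, -, and *.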
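The __div__ and __rdiv__ deletions are safe because Python 3 dispatches the /
operator only to __truediv__ (and the reflected form to __rtruediv__); the
Python 2 names are never looked up. A small self-contained demonstration:

    # Python 3 never consults __div__ when evaluating `/`.
    class Demo:
        def __div__(self, other):
            return "__div__"

        def __truediv__(self, other):
            return "__truediv__"

    print(Demo() / 1)  # prints "__truediv__"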