From 03026ceacc83b35db3c71d23b73420bf7cbbe3e9 Mon Sep 17 00:00:00 2001
From: chentianyu03
Date: Mon, 13 Sep 2021 10:55:09 +0800
Subject: [PATCH] Revert "change '/' method from scale Op to elementwise_div Op (#33279)" (#35650)

This reverts commit ae93d9c2e21461327753d9b7dc76f01ed5f76116.
---
 python/paddle/fluid/dygraph/math_op_patch.py     | 16 +++++++++++-----
 python/paddle/fluid/layers/math_op_patch.py      | 16 +++++++++++-----
 .../test_tensor_scalar_type_promotion_dynamic.py |  7 -------
 .../test_tensor_scalar_type_promotion_static.py  |  6 ------
 4 files changed, 22 insertions(+), 23 deletions(-)

diff --git a/python/paddle/fluid/dygraph/math_op_patch.py b/python/paddle/fluid/dygraph/math_op_patch.py
index 6b57544329e..b92e54d4868 100644
--- a/python/paddle/fluid/dygraph/math_op_patch.py
+++ b/python/paddle/fluid/dygraph/math_op_patch.py
@@ -46,7 +46,9 @@ _supported_promote_complex_types_ = [
     '__rsub__',
     '__mul__',
     '__rmul__',
+    '__div__',
     '__truediv__',
+    '__rdiv__',
     '__rtruediv__',
     '__matmul__',
 ]
@@ -170,6 +172,9 @@ def monkey_patch_math_varbase():
     def _scalar_mul_(var, value):
         return _scalar_elementwise_op_(var, value, 0.0)
 
+    def _scalar_div_(var, value):
+        return _scalar_elementwise_op_(var, 1.0 / value, 0.0)
+
     # for binary operator such as elementwise, compare
     def _binary_creator_(method_name,
                          op_type,
@@ -200,10 +205,7 @@ def monkey_patch_math_varbase():
         if op_type == 'elementwise_div' and self.dtype in _supported_int_dtype_:
             self = astype(self, 'float32')
         # here use `scale` replace `elementwise` to get better performance
-        # but only +, -, * can use this method
-        # NOTE(chentianyu03): / can not use `scale` method,because the result of
-        # `scale` method (self*(1/other_var)) do not exactly equal with the result
-        # of `elementwise_div` method.
+        # but only +, -, *, / can use this method
         if scalar_method is not None:
             return scalar_method(self, other_var)
         else:
@@ -296,8 +298,12 @@ def monkey_patch_math_varbase():
         ## a*b == b*a. Do not need to reverse explicitly
         ('__rmul__', _binary_creator_('__rmul__', 'elementwise_mul', False,
                                       _scalar_mul_)),
+        ('__div__', _binary_creator_('__div__', 'elementwise_div', False,
+                                     _scalar_div_)),
         ('__truediv__', _binary_creator_('__truediv__', 'elementwise_div',
-                                         False, None)),
+                                         False, _scalar_div_)),
+        ('__rdiv__', _binary_creator_('__rdiv__', 'elementwise_div', True,
+                                      None)),
         ('__rtruediv__', _binary_creator_('rtruediv__', 'elementwise_div',
                                           True, None)),
         ('__pow__', _binary_creator_('__pow__', 'elementwise_pow', False,
diff --git a/python/paddle/fluid/layers/math_op_patch.py b/python/paddle/fluid/layers/math_op_patch.py
index e1b61393498..47b42f65e48 100644
--- a/python/paddle/fluid/layers/math_op_patch.py
+++ b/python/paddle/fluid/layers/math_op_patch.py
@@ -40,7 +40,9 @@ EXPRESSION_MAP = {
     "__rsub__": "A -= B",
     "__mul__": "A * B",
     "__rmul__": "A *= B",
+    "__div__": "A / B",
     "__truediv__": "A / B",
+    "__rdiv__": "A /= B",
     "__rtruediv__": "A /= B",
     "__pow__": "A ** B",
     "__rpow__": "A **= B",
@@ -249,6 +251,9 @@ def monkey_patch_variable():
     def _scalar_mul_(var, value):
         return _scalar_op_(var, value, 0.0)
 
+    def _scalar_div_(var, value):
+        return _scalar_op_(var, 1.0 / value, 0.0)
+
     def _binary_creator_(method_name,
                          op_type,
                          reverse=False,
@@ -278,10 +283,7 @@ def monkey_patch_variable():
         if op_type == 'elementwise_div' and self.dtype in _supported_int_dtype_:
             self = astype(self, 'float32')
         # here use `scale` replace `elementwise` to get better performance
-        # but only +, -, * can use this method
-        # NOTE(chentianyu03): / can not use `scale` method,because the result of
-        # `scale` method (self*(1/other_var)) do not exactly equal with the result
-        # of `elementwise_div` method.
+        # but only +, -, *, / can use this method
         if scalar_method is not None:
             return scalar_method(self, other_var)
         else:
@@ -381,8 +383,12 @@ def monkey_patch_variable():
         # a*b == b*a. Do not need to reverse explicitly
         ('__rmul__', _binary_creator_('__rmul__', 'elementwise_mul', False,
                                       _scalar_mul_)),
+        ('__div__', _binary_creator_('__div__', 'elementwise_div', False,
+                                     _scalar_div_)),
         ('__truediv__', _binary_creator_('__truediv__', 'elementwise_div',
-                                         False, None)),
+                                         False, _scalar_div_)),
+        ('__rdiv__', _binary_creator_('__rdiv__', 'elementwise_div', True,
+                                      None)),
         ('__rtruediv__', _binary_creator_('__rtruediv__', 'elementwise_div',
                                           True, None)),
         ('__pow__', _binary_creator_('__pow__', 'elementwise_pow', False,
diff --git a/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_dynamic.py b/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_dynamic.py
index ba375f8b3c8..5f2dfbdd99e 100644
--- a/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_dynamic.py
+++ b/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_dynamic.py
@@ -187,13 +187,6 @@ class TestTensorScalarTypePromotionDynamic(unittest.TestCase):
         c = paddle.full([2, 2, 2], 0.5, dtype="float32")
         self.check_operation(a, b, c, '/')
 
-        # tensor(float32) / scalar(int)
-        # this behavior should be equal to elementwise_div Op
-        a = paddle.to_tensor([99, 99, 99], dtype='float32')
-        b = 100
-        c = a / paddle.to_tensor([100, 100, 100], dtype='float32')
-        self.check_operation(a, b, c, '/')
-
         # tensor(int64) / scalar(float, .0)
         a = paddle.ones([2, 2, 2], dtype='int64')
         b = 2.0
diff --git a/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_static.py b/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_static.py
index aa241616870..d697666e12d 100644
--- a/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_static.py
+++ b/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_static.py
@@ -218,12 +218,6 @@ class TestTensorScalarTypePromotionStatic(unittest.TestCase):
             c = paddle.full([2, 2, 2], 0.5, dtype="float32")
             self.check_operation(a, b, c, '/')
 
-        # this behavior should be equal to elementwise_div Op
-        a = paddle.full([2, 2, 2], 99, dtype="float32")
-        b = 100
-        c = a / paddle.full([2, 2, 2], 100, dtype="float32")
-        self.check_operation(a, b, c, '/')
-
         # tensor(int64) / scalar(float, .0)
         with program_guard(Program()):
             a = paddle.ones([2, 2, 2], dtype='int64')
--
GitLab
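
For readers unfamiliar with these files, `_binary_creator_` is a factory that manufactures each dunder method once and monkey-patches it onto the tensor/variable class: `reverse=True` produces the reflected variants, and `scalar_method` supplies the fast path this revert restores. The sketch below is a simplified stand-in (a hypothetical `Tensor` class in plain Python, not the actual Paddle implementation) showing the shape of that pattern:

```python
import operator

class Tensor:
    """Hypothetical stand-in for a framework tensor/variable."""

    def __init__(self, data):
        self.data = float(data)

def _scalar_div_(var, value):
    # The scale-style fast path restored by this revert:
    # divide by multiplying with the reciprocal.
    return Tensor(var.data * (1.0 / value))

def _binary_creator_(method_name, op, reverse=False, scalar_method=None):
    def __impl__(self, other):
        # Plain Python scalars take the registered fast path.
        if scalar_method is not None and isinstance(other, (int, float)):
            return scalar_method(self, other)
        if not isinstance(other, Tensor):
            other = Tensor(other)
        if reverse:  # reflected operator, e.g. __rtruediv__: other / self
            self, other = other, self
        return Tensor(op(self.data, other.data))

    __impl__.__name__ = method_name
    return __impl__

# Attach the generated methods, mirroring the restored table entries.
for name, rev, scalar in [('__truediv__', False, _scalar_div_),
                          ('__rtruediv__', True, None)]:
    setattr(Tensor, name, _binary_creator_(name, operator.truediv, rev, scalar))

print((Tensor(99.0) / 100).data)  # fast path: 99 * (1/100), approx. 0.99
print((100 / Tensor(50.0)).data)  # reflected path: 2.0
```

The table-driven registration is why the revert only has to add or edit tuple entries: `'__div__'` and `'__rdiv__'` (the Python 2 operator names) reappear, and `'__truediv__'` swaps its `scalar_method` from `None` back to `_scalar_div_`.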
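
The NOTE and tests deleted above concern a real floating-point subtlety: the restored `_scalar_div_` computes `x / value` as `x * (1.0 / value)` via the `scale` op, which rounds twice (once for the reciprocal, once for the multiply) and so can differ in the last bit from a single correctly rounded elementwise division. A minimal standalone numpy sketch (illustrative only, not PaddlePaddle code) reproduces the mismatch with the same 99/100 values the deleted tests used:

```python
import numpy as np

# float32 division, rounded once
a = np.float32(99.0)
b = np.float32(100.0)
true_div = a / b

# scale-style division: reciprocal first, then multiply -- rounded twice
via_reciprocal = a * (np.float32(1.0) / b)

print(true_div)                    # typically 0.99000001
print(via_reciprocal)              # typically 0.98999995
print(true_div == via_reciprocal)  # False: off by one ulp
```

That one-ulp gap is why the reverted commit (#33279) had routed `/` through `elementwise_div` and added the now-deleted exact-equality tests; this revert trades that bit-exactness back for the faster `scale` path.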