diff --git a/python/paddle/fluid/dygraph/math_op_patch.py b/python/paddle/fluid/dygraph/math_op_patch.py
index 6b57544329e7c7e136b4a107a7696a7ee4e9db96..b92e54d4868dfe759788fa1be8af2a3496a8137b 100644
--- a/python/paddle/fluid/dygraph/math_op_patch.py
+++ b/python/paddle/fluid/dygraph/math_op_patch.py
@@ -46,7 +46,9 @@ _supported_promote_complex_types_ = [
     '__rsub__',
     '__mul__',
     '__rmul__',
+    '__div__',
     '__truediv__',
+    '__rdiv__',
     '__rtruediv__',
     '__matmul__',
 ]
@@ -170,6 +172,9 @@ def monkey_patch_math_varbase():
     def _scalar_mul_(var, value):
         return _scalar_elementwise_op_(var, value, 0.0)
 
+    def _scalar_div_(var, value):
+        return _scalar_elementwise_op_(var, 1.0 / value, 0.0)
+
     # for binary operator such as elementwise, compare
     def _binary_creator_(method_name,
                          op_type,
@@ -200,10 +205,7 @@ def monkey_patch_math_varbase():
                 if op_type == 'elementwise_div' and self.dtype in _supported_int_dtype_:
                     self = astype(self, 'float32')
                 # here use `scale` replace `elementwise` to get better performance
-                # but only +, -, * can use this method
-                # NOTE(chentianyu03): / can not use `scale` method,because the result of
-                # `scale` method (self*(1/other_var)) do not exactly equal with the result
-                # of `elementwise_div` method.
+                # but only +, -, *, / can use this method
                 if scalar_method is not None:
                     return scalar_method(self, other_var)
                 else:
@@ -296,8 +298,12 @@ def monkey_patch_math_varbase():
         ## a*b == b*a. Do not need to reverse explicitly
         ('__rmul__', _binary_creator_('__rmul__', 'elementwise_mul', False,
                                       _scalar_mul_)),
+        ('__div__', _binary_creator_('__div__', 'elementwise_div', False,
+                                     _scalar_div_)),
         ('__truediv__', _binary_creator_('__truediv__', 'elementwise_div',
-                                         False, None)),
+                                         False, _scalar_div_)),
+        ('__rdiv__', _binary_creator_('__rdiv__', 'elementwise_div', True,
+                                      None)),
         ('__rtruediv__', _binary_creator_('rtruediv__', 'elementwise_div', True,
                                           None)),
         ('__pow__', _binary_creator_('__pow__', 'elementwise_pow', False,
diff --git a/python/paddle/fluid/layers/math_op_patch.py b/python/paddle/fluid/layers/math_op_patch.py
index e1b61393498488645a4623323acb2626d2a10fdd..47b42f65e4854afeac60bc0fc72dc2bfc737dec0 100644
--- a/python/paddle/fluid/layers/math_op_patch.py
+++ b/python/paddle/fluid/layers/math_op_patch.py
@@ -40,7 +40,9 @@ EXPRESSION_MAP = {
     "__rsub__": "A -= B",
     "__mul__": "A * B",
     "__rmul__": "A *= B",
+    "__div__": "A / B",
     "__truediv__": "A / B",
+    "__rdiv__": "A /= B",
     "__rtruediv__": "A /= B",
     "__pow__": "A ** B",
     "__rpow__": "A **= B",
@@ -249,6 +251,9 @@ def monkey_patch_variable():
     def _scalar_mul_(var, value):
         return _scalar_op_(var, value, 0.0)
 
+    def _scalar_div_(var, value):
+        return _scalar_op_(var, 1.0 / value, 0.0)
+
     def _binary_creator_(method_name,
                          op_type,
                          reverse=False,
@@ -278,10 +283,7 @@ def monkey_patch_variable():
                 if op_type == 'elementwise_div' and self.dtype in _supported_int_dtype_:
                     self = astype(self, 'float32')
                 # here use `scale` replace `elementwise` to get better performance
-                # but only +, -, * can use this method
-                # NOTE(chentianyu03): / can not use `scale` method,because the result of
-                # `scale` method (self*(1/other_var)) do not exactly equal with the result
-                # of `elementwise_div` method.
+                # but only +, -, *, / can use this method
                 if scalar_method is not None:
                     return scalar_method(self, other_var)
                 else:
@@ -381,8 +383,12 @@ def monkey_patch_variable():
         # a*b == b*a. Do not need to reverse explicitly
         ('__rmul__', _binary_creator_('__rmul__', 'elementwise_mul', False,
                                       _scalar_mul_)),
+        ('__div__', _binary_creator_('__div__', 'elementwise_div', False,
+                                     _scalar_div_)),
         ('__truediv__', _binary_creator_('__truediv__', 'elementwise_div',
-                                         False, None)),
+                                         False, _scalar_div_)),
+        ('__rdiv__', _binary_creator_('__rdiv__', 'elementwise_div', True,
+                                      None)),
         ('__rtruediv__', _binary_creator_('__rtruediv__', 'elementwise_div',
                                           True, None)),
         ('__pow__', _binary_creator_('__pow__', 'elementwise_pow', False,
diff --git a/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_dynamic.py b/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_dynamic.py
index ba375f8b3c8a41726c39cd890d53edcd33bbf6f4..5f2dfbdd99e1611c61883b9a34cccc5ac0ec8b71 100644
--- a/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_dynamic.py
+++ b/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_dynamic.py
@@ -187,13 +187,6 @@ class TestTensorScalarTypePromotionDynamic(unittest.TestCase):
         c = paddle.full([2, 2, 2], 0.5, dtype="float32")
         self.check_operation(a, b, c, '/')
 
-        # tensor(float32) / scalar(int)
-        # this behavior should be equal to elementwise_div Op
-        a = paddle.to_tensor([99, 99, 99], dtype='float32')
-        b = 100
-        c = a / paddle.to_tensor([100, 100, 100], dtype='float32')
-        self.check_operation(a, b, c, '/')
-
         # tensor(int64) / scalar(float, .0)
         a = paddle.ones([2, 2, 2], dtype='int64')
         b = 2.0
diff --git a/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_static.py b/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_static.py
index aa24161687004b5155b429752d684053a487abb4..d697666e12ddd15859b2908b42f43202e3de93ab 100644
--- a/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_static.py
+++ b/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_static.py
@@ -218,12 +218,6 @@ class TestTensorScalarTypePromotionStatic(unittest.TestCase):
             c = paddle.full([2, 2, 2], 0.5, dtype="float32")
             self.check_operation(a, b, c, '/')
 
-        # this behavior should be equal to elementwise_div Op
-        a = paddle.full([2, 2, 2], 99, dtype="float32")
-        b = 100
-        c = a / paddle.full([2, 2, 2], 100, dtype="float32")
-        self.check_operation(a, b, c, '/')
-
         # tensor(int64) / scalar(float, .0)
         with program_guard(Program()):
             a = paddle.ones([2, 2, 2], dtype='int64')