Unverified commit f4ce8a92, authored by Weilong Wu, committed by GitHub

[Eager] Support div(scalar) in eager mode (#42148)

* [Eager] Support div scalar in eager mode

* Update code and remove debug logs

* Remove list, use 'or' directly

* Remove useless statement
Parent 3b8f8b6c
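For context, a minimal sketch of the behavior this commit enables in eager mode: dividing an integer tensor by a Python scalar promotes the result to float32, mirroring the test expectations further down. The tensor shapes and dtypes come from the existing tests; the printed values describe the expected result rather than a captured run.

import paddle

a = paddle.ones([2, 2, 2], dtype='int64')   # int64 tensor
b = 2                                        # Python int scalar

c = a / b  # routes through the monkey-patched division operator

print(c.dtype)    # expected: paddle.float32 -- int operands are promoted before dividing
print(c.numpy())  # expected: all elements equal 0.5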
@@ -222,7 +222,9 @@ def monkey_patch_math_varbase():
         # so the calculation result here and the calculation result of numpy are
         # different after 6 decimal point. If necessary, we can also use float64 here.
         # torch's behavior here is consistent with ours
-        if op_type == 'elementwise_div' and self.dtype in _supported_int_dtype_:
+        if (op_type == "final_state_divide" or
+                op_type == "elementwise_div"
+            ) and self.dtype in _supported_int_dtype_:
             self = astype(self, 'float32')
         # here use `scale` replace `elementwise` to get better performance
         # but only +, -, *, / can use this method
@@ -277,7 +279,8 @@ def monkey_patch_math_varbase():
             self = other_var
             other_var = tmp
-        if op_type == 'elementwise_div' and self.dtype in _supported_int_dtype_:
+        if (op_type == "final_state_divide" or op_type == "elementwise_div"
+            ) and self.dtype in _supported_int_dtype_:
             self = astype(self, 'float32')
             other_var = astype(other_var, 'float32')
...
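In other words, the patched binary-op helper now treats the new eager-mode op name the same as the legacy one when deciding whether to promote integer operands before a division. Below is a simplified, self-contained sketch of that rule, not verbatim Paddle source: `_maybe_promote_for_div` is a hypothetical name, and `_supported_int_dtype_` here is a stand-in for the module-level list referenced in the hunks above.

import paddle

_supported_int_dtype_ = [paddle.int32, paddle.int64]  # stand-in for the real module-level list

def _maybe_promote_for_div(var, op_type):
    # Both the legacy name and the new eager "final state" name count as division.
    if (op_type == "final_state_divide" or
            op_type == "elementwise_div") and var.dtype in _supported_int_dtype_:
        # Cast integer inputs up so the quotient keeps its fractional part.
        var = var.astype('float32')
    return var

x = paddle.ones([2, 2], dtype='int64')
print(_maybe_promote_for_div(x, "final_state_divide").dtype)  # expected: paddle.float32

As the comments in the hunk note, for scalar operands the patch keeps substituting `scale` for the elementwise op where possible (only +, -, *, /) to get better performance.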
@@ -18,8 +18,7 @@ import unittest
 import numpy as np
 import paddle
-from paddle.fluid.framework import _enable_legacy_dygraph
-_enable_legacy_dygraph()
+from paddle.fluid.framework import _test_eager_guard
 
 # Support types are ref from `paddle.tensor.math`
 # - Related paddle dtypes:
@@ -52,7 +51,7 @@ class TestTensorScalarTypePromotionDynamic(unittest.TestCase):
         self.assertEqual(c_rlt.dtype, c.dtype)
         self.assertTrue(np.array_equal(c_rlt.numpy(), c.numpy()))
 
-    def test_tensor_add_scalar(self):
+    def func_tensor_add_scalar(self):
         # tensor(int64) + scalar(int)
         a = paddle.ones([2, 2, 2], dtype='int64')
         b = 1
@@ -83,7 +82,12 @@ class TestTensorScalarTypePromotionDynamic(unittest.TestCase):
         c = paddle.full([2, 2, 2], 2.5, dtype="float32")
         self.check_operation(a, b, c, '+')
 
-    def test_tensor_sub_scalar(self):
+    def test_tensor_add_scalar(self):
+        with _test_eager_guard():
+            self.func_tensor_add_scalar()
+        self.func_tensor_add_scalar()
+
+    def func_tensor_sub_scalar(self):
         # tensor(int64) - scalar(int)
         a = paddle.ones([2, 2, 2], dtype='int64')
         b = 1
@@ -114,7 +118,12 @@ class TestTensorScalarTypePromotionDynamic(unittest.TestCase):
         c = paddle.full([2, 2, 2], 0.5, dtype="float32")
         self.check_operation(a, b, c, '-')
 
-    def test_scalar_sub_tensor(self):
+    def test_tensor_sub_scalar(self):
+        with _test_eager_guard():
+            self.func_tensor_sub_scalar()
+        self.func_tensor_sub_scalar()
+
+    def func_scalar_sub_tensor(self):
         # scalar(int) - tensor(int64)
         a = 1
         b = paddle.ones([2, 2, 2], dtype='int64')
@@ -145,7 +154,12 @@ class TestTensorScalarTypePromotionDynamic(unittest.TestCase):
         c = paddle.full([2, 2, 2], -0.5, dtype="float32")
         self.check_operation(a, b, c, '-')
 
-    def test_tensor_mul_tensor(self):
+    def test_scalar_sub_tensor(self):
+        with _test_eager_guard():
+            self.func_scalar_sub_tensor()
+        self.func_scalar_sub_tensor()
+
+    def func_tensor_mul_tensor(self):
         # tensor(int64) * scalar(int)
         a = paddle.ones([2, 2, 2], dtype='int64')
         b = 1
@@ -176,7 +190,12 @@ class TestTensorScalarTypePromotionDynamic(unittest.TestCase):
         c = paddle.full([2, 2, 2], 1.5, dtype="float32")
         self.check_operation(a, b, c, '*')
 
-    def test_tensor_div_scalar(self):
+    def test_tensor_mul_tensor(self):
+        with _test_eager_guard():
+            self.func_tensor_mul_tensor()
+        self.func_tensor_mul_tensor()
+
+    def func_tensor_div_scalar(self):
         # tensor(int64) / scalar(int)
         a = paddle.ones([2, 2, 2], dtype='int64')
         b = 2
@@ -207,7 +226,12 @@ class TestTensorScalarTypePromotionDynamic(unittest.TestCase):
         c = paddle.full([2, 2, 2], 2, dtype="float32")
         self.check_operation(a, b, c, '/')
 
-    def test_scalar_div_tensor(self):
+    def test_tensor_div_scalar(self):
+        with _test_eager_guard():
+            self.func_tensor_div_scalar()
+        self.func_tensor_div_scalar()
+
+    def func_scalar_div_tensor(self):
         # scalar(int) / tensor(int64)
         a = 1
         b = paddle.full([2, 2, 2], 2, dtype='int64')
@@ -232,7 +256,12 @@ class TestTensorScalarTypePromotionDynamic(unittest.TestCase):
         c = paddle.full([2, 2, 2], 2, dtype="float32")
         self.check_operation(a, b, c, '/')
 
-    def test_tensor_pow_scalar(self):
+    def test_scalar_div_tensor(self):
+        with _test_eager_guard():
+            self.func_scalar_div_tensor()
+        self.func_scalar_div_tensor()
+
+    def func_tensor_pow_scalar(self):
         # tensor(int64) ** scalar(int)
         a = paddle.full([2, 2, 2], 2, dtype='int64')
         b = 3
@@ -257,7 +286,12 @@ class TestTensorScalarTypePromotionDynamic(unittest.TestCase):
         c = paddle.full([2, 2, 2], 8, dtype="float32")
         self.check_operation(a, b, c, '**')
 
-    def test_scalar_pow_tensor(self):
+    def test_tensor_pow_scalar(self):
+        with _test_eager_guard():
+            self.func_tensor_pow_scalar()
+        self.func_tensor_pow_scalar()
+
+    def func_scalar_pow_tensor(self):
         # scalar(int) ** tensor(int64)
         a = 3
         b = paddle.full([2, 2, 2], 2, dtype='int64')
@@ -282,15 +316,25 @@ class TestTensorScalarTypePromotionDynamic(unittest.TestCase):
         c = paddle.full([2, 2, 2], 9, dtype="float32")
         self.check_operation(a, b, c, '**')
 
+    def test_scalar_pow_tensor(self):
+        with _test_eager_guard():
+            self.func_scalar_pow_tensor()
+        self.func_scalar_pow_tensor()
+
     ## TODO: floordiv op kernel doesn't support float
-    def test_tensor_floordiv_scalar(self):
+    def func_tensor_floordiv_scalar(self):
         # tensor(int64) // scalar(int)
         a = paddle.full([2, 2, 2], 3, dtype='int64')
         b = 2
         c = paddle.full([2, 2, 2], 1, dtype="int64")
         self.check_operation(a, b, c, '//')
 
-    def test_tensor_mod_scalar(self):
+    def test_tensor_floordiv_scalar(self):
+        with _test_eager_guard():
+            self.func_tensor_floordiv_scalar()
+        self.func_tensor_floordiv_scalar()
+
+    def func_tensor_mod_scalar(self):
         # tensor(int64) % scalar(int)
         a = paddle.full([2, 2, 2], 3, dtype='int64')
         b = 2
@@ -315,6 +359,11 @@ class TestTensorScalarTypePromotionDynamic(unittest.TestCase):
         c = paddle.full([2, 2, 2], 1, dtype="float32")
         self.check_operation(a, b, c, '%')
 
+    def test_tensor_mod_scalar(self):
+        with _test_eager_guard():
+            self.func_tensor_mod_scalar()
+        self.func_tensor_mod_scalar()
+
 
 if __name__ == '__main__':
     unittest.main()
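The test changes above all follow one pattern: the old module-level _enable_legacy_dygraph() switch is dropped, each test body moves into a func_* helper, and the public test_* method runs that helper twice, once inside _test_eager_guard() (eager mode) and once outside the guard. A minimal self-contained sketch of that pattern, using a hypothetical ExampleDivTest case modeled on the tests above:

import unittest

import numpy as np
import paddle
from paddle.fluid.framework import _test_eager_guard


class ExampleDivTest(unittest.TestCase):
    def func_tensor_div_scalar(self):
        # tensor(int64) / scalar(int) should yield a float32 tensor
        a = paddle.ones([2, 2, 2], dtype='int64')
        c = a / 2
        expect = paddle.full([2, 2, 2], 0.5, dtype="float32")
        self.assertEqual(c.dtype, expect.dtype)
        self.assertTrue(np.array_equal(c.numpy(), expect.numpy()))

    def test_tensor_div_scalar(self):
        with _test_eager_guard():          # run once under eager mode
            self.func_tensor_div_scalar()
        self.func_tensor_div_scalar()      # and once outside the guard


if __name__ == '__main__':
    unittest.main()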