未验证 提交 dc070ecf 编写于 作者: J joejiong 提交者: GitHub

Remove cast from paddle.pow api (#29134)

As the title
上级 4056c4f1
......@@ -13,12 +13,13 @@
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
import paddle.tensor as tensor
import paddle.fluid as fluid
from paddle.static import Program, program_guard
import numpy as np
import unittest
DYNAMIC = 1
STATIC = 2
......@@ -49,8 +50,8 @@ def _run_power(mode, x, y):
x_ = paddle.static.data(name="x", shape=x.shape, dtype=x.dtype)
y_ = y
res = paddle.pow(x_, y_)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
outs = exe.run(feed={'x': x}, fetch_list=[res])
return outs[0]
# y is tensor
......@@ -59,8 +60,8 @@ def _run_power(mode, x, y):
x_ = paddle.static.data(name="x", shape=x.shape, dtype=x.dtype)
y_ = paddle.static.data(name="y", shape=y.shape, dtype=y.dtype)
res = paddle.pow(x_, y_)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
outs = exe.run(feed={'x': x, 'y': y}, fetch_list=[res])
return outs[0]
......@@ -105,24 +106,6 @@ class TestPowerAPI(unittest.TestCase):
res = _run_power(STATIC, x, y)
self.assertTrue(np.allclose(res, np.power(x, y)))
# test 1-d float tensor ** 1-d int tensor
dims = (np.random.randint(200, 300), )
x = (np.random.rand(*dims) * 10).astype(np.float64)
y = (np.random.rand(*dims) * 10).astype(np.int64)
res = _run_power(DYNAMIC, x, y)
self.assertTrue(np.allclose(res, np.power(x, y)))
res = _run_power(STATIC, x, y)
self.assertTrue(np.allclose(res, np.power(x, y)))
# test 1-d int tensor ** 1-d float tensor
dims = (np.random.randint(200, 300), )
x = (np.random.rand(*dims) * 10).astype(np.int64)
y = (np.random.rand(*dims) * 10).astype(np.float64)
res = _run_power(DYNAMIC, x, y)
self.assertTrue(np.allclose(res, np.power(x, y)))
res = _run_power(STATIC, x, y)
self.assertTrue(np.allclose(res, np.power(x, y)))
# test 1-d int tensor ** 1-d int tensor
dims = (np.random.randint(200, 300), )
x = (np.random.rand(*dims) * 10).astype(np.int64)
......@@ -141,24 +124,6 @@ class TestPowerAPI(unittest.TestCase):
res = _run_power(STATIC, x, y)
self.assertTrue(np.allclose(res, np.power(x, y)))
# test 1-d int tensor ** 1-d int tensor
dims = (np.random.randint(200, 300), )
x = (np.random.rand(*dims) * 10).astype(np.int64)
y = (np.random.rand(*dims) * 10).astype(np.int32)
res = _run_power(DYNAMIC, x, y)
self.assertTrue(np.allclose(res, np.power(x, y)))
res = _run_power(STATIC, x, y)
self.assertTrue(np.allclose(res, np.power(x, y)))
# test 1-d int tensor ** 1-d int tensor
dims = (np.random.randint(200, 300), )
x = (np.random.rand(*dims) * 10).astype(np.int32)
y = (np.random.rand(*dims) * 10).astype(np.int64)
res = _run_power(DYNAMIC, x, y)
self.assertTrue(np.allclose(res, np.power(x, y)))
res = _run_power(STATIC, x, y)
self.assertTrue(np.allclose(res, np.power(x, y)))
# test 1-d int tensor ** 1-d int tensor
dims = (np.random.randint(200, 300), )
x = (np.random.rand(*dims) * 10).astype(np.float32)
......@@ -168,33 +133,6 @@ class TestPowerAPI(unittest.TestCase):
res = _run_power(STATIC, x, y)
self.assertTrue(np.allclose(res, np.power(x, y)))
# test 1-d float tensor ** 1-d float tensor (float64 ** float32)
dims = (np.random.randint(200, 300), )
x = (np.random.rand(*dims) * 10).astype(np.float64)
y = (np.random.rand(*dims) * 10).astype(np.float32)
res = _run_power(DYNAMIC, x, y)
self.assertTrue(np.allclose(res, np.power(x, y)))
res = _run_power(STATIC, x, y)
self.assertTrue(np.allclose(res, np.power(x, y)))
# test 1-d float tensor ** 1-d int tensor (float64 ** int32)
dims = (np.random.randint(200, 300), )
x = (np.random.rand(*dims) * 10).astype(np.float64)
y = (np.random.rand(*dims) * 10).astype(np.int32)
res = _run_power(DYNAMIC, x, y)
self.assertTrue(np.allclose(res, np.power(x, y)))
res = _run_power(STATIC, x, y)
self.assertTrue(np.allclose(res, np.power(x, y)))
# test 1-d float tensor ** 1-d int tensor (float32 ** int64)
dims = (np.random.randint(200, 300), )
x = (np.random.rand(*dims) * 10).astype(np.float32)
y = (np.random.rand(*dims) * 10).astype(np.int64)
res = _run_power(DYNAMIC, x, y)
self.assertTrue(np.allclose(res, np.power(x, y)))
res = _run_power(STATIC, x, y)
self.assertTrue(np.allclose(res, np.power(x, y)))
# test broadcast
dims = (np.random.randint(1, 10), np.random.randint(5, 10),
np.random.randint(5, 10))
......
......@@ -184,9 +184,6 @@ def pow(x, y, name=None):
if isinstance(y, (int, float)):
return core.ops.pow(x, 'factor', y)
elif isinstance(y, (paddle.Tensor, Variable)):
if x.dtype != y.dtype:
y = cast(y, dtype='float64')
x = cast(x, dtype='float64')
return _elementwise_op_in_dygraph(
x, y, axis=-1, act=None, op_name='elementwise_pow')
else:
......@@ -204,9 +201,6 @@ def pow(x, y, name=None):
elif isinstance(y, (paddle.Tensor, Variable)):
# TODO A potential speed improvement is supporting different types in C++ and removing the cast ops here
helper = LayerHelper('elementwise_pow', **locals())
if x.dtype != y.dtype:
y = cast(y, dtype='float64')
x = cast(x, dtype='float64')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
return _elementwise_op(LayerHelper('elementwise_pow', **locals()))
else:
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册