Unverified commit 842325dd, authored by Nyakku Shigure and committed by GitHub

[CodeStyle] remove some `yapf: disable` (#46410)

* remove `yapf: disable` from math.py

* disable test case only

* remove useless disable

* remove useless disable

* revert no_grad_set_white_list
Parent ed2bb051
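The change keeps yapf enabled for the whole file and instead marks only the hand-aligned `@parameterize(...)` tables with a trailing directive. A rough illustration of the two yapf directive styles involved (the table contents below are made up for illustration, not taken from the repo):

```python
# Block form (what the PR removes): nothing between the two markers is
# reformatted, so every statement in the region keeps its manual layout.
# yapf: disable
CASES = [
    ('small_input',  1,  2),
    ('large_input', 10, 20),
]
# yapf: enable

# Trailing form (what the PR switches to): only the statement that ends with
# the marker keeps its manual layout; the rest of the file is still formatted.
CASES = [
    ('small_input',  1,  2),
    ('large_input', 10, 20),
]  # yapf: disable
```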
@@ -1094,5 +1094,3 @@ class TestIfftShift(unittest.TestCase):
if __name__ == '__main__':
    unittest.main()
-# yapf: enable
@@ -925,5 +925,3 @@ class TestIfftShift(unittest.TestCase):
if __name__ == '__main__':
    unittest.main()
-# yapf: enable
@@ -642,7 +642,6 @@ def to_safe_name(s):
    return str(re.sub("[^a-zA-Z0-9_]+", "_", s))

-# yapf: disable
@place(DEVICES)
@parameterize(
    (TEST_CASE_NAME, 'x', 'frame_length', 'hop_length', 'axis'),
@@ -653,10 +652,19 @@ def to_safe_name(s):
        ('test_2d_input2', rand_x(2, np.float64, shape=[8, 150]), 50, 15, -1),
        ('test_3d_input1', rand_x(3, np.float64, shape=[150, 4, 2]), 50, 15, 0),
        ('test_3d_input2', rand_x(3, np.float64, shape=[4, 2, 150]), 50, 15, -1),
-    ])
+    ])  # yapf: disable
class TestFrame(unittest.TestCase):

    def test_frame(self):
-        np.testing.assert_allclose(frame_for_api_test(self.x, self.frame_length, self.hop_length, self.axis), paddle.signal.frame(paddle.to_tensor(self.x), self.frame_length, self.hop_length, self.axis), rtol=rtol.get(str(self.x.dtype)), atol=atol.get(str(self.x.dtype)))
+        np.testing.assert_allclose(frame_for_api_test(self.x, self.frame_length,
+                                                       self.hop_length,
+                                                       self.axis),
+                                   paddle.signal.frame(paddle.to_tensor(self.x),
+                                                       self.frame_length,
+                                                       self.hop_length,
+                                                       self.axis),
+                                   rtol=rtol.get(str(self.x.dtype)),
+                                   atol=atol.get(str(self.x.dtype)))

@place(DEVICES)
@@ -669,24 +677,29 @@ class TestFrame(unittest.TestCase):
        ('test_2d_input2', rand_x(2, np.float64, shape=[8, 150]), 50, 15, -1),
        ('test_3d_input1', rand_x(3, np.float64, shape=[150, 4, 2]), 50, 15, 0),
        ('test_3d_input2', rand_x(3, np.float64, shape=[4, 2, 150]), 50, 15, -1),
-    ])
+    ])  # yapf: disable
class TestFrameStatic(unittest.TestCase):

    def test_frame_static(self):
        paddle.enable_static()
        mp, sp = paddle.static.Program(), paddle.static.Program()
        with paddle.static.program_guard(mp, sp):
-            input = paddle.static.data('input', self.x.shape, dtype=self.x.dtype)
-            output = paddle.signal.frame(
-                input,
-                self.frame_length,
-                self.hop_length,
-                self.axis),
+            input = paddle.static.data('input',
+                                       self.x.shape,
+                                       dtype=self.x.dtype)
+            output = paddle.signal.frame(input, self.frame_length,
+                                         self.hop_length, self.axis),
            exe = paddle.static.Executor(self.place)
            exe.run(sp)
            [output] = exe.run(mp, feed={'input': self.x}, fetch_list=[output])
        paddle.disable_static()
-        np.testing.assert_allclose(frame_for_api_test(self.x, self.frame_length, self.hop_length, self.axis), output, rtol=rtol.get(str(self.x.dtype)), atol=atol.get(str(self.x.dtype)))
+        np.testing.assert_allclose(frame_for_api_test(self.x, self.frame_length,
+                                                       self.hop_length,
+                                                       self.axis),
+                                   output,
+                                   rtol=rtol.get(str(self.x.dtype)),
+                                   atol=atol.get(str(self.x.dtype)))

@place(DEVICES)
@@ -697,15 +710,13 @@ class TestFrameStatic(unittest.TestCase):
        ('test_hop_length', rand_x(1, np.float64, shape=[150]), 50, 0, -1, ValueError),
        ('test_frame_length1', rand_x(2, np.float64, shape=[150, 8]), 0, 15, 0, ValueError),
        ('test_frame_length2', rand_x(2, np.float64, shape=[150, 8]), 151, 15, 0, ValueError),
-    ])
+    ])  # yapf: disable
class TestFrameException(unittest.TestCase):

    def test_frame(self):
        with self.assertRaises(self.expect_exception):
-            paddle.signal.frame(
-                paddle.to_tensor(self.x),
-                self.frame_length,
-                self.hop_length,
-                self.axis)
+            paddle.signal.frame(paddle.to_tensor(self.x), self.frame_length,
+                                self.hop_length, self.axis)

@place(DEVICES)
@@ -718,10 +729,16 @@ class TestFrameException(unittest.TestCase):
        ('test_3d_input2', rand_x(3, np.float64, shape=[2, 40, 5]), 10, -1),
        ('test_4d_input1', rand_x(4, np.float64, shape=[8, 12, 5, 3]), 5, 0),
        ('test_4d_input2', rand_x(4, np.float64, shape=[3, 5, 12, 8]), 5, -1),
-    ])
+    ])  # yapf: disable
class TestOverlapAdd(unittest.TestCase):

    def test_overlap_add(self):
-        np.testing.assert_allclose(overlap_add_for_api_test(self.x, self.hop_length, self.axis), paddle.signal.overlap_add(paddle.to_tensor(self.x), self.hop_length, self.axis), rtol=rtol.get(str(self.x.dtype)), atol=atol.get(str(self.x.dtype)))
+        np.testing.assert_allclose(
+            overlap_add_for_api_test(self.x, self.hop_length, self.axis),
+            paddle.signal.overlap_add(paddle.to_tensor(self.x), self.hop_length,
+                                      self.axis),
+            rtol=rtol.get(str(self.x.dtype)),
+            atol=atol.get(str(self.x.dtype)))

@place(DEVICES)
@@ -734,23 +751,28 @@ class TestOverlapAdd(unittest.TestCase):
        ('test_3d_input2', rand_x(3, np.float64, shape=[2, 40, 5]), 10, -1),
        ('test_4d_input1', rand_x(4, np.float64, shape=[8, 12, 5, 3]), 5, 0),
        ('test_4d_input2', rand_x(4, np.float64, shape=[3, 5, 12, 8]), 5, -1),
-    ])
+    ])  # yapf: disable
class TestOverlapAddStatic(unittest.TestCase):

    def test_overlap_add_static(self):
        paddle.enable_static()
        mp, sp = paddle.static.Program(), paddle.static.Program()
        with paddle.static.program_guard(mp, sp):
-            input = paddle.static.data('input', self.x.shape, dtype=self.x.dtype)
-            output = paddle.signal.overlap_add(
-                input,
-                self.hop_length,
-                self.axis),
+            input = paddle.static.data('input',
+                                       self.x.shape,
+                                       dtype=self.x.dtype)
+            output = paddle.signal.overlap_add(input, self.hop_length,
+                                               self.axis),
            exe = paddle.static.Executor(self.place)
            exe.run(sp)
            [output] = exe.run(mp, feed={'input': self.x}, fetch_list=[output])
        paddle.disable_static()
-        np.testing.assert_allclose(overlap_add_for_api_test(self.x, self.hop_length, self.axis), output, rtol=rtol.get(str(self.x.dtype)), atol=atol.get(str(self.x.dtype)))
+        np.testing.assert_allclose(overlap_add_for_api_test(
+            self.x, self.hop_length, self.axis),
+                                   output,
+                                   rtol=rtol.get(str(self.x.dtype)),
+                                   atol=atol.get(str(self.x.dtype)))

@place(DEVICES)
@@ -759,14 +781,13 @@ class TestOverlapAddStatic(unittest.TestCase):
    [
        ('test_axis', rand_x(2, np.float64, shape=[3, 50]), 4, 2, ValueError),
        ('test_hop_length', rand_x(2, np.float64, shape=[50, 3]), -1, -1, ValueError),
-    ])
+    ])  # yapf: disable
class TestOverlapAddException(unittest.TestCase):

    def test_overlap_add(self):
        with self.assertRaises(self.expect_exception):
-            paddle.signal.overlap_add(
-                paddle.to_tensor(self.x),
-                self.hop_length,
-                self.axis)
+            paddle.signal.overlap_add(paddle.to_tensor(self.x), self.hop_length,
+                                      self.axis)

# ================= STFT
@@ -815,8 +836,9 @@ class TestOverlapAddException(unittest.TestCase):
         512, None, None, None, True, 'reflect', False, True),
        ('test_center', rand_x(2, np.float64, shape=[1, 160000]),
         512, None, None, None, False, 'reflect', False, True),
-    ])
+    ])  # yapf: disable
class TestStft(unittest.TestCase):

    def test_stft(self):
        if self.window is None:
            win_p = None
@@ -825,7 +847,15 @@ class TestStft(unittest.TestCase):
            win_p = paddle.to_tensor(self.window)
            win_l = self.window

-        np.testing.assert_allclose(stft(self.x, self.n_fft, self.hop_length, self.win_length, win_l, self.center, self.pad_mode), paddle.signal.stft(paddle.to_tensor(self.x), self.n_fft, self.hop_length, self.win_length, win_p, self.center, self.pad_mode, self.normalized, self.onesided), rtol=rtol.get(str(self.x.dtype)), atol=atol.get(str(self.x.dtype)))
+        np.testing.assert_allclose(
+            stft(self.x, self.n_fft, self.hop_length, self.win_length, win_l,
+                 self.center, self.pad_mode),
+            paddle.signal.stft(paddle.to_tensor(self.x), self.n_fft,
+                               self.hop_length, self.win_length, win_p,
+                               self.center, self.pad_mode, self.normalized,
+                               self.onesided),
+            rtol=rtol.get(str(self.x.dtype)),
+            atol=atol.get(str(self.x.dtype)))

@place(DEVICES)
@@ -848,8 +878,9 @@ class TestStft(unittest.TestCase):
         512, None, None, None, True, 'nonsense', False, True, AssertionError),
        ('test_complex_onesided', rand_x(1, np.float64, shape=[16000], complex=True),
         512, None, None, None, False, 'reflect', False, True, AssertionError),
-    ])
+    ])  # yapf: disable
class TestStftException(unittest.TestCase):

    def test_stft(self):
        if self.window is None:
            win_p = None
@@ -857,16 +888,10 @@ class TestStftException(unittest.TestCase):
            win_p = paddle.to_tensor(self.window)

        with self.assertRaises(self.expect_exception):
-            paddle.signal.stft(
-                paddle.to_tensor(self.x),
-                self.n_fft,
-                self.hop_length,
-                self.win_length,
-                win_p,
-                self.center,
-                self.pad_mode,
-                self.normalized,
-                self.onesided),
+            paddle.signal.stft(paddle.to_tensor(self.x), self.n_fft,
+                               self.hop_length, self.win_length, win_p,
+                               self.center, self.pad_mode, self.normalized,
+                               self.onesided),

@place(DEVICES)
@@ -887,8 +912,9 @@ class TestStftException(unittest.TestCase):
         512, None, None, None, False, False, True, None, False),
        ('test_length', rand_x(3, np.float64, shape=[1, 257, 471], complex=True),
         512, None, None, None, False, False, True, 1888, False),
-    ])
+    ])  # yapf: disable
class TestIstft(unittest.TestCase):

    def test_istft(self):
        if self.window is None:
            win_p = None
@@ -897,7 +923,15 @@ class TestIstft(unittest.TestCase):
            win_p = paddle.to_tensor(self.window)
            win_l = self.window

-        np.testing.assert_allclose(istft(self.x, self.hop_length, self.win_length, win_l, self.center, self.length), paddle.signal.istft(paddle.to_tensor(self.x), self.n_fft, self.hop_length, self.win_length, win_p, self.center, self.normalized, self.onesided, self.length, self.return_complex), rtol=rtol.get(str(self.x.dtype)), atol=atol.get(str(self.x.dtype)))
+        np.testing.assert_allclose(
+            istft(self.x, self.hop_length, self.win_length, win_l, self.center,
+                  self.length),
+            paddle.signal.istft(paddle.to_tensor(self.x), self.n_fft,
+                                self.hop_length, self.win_length, win_p,
+                                self.center, self.normalized, self.onesided,
+                                self.length, self.return_complex),
+            rtol=rtol.get(str(self.x.dtype)),
+            atol=atol.get(str(self.x.dtype)))

@place(DEVICES)
@@ -928,8 +962,9 @@ class TestIstft(unittest.TestCase):
         512, None, None, rand_x(1, np.float64, shape=[512], complex=True), True, False, True, None, False, AssertionError),
        ('test_NOLA', rand_x(3, np.float64, shape=[1, 257, 471], complex=True),
         512, 512, None, get_window('hann', 512), True, False, True, None, False, ValueError),
-    ])
+    ])  # yapf: disable
class TestIstftException(unittest.TestCase):

    def test_istft(self):
        if self.window is None:
            win_p = None
@@ -937,20 +972,11 @@ class TestIstftException(unittest.TestCase):
            win_p = paddle.to_tensor(self.window)

        with self.assertRaises(self.expect_exception):
-            paddle.signal.istft(
-                paddle.to_tensor(self.x),
-                self.n_fft,
-                self.hop_length,
-                self.win_length,
-                win_p,
-                self.center,
-                self.normalized,
-                self.onesided,
-                self.length,
-                self.return_complex),
+            paddle.signal.istft(paddle.to_tensor(self.x), self.n_fft,
+                                self.hop_length, self.win_length, win_p,
+                                self.center, self.normalized, self.onesided,
+                                self.length, self.return_complex),

-# yapf: enable

if __name__ == '__main__':
    unittest.main()
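All of the signal-processing test classes above follow the same pattern: `@place(DEVICES)` picks the devices to run on, and `@parameterize(fields, cases)` stamps out one test class per case tuple, binding each field name to an attribute such as `self.x` or `self.frame_length`. A hand-written sketch of what one generated `TestFrame` case roughly behaves like (the class name, random data, and shape check below are illustrative assumptions, not the repo's actual generated class):

```python
import unittest

import numpy as np
import paddle


class TestFrame2DInput2(unittest.TestCase):
    # Attributes that @parameterize would inject from the case tuple
    # ('test_2d_input2', rand_x(2, np.float64, shape=[8, 150]), 50, 15, -1).
    x = np.random.randn(8, 150).astype(np.float64)  # stand-in for rand_x(...)
    frame_length = 50
    hop_length = 15
    axis = -1

    def test_frame(self):
        out = paddle.signal.frame(paddle.to_tensor(self.x), self.frame_length,
                                  self.hop_length, self.axis)
        # Framing splits the signal axis into (frame_length, num_frames),
        # so the result has exactly one more dimension than the input.
        self.assertEqual(len(out.shape), self.x.ndim + 1)


if __name__ == '__main__':
    unittest.main()
```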
@@ -37,37 +37,35 @@ from ..fluid.dygraph.inplace_utils import inplace_apis_in_dygraph_only
from ..fluid.layers import utils

# TODO: define math functions
-# yapf: disable
from .ops import abs  # noqa: F401
from .ops import acos  # noqa: F401
from .ops import asin  # noqa: F401
from .ops import ceil  # noqa: F401
from .ops import ceil_  # noqa: F401
from .ops import cos  # noqa: F401
from .ops import tan  # noqa: F401
from .ops import sinh  # noqa: F401
from .ops import cosh  # noqa: F401
from .ops import exp  # noqa: F401
from .ops import exp_  # noqa: F401
from .ops import expm1  # noqa: F401
from .ops import floor  # noqa: F401
from .ops import floor_  # noqa: F401
from .ops import reciprocal  # noqa: F401
from .ops import reciprocal_  # noqa: F401
from .ops import round  # noqa: F401
from .ops import round_  # noqa: F401
from .ops import rsqrt  # noqa: F401
from .ops import rsqrt_  # noqa: F401
from .ops import square  # noqa: F401
from .ops import atan  # noqa: F401
from .ops import erf  # noqa: F401
from .ops import sqrt  # noqa: F401
from .ops import sqrt_  # noqa: F401
from .ops import sin  # noqa: F401
from .ops import asinh  # noqa: F401
from .ops import acosh  # noqa: F401
from .ops import atanh  # noqa: F401

from ..fluid.layers import elementwise_sub
from paddle import _C_ops, _legacy_C_ops
@@ -181,9 +179,9 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
        return dygraph_utils._append_activation_in_dygraph(out, act)
    elif _in_legacy_dygraph():
        _scale = scale.numpy().item(0) if isinstance(scale, Variable) else scale
-        out = _legacy_C_ops.scale(x, 'scale',
-                                  float(_scale), 'bias',
-                                  float(bias), 'bias_after_scale', bias_after_scale)
+        out = _legacy_C_ops.scale(x, 'scale', float(_scale), 'bias',
+                                  float(bias), 'bias_after_scale',
+                                  bias_after_scale)
        return dygraph_utils._append_activation_in_dygraph(out, act)

    check_variable_and_dtype(x, "x", [
@@ -202,8 +200,10 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
    helper = LayerHelper('scale', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)

-    helper.append_op(
-        type='scale', inputs=inputs, outputs={'Out': out}, attrs=attrs)
+    helper.append_op(type='scale',
+                     inputs=inputs,
+                     outputs={'Out': out},
+                     attrs=attrs)
    return helper.append_activation(out)
...@@ -242,14 +242,16 @@ def stanh(x, scale_a=0.67, scale_b=1.7159, name=None): ...@@ -242,14 +242,16 @@ def stanh(x, scale_a=0.67, scale_b=1.7159, name=None):
helper = LayerHelper('stanh', **locals()) helper = LayerHelper('stanh', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op( helper.append_op(type='stanh',
type='stanh', inputs={'X': x},
inputs={'X': x}, outputs={'Out': out},
outputs={'Out': out}, attrs={
attrs={'scale_a': scale_a, 'scale_a': scale_a,
'scale_b': scale_b}) 'scale_b': scale_b
})
return out return out
def multiplex(inputs, index, name=None): def multiplex(inputs, index, name=None):
""" """
...@@ -318,13 +320,15 @@ def multiplex(inputs, index, name=None): ...@@ -318,13 +320,15 @@ def multiplex(inputs, index, name=None):
check_variable_and_dtype(index, "index", ['int32', 'int64'], 'multiplex') check_variable_and_dtype(index, "index", ['int32', 'int64'], 'multiplex')
out = helper.create_variable_for_type_inference(inputs[0].dtype) out = helper.create_variable_for_type_inference(inputs[0].dtype)
helper.append_op( helper.append_op(type='multiplex',
type='multiplex', inputs={
inputs={'X': inputs, 'X': inputs,
'Ids': index}, 'Ids': index
outputs={'Out': [out]}) },
outputs={'Out': [out]})
return out return out
@inplace_apis_in_dygraph_only @inplace_apis_in_dygraph_only
def scale_(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None): def scale_(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
""" """
...@@ -335,9 +339,9 @@ def scale_(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None): ...@@ -335,9 +339,9 @@ def scale_(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
return _C_ops.scale_(x, scale, float(bias), bias_after_scale) return _C_ops.scale_(x, scale, float(bias), bias_after_scale)
if _in_legacy_dygraph(): if _in_legacy_dygraph():
_scale = scale.numpy().item(0) if isinstance(scale, Variable) else scale _scale = scale.numpy().item(0) if isinstance(scale, Variable) else scale
return _legacy_C_ops.scale_(x, 'scale', return _legacy_C_ops.scale_(x, 'scale', float(_scale), 'bias',
float(_scale), 'bias', float(bias), 'bias_after_scale',
float(bias), 'bias_after_scale', bias_after_scale) bias_after_scale)
def pow(x, y, name=None): def pow(x, y, name=None):
...@@ -392,23 +396,32 @@ def pow(x, y, name=None): ...@@ -392,23 +396,32 @@ def pow(x, y, name=None):
elif isinstance(y, (paddle.Tensor, Variable)): elif isinstance(y, (paddle.Tensor, Variable)):
return _C_ops.elementwise_pow(x, y) return _C_ops.elementwise_pow(x, y)
else: else:
raise TypeError('y must be scalar or tensor type, but received: %s '% (y.dtype)) raise TypeError(
'y must be scalar or tensor type, but received: %s ' %
(y.dtype))
if _in_legacy_dygraph(): if _in_legacy_dygraph():
if isinstance(y, (int, float)): if isinstance(y, (int, float)):
return _legacy_C_ops.pow(x, 'factor', y) return _legacy_C_ops.pow(x, 'factor', y)
elif isinstance(y, (paddle.Tensor, Variable)): elif isinstance(y, (paddle.Tensor, Variable)):
return _elementwise_op_in_dygraph( return _elementwise_op_in_dygraph(x,
x, y, axis=-1, act=None, op_name='elementwise_pow') y,
axis=-1,
act=None,
op_name='elementwise_pow')
else: else:
raise TypeError('y must be scalar or tensor type, but received: %s '% (y.dtype)) raise TypeError(
'y must be scalar or tensor type, but received: %s ' %
(y.dtype))
# in static graph mode # in static graph mode
if isinstance(y, (int, float)): if isinstance(y, (int, float)):
helper = LayerHelper('pow', **locals()) helper = LayerHelper('pow', **locals())
inputs = {'X': x} inputs = {'X': x}
attrs = {'factor': y} attrs = {'factor': y}
out = helper.create_variable_for_type_inference(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op( helper.append_op(type='pow',
type='pow', inputs=inputs, outputs={'Out': out}, attrs=attrs) inputs=inputs,
outputs={'Out': out},
attrs=attrs)
return out return out
elif isinstance(y, (paddle.Tensor, Variable)): elif isinstance(y, (paddle.Tensor, Variable)):
# TODO A potential speed improvement is supporting different types in C++ and removing the cast ops here # TODO A potential speed improvement is supporting different types in C++ and removing the cast ops here
...@@ -416,7 +429,8 @@ def pow(x, y, name=None): ...@@ -416,7 +429,8 @@ def pow(x, y, name=None):
out = helper.create_variable_for_type_inference(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
return _elementwise_op(LayerHelper('elementwise_pow', **locals())) return _elementwise_op(LayerHelper('elementwise_pow', **locals()))
else: else:
raise TypeError('y must be scalar or tensor type, but received: %s '% (type(y))) raise TypeError('y must be scalar or tensor type, but received: %s ' %
(type(y)))
OP_NAMEMAPPING = { OP_NAMEMAPPING = {
...@@ -431,6 +445,7 @@ OP_NAMEMAPPING = { ...@@ -431,6 +445,7 @@ OP_NAMEMAPPING = {
'elementwise_mod': 'remainder', 'elementwise_mod': 'remainder',
} }
@dygraph_only @dygraph_only
def _elementwise_op_in_dygraph(x, def _elementwise_op_in_dygraph(x,
y, y,
...@@ -438,23 +453,28 @@ def _elementwise_op_in_dygraph(x, ...@@ -438,23 +453,28 @@ def _elementwise_op_in_dygraph(x,
act=None, act=None,
use_mkldnn=False, use_mkldnn=False,
op_name=None): op_name=None):
def is_inplace(op_name): def is_inplace(op_name):
return op_name[-1] == "_" return op_name[-1] == "_"
if op_name not in OP_NAMEMAPPING.keys() or axis != -1: if op_name not in OP_NAMEMAPPING.keys() or axis != -1:
op = getattr(_legacy_C_ops, op_name) op = getattr(_legacy_C_ops, op_name)
out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn) out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)
else: else:
if in_dygraph_mode(): if in_dygraph_mode():
op = getattr(_C_ops, OP_NAMEMAPPING[op_name] if not is_inplace(op_name) else op_name) op = getattr(
_C_ops,
OP_NAMEMAPPING[op_name] if not is_inplace(op_name) else op_name)
out = op(x, y) out = op(x, y)
if _in_legacy_dygraph(): if _in_legacy_dygraph():
op = getattr(_legacy_C_ops, op_name) op = getattr(_legacy_C_ops, op_name)
out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn) out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)
return dygraph_utils._append_activation_in_dygraph( return dygraph_utils._append_activation_in_dygraph(out,
out, act, use_mkldnn=use_mkldnn) act,
use_mkldnn=use_mkldnn)
def _elementwise_op(helper): def _elementwise_op(helper):
op_type = helper.layer_type op_type = helper.layer_type
...@@ -481,15 +501,20 @@ def _elementwise_op(helper): ...@@ -481,15 +501,20 @@ def _elementwise_op(helper):
if name is None: if name is None:
out = helper.create_variable_for_type_inference(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
else: else:
out = helper.create_variable(name=name, dtype=x.dtype, persistable=False) out = helper.create_variable(name=name,
dtype=x.dtype,
helper.append_op( persistable=False)
type=op_type,
inputs={'X': x, helper.append_op(type=op_type,
'Y': y}, inputs={
outputs={'Out': out}, 'X': x,
attrs={'axis': axis, 'Y': y
'use_mkldnn': use_mkldnn}) },
outputs={'Out': out},
attrs={
'axis': axis,
'use_mkldnn': use_mkldnn
})
return helper.append_activation(out) return helper.append_activation(out)
...@@ -549,7 +574,7 @@ def add(x, y, name=None): ...@@ -549,7 +574,7 @@ def add(x, y, name=None):
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.add( x, y) return _C_ops.add(x, y)
else: else:
if _in_legacy_dygraph(): if _in_legacy_dygraph():
return _legacy_C_ops.elementwise_add(x, y) return _legacy_C_ops.elementwise_add(x, y)
...@@ -568,13 +593,14 @@ def add_(x, y, name=None): ...@@ -568,13 +593,14 @@ def add_(x, y, name=None):
out_shape = broadcast_shape(x.shape, y.shape) out_shape = broadcast_shape(x.shape, y.shape)
if out_shape != x.shape: if out_shape != x.shape:
raise ValueError("The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(out_shape, x.shape)) raise ValueError(
"The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation."
.format(out_shape, x.shape))
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.add_(x, y) return _C_ops.add_(x, y)
else: else:
out = _elementwise_op_in_dygraph( out = _elementwise_op_in_dygraph(x, y, axis=axis, op_name=op_type)
x, y, axis=axis, op_name=op_type)
return out return out
...@@ -639,8 +665,11 @@ def subtract(x, y, name=None): ...@@ -639,8 +665,11 @@ def subtract(x, y, name=None):
return _C_ops.subtract(x, y) return _C_ops.subtract(x, y)
else: else:
if _in_legacy_dygraph(): if _in_legacy_dygraph():
return _elementwise_op_in_dygraph( return _elementwise_op_in_dygraph(x,
x, y, axis=axis, act=act, op_name=op_type) y,
axis=axis,
act=act,
op_name=op_type)
else: else:
return _elementwise_op(LayerHelper(op_type, **locals())) return _elementwise_op(LayerHelper(op_type, **locals()))
...@@ -656,13 +685,18 @@ def subtract_(x, y, name=None): ...@@ -656,13 +685,18 @@ def subtract_(x, y, name=None):
out_shape = broadcast_shape(x.shape, y.shape) out_shape = broadcast_shape(x.shape, y.shape)
if out_shape != x.shape: if out_shape != x.shape:
raise ValueError("The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(out_shape, x.shape)) raise ValueError(
"The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation."
.format(out_shape, x.shape))
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.subtract_(x, y) return _C_ops.subtract_(x, y)
else: else:
out = _elementwise_op_in_dygraph( out = _elementwise_op_in_dygraph(x,
x, y, axis=axis, act=act, op_name='elementwise_sub_') y,
axis=axis,
act=act,
op_name='elementwise_sub_')
return out return out
...@@ -700,11 +734,14 @@ def divide(x, y, name=None): ...@@ -700,11 +734,14 @@ def divide(x, y, name=None):
axis = -1 axis = -1
act = None act = None
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.divide( x, y) return _C_ops.divide(x, y)
else: else:
if _in_legacy_dygraph(): if _in_legacy_dygraph():
return _elementwise_op_in_dygraph( return _elementwise_op_in_dygraph(x,
x, y, axis=axis, act=act, op_name=op_type) y,
axis=axis,
act=act,
op_name=op_type)
else: else:
return _elementwise_op(LayerHelper(op_type, **locals())) return _elementwise_op(LayerHelper(op_type, **locals()))
...@@ -744,8 +781,7 @@ def floor_divide(x, y, name=None): ...@@ -744,8 +781,7 @@ def floor_divide(x, y, name=None):
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.floor_divide(x, y) return _C_ops.floor_divide(x, y)
elif _in_legacy_dygraph(): elif _in_legacy_dygraph():
return _elementwise_op_in_dygraph( return _elementwise_op_in_dygraph(x, y, axis=axis, op_name=op_type)
x, y, axis=axis, op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals())) return _elementwise_op(LayerHelper(op_type, **locals()))
...@@ -787,8 +823,7 @@ def remainder(x, y, name=None): ...@@ -787,8 +823,7 @@ def remainder(x, y, name=None):
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.remainder(x, y) return _C_ops.remainder(x, y)
elif _in_legacy_dygraph(): elif _in_legacy_dygraph():
return _elementwise_op_in_dygraph( return _elementwise_op_in_dygraph(x, y, axis=axis, op_name=op_type)
x, y, axis=axis, op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals())) return _elementwise_op(LayerHelper(op_type, **locals()))
...@@ -805,8 +840,8 @@ def remainder_(x, y, name=None): ...@@ -805,8 +840,8 @@ def remainder_(x, y, name=None):
out_shape = broadcast_shape(x.shape, y.shape) out_shape = broadcast_shape(x.shape, y.shape)
if out_shape != x.shape: if out_shape != x.shape:
raise ValueError( raise ValueError(
"The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format( "The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation."
out_shape, x.shape)) .format(out_shape, x.shape))
return _elementwise_op_in_dygraph(x, y, axis=axis, op_name=op_type) return _elementwise_op_in_dygraph(x, y, axis=axis, op_name=op_type)
...@@ -858,8 +893,11 @@ def multiply(x, y, name=None): ...@@ -858,8 +893,11 @@ def multiply(x, y, name=None):
return _C_ops.multiply(x, y) return _C_ops.multiply(x, y)
else: else:
if _in_legacy_dygraph(): if _in_legacy_dygraph():
return _elementwise_op_in_dygraph( return _elementwise_op_in_dygraph(x,
x, y, axis=axis, act=act, op_name=op_type) y,
axis=axis,
act=act,
op_name=op_type)
else: else:
if x.dtype != y.dtype: if x.dtype != y.dtype:
raise TypeError( raise TypeError(
...@@ -868,6 +906,7 @@ def multiply(x, y, name=None): ...@@ -868,6 +906,7 @@ def multiply(x, y, name=None):
return _elementwise_op(LayerHelper(op_type, **locals())) return _elementwise_op(LayerHelper(op_type, **locals()))
def maximum(x, y, name=None): def maximum(x, y, name=None):
""" """
Compare two tensors and returns a new tensor containing the element-wise maxima. The equation is: Compare two tensors and returns a new tensor containing the element-wise maxima. The equation is:
...@@ -925,10 +964,14 @@ def maximum(x, y, name=None): ...@@ -925,10 +964,14 @@ def maximum(x, y, name=None):
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.maximum(x, y) return _C_ops.maximum(x, y)
elif _in_legacy_dygraph(): elif _in_legacy_dygraph():
return _elementwise_op_in_dygraph( return _elementwise_op_in_dygraph(x,
x, y, axis=axis, act=act, op_name=op_type) y,
axis=axis,
act=act,
op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals())) return _elementwise_op(LayerHelper(op_type, **locals()))
def minimum(x, y, name=None): def minimum(x, y, name=None):
""" """
Compare two tensors and return a new tensor containing the element-wise minima. The equation is: Compare two tensors and return a new tensor containing the element-wise minima. The equation is:
...@@ -986,10 +1029,14 @@ def minimum(x, y, name=None): ...@@ -986,10 +1029,14 @@ def minimum(x, y, name=None):
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.minimum(x, y) return _C_ops.minimum(x, y)
elif _in_legacy_dygraph(): elif _in_legacy_dygraph():
return _elementwise_op_in_dygraph( return _elementwise_op_in_dygraph(x,
x, y, axis=axis, act=act, op_name=op_type) y,
axis=axis,
act=act,
op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals())) return _elementwise_op(LayerHelper(op_type, **locals()))
def fmax(x, y, name=None): def fmax(x, y, name=None):
""" """
Compares the elements at the corresponding positions of the two tensors and returns a new tensor containing the maximum value of the element. Compares the elements at the corresponding positions of the two tensors and returns a new tensor containing the maximum value of the element.
...@@ -1049,10 +1096,14 @@ def fmax(x, y, name=None): ...@@ -1049,10 +1096,14 @@ def fmax(x, y, name=None):
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.fmax(x, y, axis) return _C_ops.fmax(x, y, axis)
if _in_legacy_dygraph(): if _in_legacy_dygraph():
return _elementwise_op_in_dygraph( return _elementwise_op_in_dygraph(x,
x, y, axis=axis, act=act, op_name=op_type) y,
axis=axis,
act=act,
op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals())) return _elementwise_op(LayerHelper(op_type, **locals()))
def fmin(x, y, name=None): def fmin(x, y, name=None):
""" """
Compares the elements at the corresponding positions of the two tensors and returns a new tensor containing the minimum value of the element. Compares the elements at the corresponding positions of the two tensors and returns a new tensor containing the minimum value of the element.
...@@ -1112,8 +1163,11 @@ def fmin(x, y, name=None): ...@@ -1112,8 +1163,11 @@ def fmin(x, y, name=None):
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.fmin(x, y, axis) return _C_ops.fmin(x, y, axis)
if _in_legacy_dygraph(): if _in_legacy_dygraph():
return _elementwise_op_in_dygraph( return _elementwise_op_in_dygraph(x,
x, y, axis=axis, act=act, op_name=op_type) y,
axis=axis,
act=act,
op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals())) return _elementwise_op(LayerHelper(op_type, **locals()))
...@@ -1209,43 +1263,35 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None): ...@@ -1209,43 +1263,35 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None):
if _in_legacy_dygraph(): if _in_legacy_dygraph():
if dtype_flag: if dtype_flag:
return _legacy_C_ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim, return _legacy_C_ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all_flag, 'in_dtype', 'reduce_all', reduce_all_flag,
x.dtype, 'out_dtype', dtype) 'in_dtype', x.dtype, 'out_dtype',
dtype)
else: else:
return _legacy_C_ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim, return _legacy_C_ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all_flag) 'reduce_all', reduce_all_flag)
attrs = { attrs = {'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all_flag}
'dim': axis,
'keep_dim': keepdim,
'reduce_all': reduce_all_flag
}
if dtype_flag: if dtype_flag:
attrs.update({ attrs.update({'in_dtype': x.dtype, 'out_dtype': dtype})
'in_dtype': x.dtype,
'out_dtype': dtype
})
check_variable_and_dtype( check_variable_and_dtype(x, 'x', [
x, 'x', ['bool', 'float16', 'float32', 'float64', 'bool', 'float16', 'float32', 'float64', 'int16', 'int32', 'int64',
'int16', 'int32', 'int64', 'complex64', 'complex128', 'complex64', 'complex128', u'bool', u'float16', u'float32', u'float64',
u'bool', u'float16', u'float32', u'float64', u'int32', u'int64', u'complex64', u'complex128'
u'int32', u'int64', u'complex64', u'complex128'], 'sum') ], 'sum')
check_type(axis, 'axis', (int, list, tuple, type(None), Variable), 'sum') check_type(axis, 'axis', (int, list, tuple, type(None), Variable), 'sum')
helper = LayerHelper('sum', **locals()) helper = LayerHelper('sum', **locals())
if dtype_flag: if dtype_flag:
out = helper.create_variable_for_type_inference( out = helper.create_variable_for_type_inference(dtype=dtype)
dtype=dtype)
else: else:
out = helper.create_variable_for_type_inference(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op( helper.append_op(type='reduce_sum',
type='reduce_sum', inputs={'X': x},
inputs={'X': x}, outputs={'Out': out},
outputs={'Out': out}, attrs=attrs)
attrs=attrs)
return out return out
...@@ -1299,8 +1345,8 @@ def nansum(x, axis=None, dtype=None, keepdim=False, name=None): ...@@ -1299,8 +1345,8 @@ def nansum(x, axis=None, dtype=None, keepdim=False, name=None):
out5 = paddle.nansum(y, axis=[1, 2]) # [8, 19] out5 = paddle.nansum(y, axis=[1, 2]) # [8, 19]
out6 = paddle.nansum(y, axis=[0, 1]) # [9, 18] out6 = paddle.nansum(y, axis=[0, 1]) # [9, 18]
""" """
check_variable_and_dtype( check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
x, 'x', ['float32', 'float64', 'int32', 'int64'], 'nansum') 'nansum')
check_type(axis, 'axis', (int, list, tuple, type(None)), 'nansum') check_type(axis, 'axis', (int, list, tuple, type(None)), 'nansum')
zero_tensor = paddle.zeros_like(x) zero_tensor = paddle.zeros_like(x)
...@@ -1367,12 +1413,14 @@ def nanmean(x, axis=None, keepdim=False, name=None): ...@@ -1367,12 +1413,14 @@ def nanmean(x, axis=None, keepdim=False, name=None):
axis = [axis] axis = [axis]
check_variable_and_dtype(x, 'x/input', check_variable_and_dtype(x, 'x/input',
['uint16', 'float16', 'float32', 'float64'], ['uint16', 'float16', 'float32', 'float64'],
'nanmean' ) 'nanmean')
if axis is not None: if axis is not None:
check_type(axis, 'axis/dim', (int, list, tuple), 'nanmean') check_type(axis, 'axis/dim', (int, list, tuple), 'nanmean')
cnt = paddle.sum(~paddle.isnan(x), axis = axis,keepdim=keepdim) cnt = paddle.sum(~paddle.isnan(x), axis=axis, keepdim=keepdim)
return paddle.divide(paddle.nansum(x, axis=axis, keepdim=keepdim, name=name), cnt.astype(x.dtype)) return paddle.divide(
paddle.nansum(x, axis=axis, keepdim=keepdim, name=name),
cnt.astype(x.dtype))
def count_nonzero(x, axis=None, keepdim=False, name=None): def count_nonzero(x, axis=None, keepdim=False, name=None):
...@@ -1424,13 +1472,13 @@ def count_nonzero(x, axis=None, keepdim=False, name=None): ...@@ -1424,13 +1472,13 @@ def count_nonzero(x, axis=None, keepdim=False, name=None):
# [1, 3, 5] # [1, 3, 5]
""" """
if axis is not None: if axis is not None:
if isinstance(axis, int): if isinstance(axis, int):
axis = [axis] axis = [axis]
dims = len(x.shape) dims = len(x.shape)
for i in range(len(axis)): for i in range(len(axis)):
if not isinstance(axis[i], int) or not (axis[i] < dims and axis[i] >= -dims): if not isinstance(axis[i], int) or not (axis[i] < dims
and axis[i] >= -dims):
raise ValueError( raise ValueError(
"Axis should be None, int, or a list, element should in range [-rank(x), rank(x))." "Axis should be None, int, or a list, element should in range [-rank(x), rank(x))."
) )
...@@ -1518,14 +1566,12 @@ def add_n(inputs, name=None): ...@@ -1518,14 +1566,12 @@ def add_n(inputs, name=None):
check_variable_and_dtype(inputs, "inputs", \ check_variable_and_dtype(inputs, "inputs", \
['float16', 'float32', 'float64', 'int32', 'int64'], 'add_n') ['float16', 'float32', 'float64', 'int32', 'int64'], 'add_n')
out = helper.create_variable_for_type_inference( out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('inputs')) dtype=helper.input_dtype('inputs'))
helper.append_op( helper.append_op(type='sum',
type='sum', inputs={'X': inputs},
inputs={'X': inputs}, outputs={'Out': out},
outputs={'Out': out}, attrs={'use_mkldnn': False})
attrs={'use_mkldnn': False})
return out return out
...@@ -1559,7 +1605,7 @@ def trunc(input, name=None): ...@@ -1559,7 +1605,7 @@ def trunc(input, name=None):
# [0., 0.]])) # [0., 0.]]))
''' '''
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.trunc(input) return _C_ops.trunc(input)
else: else:
if _in_legacy_dygraph(): if _in_legacy_dygraph():
return _legacy_C_ops.trunc(input) return _legacy_C_ops.trunc(input)
...@@ -1568,15 +1614,18 @@ def trunc(input, name=None): ...@@ -1568,15 +1614,18 @@ def trunc(input, name=None):
attrs = {} attrs = {}
helper = LayerHelper("trunc", **locals()) helper = LayerHelper("trunc", **locals())
check_variable_and_dtype(input, 'X', ['int32', 'int64', 'float32', 'float64'], 'trunc') check_variable_and_dtype(input, 'X',
['int32', 'int64', 'float32', 'float64'],
'trunc')
out = helper.create_variable_for_type_inference(dtype=input.dtype) out = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op( helper.append_op(type="trunc",
type="trunc", inputs=inputs, attrs=attrs, outputs={"Out": out}) inputs=inputs,
attrs=attrs,
outputs={"Out": out})
return out return out
def mm(input, mat2, name=None): def mm(input, mat2, name=None):
""" """
...@@ -1686,9 +1735,12 @@ def mm(input, mat2, name=None): ...@@ -1686,9 +1735,12 @@ def mm(input, mat2, name=None):
helper = LayerHelper('mm', **locals()) helper = LayerHelper('mm', **locals())
out = helper.create_variable_for_type_inference(dtype=input.dtype) out = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op( helper.append_op(type='matmul_v2',
type='matmul_v2', inputs={'X': input, inputs={
'Y': mat2}, outputs={'Out': out}) 'X': input,
'Y': mat2
},
outputs={'Out': out})
return out return out
...@@ -1735,28 +1787,40 @@ def addmm(input, x, y, beta=1.0, alpha=1.0, name=None): ...@@ -1735,28 +1787,40 @@ def addmm(input, x, y, beta=1.0, alpha=1.0, name=None):
x_shape = x.shape x_shape = x.shape
y_shape = y.shape y_shape = y.shape
if not len(x_shape) == len(y_shape) == 2: if not len(x_shape) == len(y_shape) == 2:
raise ValueError("The dimention of x, y should be 2 but receive x's shape: {}, y's shape: {}".format(x_shape, y_shape)) raise ValueError(
"The dimention of x, y should be 2 but receive x's shape: {}, y's shape: {}"
.format(x_shape, y_shape))
if x_shape[1] != y_shape[0]: if x_shape[1] != y_shape[0]:
raise ValueError("The input Variable x's width must be equal with Variable y' height. But received x's shape = {}, y's shape = {}.".format(x_shape, y_shape)) raise ValueError(
"The input Variable x's width must be equal with Variable y' height. But received x's shape = {}, y's shape = {}."
.format(x_shape, y_shape))
if len(input_shape) == 2: if len(input_shape) == 2:
if input_shape[0] != x_shape[0]: if input_shape[0] != x_shape[0]:
if input_shape[0] != 1: if input_shape[0] != 1:
raise ValueError( "When x's dimension[0] is not equal with input's dimension[0], input's dimension[0] must be 1 but got {}".format(input_shape[0])) raise ValueError(
"When x's dimension[0] is not equal with input's dimension[0], input's dimension[0] must be 1 but got {}"
.format(input_shape[0]))
if input_shape[1] != y_shape[1] and input_shape[1] != 1: if input_shape[1] != y_shape[1] and input_shape[1] != 1:
raise ValueError( "When y's dimension[1] is not equal with input's dimension[1], input's dimension[1] must be 1 but got {}".format(input_shape[1])) raise ValueError(
"When y's dimension[1] is not equal with input's dimension[1], input's dimension[1] must be 1 but got {}"
.format(input_shape[1]))
if input_shape[1] != y_shape[1]: if input_shape[1] != y_shape[1]:
if input_shape[1] != 1: if input_shape[1] != 1:
raise ValueError( "When y's dimension[1] is not equal with input's dimension[1], input's dimension[1] must be 1 but got {}".format(input_shape[1])) raise ValueError(
"When y's dimension[1] is not equal with input's dimension[1], input's dimension[1] must be 1 but got {}"
.format(input_shape[1]))
elif len(input_shape) == 1: elif len(input_shape) == 1:
if input_shape[0] not in (y_shape[1], 1): if input_shape[0] not in (y_shape[1], 1):
raise ValueError("The input's shape: {} is not broadcastable with [x.shape[0], y.shape[1]]: [{},{}]".format(input_shape, x_shape[0], y_shape[1])) raise ValueError(
"The input's shape: {} is not broadcastable with [x.shape[0], y.shape[1]]: [{},{}]"
.format(input_shape, x_shape[0], y_shape[1]))
else: else:
raise ValueError("The dimention of input should be 2 or 1 but receive input's shape: {}".format(input_shape)) raise ValueError(
"The dimention of input should be 2 or 1 but receive input's shape: {}"
.format(input_shape))
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.addmm( input, x, y, alpha, beta) return _C_ops.addmm(input, x, y, alpha, beta)
else: else:
if _in_legacy_dygraph(): if _in_legacy_dygraph():
out = _legacy_C_ops.addmm(input, x, y, "Alpha", alpha, "Beta", beta) out = _legacy_C_ops.addmm(input, x, y, "Alpha", alpha, "Beta", beta)
...@@ -1766,15 +1830,19 @@ def addmm(input, x, y, beta=1.0, alpha=1.0, name=None): ...@@ -1766,15 +1830,19 @@ def addmm(input, x, y, beta=1.0, alpha=1.0, name=None):
attrs = {'Alpha': alpha, 'Beta': beta} attrs = {'Alpha': alpha, 'Beta': beta}
helper = LayerHelper("addmm", **locals()) helper = LayerHelper("addmm", **locals())
check_variable_and_dtype(input, 'Input', ['float32', 'float64'], 'addmm') check_variable_and_dtype(input, 'Input', ['float32', 'float64'],
'addmm')
check_variable_and_dtype(x, 'X', ['float32', 'float64'], 'addmm') check_variable_and_dtype(x, 'X', ['float32', 'float64'], 'addmm')
check_variable_and_dtype(y, 'Y', ['float32', 'float64'], 'addmm') check_variable_and_dtype(y, 'Y', ['float32', 'float64'], 'addmm')
out = helper.create_variable_for_type_inference(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op( helper.append_op(type="addmm",
type="addmm", inputs=inputs, attrs=attrs, outputs={"Out": out}) inputs=inputs,
attrs=attrs,
outputs={"Out": out})
return out return out
def renorm(x, p, axis, max_norm): def renorm(x, p, axis, max_norm):
""" """
**renorm** **renorm**
...@@ -1812,30 +1880,36 @@ def renorm(x, p, axis, max_norm): ...@@ -1812,30 +1880,36 @@ def renorm(x, p, axis, max_norm):
input_shape = x.shape input_shape = x.shape
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'renorm') check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'renorm')
if not axis < len(input_shape): if not axis < len(input_shape):
raise ValueError("the axis:{} should be less then the shape's size {}:{}".format(axis,len(input_shape),input_shape)) raise ValueError(
if not axis >=0: "the axis:{} should be less then the shape's size {}:{}".format(
axis, len(input_shape), input_shape))
if not axis >= 0:
if not axis >= -1 * len(input_shape): if not axis >= -1 * len(input_shape):
raise ValueError("the axis:{} should not be less than -1 * length of input_shape:{}".format(axis,-1 * len(input_shape))) raise ValueError(
"the axis:{} should not be less than -1 * length of input_shape:{}"
.format(axis, -1 * len(input_shape)))
axis = axis + len(input_shape) axis = axis + len(input_shape)
if in_dygraph_mode(): if in_dygraph_mode():
out = _C_ops.renorm(x, p, axis, max_norm) out = _C_ops.renorm(x, p, axis, max_norm)
return out return out
elif _in_legacy_dygraph(): elif _in_legacy_dygraph():
out = _legacy_C_ops.renorm(x, 'p',p, 'axis',axis, 'max_norm', max_norm) out = _legacy_C_ops.renorm(x, 'p', p, 'axis', axis, 'max_norm',
max_norm)
return out return out
inputs = {'X': x} inputs = {'X': x}
attrs = {'p': p, 'axis': axis, 'max_norm':max_norm} attrs = {'p': p, 'axis': axis, 'max_norm': max_norm}
helper = LayerHelper("renorm", **locals()) helper = LayerHelper("renorm", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op( helper.append_op(type="renorm",
type="renorm", inputs=inputs, attrs=attrs, outputs={"Out": out}) inputs=inputs,
attrs=attrs,
outputs={"Out": out})
return out return out
def inner(x, y, name=None): def inner(x, y, name=None):
""" """
...@@ -1869,8 +1943,8 @@ def inner(x, y, name=None): ...@@ -1869,8 +1943,8 @@ def inner(x, y, name=None):
else: else:
xshape = x.shape xshape = x.shape
yshape = y.shape yshape = y.shape
dstshape = list(xshape[:-1])+list(yshape[:-1]) dstshape = list(xshape[:-1]) + list(yshape[:-1])
if len(dstshape)==0: if len(dstshape) == 0:
dstshape = [1] dstshape = [1]
nx = x.reshape((-1, xshape[-1])) nx = x.reshape((-1, xshape[-1]))
ny = y.reshape((-1, yshape[-1])) ny = y.reshape((-1, yshape[-1]))
...@@ -1884,7 +1958,8 @@ def inner(x, y, name=None): ...@@ -1884,7 +1958,8 @@ def inner(x, y, name=None):
var_names = {'x': x, 'y': y} var_names = {'x': x, 'y': y}
for name, val in var_names.items(): for name, val in var_names.items():
check_variable_and_dtype(val, name, check_variable_and_dtype(val, name,
['float16', 'float32', 'float64'], 'inner') ['float16', 'float32', 'float64'],
'inner')
x_shape = list(xshape) x_shape = list(xshape)
y_shape = list(yshape) y_shape = list(yshape)
...@@ -1901,9 +1976,12 @@ def inner(x, y, name=None): ...@@ -1901,9 +1976,12 @@ def inner(x, y, name=None):
helper = LayerHelper('inner', **locals()) helper = LayerHelper('inner', **locals())
out = helper.create_variable_for_type_inference(dtype=nx.dtype) out = helper.create_variable_for_type_inference(dtype=nx.dtype)
helper.append_op( helper.append_op(type='matmul_v2',
type='matmul_v2', inputs={'X': nx, inputs={
'Y': ny.T}, outputs={'Out': out}) 'X': nx,
'Y': ny.T
},
outputs={'Out': out})
return out.reshape(dstshape) return out.reshape(dstshape)
...@@ -1954,9 +2032,12 @@ def outer(x, y, name=None): ...@@ -1954,9 +2032,12 @@ def outer(x, y, name=None):
helper = LayerHelper('outer', **locals()) helper = LayerHelper('outer', **locals())
out = helper.create_variable_for_type_inference(dtype=nx.dtype) out = helper.create_variable_for_type_inference(dtype=nx.dtype)
helper.append_op( helper.append_op(type='matmul_v2',
type='matmul_v2', inputs={'X': nx, inputs={
'Y': ny}, outputs={'Out': out}) 'X': nx,
'Y': ny
},
outputs={'Out': out})
return out return out
...@@ -2015,17 +2096,18 @@ def logsumexp(x, axis=None, keepdim=False, name=None): ...@@ -2015,17 +2096,18 @@ def logsumexp(x, axis=None, keepdim=False, name=None):
axis = range(len(x.shape)) axis = range(len(x.shape))
return _C_ops.logsumexp(x, axis, keepdim, reduce_all) return _C_ops.logsumexp(x, axis, keepdim, reduce_all)
if _in_legacy_dygraph(): if _in_legacy_dygraph():
return _legacy_C_ops.logsumexp(x, 'axis', axis, 'keepdim', keepdim, 'reduce_all', reduce_all) return _legacy_C_ops.logsumexp(x, 'axis', axis, 'keepdim', keepdim,
'reduce_all', reduce_all)
check_variable_and_dtype(x, 'x', check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'logsumexp')
['float32', 'float64'],
'logsumexp')
helper = LayerHelper('logsumexp', **locals()) helper = LayerHelper('logsumexp', **locals())
attrs = {'axis': axis, 'keepdim': keepdim, 'reduce_all':reduce_all} attrs = {'axis': axis, 'keepdim': keepdim, 'reduce_all': reduce_all}
out = helper.create_variable_for_type_inference(x.dtype) out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op( helper.append_op(type='logsumexp',
type='logsumexp', inputs={'X': x}, outputs={'Out': out}, attrs=attrs) inputs={'X': x},
outputs={'Out': out},
attrs=attrs)
return out return out
...@@ -2062,20 +2144,22 @@ def inverse(x, name=None): ...@@ -2062,20 +2144,22 @@ def inverse(x, name=None):
return _legacy_C_ops.inverse(x) return _legacy_C_ops.inverse(x)
def _check_input(x): def _check_input(x):
check_variable_and_dtype(x, 'x', check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'inverse')
['float32', 'float64'], 'inverse')
if len(x.shape) < 2: if len(x.shape) < 2:
raise ValueError( raise ValueError(
"The input of inverse is expected to be a Tensor whose number " "The input of inverse is expected to be a Tensor whose number "
"of dimensions is no less than 2. But reviced: %d, " "of dimensions is no less than 2. But reviced: %d, "
"x's shape: %s." % (len(x.shape), x.shape)) "x's shape: %s." % (len(x.shape), x.shape))
_check_input(x) _check_input(x)
helper = LayerHelper('inverse', **locals()) helper = LayerHelper('inverse', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op( helper.append_op(type='inverse',
type='inverse', inputs={'Input': [x] }, outputs={'Output': [out]}) inputs={'Input': [x]},
outputs={'Output': [out]})
return out return out
def _get_reduce_axis(axis): def _get_reduce_axis(axis):
""" """
Internal function for max, min, amax and amin. Internal function for max, min, amax and amin.
...@@ -2085,20 +2169,23 @@ def _get_reduce_axis(axis): ...@@ -2085,20 +2169,23 @@ def _get_reduce_axis(axis):
if isinstance(axis, tuple): if isinstance(axis, tuple):
axis = list(axis) axis = list(axis)
elif isinstance(axis, int): elif isinstance(axis, int):
axis= [axis] axis = [axis]
else: else:
raise TypeError( raise TypeError(
"The type of axis must be int, list or tuple, but received {}".format(type(axis))) "The type of axis must be int, list or tuple, but received {}".
format(type(axis)))
reduce_all = True if axis == None or axis == [] else False reduce_all = True if axis == None or axis == [] else False
if axis == None: if axis == None:
axis = [] axis = []
return reduce_all, axis return reduce_all, axis
def _get_reduce_axis_with_tensor(axis): def _get_reduce_axis_with_tensor(axis):
if isinstance(axis, Variable): if isinstance(axis, Variable):
return False, axis return False, axis
return _get_reduce_axis(axis) return _get_reduce_axis(axis)
def _get_reduce_all_value(axis): def _get_reduce_all_value(axis):
""" """
Internal function for max, min, amax and amin. Internal function for max, min, amax and amin.
...@@ -2108,15 +2195,17 @@ def _get_reduce_all_value(axis): ...@@ -2108,15 +2195,17 @@ def _get_reduce_all_value(axis):
if isinstance(axis, tuple): if isinstance(axis, tuple):
axis = list(axis) axis = list(axis)
elif isinstance(axis, int): elif isinstance(axis, int):
axis= [axis] axis = [axis]
else: else:
raise TypeError( raise TypeError(
"The type of axis must be int, list or tuple, but received {}".format(type(axis))) "The type of axis must be int, list or tuple, but received {}".
format(type(axis)))
reduce_all = True if axis == None or axis == [] else False reduce_all = True if axis == None or axis == [] else False
axis = axis if axis != None and axis != [] else [0] axis = axis if axis != None and axis != [] else [0]
return reduce_all, axis return reduce_all, axis
def max(x, axis=None, keepdim=False, name=None): def max(x, axis=None, keepdim=False, name=None):
""" """
...@@ -2200,27 +2289,26 @@ def max(x, axis=None, keepdim=False, name=None): ...@@ -2200,27 +2289,26 @@ def max(x, axis=None, keepdim=False, name=None):
return _C_ops.max(x, axis, keepdim) return _C_ops.max(x, axis, keepdim)
if _in_legacy_dygraph(): if _in_legacy_dygraph():
return _legacy_C_ops.reduce_max(x, 'dim', axis, 'keep_dim', keepdim, return _legacy_C_ops.reduce_max(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all) 'reduce_all', reduce_all)
helper = LayerHelper('max', **locals()) helper = LayerHelper('max', **locals())
check_variable_and_dtype( check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
x, 'x', ['float32', 'float64', 'int32', 'int64'], 'max') 'max')
if not isinstance(axis, Variable) and utils._contain_var(axis): if not isinstance(axis, Variable) and utils._contain_var(axis):
axis = utils._convert_to_tensor_list(axis) axis = utils._convert_to_tensor_list(axis)
out = helper.create_variable_for_type_inference( out = helper.create_variable_for_type_inference(dtype=x.dtype)
dtype=x.dtype) helper.append_op(type='reduce_max',
helper.append_op( inputs={'X': x},
type='reduce_max', outputs={'Out': out},
inputs={'X': x}, attrs={
outputs={'Out': out}, 'dim': axis,
attrs={ 'keep_dim': keepdim,
'dim': axis, 'reduce_all': reduce_all
'keep_dim': keepdim, })
'reduce_all': reduce_all
})
return out return out
def min(x, axis=None, keepdim=False, name=None): def min(x, axis=None, keepdim=False, name=None):
""" """
...@@ -2304,27 +2392,26 @@ def min(x, axis=None, keepdim=False, name=None): ...@@ -2304,27 +2392,26 @@ def min(x, axis=None, keepdim=False, name=None):
if _in_legacy_dygraph(): if _in_legacy_dygraph():
return _legacy_C_ops.reduce_min(x, 'dim', axis, 'keep_dim', keepdim, return _legacy_C_ops.reduce_min(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all) 'reduce_all', reduce_all)
helper = LayerHelper('min', **locals()) helper = LayerHelper('min', **locals())
check_variable_and_dtype( check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
x, 'x', ['float32', 'float64', 'int32', 'int64'], 'min') 'min')
if not isinstance(axis, Variable) and utils._contain_var(axis): if not isinstance(axis, Variable) and utils._contain_var(axis):
axis = utils._convert_to_tensor_list(axis) axis = utils._convert_to_tensor_list(axis)
out = helper.create_variable_for_type_inference( out = helper.create_variable_for_type_inference(dtype=x.dtype)
dtype=x.dtype) helper.append_op(type='reduce_min',
helper.append_op( inputs={'X': x},
type='reduce_min', outputs={'Out': out},
inputs={'X': x}, attrs={
outputs={'Out': out}, 'dim': axis,
attrs={ 'keep_dim': keepdim,
'dim': axis, 'reduce_all': reduce_all
'keep_dim': keepdim, })
'reduce_all': reduce_all
})
return out return out
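The max and min hunks above are formatting-only reflows of the static-graph branches; behaviour is unchanged. A small dygraph sketch (values in comments are approximate):

    import paddle

    x = paddle.to_tensor([[1., 2.], [3., 4.]])
    print(paddle.max(x))                              # 4.
    print(paddle.min(x, axis=0))                      # [1., 2.]
    print(paddle.max(x, axis=1, keepdim=True).shape)  # [2, 1]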
def amax(x, axis=None, keepdim=False, name=None): def amax(x, axis=None, keepdim=False, name=None):
""" """
Computes the maximum of tensor elements over the given axis. Computes the maximum of tensor elements over the given axis.
...@@ -2417,27 +2504,27 @@ def amax(x, axis=None, keepdim=False, name=None): ...@@ -2417,27 +2504,27 @@ def amax(x, axis=None, keepdim=False, name=None):
reduce_all, axis = _get_reduce_axis(axis) reduce_all, axis = _get_reduce_axis(axis)
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.amax(x, axis, keepdim) return _C_ops.amax(x, axis, keepdim)
if _in_legacy_dygraph(): if _in_legacy_dygraph():
return _legacy_C_ops.reduce_amax(x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all) return _legacy_C_ops.reduce_amax(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all)
helper = LayerHelper('amax', **locals()) helper = LayerHelper('amax', **locals())
check_variable_and_dtype( check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
x, 'x', ['float32', 'float64', 'int32', 'int64'], 'amax') 'amax')
out = helper.create_variable_for_type_inference( out = helper.create_variable_for_type_inference(dtype=x.dtype)
dtype=x.dtype) helper.append_op(type='reduce_amax',
helper.append_op( inputs={'X': x},
type='reduce_amax', outputs={'Out': out},
inputs={'X': x}, attrs={
outputs={'Out': out}, 'dim': axis,
attrs={ 'keep_dim': keepdim,
'dim': axis, 'reduce_all': reduce_all
'keep_dim': keepdim, })
'reduce_all': reduce_all
})
return out return out
def amin(x, axis=None, keepdim=False, name=None): def amin(x, axis=None, keepdim=False, name=None):
""" """
...@@ -2529,28 +2616,28 @@ def amin(x, axis=None, keepdim=False, name=None): ...@@ -2529,28 +2616,28 @@ def amin(x, axis=None, keepdim=False, name=None):
#[0.1., 0.1], [[[0., 0.3333], [0.5, 0.3333]], [[0.5, 0.3333], [1., 1.]]] #[0.1., 0.1], [[[0., 0.3333], [0.5, 0.3333]], [[0.5, 0.3333], [1., 1.]]]
""" """
reduce_all, axis = _get_reduce_axis( axis ) reduce_all, axis = _get_reduce_axis(axis)
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.amin(x, axis, keepdim) return _C_ops.amin(x, axis, keepdim)
elif _in_legacy_dygraph(): elif _in_legacy_dygraph():
return _legacy_C_ops.reduce_amin(x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all) return _legacy_C_ops.reduce_amin(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all)
helper = LayerHelper('amin', **locals()) helper = LayerHelper('amin', **locals())
check_variable_and_dtype( check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
x, 'x', ['float32', 'float64', 'int32', 'int64'], 'amin') 'amin')
out = helper.create_variable_for_type_inference( out = helper.create_variable_for_type_inference(dtype=x.dtype)
dtype=x.dtype) helper.append_op(type='reduce_amin',
helper.append_op( inputs={'X': x},
type='reduce_amin', outputs={'Out': out},
inputs={'X': x}, attrs={
outputs={'Out': out}, 'dim': axis,
attrs={ 'keep_dim': keepdim,
'dim': axis, 'reduce_all': reduce_all
'keep_dim': keepdim, })
'reduce_all': reduce_all
})
return out return out
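paddle.amax and paddle.amin compute the same forward values as max and min; per the docstring excerpt above, they differ only in how the gradient is split among tied extrema. A short sketch (approximate values):

    import paddle

    x = paddle.to_tensor([[0.1, 0.3], [0.3, 0.3]])
    print(paddle.amax(x))          # 0.3
    print(paddle.amin(x, axis=1))  # [0.1, 0.3]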
def log1p(x, name=None): def log1p(x, name=None):
r""" r"""
Calculates the natural log of the given input tensor, element-wise. Calculates the natural log of the given input tensor, element-wise.
...@@ -2588,6 +2675,7 @@ def log1p(x, name=None): ...@@ -2588,6 +2675,7 @@ def log1p(x, name=None):
helper.append_op(type="log1p", inputs={"X": x}, outputs={"Out": out}) helper.append_op(type="log1p", inputs={"X": x}, outputs={"Out": out})
return out return out
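A one-line check of paddle.log1p, whose append_op call is untouched above (approximate values):

    import paddle

    print(paddle.log1p(paddle.to_tensor([0., 1.])))  # [0., 0.6931]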
def log2(x, name=None): def log2(x, name=None):
r""" r"""
Calculates the log to the base 2 of the given input tensor, element-wise. Calculates the log to the base 2 of the given input tensor, element-wise.
...@@ -2768,7 +2856,8 @@ def clip(x, min=None, max=None, name=None): ...@@ -2768,7 +2856,8 @@ def clip(x, min=None, max=None, name=None):
check_dtype(max.dtype, 'max', ['float32', 'float64', 'int32'], check_dtype(max.dtype, 'max', ['float32', 'float64', 'int32'],
'clip', '(When the type of max in clip is Variable.)') 'clip', '(When the type of max in clip is Variable.)')
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], 'clip') check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
'clip')
inputs = {'X': x} inputs = {'X': x}
attrs = {'min': min_, 'max': max_} attrs = {'min': min_, 'max': max_}
...@@ -2788,8 +2877,10 @@ def clip(x, min=None, max=None, name=None): ...@@ -2788,8 +2877,10 @@ def clip(x, min=None, max=None, name=None):
helper = LayerHelper('clip', **locals()) helper = LayerHelper('clip', **locals())
output = helper.create_variable_for_type_inference( output = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('x')) dtype=helper.input_dtype('x'))
helper.append_op( helper.append_op(type='clip',
type='clip', inputs=inputs, outputs={'Out': [output]}, attrs=attrs) inputs=inputs,
outputs={'Out': [output]},
attrs=attrs)
return output return output
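The clip hunk only re-wraps the append_op call; a minimal usage sketch (approximate values):

    import paddle

    x = paddle.to_tensor([-1.5, 0.2, 3.7])
    print(paddle.clip(x, min=0.0, max=1.0))  # [0., 0.2, 1.]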
...@@ -2816,7 +2907,6 @@ def clip_(x, min=None, max=None, name=None): ...@@ -2816,7 +2907,6 @@ def clip_(x, min=None, max=None, name=None):
return _legacy_C_ops.clip_(x, "min", min, "max", max) return _legacy_C_ops.clip_(x, "min", min, "max", max)
def trace(x, offset=0, axis1=0, axis2=1, name=None): def trace(x, offset=0, axis1=0, axis2=1, name=None):
""" """
...@@ -2857,6 +2947,7 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None): ...@@ -2857,6 +2947,7 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None):
data2 = paddle.trace(case2, offset=1, axis1=1, axis2=2) # data2.shape = [3] data2 = paddle.trace(case2, offset=1, axis1=1, axis2=2) # data2.shape = [3]
data3 = paddle.trace(case3, offset=-3, axis1=1, axis2=-1) # data2.shape = [3, 5] data3 = paddle.trace(case3, offset=-3, axis1=1, axis2=-1) # data2.shape = [3, 5]
""" """
def __check_input(x, offset, axis1, axis2): def __check_input(x, offset, axis1, axis2):
check_dtype(x.dtype, 'Input', check_dtype(x.dtype, 'Input',
['int32', 'int64', 'float16', 'float32', 'float64'], ['int32', 'int64', 'float16', 'float32', 'float64'],
...@@ -2885,25 +2976,28 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None): ...@@ -2885,25 +2976,28 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None):
"But received axis1 = %d, axis2 = %d\n"%(axis1, axis2) "But received axis1 = %d, axis2 = %d\n"%(axis1, axis2)
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.trace( x, offset, axis1, axis2 ) return _C_ops.trace(x, offset, axis1, axis2)
if _in_legacy_dygraph(): if _in_legacy_dygraph():
return _legacy_C_ops.trace(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2) return _legacy_C_ops.trace(x, 'offset', offset, 'axis1', axis1, 'axis2',
axis2)
__check_input(x, offset, axis1, axis2) __check_input(x, offset, axis1, axis2)
helper = LayerHelper('trace', **locals()) helper = LayerHelper('trace', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op( helper.append_op(type='trace',
type='trace', inputs={'Input': [x]},
inputs={'Input': [x]}, attrs={
attrs={'offset': offset, 'offset': offset,
'axis1': axis1, 'axis1': axis1,
'axis2': axis2}, 'axis2': axis2
outputs={'Out': [out]}) },
outputs={'Out': [out]})
return out return out
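For reference, a dygraph sketch of paddle.trace as exercised by this hunk (approximate values):

    import paddle

    x = paddle.to_tensor([[1., 2.], [3., 4.]])
    print(paddle.trace(x))            # 5.  (main diagonal: 1 + 4)
    print(paddle.trace(x, offset=1))  # 2.  (first super-diagonal)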
def diagonal(x, offset=0, axis1=0, axis2=1, name=None): def diagonal(x, offset=0, axis1=0, axis2=1, name=None):
""" """
This OP computes the diagonals of the input tensor x. This OP computes the diagonals of the input tensor x.
...@@ -2973,7 +3067,8 @@ def diagonal(x, offset=0, axis1=0, axis2=1, name=None): ...@@ -2973,7 +3067,8 @@ def diagonal(x, offset=0, axis1=0, axis2=1, name=None):
return _C_ops.diagonal(x, offset, axis1, axis2) return _C_ops.diagonal(x, offset, axis1, axis2)
else: else:
if _in_legacy_dygraph(): if _in_legacy_dygraph():
return _legacy_C_ops.diagonal(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2) return _legacy_C_ops.diagonal(x, 'offset', offset, 'axis1', axis1,
'axis2', axis2)
def __check_input(x, offset, axis1, axis2): def __check_input(x, offset, axis1, axis2):
check_dtype(x.dtype, 'Input', check_dtype(x.dtype, 'Input',
...@@ -3005,13 +3100,14 @@ def diagonal(x, offset=0, axis1=0, axis2=1, name=None): ...@@ -3005,13 +3100,14 @@ def diagonal(x, offset=0, axis1=0, axis2=1, name=None):
helper = LayerHelper('diagonal', **locals()) helper = LayerHelper('diagonal', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op( helper.append_op(type='diagonal',
type='diagonal', inputs={'Input': [x]},
inputs={'Input': [x]}, attrs={
attrs={'offset': offset, 'offset': offset,
'axis1': axis1, 'axis1': axis1,
'axis2': axis2}, 'axis2': axis2
outputs={'Out': [out]}) },
outputs={'Out': [out]})
return out return out
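paddle.diagonal extracts the diagonal rather than summing it; a quick sketch (approximate values):

    import paddle

    x = paddle.to_tensor([[1., 2.], [3., 4.]])
    print(paddle.diagonal(x))             # [1., 4.]
    print(paddle.diagonal(x, offset=-1))  # [3.]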
...@@ -3049,8 +3145,10 @@ def kron(x, y, name=None): ...@@ -3049,8 +3145,10 @@ def kron(x, y, name=None):
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.kron(x, y) return _C_ops.kron(x, y)
helper = LayerHelper('kron', **locals()) helper = LayerHelper('kron', **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron') check_variable_and_dtype(
check_variable_and_dtype(y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron') x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron')
check_variable_and_dtype(
y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron')
out = helper.create_variable_for_type_inference(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type="kron", inputs={"X": x, "Y": y}, outputs={"Out": out}) helper.append_op(type="kron", inputs={"X": x, "Y": y}, outputs={"Out": out})
...@@ -3186,13 +3284,20 @@ def logcumsumexp(x, axis=None, dtype=None, name=None): ...@@ -3186,13 +3284,20 @@ def logcumsumexp(x, axis=None, dtype=None, name=None):
if axis is None: if axis is None:
return _legacy_C_ops.logcumsumexp(x, 'flatten', flatten) return _legacy_C_ops.logcumsumexp(x, 'flatten', flatten)
else: else:
return _legacy_C_ops.logcumsumexp(x, 'axis', axis, 'flatten', flatten) return _legacy_C_ops.logcumsumexp(x, 'axis', axis, 'flatten',
flatten)
check_variable_and_dtype(x, 'x', ['float32', 'float64'], "logcumsumexp") check_variable_and_dtype(x, 'x', ['float32', 'float64'], "logcumsumexp")
helper = LayerHelper('logcumsumexp', **locals()) helper = LayerHelper('logcumsumexp', **locals())
out = helper.create_variable_for_type_inference(x.dtype) out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='logcumsumexp', inputs={'X': x}, outputs={'Out': out}, attrs={'axis': axis, 'flatten': flatten}) helper.append_op(type='logcumsumexp',
inputs={'X': x},
outputs={'Out': out},
attrs={
'axis': axis,
'flatten': flatten
})
return out return out
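logcumsumexp(x) is a numerically stable log(cumsum(exp(x))); a tiny check (approximate values):

    import paddle

    x = paddle.zeros([3])
    print(paddle.logcumsumexp(x))  # [0., 0.6931, 1.0986], i.e. log(1), log(2), log(3)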
...@@ -3251,14 +3356,21 @@ def cumprod(x, dim=None, dtype=None, name=None): ...@@ -3251,14 +3356,21 @@ def cumprod(x, dim=None, dtype=None, name=None):
if _in_legacy_dygraph(): if _in_legacy_dygraph():
return _legacy_C_ops.cumprod(x, 'dim', dim) return _legacy_C_ops.cumprod(x, 'dim', dim)
check_variable_and_dtype(x, "x", ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'], 'cumprod') check_variable_and_dtype(
x, "x",
['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'],
'cumprod')
check_type(dim, 'dim', int, 'cumprod') check_type(dim, 'dim', int, 'cumprod')
helper = LayerHelper('cumprod', **locals()) helper = LayerHelper('cumprod', **locals())
out = helper.create_variable_for_type_inference(x.dtype) out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='cumprod', inputs={'X': x}, outputs={'Out': out}, attrs={'dim': dim}) helper.append_op(type='cumprod',
inputs={'X': x},
outputs={'Out': out},
attrs={'dim': dim})
return out return out
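A cumprod sketch matching the reflowed append_op above; dim must be given (approximate values):

    import paddle

    x = paddle.to_tensor([1., 2., 3., 4.])
    print(paddle.cumprod(x, dim=0))  # [1., 2., 6., 24.]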
def isfinite(x, name=None): def isfinite(x, name=None):
""" """
...@@ -3281,15 +3393,17 @@ def isfinite(x, name=None): ...@@ -3281,15 +3393,17 @@ def isfinite(x, name=None):
print(out) # [False True True False True False False] print(out) # [False True True False True False False]
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.isfinite( x ) return _C_ops.isfinite(x)
if _in_legacy_dygraph(): if _in_legacy_dygraph():
return _legacy_C_ops.isfinite_v2(x) return _legacy_C_ops.isfinite_v2(x)
helper = LayerHelper("isfinite_v2", **locals()) helper = LayerHelper("isfinite_v2", **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isfinite') check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isfinite')
out = helper.create_variable_for_type_inference('bool') out = helper.create_variable_for_type_inference('bool')
helper.append_op(type="isfinite_v2", inputs={"X": x}, outputs={"Out": out}) helper.append_op(type="isfinite_v2", inputs={"X": x}, outputs={"Out": out})
return out return out
def isinf(x, name=None): def isinf(x, name=None):
""" """
...@@ -3312,15 +3426,17 @@ def isinf(x, name=None): ...@@ -3312,15 +3426,17 @@ def isinf(x, name=None):
print(out) # [ True False False True False False False] print(out) # [ True False False True False False False]
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.isinf( x ) return _C_ops.isinf(x)
if _in_legacy_dygraph(): if _in_legacy_dygraph():
return _legacy_C_ops.isinf_v2(x) return _legacy_C_ops.isinf_v2(x)
helper = LayerHelper("isinf_v2", **locals()) helper = LayerHelper("isinf_v2", **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isinf') check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isinf')
out = helper.create_variable_for_type_inference(dtype='bool') out = helper.create_variable_for_type_inference(dtype='bool')
helper.append_op(type="isinf_v2", inputs={"X": x}, outputs={"Out": out}) helper.append_op(type="isinf_v2", inputs={"X": x}, outputs={"Out": out})
return out return out
def isnan(x, name=None): def isnan(x, name=None):
""" """
...@@ -3343,12 +3459,13 @@ def isnan(x, name=None): ...@@ -3343,12 +3459,13 @@ def isnan(x, name=None):
print(out) # [False False False False False True True] print(out) # [False False False False False True True]
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.isnan( x ) return _C_ops.isnan(x)
if _in_legacy_dygraph(): if _in_legacy_dygraph():
return _legacy_C_ops.isnan_v2(x) return _legacy_C_ops.isnan_v2(x)
helper = LayerHelper("isnan_v2", **locals()) helper = LayerHelper("isnan_v2", **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isnan') check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isnan')
out = helper.create_variable_for_type_inference(dtype='bool') out = helper.create_variable_for_type_inference(dtype='bool')
helper.append_op(type="isnan_v2", inputs={"X": x}, outputs={"Out": out}) helper.append_op(type="isnan_v2", inputs={"X": x}, outputs={"Out": out})
return out return out
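The three hunks above reflow isfinite, isinf and isnan identically; a combined sketch:

    import paddle

    x = paddle.to_tensor([float('-inf'), 2.0, float('nan')])
    print(paddle.isfinite(x))  # [False, True,  False]
    print(paddle.isinf(x))     # [True,  False, False]
    print(paddle.isnan(x))     # [False, False, True ]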
...@@ -3409,7 +3526,8 @@ def prod(x, axis=None, keepdim=False, dtype=None, name=None): ...@@ -3409,7 +3526,8 @@ def prod(x, axis=None, keepdim=False, dtype=None, name=None):
""" """
if dtype is not None: if dtype is not None:
check_dtype(dtype, 'dtype', ['float32', 'float64', 'int32', 'int64'], 'prod') check_dtype(dtype, 'dtype', ['float32', 'float64', 'int32', 'int64'],
'prod')
if x.dtype != convert_np_dtype_to_dtype_(dtype): if x.dtype != convert_np_dtype_to_dtype_(dtype):
x = cast(x, dtype) x = cast(x, dtype)
...@@ -3424,34 +3542,35 @@ def prod(x, axis=None, keepdim=False, dtype=None, name=None): ...@@ -3424,34 +3542,35 @@ def prod(x, axis=None, keepdim=False, dtype=None, name=None):
dim = [dim] dim = [dim]
else: else:
raise TypeError( raise TypeError(
"The type of axis must be int, list or tuple, but received {}". "The type of axis must be int, list or tuple, but received {}"
format(type(dim))) .format(type(dim)))
reduce_all = True if dim is None or len(dim) == 0 or len(dim) == len(x.shape) else False reduce_all = True if dim is None or len(dim) == 0 or len(dim) == len(
x.shape) else False
if dim is None or len(dim) == 0: if dim is None or len(dim) == 0:
dim = [0] dim = [0]
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.reduce_prod(x, dim, keepdim, reduce_all) return _C_ops.reduce_prod(x, dim, keepdim, reduce_all)
if _in_legacy_dygraph(): if _in_legacy_dygraph():
return _legacy_C_ops.reduce_prod( return _legacy_C_ops.reduce_prod(x, 'dim', dim, 'keep_dim', keepdim,
x, 'dim', dim, 'keep_dim', keepdim, 'reduce_all', reduce_all) 'reduce_all', reduce_all)
helper = LayerHelper('reduce_prod', **locals()) helper = LayerHelper('reduce_prod', **locals())
check_variable_and_dtype( check_variable_and_dtype(x, 'x/input',
x, 'x/input', ['float32', 'float64', 'int32', 'int64'], 'reduce_prod') ['float32', 'float64', 'int32', 'int64'],
'reduce_prod')
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype()) out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
if not isinstance(dim, Variable) and utils._contain_var(dim): if not isinstance(dim, Variable) and utils._contain_var(dim):
dim = utils._convert_to_tensor_list(dim) dim = utils._convert_to_tensor_list(dim)
helper.append_op( helper.append_op(type='reduce_prod',
type='reduce_prod', inputs={'X': x},
inputs={'X': x}, outputs={'Out': out},
outputs={'Out': out}, attrs={
attrs={ 'dim': dim,
'dim': dim, 'keep_dim': keepdim,
'keep_dim': keepdim, 'reduce_all': reduce_all
'reduce_all': reduce_all })
})
return out return out
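A prod sketch covering the full-reduce and per-axis cases handled above (approximate values):

    import paddle

    x = paddle.to_tensor([[1., 2.], [3., 4.]])
    print(paddle.prod(x))          # 24.
    print(paddle.prod(x, axis=0))  # [3., 8.]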
...@@ -3516,7 +3635,7 @@ def tanh(x, name=None): ...@@ -3516,7 +3635,7 @@ def tanh(x, name=None):
# [-0.37994896 -0.19737532 0.09966799 0.29131261] # [-0.37994896 -0.19737532 0.09966799 0.29131261]
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.tanh( x ) return _C_ops.tanh(x)
if _in_legacy_dygraph(): if _in_legacy_dygraph():
return _legacy_C_ops.tanh(x) return _legacy_C_ops.tanh(x)
...@@ -3528,6 +3647,7 @@ def tanh(x, name=None): ...@@ -3528,6 +3647,7 @@ def tanh(x, name=None):
helper.append_op(type='tanh', inputs={'X': x}, outputs={'Out': out}) helper.append_op(type='tanh', inputs={'X': x}, outputs={'Out': out})
return out return out
@inplace_apis_in_dygraph_only @inplace_apis_in_dygraph_only
def tanh_(x, name=None): def tanh_(x, name=None):
r""" r"""
...@@ -3535,7 +3655,7 @@ def tanh_(x, name=None): ...@@ -3535,7 +3655,7 @@ def tanh_(x, name=None):
Please refer to :ref:`api_tensor_tanh`. Please refer to :ref:`api_tensor_tanh`.
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.tanh_( x ) return _C_ops.tanh_(x)
return _legacy_C_ops.tanh_(x) return _legacy_C_ops.tanh_(x)
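tanh and its inplace variant are only re-spaced here; a sketch mirroring the docstring example above (approximate values):

    import paddle

    x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
    print(paddle.tanh(x))  # [-0.3799, -0.1974, 0.0997, 0.2913]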
...@@ -3571,11 +3691,10 @@ def increment(x, value=1.0, name=None): ...@@ -3571,11 +3691,10 @@ def increment(x, value=1.0, name=None):
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
'increment') 'increment')
helper = LayerHelper("increment", **locals()) helper = LayerHelper("increment", **locals())
helper.append_op( helper.append_op(type='increment',
type='increment', inputs={'X': [x]},
inputs={'X': [x]}, outputs={'Out': [x]},
outputs={'Out': [x]}, attrs={'step': float(value)})
attrs={'step': float(value)})
return x return x
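increment expects a single-element tensor and bumps it by `value`; a minimal sketch, assuming the usual paddle.increment export (approximate values):

    import paddle

    x = paddle.zeros([1])
    print(paddle.increment(x, value=2.0))  # [2.]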
...@@ -3647,7 +3766,7 @@ def all(x, axis=None, keepdim=False, name=None): ...@@ -3647,7 +3766,7 @@ def all(x, axis=None, keepdim=False, name=None):
if _in_legacy_dygraph(): if _in_legacy_dygraph():
axis = axis if axis != None and axis != [] else [0] axis = axis if axis != None and axis != [] else [0]
return _legacy_C_ops.reduce_all(x, 'dim', axis, 'keep_dim', keepdim, return _legacy_C_ops.reduce_all(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all_flag) 'reduce_all', reduce_all_flag)
attrs = { attrs = {
'dim': axis if axis != None and axis != [] and axis != () else [0], 'dim': axis if axis != None and axis != [] and axis != () else [0],
...@@ -3656,16 +3775,14 @@ def all(x, axis=None, keepdim=False, name=None): ...@@ -3656,16 +3775,14 @@ def all(x, axis=None, keepdim=False, name=None):
} }
check_variable_and_dtype(x, 'x', ['bool'], 'all') check_variable_and_dtype(x, 'x', ['bool'], 'all')
check_type(axis, 'axis', (int, list, tuple, type(None)), 'all') check_type(axis, 'axis', (int, list, tuple, type(None)), 'all')
helper = LayerHelper('all', **locals()) helper = LayerHelper('all', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op( helper.append_op(type='reduce_all',
type='reduce_all', inputs={'X': x},
inputs={'X': x}, outputs={'Out': out},
outputs={'Out': out}, attrs=attrs)
attrs=attrs)
return out return out
...@@ -3738,7 +3855,7 @@ def any(x, axis=None, keepdim=False, name=None): ...@@ -3738,7 +3855,7 @@ def any(x, axis=None, keepdim=False, name=None):
if _in_legacy_dygraph(): if _in_legacy_dygraph():
axis = axis if axis != None and axis != [] else [0] axis = axis if axis != None and axis != [] else [0]
return _legacy_C_ops.reduce_any(x, 'dim', axis, 'keep_dim', keepdim, return _legacy_C_ops.reduce_any(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all_flag) 'reduce_all', reduce_all_flag)
attrs = { attrs = {
'dim': axis if axis != None and axis != [] and axis != () else [0], 'dim': axis if axis != None and axis != [] and axis != () else [0],
...@@ -3748,18 +3865,17 @@ def any(x, axis=None, keepdim=False, name=None): ...@@ -3748,18 +3865,17 @@ def any(x, axis=None, keepdim=False, name=None):
check_variable_and_dtype(x, 'x', ['bool'], 'any') check_variable_and_dtype(x, 'x', ['bool'], 'any')
check_type(axis, 'axis', (int, list, tuple, type(None)), 'any') check_type(axis, 'axis', (int, list, tuple, type(None)), 'any')
helper = LayerHelper('any', **locals()) helper = LayerHelper('any', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op( helper.append_op(type='reduce_any',
type='reduce_any', inputs={'X': x},
inputs={'X': x}, outputs={'Out': out},
outputs={'Out': out}, attrs=attrs)
attrs=attrs)
return out return out
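all and any operate on bool tensors, as the dtype checks above require; a combined sketch:

    import paddle

    x = paddle.to_tensor([[True, False], [True, True]])
    print(paddle.all(x))          # False
    print(paddle.any(x, axis=1))  # [True, True]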
def broadcast_shape(x_shape, y_shape): def broadcast_shape(x_shape, y_shape):
""" """
The function returns the shape of doing operation with broadcasting on tensors of x_shape and y_shape, please refer to :ref:`user_guide_broadcasting` for more details. The function returns the shape of doing operation with broadcasting on tensors of x_shape and y_shape, please refer to :ref:`user_guide_broadcasting` for more details.
...@@ -3787,6 +3903,7 @@ def broadcast_shape(x_shape, y_shape): ...@@ -3787,6 +3903,7 @@ def broadcast_shape(x_shape, y_shape):
return core.broadcast_shape(x_shape, y_shape) return core.broadcast_shape(x_shape, y_shape)
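broadcast_shape works on plain Python shape lists rather than tensors; a quick check (output approximate):

    import paddle

    print(paddle.broadcast_shape([2, 1, 3], [1, 3, 1]))  # [2, 3, 3]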
def conj(x, name=None): def conj(x, name=None):
r""" r"""
This function computes the conjugate of the Tensor elementwisely. This function computes the conjugate of the Tensor elementwisely.
...@@ -3821,15 +3938,18 @@ def conj(x, name=None): ...@@ -3821,15 +3938,18 @@ def conj(x, name=None):
if paddle.in_dynamic_mode(): if paddle.in_dynamic_mode():
return _legacy_C_ops.conj(x) return _legacy_C_ops.conj(x)
check_variable_and_dtype(x, "x", ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'], 'conj') check_variable_and_dtype(
x, "x",
['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'],
'conj')
helper = LayerHelper('conj', **locals()) helper = LayerHelper('conj', **locals())
out = helper.create_variable_for_type_inference( out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
dtype=helper.input_dtype())
helper.append_op(type='conj', inputs={'X': x}, outputs={'Out': [out]}) helper.append_op(type='conj', inputs={'X': x}, outputs={'Out': [out]})
return out return out
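A conj sketch, assuming complex tensor construction via paddle.to_tensor as in the nearby docstrings (approximate values):

    import paddle

    x = paddle.to_tensor([1 + 2j, 3 - 4j])
    print(paddle.conj(x))  # [(1-2j), (3+4j)]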
def digamma(x, name=None): def digamma(x, name=None):
r""" r"""
Calculates the digamma of the given input tensor, element-wise. Calculates the digamma of the given input tensor, element-wise.
...@@ -3868,6 +3988,7 @@ def digamma(x, name=None): ...@@ -3868,6 +3988,7 @@ def digamma(x, name=None):
helper.append_op(type='digamma', inputs={'X': x}, outputs={'Out': out}) helper.append_op(type='digamma', inputs={'X': x}, outputs={'Out': out})
return out return out
def lgamma(x, name=None): def lgamma(x, name=None):
r""" r"""
Calculates the lgamma of the given input tensor, element-wise. Calculates the lgamma of the given input tensor, element-wise.
...@@ -3927,7 +4048,13 @@ def neg(x, name=None): ...@@ -3927,7 +4048,13 @@ def neg(x, name=None):
# [0.4 0.2 -0.1 -0.3] # [0.4 0.2 -0.1 -0.3]
""" """
return scale(x, scale=-1.0, bias=0.0, bias_after_scale=True, act=None, name=name) return scale(x,
scale=-1.0,
bias=0.0,
bias_after_scale=True,
act=None,
name=name)
def atan2(x, y, name=None): def atan2(x, y, name=None):
r""" r"""
...@@ -3973,21 +4100,25 @@ def atan2(x, y, name=None): ...@@ -3973,21 +4100,25 @@ def atan2(x, y, name=None):
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.atan2( x, y) return _C_ops.atan2(x, y)
else: else:
if _in_legacy_dygraph(): if _in_legacy_dygraph():
return _legacy_C_ops.atan2(x, y) return _legacy_C_ops.atan2(x, y)
else: else:
check_variable_and_dtype(x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'], 'atan2') check_variable_and_dtype(
check_variable_and_dtype(y, 'y', ['int32', 'int64', 'float16', 'float32', 'float64'], 'atan2') x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'],
'atan2')
check_variable_and_dtype(
y, 'y', ['int32', 'int64', 'float16', 'float32', 'float64'],
'atan2')
helper = LayerHelper('atan2', **locals()) helper = LayerHelper('atan2', **locals())
inputs = {'X1' : x, 'X2' : y} inputs = {'X1': x, 'X2': y}
out = helper.create_variable_for_type_inference(dtype=x.dtype) out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op( helper.append_op(type='atan2', inputs=inputs, outputs={'Out': out})
type='atan2', inputs=inputs, outputs={'Out': out})
return out return out
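atan2(x, y) is the quadrant-aware arctangent of x / y; a short sketch (approximate values):

    import paddle

    x = paddle.to_tensor([1., -1.])
    y = paddle.to_tensor([1., 1.])
    print(paddle.atan2(x, y))  # [0.7854, -0.7854], i.e. +/- pi/4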
def logit(x, eps=None, name=None): def logit(x, eps=None, name=None):
r""" r"""
This function generates a new tensor with the logit of the elements of input x. x is clamped to [eps, 1-eps] when eps is not zero. When eps is zero and x < 0 or x > 1, the function will yields NaN. This function generates a new tensor with the logit of the elements of input x. x is clamped to [eps, 1-eps] when eps is not zero. When eps is zero and x < 0 or x > 1, the function will yields NaN.
...@@ -4038,13 +4169,13 @@ def logit(x, eps=None, name=None): ...@@ -4038,13 +4169,13 @@ def logit(x, eps=None, name=None):
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'logit') check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'logit')
helper = LayerHelper("logit", **locals()) helper = LayerHelper("logit", **locals())
out = helper.create_variable_for_type_inference(x.dtype) out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op( helper.append_op(type='logit',
type='logit', inputs={'X': x},
inputs={'X': x}, outputs={'Out': out},
outputs={'Out': out}, attrs={'eps': eps})
attrs={'eps': eps})
return out return out
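logit(x) = log(x / (1 - x)), with optional eps clamping; a sketch (approximate values):

    import paddle

    x = paddle.to_tensor([0.25, 0.5, 0.75])
    print(paddle.logit(x))  # [-1.0986, 0., 1.0986]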
def lerp(x, y, weight, name=None): def lerp(x, y, weight, name=None):
r""" r"""
Does a linear interpolation between x and y based on weight. Does a linear interpolation between x and y based on weight.
...@@ -4080,7 +4211,7 @@ def lerp(x, y, weight, name=None): ...@@ -4080,7 +4211,7 @@ def lerp(x, y, weight, name=None):
if isinstance(weight, float): if isinstance(weight, float):
weight = paddle.to_tensor(weight, dtype=x.dtype) weight = paddle.to_tensor(weight, dtype=x.dtype)
return _C_ops.lerp( x, y, weight) return _C_ops.lerp(x, y, weight)
if _in_legacy_dygraph(): if _in_legacy_dygraph():
if isinstance(weight, float): if isinstance(weight, float):
weight = paddle.to_tensor(weight, dtype=x.dtype) weight = paddle.to_tensor(weight, dtype=x.dtype)
...@@ -4099,6 +4230,7 @@ def lerp(x, y, weight, name=None): ...@@ -4099,6 +4230,7 @@ def lerp(x, y, weight, name=None):
helper.append_op(type='lerp', inputs=inputs, outputs={'Out': out}) helper.append_op(type='lerp', inputs=inputs, outputs={'Out': out})
return out return out
@inplace_apis_in_dygraph_only @inplace_apis_in_dygraph_only
def lerp_(x, y, weight, name=None): def lerp_(x, y, weight, name=None):
r""" r"""
...@@ -4112,11 +4244,14 @@ def lerp_(x, y, weight, name=None): ...@@ -4112,11 +4244,14 @@ def lerp_(x, y, weight, name=None):
elif isinstance(weight, (paddle.Tensor, Variable)): elif isinstance(weight, (paddle.Tensor, Variable)):
out_shape = broadcast_shape(out_shape, weight.shape) out_shape = broadcast_shape(out_shape, weight.shape)
if out_shape != x.shape: if out_shape != x.shape:
raise ValueError("The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(out_shape, x.shape)) raise ValueError(
"The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation."
.format(out_shape, x.shape))
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.lerp_( x, y, weight) return _C_ops.lerp_(x, y, weight)
return _legacy_C_ops.lerp_(x, y, weight) return _legacy_C_ops.lerp_(x, y, weight)
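lerp(x, y, w) returns x + w * (y - x); the inplace variant above additionally checks that broadcasting keeps x's shape. A sketch (approximate values):

    import paddle

    x = paddle.full([4], 1.0)
    y = paddle.full([4], 5.0)
    print(paddle.lerp(x, y, 0.5))  # [3., 3., 3., 3.]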
def erfinv(x, name=None): def erfinv(x, name=None):
r""" r"""
The inverse error function of x. Please refer to :ref:`api_paddle_erf` The inverse error function of x. Please refer to :ref:`api_paddle_erf`
...@@ -4143,7 +4278,7 @@ def erfinv(x, name=None): ...@@ -4143,7 +4278,7 @@ def erfinv(x, name=None):
""" """
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.erfinv( x ) return _C_ops.erfinv(x)
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'erfinv') check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'erfinv')
...@@ -4155,6 +4290,7 @@ def erfinv(x, name=None): ...@@ -4155,6 +4290,7 @@ def erfinv(x, name=None):
helper.append_op(type='erfinv', inputs={'X': x}, outputs={'Out': out}) helper.append_op(type='erfinv', inputs={'X': x}, outputs={'Out': out})
return out return out
@inplace_apis_in_dygraph_only @inplace_apis_in_dygraph_only
def erfinv_(x, name=None): def erfinv_(x, name=None):
r""" r"""
...@@ -4163,9 +4299,10 @@ def erfinv_(x, name=None): ...@@ -4163,9 +4299,10 @@ def erfinv_(x, name=None):
""" """
check_type(x, 'x', (paddle.Tensor, Variable), 'erfinv') check_type(x, 'x', (paddle.Tensor, Variable), 'erfinv')
if in_dygraph_mode(): if in_dygraph_mode():
return _C_ops.erfinv_( x ) return _C_ops.erfinv_(x)
return _legacy_C_ops.erfinv_(x) return _legacy_C_ops.erfinv_(x)
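erfinv is the inverse of paddle.erf; a quick numeric check (approximate values):

    import paddle

    x = paddle.to_tensor([0., 0.5, -0.9])
    print(paddle.erfinv(x))  # [0., 0.4769, -1.1631]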
def rad2deg(x, name=None): def rad2deg(x, name=None):
r""" r"""
Convert each of the elements of input x from angles in radians to degrees. Convert each of the elements of input x from angles in radians to degrees.
...@@ -4217,18 +4354,29 @@ def rad2deg(x, name=None): ...@@ -4217,18 +4354,29 @@ def rad2deg(x, name=None):
x = cast(x, dtype="float32") x = cast(x, dtype="float32")
return _legacy_C_ops.scale(x, 'scale', rad2deg_scale) return _legacy_C_ops.scale(x, 'scale', rad2deg_scale)
else: else:
check_variable_and_dtype(x, 'x', ['int32', 'int64', 'float32', 'float64'], 'rad2deg') check_variable_and_dtype(x, 'x',
['int32', 'int64', 'float32', 'float64'],
'rad2deg')
helper = LayerHelper('rad2deg', **locals()) helper = LayerHelper('rad2deg', **locals())
out_cast = x out_cast = x
if convert_dtype(x.dtype) in ['int32', 'int64']: if convert_dtype(x.dtype) in ['int32', 'int64']:
out_cast = helper.create_variable_for_type_inference(dtype=paddle.float32) out_cast = helper.create_variable_for_type_inference(
helper.append_op( dtype=paddle.float32)
type='cast', inputs={'X':x}, outputs={'Out': out_cast}, attrs={'in_dtype': x.dtype,'out_dtype': paddle.float32}) helper.append_op(type='cast',
inputs={'X': x},
outputs={'Out': out_cast},
attrs={
'in_dtype': x.dtype,
'out_dtype': paddle.float32
})
out = helper.create_variable_for_type_inference(dtype=out_cast.dtype) out = helper.create_variable_for_type_inference(dtype=out_cast.dtype)
helper.append_op( helper.append_op(type='scale',
type='scale', inputs={'X':out_cast}, outputs={'Out': out}, attrs={'scale': rad2deg_scale}) inputs={'X': out_cast},
outputs={'Out': out},
attrs={'scale': rad2deg_scale})
return out return out
def deg2rad(x, name=None): def deg2rad(x, name=None):
r""" r"""
Convert each of the elements of input x from degrees to angles in radians. Convert each of the elements of input x from degrees to angles in radians.
...@@ -4272,18 +4420,29 @@ def deg2rad(x, name=None): ...@@ -4272,18 +4420,29 @@ def deg2rad(x, name=None):
x = cast(x, dtype="float32") x = cast(x, dtype="float32")
return _legacy_C_ops.scale(x, 'scale', deg2rad_scale) return _legacy_C_ops.scale(x, 'scale', deg2rad_scale)
else: else:
check_variable_and_dtype(x, 'x', ['int32', 'int64', 'float32', 'float64'], 'deg2rad') check_variable_and_dtype(x, 'x',
['int32', 'int64', 'float32', 'float64'],
'deg2rad')
helper = LayerHelper('deg2rad', **locals()) helper = LayerHelper('deg2rad', **locals())
out_cast = x out_cast = x
if convert_dtype(x.dtype) in ['int32', 'int64']: if convert_dtype(x.dtype) in ['int32', 'int64']:
out_cast = helper.create_variable_for_type_inference(dtype=paddle.float32) out_cast = helper.create_variable_for_type_inference(
helper.append_op( dtype=paddle.float32)
type='cast', inputs={'X':x}, outputs={'Out': out_cast}, attrs={'in_dtype': x.dtype,'out_dtype': paddle.float32}) helper.append_op(type='cast',
inputs={'X': x},
outputs={'Out': out_cast},
attrs={
'in_dtype': x.dtype,
'out_dtype': paddle.float32
})
out = helper.create_variable_for_type_inference(dtype=out_cast.dtype) out = helper.create_variable_for_type_inference(dtype=out_cast.dtype)
helper.append_op( helper.append_op(type='scale',
type='scale', inputs={'X':out_cast}, outputs={'Out': out}, attrs={'scale': deg2rad_scale}) inputs={'X': out_cast},
outputs={'Out': out},
attrs={'scale': deg2rad_scale})
return out return out
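rad2deg and deg2rad are implemented as scale ops, casting integer inputs to float32 first as shown above; a round-trip sketch (approximate values):

    import paddle

    deg = paddle.to_tensor([0., 90., 180.])
    rad = paddle.deg2rad(deg)
    print(rad)                  # [0., 1.5708, 3.1416]
    print(paddle.rad2deg(rad))  # [0., 90., 180.]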
def gcd(x, y, name=None): def gcd(x, y, name=None):
""" """
Computes the element-wise greatest common divisor (GCD) of input |x| and |y|. Computes the element-wise greatest common divisor (GCD) of input |x| and |y|.
...@@ -4348,7 +4507,8 @@ def gcd(x, y, name=None): ...@@ -4348,7 +4507,8 @@ def gcd(x, y, name=None):
y_not_equal_0 = (y != 0) y_not_equal_0 = (y != 0)
y_safe = paddle.where(y_not_equal_0, y, paddle.ones(y.shape, y.dtype)) y_safe = paddle.where(y_not_equal_0, y, paddle.ones(y.shape, y.dtype))
x, y = (paddle.where(y_not_equal_0, y, x), x, y = (paddle.where(y_not_equal_0, y, x),
paddle.where(y_not_equal_0, paddle.mod(x, y_safe),paddle.zeros(y.shape, y.dtype))) paddle.where(y_not_equal_0, paddle.mod(x, y_safe),
paddle.zeros(y.shape, y.dtype)))
return (paddle.where(x < y, y, x), paddle.where(x < y, x, y)) return (paddle.where(x < y, y, x), paddle.where(x < y, x, y))
if paddle.in_dynamic_mode(): if paddle.in_dynamic_mode():
...@@ -4362,6 +4522,7 @@ def gcd(x, y, name=None): ...@@ -4362,6 +4522,7 @@ def gcd(x, y, name=None):
out, _ = paddle.static.nn.while_loop(_gcd_cond_fn, _gcd_body_fn, [x, y]) out, _ = paddle.static.nn.while_loop(_gcd_cond_fn, _gcd_body_fn, [x, y])
return out return out
def lcm(x, y, name=None): def lcm(x, y, name=None):
""" """
Computes the element-wise least common multiple (LCM) of input |x| and |y|. Computes the element-wise least common multiple (LCM) of input |x| and |y|.
...@@ -4416,9 +4577,11 @@ def lcm(x, y, name=None): ...@@ -4416,9 +4577,11 @@ def lcm(x, y, name=None):
# they won't be used. # they won't be used.
d_equal_0 = paddle.equal(d, 0) d_equal_0 = paddle.equal(d, 0)
d_safe = paddle.where(d_equal_0, paddle.ones(d.shape, d.dtype), d) d_safe = paddle.where(d_equal_0, paddle.ones(d.shape, d.dtype), d)
out = paddle.where(d_equal_0, paddle.zeros(d.shape, d.dtype), paddle.abs(x * y) // d_safe) out = paddle.where(d_equal_0, paddle.zeros(d.shape, d.dtype),
paddle.abs(x * y) // d_safe)
return out return out
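gcd and lcm expect integer tensors and handle zeros as the guarded where calls above show; a combined sketch (approximate values):

    import paddle

    x = paddle.to_tensor([12, 20, 0])
    y = paddle.to_tensor([18, 8, 5])
    print(paddle.gcd(x, y))  # [6, 4, 5]
    print(paddle.lcm(x, y))  # [36, 40, 0]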
def diff(x, n=1, axis=-1, prepend=None, append=None, name=None): def diff(x, n=1, axis=-1, prepend=None, append=None, name=None):
r""" r"""
Computes the n-th forward difference along the given axis. Computes the n-th forward difference along the given axis.
...@@ -4510,14 +4673,14 @@ def diff(x, n=1, axis=-1, prepend=None, append=None, name=None): ...@@ -4510,14 +4673,14 @@ def diff(x, n=1, axis=-1, prepend=None, append=None, name=None):
attrs_1 += ('starts', starts_1) attrs_1 += ('starts', starts_1)
ends_1 = [dim_len - 1] ends_1 = [dim_len - 1]
attrs_1 += ('ends', ends_1) attrs_1 += ('ends', ends_1)
input_front = _C_ops.slice(new_input, axes, starts_1, ends_1, infer_flags, input_front = _C_ops.slice(new_input, axes, starts_1, ends_1,
[]) infer_flags, [])
starts_2 = [1] starts_2 = [1]
attrs_2 += ('starts', starts_2) attrs_2 += ('starts', starts_2)
ends_2 = [dim_len] ends_2 = [dim_len]
attrs_2 += ('ends', ends_2) attrs_2 += ('ends', ends_2)
input_back = _C_ops.slice(new_input, axes, starts_2, ends_2, infer_flags, input_back = _C_ops.slice(new_input, axes, starts_2, ends_2,
[]) infer_flags, [])
if x.dtype == paddle.bool: if x.dtype == paddle.bool:
return _C_ops.logical_xor(input_back, input_front) return _C_ops.logical_xor(input_back, input_front)
...@@ -4564,7 +4727,8 @@ def diff(x, n=1, axis=-1, prepend=None, append=None, name=None): ...@@ -4564,7 +4727,8 @@ def diff(x, n=1, axis=-1, prepend=None, append=None, name=None):
else: else:
return elementwise_sub(input_back, input_front, axis=axis) return elementwise_sub(input_back, input_front, axis=axis)
else: else:
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'bool', 'int32', 'int64'], 'diff') check_variable_and_dtype(
x, 'x', ['float32', 'float64', 'bool', 'int32', 'int64'], 'diff')
check_type(axis, 'axis', (int), 'diff') check_type(axis, 'axis', (int), 'diff')
helper = LayerHelper('diff', **locals()) helper = LayerHelper('diff', **locals())
has_pend = False has_pend = False
...@@ -4581,9 +4745,10 @@ def diff(x, n=1, axis=-1, prepend=None, append=None, name=None): ...@@ -4581,9 +4745,10 @@ def diff(x, n=1, axis=-1, prepend=None, append=None, name=None):
if has_pend: if has_pend:
new_input = helper.create_variable_for_type_inference(dtype) new_input = helper.create_variable_for_type_inference(dtype)
helper.append_op( helper.append_op(type='concat',
type='concat', inputs={'X': input_list}, outputs={'Out': [new_input]}, attrs={'axis': axis} inputs={'X': input_list},
) outputs={'Out': [new_input]},
attrs={'axis': axis})
else: else:
new_input = x new_input = x
...@@ -4594,29 +4759,35 @@ def diff(x, n=1, axis=-1, prepend=None, append=None, name=None): ...@@ -4594,29 +4759,35 @@ def diff(x, n=1, axis=-1, prepend=None, append=None, name=None):
attrs_1['starts'] = starts_1 attrs_1['starts'] = starts_1
attrs_1['ends'] = ends_1 attrs_1['ends'] = ends_1
input_front = helper.create_variable_for_type_inference(dtype) input_front = helper.create_variable_for_type_inference(dtype)
helper.append_op( helper.append_op(type='slice',
type='slice', inputs={'Input': new_input}, attrs=attrs_1, outputs={'Out': input_front} inputs={'Input': new_input},
) attrs=attrs_1,
outputs={'Out': input_front})
attrs_2 = {'axes': axes} attrs_2 = {'axes': axes}
starts_2 = [1] starts_2 = [1]
ends_2 = [dim_len] ends_2 = [dim_len]
attrs_2['starts'] = starts_2 attrs_2['starts'] = starts_2
attrs_2['ends'] = ends_2 attrs_2['ends'] = ends_2
input_back = helper.create_variable_for_type_inference(dtype) input_back = helper.create_variable_for_type_inference(dtype)
helper.append_op( helper.append_op(type='slice',
type='slice', inputs={'Input': new_input}, attrs=attrs_2, outputs={'Out': input_back} inputs={'Input': new_input},
) attrs=attrs_2,
outputs={'Out': input_back})
if dtype == paddle.bool: if dtype == paddle.bool:
out = helper.create_variable_for_type_inference(dtype) out = helper.create_variable_for_type_inference(dtype)
helper.append_op( helper.append_op(type='logical_xor',
type='logical_xor', inputs={"X": input_back, "Y": input_front}, outputs={"Out": out} inputs={
) "X": input_back,
"Y": input_front
},
outputs={"Out": out})
else: else:
out = elementwise_sub(input_back, input_front, axis=axis) out = elementwise_sub(input_back, input_front, axis=axis)
return out return out
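A first-order diff sketch for the default n=1, axis=-1 case handled above (approximate values):

    import paddle

    x = paddle.to_tensor([1, 4, 9, 16])
    print(paddle.diff(x))  # [3, 5, 7]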
def angle(x, name=None): def angle(x, name=None):
r""" r"""
Element-wise angle of complex numbers. For non-negative real numbers, the angle is 0 while Element-wise angle of complex numbers. For non-negative real numbers, the angle is 0 while
...@@ -4662,7 +4833,8 @@ def angle(x, name=None): ...@@ -4662,7 +4833,8 @@ def angle(x, name=None):
return _legacy_C_ops.angle(x) return _legacy_C_ops.angle(x)
check_variable_and_dtype(x, 'x', check_variable_and_dtype(x, 'x',
['float32', 'float64', 'complex64', 'complex128'], 'angle') ['float32', 'float64', 'complex64', 'complex128'],
'angle')
op_type = "angle" op_type = "angle"
helper = LayerHelper(op_type, **locals()) helper = LayerHelper(op_type, **locals())
inputs = {"X": x} inputs = {"X": x}
...@@ -4672,6 +4844,7 @@ def angle(x, name=None): ...@@ -4672,6 +4844,7 @@ def angle(x, name=None):
helper.append_op(type=op_type, inputs=inputs, outputs=outputs) helper.append_op(type=op_type, inputs=inputs, outputs=outputs)
return out return out
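angle returns the argument of each element (pi for negative reals, 0 for non-negative ones); a sketch assuming complex inputs built with paddle.to_tensor (approximate values):

    import paddle

    x = paddle.to_tensor([1 + 1j, -1 + 0j, -2j])
    print(paddle.angle(x))  # [0.7854, 3.1416, -1.5708]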
def heaviside(x, y, name=None): def heaviside(x, y, name=None):
r""" r"""
Computes the Heaviside step function determined by corresponding element in y for each element in x. The equation is Computes the Heaviside step function determined by corresponding element in y for each element in x. The equation is
...@@ -4715,10 +4888,14 @@ def heaviside(x, y, name=None): ...@@ -4715,10 +4888,14 @@ def heaviside(x, y, name=None):
axis = -1 axis = -1
act = None act = None
if _non_static_mode(): if _non_static_mode():
return _elementwise_op_in_dygraph( return _elementwise_op_in_dygraph(x,
x, y, axis=axis, act=act, op_name=op_type) y,
axis=axis,
act=act,
op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals())) return _elementwise_op(LayerHelper(op_type, **locals()))
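heaviside(x, y) is 0 where x < 0, y where x == 0 and 1 where x > 0; a short sketch (approximate values):

    import paddle

    x = paddle.to_tensor([-2., 0., 3.])
    y = paddle.to_tensor([0.5, 0.5, 0.5])
    print(paddle.heaviside(x, y))  # [0., 0.5, 1.]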
def frac(x, name=None): def frac(x, name=None):
""" """
This API is used to return the fractional portion of each element in input. This API is used to return the fractional portion of each element in input.
...@@ -4751,28 +4928,39 @@ def frac(x, name=None): ...@@ -4751,28 +4928,39 @@ def frac(x, name=None):
op_type = 'elementwise_sub' op_type = 'elementwise_sub'
axis = -1 axis = -1
act = None act = None
if x.dtype not in [paddle.int32, paddle.int64, paddle.float32, paddle.float64]: if x.dtype not in [
paddle.int32, paddle.int64, paddle.float32, paddle.float64
]:
raise TypeError( raise TypeError(
"The data type of input must be one of ['int32', 'int64', 'float32', 'float64'], but got {}".format(x.dtype)) "The data type of input must be one of ['int32', 'int64', 'float32', 'float64'], but got {}"
.format(x.dtype))
if in_dygraph_mode(): if in_dygraph_mode():
y = _C_ops.trunc(x) y = _C_ops.trunc(x)
return _C_ops.subtract(x, y) return _C_ops.subtract(x, y)
else: else:
if _in_legacy_dygraph(): if _in_legacy_dygraph():
y = _legacy_C_ops.trunc(x) y = _legacy_C_ops.trunc(x)
return _elementwise_op_in_dygraph( return _elementwise_op_in_dygraph(x,
x, y, axis=axis, act=act, op_name=op_type) y,
axis=axis,
act=act,
op_name=op_type)
else: else:
inputs = {"X": x} inputs = {"X": x}
attrs = {} attrs = {}
helper = LayerHelper("trunc", **locals()) helper = LayerHelper("trunc", **locals())
check_variable_and_dtype(x, "X", ['int32', 'int64', 'float32', 'float64'], 'trunc') check_variable_and_dtype(x, "X",
['int32', 'int64', 'float32', 'float64'],
'trunc')
y = helper.create_variable_for_type_inference(dtype=x.dtype) y = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op( helper.append_op(type="trunc",
type="trunc", inputs=inputs, attrs=attrs, outputs={"Out": y}) inputs=inputs,
attrs=attrs,
outputs={"Out": y})
return _elementwise_op(LayerHelper(op_type, **locals())) return _elementwise_op(LayerHelper(op_type, **locals()))
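frac(x) = x - trunc(x), which is what the trunc plus elementwise_sub wiring above implements; a sketch (approximate values):

    import paddle

    x = paddle.to_tensor([1.5, -2.25, 3.0])
    print(paddle.frac(x))  # [0.5, -0.25, 0.]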
def sgn(x, name=None): def sgn(x, name=None):
""" """
For complex tensor, this API returns a new tensor whose elements have the same angles as the corresponding For complex tensor, this API returns a new tensor whose elements have the same angles as the corresponding
...@@ -4798,10 +4986,13 @@ def sgn(x, name=None): ...@@ -4798,10 +4986,13 @@ def sgn(x, name=None):
# [0.6+0.8j 1.+0.j 0.+0.j -1.+0.j]] # [0.6+0.8j 1.+0.j 0.+0.j -1.+0.j]]
""" """
if x.dtype not in [paddle.float16, paddle.float32, paddle.float64, paddle.complex64, paddle.complex128]: if x.dtype not in [
paddle.float16, paddle.float32, paddle.float64, paddle.complex64,
paddle.complex128
]:
raise TypeError( raise TypeError(
"The data type of input must be one of ['float16', 'float32', 'float64', 'complex64', 'complex128'], but got {}" "The data type of input must be one of ['float16', 'float32', 'float64', 'complex64', 'complex128'], but got {}"
.format(x.dtype)) .format(x.dtype))
if paddle.is_complex(x): if paddle.is_complex(x):
expand_x = paddle.as_real(x) expand_x = paddle.as_real(x)
x_abs = paddle.abs(x) x_abs = paddle.abs(x)
...@@ -4814,6 +5005,7 @@ def sgn(x, name=None): ...@@ -4814,6 +5005,7 @@ def sgn(x, name=None):
else: else:
return paddle.sign(x) return paddle.sign(x)
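sgn maps complex values to unit-modulus values (x / |x|, with 0 mapped to 0) and falls back to paddle.sign for real inputs; a sketch based on the docstring values above, assuming complex construction via paddle.to_tensor:

    import paddle

    x = paddle.to_tensor([3 + 4j, 0j, -2 + 0j])
    print(paddle.sgn(x))  # [(0.6+0.8j), 0j, (-1+0j)]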
def take(x, index, mode='raise', name=None): def take(x, index, mode='raise', name=None):
""" """
Returns a new tensor with the elements of input tensor x at the given index. Returns a new tensor with the elements of input tensor x at the given index.
...@@ -4881,16 +5073,18 @@ def take(x, index, mode='raise', name=None): ...@@ -4881,16 +5073,18 @@ def take(x, index, mode='raise', name=None):
""" """
if mode not in ['raise', 'wrap', 'clip']: if mode not in ['raise', 'wrap', 'clip']:
raise ValueError( raise ValueError(
"'mode' in 'take' should be 'raise', 'wrap', 'clip', but received {}.".format(mode)) "'mode' in 'take' should be 'raise', 'wrap', 'clip', but received {}."
.format(mode))
if paddle.in_dynamic_mode(): if paddle.in_dynamic_mode():
if not isinstance(index, (paddle.Tensor, Variable)): if not isinstance(index, (paddle.Tensor, Variable)):
raise TypeError( raise TypeError(
"The type of 'index' must be Tensor, but got {}".format(type(index))) "The type of 'index' must be Tensor, but got {}".format(
type(index)))
if index.dtype not in [paddle.int32, paddle.int64]: if index.dtype not in [paddle.int32, paddle.int64]:
raise TypeError( raise TypeError(
"The data type of 'index' must be one of ['int32', 'int64'], but got {}".format( "The data type of 'index' must be one of ['int32', 'int64'], but got {}"
index.dtype)) .format(index.dtype))
else: else:
check_variable_and_dtype(index, 'index', ['int32', 'int64'], 'take') check_variable_and_dtype(index, 'index', ['int32', 'int64'], 'take')
...@@ -4904,10 +5098,9 @@ def take(x, index, mode='raise', name=None): ...@@ -4904,10 +5098,9 @@ def take(x, index, mode='raise', name=None):
index_1d = paddle.where(index_1d < 0, index_1d + max_index, index_1d) index_1d = paddle.where(index_1d < 0, index_1d + max_index, index_1d)
elif mode == 'wrap': elif mode == 'wrap':
# The out of range indices are constrained by taking the remainder. # The out of range indices are constrained by taking the remainder.
index_1d = paddle.where(index_1d < 0, index_1d = paddle.where(index_1d < 0, index_1d % max_index, index_1d)
index_1d % max_index, index_1d) index_1d = paddle.where(index_1d >= max_index, index_1d % max_index,
index_1d = paddle.where(index_1d >= max_index, index_1d)
index_1d % max_index, index_1d)
elif mode == 'clip': elif mode == 'clip':
# 'clip' mode disables indexing with negative numbers. # 'clip' mode disables indexing with negative numbers.
index_1d = clip(index_1d, 0, max_index - 1) index_1d = clip(index_1d, 0, max_index - 1)
......