Unverified commit 842325dd authored by Nyakku Shigure and committed by GitHub

[CodeStyle] remove some `yapf: disable` (#46410)

* remove `yapf: disable` from math.py

* disable test case only

* remove useless disable

* remove useless disable

* revert no_grad_set_white_list
Parent ed2bb051
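
For context on the change below: yapf honors two pragma forms. A standalone `# yapf: disable` comment turns the formatter off until a matching `# yapf: enable`, while a trailing `# yapf: disable` on a statement skips only that one statement. This PR removes the broad file/block-level pragmas and keeps only trailing pragmas on the hand-aligned parameterized test tables. The sketch below is a minimal, hypothetical illustration of that pattern (the names are not from this diff):

# yapf reformats ordinary statements like this call.
print(sum([1, 2, 3]))

# yapf: disable
identity = [[1, 0, 0],
            [0, 1, 0],
            [0, 0, 1]]  # manual alignment kept until the matching enable below
# yapf: enable

# Trailing pragma form adopted for the test-case tables in this PR:
# only this single statement keeps its hand-written layout.
CASES = [
    ('case_small',  10,  1),
    ('case_large', 100, 10),
]  # yapf: disable
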
......@@ -1094,5 +1094,3 @@ class TestIfftShift(unittest.TestCase):
if __name__ == '__main__':
unittest.main()
# yapf: enable
......@@ -925,5 +925,3 @@ class TestIfftShift(unittest.TestCase):
if __name__ == '__main__':
unittest.main()
# yapf: enable
......@@ -642,7 +642,6 @@ def to_safe_name(s):
return str(re.sub("[^a-zA-Z0-9_]+", "_", s))
# yapf: disable
@place(DEVICES)
@parameterize(
(TEST_CASE_NAME, 'x', 'frame_length', 'hop_length', 'axis'),
......@@ -653,10 +652,19 @@ def to_safe_name(s):
('test_2d_input2', rand_x(2, np.float64, shape=[8, 150]), 50, 15, -1),
('test_3d_input1', rand_x(3, np.float64, shape=[150, 4, 2]), 50, 15, 0),
('test_3d_input2', rand_x(3, np.float64, shape=[4, 2, 150]), 50, 15, -1),
])
]) # yapf: disable
class TestFrame(unittest.TestCase):
def test_frame(self):
np.testing.assert_allclose(frame_for_api_test(self.x, self.frame_length, self.hop_length, self.axis), paddle.signal.frame(paddle.to_tensor(self.x), self.frame_length, self.hop_length, self.axis), rtol=rtol.get(str(self.x.dtype)), atol=atol.get(str(self.x.dtype)))
np.testing.assert_allclose(frame_for_api_test(self.x, self.frame_length,
self.hop_length,
self.axis),
paddle.signal.frame(paddle.to_tensor(self.x),
self.frame_length,
self.hop_length,
self.axis),
rtol=rtol.get(str(self.x.dtype)),
atol=atol.get(str(self.x.dtype)))
@place(DEVICES)
......@@ -669,24 +677,29 @@ class TestFrame(unittest.TestCase):
('test_2d_input2', rand_x(2, np.float64, shape=[8, 150]), 50, 15, -1),
('test_3d_input1', rand_x(3, np.float64, shape=[150, 4, 2]), 50, 15, 0),
('test_3d_input2', rand_x(3, np.float64, shape=[4, 2, 150]), 50, 15, -1),
])
]) # yapf: disable
class TestFrameStatic(unittest.TestCase):
def test_frame_static(self):
paddle.enable_static()
mp, sp = paddle.static.Program(), paddle.static.Program()
with paddle.static.program_guard(mp, sp):
input = paddle.static.data('input', self.x.shape, dtype=self.x.dtype)
output = paddle.signal.frame(
input,
self.frame_length,
self.hop_length,
self.axis),
input = paddle.static.data('input',
self.x.shape,
dtype=self.x.dtype)
output = paddle.signal.frame(input, self.frame_length,
self.hop_length, self.axis),
exe = paddle.static.Executor(self.place)
exe.run(sp)
[output] = exe.run(mp, feed={'input': self.x}, fetch_list=[output])
paddle.disable_static()
np.testing.assert_allclose(frame_for_api_test(self.x, self.frame_length, self.hop_length, self.axis), output, rtol=rtol.get(str(self.x.dtype)), atol=atol.get(str(self.x.dtype)))
np.testing.assert_allclose(frame_for_api_test(self.x, self.frame_length,
self.hop_length,
self.axis),
output,
rtol=rtol.get(str(self.x.dtype)),
atol=atol.get(str(self.x.dtype)))
@place(DEVICES)
......@@ -697,15 +710,13 @@ class TestFrameStatic(unittest.TestCase):
('test_hop_length', rand_x(1, np.float64, shape=[150]), 50, 0, -1, ValueError),
('test_frame_length1', rand_x(2, np.float64, shape=[150, 8]), 0, 15, 0, ValueError),
('test_frame_length2', rand_x(2, np.float64, shape=[150, 8]), 151, 15, 0, ValueError),
])
]) # yapf: disable
class TestFrameException(unittest.TestCase):
def test_frame(self):
with self.assertRaises(self.expect_exception):
paddle.signal.frame(
paddle.to_tensor(self.x),
self.frame_length,
self.hop_length,
self.axis)
paddle.signal.frame(paddle.to_tensor(self.x), self.frame_length,
self.hop_length, self.axis)
@place(DEVICES)
......@@ -718,10 +729,16 @@ class TestFrameException(unittest.TestCase):
('test_3d_input2', rand_x(3, np.float64, shape=[2, 40, 5]), 10, -1),
('test_4d_input1', rand_x(4, np.float64, shape=[8, 12, 5, 3]), 5, 0),
('test_4d_input2', rand_x(4, np.float64, shape=[3, 5, 12, 8]), 5, -1),
])
]) # yapf: disable
class TestOverlapAdd(unittest.TestCase):
def test_overlap_add(self):
np.testing.assert_allclose(overlap_add_for_api_test(self.x, self.hop_length, self.axis), paddle.signal.overlap_add(paddle.to_tensor(self.x), self.hop_length, self.axis), rtol=rtol.get(str(self.x.dtype)), atol=atol.get(str(self.x.dtype)))
np.testing.assert_allclose(
overlap_add_for_api_test(self.x, self.hop_length, self.axis),
paddle.signal.overlap_add(paddle.to_tensor(self.x), self.hop_length,
self.axis),
rtol=rtol.get(str(self.x.dtype)),
atol=atol.get(str(self.x.dtype)))
@place(DEVICES)
......@@ -734,23 +751,28 @@ class TestOverlapAdd(unittest.TestCase):
('test_3d_input2', rand_x(3, np.float64, shape=[2, 40, 5]), 10, -1),
('test_4d_input1', rand_x(4, np.float64, shape=[8, 12, 5, 3]), 5, 0),
('test_4d_input2', rand_x(4, np.float64, shape=[3, 5, 12, 8]), 5, -1),
])
]) # yapf: disable
class TestOverlapAddStatic(unittest.TestCase):
def test_overlap_add_static(self):
paddle.enable_static()
mp, sp = paddle.static.Program(), paddle.static.Program()
with paddle.static.program_guard(mp, sp):
input = paddle.static.data('input', self.x.shape, dtype=self.x.dtype)
output = paddle.signal.overlap_add(
input,
self.hop_length,
self.axis),
input = paddle.static.data('input',
self.x.shape,
dtype=self.x.dtype)
output = paddle.signal.overlap_add(input, self.hop_length,
self.axis),
exe = paddle.static.Executor(self.place)
exe.run(sp)
[output] = exe.run(mp, feed={'input': self.x}, fetch_list=[output])
paddle.disable_static()
np.testing.assert_allclose(overlap_add_for_api_test(self.x, self.hop_length, self.axis), output, rtol=rtol.get(str(self.x.dtype)), atol=atol.get(str(self.x.dtype)))
np.testing.assert_allclose(overlap_add_for_api_test(
self.x, self.hop_length, self.axis),
output,
rtol=rtol.get(str(self.x.dtype)),
atol=atol.get(str(self.x.dtype)))
@place(DEVICES)
......@@ -759,14 +781,13 @@ class TestOverlapAddStatic(unittest.TestCase):
[
('test_axis', rand_x(2, np.float64, shape=[3, 50]), 4, 2, ValueError),
('test_hop_length', rand_x(2, np.float64, shape=[50, 3]), -1, -1, ValueError),
])
]) # yapf: disable
class TestOverlapAddException(unittest.TestCase):
def test_overlap_add(self):
with self.assertRaises(self.expect_exception):
paddle.signal.overlap_add(
paddle.to_tensor(self.x),
self.hop_length,
self.axis)
paddle.signal.overlap_add(paddle.to_tensor(self.x), self.hop_length,
self.axis)
# ================= STFT
......@@ -815,8 +836,9 @@ class TestOverlapAddException(unittest.TestCase):
512, None, None, None, True, 'reflect', False, True),
('test_center', rand_x(2, np.float64, shape=[1, 160000]),
512, None, None, None, False, 'reflect', False, True),
])
])# yapf: disable
class TestStft(unittest.TestCase):
def test_stft(self):
if self.window is None:
win_p = None
......@@ -825,7 +847,15 @@ class TestStft(unittest.TestCase):
win_p = paddle.to_tensor(self.window)
win_l = self.window
np.testing.assert_allclose(stft(self.x, self.n_fft, self.hop_length, self.win_length, win_l, self.center, self.pad_mode), paddle.signal.stft(paddle.to_tensor(self.x), self.n_fft, self.hop_length, self.win_length, win_p, self.center, self.pad_mode, self.normalized, self.onesided), rtol=rtol.get(str(self.x.dtype)), atol=atol.get(str(self.x.dtype)))
np.testing.assert_allclose(
stft(self.x, self.n_fft, self.hop_length, self.win_length, win_l,
self.center, self.pad_mode),
paddle.signal.stft(paddle.to_tensor(self.x), self.n_fft,
self.hop_length, self.win_length, win_p,
self.center, self.pad_mode, self.normalized,
self.onesided),
rtol=rtol.get(str(self.x.dtype)),
atol=atol.get(str(self.x.dtype)))
@place(DEVICES)
......@@ -848,8 +878,9 @@ class TestStft(unittest.TestCase):
512, None, None, None, True, 'nonsense', False, True, AssertionError),
('test_complex_onesided', rand_x(1, np.float64, shape=[16000], complex=True),
512, None, None, None, False, 'reflect', False, True, AssertionError),
])
]) # yapf: disable
class TestStftException(unittest.TestCase):
def test_stft(self):
if self.window is None:
win_p = None
......@@ -857,16 +888,10 @@ class TestStftException(unittest.TestCase):
win_p = paddle.to_tensor(self.window)
with self.assertRaises(self.expect_exception):
paddle.signal.stft(
paddle.to_tensor(self.x),
self.n_fft,
self.hop_length,
self.win_length,
win_p,
self.center,
self.pad_mode,
self.normalized,
self.onesided),
paddle.signal.stft(paddle.to_tensor(self.x), self.n_fft,
self.hop_length, self.win_length, win_p,
self.center, self.pad_mode, self.normalized,
self.onesided),
@place(DEVICES)
......@@ -887,8 +912,9 @@ class TestStftException(unittest.TestCase):
512, None, None, None, False, False, True, None, False),
('test_length', rand_x(3, np.float64, shape=[1, 257, 471], complex=True),
512, None, None, None, False, False, True, 1888, False),
])
]) # yapf: disable
class TestIstft(unittest.TestCase):
def test_istft(self):
if self.window is None:
win_p = None
......@@ -897,7 +923,15 @@ class TestIstft(unittest.TestCase):
win_p = paddle.to_tensor(self.window)
win_l = self.window
np.testing.assert_allclose(istft(self.x, self.hop_length, self.win_length, win_l, self.center, self.length), paddle.signal.istft(paddle.to_tensor(self.x), self.n_fft, self.hop_length, self.win_length, win_p, self.center, self.normalized, self.onesided, self.length, self.return_complex), rtol=rtol.get(str(self.x.dtype)), atol=atol.get(str(self.x.dtype)))
np.testing.assert_allclose(
istft(self.x, self.hop_length, self.win_length, win_l, self.center,
self.length),
paddle.signal.istft(paddle.to_tensor(self.x), self.n_fft,
self.hop_length, self.win_length, win_p,
self.center, self.normalized, self.onesided,
self.length, self.return_complex),
rtol=rtol.get(str(self.x.dtype)),
atol=atol.get(str(self.x.dtype)))
@place(DEVICES)
......@@ -928,8 +962,9 @@ class TestIstft(unittest.TestCase):
512, None, None, rand_x(1, np.float64, shape=[512], complex=True), True, False, True, None, False, AssertionError),
('test_NOLA', rand_x(3, np.float64, shape=[1, 257, 471], complex=True),
512, 512, None, get_window('hann', 512), True, False, True, None, False, ValueError),
])
]) # yapf: disable
class TestIstftException(unittest.TestCase):
def test_istft(self):
if self.window is None:
win_p = None
......@@ -937,20 +972,11 @@ class TestIstftException(unittest.TestCase):
win_p = paddle.to_tensor(self.window)
with self.assertRaises(self.expect_exception):
paddle.signal.istft(
paddle.to_tensor(self.x),
self.n_fft,
self.hop_length,
self.win_length,
win_p,
self.center,
self.normalized,
self.onesided,
self.length,
self.return_complex),
# yapf: enable
paddle.signal.istft(paddle.to_tensor(self.x), self.n_fft,
self.hop_length, self.win_length, win_p,
self.center, self.normalized, self.onesided,
self.length, self.return_complex),
if __name__ == '__main__':
unittest.main()
......@@ -37,37 +37,35 @@ from ..fluid.dygraph.inplace_utils import inplace_apis_in_dygraph_only
from ..fluid.layers import utils
# TODO: define math functions
# yapf: disable
from .ops import abs # noqa: F401
from .ops import acos # noqa: F401
from .ops import asin # noqa: F401
from .ops import ceil # noqa: F401
from .ops import ceil_ # noqa: F401
from .ops import cos # noqa: F401
from .ops import tan # noqa: F401
from .ops import sinh # noqa: F401
from .ops import cosh # noqa: F401
from .ops import exp # noqa: F401
from .ops import exp_ # noqa: F401
from .ops import expm1 # noqa: F401
from .ops import floor # noqa: F401
from .ops import floor_ # noqa: F401
from .ops import reciprocal # noqa: F401
from .ops import reciprocal_ # noqa: F401
from .ops import round # noqa: F401
from .ops import round_ # noqa: F401
from .ops import rsqrt # noqa: F401
from .ops import rsqrt_ # noqa: F401
from .ops import square # noqa: F401
from .ops import atan # noqa: F401
from .ops import erf # noqa: F401
from .ops import sqrt # noqa: F401
from .ops import sqrt_ # noqa: F401
from .ops import sin # noqa: F401
from .ops import asinh # noqa: F401
from .ops import acosh # noqa: F401
from .ops import atanh # noqa: F401
from .ops import abs # noqa: F401
from .ops import acos # noqa: F401
from .ops import asin # noqa: F401
from .ops import ceil # noqa: F401
from .ops import ceil_ # noqa: F401
from .ops import cos # noqa: F401
from .ops import tan # noqa: F401
from .ops import sinh # noqa: F401
from .ops import cosh # noqa: F401
from .ops import exp # noqa: F401
from .ops import exp_ # noqa: F401
from .ops import expm1 # noqa: F401
from .ops import floor # noqa: F401
from .ops import floor_ # noqa: F401
from .ops import reciprocal # noqa: F401
from .ops import reciprocal_ # noqa: F401
from .ops import round # noqa: F401
from .ops import round_ # noqa: F401
from .ops import rsqrt # noqa: F401
from .ops import rsqrt_ # noqa: F401
from .ops import square # noqa: F401
from .ops import atan # noqa: F401
from .ops import erf # noqa: F401
from .ops import sqrt # noqa: F401
from .ops import sqrt_ # noqa: F401
from .ops import sin # noqa: F401
from .ops import asinh # noqa: F401
from .ops import acosh # noqa: F401
from .ops import atanh # noqa: F401
from ..fluid.layers import elementwise_sub
from paddle import _C_ops, _legacy_C_ops
......@@ -181,9 +179,9 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
return dygraph_utils._append_activation_in_dygraph(out, act)
elif _in_legacy_dygraph():
_scale = scale.numpy().item(0) if isinstance(scale, Variable) else scale
out = _legacy_C_ops.scale(x, 'scale',
float(_scale), 'bias',
float(bias), 'bias_after_scale', bias_after_scale)
out = _legacy_C_ops.scale(x, 'scale', float(_scale), 'bias',
float(bias), 'bias_after_scale',
bias_after_scale)
return dygraph_utils._append_activation_in_dygraph(out, act)
check_variable_and_dtype(x, "x", [
......@@ -202,8 +200,10 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
helper = LayerHelper('scale', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='scale', inputs=inputs, outputs={'Out': out}, attrs=attrs)
helper.append_op(type='scale',
inputs=inputs,
outputs={'Out': out},
attrs=attrs)
return helper.append_activation(out)
......@@ -242,14 +242,16 @@ def stanh(x, scale_a=0.67, scale_b=1.7159, name=None):
helper = LayerHelper('stanh', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='stanh',
inputs={'X': x},
outputs={'Out': out},
attrs={'scale_a': scale_a,
'scale_b': scale_b})
helper.append_op(type='stanh',
inputs={'X': x},
outputs={'Out': out},
attrs={
'scale_a': scale_a,
'scale_b': scale_b
})
return out
def multiplex(inputs, index, name=None):
"""
......@@ -318,13 +320,15 @@ def multiplex(inputs, index, name=None):
check_variable_and_dtype(index, "index", ['int32', 'int64'], 'multiplex')
out = helper.create_variable_for_type_inference(inputs[0].dtype)
helper.append_op(
type='multiplex',
inputs={'X': inputs,
'Ids': index},
outputs={'Out': [out]})
helper.append_op(type='multiplex',
inputs={
'X': inputs,
'Ids': index
},
outputs={'Out': [out]})
return out
@inplace_apis_in_dygraph_only
def scale_(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
"""
......@@ -335,9 +339,9 @@ def scale_(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
return _C_ops.scale_(x, scale, float(bias), bias_after_scale)
if _in_legacy_dygraph():
_scale = scale.numpy().item(0) if isinstance(scale, Variable) else scale
return _legacy_C_ops.scale_(x, 'scale',
float(_scale), 'bias',
float(bias), 'bias_after_scale', bias_after_scale)
return _legacy_C_ops.scale_(x, 'scale', float(_scale), 'bias',
float(bias), 'bias_after_scale',
bias_after_scale)
def pow(x, y, name=None):
......@@ -392,23 +396,32 @@ def pow(x, y, name=None):
elif isinstance(y, (paddle.Tensor, Variable)):
return _C_ops.elementwise_pow(x, y)
else:
raise TypeError('y must be scalar or tensor type, but received: %s '% (y.dtype))
raise TypeError(
'y must be scalar or tensor type, but received: %s ' %
(y.dtype))
if _in_legacy_dygraph():
if isinstance(y, (int, float)):
return _legacy_C_ops.pow(x, 'factor', y)
elif isinstance(y, (paddle.Tensor, Variable)):
return _elementwise_op_in_dygraph(
x, y, axis=-1, act=None, op_name='elementwise_pow')
return _elementwise_op_in_dygraph(x,
y,
axis=-1,
act=None,
op_name='elementwise_pow')
else:
raise TypeError('y must be scalar or tensor type, but received: %s '% (y.dtype))
raise TypeError(
'y must be scalar or tensor type, but received: %s ' %
(y.dtype))
# in static graph mode
if isinstance(y, (int, float)):
helper = LayerHelper('pow', **locals())
inputs = {'X': x}
attrs = {'factor': y}
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='pow', inputs=inputs, outputs={'Out': out}, attrs=attrs)
helper.append_op(type='pow',
inputs=inputs,
outputs={'Out': out},
attrs=attrs)
return out
elif isinstance(y, (paddle.Tensor, Variable)):
# TODO A potential speed improvement is supporting different types in C++ and removing the cast ops here
......@@ -416,7 +429,8 @@ def pow(x, y, name=None):
out = helper.create_variable_for_type_inference(dtype=x.dtype)
return _elementwise_op(LayerHelper('elementwise_pow', **locals()))
else:
raise TypeError('y must be scalar or tensor type, but received: %s '% (type(y)))
raise TypeError('y must be scalar or tensor type, but received: %s ' %
(type(y)))
OP_NAMEMAPPING = {
......@@ -431,6 +445,7 @@ OP_NAMEMAPPING = {
'elementwise_mod': 'remainder',
}
@dygraph_only
def _elementwise_op_in_dygraph(x,
y,
......@@ -438,23 +453,28 @@ def _elementwise_op_in_dygraph(x,
act=None,
use_mkldnn=False,
op_name=None):
def is_inplace(op_name):
return op_name[-1] == "_"
return op_name[-1] == "_"
if op_name not in OP_NAMEMAPPING.keys() or axis != -1:
op = getattr(_legacy_C_ops, op_name)
out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)
else:
if in_dygraph_mode():
op = getattr(_C_ops, OP_NAMEMAPPING[op_name] if not is_inplace(op_name) else op_name)
op = getattr(
_C_ops,
OP_NAMEMAPPING[op_name] if not is_inplace(op_name) else op_name)
out = op(x, y)
if _in_legacy_dygraph():
op = getattr(_legacy_C_ops, op_name)
out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)
return dygraph_utils._append_activation_in_dygraph(
out, act, use_mkldnn=use_mkldnn)
return dygraph_utils._append_activation_in_dygraph(out,
act,
use_mkldnn=use_mkldnn)
def _elementwise_op(helper):
op_type = helper.layer_type
......@@ -481,15 +501,20 @@ def _elementwise_op(helper):
if name is None:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
else:
out = helper.create_variable(name=name, dtype=x.dtype, persistable=False)
helper.append_op(
type=op_type,
inputs={'X': x,
'Y': y},
outputs={'Out': out},
attrs={'axis': axis,
'use_mkldnn': use_mkldnn})
out = helper.create_variable(name=name,
dtype=x.dtype,
persistable=False)
helper.append_op(type=op_type,
inputs={
'X': x,
'Y': y
},
outputs={'Out': out},
attrs={
'axis': axis,
'use_mkldnn': use_mkldnn
})
return helper.append_activation(out)
......@@ -549,7 +574,7 @@ def add(x, y, name=None):
"""
if in_dygraph_mode():
return _C_ops.add( x, y)
return _C_ops.add(x, y)
else:
if _in_legacy_dygraph():
return _legacy_C_ops.elementwise_add(x, y)
......@@ -568,13 +593,14 @@ def add_(x, y, name=None):
out_shape = broadcast_shape(x.shape, y.shape)
if out_shape != x.shape:
raise ValueError("The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(out_shape, x.shape))
raise ValueError(
"The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation."
.format(out_shape, x.shape))
if in_dygraph_mode():
return _C_ops.add_(x, y)
else:
out = _elementwise_op_in_dygraph(
x, y, axis=axis, op_name=op_type)
out = _elementwise_op_in_dygraph(x, y, axis=axis, op_name=op_type)
return out
......@@ -639,8 +665,11 @@ def subtract(x, y, name=None):
return _C_ops.subtract(x, y)
else:
if _in_legacy_dygraph():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name=op_type)
return _elementwise_op_in_dygraph(x,
y,
axis=axis,
act=act,
op_name=op_type)
else:
return _elementwise_op(LayerHelper(op_type, **locals()))
......@@ -656,13 +685,18 @@ def subtract_(x, y, name=None):
out_shape = broadcast_shape(x.shape, y.shape)
if out_shape != x.shape:
raise ValueError("The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(out_shape, x.shape))
raise ValueError(
"The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation."
.format(out_shape, x.shape))
if in_dygraph_mode():
return _C_ops.subtract_(x, y)
else:
out = _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name='elementwise_sub_')
out = _elementwise_op_in_dygraph(x,
y,
axis=axis,
act=act,
op_name='elementwise_sub_')
return out
......@@ -700,11 +734,14 @@ def divide(x, y, name=None):
axis = -1
act = None
if in_dygraph_mode():
return _C_ops.divide( x, y)
return _C_ops.divide(x, y)
else:
if _in_legacy_dygraph():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name=op_type)
return _elementwise_op_in_dygraph(x,
y,
axis=axis,
act=act,
op_name=op_type)
else:
return _elementwise_op(LayerHelper(op_type, **locals()))
......@@ -744,8 +781,7 @@ def floor_divide(x, y, name=None):
if in_dygraph_mode():
return _C_ops.floor_divide(x, y)
elif _in_legacy_dygraph():
return _elementwise_op_in_dygraph(
x, y, axis=axis, op_name=op_type)
return _elementwise_op_in_dygraph(x, y, axis=axis, op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals()))
......@@ -787,8 +823,7 @@ def remainder(x, y, name=None):
if in_dygraph_mode():
return _C_ops.remainder(x, y)
elif _in_legacy_dygraph():
return _elementwise_op_in_dygraph(
x, y, axis=axis, op_name=op_type)
return _elementwise_op_in_dygraph(x, y, axis=axis, op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals()))
......@@ -805,8 +840,8 @@ def remainder_(x, y, name=None):
out_shape = broadcast_shape(x.shape, y.shape)
if out_shape != x.shape:
raise ValueError(
"The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(
out_shape, x.shape))
"The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation."
.format(out_shape, x.shape))
return _elementwise_op_in_dygraph(x, y, axis=axis, op_name=op_type)
......@@ -858,8 +893,11 @@ def multiply(x, y, name=None):
return _C_ops.multiply(x, y)
else:
if _in_legacy_dygraph():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name=op_type)
return _elementwise_op_in_dygraph(x,
y,
axis=axis,
act=act,
op_name=op_type)
else:
if x.dtype != y.dtype:
raise TypeError(
......@@ -868,6 +906,7 @@ def multiply(x, y, name=None):
return _elementwise_op(LayerHelper(op_type, **locals()))
def maximum(x, y, name=None):
"""
Compare two tensors and returns a new tensor containing the element-wise maxima. The equation is:
......@@ -925,10 +964,14 @@ def maximum(x, y, name=None):
if in_dygraph_mode():
return _C_ops.maximum(x, y)
elif _in_legacy_dygraph():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name=op_type)
return _elementwise_op_in_dygraph(x,
y,
axis=axis,
act=act,
op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals()))
def minimum(x, y, name=None):
"""
Compare two tensors and return a new tensor containing the element-wise minima. The equation is:
......@@ -986,10 +1029,14 @@ def minimum(x, y, name=None):
if in_dygraph_mode():
return _C_ops.minimum(x, y)
elif _in_legacy_dygraph():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name=op_type)
return _elementwise_op_in_dygraph(x,
y,
axis=axis,
act=act,
op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals()))
def fmax(x, y, name=None):
"""
Compares the elements at the corresponding positions of the two tensors and returns a new tensor containing the maximum value of the element.
......@@ -1049,10 +1096,14 @@ def fmax(x, y, name=None):
if in_dygraph_mode():
return _C_ops.fmax(x, y, axis)
if _in_legacy_dygraph():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name=op_type)
return _elementwise_op_in_dygraph(x,
y,
axis=axis,
act=act,
op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals()))
def fmin(x, y, name=None):
"""
Compares the elements at the corresponding positions of the two tensors and returns a new tensor containing the minimum value of the element.
......@@ -1112,8 +1163,11 @@ def fmin(x, y, name=None):
if in_dygraph_mode():
return _C_ops.fmin(x, y, axis)
if _in_legacy_dygraph():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name=op_type)
return _elementwise_op_in_dygraph(x,
y,
axis=axis,
act=act,
op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals()))
......@@ -1209,43 +1263,35 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None):
if _in_legacy_dygraph():
if dtype_flag:
return _legacy_C_ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all_flag, 'in_dtype',
x.dtype, 'out_dtype', dtype)
'reduce_all', reduce_all_flag,
'in_dtype', x.dtype, 'out_dtype',
dtype)
else:
return _legacy_C_ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all_flag)
'reduce_all', reduce_all_flag)
attrs = {
'dim': axis,
'keep_dim': keepdim,
'reduce_all': reduce_all_flag
}
attrs = {'dim': axis, 'keep_dim': keepdim, 'reduce_all': reduce_all_flag}
if dtype_flag:
attrs.update({
'in_dtype': x.dtype,
'out_dtype': dtype
})
attrs.update({'in_dtype': x.dtype, 'out_dtype': dtype})
check_variable_and_dtype(
x, 'x', ['bool', 'float16', 'float32', 'float64',
'int16', 'int32', 'int64', 'complex64', 'complex128',
u'bool', u'float16', u'float32', u'float64',
u'int32', u'int64', u'complex64', u'complex128'], 'sum')
check_variable_and_dtype(x, 'x', [
'bool', 'float16', 'float32', 'float64', 'int16', 'int32', 'int64',
'complex64', 'complex128', u'bool', u'float16', u'float32', u'float64',
u'int32', u'int64', u'complex64', u'complex128'
], 'sum')
check_type(axis, 'axis', (int, list, tuple, type(None), Variable), 'sum')
helper = LayerHelper('sum', **locals())
if dtype_flag:
out = helper.create_variable_for_type_inference(
dtype=dtype)
out = helper.create_variable_for_type_inference(dtype=dtype)
else:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='reduce_sum',
inputs={'X': x},
outputs={'Out': out},
attrs=attrs)
helper.append_op(type='reduce_sum',
inputs={'X': x},
outputs={'Out': out},
attrs=attrs)
return out
......@@ -1299,8 +1345,8 @@ def nansum(x, axis=None, dtype=None, keepdim=False, name=None):
out5 = paddle.nansum(y, axis=[1, 2]) # [8, 19]
out6 = paddle.nansum(y, axis=[0, 1]) # [9, 18]
"""
check_variable_and_dtype(
x, 'x', ['float32', 'float64', 'int32', 'int64'], 'nansum')
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
'nansum')
check_type(axis, 'axis', (int, list, tuple, type(None)), 'nansum')
zero_tensor = paddle.zeros_like(x)
......@@ -1367,12 +1413,14 @@ def nanmean(x, axis=None, keepdim=False, name=None):
axis = [axis]
check_variable_and_dtype(x, 'x/input',
['uint16', 'float16', 'float32', 'float64'],
'nanmean' )
'nanmean')
if axis is not None:
check_type(axis, 'axis/dim', (int, list, tuple), 'nanmean')
cnt = paddle.sum(~paddle.isnan(x), axis = axis,keepdim=keepdim)
return paddle.divide(paddle.nansum(x, axis=axis, keepdim=keepdim, name=name), cnt.astype(x.dtype))
cnt = paddle.sum(~paddle.isnan(x), axis=axis, keepdim=keepdim)
return paddle.divide(
paddle.nansum(x, axis=axis, keepdim=keepdim, name=name),
cnt.astype(x.dtype))
def count_nonzero(x, axis=None, keepdim=False, name=None):
......@@ -1424,13 +1472,13 @@ def count_nonzero(x, axis=None, keepdim=False, name=None):
# [1, 3, 5]
"""
if axis is not None:
if isinstance(axis, int):
axis = [axis]
dims = len(x.shape)
for i in range(len(axis)):
if not isinstance(axis[i], int) or not (axis[i] < dims and axis[i] >= -dims):
if not isinstance(axis[i], int) or not (axis[i] < dims
and axis[i] >= -dims):
raise ValueError(
"Axis should be None, int, or a list, element should in range [-rank(x), rank(x))."
)
......@@ -1518,14 +1566,12 @@ def add_n(inputs, name=None):
check_variable_and_dtype(inputs, "inputs", \
['float16', 'float32', 'float64', 'int32', 'int64'], 'add_n')
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('inputs'))
helper.append_op(
type='sum',
inputs={'X': inputs},
outputs={'Out': out},
attrs={'use_mkldnn': False})
helper.append_op(type='sum',
inputs={'X': inputs},
outputs={'Out': out},
attrs={'use_mkldnn': False})
return out
......@@ -1559,7 +1605,7 @@ def trunc(input, name=None):
# [0., 0.]]))
'''
if in_dygraph_mode():
return _C_ops.trunc(input)
return _C_ops.trunc(input)
else:
if _in_legacy_dygraph():
return _legacy_C_ops.trunc(input)
......@@ -1568,15 +1614,18 @@ def trunc(input, name=None):
attrs = {}
helper = LayerHelper("trunc", **locals())
check_variable_and_dtype(input, 'X', ['int32', 'int64', 'float32', 'float64'], 'trunc')
check_variable_and_dtype(input, 'X',
['int32', 'int64', 'float32', 'float64'],
'trunc')
out = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type="trunc", inputs=inputs, attrs=attrs, outputs={"Out": out})
helper.append_op(type="trunc",
inputs=inputs,
attrs=attrs,
outputs={"Out": out})
return out
def mm(input, mat2, name=None):
"""
......@@ -1686,9 +1735,12 @@ def mm(input, mat2, name=None):
helper = LayerHelper('mm', **locals())
out = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type='matmul_v2', inputs={'X': input,
'Y': mat2}, outputs={'Out': out})
helper.append_op(type='matmul_v2',
inputs={
'X': input,
'Y': mat2
},
outputs={'Out': out})
return out
......@@ -1735,28 +1787,40 @@ def addmm(input, x, y, beta=1.0, alpha=1.0, name=None):
x_shape = x.shape
y_shape = y.shape
if not len(x_shape) == len(y_shape) == 2:
raise ValueError("The dimention of x, y should be 2 but receive x's shape: {}, y's shape: {}".format(x_shape, y_shape))
raise ValueError(
"The dimention of x, y should be 2 but receive x's shape: {}, y's shape: {}"
.format(x_shape, y_shape))
if x_shape[1] != y_shape[0]:
raise ValueError("The input Variable x's width must be equal with Variable y' height. But received x's shape = {}, y's shape = {}.".format(x_shape, y_shape))
raise ValueError(
"The input Variable x's width must be equal with Variable y' height. But received x's shape = {}, y's shape = {}."
.format(x_shape, y_shape))
if len(input_shape) == 2:
if input_shape[0] != x_shape[0]:
if input_shape[0] != 1:
raise ValueError( "When x's dimension[0] is not equal with input's dimension[0], input's dimension[0] must be 1 but got {}".format(input_shape[0]))
raise ValueError(
"When x's dimension[0] is not equal with input's dimension[0], input's dimension[0] must be 1 but got {}"
.format(input_shape[0]))
if input_shape[1] != y_shape[1] and input_shape[1] != 1:
raise ValueError( "When y's dimension[1] is not equal with input's dimension[1], input's dimension[1] must be 1 but got {}".format(input_shape[1]))
raise ValueError(
"When y's dimension[1] is not equal with input's dimension[1], input's dimension[1] must be 1 but got {}"
.format(input_shape[1]))
if input_shape[1] != y_shape[1]:
if input_shape[1] != 1:
raise ValueError( "When y's dimension[1] is not equal with input's dimension[1], input's dimension[1] must be 1 but got {}".format(input_shape[1]))
raise ValueError(
"When y's dimension[1] is not equal with input's dimension[1], input's dimension[1] must be 1 but got {}"
.format(input_shape[1]))
elif len(input_shape) == 1:
if input_shape[0] not in (y_shape[1], 1):
raise ValueError("The input's shape: {} is not broadcastable with [x.shape[0], y.shape[1]]: [{},{}]".format(input_shape, x_shape[0], y_shape[1]))
raise ValueError(
"The input's shape: {} is not broadcastable with [x.shape[0], y.shape[1]]: [{},{}]"
.format(input_shape, x_shape[0], y_shape[1]))
else:
raise ValueError("The dimention of input should be 2 or 1 but receive input's shape: {}".format(input_shape))
raise ValueError(
"The dimention of input should be 2 or 1 but receive input's shape: {}"
.format(input_shape))
if in_dygraph_mode():
return _C_ops.addmm( input, x, y, alpha, beta)
return _C_ops.addmm(input, x, y, alpha, beta)
else:
if _in_legacy_dygraph():
out = _legacy_C_ops.addmm(input, x, y, "Alpha", alpha, "Beta", beta)
......@@ -1766,15 +1830,19 @@ def addmm(input, x, y, beta=1.0, alpha=1.0, name=None):
attrs = {'Alpha': alpha, 'Beta': beta}
helper = LayerHelper("addmm", **locals())
check_variable_and_dtype(input, 'Input', ['float32', 'float64'], 'addmm')
check_variable_and_dtype(input, 'Input', ['float32', 'float64'],
'addmm')
check_variable_and_dtype(x, 'X', ['float32', 'float64'], 'addmm')
check_variable_and_dtype(y, 'Y', ['float32', 'float64'], 'addmm')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="addmm", inputs=inputs, attrs=attrs, outputs={"Out": out})
helper.append_op(type="addmm",
inputs=inputs,
attrs=attrs,
outputs={"Out": out})
return out
def renorm(x, p, axis, max_norm):
"""
**renorm**
......@@ -1812,30 +1880,36 @@ def renorm(x, p, axis, max_norm):
input_shape = x.shape
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'renorm')
if not axis < len(input_shape):
raise ValueError("the axis:{} should be less then the shape's size {}:{}".format(axis,len(input_shape),input_shape))
if not axis >=0:
raise ValueError(
"the axis:{} should be less then the shape's size {}:{}".format(
axis, len(input_shape), input_shape))
if not axis >= 0:
if not axis >= -1 * len(input_shape):
raise ValueError("the axis:{} should not be less than -1 * length of input_shape:{}".format(axis,-1 * len(input_shape)))
raise ValueError(
"the axis:{} should not be less than -1 * length of input_shape:{}"
.format(axis, -1 * len(input_shape)))
axis = axis + len(input_shape)
if in_dygraph_mode():
out = _C_ops.renorm(x, p, axis, max_norm)
return out
elif _in_legacy_dygraph():
out = _legacy_C_ops.renorm(x, 'p',p, 'axis',axis, 'max_norm', max_norm)
out = _legacy_C_ops.renorm(x, 'p', p, 'axis', axis, 'max_norm',
max_norm)
return out
inputs = {'X': x}
attrs = {'p': p, 'axis': axis, 'max_norm':max_norm}
attrs = {'p': p, 'axis': axis, 'max_norm': max_norm}
helper = LayerHelper("renorm", **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="renorm", inputs=inputs, attrs=attrs, outputs={"Out": out})
helper.append_op(type="renorm",
inputs=inputs,
attrs=attrs,
outputs={"Out": out})
return out
def inner(x, y, name=None):
"""
......@@ -1869,8 +1943,8 @@ def inner(x, y, name=None):
else:
xshape = x.shape
yshape = y.shape
dstshape = list(xshape[:-1])+list(yshape[:-1])
if len(dstshape)==0:
dstshape = list(xshape[:-1]) + list(yshape[:-1])
if len(dstshape) == 0:
dstshape = [1]
nx = x.reshape((-1, xshape[-1]))
ny = y.reshape((-1, yshape[-1]))
......@@ -1884,7 +1958,8 @@ def inner(x, y, name=None):
var_names = {'x': x, 'y': y}
for name, val in var_names.items():
check_variable_and_dtype(val, name,
['float16', 'float32', 'float64'], 'inner')
['float16', 'float32', 'float64'],
'inner')
x_shape = list(xshape)
y_shape = list(yshape)
......@@ -1901,9 +1976,12 @@ def inner(x, y, name=None):
helper = LayerHelper('inner', **locals())
out = helper.create_variable_for_type_inference(dtype=nx.dtype)
helper.append_op(
type='matmul_v2', inputs={'X': nx,
'Y': ny.T}, outputs={'Out': out})
helper.append_op(type='matmul_v2',
inputs={
'X': nx,
'Y': ny.T
},
outputs={'Out': out})
return out.reshape(dstshape)
......@@ -1954,9 +2032,12 @@ def outer(x, y, name=None):
helper = LayerHelper('outer', **locals())
out = helper.create_variable_for_type_inference(dtype=nx.dtype)
helper.append_op(
type='matmul_v2', inputs={'X': nx,
'Y': ny}, outputs={'Out': out})
helper.append_op(type='matmul_v2',
inputs={
'X': nx,
'Y': ny
},
outputs={'Out': out})
return out
......@@ -2015,17 +2096,18 @@ def logsumexp(x, axis=None, keepdim=False, name=None):
axis = range(len(x.shape))
return _C_ops.logsumexp(x, axis, keepdim, reduce_all)
if _in_legacy_dygraph():
return _legacy_C_ops.logsumexp(x, 'axis', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
return _legacy_C_ops.logsumexp(x, 'axis', axis, 'keepdim', keepdim,
'reduce_all', reduce_all)
check_variable_and_dtype(x, 'x',
['float32', 'float64'],
'logsumexp')
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'logsumexp')
helper = LayerHelper('logsumexp', **locals())
attrs = {'axis': axis, 'keepdim': keepdim, 'reduce_all':reduce_all}
attrs = {'axis': axis, 'keepdim': keepdim, 'reduce_all': reduce_all}
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='logsumexp', inputs={'X': x}, outputs={'Out': out}, attrs=attrs)
helper.append_op(type='logsumexp',
inputs={'X': x},
outputs={'Out': out},
attrs=attrs)
return out
......@@ -2062,20 +2144,22 @@ def inverse(x, name=None):
return _legacy_C_ops.inverse(x)
def _check_input(x):
check_variable_and_dtype(x, 'x',
['float32', 'float64'], 'inverse')
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'inverse')
if len(x.shape) < 2:
raise ValueError(
"The input of inverse is expected to be a Tensor whose number "
"of dimensions is no less than 2. But reviced: %d, "
"x's shape: %s." % (len(x.shape), x.shape))
_check_input(x)
helper = LayerHelper('inverse', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='inverse', inputs={'Input': [x] }, outputs={'Output': [out]})
helper.append_op(type='inverse',
inputs={'Input': [x]},
outputs={'Output': [out]})
return out
def _get_reduce_axis(axis):
"""
Internal function for max, min, amax and amin.
......@@ -2085,20 +2169,23 @@ def _get_reduce_axis(axis):
if isinstance(axis, tuple):
axis = list(axis)
elif isinstance(axis, int):
axis= [axis]
axis = [axis]
else:
raise TypeError(
"The type of axis must be int, list or tuple, but received {}".format(type(axis)))
"The type of axis must be int, list or tuple, but received {}".
format(type(axis)))
reduce_all = True if axis == None or axis == [] else False
if axis == None:
axis = []
return reduce_all, axis
def _get_reduce_axis_with_tensor(axis):
if isinstance(axis, Variable):
return False, axis
return _get_reduce_axis(axis)
def _get_reduce_all_value(axis):
"""
Internal function for max, min, amax and amin.
......@@ -2108,15 +2195,17 @@ def _get_reduce_all_value(axis):
if isinstance(axis, tuple):
axis = list(axis)
elif isinstance(axis, int):
axis= [axis]
axis = [axis]
else:
raise TypeError(
"The type of axis must be int, list or tuple, but received {}".format(type(axis)))
"The type of axis must be int, list or tuple, but received {}".
format(type(axis)))
reduce_all = True if axis == None or axis == [] else False
axis = axis if axis != None and axis != [] else [0]
return reduce_all, axis
def max(x, axis=None, keepdim=False, name=None):
"""
......@@ -2200,27 +2289,26 @@ def max(x, axis=None, keepdim=False, name=None):
return _C_ops.max(x, axis, keepdim)
if _in_legacy_dygraph():
return _legacy_C_ops.reduce_max(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all)
'reduce_all', reduce_all)
helper = LayerHelper('max', **locals())
check_variable_and_dtype(
x, 'x', ['float32', 'float64', 'int32', 'int64'], 'max')
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
'max')
if not isinstance(axis, Variable) and utils._contain_var(axis):
axis = utils._convert_to_tensor_list(axis)
out = helper.create_variable_for_type_inference(
dtype=x.dtype)
helper.append_op(
type='reduce_max',
inputs={'X': x},
outputs={'Out': out},
attrs={
'dim': axis,
'keep_dim': keepdim,
'reduce_all': reduce_all
})
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='reduce_max',
inputs={'X': x},
outputs={'Out': out},
attrs={
'dim': axis,
'keep_dim': keepdim,
'reduce_all': reduce_all
})
return out
def min(x, axis=None, keepdim=False, name=None):
"""
......@@ -2304,27 +2392,26 @@ def min(x, axis=None, keepdim=False, name=None):
if _in_legacy_dygraph():
return _legacy_C_ops.reduce_min(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all)
'reduce_all', reduce_all)
helper = LayerHelper('min', **locals())
check_variable_and_dtype(
x, 'x', ['float32', 'float64', 'int32', 'int64'], 'min')
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
'min')
if not isinstance(axis, Variable) and utils._contain_var(axis):
axis = utils._convert_to_tensor_list(axis)
out = helper.create_variable_for_type_inference(
dtype=x.dtype)
helper.append_op(
type='reduce_min',
inputs={'X': x},
outputs={'Out': out},
attrs={
'dim': axis,
'keep_dim': keepdim,
'reduce_all': reduce_all
})
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='reduce_min',
inputs={'X': x},
outputs={'Out': out},
attrs={
'dim': axis,
'keep_dim': keepdim,
'reduce_all': reduce_all
})
return out
def amax(x, axis=None, keepdim=False, name=None):
"""
Computes the maximum of tensor elements over the given axis.
......@@ -2417,27 +2504,27 @@ def amax(x, axis=None, keepdim=False, name=None):
reduce_all, axis = _get_reduce_axis(axis)
if in_dygraph_mode():
return _C_ops.amax(x, axis, keepdim)
return _C_ops.amax(x, axis, keepdim)
if _in_legacy_dygraph():
return _legacy_C_ops.reduce_amax(x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all)
return _legacy_C_ops.reduce_amax(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all)
helper = LayerHelper('amax', **locals())
check_variable_and_dtype(
x, 'x', ['float32', 'float64', 'int32', 'int64'], 'amax')
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
'amax')
out = helper.create_variable_for_type_inference(
dtype=x.dtype)
helper.append_op(
type='reduce_amax',
inputs={'X': x},
outputs={'Out': out},
attrs={
'dim': axis,
'keep_dim': keepdim,
'reduce_all': reduce_all
})
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='reduce_amax',
inputs={'X': x},
outputs={'Out': out},
attrs={
'dim': axis,
'keep_dim': keepdim,
'reduce_all': reduce_all
})
return out
def amin(x, axis=None, keepdim=False, name=None):
"""
......@@ -2529,28 +2616,28 @@ def amin(x, axis=None, keepdim=False, name=None):
#[0.1., 0.1], [[[0., 0.3333], [0.5, 0.3333]], [[0.5, 0.3333], [1., 1.]]]
"""
reduce_all, axis = _get_reduce_axis( axis )
reduce_all, axis = _get_reduce_axis(axis)
if in_dygraph_mode():
return _C_ops.amin(x, axis, keepdim)
elif _in_legacy_dygraph():
return _legacy_C_ops.reduce_amin(x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all)
return _legacy_C_ops.reduce_amin(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all)
helper = LayerHelper('amin', **locals())
check_variable_and_dtype(
x, 'x', ['float32', 'float64', 'int32', 'int64'], 'amin')
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
'amin')
out = helper.create_variable_for_type_inference(
dtype=x.dtype)
helper.append_op(
type='reduce_amin',
inputs={'X': x},
outputs={'Out': out},
attrs={
'dim': axis,
'keep_dim': keepdim,
'reduce_all': reduce_all
})
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='reduce_amin',
inputs={'X': x},
outputs={'Out': out},
attrs={
'dim': axis,
'keep_dim': keepdim,
'reduce_all': reduce_all
})
return out
def log1p(x, name=None):
r"""
Calculates the natural log of the given input tensor, element-wise.
......@@ -2588,6 +2675,7 @@ def log1p(x, name=None):
helper.append_op(type="log1p", inputs={"X": x}, outputs={"Out": out})
return out
def log2(x, name=None):
r"""
Calculates the log to the base 2 of the given input tensor, element-wise.
......@@ -2768,7 +2856,8 @@ def clip(x, min=None, max=None, name=None):
check_dtype(max.dtype, 'max', ['float32', 'float64', 'int32'],
'clip', '(When the type of max in clip is Variable.)')
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], 'clip')
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
'clip')
inputs = {'X': x}
attrs = {'min': min_, 'max': max_}
......@@ -2788,8 +2877,10 @@ def clip(x, min=None, max=None, name=None):
helper = LayerHelper('clip', **locals())
output = helper.create_variable_for_type_inference(
dtype=helper.input_dtype('x'))
helper.append_op(
type='clip', inputs=inputs, outputs={'Out': [output]}, attrs=attrs)
helper.append_op(type='clip',
inputs=inputs,
outputs={'Out': [output]},
attrs=attrs)
return output
......@@ -2816,7 +2907,6 @@ def clip_(x, min=None, max=None, name=None):
return _legacy_C_ops.clip_(x, "min", min, "max", max)
def trace(x, offset=0, axis1=0, axis2=1, name=None):
"""
......@@ -2857,6 +2947,7 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None):
data2 = paddle.trace(case2, offset=1, axis1=1, axis2=2) # data2.shape = [3]
data3 = paddle.trace(case3, offset=-3, axis1=1, axis2=-1) # data2.shape = [3, 5]
"""
def __check_input(x, offset, axis1, axis2):
check_dtype(x.dtype, 'Input',
['int32', 'int64', 'float16', 'float32', 'float64'],
......@@ -2885,25 +2976,28 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None):
"But received axis1 = %d, axis2 = %d\n"%(axis1, axis2)
if in_dygraph_mode():
return _C_ops.trace( x, offset, axis1, axis2 )
return _C_ops.trace(x, offset, axis1, axis2)
if _in_legacy_dygraph():
return _legacy_C_ops.trace(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2)
return _legacy_C_ops.trace(x, 'offset', offset, 'axis1', axis1, 'axis2',
axis2)
__check_input(x, offset, axis1, axis2)
helper = LayerHelper('trace', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='trace',
inputs={'Input': [x]},
attrs={'offset': offset,
'axis1': axis1,
'axis2': axis2},
outputs={'Out': [out]})
helper.append_op(type='trace',
inputs={'Input': [x]},
attrs={
'offset': offset,
'axis1': axis1,
'axis2': axis2
},
outputs={'Out': [out]})
return out
def diagonal(x, offset=0, axis1=0, axis2=1, name=None):
"""
This OP computes the diagonals of the input tensor x.
......@@ -2973,7 +3067,8 @@ def diagonal(x, offset=0, axis1=0, axis2=1, name=None):
return _C_ops.diagonal(x, offset, axis1, axis2)
else:
if _in_legacy_dygraph():
return _legacy_C_ops.diagonal(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2)
return _legacy_C_ops.diagonal(x, 'offset', offset, 'axis1', axis1,
'axis2', axis2)
def __check_input(x, offset, axis1, axis2):
check_dtype(x.dtype, 'Input',
......@@ -3005,13 +3100,14 @@ def diagonal(x, offset=0, axis1=0, axis2=1, name=None):
helper = LayerHelper('diagonal', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='diagonal',
inputs={'Input': [x]},
attrs={'offset': offset,
'axis1': axis1,
'axis2': axis2},
outputs={'Out': [out]})
helper.append_op(type='diagonal',
inputs={'Input': [x]},
attrs={
'offset': offset,
'axis1': axis1,
'axis2': axis2
},
outputs={'Out': [out]})
return out
......@@ -3049,8 +3145,10 @@ def kron(x, y, name=None):
if in_dygraph_mode():
return _C_ops.kron(x, y)
helper = LayerHelper('kron', **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron')
check_variable_and_dtype(y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron')
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron')
check_variable_and_dtype(
y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type="kron", inputs={"X": x, "Y": y}, outputs={"Out": out})
......@@ -3186,13 +3284,20 @@ def logcumsumexp(x, axis=None, dtype=None, name=None):
if axis is None:
return _legacy_C_ops.logcumsumexp(x, 'flatten', flatten)
else:
return _legacy_C_ops.logcumsumexp(x, 'axis', axis, 'flatten', flatten)
return _legacy_C_ops.logcumsumexp(x, 'axis', axis, 'flatten',
flatten)
check_variable_and_dtype(x, 'x', ['float32', 'float64'], "logcumsumexp")
helper = LayerHelper('logcumsumexp', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='logcumsumexp', inputs={'X': x}, outputs={'Out': out}, attrs={'axis': axis, 'flatten': flatten})
helper.append_op(type='logcumsumexp',
inputs={'X': x},
outputs={'Out': out},
attrs={
'axis': axis,
'flatten': flatten
})
return out
......@@ -3251,14 +3356,21 @@ def cumprod(x, dim=None, dtype=None, name=None):
if _in_legacy_dygraph():
return _legacy_C_ops.cumprod(x, 'dim', dim)
check_variable_and_dtype(x, "x", ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'], 'cumprod')
check_variable_and_dtype(
x, "x",
['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'],
'cumprod')
check_type(dim, 'dim', int, 'cumprod')
helper = LayerHelper('cumprod', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='cumprod', inputs={'X': x}, outputs={'Out': out}, attrs={'dim': dim})
helper.append_op(type='cumprod',
inputs={'X': x},
outputs={'Out': out},
attrs={'dim': dim})
return out
def isfinite(x, name=None):
"""
......@@ -3281,15 +3393,17 @@ def isfinite(x, name=None):
print(out) # [False True True False True False False]
"""
if in_dygraph_mode():
return _C_ops.isfinite( x )
return _C_ops.isfinite(x)
if _in_legacy_dygraph():
return _legacy_C_ops.isfinite_v2(x)
helper = LayerHelper("isfinite_v2", **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isfinite')
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isfinite')
out = helper.create_variable_for_type_inference('bool')
helper.append_op(type="isfinite_v2", inputs={"X": x}, outputs={"Out": out})
return out
def isinf(x, name=None):
"""
......@@ -3312,15 +3426,17 @@ def isinf(x, name=None):
print(out) # [ True False False True False False False]
"""
if in_dygraph_mode():
return _C_ops.isinf( x )
return _C_ops.isinf(x)
if _in_legacy_dygraph():
return _legacy_C_ops.isinf_v2(x)
helper = LayerHelper("isinf_v2", **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isinf')
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isinf')
out = helper.create_variable_for_type_inference(dtype='bool')
helper.append_op(type="isinf_v2", inputs={"X": x}, outputs={"Out": out})
return out
def isnan(x, name=None):
"""
......@@ -3343,12 +3459,13 @@ def isnan(x, name=None):
print(out) # [False False False False False True True]
"""
if in_dygraph_mode():
return _C_ops.isnan( x )
return _C_ops.isnan(x)
if _in_legacy_dygraph():
return _legacy_C_ops.isnan_v2(x)
helper = LayerHelper("isnan_v2", **locals())
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isnan')
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isnan')
out = helper.create_variable_for_type_inference(dtype='bool')
helper.append_op(type="isnan_v2", inputs={"X": x}, outputs={"Out": out})
return out
......@@ -3409,7 +3526,8 @@ def prod(x, axis=None, keepdim=False, dtype=None, name=None):
"""
if dtype is not None:
check_dtype(dtype, 'dtype', ['float32', 'float64', 'int32', 'int64'], 'prod')
check_dtype(dtype, 'dtype', ['float32', 'float64', 'int32', 'int64'],
'prod')
if x.dtype != convert_np_dtype_to_dtype_(dtype):
x = cast(x, dtype)
......@@ -3424,34 +3542,35 @@ def prod(x, axis=None, keepdim=False, dtype=None, name=None):
dim = [dim]
else:
raise TypeError(
"The type of axis must be int, list or tuple, but received {}".
format(type(dim)))
"The type of axis must be int, list or tuple, but received {}"
.format(type(dim)))
reduce_all = True if dim is None or len(dim) == 0 or len(dim) == len(x.shape) else False
reduce_all = True if dim is None or len(dim) == 0 or len(dim) == len(
x.shape) else False
if dim is None or len(dim) == 0:
dim = [0]
if in_dygraph_mode():
return _C_ops.reduce_prod(x, dim, keepdim, reduce_all)
if _in_legacy_dygraph():
return _legacy_C_ops.reduce_prod(
x, 'dim', dim, 'keep_dim', keepdim, 'reduce_all', reduce_all)
return _legacy_C_ops.reduce_prod(x, 'dim', dim, 'keep_dim', keepdim,
'reduce_all', reduce_all)
helper = LayerHelper('reduce_prod', **locals())
check_variable_and_dtype(
x, 'x/input', ['float32', 'float64', 'int32', 'int64'], 'reduce_prod')
check_variable_and_dtype(x, 'x/input',
['float32', 'float64', 'int32', 'int64'],
'reduce_prod')
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
if not isinstance(dim, Variable) and utils._contain_var(dim):
dim = utils._convert_to_tensor_list(dim)
helper.append_op(
type='reduce_prod',
inputs={'X': x},
outputs={'Out': out},
attrs={
'dim': dim,
'keep_dim': keepdim,
'reduce_all': reduce_all
})
helper.append_op(type='reduce_prod',
inputs={'X': x},
outputs={'Out': out},
attrs={
'dim': dim,
'keep_dim': keepdim,
'reduce_all': reduce_all
})
return out
......@@ -3516,7 +3635,7 @@ def tanh(x, name=None):
# [-0.37994896 -0.19737532 0.09966799 0.29131261]
"""
if in_dygraph_mode():
return _C_ops.tanh( x )
return _C_ops.tanh(x)
if _in_legacy_dygraph():
return _legacy_C_ops.tanh(x)
......@@ -3528,6 +3647,7 @@ def tanh(x, name=None):
helper.append_op(type='tanh', inputs={'X': x}, outputs={'Out': out})
return out
@inplace_apis_in_dygraph_only
def tanh_(x, name=None):
r"""
......@@ -3535,7 +3655,7 @@ def tanh_(x, name=None):
Please refer to :ref:`api_tensor_tanh`.
"""
if in_dygraph_mode():
return _C_ops.tanh_( x )
return _C_ops.tanh_(x)
return _legacy_C_ops.tanh_(x)
......@@ -3571,11 +3691,10 @@ def increment(x, value=1.0, name=None):
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
'increment')
helper = LayerHelper("increment", **locals())
helper.append_op(
type='increment',
inputs={'X': [x]},
outputs={'Out': [x]},
attrs={'step': float(value)})
helper.append_op(type='increment',
inputs={'X': [x]},
outputs={'Out': [x]},
attrs={'step': float(value)})
return x
......@@ -3647,7 +3766,7 @@ def all(x, axis=None, keepdim=False, name=None):
if _in_legacy_dygraph():
axis = axis if axis != None and axis != [] else [0]
return _legacy_C_ops.reduce_all(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all_flag)
'reduce_all', reduce_all_flag)
attrs = {
'dim': axis if axis != None and axis != [] and axis != () else [0],
......@@ -3656,16 +3775,14 @@ def all(x, axis=None, keepdim=False, name=None):
}
check_variable_and_dtype(x, 'x', ['bool'], 'all')
check_type(axis, 'axis', (int, list, tuple, type(None)), 'all')
helper = LayerHelper('all', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='reduce_all',
inputs={'X': x},
outputs={'Out': out},
attrs=attrs)
helper.append_op(type='reduce_all',
inputs={'X': x},
outputs={'Out': out},
attrs=attrs)
return out
......@@ -3738,7 +3855,7 @@ def any(x, axis=None, keepdim=False, name=None):
if _in_legacy_dygraph():
axis = axis if axis != None and axis != [] else [0]
return _legacy_C_ops.reduce_any(x, 'dim', axis, 'keep_dim', keepdim,
'reduce_all', reduce_all_flag)
'reduce_all', reduce_all_flag)
attrs = {
'dim': axis if axis != None and axis != [] and axis != () else [0],
......@@ -3748,18 +3865,17 @@ def any(x, axis=None, keepdim=False, name=None):
check_variable_and_dtype(x, 'x', ['bool'], 'any')
check_type(axis, 'axis', (int, list, tuple, type(None)), 'any')
helper = LayerHelper('any', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='reduce_any',
inputs={'X': x},
outputs={'Out': out},
attrs=attrs)
helper.append_op(type='reduce_any',
inputs={'X': x},
outputs={'Out': out},
attrs=attrs)
return out
def broadcast_shape(x_shape, y_shape):
"""
The function returns the shape of doing operation with broadcasting on tensors of x_shape and y_shape, please refer to :ref:`user_guide_broadcasting` for more details.
......@@ -3787,6 +3903,7 @@ def broadcast_shape(x_shape, y_shape):
return core.broadcast_shape(x_shape, y_shape)
def conj(x, name=None):
r"""
This function computes the conjugate of the Tensor elementwisely.
......@@ -3821,15 +3938,18 @@ def conj(x, name=None):
if paddle.in_dynamic_mode():
return _legacy_C_ops.conj(x)
check_variable_and_dtype(x, "x", ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'], 'conj')
check_variable_and_dtype(
x, "x",
['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'],
'conj')
helper = LayerHelper('conj', **locals())
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
helper.append_op(type='conj', inputs={'X': x}, outputs={'Out': [out]})
return out
def digamma(x, name=None):
r"""
Calculates the digamma of the given input tensor, element-wise.
......@@ -3868,6 +3988,7 @@ def digamma(x, name=None):
helper.append_op(type='digamma', inputs={'X': x}, outputs={'Out': out})
return out
def lgamma(x, name=None):
r"""
Calculates the lgamma of the given input tensor, element-wise.
......@@ -3927,7 +4048,13 @@ def neg(x, name=None):
# [0.4 0.2 -0.1 -0.3]
"""
return scale(x, scale=-1.0, bias=0.0, bias_after_scale=True, act=None, name=name)
return scale(x,
scale=-1.0,
bias=0.0,
bias_after_scale=True,
act=None,
name=name)
def atan2(x, y, name=None):
r"""
......@@ -3973,21 +4100,25 @@ def atan2(x, y, name=None):
"""
if in_dygraph_mode():
return _C_ops.atan2( x, y)
return _C_ops.atan2(x, y)
else:
if _in_legacy_dygraph():
return _legacy_C_ops.atan2(x, y)
else:
check_variable_and_dtype(x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'], 'atan2')
check_variable_and_dtype(y, 'y', ['int32', 'int64', 'float16', 'float32', 'float64'], 'atan2')
check_variable_and_dtype(
x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'],
'atan2')
check_variable_and_dtype(
y, 'y', ['int32', 'int64', 'float16', 'float32', 'float64'],
'atan2')
helper = LayerHelper('atan2', **locals())
inputs = {'X1' : x, 'X2' : y}
inputs = {'X1': x, 'X2': y}
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='atan2', inputs=inputs, outputs={'Out': out})
helper.append_op(type='atan2', inputs=inputs, outputs={'Out': out})
return out
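# [editor's sketch, not part of this diff] atan2 is the quadrant-aware
# arctangent; the stdlib scalar version illustrates why the quadrant matters.
import math

print(math.atan2(1.0, 1.0))    # pi/4  ~ 0.785
print(math.atan2(1.0, -1.0))   # 3*pi/4 ~ 2.356; plain atan(-1.0) could not
                               # distinguish this quadrant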
def logit(x, eps=None, name=None):
r"""
This function generates a new tensor with the logit of the elements of input x. x is clamped to [eps, 1-eps] when eps is not zero. When eps is zero and x < 0 or x > 1, the function yields NaN.
......@@ -4038,13 +4169,13 @@ def logit(x, eps=None, name=None):
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'logit')
helper = LayerHelper("logit", **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='logit',
inputs={'X': x},
outputs={'Out': out},
attrs={'eps': eps})
helper.append_op(type='logit',
inputs={'X': x},
outputs={'Out': out},
attrs={'eps': eps})
return out
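# [editor's sketch, not part of this diff] logit(p) = log(p / (1 - p)); the eps
# argument above only clamps p into [eps, 1 - eps] before this formula is applied.
import math

p = 0.25
print(math.log(p / (1 - p)))   # ~ -1.0986; logit(0.5) is exactly 0.0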
def lerp(x, y, weight, name=None):
r"""
Does a linear interpolation between x and y based on weight.
......@@ -4080,7 +4211,7 @@ def lerp(x, y, weight, name=None):
if isinstance(weight, float):
weight = paddle.to_tensor(weight, dtype=x.dtype)
return _C_ops.lerp( x, y, weight)
return _C_ops.lerp(x, y, weight)
if _in_legacy_dygraph():
if isinstance(weight, float):
weight = paddle.to_tensor(weight, dtype=x.dtype)
......@@ -4099,6 +4230,7 @@ def lerp(x, y, weight, name=None):
helper.append_op(type='lerp', inputs=inputs, outputs={'Out': out})
return out
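# [editor's sketch, not part of this diff] lerp computes x + weight * (y - x),
# shown here with plain floats.
def lerp_scalar(x, y, weight):
    return x + weight * (y - x)

print(lerp_scalar(1.0, 10.0, 0.5))   # 5.5
print(lerp_scalar(1.0, 10.0, 0.0))   # 1.0 (weight 0 returns x)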
@inplace_apis_in_dygraph_only
def lerp_(x, y, weight, name=None):
r"""
......@@ -4112,11 +4244,14 @@ def lerp_(x, y, weight, name=None):
elif isinstance(weight, (paddle.Tensor, Variable)):
out_shape = broadcast_shape(out_shape, weight.shape)
if out_shape != x.shape:
raise ValueError("The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation.".format(out_shape, x.shape))
raise ValueError(
"The shape of broadcast output {} is different from that of inplace tensor {} in the Inplace operation."
.format(out_shape, x.shape))
if in_dygraph_mode():
return _C_ops.lerp_( x, y, weight)
return _C_ops.lerp_(x, y, weight)
return _legacy_C_ops.lerp_(x, y, weight)
def erfinv(x, name=None):
r"""
The inverse error function of x. Please refer to :ref:`api_paddle_erf`
......@@ -4143,7 +4278,7 @@ def erfinv(x, name=None):
"""
if in_dygraph_mode():
return _C_ops.erfinv( x )
return _C_ops.erfinv(x)
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'erfinv')
......@@ -4155,6 +4290,7 @@ def erfinv(x, name=None):
helper.append_op(type='erfinv', inputs={'X': x}, outputs={'Out': out})
return out
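# [editor's sketch, not part of this diff] erfinv is defined by
# erf(erfinv(y)) == y; the small bisection below recovers erfinv(0.5) ~ 0.4769
# from the stdlib math.erf, with no external library.
import math

def erfinv_bisect(y, lo=-6.0, hi=6.0):
    for _ in range(80):            # bisection on the monotone erf
        mid = (lo + hi) / 2
        if math.erf(mid) < y:
            lo = mid
        else:
            hi = mid
    return (lo + hi) / 2

print(erfinv_bisect(0.5))   # ~ 0.47694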
@inplace_apis_in_dygraph_only
def erfinv_(x, name=None):
r"""
......@@ -4163,9 +4299,10 @@ def erfinv_(x, name=None):
"""
check_type(x, 'x', (paddle.Tensor, Variable), 'erfinv')
if in_dygraph_mode():
return _C_ops.erfinv_( x )
return _C_ops.erfinv_(x)
return _legacy_C_ops.erfinv_(x)
def rad2deg(x, name=None):
r"""
Convert each of the elements of input x from angles in radians to degrees.
......@@ -4217,18 +4354,29 @@ def rad2deg(x, name=None):
x = cast(x, dtype="float32")
return _legacy_C_ops.scale(x, 'scale', rad2deg_scale)
else:
check_variable_and_dtype(x, 'x', ['int32', 'int64', 'float32', 'float64'], 'rad2deg')
check_variable_and_dtype(x, 'x',
['int32', 'int64', 'float32', 'float64'],
'rad2deg')
helper = LayerHelper('rad2deg', **locals())
out_cast = x
if convert_dtype(x.dtype) in ['int32', 'int64']:
out_cast = helper.create_variable_for_type_inference(dtype=paddle.float32)
helper.append_op(
type='cast', inputs={'X':x}, outputs={'Out': out_cast}, attrs={'in_dtype': x.dtype,'out_dtype': paddle.float32})
out_cast = helper.create_variable_for_type_inference(
dtype=paddle.float32)
helper.append_op(type='cast',
inputs={'X': x},
outputs={'Out': out_cast},
attrs={
'in_dtype': x.dtype,
'out_dtype': paddle.float32
})
out = helper.create_variable_for_type_inference(dtype=out_cast.dtype)
helper.append_op(
type='scale', inputs={'X':out_cast}, outputs={'Out': out}, attrs={'scale': rad2deg_scale})
helper.append_op(type='scale',
inputs={'X': out_cast},
outputs={'Out': out},
attrs={'scale': rad2deg_scale})
return out
def deg2rad(x, name=None):
r"""
Convert each of the elements of input x from degrees to angles in radians.
......@@ -4272,18 +4420,29 @@ def deg2rad(x, name=None):
x = cast(x, dtype="float32")
return _legacy_C_ops.scale(x, 'scale', deg2rad_scale)
else:
check_variable_and_dtype(x, 'x', ['int32', 'int64', 'float32', 'float64'], 'deg2rad')
check_variable_and_dtype(x, 'x',
['int32', 'int64', 'float32', 'float64'],
'deg2rad')
helper = LayerHelper('deg2rad', **locals())
out_cast = x
if convert_dtype(x.dtype) in ['int32', 'int64']:
out_cast = helper.create_variable_for_type_inference(dtype=paddle.float32)
helper.append_op(
type='cast', inputs={'X':x}, outputs={'Out': out_cast}, attrs={'in_dtype': x.dtype,'out_dtype': paddle.float32})
out_cast = helper.create_variable_for_type_inference(
dtype=paddle.float32)
helper.append_op(type='cast',
inputs={'X': x},
outputs={'Out': out_cast},
attrs={
'in_dtype': x.dtype,
'out_dtype': paddle.float32
})
out = helper.create_variable_for_type_inference(dtype=out_cast.dtype)
helper.append_op(
type='scale', inputs={'X':out_cast}, outputs={'Out': out}, attrs={'scale': deg2rad_scale})
helper.append_op(type='scale',
inputs={'X': out_cast},
outputs={'Out': out},
attrs={'scale': deg2rad_scale})
return out
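# [editor's sketch, not part of this diff] rad2deg and deg2rad above are plain
# scale ops with factors 180/pi and pi/180 respectively.
import math

print(math.pi * (180.0 / math.pi))   # 180.0   (rad2deg of pi)
print(90.0 * (math.pi / 180.0))      # ~1.5708 (deg2rad of 90 degrees is pi/2)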
def gcd(x, y, name=None):
"""
Computes the element-wise greatest common divisor (GCD) of input |x| and |y|.
......@@ -4348,7 +4507,8 @@ def gcd(x, y, name=None):
y_not_equal_0 = (y != 0)
y_safe = paddle.where(y_not_equal_0, y, paddle.ones(y.shape, y.dtype))
x, y = (paddle.where(y_not_equal_0, y, x),
paddle.where(y_not_equal_0, paddle.mod(x, y_safe),paddle.zeros(y.shape, y.dtype)))
paddle.where(y_not_equal_0, paddle.mod(x, y_safe),
paddle.zeros(y.shape, y.dtype)))
return (paddle.where(x < y, y, x), paddle.where(x < y, x, y))
if paddle.in_dynamic_mode():
......@@ -4362,6 +4522,7 @@ def gcd(x, y, name=None):
out, _ = paddle.static.nn.while_loop(_gcd_cond_fn, _gcd_body_fn, [x, y])
return out
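# [editor's sketch, not part of this diff] The while-loop body above is a
# vectorised Euclidean step; this NumPy version mirrors it on a small example.
import numpy as np

x = np.array([12, 20, 7])
y = np.array([18, 0, 35])
while np.any(y != 0):
    y_safe = np.where(y != 0, y, 1)                       # avoid mod-by-zero, as above
    x, y = np.where(y != 0, y, x), np.where(y != 0, x % y_safe, 0)
print(x)   # [ 6 20  7] -> gcd(12,18)=6, gcd(20,0)=20, gcd(7,35)=7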
def lcm(x, y, name=None):
"""
Computes the element-wise least common multiple (LCM) of input |x| and |y|.
......@@ -4416,9 +4577,11 @@ def lcm(x, y, name=None):
# they won't be used.
d_equal_0 = paddle.equal(d, 0)
d_safe = paddle.where(d_equal_0, paddle.ones(d.shape, d.dtype), d)
out = paddle.where(d_equal_0, paddle.zeros(d.shape, d.dtype), paddle.abs(x * y) // d_safe)
out = paddle.where(d_equal_0, paddle.zeros(d.shape, d.dtype),
paddle.abs(x * y) // d_safe)
return out
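# [editor's sketch, not part of this diff] lcm(x, y) = |x * y| // gcd(x, y),
# with the d == 0 case forced to 0 exactly as in the branch above.
import math

def lcm_scalar(x, y):
    d = math.gcd(x, y)
    return 0 if d == 0 else abs(x * y) // d

print(lcm_scalar(4, 6))   # 12
print(lcm_scalar(0, 0))   # 0 (the d == 0 branch avoids dividing by zero)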
def diff(x, n=1, axis=-1, prepend=None, append=None, name=None):
r"""
Computes the n-th forward difference along the given axis.
......@@ -4510,14 +4673,14 @@ def diff(x, n=1, axis=-1, prepend=None, append=None, name=None):
attrs_1 += ('starts', starts_1)
ends_1 = [dim_len - 1]
attrs_1 += ('ends', ends_1)
input_front = _C_ops.slice(new_input, axes, starts_1, ends_1, infer_flags,
[])
input_front = _C_ops.slice(new_input, axes, starts_1, ends_1,
infer_flags, [])
starts_2 = [1]
attrs_2 += ('starts', starts_2)
ends_2 = [dim_len]
attrs_2 += ('ends', ends_2)
input_back = _C_ops.slice(new_input, axes, starts_2, ends_2, infer_flags,
[])
input_back = _C_ops.slice(new_input, axes, starts_2, ends_2,
infer_flags, [])
if x.dtype == paddle.bool:
return _C_ops.logical_xor(input_back, input_front)
......@@ -4564,7 +4727,8 @@ def diff(x, n=1, axis=-1, prepend=None, append=None, name=None):
else:
return elementwise_sub(input_back, input_front, axis=axis)
else:
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'bool', 'int32', 'int64'], 'diff')
check_variable_and_dtype(
x, 'x', ['float32', 'float64', 'bool', 'int32', 'int64'], 'diff')
check_type(axis, 'axis', (int), 'diff')
helper = LayerHelper('diff', **locals())
has_pend = False
......@@ -4581,9 +4745,10 @@ def diff(x, n=1, axis=-1, prepend=None, append=None, name=None):
if has_pend:
new_input = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='concat', inputs={'X': input_list}, outputs={'Out': [new_input]}, attrs={'axis': axis}
)
helper.append_op(type='concat',
inputs={'X': input_list},
outputs={'Out': [new_input]},
attrs={'axis': axis})
else:
new_input = x
......@@ -4594,29 +4759,35 @@ def diff(x, n=1, axis=-1, prepend=None, append=None, name=None):
attrs_1['starts'] = starts_1
attrs_1['ends'] = ends_1
input_front = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='slice', inputs={'Input': new_input}, attrs=attrs_1, outputs={'Out': input_front}
)
helper.append_op(type='slice',
inputs={'Input': new_input},
attrs=attrs_1,
outputs={'Out': input_front})
attrs_2 = {'axes': axes}
starts_2 = [1]
ends_2 = [dim_len]
attrs_2['starts'] = starts_2
attrs_2['ends'] = ends_2
input_back = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='slice', inputs={'Input': new_input}, attrs=attrs_2, outputs={'Out': input_back}
)
helper.append_op(type='slice',
inputs={'Input': new_input},
attrs=attrs_2,
outputs={'Out': input_back})
if dtype == paddle.bool:
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='logical_xor', inputs={"X": input_back, "Y": input_front}, outputs={"Out": out}
)
helper.append_op(type='logical_xor',
inputs={
"X": input_back,
"Y": input_front
},
outputs={"Out": out})
else:
out = elementwise_sub(input_back, input_front, axis=axis)
return out
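# [editor's sketch, not part of this diff] A first-order forward difference is
# just back_slice - front_slice, the same result np.diff produces.
import numpy as np

x = np.array([1, 4, 9, 16])
print(x[1:] - x[:-1])   # [3 5 7]
print(np.diff(x))       # [3 5 7]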
def angle(x, name=None):
r"""
Element-wise angle of complex numbers. For non-negative real numbers, the angle is 0 while
......@@ -4662,7 +4833,8 @@ def angle(x, name=None):
return _legacy_C_ops.angle(x)
check_variable_and_dtype(x, 'x',
['float32', 'float64', 'complex64', 'complex128'], 'angle')
['float32', 'float64', 'complex64', 'complex128'],
'angle')
op_type = "angle"
helper = LayerHelper(op_type, **locals())
inputs = {"X": x}
......@@ -4672,6 +4844,7 @@ def angle(x, name=None):
helper.append_op(type=op_type, inputs=inputs, outputs=outputs)
return out
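# [editor's sketch, not part of this diff] The angle (argument) of a complex
# number, shown with the stdlib cmath.phase.
import cmath

print(cmath.phase(1 + 1j))    # pi/4 ~ 0.785
print(cmath.phase(-1 + 0j))   # pi   ~ 3.142 (negative reals map to pi)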
def heaviside(x, y, name=None):
r"""
Computes the Heaviside step function determined by corresponding element in y for each element in x. The equation is
......@@ -4715,10 +4888,14 @@ def heaviside(x, y, name=None):
axis = -1
act = None
if _non_static_mode():
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name=op_type)
return _elementwise_op_in_dygraph(x,
y,
axis=axis,
act=act,
op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals()))
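# [editor's sketch, not part of this diff] The Heaviside step is 0 for x < 0,
# y for x == 0, and 1 for x > 0, applied per element.
def heaviside_scalar(x, y):
    return 0.0 if x < 0 else (y if x == 0 else 1.0)

print([heaviside_scalar(v, 0.5) for v in (-2.0, 0.0, 3.0)])   # [0.0, 0.5, 1.0]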
def frac(x, name=None):
"""
This API is used to return the fractional portion of each element in input.
......@@ -4751,28 +4928,39 @@ def frac(x, name=None):
op_type = 'elementwise_sub'
axis = -1
act = None
if x.dtype not in [paddle.int32, paddle.int64, paddle.float32, paddle.float64]:
if x.dtype not in [
paddle.int32, paddle.int64, paddle.float32, paddle.float64
]:
raise TypeError(
"The data type of input must be one of ['int32', 'int64', 'float32', 'float64'], but got {}".format(x.dtype))
"The data type of input must be one of ['int32', 'int64', 'float32', 'float64'], but got {}"
.format(x.dtype))
if in_dygraph_mode():
y = _C_ops.trunc(x)
return _C_ops.subtract(x, y)
else:
if _in_legacy_dygraph():
y = _legacy_C_ops.trunc(x)
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name=op_type)
return _elementwise_op_in_dygraph(x,
y,
axis=axis,
act=act,
op_name=op_type)
else:
inputs = {"X": x}
attrs = {}
helper = LayerHelper("trunc", **locals())
check_variable_and_dtype(x, "X", ['int32', 'int64', 'float32', 'float64'], 'trunc')
check_variable_and_dtype(x, "X",
['int32', 'int64', 'float32', 'float64'],
'trunc')
y = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="trunc", inputs=inputs, attrs=attrs, outputs={"Out": y})
helper.append_op(type="trunc",
inputs=inputs,
attrs=attrs,
outputs={"Out": y})
return _elementwise_op(LayerHelper(op_type, **locals()))
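# [editor's sketch, not part of this diff] frac is x - trunc(x), so negative
# inputs keep a negative fractional part (unlike an x - floor(x) definition).
import math

print(1.75 - math.trunc(1.75))     # 0.75
print(-1.75 - math.trunc(-1.75))   # -0.75 (a floor-based frac would give 0.25)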
def sgn(x, name=None):
"""
For complex tensor, this API returns a new tensor whose elements have the same angles as the corresponding
......@@ -4798,10 +4986,13 @@ def sgn(x, name=None):
# [0.6+0.8j 1.+0.j 0.+0.j -1.+0.j]]
"""
if x.dtype not in [paddle.float16, paddle.float32, paddle.float64, paddle.complex64, paddle.complex128]:
if x.dtype not in [
paddle.float16, paddle.float32, paddle.float64, paddle.complex64,
paddle.complex128
]:
raise TypeError(
"The data type of input must be one of ['float16', 'float32', 'float64', 'complex64', 'complex128'], but got {}"
.format(x.dtype))
.format(x.dtype))
if paddle.is_complex(x):
expand_x = paddle.as_real(x)
x_abs = paddle.abs(x)
......@@ -4814,6 +5005,7 @@ def sgn(x, name=None):
else:
return paddle.sign(x)
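# [editor's sketch, not part of this diff] For a complex input, sgn(z) = z / |z|
# (unit magnitude, same angle), and 0 stays 0.
z = 3 + 4j
print(z / abs(z))   # (0.6+0.8j), matching the first entry in the docstring above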
def take(x, index, mode='raise', name=None):
"""
Returns a new tensor with the elements of input tensor x at the given index.
......@@ -4881,16 +5073,18 @@ def take(x, index, mode='raise', name=None):
"""
if mode not in ['raise', 'wrap', 'clip']:
raise ValueError(
"'mode' in 'take' should be 'raise', 'wrap', 'clip', but received {}.".format(mode))
"'mode' in 'take' should be 'raise', 'wrap', 'clip', but received {}."
.format(mode))
if paddle.in_dynamic_mode():
if not isinstance(index, (paddle.Tensor, Variable)):
raise TypeError(
"The type of 'index' must be Tensor, but got {}".format(type(index)))
"The type of 'index' must be Tensor, but got {}".format(
type(index)))
if index.dtype not in [paddle.int32, paddle.int64]:
raise TypeError(
"The data type of 'index' must be one of ['int32', 'int64'], but got {}".format(
index.dtype))
"The data type of 'index' must be one of ['int32', 'int64'], but got {}"
.format(index.dtype))
else:
check_variable_and_dtype(index, 'index', ['int32', 'int64'], 'take')
......@@ -4904,10 +5098,9 @@ def take(x, index, mode='raise', name=None):
index_1d = paddle.where(index_1d < 0, index_1d + max_index, index_1d)
elif mode == 'wrap':
# The out of range indices are constrained by taking the remainder.
index_1d = paddle.where(index_1d < 0,
index_1d % max_index, index_1d)
index_1d = paddle.where(index_1d >= max_index,
index_1d % max_index, index_1d)
index_1d = paddle.where(index_1d < 0, index_1d % max_index, index_1d)
index_1d = paddle.where(index_1d >= max_index, index_1d % max_index,
index_1d)
elif mode == 'clip':
# 'clip' mode disables indexing with negative numbers.
index_1d = clip(index_1d, 0, max_index - 1)
......
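# [editor's sketch, not part of this diff] How the three index modes above treat
# an out-of-range flat index into a tensor with 6 elements.
max_index = 6
idx = 8
print(idx % max_index)                   # 2  ('wrap': keep the remainder)
print(min(max(idx, 0), max_index - 1))   # 5  ('clip': clamp into the valid range)
# 'raise' simply reports the out-of-range index as an error.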