diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py index c8ccdd6f1dc1f339215a4e556e2c88ae4165221d..4b5aee696ee3ec176f262af5a739824cde155e41 100755 --- a/python/paddle/__init__.py +++ b/python/paddle/__init__.py @@ -158,7 +158,7 @@ from .tensor.manipulation import tolist # noqa: F401 from .tensor.manipulation import tensordot # noqa: F401 from .tensor.manipulation import as_complex # noqa: F401 from .tensor.manipulation import as_real # noqa: F401 - +from .tensor.manipulation import moveaxis # noqa: F401 from .tensor.math import abs # noqa: F401 from .tensor.math import acos # noqa: F401 from .tensor.math import asin # noqa: F401 @@ -568,4 +568,5 @@ __all__ = [ # noqa 'as_real', 'diff', 'angle', + 'moveaxis', ] diff --git a/python/paddle/fluid/tests/unittests/test_transpose_op.py b/python/paddle/fluid/tests/unittests/test_transpose_op.py index e255350fd66186df5c7d0d41404122a575c8fc74..4270bc8de0b1459bb88aff57919a5f0a54643ce3 100644 --- a/python/paddle/fluid/tests/unittests/test_transpose_op.py +++ b/python/paddle/fluid/tests/unittests/test_transpose_op.py @@ -348,5 +348,77 @@ class TestTAPI(unittest.TestCase): self.assertRaises(ValueError, test_x_dimension_check) +class TestMoveAxis(unittest.TestCase): + def test_moveaxis1(self): + x_np = np.random.randn(2, 3, 4, 5, 7) + expected = np.moveaxis(x_np, [0, 4, 3, 2], [1, 3, 2, 0]) + paddle.enable_static() + with paddle.static.program_guard(fluid.Program()): + x = paddle.static.data("x", shape=[2, 3, 4, 5, 7], dtype='float64') + out = paddle.moveaxis(x, [0, 4, 3, 2], [1, 3, 2, 0]) + + exe = paddle.static.Executor() + out_np = exe.run(feed={"x": x_np}, fetch_list=[out])[0] + + self.assertEqual(np.array_equal(out_np, expected), True) + + paddle.disable_static() + x = paddle.to_tensor(x_np) + out = paddle.moveaxis(x, [0, 4, 3, 2], [1, 3, 2, 0]) + self.assertEqual(out.shape, [4, 2, 5, 7, 3]) + self.assertEqual(np.array_equal(out.numpy(), expected), True) + paddle.enable_static() + + def test_moveaxis2(self): 
+        x_np = np.random.randn(2, 3, 5) + expected = np.moveaxis(x_np, -2, -1) + paddle.enable_static() + with paddle.static.program_guard(fluid.Program()): + x = paddle.static.data("x", shape=[2, 3, 5], dtype='float64') + out = x.moveaxis(-2, -1) + + exe = paddle.static.Executor() + out_np = exe.run(feed={"x": x_np}, fetch_list=[out])[0] + + self.assertEqual(np.array_equal(out_np, expected), True) + + paddle.disable_static() + x = paddle.to_tensor(x_np) + out = x.moveaxis(-2, -1) + self.assertEqual(out.shape, [2, 5, 3]) + self.assertEqual(np.array_equal(out.numpy(), expected), True) + paddle.enable_static() + + def test_error(self): + x = paddle.randn([2, 3, 4, 5]) + # src must have the same number with dst + with self.assertRaises(AssertionError): + paddle.moveaxis(x, [1, 0], [2]) + + # each element of src must be unique + with self.assertRaises(ValueError): + paddle.moveaxis(x, [1, 1], [0, 2]) + + # each element of dst must be unique + with self.assertRaises(ValueError): + paddle.moveaxis(x, [0, 1], [2, 2]) + + # each element of src must be integer + with self.assertRaises(AssertionError): + paddle.moveaxis(x, [0.5], [1]) + + # each element of dst must be integer + with self.assertRaises(AssertionError): + paddle.moveaxis(x, [0], [1.5]) + + # each element of src must be in the range of [-4, 4) + with self.assertRaises(AssertionError): + paddle.moveaxis(x, [-10, 1], [2, 3]) + + # each element of dst must be in the range of [-4, 4) + with self.assertRaises(AssertionError): + paddle.moveaxis(x, [2, 1], [10, 3]) + + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/tensor/__init__.py b/python/paddle/tensor/__init__.py index a0d3396c8c4b2a061b721fb5356deb1dd7484096..ea3e7f00a24dbacaee1f4d008ec98d320717a598 100755 --- a/python/paddle/tensor/__init__.py +++ b/python/paddle/tensor/__init__.py @@ -113,7 +113,7 @@ from .manipulation import chunk # noqa: F401 from .manipulation import tensordot # noqa: F401 from .manipulation import as_complex # noqa: F401 
from .manipulation import as_real # noqa: F401 - +from .manipulation import moveaxis # noqa: F401 from .math import abs # noqa: F401 from .math import acos # noqa: F401 from .math import asin # noqa: F401 @@ -426,6 +426,7 @@ tensor_method_func = [ #noqa 'lerp', 'lerp_', 'angle', + 'moveaxis', ] #this list used in math_op_patch.py for magic_method bind diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py index a77f17ca55a1ad4f32169eba2ae03c203e952529..42abf4b1466fa2d10d7383495f18d13b355cebe4 100644 --- a/python/paddle/tensor/manipulation.py +++ b/python/paddle/tensor/manipulation.py @@ -13,6 +13,7 @@ # limitations under the License. from __future__ import print_function +from collections import Counter from ..fluid.layers import core from ..fluid.layer_helper import LayerHelper @@ -2579,3 +2580,103 @@ def as_real(x, name=None): outputs = {"Out": out} helper.append_op(type=op_type, inputs=inputs, outputs=outputs) return out + + +def moveaxis(x, source, destination, name=None): + """ + Move the axis of tensor from ``source`` position to ``destination`` position. + + Other axis that have not been moved remain their original order. + + Args: + x (Tensor): The input Tensor. It is a N-D Tensor of data types bool, int32, int64, float32, float64, complex64, complex128. + source(int|tuple|list): ``source`` position of axis that will be moved. Each element must be unique and integer. + destination(int|tuple|list(int)): ``destination`` position of axis that has been moved. Each element must be unique and integer. + name(str, optional): The default value is None. Normally there is no need for user to set this + property. For more information, please refer to :ref:`api_guide_Name`. + + Returns: + Tensor: A new tensor whose axis have been moved. + + Examples: + .. 
code-block:: python + + import paddle + + x = paddle.ones([3, 2, 4]) + paddle.moveaxis(x, [0, 1], [1, 2]).shape + # [4, 3, 2] + + x = paddle.ones([2, 3]) + paddle.moveaxis(x, 0, 1) # equivalent to paddle.t(x) + # [3, 2] + """ + # Copy into fresh lists: supports tuple inputs and avoids mutating the + # caller's list when negative axes are normalized below. + src = [source] if isinstance(source, int) else list(source) + dst = [destination] if isinstance(destination, int) else list(destination) + + assert len(src) == len( + dst), "'source' must have the same number with 'destination'" + + # 'count' is empty for empty 'source'/'destination'; guard before indexing. + count = Counter(src).most_common(1) + if count and count[0][1] > 1: + raise ValueError("Each element of 'source' must be unique!") + count = Counter(dst).most_common(1) + if count and count[0][1] > 1: + raise ValueError("Each element of 'destination' must be unique!") + + ndim = len(x.shape) + + # perm is the new order after move axis + perm = list(range(ndim)) + src_dims = list(range(ndim)) + dst_dims = list(range(ndim)) + + for i, axis in enumerate(zip(src, dst)): + assert isinstance(axis[0], + int), "Each element of 'source' must be integer." + if axis[0] < 0: + assert axis[ + 0] >= -ndim, "'source' must be in the range of [-{0}, {0})".format( + ndim) + src[i] += ndim + else: + assert axis[ + 0] < ndim, "'source' must be in the range of [-{0}, {0})".format( + ndim) + + assert isinstance(axis[1], + int), "Each element of 'destination' must be integer." 
+        if axis[1] < 0: + assert axis[ + 1] >= -ndim, "'destination' must be in the range of [-{0}, {0})".format( + ndim) + dst[i] += ndim + else: + assert axis[ + 1] < ndim, "'destination' must be in the range of [-{0}, {0})".format( + ndim) + perm[dst[i]] = src[i] + src_dims.remove(src[i]) + dst_dims.remove(dst[i]) + + # Remaining (unmoved) axes fill the remaining slots in original order. + for i in range(len(src_dims)): + perm[dst_dims[i]] = src_dims[i] + + if in_dygraph_mode(): + out, _ = _C_ops.transpose2(x, 'axis', perm) + return out + + check_variable_and_dtype( + x, 'x', ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], + 'moveaxis') + + helper = LayerHelper('moveaxis', **locals()) + out = helper.create_variable_for_type_inference(x.dtype) + x_shape = helper.create_variable_for_type_inference(x.dtype) + helper.append_op( + type='transpose2', + inputs={'X': [x]}, + outputs={'Out': [out], + 'XShape': [x_shape]}, + attrs={'axis': perm}) + return out