diff --git a/python/paddle/fluid/dygraph/math_op_patch.py b/python/paddle/fluid/dygraph/math_op_patch.py
index 9fbf176c22c7f8e2f78a5b3ff049002f129858a6..6b57544329e7c7e136b4a107a7696a7ee4e9db96 100644
--- a/python/paddle/fluid/dygraph/math_op_patch.py
+++ b/python/paddle/fluid/dygraph/math_op_patch.py
@@ -148,6 +148,16 @@ def monkey_patch_math_varbase():
     def _size_(var):
         return np.prod(var.shape)
 
+    @property
+    def _T_(var):
+        if len(var.shape) == 1:
+            return var
+        perm = []
+        for i in range(len(var.shape)):
+            perm.insert(0, i)
+        out, _ = _C_ops.transpose2(var, 'axis', perm)
+        return out
+
     def _scalar_add_(var, value):
         return _scalar_elementwise_op_(var, 1.0, value)
 
@@ -271,6 +281,7 @@ def monkey_patch_math_varbase():
         ('ndimension', lambda x: len(x.shape)),
         ('ndim', _ndim_),
         ('size', _size_),
+        ('T', _T_),
         ('__add__',
          _binary_creator_('__add__', 'elementwise_add', False, _scalar_add_)),
         ##  a+b == b+a. Do not need to reverse explicitly
diff --git a/python/paddle/fluid/dygraph/varbase_patch_methods.py b/python/paddle/fluid/dygraph/varbase_patch_methods.py
index 83e7d0ae1e09b7746326d610f7dfa1608c7968a6..e39a86e961d3bc2589f5dec46ef7145e2d23145a 100644
--- a/python/paddle/fluid/dygraph/varbase_patch_methods.py
+++ b/python/paddle/fluid/dygraph/varbase_patch_methods.py
@@ -88,17 +88,17 @@ def monkey_patch_varbase():
         """
 
         # Note: getattr(self, attr, None) will call x.grad=x.gradient(), but gradient() only available in dygraph.
-        # It will fail. So, for propery in dygraph only, should not let it getattr(self, attr, None).
-        attr_not_need_keys = ['grad']
+        # It will fail. So, for properties that differ between dynamic and static graph, we should not call getattr(self, attr, None).
+        attr_not_need_keys = ['grad', 'T']
         if isinstance(self, ParamBase):
             attr_kwargs = self.__dict__.copy()
         else:
             attr_names = []
             for name in dir(self):
-                if name not in attr_not_need_keys and not (
-                        inspect.ismethod(getattr(self, name)) or
-                        name.startswith('_')):
-                    attr_names.append(name)
+                if name not in attr_not_need_keys:
+                    if not inspect.ismethod(getattr(
+                            self, name)) and not name.startswith('_'):
+                        attr_names.append(name)
             attr_kwargs = {name: getattr(self, name) for name in attr_names}
 
         attr_keys = ['block', 'shape', 'dtype', 'type', 'name', 'persistable']
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index 13477fd3422007565856dffc1a511579045adfcd..6c95c9fad5663cfce5318f0bb6776abb45b60d54 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -1503,6 +1503,55 @@ class Variable(object):
         """
         return self.desc.type()
 
+    @property
+    def T(self):
+        """
+        Permute current Variable with its dimensions reversed.
+
+        If `n` is the number of dimensions of `x`, `x.T` is equivalent to `x.transpose([n-1, n-2, ..., 0])`.
+
+        Examples:
+
+            .. code-block:: python
+
+                import paddle
+                paddle.enable_static()
+
+                x = paddle.ones(shape=[2, 3, 5])
+                x_T = x.T
+
+                exe = paddle.static.Executor()
+                x_T_np = exe.run(paddle.static.default_main_program(), fetch_list=[x_T])[0]
+                print(x_T_np.shape)
+                # (5, 3, 2)
+        """
+        if len(self.shape) == 1:
+            return self
+        perm = []
+        for i in range(len(self.shape)):
+            perm.insert(0, i)
+
+        out = self.block.create_var(
+            name=unique_name.generate_with_ignorable_key(self.name + '.tmp'),
+            dtype=self.dtype,
+            type=self.type,
+            persistable=False,
+            stop_gradient=False)
+        input_shape = self.block.create_var(
+            name=unique_name.generate_with_ignorable_key(self.name + '.tmp'),
+            dtype=self.dtype,
+            type=core.VarDesc.VarType.LOD_TENSOR,
+            persistable=False,
+            stop_gradient=False)
+
+        self.block.append_op(
+            type='transpose2',
+            inputs={'X': [self]},
+            outputs={'Out': [out],
+                     'XShape': [input_shape]},
+            attrs={'axis': perm})
+        return out
+
     def clone(self):
         """
         Returns a new static Variable, which is the clone of the original static
diff --git a/python/paddle/fluid/tests/unittests/test_math_op_patch.py b/python/paddle/fluid/tests/unittests/test_math_op_patch.py
index cef5adbc5d3e3688ad534fecd1ac8b11dab512f7..258543631f970e0cf410784347df26151b811edb 100644
--- a/python/paddle/fluid/tests/unittests/test_math_op_patch.py
+++ b/python/paddle/fluid/tests/unittests/test_math_op_patch.py
@@ -335,6 +335,20 @@ class TestMathOpPatches(unittest.TestCase):
                       fetch_list=[z])
         self.assertTrue(np.array_equal(out[0], out_np))
 
+    @prog_scope()
+    def test_T(self):
+        x_np = np.random.randint(-100, 100, [2, 8, 5, 3]).astype("int32")
+        out_np = x_np.T
+
+        x = paddle.static.data(name="x", shape=[2, 8, 5, 3], dtype="int32")
+        z = x.T
+
+        exe = fluid.Executor()
+        out = exe.run(fluid.default_main_program(),
+                      feed={"x": x_np},
+                      fetch_list=[z])
+        self.assertTrue(np.array_equal(out[0], out_np))
+
     @prog_scope()
     def test_ndim(self):
         a = paddle.static.data(name="a", shape=[10, 1])
diff --git a/python/paddle/fluid/tests/unittests/test_math_op_patch_var_base.py b/python/paddle/fluid/tests/unittests/test_math_op_patch_var_base.py
index 0afc9ee6253ea627860aae1be0fa2d0aa3cb2c6f..3f611a319215cc4ec59ffbad47fb4cd82b3e4499 100644
--- a/python/paddle/fluid/tests/unittests/test_math_op_patch_var_base.py
+++ b/python/paddle/fluid/tests/unittests/test_math_op_patch_var_base.py
@@ -527,6 +527,12 @@ class TestMathOpPatchesVarBase(unittest.TestCase):
             np.array_equal(
                 x.where(a, b).numpy(), paddle.where(x, a, b).numpy()))
 
+        x_np = np.random.randn(3, 6, 9, 7)
+        x = paddle.to_tensor(x_np)
+        x_T = x.T
+        self.assertEqual(x_T.shape, [7, 9, 6, 3])
+        self.assertTrue(np.array_equal(x_T.numpy(), x_np.T))
+
         self.assertTrue(inspect.ismethod(a.dot))
         self.assertTrue(inspect.ismethod(a.logsumexp))
         self.assertTrue(inspect.ismethod(a.multiplex))
diff --git a/python/paddle/tensor/__init__.py b/python/paddle/tensor/__init__.py
index 375375c8604de3b6bb72bf37fe02fc36de658781..a67b015f8ffefe4506dfdb4d461c792cad089f07 100755
--- a/python/paddle/tensor/__init__.py
+++ b/python/paddle/tensor/__init__.py
@@ -367,8 +367,8 @@ tensor_method_func = [ #noqa
     'real',
     'imag',
     'digamma',
-    'diagonal'
-    'trunc'
+    'diagonal',
+    'trunc',
     'bitwise_and',
     'bitwise_or',
     'bitwise_xor',
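
For context, a minimal dygraph-mode sketch of what this patch enables, assuming a Paddle 2.x build that includes the `T` property added above; the array values and shapes are illustrative only:

    import numpy as np
    import paddle  # dygraph (imperative) mode is the default in Paddle 2.x

    x_np = np.arange(24).reshape([2, 3, 4]).astype("float32")
    x = paddle.to_tensor(x_np)

    # x.T reverses all dimensions, matching numpy's ndarray.T
    y = x.T
    print(y.shape)                            # [4, 3, 2]
    print(np.array_equal(y.numpy(), x_np.T))  # True

    # A 1-D tensor is returned unchanged, also matching numpy
    v = paddle.to_tensor([1.0, 2.0, 3.0])
    print(v.T.shape)                          # [3]

Returning the tensor itself for 1-D inputs mirrors numpy, where `ndarray.T` is a no-op on vectors.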