diff --git a/paddle/fluid/prim/api/composite_backward/composite_backward_api.h b/paddle/fluid/prim/api/composite_backward/composite_backward_api.h
index 6473d8501d6d6157e5d79596d91cd2d5e6e1f35b..48641d852537105dbbc4af98dae7410243ed1751 100644
--- a/paddle/fluid/prim/api/composite_backward/composite_backward_api.h
+++ b/paddle/fluid/prim/api/composite_backward/composite_backward_api.h
@@ -88,7 +88,11 @@ void transpose_grad(const Tensor& grad_out,
     std::vector<int> reverse_perm(perm);
     // make origin ranks
     for (int i = 0; i < static_cast<int>(perm.size()); ++i) {
-      reverse_perm[perm[i]] = i;
+      if (perm[i] >= 0) {
+        reverse_perm[perm[i]] = i;
+      } else {
+        reverse_perm[perm[i] + perm.size()] = i;
+      }
     }
     auto grad_x_tmp = transpose<T>(grad_out, reverse_perm);
     set_output<T>(grad_x_tmp, grad_x);
diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt
index 5ae8d37f3f1d9f0daea57c7e2af877e3a5e07739..5229884fb2492d6315ce8339a14b59f5d8f0ef2f 100755
--- a/python/paddle/fluid/tests/unittests/CMakeLists.txt
+++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt
@@ -1217,7 +1217,8 @@ set(TEST_CINN_OPS
     test_elementwise_sub_op
     test_elementwise_div_op
     test_elementwise_mul_op
-    test_gather_nd_op)
+    test_gather_nd_op
+    test_transpose_op)
 
 foreach(TEST_CINN_OPS ${TEST_CINN_OPS})
   if(WITH_CINN)
diff --git a/python/paddle/fluid/tests/unittests/eager_op_test.py b/python/paddle/fluid/tests/unittests/eager_op_test.py
index d2e21863a87e9d081043127a86c97910abda2815..abcb671d47ac082598700bfa49e63ead46a55a1b 100644
--- a/python/paddle/fluid/tests/unittests/eager_op_test.py
+++ b/python/paddle/fluid/tests/unittests/eager_op_test.py
@@ -1392,6 +1392,7 @@ class OpTest(unittest.TestCase):
         inplace_atol=None,
     ):
         core._set_prim_all_enabled(False)
+        core.set_prim_eager_enabled(False)
 
         def find_imperative_actual(target_name, dygraph_outs, place):
             for name in dygraph_outs:
@@ -1982,6 +1983,7 @@ class OpTest(unittest.TestCase):
         numeric_place=None,
     ):
         core._set_prim_all_enabled(False)
+        core.set_prim_eager_enabled(False)
         if check_prim:
             prim_grad_checker = PrimGradChecker(
                 self,
diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_transpose_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_transpose_mkldnn_op.py
index 5dd35279d6214936afe4c5c6103f0844b35d4ec1..e1ef8a7a12f3ff7d4859126678a756371ad866d1 100644
--- a/python/paddle/fluid/tests/unittests/mkldnn/test_transpose_mkldnn_op.py
+++ b/python/paddle/fluid/tests/unittests/mkldnn/test_transpose_mkldnn_op.py
@@ -16,10 +16,10 @@ import unittest
 
 import numpy as np
 
-from paddle.fluid.tests.unittests.test_transpose_op import TestTransposeOp
+from paddle.fluid.tests.unittests.op_test import OpTest
 
 
-class TestTransposeMKLDNN(TestTransposeOp):
+class TestTransposeMKLDNN(OpTest):
     def setUp(self):
         self.init_op_type()
         self.initTestCase()
diff --git a/python/paddle/fluid/tests/unittests/test_transpose_op.py b/python/paddle/fluid/tests/unittests/test_transpose_op.py
index 4ec40ea9eb87e9d345d6617b00f0b768e3815d38..a8cf2b2c92615661541d7ad8b44037f6392eb1a9 100644
--- a/python/paddle/fluid/tests/unittests/test_transpose_op.py
+++ b/python/paddle/fluid/tests/unittests/test_transpose_op.py
@@ -32,6 +32,7 @@ class TestTransposeOp(OpTest):
         self.init_op_type()
         self.initTestCase()
         self.python_api = paddle.transpose
+        self.prim_op_type = "prim"
         self.inputs = {'X': np.random.random(self.shape).astype("float64")}
         self.attrs = {
             'axis': list(self.axis),
@@ -50,7 +51,7 @@ class TestTransposeOp(OpTest):
         self.check_output(no_check_set=['XShape'])
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_prim=True)
 
     def initTestCase(self):
         self.shape = (3, 40)
@@ -118,12 +119,44 @@ class TestCase9(TestTransposeOp):
 
 
 class TestCase10(TestTransposeOp):
+    def setUp(self):
+        self.init_op_type()
+        self.initTestCase()
+        self.python_api = paddle.transpose
+        self.prim_op_type = "prim"
+        self.enable_cinn = False
+        self.inputs = {'X': np.random.random(self.shape).astype("float64")}
+        self.attrs = {
+            'axis': list(self.axis),
+            'use_mkldnn': self.use_mkldnn,
+        }
+        self.outputs = {
+            'XShape': np.random.random(self.shape).astype("float64"),
+            'Out': self.inputs['X'].transpose(self.axis),
+        }
+
     def initTestCase(self):
         self.shape = (10, 8, 2)
         self.axis = (-1, 1, -3)
 
 
 class TestCase_ZeroDim(TestTransposeOp):
+    def setUp(self):
+        self.init_op_type()
+        self.initTestCase()
+        self.python_api = paddle.transpose
+        self.prim_op_type = "prim"
+        self.enable_cinn = False
+        self.inputs = {'X': np.random.random(self.shape).astype("float64")}
+        self.attrs = {
+            'axis': list(self.axis),
+            'use_mkldnn': self.use_mkldnn,
+        }
+        self.outputs = {
+            'XShape': np.random.random(self.shape).astype("float64"),
+            'Out': self.inputs['X'].transpose(self.axis),
+        }
+
     def initTestCase(self):
         self.shape = ()
         self.axis = ()
@@ -134,6 +167,7 @@ class TestAutoTuneTransposeOp(OpTest):
         self.init_op_type()
         self.initTestCase()
         self.python_api = paddle.transpose
+        self.prim_op_type = "prim"
         self.inputs = {'X': np.random.random(self.shape).astype("float64")}
         self.attrs = {
             'axis': list(self.axis),
@@ -160,7 +194,7 @@ class TestAutoTuneTransposeOp(OpTest):
         fluid.core.disable_autotune()
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_prim=True)
 
 
 class TestAutoTuneTransposeBF16Op(OpTest):
@@ -169,6 +203,8 @@ class TestAutoTuneTransposeBF16Op(OpTest):
         self.initTestCase()
         self.dtype = np.uint16
         self.python_api = paddle.transpose
+        self.prim_op_type = "prim"
+        self.enable_cinn = False
         x = np.random.random(self.shape).astype("float32")
         self.inputs = {'X': convert_float_to_uint16(x)}
         self.attrs = {
@@ -198,7 +234,7 @@ class TestAutoTuneTransposeBF16Op(OpTest):
         fluid.core.disable_autotune()
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_prim=True)
 
 
 class TestTransposeBF16Op(OpTest):
@@ -206,6 +242,8 @@ class TestTransposeBF16Op(OpTest):
         self.init_op_type()
         self.initTestCase()
         self.dtype = np.uint16
+        self.prim_op_type = "prim"
+        self.enable_cinn = False
         self.python_api = paddle.transpose
         x = np.random.random(self.shape).astype("float32")
 
diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py
index 309efc63eb76ff1789c02bac7ed3126034924243..c9b47df4d1c5e7e1ed090c942d2e8ec69d4940cd 100644
--- a/python/paddle/tensor/linalg.py
+++ b/python/paddle/tensor/linalg.py
@@ -99,6 +99,7 @@ def transpose(x, perm, name=None):
             'float64',
             'int32',
             'int64',
+            'uint16',
             'complex64',
             'complex128',
         ],
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index 3f8b35087f1cc8cf24806cd06ddbab860ac91de3..bf0a52e5724d3cb74948f7d37cb241a42e61fa18 100644
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -482,6 +482,7 @@ def transpose(x, perm, name=None):
             'float64',
             'int32',
             'int64',
+            'uint16',
             'complex64',
             'complex128',
         ],
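
Note: the transpose_grad change above computes the inverse permutation for the backward pass, and the fix is to normalize negative axes into [0, rank) before inverting, which is what TestCase10's axis of (-1, 1, -3) exercises. A minimal NumPy sketch of the same logic (inverse_perm is a hypothetical illustrative helper, not a Paddle API):

import numpy as np

def inverse_perm(perm):
    # Mirror of the transpose_grad fix: map each negative axis into
    # [0, rank) before writing its slot in the inverse permutation.
    n = len(perm)
    reverse_perm = [0] * n
    for i, p in enumerate(perm):
        reverse_perm[p if p >= 0 else p + n] = i
    return reverse_perm

# axis = (-1, 1, -3) from TestCase10 normalizes to (2, 1, 0);
# transposing by the inverse permutation recovers the input.
x = np.random.rand(10, 8, 2)
axis = (-1, 1, -3)
assert inverse_perm(axis) == [2, 1, 0]
assert np.array_equal(x.transpose(axis).transpose(inverse_perm(axis)), x)

Without the else branch, a negative perm[i] would index reverse_perm out of range, so the composite backward rule previously only handled non-negative axis attributes.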