From 1a532d5133c732c60e43fb9713b5ec9be353ce47 Mon Sep 17 00:00:00 2001
From: joejiong
Date: Fri, 20 Nov 2020 17:59:51 +0800
Subject: [PATCH] add uint8 support for squeeze operator (#28734)

Adding uint8 support for squeeze operator.

---
 paddle/fluid/operators/squeeze_op.cc          |   4 +
 paddle/fluid/operators/squeeze_op.cu.cc       |   4 +
 paddle/fluid/operators/squeeze_op.h           |   0
 paddle/fluid/operators/unsqueeze_op.cc        |   4 +
 paddle/fluid/operators/unsqueeze_op.cu.cc     |   4 +
 .../fluid/tests/unittests/test_squeeze2_op.py |   3 +-
 .../fluid/tests/unittests/test_squeeze_op.py  |  89 ++++++----
 .../tests/unittests/test_unsqueeze2_op.py     |  32 ++--
 .../tests/unittests/test_unsqueeze_op.py      | 165 ++++++++++++------
 9 files changed, 202 insertions(+), 103 deletions(-)
 mode change 100644 => 100755 paddle/fluid/operators/squeeze_op.cc
 mode change 100644 => 100755 paddle/fluid/operators/squeeze_op.cu.cc
 mode change 100644 => 100755 paddle/fluid/operators/squeeze_op.h
 mode change 100644 => 100755 paddle/fluid/operators/unsqueeze_op.cc
 mode change 100644 => 100755 paddle/fluid/operators/unsqueeze_op.cu.cc
 mode change 100644 => 100755 python/paddle/fluid/tests/unittests/test_squeeze2_op.py
 mode change 100644 => 100755 python/paddle/fluid/tests/unittests/test_squeeze_op.py
 mode change 100644 => 100755 python/paddle/fluid/tests/unittests/test_unsqueeze2_op.py
 mode change 100644 => 100755 python/paddle/fluid/tests/unittests/test_unsqueeze_op.py

diff --git a/paddle/fluid/operators/squeeze_op.cc b/paddle/fluid/operators/squeeze_op.cc
old mode 100644
new mode 100755
index 479973a5daa..ff4ec2f5324
--- a/paddle/fluid/operators/squeeze_op.cc
+++ b/paddle/fluid/operators/squeeze_op.cc
@@ -337,6 +337,7 @@ REGISTER_OP_CPU_KERNEL(
     ops::SqueezeKernel,
     ops::SqueezeKernel,
     ops::SqueezeKernel,
+    ops::SqueezeKernel,
     ops::SqueezeKernel,
     ops::SqueezeKernel);
 REGISTER_OP_CPU_KERNEL(
@@ -345,6 +346,7 @@ REGISTER_OP_CPU_KERNEL(
     ops::SqueezeGradKernel,
     ops::SqueezeGradKernel,
     ops::SqueezeGradKernel,
+    ops::SqueezeGradKernel,
     ops::SqueezeGradKernel,
     ops::SqueezeGradKernel);
 REGISTER_OP_CPU_KERNEL(
@@ -352,6 +354,7 @@ REGISTER_OP_CPU_KERNEL(
     ops::Squeeze2Kernel,
     ops::Squeeze2Kernel,
     ops::Squeeze2Kernel,
+    ops::Squeeze2Kernel,
     ops::Squeeze2Kernel,
     ops::Squeeze2Kernel);
 REGISTER_OP_CPU_KERNEL(
@@ -360,5 +363,6 @@ REGISTER_OP_CPU_KERNEL(
     ops::Squeeze2GradKernel,
     ops::Squeeze2GradKernel,
     ops::Squeeze2GradKernel,
+    ops::Squeeze2GradKernel,
     ops::Squeeze2GradKernel,
     ops::Squeeze2GradKernel);
diff --git a/paddle/fluid/operators/squeeze_op.cu.cc b/paddle/fluid/operators/squeeze_op.cu.cc
old mode 100644
new mode 100755
index f469118fae7..23431df12b6
--- a/paddle/fluid/operators/squeeze_op.cu.cc
+++ b/paddle/fluid/operators/squeeze_op.cu.cc
@@ -23,6 +23,7 @@ REGISTER_OP_CUDA_KERNEL(
     ops::SqueezeKernel,
     ops::SqueezeKernel,
     ops::SqueezeKernel,
+    ops::SqueezeKernel,
     ops::SqueezeKernel,
     ops::SqueezeKernel);
 REGISTER_OP_CUDA_KERNEL(
@@ -32,6 +33,7 @@ REGISTER_OP_CUDA_KERNEL(
     ops::SqueezeGradKernel,
     ops::SqueezeGradKernel,
     ops::SqueezeGradKernel,
+    ops::SqueezeGradKernel,
     ops::SqueezeGradKernel,
     ops::SqueezeGradKernel);
 REGISTER_OP_CUDA_KERNEL(
@@ -41,6 +43,7 @@ REGISTER_OP_CUDA_KERNEL(
     ops::Squeeze2Kernel,
     ops::Squeeze2Kernel,
     ops::Squeeze2Kernel,
+    ops::Squeeze2Kernel,
     ops::Squeeze2Kernel);
 REGISTER_OP_CUDA_KERNEL(
     squeeze2_grad,
@@ -50,4 +53,5 @@ REGISTER_OP_CUDA_KERNEL(
     ops::Squeeze2GradKernel,
     ops::Squeeze2GradKernel,
     ops::Squeeze2GradKernel,
+    ops::Squeeze2GradKernel,
     ops::Squeeze2GradKernel);
diff --git a/paddle/fluid/operators/squeeze_op.h b/paddle/fluid/operators/squeeze_op.h
old mode 100644
new mode 100755
diff --git a/paddle/fluid/operators/unsqueeze_op.cc b/paddle/fluid/operators/unsqueeze_op.cc
old mode 100644
new mode 100755
index 0e58e1391cf..8a645e87158
--- a/paddle/fluid/operators/unsqueeze_op.cc
+++ b/paddle/fluid/operators/unsqueeze_op.cc
@@ -362,6 +362,7 @@ REGISTER_OP_CPU_KERNEL(
     ops::UnsqueezeKernel,
     ops::UnsqueezeKernel,
     ops::UnsqueezeKernel,
+    ops::UnsqueezeKernel,
     ops::UnsqueezeKernel,
     ops::UnsqueezeKernel);
 REGISTER_OP_CPU_KERNEL(
@@ -370,6 +371,7 @@ REGISTER_OP_CPU_KERNEL(
     ops::UnsqueezeGradKernel,
     ops::UnsqueezeGradKernel,
     ops::UnsqueezeGradKernel,
+    ops::UnsqueezeGradKernel,
     ops::UnsqueezeGradKernel,
     ops::UnsqueezeGradKernel);
 REGISTER_OP_CPU_KERNEL(
@@ -377,6 +379,7 @@ REGISTER_OP_CPU_KERNEL(
     ops::UnsqueezeKernel,
     ops::UnsqueezeKernel,
     ops::UnsqueezeKernel,
+    ops::UnsqueezeKernel,
     ops::UnsqueezeKernel,
     ops::UnsqueezeKernel);
 REGISTER_OP_CPU_KERNEL(
@@ -385,5 +388,6 @@ REGISTER_OP_CPU_KERNEL(
     ops::Unsqueeze2GradKernel,
     ops::Unsqueeze2GradKernel,
     ops::Unsqueeze2GradKernel,
+    ops::Unsqueeze2GradKernel,
     ops::Unsqueeze2GradKernel,
     ops::Unsqueeze2GradKernel);
diff --git a/paddle/fluid/operators/unsqueeze_op.cu.cc b/paddle/fluid/operators/unsqueeze_op.cu.cc
old mode 100644
new mode 100755
index 0e8f47a6923..2781b3ef8c8
--- a/paddle/fluid/operators/unsqueeze_op.cu.cc
+++ b/paddle/fluid/operators/unsqueeze_op.cu.cc
@@ -23,6 +23,7 @@ REGISTER_OP_CUDA_KERNEL(
     ops::UnsqueezeKernel,
     ops::UnsqueezeKernel,
     ops::UnsqueezeKernel,
+    ops::UnsqueezeKernel,
     ops::UnsqueezeKernel,
     ops::UnsqueezeKernel);
 REGISTER_OP_CUDA_KERNEL(
@@ -34,6 +35,7 @@ REGISTER_OP_CUDA_KERNEL(
     ops::UnsqueezeGradKernel,
     ops::UnsqueezeGradKernel,
     ops::UnsqueezeGradKernel,
+    ops::UnsqueezeGradKernel,
     ops::UnsqueezeGradKernel);
 REGISTER_OP_CUDA_KERNEL(
     unsqueeze2,
@@ -42,6 +44,7 @@ REGISTER_OP_CUDA_KERNEL(
     ops::UnsqueezeKernel,
     ops::UnsqueezeKernel,
     ops::UnsqueezeKernel,
+    ops::UnsqueezeKernel,
     ops::UnsqueezeKernel,
     ops::UnsqueezeKernel);
 REGISTER_OP_CUDA_KERNEL(
@@ -52,5 +55,6 @@ REGISTER_OP_CUDA_KERNEL(
                             plat::float16>,
     ops::Unsqueeze2GradKernel,
     ops::Unsqueeze2GradKernel,
+    ops::Unsqueeze2GradKernel,
     ops::Unsqueeze2GradKernel,
     ops::Unsqueeze2GradKernel);
diff --git a/python/paddle/fluid/tests/unittests/test_squeeze2_op.py b/python/paddle/fluid/tests/unittests/test_squeeze2_op.py
old mode 100644
new mode 100755
index 377f8597cca..fc43a8e7823
--- a/python/paddle/fluid/tests/unittests/test_squeeze2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_squeeze2_op.py
@@ -13,12 +13,13 @@
 # limitations under the License.
 
 from __future__ import print_function
-
 import unittest
+
 import numpy as np
 from op_test import OpTest
 import paddle
+
 paddle.enable_static()
diff --git a/python/paddle/fluid/tests/unittests/test_squeeze_op.py b/python/paddle/fluid/tests/unittests/test_squeeze_op.py
old mode 100644
new mode 100755
index 830678fe8f6..3a26f967e9b
--- a/python/paddle/fluid/tests/unittests/test_squeeze_op.py
+++ b/python/paddle/fluid/tests/unittests/test_squeeze_op.py
@@ -13,13 +13,15 @@
 # limitations under the License.
 
 from __future__ import print_function
-
 import unittest
+
 import numpy as np
+
+import paddle
 import paddle.fluid as fluid
 from paddle.fluid import compiler, Program, program_guard
-import paddle
 from op_test import OpTest
+
 paddle.enable_static()
@@ -81,27 +83,30 @@ class TestSqueezeOp4(TestSqueezeOp):
 
 class TestSqueezeOpError(unittest.TestCase):
     def test_errors(self):
+        paddle.enable_static()
         with program_guard(Program(), Program()):
             # The input type of softmax_op must be Variable.
             x1 = fluid.create_lod_tensor(
-                np.array([[-1]]), [[1]], fluid.CPUPlace())
-            self.assertRaises(TypeError, fluid.layers.squeeze, x1)
+                np.array([[-1]]), [[1]], paddle.CPUPlace())
+            self.assertRaises(TypeError, paddle.squeeze, x1)
             # The input axes of squeeze must be list.
-            x2 = fluid.layers.data(name='x2', shape=[4], dtype="int32")
-            self.assertRaises(TypeError, fluid.layers.squeeze, x2, axes=0)
+            x2 = paddle.static.data(name='x2', shape=[4], dtype="int32")
+            self.assertRaises(TypeError, paddle.squeeze, x2, axes=0)
             # The input dtype of squeeze not support float16.
-            x3 = fluid.layers.data(name='x3', shape=[4], dtype="float16")
-            self.assertRaises(TypeError, fluid.layers.squeeze, x3, axes=0)
+            x3 = paddle.static.data(name='x3', shape=[4], dtype="float16")
+            self.assertRaises(TypeError, paddle.squeeze, x3, axes=0)
 
 
 class API_TestSqueeze(unittest.TestCase):
     def test_out(self):
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            data1 = fluid.layers.data(
+        paddle.enable_static()
+        with paddle.static.program_guard(paddle.static.Program(),
+                                         paddle.static.Program()):
+            data1 = paddle.static.data(
                 'data1', shape=[-1, 1, 10], dtype='float64')
             result_squeeze = paddle.squeeze(data1, axis=[1])
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
+            place = paddle.CPUPlace()
+            exe = paddle.static.Executor(place)
             input1 = np.random.random([5, 1, 10]).astype('float64')
             result, = exe.run(feed={"data1": input1},
                               fetch_list=[result_squeeze])
@@ -111,31 +116,49 @@ class API_TestSqueeze(unittest.TestCase):
 
 class API_TestDygraphSqueeze(unittest.TestCase):
     def test_out(self):
-        with fluid.dygraph.guard():
-            input_1 = np.random.random([5, 1, 10]).astype("int32")
-            input = fluid.dygraph.to_variable(input_1)
-            output = paddle.squeeze(input, axis=[1])
-            out_np = output.numpy()
-            expected_out = np.squeeze(input_1, axis=1)
-            self.assertTrue(np.allclose(expected_out, out_np))
+        paddle.disable_static()
+        input_1 = np.random.random([5, 1, 10]).astype("int32")
+        input = paddle.to_tensor(input_1)
+        output = paddle.squeeze(input, axis=[1])
+        out_np = output.numpy()
+        expected_out = np.squeeze(input_1, axis=1)
+        self.assertTrue(np.allclose(expected_out, out_np))
+
+    def test_out_int8(self):
+        paddle.disable_static()
+        input_1 = np.random.random([5, 1, 10]).astype("int8")
+        input = paddle.to_tensor(input_1)
+        output = paddle.squeeze(input, axis=[1])
+        out_np = output.numpy()
+        expected_out = np.squeeze(input_1, axis=1)
+        self.assertTrue(np.allclose(expected_out, out_np))
+
+    def test_out_uint8(self):
+        paddle.disable_static()
+        input_1 = np.random.random([5, 1, 10]).astype("uint8")
+        input = paddle.to_tensor(input_1)
+        output = paddle.squeeze(input, axis=[1])
+        out_np = output.numpy()
+        expected_out = np.squeeze(input_1, axis=1)
+        self.assertTrue(np.allclose(expected_out, out_np))
 
     def test_axis_not_list(self):
-        with fluid.dygraph.guard():
-            input_1 = np.random.random([5, 1, 10]).astype("int32")
-            input = fluid.dygraph.to_variable(input_1)
-            output = paddle.squeeze(input, axis=1)
-            out_np = output.numpy()
-            expected_out = np.squeeze(input_1, axis=1)
-            self.assertTrue(np.allclose(expected_out, out_np))
+        paddle.disable_static()
+        input_1 = np.random.random([5, 1, 10]).astype("int32")
+        input = paddle.to_tensor(input_1)
+        output = paddle.squeeze(input, axis=1)
+        out_np = output.numpy()
+        expected_out = np.squeeze(input_1, axis=1)
+        self.assertTrue(np.allclose(expected_out, out_np))
 
     def test_dimension_not_1(self):
-        with fluid.dygraph.guard():
-            input_1 = np.random.random([5, 1, 10]).astype("int32")
-            input = fluid.dygraph.to_variable(input_1)
-            output = paddle.squeeze(input, axis=(1, 2))
-            out_np = output.numpy()
-            expected_out = np.squeeze(input_1, axis=1)
-            self.assertTrue(np.allclose(expected_out, out_np))
+        paddle.disable_static()
+        input_1 = np.random.random([5, 1, 10]).astype("int32")
+        input = paddle.to_tensor(input_1)
+        output = paddle.squeeze(input, axis=(1, 2))
+        out_np = output.numpy()
+        expected_out = np.squeeze(input_1, axis=1)
+        self.assertTrue(np.allclose(expected_out, out_np))
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/test_unsqueeze2_op.py b/python/paddle/fluid/tests/unittests/test_unsqueeze2_op.py
old mode 100644
new mode 100755
index eaecf91215c..7a57f8a3825
--- a/python/paddle/fluid/tests/unittests/test_unsqueeze2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_unsqueeze2_op.py
@@ -13,12 +13,14 @@
 # limitations under the License.
 
 from __future__ import print_function
-
 import unittest
+
 import numpy as np
+
+import paddle
 import paddle.fluid as fluid
 from op_test import OpTest
-import paddle
+
 paddle.enable_static()
@@ -208,24 +210,24 @@ class TestUnsqueezeOp4_AxesTensor(TestUnsqueezeOp_AxesTensor):
 
 class TestUnsqueezeAPI(unittest.TestCase):
     def test_api(self):
         input = np.random.random([3, 2, 5]).astype("float64")
-        x = fluid.data(name='x', shape=[3, 2, 5], dtype="float64")
+        x = paddle.static.data(name='x', shape=[3, 2, 5], dtype="float64")
         positive_3_int32 = fluid.layers.fill_constant([1], "int32", 3)
         positive_1_int64 = fluid.layers.fill_constant([1], "int64", 1)
-        axes_tensor_int32 = fluid.data(
+        axes_tensor_int32 = paddle.static.data(
             name='axes_tensor_int32', shape=[3], dtype="int32")
-        axes_tensor_int64 = fluid.data(
+        axes_tensor_int64 = paddle.static.data(
             name='axes_tensor_int64', shape=[3], dtype="int64")
 
-        out_1 = fluid.layers.unsqueeze(x, axes=[3, 1, 1])
-        out_2 = fluid.layers.unsqueeze(
-            x, axes=[positive_3_int32, positive_1_int64, 1])
-        out_3 = fluid.layers.unsqueeze(x, axes=axes_tensor_int32)
-        out_4 = fluid.layers.unsqueeze(x, axes=3)
-        out_5 = fluid.layers.unsqueeze(x, axes=axes_tensor_int64)
+        out_1 = paddle.unsqueeze(x, axis=[3, 1, 1])
+        out_2 = paddle.unsqueeze(
+            x, axis=[positive_3_int32, positive_1_int64, 1])
+        out_3 = paddle.unsqueeze(x, axis=axes_tensor_int32)
+        out_4 = paddle.unsqueeze(x, axis=3)
+        out_5 = paddle.unsqueeze(x, axis=axes_tensor_int64)
 
-        exe = fluid.Executor(place=fluid.CPUPlace())
+        exe = paddle.static.Executor(place=paddle.CPUPlace())
         res_1, res_2, res_3, res_4, res_5 = exe.run(
-            fluid.default_main_program(),
+            paddle.static.default_main_program(),
             feed={
                 "x": input,
                 "axes_tensor_int32": np.array([3, 1, 1]).astype("int32"),
@@ -241,8 +243,8 @@ class TestUnsqueezeAPI(unittest.TestCase):
 
     def test_error(self):
         def test_axes_type():
-            x2 = fluid.data(name="x2", shape=[2, 25], dtype="int32")
-            fluid.layers.unsqueeze(x2, axes=2.1)
+            x2 = paddle.static.data(name="x2", shape=[2, 25], dtype="int32")
+            paddle.unsqueeze(x2, axis=2.1)
 
         self.assertRaises(TypeError, test_axes_type)
 
diff --git a/python/paddle/fluid/tests/unittests/test_unsqueeze_op.py b/python/paddle/fluid/tests/unittests/test_unsqueeze_op.py
old mode 100644
new mode 100755
index f8d27dd42f4..98cb5cdb550
--- a/python/paddle/fluid/tests/unittests/test_unsqueeze_op.py
+++ b/python/paddle/fluid/tests/unittests/test_unsqueeze_op.py
@@ -13,12 +13,14 @@
 # limitations under the License.
 
 from __future__ import print_function
-
 import unittest
+
 import numpy as np
+
 import paddle
 import paddle.fluid as fluid
 from op_test import OpTest
+
 paddle.enable_static()
@@ -80,11 +82,13 @@ class TestUnsqueezeOp4(TestUnsqueezeOp):
 
 class API_TestUnsqueeze(unittest.TestCase):
     def test_out(self):
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            data1 = fluid.layers.data('data1', shape=[-1, 10], dtype='float64')
+        paddle.enable_static()
+        with paddle.static.program_guard(paddle.static.Program(),
+                                         paddle.static.Program()):
+            data1 = paddle.static.data('data1', shape=[-1, 10], dtype='float64')
             result_squeeze = paddle.unsqueeze(data1, axis=[1])
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
+            place = paddle.CPUPlace()
+            exe = paddle.static.Executor(place)
             input1 = np.random.random([5, 1, 10]).astype('float64')
             input = np.squeeze(input1, axis=1)
             result, = exe.run(feed={"data1": input},
@@ -94,10 +98,12 @@ class API_TestUnsqueeze(unittest.TestCase):
 
 class TestUnsqueezeOpError(unittest.TestCase):
     def test_errors(self):
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
+        paddle.enable_static()
+        with paddle.static.program_guard(paddle.static.Program(),
+                                         paddle.static.Program()):
             # The type of axis in split_op should be int or Variable.
             def test_axes_type():
-                x6 = fluid.layers.data(
+                x6 = paddle.static.data(
                     shape=[-1, 10], dtype='float16', name='x3')
                 paddle.unsqueeze(x6, axis=3.2)
 
@@ -106,12 +112,14 @@ class TestUnsqueezeOpError(unittest.TestCase):
 
 class API_TestUnsqueeze2(unittest.TestCase):
     def test_out(self):
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            data1 = fluid.data('data1', shape=[-1, 10], dtype='float64')
-            data2 = fluid.data('data2', shape=[1], dtype='int32')
+        paddle.enable_static()
+        with paddle.static.program_guard(paddle.static.Program(),
+                                         paddle.static.Program()):
+            data1 = paddle.static.data('data1', shape=[-1, 10], dtype='float64')
+            data2 = paddle.static.data('data2', shape=[1], dtype='int32')
             result_squeeze = paddle.unsqueeze(data1, axis=data2)
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
+            place = paddle.CPUPlace()
+            exe = paddle.static.Executor(place)
             input1 = np.random.random([5, 1, 10]).astype('float64')
             input2 = np.array([1]).astype('int32')
             input = np.squeeze(input1, axis=1)
@@ -123,12 +131,14 @@ class API_TestUnsqueeze2(unittest.TestCase):
 
 class API_TestUnsqueeze3(unittest.TestCase):
     def test_out(self):
-        with fluid.program_guard(fluid.Program(), fluid.Program()):
-            data1 = fluid.data('data1', shape=[-1, 10], dtype='float64')
-            data2 = fluid.data('data2', shape=[1], dtype='int32')
+        paddle.enable_static()
+        with paddle.static.program_guard(paddle.static.Program(),
+                                         paddle.static.Program()):
+            data1 = paddle.static.data('data1', shape=[-1, 10], dtype='float64')
+            data2 = paddle.static.data('data2', shape=[1], dtype='int32')
             result_squeeze = paddle.unsqueeze(data1, axis=[data2, 3])
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
+            place = paddle.CPUPlace()
+            exe = paddle.static.Executor(place)
             input1 = np.random.random([5, 1, 10, 1]).astype('float64')
             input2 = np.array([1]).astype('int32')
             input = np.squeeze(input1)
@@ -141,55 +151,102 @@ class API_TestUnsqueeze3(unittest.TestCase):
 
 class API_TestDyUnsqueeze(unittest.TestCase):
     def test_out(self):
-        with fluid.dygraph.guard():
-            input_1 = np.random.random([5, 1, 10]).astype("int32")
-            input1 = np.expand_dims(input_1, axis=1)
-            input = fluid.dygraph.to_variable(input_1)
-            output = paddle.unsqueeze(input, axis=[1])
-            out_np = output.numpy()
-            self.assertTrue(np.array_equal(input1, out_np))
-            self.assertEqual(input1.shape, out_np.shape)
+        paddle.disable_static()
+        input_1 = np.random.random([5, 1, 10]).astype("int32")
+        input1 = np.expand_dims(input_1, axis=1)
+        input = paddle.to_tensor(input_1)
+        output = paddle.unsqueeze(input, axis=[1])
+        out_np = output.numpy()
+        self.assertTrue(np.array_equal(input1, out_np))
+        self.assertEqual(input1.shape, out_np.shape)
 
 
 class API_TestDyUnsqueeze2(unittest.TestCase):
     def test_out(self):
-        with fluid.dygraph.guard():
-            input1 = np.random.random([5, 10]).astype("int32")
-            out1 = np.expand_dims(input1, axis=1)
-            input = fluid.dygraph.to_variable(input1)
-            output = paddle.unsqueeze(input, axis=1)
-            out_np = output.numpy()
-            self.assertTrue(np.array_equal(out1, out_np))
-            self.assertEqual(out1.shape, out_np.shape)
+        paddle.disable_static()
+        input1 = np.random.random([5, 10]).astype("int32")
+        out1 = np.expand_dims(input1, axis=1)
+        input = paddle.to_tensor(input1)
+        output = paddle.unsqueeze(input, axis=1)
+        out_np = output.numpy()
+        self.assertTrue(np.array_equal(out1, out_np))
+        self.assertEqual(out1.shape, out_np.shape)
 
 
 class API_TestDyUnsqueezeAxisTensor(unittest.TestCase):
     def test_out(self):
-        with fluid.dygraph.guard():
-            input1 = np.random.random([5, 10]).astype("int32")
-            out1 = np.expand_dims(input1, axis=1)
-            out1 = np.expand_dims(out1, axis=2)
-            input = fluid.dygraph.to_variable(input1)
-            output = paddle.unsqueeze(input, axis=paddle.to_tensor([1, 2]))
-            out_np = output.numpy()
-            self.assertTrue(np.array_equal(out1, out_np))
-            self.assertEqual(out1.shape, out_np.shape)
+        paddle.disable_static()
+        input1 = np.random.random([5, 10]).astype("int32")
+        out1 = np.expand_dims(input1, axis=1)
+        out1 = np.expand_dims(out1, axis=2)
+        input = paddle.to_tensor(input1)
+        output = paddle.unsqueeze(input, axis=paddle.to_tensor([1, 2]))
+        out_np = output.numpy()
+        self.assertTrue(np.array_equal(out1, out_np))
+        self.assertEqual(out1.shape, out_np.shape)
 
 
 class API_TestDyUnsqueezeAxisTensorList(unittest.TestCase):
     def test_out(self):
-        with fluid.dygraph.guard():
-            input1 = np.random.random([5, 10]).astype("int32")
-            # Actually, expand_dims supports tuple since version 1.18.0
-            out1 = np.expand_dims(input1, axis=1)
-            out1 = np.expand_dims(out1, axis=2)
-            input = fluid.dygraph.to_variable(input1)
-            output = paddle.unsqueeze(
-                fluid.dygraph.to_variable(input1),
-                axis=[paddle.to_tensor([1]), paddle.to_tensor([2])])
-            out_np = output.numpy()
-            self.assertTrue(np.array_equal(out1, out_np))
-            self.assertEqual(out1.shape, out_np.shape)
+        paddle.disable_static()
+        input1 = np.random.random([5, 10]).astype("int32")
+        # Actually, expand_dims supports tuple since version 1.18.0
+        out1 = np.expand_dims(input1, axis=1)
+        out1 = np.expand_dims(out1, axis=2)
+        input = paddle.to_tensor(input1)
+        output = paddle.unsqueeze(
+            paddle.to_tensor(input1),
+            axis=[paddle.to_tensor([1]), paddle.to_tensor([2])])
+        out_np = output.numpy()
+        self.assertTrue(np.array_equal(out1, out_np))
+        self.assertEqual(out1.shape, out_np.shape)
+
+
+class API_TestDygraphUnSqueeze(unittest.TestCase):
+    def test_out(self):
+        paddle.disable_static()
+        input_1 = np.random.random([5, 1, 10]).astype("int32")
+        input = paddle.to_tensor(input_1)
+        output = paddle.unsqueeze(input, axis=[1])
+        out_np = output.numpy()
+        expected_out = np.expand_dims(input_1, axis=1)
+        self.assertTrue(np.allclose(expected_out, out_np))
+
+    def test_out_int8(self):
+        paddle.disable_static()
+        input_1 = np.random.random([5, 1, 10]).astype("int8")
+        input = paddle.to_tensor(input_1)
+        output = paddle.unsqueeze(input, axis=[1])
+        out_np = output.numpy()
+        expected_out = np.expand_dims(input_1, axis=1)
+        self.assertTrue(np.allclose(expected_out, out_np))
+
+    def test_out_uint8(self):
+        paddle.disable_static()
+        input_1 = np.random.random([5, 1, 10]).astype("uint8")
+        input = paddle.to_tensor(input_1)
+        output = paddle.unsqueeze(input, axis=1)
+        out_np = output.numpy()
+        expected_out = np.expand_dims(input_1, axis=1)
+        self.assertTrue(np.allclose(expected_out, out_np))
+
+    def test_axis_not_list(self):
+        paddle.disable_static()
+        input_1 = np.random.random([5, 1, 10]).astype("int32")
+        input = paddle.to_tensor(input_1)
+        output = paddle.unsqueeze(input, axis=1)
+        out_np = output.numpy()
+        expected_out = np.expand_dims(input_1, axis=1)
+        self.assertTrue(np.allclose(expected_out, out_np))
+
+    def test_dimension_not_1(self):
+        paddle.disable_static()
+        input_1 = np.random.random([5, 1, 10]).astype("int32")
+        input = paddle.to_tensor(input_1)
+        output = paddle.unsqueeze(input, axis=(1, 2))
+        out_np = output.numpy()
+        expected_out = np.expand_dims(input_1, axis=1)
+        self.assertTrue(np.allclose(expected_out, out_np))
 
 
 if __name__ == "__main__":
--
GitLab