diff --git a/paddle/fluid/operators/squeeze_op.cc b/paddle/fluid/operators/squeeze_op.cc
index b658e78629cc2a1e107d7aebf1f5895c15fd4177..859776bc2a0f0056224b69f74a7e423ff2dd0a01 100644
--- a/paddle/fluid/operators/squeeze_op.cc
+++ b/paddle/fluid/operators/squeeze_op.cc
@@ -13,15 +13,73 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "paddle/fluid/operators/squeeze_op.h"
+
 #include
 #include
 #include
 #include
+
 #include "paddle/fluid/framework/op_registry.h"

 namespace paddle {
 namespace operators {

+framework::DDim GetOutputShape(const std::vector<int> squeeze_dims,
+                               const framework::DDim &in_dims,
+                               bool is_runtime) {
+  size_t num_squeeze_dims = squeeze_dims.size();
+  std::vector<bool> should_squeeze(in_dims.size(), false);
+
+  // Mark dimensions that need to be squeezed.
+  if (num_squeeze_dims == 0) {
+    for (int i = 0; i < in_dims.size(); ++i) {
+      if (in_dims[i] == 1) {
+        should_squeeze[i] = true;
+      }
+    }
+  } else {
+    for (size_t i = 0; i < num_squeeze_dims; ++i) {
+      int current = squeeze_dims[i] < 0 ? squeeze_dims[i] + in_dims.size()
+                                        : squeeze_dims[i];
+
+      PADDLE_ENFORCE_GE(
+          current, 0,
+          platform::errors::InvalidArgument(
+              "Each axis in Attr(axes) should be in the range of [%d, %d]. "
+              "But current axis is %d, input tensor's shape = [%s].",
+              -in_dims.size(), in_dims.size() - 1, current, in_dims));
+      PADDLE_ENFORCE_LT(
+          current, in_dims.size(),
+          platform::errors::InvalidArgument(
+              "Each axis in Attr(axes) should be in the range of [%d, %d]. "
+              "But current axis is %d, input tensor's shape = [%s].",
+              -in_dims.size(), in_dims.size() - 1, current, in_dims));
+
+      if (!should_squeeze[current]) {
+        if (is_runtime) {
+          // At run time, only a dim of 1 is allowed to be squeezed.
+          if (in_dims[current] == 1) {
+            should_squeeze[current] = true;
+          }
+        } else {
+          // At compile time, a dim of -1 or 1 is allowed to be squeezed.
+          if (in_dims[current] == 1 || in_dims[current] == -1) {
+            should_squeeze[current] = true;
+          }
+        }
+      }
+    }
+  }
+  // Make output dimensions.
+  std::vector<int64_t> output_shape;
+  for (int i = 0; i < in_dims.size(); ++i) {
+    if (!should_squeeze[i]) {
+      output_shape.push_back(in_dims[i]);
+    }
+  }
+  return framework::make_ddim(output_shape);
+}
+
 class SqueezeOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
@@ -40,7 +98,7 @@ class SqueezeOp : public framework::OperatorWithKernel {
                           x_dims.size(), x_dims));

     const auto &axes = ctx->Attrs().Get<std::vector<int>>("axes");
-    auto out_dims = GetOutputShape(axes, x_dims);
+    auto out_dims = GetOutputShape(axes, x_dims, false);
     ctx->SetOutputDim("Out", out_dims);
     if (x_dims[0] == out_dims[0]) {
       // Only pass LoD when the first dimension of output and Input(X)
@@ -49,56 +107,6 @@ class SqueezeOp : public framework::OperatorWithKernel {
     }
   }

-  static framework::DDim GetOutputShape(const std::vector<int> squeeze_dims,
-                                        const framework::DDim &in_dims) {
-    size_t num_squeeze_dims = squeeze_dims.size();
-    int cnt_squeezed_dims = 0;
-    bool should_squeeze[9] = {false};
-
-    // Determines number of dimensions of output tensor after squeeze.
-    // Mark and count the dimensions need to be squeezed
-    if (num_squeeze_dims == 0) {
-      for (int idx = 0; idx < in_dims.size(); ++idx) {
-        if (in_dims[idx] == 1) {
-          should_squeeze[idx] = true;
-          ++cnt_squeezed_dims;
-        }
-      }
-    } else {
-      for (size_t idx = 0; idx < num_squeeze_dims; ++idx) {
-        int current = squeeze_dims[idx] < 0 ? squeeze_dims[idx] + in_dims.size()
-                                            : squeeze_dims[idx];
-        PADDLE_ENFORCE_GE(
-            current, 0,
-            platform::errors::InvalidArgument(
-                "Each axis in Attr(axes) should be in the range of [%d, %d]"
-                "But current axis is:%d, input tensor's shape = [%s].",
-                -in_dims.size(), in_dims.size() - 1, current, in_dims));
-        PADDLE_ENFORCE_LT(
-            current, in_dims.size(),
-            platform::errors::InvalidArgument(
-                "Each axis in Attr(axes) should be in the range of [%d, %d]"
-                "But current axis is:%d, input tensor's shape = [%s].",
-                -in_dims.size(), in_dims.size() - 1, current, in_dims));
-
-        if (!(should_squeeze[current])) {
-          ++cnt_squeezed_dims;
-        }
-        should_squeeze[current] = true;
-      }
-    }
-
-    // Make output dimensions
-    std::vector<int64_t> output_shape(in_dims.size() - cnt_squeezed_dims, 0);
-    for (int in_idx = 0, out_idx = 0; in_idx < in_dims.size(); ++in_idx) {
-      if (!should_squeeze[in_idx]) {
-        output_shape[out_idx++] = in_dims[in_idx];
-      }
-    }
-
-    return framework::make_ddim(output_shape);
-  }
-
  protected:
   framework::OpKernelType GetExpectedKernelType(
       const framework::ExecutionContext &ctx) const override {
@@ -183,7 +191,7 @@ class Squeeze2Op : public framework::OperatorWithKernel {

     const auto &axes = ctx->Attrs().Get<std::vector<int>>("axes");
-    auto out_dims = SqueezeOp::GetOutputShape(axes, x_dims);
+    auto out_dims = GetOutputShape(axes, x_dims, false);
     ctx->SetOutputDim("Out", out_dims);
     if (x_dims[0] == out_dims[0]) {
       // Only pass LoD when the first dimension of output and Input(X)
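Note on the squeeze_op.cc change above: GetOutputShape is now a free function shared by compile-time shape inference (is_runtime = false) and by the kernels in squeeze_op.h (is_runtime = true), and an axis whose dimension is not 1 is simply kept rather than squeezed or rejected. A minimal Python sketch of that rule, with illustrative names only (not the operator code itself):

```python
def infer_squeeze_shape(in_shape, axes, is_runtime):
    """Sketch of the shape rule implemented by GetOutputShape above (illustrative only)."""
    ndim = len(in_shape)
    should_squeeze = [False] * ndim
    if not axes:
        # No axes given: drop every dimension of size 1.
        should_squeeze = [d == 1 for d in in_shape]
    else:
        for axis in axes:
            current = axis + ndim if axis < 0 else axis  # resolve negative axes
            if not 0 <= current < ndim:
                raise ValueError("axis %d out of range for shape %s" % (axis, in_shape))
            if is_runtime:
                # At run time only a real size-1 dim may be squeezed.
                should_squeeze[current] = in_shape[current] == 1
            else:
                # At compile time -1 (unknown size) may also be squeezed.
                should_squeeze[current] = in_shape[current] in (1, -1)
    return [d for d, s in zip(in_shape, should_squeeze) if not s]

# infer_squeeze_shape([1, 3, 1, 5], [0, 2, 3], is_runtime=True) -> [3, 5]
```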
diff --git a/paddle/fluid/operators/squeeze_op.h b/paddle/fluid/operators/squeeze_op.h
index e8e53bb0f4fcd5c71776092ce429be36ac63fc25..2f621c11e58f6efbf58a58aa7e23739992052ca0 100644
--- a/paddle/fluid/operators/squeeze_op.h
+++ b/paddle/fluid/operators/squeeze_op.h
@@ -15,6 +15,7 @@ limitations under the License. */
 #pragma once

 #include
+
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/operators/math/blas.h"
 #include "paddle/fluid/operators/math/math_function.h"
@@ -24,6 +25,9 @@ limitations under the License. */
 namespace paddle {
 namespace operators {

+framework::DDim GetOutputShape(const std::vector<int> squeeze_dims,
+                               const framework::DDim &in_dims, bool is_runtime);
+
 template <typename DeviceContext, typename T>
 class SqueezeKernel : public framework::OpKernel<T> {
  public:
@@ -33,7 +37,7 @@ class SqueezeKernel : public framework::OpKernel<T> {
     auto &axes = context.Attr<std::vector<int>>("axes");

     auto x_dims = in->dims();
-    auto out_dims = GetOutputShape(axes, x_dims);
+    auto out_dims = GetOutputShape(axes, x_dims, true);

     out->mutable_data(context.GetPlace(), in->type());
     framework::TensorCopy(
@@ -41,64 +45,6 @@ class SqueezeKernel : public framework::OpKernel<T> {
         context.template device_context<DeviceContext>(), out);
     out->Resize(out_dims);
   }
-
-  static framework::DDim GetOutputShape(const std::vector<int> squeeze_dims,
-                                        const framework::DDim &in_dims) {
-    size_t num_squeeze_dims = squeeze_dims.size();
-    int cnt_squeezed_dims = 0;
-    bool should_squeeze[9] = {false};
-
-    // Determines number of dimensions of output tensor after squeeze.
-    // Mark and count the dimensions need to be squeezed
-    if (num_squeeze_dims == 0) {
-      for (int idx = 0; idx < in_dims.size(); ++idx) {
-        if (in_dims[idx] == 1) {
-          should_squeeze[idx] = true;
-          ++cnt_squeezed_dims;
-        }
-      }
-    } else {
-      for (size_t idx = 0; idx < num_squeeze_dims; ++idx) {
-        int current = squeeze_dims[idx] < 0 ? squeeze_dims[idx] + in_dims.size()
-                                            : squeeze_dims[idx];
-
-        PADDLE_ENFORCE_GE(
-            current, 0,
-            platform::errors::InvalidArgument(
-                "Each axis in Attr(axes) should be in the range of [%d, %d]"
-                "But current axis is:%d, input tensor's shape = [%s].",
-                -in_dims.size(), in_dims.size() - 1, current, in_dims));
-        PADDLE_ENFORCE_LT(
-            current, in_dims.size(),
-            platform::errors::InvalidArgument(
-                "Each axis in Attr(axes) should be in the range of [%d, %d]"
-                "But current axis is:%d, input tensor's shape = [%s].",
-                -in_dims.size(), in_dims.size() - 1, current, in_dims));
-
-        PADDLE_ENFORCE_EQ(in_dims[current], 1,
-                          platform::errors::InvalidArgument(
-                              "The size of axis that will be squeezed "
-                              "should be equal to 1. But current axis = %d,"
-                              "input tensor's shape = [%s].",
-                              in_dims[current], in_dims));
-
-        if (!(should_squeeze[current])) {
-          ++cnt_squeezed_dims;
-        }
-        should_squeeze[current] = true;
-      }
-    }
-
-    // Make output dimensions
-    std::vector<int64_t> output_shape(in_dims.size() - cnt_squeezed_dims, 0);
-    for (int in_idx = 0, out_idx = 0; in_idx < in_dims.size(); ++in_idx) {
-      if (!should_squeeze[in_idx]) {
-        output_shape[out_idx++] = in_dims[in_idx];
-      }
-    }
-
-    return framework::make_ddim(output_shape);
-  }
 };

 template <typename DeviceContext, typename T>
@@ -126,8 +72,7 @@ class Squeeze2Kernel : public framework::OpKernel<T> {
     auto &axes = context.Attr<std::vector<int>>("axes");

     auto x_dims = in->dims();
-    auto out_dims =
-        SqueezeKernel<DeviceContext, T>::GetOutputShape(axes, x_dims);
+    auto out_dims = GetOutputShape(axes, x_dims, true);

     out->mutable_data(context.GetPlace(), in->type());
     framework::TensorCopy(
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index c511865e0be7e2b8b54f805781f9566c9758dbb5..a894dbd005707f6c2aeac2ef9d94b4d9966c1d43 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -6203,6 +6203,10 @@ def squeeze(input, axes, name=None):
             y = layers.squeeze(input=x, axes=[2]) # y.shape=[None, 5, 10]

     """
+    if in_dygraph_mode():
+        out, _ = core.ops.squeeze2(input, 'axes', axes)
+        return out
+
     helper = LayerHelper("squeeze", **locals())
     check_variable_and_dtype(
         input, 'input',
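The nn.py hunk adds a dygraph fast path: in imperative mode, fluid.layers.squeeze now calls the squeeze2 op directly via core.ops.squeeze2 instead of building a static-graph op through LayerHelper. A small usage sketch of the path this exercises, assuming the fluid dygraph API used elsewhere in this PR:

```python
import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard():
    x = fluid.dygraph.to_variable(
        np.random.random([5, 1, 10]).astype("float32"))
    # In dygraph mode this now dispatches straight to core.ops.squeeze2.
    y = fluid.layers.squeeze(x, axes=[1])
    print(y.shape)  # [5, 10]
```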
diff --git a/python/paddle/fluid/tests/unittests/test_squeeze_op.py b/python/paddle/fluid/tests/unittests/test_squeeze_op.py
index 75f474052cc94c02b3e58899f38893754299a33f..5ab13cec540aace8a2796a07d64f59ab5c332246 100644
--- a/python/paddle/fluid/tests/unittests/test_squeeze_op.py
+++ b/python/paddle/fluid/tests/unittests/test_squeeze_op.py
@@ -70,6 +70,14 @@ class TestSqueezeOp3(TestSqueezeOp):
         self.new_shape = (6, 5, 1, 4)


+# Correct: A dimension that is not of size 1 along a given axis remains unchanged.
+class TestSqueezeOp4(TestSqueezeOp):
+    def init_test_case(self):
+        self.ori_shape = (6, 1, 5, 1, 4, 1)
+        self.axes = (1, 2)
+        self.new_shape = (6, 5, 1, 4, 1)
+
+
 class TestSqueezeOpError(unittest.TestCase):
     def test_errors(self):
         with program_guard(Program(), Program()):
@@ -90,7 +98,7 @@ class API_TestSqueeze(unittest.TestCase):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
             data1 = fluid.layers.data(
                 'data1', shape=[-1, 1, 10], dtype='float64')
-            result_squeeze = paddle.squeeze(data1, axes=[1])
+            result_squeeze = paddle.squeeze(data1, axis=[1])
             place = fluid.CPUPlace()
             exe = fluid.Executor(place)
             input1 = np.random.random([5, 1, 10]).astype('float64')
@@ -105,7 +113,25 @@ class API_TestDygraphSqueeze(unittest.TestCase):
         with fluid.dygraph.guard():
             input_1 = np.random.random([5, 1, 10]).astype("int32")
             input = fluid.dygraph.to_variable(input_1)
-            output = paddle.squeeze(input, axes=[1])
+            output = paddle.squeeze(input, axis=[1])
+            out_np = output.numpy()
+            expected_out = np.squeeze(input_1, axis=1)
+            self.assertTrue(np.allclose(expected_out, out_np))
+
+    def test_axis_not_list(self):
+        with fluid.dygraph.guard():
+            input_1 = np.random.random([5, 1, 10]).astype("int32")
+            input = fluid.dygraph.to_variable(input_1)
+            output = paddle.squeeze(input, axis=1)
+            out_np = output.numpy()
+            expected_out = np.squeeze(input_1, axis=1)
+            self.assertTrue(np.allclose(expected_out, out_np))
+
+    def test_dimension_not_1(self):
+        with fluid.dygraph.guard():
+            input_1 = np.random.random([5, 1, 10]).astype("int32")
+            input = fluid.dygraph.to_variable(input_1)
+            output = paddle.squeeze(input, axis=(1, 2))
             out_np = output.numpy()
             expected_out = np.squeeze(input_1, axis=1)
             self.assertTrue(np.allclose(expected_out, out_np))
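The new test_dimension_not_1 case pins down a behavioral difference from NumPy that follows from the relaxed shape rule above: np.squeeze raises when a requested axis does not have size 1, whereas paddle.squeeze (as changed in this PR) simply keeps that dimension. A small illustration, assuming the dygraph setup used in these tests:

```python
import numpy as np
import paddle
import paddle.fluid as fluid

a = np.random.random([5, 1, 10]).astype("float32")

try:
    np.squeeze(a, axis=(1, 2))  # axis 2 has size 10, so NumPy refuses
except ValueError as e:
    print("numpy rejects it:", e)

with fluid.dygraph.guard():
    x = fluid.dygraph.to_variable(a)
    y = paddle.squeeze(x, axis=(1, 2))  # axis 2 is silently kept
    print(y.shape)  # [5, 10]
```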
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index a98a07d3dbdcd95606f3d5348e233ae148624811..65d7eccb1a86c9c0e28f81bdddc30b1492cfd674 100644
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -40,6 +40,7 @@ from ..fluid.layers import scatter_nd_add #DEFINE_ALIAS
 from ..fluid.layers import scatter_nd #DEFINE_ALIAS
 from ..fluid.layers import shard_index #DEFINE_ALIAS
 from ..fluid.layers import unique_with_counts #DEFINE_ALIAS
+from ..fluid import layers

 __all__ = [
     'cast', 'concat', 'expand', 'expand_as', 'flatten', 'gather', 'gather_nd',
@@ -442,83 +443,81 @@ def split(input, num_or_sections, dim=-1, name=None):
     return outs


-def squeeze(input, axes, out=None, name=None):
+def squeeze(x, axis=None, name=None):
     """
     :alias_main: paddle.squeeze
-    :alias: paddle.squeeze,paddle.tensor.squeeze,paddle.tensor.manipulation.squeeze
+    :alias: paddle.squeeze, paddle.tensor.squeeze, paddle.tensor.manipulation.squeeze

-    This OP will squeeze single-dimensional entries of input tensor's shape. If axes is provided, will
-    remove the dims by axes, the dims selected by axes should be one. If not provide axes, all dims equal
-    to one will be deleted.
+    This OP will squeeze the dimension(s) of size 1 in the shape of input tensor x.
+    If axis is provided, only the dimensions of size 1 at the given axes are removed.
+    If the dimension at a given axis is not of size 1, that dimension remains unchanged.
+    If axis is not provided, all dimensions of size 1 are removed.

     .. code-block:: text

         Case1:

           Input:
-            X.shape = (1, 3, 1, 5)
-            axes = [0]
+            x.shape = [1, 3, 1, 5]  # If axis is not provided, all dimensions of size 1 are removed.
+            axis = None
           Output:
-            Out.shape = (3, 1, 5)
+            out.shape = [3, 5]

         Case2:

           Input:
-            X.shape = (1, 3, 1, 5)
-            axes = []
+            x.shape = [1, 3, 1, 5]  # If axis is provided, only the dimension of size 1 at that axis is removed.
+            axis = 0
+          Output:
+            out.shape = [3, 1, 5]
+
+        Case3:
+
+          Input:
+            x.shape = [1, 3, 1, 5]  # If the dimension at a given axis (here axis 3) is not of size 1, it remains unchanged.
+            axis = [0, 2, 3]
           Output:
-            Out.shape = (3, 5)
+            out.shape = [3, 5]

-        Case3:
+        Case4:

           Input:
-            X.shape = [1,3,1,5]
-            axes = [-2]
+            x.shape = [1, 3, 1, 5]  # If axis is negative, axis = axis + ndim (number of dimensions in x).
+            axis = [-2]
           Output:
-            Out.shape = [1,3,5]
+            out.shape = [1, 3, 5]

     Args:
-        input (Variable): The input Tensor. Support data type: float32, float64, int8, int32, int64.
-        axes (list): One integer or List of integers, indicating the dimensions to be squeezed.
-                      Axes range is :math:`[-rank(input), rank(input))`.
-                      If axes is negative, :math:`axes=axes+rank(input)`.
+        x (Tensor): The input Tensor. Supported data types: float32, float64, int8, int32, int64.
+        axis (int|list|tuple, optional): An integer or a list/tuple of integers, indicating the dimensions to be squeezed. Default is None.
+                      The range of axis is :math:`[-ndim(x), ndim(x))`.
+                      If axis is negative, :math:`axis = axis + ndim(x)`.
+                      If axis is None, all the dimensions of x of size 1 will be removed.
         name (str, optional): Please refer to :ref:`api_guide_Name`, Default None.

     Returns:
-        Variable: Output squeezed Tensor. Data type is same as input Tensor.
+        Tensor: Output squeezed Tensor. Data type is same as input Tensor.

     Examples:
         .. code-block:: python

-            import numpy as np
             import paddle
-            import paddle.fluid as fluid
-            with fluid.dygraph.guard():
-                input_1 = np.random.random([5, 1, 10]).astype("int32")
-                # input is a variable which shape is [5, 1, 10]
-                input = fluid.dygraph.to_variable(input_1)
-
-                output = paddle.squeeze(input, axes=[1])
-                # output.shape [5, 10]

+            paddle.enable_imperative()
+
+            x = paddle.rand([5, 1, 10])
+            output = paddle.squeeze(x, axis=1)
+            # output.shape [5, 10]
     """
+    if axis is None:
+        axis = []
+    elif isinstance(axis, int):
+        axis = [axis]
+    elif isinstance(axis, tuple):
+        axis = list(axis)

-    helper = LayerHelper("squeeze", **locals())
-    check_variable_and_dtype(input, 'input',
-                             ['float32', 'float64', 'int8', 'int32', 'int64'],
-                             'squeeze')
-    check_type(axes, 'axes', list, 'squeeze')
-    out = helper.create_variable_for_type_inference(dtype=input.dtype)
-    x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
-    helper.append_op(
-        type="squeeze2",
-        inputs={"X": input},
-        attrs={"axes": axes},
-        outputs={"Out": out,
-                 "XShape": x_shape})
-
-    return out
+    return layers.squeeze(x, axis, name)


 def unsqueeze(input, axes, out=None, name=None):
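Taken together, the new paddle.squeeze accepts axis as None, an int, a list, or a tuple, normalizes it to a list, and forwards to fluid.layers.squeeze (and from there to the squeeze2 op). A short usage sketch matching the cases in the docstring above (paddle.enable_imperative() and paddle.rand as used in that docstring; the exact API of this paddle version is assumed):

```python
import paddle

paddle.enable_imperative()

x = paddle.rand([1, 3, 1, 5])

print(paddle.squeeze(x).shape)                  # axis=None   -> [3, 5]
print(paddle.squeeze(x, axis=0).shape)          # single int  -> [3, 1, 5]
print(paddle.squeeze(x, axis=(0, 2, 3)).shape)  # axis 3 (size 5) is kept -> [3, 5]
print(paddle.squeeze(x, axis=[-2]).shape)       # negative axis -> [1, 3, 5]
```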