From 8ede1605005a09a3ccfa03bb8388313b6307593a Mon Sep 17 00:00:00 2001
From: TeslaZhao
Date: Mon, 12 Oct 2020 17:36:14 +0800
Subject: [PATCH] cherry-pick: Add double grad in Squeeze and Unsqueeze to
 release/1.8, … (#27843)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* cherry-pick: Add double grad in Squeeze and Unsqueeze to release/1.8, test=develop

Signed-off-by: TeslaZhao

* cherry-pick: Add double grad in Squeeze and Unsqueeze to release/1.8, test=develop
---
 paddle/fluid/operators/squeeze_op.cc          | 159 +++++++++++-------
 paddle/fluid/operators/unsqueeze_op.cc        |  36 +++-
 .../fluid/tests/unittests/test_nn_grad.py     |  49 +++++-
 .../fluid/tests/unittests/test_squeeze2_op.py |   1 +
 .../tests/unittests/test_unsqueeze2_op.py     |   1 +
 5 files changed, 184 insertions(+), 62 deletions(-)

diff --git a/paddle/fluid/operators/squeeze_op.cc b/paddle/fluid/operators/squeeze_op.cc
index 6ba56e0064a..479973a5daa 100644
--- a/paddle/fluid/operators/squeeze_op.cc
+++ b/paddle/fluid/operators/squeeze_op.cc
@@ -13,15 +13,73 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include "paddle/fluid/operators/squeeze_op.h"
+
 #include <memory>
 #include <string>
 #include <unordered_map>
 #include <vector>
+
 #include "paddle/fluid/framework/op_registry.h"
 
 namespace paddle {
 namespace operators {
 
+framework::DDim GetOutputShape(const std::vector<int> squeeze_dims,
+                               const framework::DDim &in_dims,
+                               bool is_runtime) {
+  size_t num_squeeze_dims = squeeze_dims.size();
+  std::vector<bool> should_squeeze(in_dims.size(), false);
+
+  // Mark dimensions need to be squeezed.
+  if (num_squeeze_dims == 0) {
+    for (int i = 0; i < in_dims.size(); ++i) {
+      if (in_dims[i] == 1) {
+        should_squeeze[i] = true;
+      }
+    }
+  } else {
+    for (size_t i = 0; i < num_squeeze_dims; ++i) {
+      int current = squeeze_dims[i] < 0 ? squeeze_dims[i] + in_dims.size()
+                                        : squeeze_dims[i];
+
+      PADDLE_ENFORCE_GE(
+          current, 0,
+          platform::errors::InvalidArgument(
+              "Each axis in Attr(axes) should be in the range of [%d, %d]"
+              "But current axis is:%d, input tensor's shape = [%s].",
+              -in_dims.size(), in_dims.size() - 1, current, in_dims));
+      PADDLE_ENFORCE_LT(
+          current, in_dims.size(),
+          platform::errors::InvalidArgument(
+              "Each axis in Attr(axes) should be in the range of [%d, %d]"
+              "But current axis is:%d, input tensor's shape = [%s].",
+              -in_dims.size(), in_dims.size() - 1, current, in_dims));
+
+      if (!should_squeeze[current]) {
+        if (is_runtime) {
+          // At run time, dim of 1 is allowed to squeeze
+          if (in_dims[current] == 1) {
+            should_squeeze[current] = true;
+          }
+        } else {
+          // At compile time, dim of -1 or 1 is allowed to squeeze
+          if (in_dims[current] == 1 || in_dims[current] == -1) {
+            should_squeeze[current] = true;
+          }
+        }
+      }
+    }
+  }
+  // Make output dimensions
+  std::vector<int64_t> output_shape;
+  for (int i = 0; i < in_dims.size(); ++i) {
+    if (!should_squeeze[i]) {
+      output_shape.push_back(in_dims[i]);
+    }
+  }
+  return framework::make_ddim(output_shape);
+}
+
 class SqueezeOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
@@ -40,7 +98,7 @@ class SqueezeOp : public framework::OperatorWithKernel {
                           x_dims.size(), x_dims));
 
     const auto &axes = ctx->Attrs().Get<std::vector<int>>("axes");
-    auto out_dims = GetOutputShape(axes, x_dims);
+    auto out_dims = GetOutputShape(axes, x_dims, false);
     ctx->SetOutputDim("Out", out_dims);
     if (x_dims[0] == out_dims[0]) {
       // Only pass LoD when the first dimension of output and Input(X)
@@ -49,56 +107,6 @@ class SqueezeOp : public framework::OperatorWithKernel {
     }
   }
 
-  static framework::DDim GetOutputShape(const std::vector<int> squeeze_dims,
-                                        const framework::DDim &in_dims) {
-    size_t num_squeeze_dims = squeeze_dims.size();
-    int cnt_squeezed_dims = 0;
-    bool should_squeeze[9] = {false};
-
-    // Determines number of dimensions of output tensor after squeeze.
-    // Mark and count the dimensions need to be squeezed
-    if (num_squeeze_dims == 0) {
-      for (int idx = 0; idx < in_dims.size(); ++idx) {
-        if (in_dims[idx] == 1) {
-          should_squeeze[idx] = true;
-          ++cnt_squeezed_dims;
-        }
-      }
-    } else {
-      for (size_t idx = 0; idx < num_squeeze_dims; ++idx) {
-        int current = squeeze_dims[idx] < 0 ? squeeze_dims[idx] + in_dims.size()
-                                            : squeeze_dims[idx];
-        PADDLE_ENFORCE_GE(
-            current, 0,
-            platform::errors::InvalidArgument(
-                "Each axis in Attr(axes) should be in the range of [%d, %d]"
-                "But current axis is:%d, input tensor's shape = [%s].",
-                -in_dims.size(), in_dims.size() - 1, current, in_dims));
-        PADDLE_ENFORCE_LT(
-            current, in_dims.size(),
-            platform::errors::InvalidArgument(
-                "Each axis in Attr(axes) should be in the range of [%d, %d]"
-                "But current axis is:%d, input tensor's shape = [%s].",
-                -in_dims.size(), in_dims.size() - 1, current, in_dims));
-
-        if (!(should_squeeze[current])) {
-          ++cnt_squeezed_dims;
-        }
-        should_squeeze[current] = true;
-      }
-    }
-
-    // Make output dimensions
-    std::vector<int64_t> output_shape(in_dims.size() - cnt_squeezed_dims, 0);
-    for (int in_idx = 0, out_idx = 0; in_idx < in_dims.size(); ++in_idx) {
-      if (!should_squeeze[in_idx]) {
-        output_shape[out_idx++] = in_dims[in_idx];
-      }
-    }
-
-    return framework::make_ddim(output_shape);
-  }
-
  protected:
   framework::OpKernelType GetExpectedKernelType(
       const framework::ExecutionContext &ctx) const override {
@@ -183,7 +191,7 @@ class Squeeze2Op : public framework::OperatorWithKernel {
 
     const auto &axes = ctx->Attrs().Get<std::vector<int>>("axes");
-    auto out_dims = SqueezeOp::GetOutputShape(axes, x_dims);
+    auto out_dims = GetOutputShape(axes, x_dims, false);
     ctx->SetOutputDim("Out", out_dims);
     if (x_dims[0] == out_dims[0]) {
       // Only pass LoD when the first dimension of output and Input(X)
@@ -241,6 +249,19 @@ class Squeeze2GradOp : public framework::OperatorWithKernel {
   }
 };
 
+template <typename T>
+class SqueezeDoubleGradOpMaker : public framework::SingleGradOpMaker<T> {
+ public:
+  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
+
+  void Apply(GradOpPtr<T> grad_op) const override {
+    grad_op->SetType("squeeze");
+    grad_op->SetInput("X", this->OutputGrad(framework::GradVarName("X")));
+    grad_op->SetOutput("Out", this->InputGrad(framework::GradVarName("Out")));
+    grad_op->SetAttrMap(this->Attrs());
+  }
+};
+
 // FIXME(zcd): squeeze2 adds an intermediate output(XShape) based on squeeze,
 // the XShape is used to carry the shape and lod of X which will be used in
 // squeeze_grad, in this way, the framework can reuse the memory of X
@@ -271,11 +292,25 @@ class Squeeze2GradOpMaker : public framework::SingleGradOpMaker<T> {
   }
 };
 
-DECLARE_INPLACE_OP_INFERER(SequeezeInplaceInferer, {"X", "Out"});
-DECLARE_INPLACE_OP_INFERER(SequeezeGradInplaceInferer,
+template <typename T>
+class Squeeze2DoubleGradOpMaker : public framework::SingleGradOpMaker<T> {
+ public:
+  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
+
+  void Apply(GradOpPtr<T> grad_op) const override {
+    grad_op->SetType("squeeze2");
+    grad_op->SetInput("X", this->OutputGrad(framework::GradVarName("X")));
+    grad_op->SetOutput("Out", this->InputGrad(framework::GradVarName("Out")));
+    grad_op->SetOutput("XShape", this->Input("XShape"));
+    grad_op->SetAttrMap(this->Attrs());
+  }
+};
+
+DECLARE_INPLACE_OP_INFERER(SqueezeInplaceInferer, {"X", "Out"});
+DECLARE_INPLACE_OP_INFERER(SqueezeGradInplaceInferer,
                            {framework::GradVarName("Out"),
                             framework::GradVarName("X")});
-DECLARE_NO_NEED_BUFFER_VARS_INFERER(SqueezeGradNoNeedBufferVarsInference, "X");
+DECLARE_NO_NEED_BUFFER_VARS_INFERER(SqueezeGradNoNeedBufferVarsInferer, "X");
 
 }  // namespace operators
 }  // namespace paddle
@@ -284,18 +319,23 @@ REGISTER_OPERATOR(squeeze, ops::SqueezeOp, ops::SqueezeOpMaker,
                   ops::SqueezeGradOpMaker<paddle::framework::OpDesc>,
                   ops::SqueezeGradOpMaker<paddle::imperative::OpBase>);
 REGISTER_OPERATOR(squeeze_grad, ops::SqueezeGradOp,
-                  ops::SqueezeGradNoNeedBufferVarsInference);
+                  ops::SqueezeDoubleGradOpMaker<paddle::framework::OpDesc>,
+                  ops::SqueezeDoubleGradOpMaker<paddle::imperative::OpBase>,
+                  ops::SqueezeGradNoNeedBufferVarsInferer);
 REGISTER_OPERATOR(squeeze2, ops::Squeeze2Op, ops::Squeeze2OpMaker,
                   ops::Squeeze2GradOpMaker<paddle::framework::OpDesc>,
                   ops::Squeeze2GradOpMaker<paddle::imperative::OpBase>,
-                  ops::SequeezeInplaceInferer);
+                  ops::SqueezeInplaceInferer);
 REGISTER_OPERATOR(squeeze2_grad, ops::Squeeze2GradOp,
-                  ops::SequeezeGradInplaceInferer);
+                  ops::Squeeze2DoubleGradOpMaker<paddle::framework::OpDesc>,
+                  ops::Squeeze2DoubleGradOpMaker<paddle::imperative::OpBase>,
+                  ops::SqueezeGradInplaceInferer);
 
 REGISTER_OP_CPU_KERNEL(
     squeeze, ops::SqueezeKernel<paddle::platform::CPUDeviceContext, float>,
     ops::SqueezeKernel<paddle::platform::CPUDeviceContext, double>,
+    ops::SqueezeKernel<paddle::platform::CPUDeviceContext, bool>,
     ops::SqueezeKernel<paddle::platform::CPUDeviceContext, int>,
     ops::SqueezeKernel<paddle::platform::CPUDeviceContext, int8_t>,
     ops::SqueezeKernel<paddle::platform::CPUDeviceContext, int64_t>);
@@ -303,12 +343,14 @@ REGISTER_OP_CPU_KERNEL(
     squeeze_grad,
     ops::SqueezeGradKernel<paddle::platform::CPUDeviceContext, float>,
     ops::SqueezeGradKernel<paddle::platform::CPUDeviceContext, double>,
+    ops::SqueezeGradKernel<paddle::platform::CPUDeviceContext, bool>,
     ops::SqueezeGradKernel<paddle::platform::CPUDeviceContext, int>,
     ops::SqueezeGradKernel<paddle::platform::CPUDeviceContext, int8_t>,
     ops::SqueezeGradKernel<paddle::platform::CPUDeviceContext, int64_t>);
 REGISTER_OP_CPU_KERNEL(
     squeeze2, ops::Squeeze2Kernel<paddle::platform::CPUDeviceContext, float>,
     ops::Squeeze2Kernel<paddle::platform::CPUDeviceContext, double>,
+    ops::Squeeze2Kernel<paddle::platform::CPUDeviceContext, bool>,
     ops::Squeeze2Kernel<paddle::platform::CPUDeviceContext, int>,
     ops::Squeeze2Kernel<paddle::platform::CPUDeviceContext, int8_t>,
     ops::Squeeze2Kernel<paddle::platform::CPUDeviceContext, int64_t>);
@@ -316,6 +358,7 @@ REGISTER_OP_CPU_KERNEL(
     squeeze2_grad,
     ops::Squeeze2GradKernel<paddle::platform::CPUDeviceContext, float>,
     ops::Squeeze2GradKernel<paddle::platform::CPUDeviceContext, double>,
+    ops::Squeeze2GradKernel<paddle::platform::CPUDeviceContext, bool>,
     ops::Squeeze2GradKernel<paddle::platform::CPUDeviceContext, int>,
     ops::Squeeze2GradKernel<paddle::platform::CPUDeviceContext, int8_t>,
     ops::Squeeze2GradKernel<paddle::platform::CPUDeviceContext, int64_t>);
diff --git a/paddle/fluid/operators/unsqueeze_op.cc b/paddle/fluid/operators/unsqueeze_op.cc
index e191481f3b1..4883cbd4b6a 100644
--- a/paddle/fluid/operators/unsqueeze_op.cc
+++ b/paddle/fluid/operators/unsqueeze_op.cc
@@ -226,6 +226,19 @@ class UnsqueezeGradOpMaker : public framework::SingleGradOpMaker<T> {
   }
 };
 
+template <typename T>
+class UnsqueezeDoubleGradOpMaker : public framework::SingleGradOpMaker<T> {
+ public:
+  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
+
+  void Apply(GradOpPtr<T> grad_op) const override {
+    grad_op->SetType("unsqueeze");
+    grad_op->SetInput("X", this->OutputGrad(framework::GradVarName("X")));
+    grad_op->SetOutput("Out", this->InputGrad(framework::GradVarName("Out")));
+    grad_op->SetAttrMap(this->Attrs());
+  }
+};
+
 // FIXME(zcd): unsqueeze2 adds an intermediate output(XShape) based on
 // unsqueeze, the XShape is used to carry the shape and lod of X which
 // will be used in unsqueeze_grad, in this way, the framework can reuse
@@ -302,12 +315,25 @@ class Unsqueeze2GradOp : public framework::OperatorWithKernel {
   }
 };
 
+template <typename T>
+class Unsqueeze2DoubleGradOpMaker : public framework::SingleGradOpMaker<T> {
+ public:
+  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
+
+  void Apply(GradOpPtr<T> grad_op) const override {
+    grad_op->SetType("unsqueeze2");
+    grad_op->SetInput("X", this->OutputGrad(framework::GradVarName("X")));
+    grad_op->SetOutput("Out", this->InputGrad(framework::GradVarName("Out")));
+    grad_op->SetOutput("XShape", this->Input("XShape"));
+    grad_op->SetAttrMap(this->Attrs());
+  }
+};
+
 DECLARE_INPLACE_OP_INFERER(UnsqueezeInplaceInferer, {"X", "Out"});
 DECLARE_INPLACE_OP_INFERER(UnsqueezeGradInplaceInferer,
                            {framework::GradVarName("Out"),
                             framework::GradVarName("X")});
-DECLARE_NO_NEED_BUFFER_VARS_INFERER(UnsqueezeGradOpNoNeedBufferVarInference,
-                                    "X");
+DECLARE_NO_NEED_BUFFER_VARS_INFERER(UnsqueezeGradOpNoNeedBufferVarInferer, "X");
 
 }  // namespace operators
 }  // namespace paddle
@@ -316,13 +342,17 @@ REGISTER_OPERATOR(unsqueeze, ops::UnsqueezeOp, ops::UnsqueezeOpMaker,
                   ops::UnsqueezeGradOpMaker<paddle::framework::OpDesc>,
                   ops::UnsqueezeGradOpMaker<paddle::imperative::OpBase>);
 REGISTER_OPERATOR(unsqueeze_grad, ops::UnsqueezeGradOp,
-                  ops::UnsqueezeGradOpNoNeedBufferVarInference);
+                  ops::UnsqueezeDoubleGradOpMaker<paddle::framework::OpDesc>,
+                  ops::UnsqueezeDoubleGradOpMaker<paddle::imperative::OpBase>,
+                  ops::UnsqueezeGradOpNoNeedBufferVarInferer);
 REGISTER_OPERATOR(unsqueeze2, ops::Unsqueeze2Op, ops::Unsqueeze2OpMaker,
                   ops::Unsqueeze2GradOpMaker<paddle::framework::OpDesc>,
                   ops::Unsqueeze2GradOpMaker<paddle::imperative::OpBase>,
                   ops::UnsqueezeInplaceInferer);
 REGISTER_OPERATOR(unsqueeze2_grad, ops::Unsqueeze2GradOp,
+                  ops::Unsqueeze2DoubleGradOpMaker<paddle::framework::OpDesc>,
+                  ops::Unsqueeze2DoubleGradOpMaker<paddle::imperative::OpBase>,
                   ops::UnsqueezeGradInplaceInferer);
 
 REGISTER_OP_CPU_KERNEL(
diff --git a/python/paddle/fluid/tests/unittests/test_nn_grad.py b/python/paddle/fluid/tests/unittests/test_nn_grad.py
index 72f95ff77cb..0e8b248e762 100644
--- a/python/paddle/fluid/tests/unittests/test_nn_grad.py
+++ b/python/paddle/fluid/tests/unittests/test_nn_grad.py
@@ -21,7 +21,6 @@ import paddle.fluid as fluid
 import paddle.fluid.layers as layers
 import paddle.fluid.core as core
 import gradient_checker
-
 from decorator_helper import prog_scope
 
 
@@ -232,5 +231,53 @@ class TestExpandDoubleGradCheck(unittest.TestCase):
             self.func(p)
 
 
+class TestSqueezeDoubleGradCheck(unittest.TestCase):
+    @prog_scope()
+    def func(self, place):
+        x_shape = [1, 3, 1, 40]
+        axes = [0, 2]
+        eps = 0.005
+        dtype = np.float64
+
+        x = layers.data('x', x_shape, False, dtype)
+        x.persistable = True
+        out = layers.squeeze(x, axes)
+        x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)
+
+        gradient_checker.double_grad_check(
+            [x], out, x_init=x_arr, place=place, eps=eps)
+
+    def test_grad(self):
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)
+
+
+class TestUnsqueezeDoubleGradCheck(unittest.TestCase):
+    @prog_scope()
+    def func(self, place):
+        x_shape = [3, 40]
+        axes = [1, 2]
+        eps = 0.005
+        dtype = np.float64
+
+        x = layers.data('x', x_shape, False, dtype)
+        x.persistable = True
+        out = layers.unsqueeze(x, axes)
+        x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)
+
+        gradient_checker.double_grad_check(
+            [x], out, x_init=x_arr, place=place, eps=eps)
+
+    def test_grad(self):
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_squeeze2_op.py b/python/paddle/fluid/tests/unittests/test_squeeze2_op.py
index a1879c72459..a77a3de7cee 100644
--- a/python/paddle/fluid/tests/unittests/test_squeeze2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_squeeze2_op.py
@@ -18,6 +18,7 @@ import unittest
 
 import numpy as np
 from op_test import OpTest
+import paddle
 
 
 # Correct: General.
diff --git a/python/paddle/fluid/tests/unittests/test_unsqueeze2_op.py b/python/paddle/fluid/tests/unittests/test_unsqueeze2_op.py
index 340d22acbfb..337ca0d8129 100644
--- a/python/paddle/fluid/tests/unittests/test_unsqueeze2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_unsqueeze2_op.py
@@ -18,6 +18,7 @@ import unittest
 
 import numpy as np
 import paddle.fluid as fluid
 from op_test import OpTest
+import paddle
 
 
 # Correct: General.
-- 
GitLab
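
Note (not part of the patch): for intuition, here is a minimal NumPy sketch of the rule the new double-grad makers encode. Because squeeze/unsqueeze only reshape data, the first-order gradient reshapes dOut back to X's shape, and differentiating that gradient with respect to its incoming perturbation ddX is simply the forward reshape applied once more; this is why SqueezeDoubleGradOpMaker emits a plain "squeeze" op whose X is the grad of X's grad. The variable names below are illustrative, not Paddle identifiers.

# Minimal NumPy sketch (not Paddle code) of the squeeze double-grad rule.
import numpy as np

x = np.random.rand(1, 3, 1, 40)         # same shape and axes as the test above
out = np.squeeze(x, axis=(0, 2))        # forward: (1, 3, 1, 40) -> (3, 40)

d_out = np.ones_like(out)               # upstream gradient dL/dOut
d_x = d_out.reshape(x.shape)            # first grad: unsqueeze dOut back to X

dd_x = np.ones_like(d_x)                # perturbation of the first grad (DDX)
dd_out = np.squeeze(dd_x, axis=(0, 2))  # double grad: the forward squeeze again

assert dd_out.shape == out.shape        # mirrors grad_op->SetType("squeeze")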