From c3a87e3deebadbc50f3111a5b789583f43cc0ad4 Mon Sep 17 00:00:00 2001
From: Double_V
Date: Sun, 8 Mar 2020 17:46:13 +0800
Subject: [PATCH] support slice double grad, test=develop (#22166) (#22836)

* support slice double grad, test=develop

* merge two doublegradopmaker to one doublegradopmaker,test=develop

* change the shape of slice_OP's unittest, test=develop
---
 paddle/fluid/operators/slice_op.cc                 | 30 ++++++++++++++++
 .../fluid/tests/unittests/test_nn_grad.py          | 35 +++++++++++++++++++
 2 files changed, 65 insertions(+)

diff --git a/paddle/fluid/operators/slice_op.cc b/paddle/fluid/operators/slice_op.cc
index 9d96f9670c..f25b08bbba 100644
--- a/paddle/fluid/operators/slice_op.cc
+++ b/paddle/fluid/operators/slice_op.cc
@@ -291,6 +291,34 @@ class SliceOpGradMaker : public framework::SingleGradOpMaker<T> {
   }
 };
 
+template <typename T>
+class SliceDoubleOpGradMaker : public framework::SingleGradOpMaker<T> {
+ public:
+  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
+
+ protected:
+  std::unique_ptr<T> Apply() const override {
+    auto *bind = new T();
+    if (this->HasInput("StartsTensor")) {
+      bind->SetInput("StartsTensor", this->Input("StartsTensor"));
+    }
+    if (this->HasInput("EndsTensor")) {
+      bind->SetInput("EndsTensor", this->Input("EndsTensor"));
+    }
+    if (this->HasInput("StartsTensorList")) {
+      bind->SetInput("StartsTensorList", this->Input("StartsTensorList"));
+    }
+    if (this->HasInput("EndsTensorList")) {
+      bind->SetInput("EndsTensorList", this->Input("EndsTensorList"));
+    }
+    bind->SetInput("Input", this->OutputGrad(framework::GradVarName("Input")));
+    bind->SetOutput("Out", this->InputGrad(framework::GradVarName("Out")));
+    bind->SetAttrMap(this->Attrs());
+    bind->SetType("slice");
+    return std::unique_ptr<T>(bind);
+  }
+};
+
 DECLARE_NO_NEED_BUFFER_VARS_INFERENCE(SliceOpGradNoNeedBufferVarsInference,
                                       "Input");
 
@@ -302,6 +330,8 @@ REGISTER_OPERATOR(slice, ops::SliceOp, ops::SliceOpMaker,
                   ops::SliceOpGradMaker<paddle::framework::OpDesc>,
                   ops::SliceOpGradMaker<paddle::imperative::OpBase>);
 REGISTER_OPERATOR(slice_grad, ops::SliceOpGrad,
+                  ops::SliceDoubleOpGradMaker<paddle::framework::OpDesc>,
+                  ops::SliceDoubleOpGradMaker<paddle::imperative::OpBase>,
                   ops::SliceOpGradNoNeedBufferVarsInference);
 
 REGISTER_OP_CPU_KERNEL(
diff --git a/python/paddle/fluid/tests/unittests/test_nn_grad.py b/python/paddle/fluid/tests/unittests/test_nn_grad.py
index 4e9703fa9b..c6cfe01dce 100644
--- a/python/paddle/fluid/tests/unittests/test_nn_grad.py
+++ b/python/paddle/fluid/tests/unittests/test_nn_grad.py
@@ -43,6 +43,41 @@ class TestMulGradCheck(unittest.TestCase):
             self.func(p)
 
 
+class TestSliceOpDoubleGradCheck(unittest.TestCase):
+    def func(self, place):
+        self.config()
+
+        out = fluid.layers.slice(
+            self.inputs, axes=self.axes, starts=self.starts, ends=self.ends)
+        gradient_checker.double_grad_check(
+            [self.inputs], out, x_init=self.x_arr, place=place)
+
+    def config(self):
+        self.starts = [1, 0, -1]
+        self.ends = [3, 3, 6]
+        self.axes = [0, 1, 2]
+        self.x_arr = np.random.random([3, 4, 5, 2]).astype("float64")
+        self.inputs = layers.create_parameter(
+            dtype="float64", shape=[3, 4, 5, 2], name='x')
+
+    def test_grad(self):
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for place in places:
+            self.func(place)
+
+
+class TestSliceOpDoubleGradCheckCase3(TestSliceOpDoubleGradCheck):
+    def config(self):
+        self.starts = [1, -1, 1]
+        self.ends = [3, 3, 3]
+        self.axes = [0, 1, 2]
+        self.x_arr = np.random.random([3, 3, 3]).astype("float64")
+        self.inputs = layers.create_parameter(
+            dtype="float64", shape=[3, 3, 3], name='x3')
+
+
 class TestReduceMeanWithDimDoubleGradCheck(unittest.TestCase):
     @prog_scope()
     def func(self, place):
--
GitLab
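
Editor's note, not part of the patch: a minimal NumPy sketch of why the double-grad op can be emitted as a plain "slice" op. Slice is a linear selection, so slice_grad is its transpose (a scatter), and the derivative of slice_grad with respect to its incoming gradient is again the original slice; that is exactly what SliceDoubleOpGradMaker wires up by feeding OutputGrad(GradVarName("Input")) into a new "slice" op. The helper names (slice_fwd, slice_grad) and the concrete slice bounds below are illustrative assumptions, not code from the repository.

import numpy as np

# Assumed example slice: axes=[0, 1, 2] with effective bounds [1:3, 0:3, 4:5]
# on an input of shape (3, 4, 5, 2), roughly the first case in the new test.
x_shape = (3, 4, 5, 2)
s = (slice(1, 3), slice(0, 3), slice(4, 5))

def slice_fwd(x):
    # Forward op: y = x[s], a linear selection of entries.
    return x[s]

def slice_grad(dy):
    # Backward op: scatter dy back into a zero tensor of x's shape.
    dx = np.zeros(x_shape)
    dx[s] = dy
    return dx

rng = np.random.default_rng(0)
dy = rng.random((2, 3, 1, 2))    # gradient flowing into slice_grad
ddx = rng.random(x_shape)        # perturbation of slice_grad's output

# Adjoint identity <slice_grad(dy), ddx> == <dy, slice_fwd(ddx)>: the
# derivative of slice_grad w.r.t. dy, applied to ddx, is slice_fwd(ddx),
# i.e. the double grad of slice is just another slice of the same region.
assert np.isclose(np.vdot(slice_grad(dy), ddx), np.vdot(dy, slice_fwd(ddx)))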