diff --git a/paddle/fluid/operators/slice_op.cc b/paddle/fluid/operators/slice_op.cc
index 9d96f9670c633f1937f4fff4065bd219cea930df..f25b08bbbabf6ae93b8f6b94edfeb6fff466c370 100644
--- a/paddle/fluid/operators/slice_op.cc
+++ b/paddle/fluid/operators/slice_op.cc
@@ -291,6 +291,34 @@ class SliceOpGradMaker : public framework::SingleGradOpMaker<T> {
   }
 };
 
+template <typename T>
+class SliceDoubleOpGradMaker : public framework::SingleGradOpMaker<T> {
+ public:
+  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
+
+ protected:
+  std::unique_ptr<T> Apply() const override {
+    auto *bind = new T();
+    if (this->HasInput("StartsTensor")) {
+      bind->SetInput("StartsTensor", this->Input("StartsTensor"));
+    }
+    if (this->HasInput("EndsTensor")) {
+      bind->SetInput("EndsTensor", this->Input("EndsTensor"));
+    }
+    if (this->HasInput("StartsTensorList")) {
+      bind->SetInput("StartsTensorList", this->Input("StartsTensorList"));
+    }
+    if (this->HasInput("EndsTensorList")) {
+      bind->SetInput("EndsTensorList", this->Input("EndsTensorList"));
+    }
+    bind->SetInput("Input", this->OutputGrad(framework::GradVarName("Input")));
+    bind->SetOutput("Out", this->InputGrad(framework::GradVarName("Out")));
+    bind->SetAttrMap(this->Attrs());
+    bind->SetType("slice");
+    return std::unique_ptr<T>(bind);
+  }
+};
+
 DECLARE_NO_NEED_BUFFER_VARS_INFERENCE(SliceOpGradNoNeedBufferVarsInference,
                                       "Input");
 
@@ -302,6 +330,8 @@ REGISTER_OPERATOR(slice, ops::SliceOp, ops::SliceOpMaker,
                   ops::SliceOpGradMaker<paddle::framework::OpDesc>,
                   ops::SliceOpGradMaker<paddle::imperative::OpBase>);
 REGISTER_OPERATOR(slice_grad, ops::SliceOpGrad,
+                  ops::SliceDoubleOpGradMaker<paddle::framework::OpDesc>,
+                  ops::SliceDoubleOpGradMaker<paddle::imperative::OpBase>,
                   ops::SliceOpGradNoNeedBufferVarsInference);
 
 REGISTER_OP_CPU_KERNEL(
diff --git a/python/paddle/fluid/tests/unittests/test_nn_grad.py b/python/paddle/fluid/tests/unittests/test_nn_grad.py
index 1434fdf0d0a89e633de409538008845baefae444..e38028feea221477e89db162e83c64f32a56292c 100644
--- a/python/paddle/fluid/tests/unittests/test_nn_grad.py
+++ b/python/paddle/fluid/tests/unittests/test_nn_grad.py
@@ -43,6 +43,41 @@ class TestMulGradCheck(unittest.TestCase):
             self.func(p)
 
 
+class TestSliceOpDoubleGradCheck(unittest.TestCase):
+    def func(self, place):
+        self.config()
+
+        out = fluid.layers.slice(
+            self.inputs, axes=self.axes, starts=self.starts, ends=self.ends)
+        gradient_checker.double_grad_check(
+            [self.inputs], out, x_init=self.x_arr, place=place)
+
+    def config(self):
+        self.starts = [1, 0, -1]
+        self.ends = [3, 3, 6]
+        self.axes = [0, 1, 2]
+        self.x_arr = np.random.random([3, 4, 5, 2]).astype("float64")
+        self.inputs = layers.create_parameter(
+            dtype="float64", shape=[3, 4, 5, 2], name='x')
+
+    def test_grad(self):
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for place in places:
+            self.func(place)
+
+
+class TestSliceOpDoubleGradCheckCase3(TestSliceOpDoubleGradCheck):
+    def config(self):
+        self.starts = [1, -1, 1]
+        self.ends = [3, 3, 3]
+        self.axes = [0, 1, 2]
+        self.x_arr = np.random.random([3, 3, 3]).astype("float64")
+        self.inputs = layers.create_parameter(
+            dtype="float64", shape=[3, 3, 3], name='x3')
+
+
 class TestReduceMeanWithDimDoubleGradCheck(unittest.TestCase):
     @prog_scope()
     def func(self, place):
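
Note on why `SliceDoubleOpGradMaker` can reuse the forward op: `slice_grad` scatters `d(Out)` into a zero tensor of the input's shape, and that scatter is linear, so differentiating through it again simply extracts the same window back out — which is exactly the forward `slice`. That is what `bind->SetType("slice")` encodes, with `OutputGrad(GradVarName("Input"))` wired in as the new op's `Input` and `InputGrad(GradVarName("Out"))` as its `Out`. Below is a minimal NumPy sketch of this adjoint identity; `slice_fwd` and `slice_grad` are illustrative stand-ins, not Paddle APIs.

```python
import numpy as np

starts, ends = (1, 0), (3, 3)                    # window on axes 0 and 1
window = tuple(slice(s, e) for s, e in zip(starts, ends))

def slice_fwd(x):
    # forward slice: extract the window
    return x[window]

def slice_grad(dout, x_shape):
    # first-order grad: scatter d(Out) into zeros of the input's shape
    dx = np.zeros(x_shape)
    dx[window] = dout
    return dx

x = np.random.rand(3, 4)
dout = np.random.rand(*slice_fwd(x).shape)       # incoming grad of Out
ddx = np.random.rand(3, 4)                       # perturbation of d(Input)

# Adjoint identity: <ddx, scatter(dout)> == <slice(ddx), dout>, so the
# derivative of slice_grad w.r.t. dout is slice_fwd -- the forward op again.
lhs = np.vdot(ddx, slice_grad(dout, x.shape))
rhs = np.vdot(slice_fwd(ddx), dout)
assert np.isclose(lhs, rhs)
```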
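To exercise the new checks locally, the test file can be run directly (`python python/paddle/fluid/tests/unittests/test_nn_grad.py`, assuming a built Paddle package on `PYTHONPATH` and the file's usual `unittest.main()` entry point), or filtered by name via `ctest -R test_nn_grad` from the CMake build tree; exact target names may vary with the build configuration.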