Unverified commit cc387159, authored by ceci3, committed by GitHub

add pad and concat double grad (#29549)

* add constant pad double grad
Parent f13c3a9c
...@@ -201,6 +201,20 @@ class ConcatGradOpMaker : public framework::SingleGradOpMaker<T> {
  }
};
template <typename T>
class ConcatDoubleGradOpMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> grad_op) const override {
    // concat is linear, so the grad of concat_grad is concat itself:
    // concatenating the second-order gradients of the inputs yields the
    // second-order gradient of the output.
    grad_op->SetType("concat");
    grad_op->SetInput("X", this->OutputGrad(framework::GradVarName("X")));
    grad_op->SetOutput("Out", this->InputGrad(framework::GradVarName("Out")));
    grad_op->SetAttrMap(this->Attrs());
  }
};
}  // namespace operators
}  // namespace paddle
...@@ -209,6 +223,8 @@ REGISTER_OPERATOR(concat, ops::ConcatOp, ops::ConcatOpMaker,
                  ops::ConcatGradOpMaker<paddle::framework::OpDesc>,
                  ops::ConcatGradOpMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(concat_grad, ops::ConcatOpGrad,
                  ops::ConcatDoubleGradOpMaker<paddle::framework::OpDesc>,
                  ops::ConcatDoubleGradOpMaker<paddle::imperative::OpBase>,
                  ops::ConcatOpGradNoNeedBufferVarInferer);
REGISTER_OP_CPU_KERNEL(
    concat, ops::ConcatKernel<paddle::platform::CPUDeviceContext, double>,
...
...@@ -893,6 +893,22 @@ class Pad3dOpGradMaker : public framework::SingleGradOpMaker<T> {
  }
};
template <typename T>
class Pad3dOpDoubleGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

  void Apply(GradOpPtr<T> grad_op) const override {
    if (this->HasInput("Paddings")) {
      grad_op->SetInput("Paddings", this->Input("Paddings"));
    }
    // pad3d is linear, so its double grad is pad3d applied to the
    // second-order gradient of X, reusing the same paddings and attributes.
    grad_op->SetType("pad3d");
    grad_op->SetInput("X", this->OutputGrad(framework::GradVarName("X")));
    grad_op->SetOutput("Out", this->InputGrad(framework::GradVarName("Out")));
    grad_op->SetAttrMap(this->Attrs());
  }
};
DECLARE_NO_NEED_BUFFER_VARS_INFERER(Pad3dOpGradNoNeedBufferVarsInferer, "X");
}  // namespace operators
...@@ -904,6 +920,8 @@ REGISTER_OPERATOR(pad3d, ops::Pad3dOp, ops::Pad3dOpMaker,
                  ops::Pad3dOpGradMaker<paddle::framework::OpDesc>,
                  ops::Pad3dOpGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(pad3d_grad, ops::Pad3dOpGrad,
                  ops::Pad3dOpDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::Pad3dOpDoubleGradMaker<paddle::imperative::OpBase>,
                  ops::Pad3dOpGradNoNeedBufferVarsInferer);
REGISTER_OP_CPU_KERNEL(pad3d, ops::Pad3dCPUKernel<float>,
                       ops::Pad3dCPUKernel<double>, ops::Pad3dCPUKernel<int>,
...
...@@ -142,6 +142,19 @@ class PadOpGradMaker : public framework::SingleGradOpMaker<T> {
  }
};
template <typename T>
class PadOpDoubleGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

  void Apply(GradOpPtr<T> grad_op) const override {
    // pad is linear, so its double grad pads the second-order gradient of X
    // with the same attributes to produce the second-order gradient of Out.
    grad_op->SetType("pad");
    grad_op->SetInput("X", this->OutputGrad(framework::GradVarName("X")));
    grad_op->SetOutput("Out", this->InputGrad(framework::GradVarName("Out")));
    grad_op->SetAttrMap(this->Attrs());
  }
};
}  // namespace operators
}  // namespace paddle
...@@ -150,7 +163,9 @@ namespace ops = paddle::operators;
REGISTER_OPERATOR(pad, ops::PadOp, ops::PadOpMaker,
                  ops::PadOpGradMaker<paddle::framework::OpDesc>,
                  ops::PadOpGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(pad_grad, ops::PadOpGrad,
                  ops::PadOpDoubleGradMaker<paddle::framework::OpDesc>,
                  ops::PadOpDoubleGradMaker<paddle::imperative::OpBase>);
REGISTER_OP_CPU_KERNEL(
    pad, ops::PadKernel<paddle::platform::CPUDeviceContext, float>,
    ops::PadKernel<paddle::platform::CPUDeviceContext, double>,
...
...@@ -394,5 +394,70 @@ class TestTransposeDoubleGradCheckCase1(unittest.TestCase):
            self.func(p)
class TestConstantPadDoubleGradCheck(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        x_shape = [2, 3, 4, 5]
        pad = [1, 1, 1, 1]
        eps = 0.005
        dtype = np.float64

        x = layers.data('x', x_shape, False, dtype)
        x.persistable = True
        out = paddle.nn.functional.pad(x, pad)
        x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)

        gradient_checker.double_grad_check(
            [x], out, x_init=x_arr, place=place, eps=eps)

    def test_grad(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)
class TestConstantPadDoubleGradCheckCase1(TestConstantPadDoubleGradCheck):
    @prog_scope()
    def func(self, place):
        x_shape = [2, 3, 4, 5]
        pad = [1, 0, 1, 0, 1, 0, 1, 0]
        dtype = np.float64

        x = layers.data('x', x_shape, False, dtype)
        x.persistable = True
        out = paddle.nn.functional.pad(x, pad)
        x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)

        gradient_checker.double_grad_check([x], out, x_init=x_arr, place=place)
class TestConcatDoubleGradCheck(unittest.TestCase):
    @prog_scope()
    def func(self, place):
        x_shape = [2, 3, 4, 5]
        dtype = np.float64

        # Use distinct variable names so the two inputs are fed independently.
        x1 = layers.data('x1', x_shape, False, dtype)
        x2 = layers.data('x2', x_shape, False, dtype)
        x1.persistable = True
        x2.persistable = True
        out = paddle.concat([x1, x2], axis=0)
        x2_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)
        x1_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)

        gradient_checker.double_grad_check(
            [x1, x2], out, x_init=[x1_arr, x2_arr], place=place)

    def test_grad(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))
        for p in places:
            self.func(p)
if __name__ == "__main__":
    unittest.main()
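A minimal dygraph sketch (not part of this diff) of the second-order differentiation these registrations enable, assuming Paddle 2.0's default imperative mode and the paddle.grad API; the tensor shape and the loss are arbitrary illustration choices:

import numpy as np
import paddle
import paddle.nn.functional as F

# Hypothetical example input; any differentiable tensor works.
x = paddle.to_tensor(np.random.uniform(-1, 1, [2, 3, 4, 5]).astype('float64'))
x.stop_gradient = False

# Forward pass through the ops whose double grads are registered above.
y = F.pad(x, [1, 1, 1, 1])
z = paddle.concat([y, y], axis=0)
loss = (z * z).sum()

# First-order gradient, kept on the graph so it can be differentiated again.
dx, = paddle.grad([loss], [x], create_graph=True)

# Second-order gradient: this pass differentiates through the pad/concat
# grad ops, i.e. it exercises the double-grad makers added in this commit.
ddx, = paddle.grad([(dx * dx).sum()], [x])
print(ddx.shape)  # [2, 3, 4, 5]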