Unverified commit 1b9753d1, authored by W whs, committed by GitHub

Make pad2d support for variable paddings. (#14667)

* Make pad2d support for variable paddings.
test=develop

* Rename get_paddings and add inline modifier.
test=develop

* Fix comments.
Parent 2af5762c
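In short, this commit lets the `paddings` argument of `fluid.layers.pad2d` be a runtime `Variable` (a 4-element int32 tensor) instead of only a compile-time list. A minimal sketch of the new usage, mirroring the unit test added below (variable names are illustrative):

    import paddle.fluid as fluid
    import paddle.fluid.layers as layers

    input = layers.data(name="input", shape=[3, 100, 100], dtype="float32")

    # Before: paddings had to be a Python list/tuple baked into the program.
    out_static = layers.pad2d(input, paddings=[1, 2, 3, 4], mode='reflect')

    # After this commit: a 4-element int32 Variable works too, e.g. the
    # fill_constant output used in the new test_layers case.
    paddings = layers.fill_constant(shape=[4], dtype='int32', value=1)
    out_variable = layers.pad2d(input, paddings=paddings, mode='reflect')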
@@ -319,20 +319,46 @@ void Pad2DGradEdgeNHWC(T* d_in_data, const int num, const int channels,
   }
 }
 
+static inline void GetPaddings(int* paddings,
+                               const framework::ExecutionContext& context) {
+  auto* paddings_t = context.Input<Tensor>("Paddings");
+  if (paddings_t) {
+    auto paddings_data = paddings_t->data<int>();
+    paddings[0] = paddings_data[0];
+    paddings[1] = paddings_data[1];
+    paddings[2] = paddings_data[2];
+    paddings[3] = paddings_data[3];
+  } else {
+    auto pads = context.Attr<std::vector<int>>("paddings");
+    std::copy(pads.begin(), pads.end(), paddings);
+  }
+}
+
 template <typename T>
 class Pad2dCPUKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto pads = context.Attr<std::vector<int>>("paddings");
+    int pads[4];
+    GetPaddings(pads, context);
     auto mode = context.Attr<std::string>("mode");
     auto data_format = context.Attr<std::string>("data_format");
     T value = context.Attr<T>("pad_value");
     auto* x = context.Input<Tensor>("X");
-    auto* out = context.Output<Tensor>("Out");
     auto in_dims = x->dims();
-    auto out_dims = out->dims();
     const T* in_data = x->data<T>();
+    auto* out = context.Output<Tensor>("Out");
+    if (data_format == "NCHW") {
+      out->Resize({in_dims[0], in_dims[1], in_dims[2] + pads[0] + pads[1],
+                   in_dims[3] + pads[2] + pads[3]});
+    } else {
+      out->Resize({in_dims[0], in_dims[1] + pads[0] + pads[1],
+                   in_dims[2] + pads[2] + pads[3], in_dims[3]});
+    }
+    auto out_dims = out->dims();
     T* out_data = out->mutable_data<T>(context.GetPlace());
     const int pad_top = pads[0];
     const int pad_left = pads[2];
     const int num = in_dims[0];
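To make the Resize above concrete, a tiny Python mirror of the shape computation (an illustrative helper, not a Paddle API):

    # Shape arithmetic used by the Resize above; pads = [top, bottom, left, right].
    def padded_dims(in_dims, pads, data_format="NCHW"):
        n, d1, d2, d3 = in_dims
        if data_format == "NCHW":   # H is dim 2, W is dim 3
            return (n, d1, d2 + pads[0] + pads[1], d3 + pads[2] + pads[3])
        else:                       # NHWC: H is dim 1, W is dim 2
            return (n, d1 + pads[0] + pads[1], d2 + pads[2] + pads[3], d3)

    assert padded_dims((2, 3, 4, 5), [0, 1, 2, 3]) == (2, 3, 5, 10)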
@@ -376,7 +402,8 @@ template <typename T>
 class Pad2dGradCPUKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto pads = context.Attr<std::vector<int>>("paddings");
+    int pads[4];
+    GetPaddings(pads, context);
     auto mode = context.Attr<std::string>("mode");
     auto data_format = context.Attr<std::string>("data_format");
     auto* d_out = context.Input<Tensor>(framework::GradVarName("Out"));
@@ -442,21 +469,35 @@ class Pad2dOp : public framework::OperatorWithKernel {
                    "Output(Out) of Pad2dOp should not be null.");
 
     auto x_dim = ctx->GetInputDim("X");
-    auto paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
     PADDLE_ENFORCE_EQ(x_dim.size(), 4,
-                      "Size of paddings should be equal to 4.");
-    std::vector<int64_t> out_dims(x_dim.size());
+                      "The size of input(X)'s dimension should be equal to 4.");
+
+    std::vector<int64_t> out_dims(x_dim.size());
     auto data_format = ctx->Attrs().Get<std::string>("data_format");
     out_dims[0] = x_dim[0];
-    if (data_format == "NCHW") {
+    if (ctx->HasInput("Paddings")) {
+      auto paddings_dim = ctx->GetInputDim("Paddings");
+      PADDLE_ENFORCE_EQ(
+          paddings_dim.size(), 1,
+          "Size of Input(Paddings)'s dimension should be equal to 1.");
+      PADDLE_ENFORCE_EQ(paddings_dim[0], 4,
+                        "Shape of Input(Paddings) should be equal to [4].");
       out_dims[1] = x_dim[1];
-      out_dims[2] = x_dim[2] + paddings[0] + paddings[1];  // height
-      out_dims[3] = x_dim[3] + paddings[2] + paddings[3];  // width
-    } else {  // NHWC
+      out_dims[2] = x_dim[2];
       out_dims[3] = x_dim[3];
-      out_dims[1] = x_dim[1] + paddings[0] + paddings[1];
-      out_dims[2] = x_dim[2] + paddings[2] + paddings[3];
+    } else {
+      auto paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
+      PADDLE_ENFORCE_EQ(paddings.size(), 4,
+                        "Size of paddings should be equal to 4.");
+      if (data_format == "NCHW") {
+        out_dims[1] = x_dim[1];
+        out_dims[2] = x_dim[2] + paddings[0] + paddings[1];  // height
+        out_dims[3] = x_dim[3] + paddings[2] + paddings[3];  // width
+      } else {  // NHWC
+        out_dims[3] = x_dim[3];
+        out_dims[1] = x_dim[1] + paddings[0] + paddings[1];
+        out_dims[2] = x_dim[2] + paddings[2] + paddings[3];
+      }
     }
 
     ctx->SetOutputDim("Out", framework::make_ddim(out_dims));
@@ -466,6 +507,13 @@ class Pad2dOp : public framework::OperatorWithKernel {
       ctx->ShareLoD("X", /*->*/ "Out");
     }
   }
 
+ protected:
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext& ctx) const override {
+    return framework::OpKernelType(
+        framework::ToDataType(ctx.Input<Tensor>("X")->type()), ctx.GetPlace());
+  }
 };
 
 class Pad2dOpMaker : public framework::OpProtoAndCheckerMaker {
@@ -477,6 +525,12 @@ class Pad2dOpMaker : public framework::OpProtoAndCheckerMaker {
     AddOutput("Out",
               "The output of pad2d op. "
               "A tensor with the same shape as X.");
+    AddInput("Paddings",
+             "A 1-D tensor to describe the padding rules. "
+             "paddings=[0, 1, 2, 3] means "
+             "padding 0 rows to the top, 1 row to the bottom, 2 columns to "
+             "the left and 3 columns to the right. Size of paddings must be 4.")
+        .AsDispensable();
     AddAttr<std::vector<int>>(
         "paddings",
         "(vector<int>) "
@@ -554,6 +608,13 @@ class Pad2dOpGrad : public framework::OperatorWithKernel {
       ctx->SetOutputDim(x_grad_name, x_dims);
     }
   }
 
+ protected:
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext& ctx) const override {
+    return framework::OpKernelType(
+        framework::ToDataType(ctx.Input<Tensor>("X")->type()), ctx.GetPlace());
+  }
 };
 
 class Pad2dOpGradMaker : public framework::SingleGradOpDescMaker {
@@ -564,6 +625,7 @@ class Pad2dOpGradMaker : public framework::SingleGradOpDescMaker {
   std::unique_ptr<framework::OpDesc> Apply() const override {
     auto* bind = new framework::OpDesc();
     bind->SetInput("X", Input("X"));
+    bind->SetInput("Paddings", Input("Paddings"));
     bind->SetInput(framework::GradVarName("Out"), OutputGrad("Out"));
     bind->SetOutput(framework::GradVarName("X"), InputGrad("X"));
     bind->SetAttrMap(Attrs());
...
@@ -287,20 +287,50 @@ __global__ void Pad2DGradEdgeNHWC(const int out_size, T* d_in_data,
   }
 }
 
+static inline void GetPaddings(int* paddings,
+                               const framework::ExecutionContext& context) {
+  auto* paddings_t = context.Input<Tensor>("Paddings");
+  if (paddings_t) {
+    Tensor pads;
+    framework::TensorCopySync(*paddings_t, platform::CPUPlace(), &pads);
+    auto pads_data = pads.data<int>();
+    paddings[0] = pads_data[0];
+    paddings[1] = pads_data[1];
+    paddings[2] = pads_data[2];
+    paddings[3] = pads_data[3];
+  } else {
+    auto pads = context.Attr<std::vector<int>>("paddings");
+    std::copy(pads.begin(), pads.end(), paddings);
+  }
+}
+
 template <typename T>
 class Pad2dCUDAKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto pads = context.Attr<std::vector<int>>("paddings");
+    int pads[4];
+    GetPaddings(pads, context);
     auto mode = context.Attr<std::string>("mode");
     auto data_format = context.Attr<std::string>("data_format");
     T value = context.Attr<T>("pad_value");
     auto* x = context.Input<Tensor>("X");
-    auto* out = context.Output<Tensor>("Out");
     auto in_dims = x->dims();
-    auto out_dims = out->dims();
     const T* in_data = x->data<T>();
-    T* out_data = out->mutable_data<T>(context.GetPlace());
+    auto* out = context.Output<Tensor>("Out");
+    auto out_dims = out->dims();
+    if (data_format == "NCHW") {
+      out_dims[0] = in_dims[0];
+      out_dims[1] = in_dims[1];
+      out_dims[2] = in_dims[2] + pads[0] + pads[1];
+      out_dims[3] = in_dims[3] + pads[2] + pads[3];
+    } else {
+      out_dims[0] = in_dims[0];
+      out_dims[1] = in_dims[1] + pads[0] + pads[1];
+      out_dims[2] = in_dims[2] + pads[2] + pads[3];
+      out_dims[3] = in_dims[3];
+    }
+    T* out_data = out->mutable_data<T>(out_dims, context.GetPlace());
     const int pad_top = pads[0];
     const int pad_left = pads[2];
     const int num = in_dims[0];
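Unlike the CPU path, the CUDA GetPaddings must TensorCopySync the four padding values back to the host, since the output resize and kernel launch configuration happen on the CPU side. A hedged end-to-end sketch that exercises this path when a CUDA build is available (falling back to CPU otherwise):

    import numpy as np
    import paddle.fluid as fluid

    place = (fluid.CUDAPlace(0)
             if fluid.core.is_compiled_with_cuda() else fluid.CPUPlace())

    data = fluid.layers.data(name="x", shape=[3, 4, 5], dtype="float32")
    pads = fluid.layers.fill_constant(shape=[4], dtype='int32', value=1)
    out = fluid.layers.pad2d(data, paddings=pads, mode='constant')

    exe = fluid.Executor(place)
    x = np.random.random((2, 3, 4, 5)).astype("float32")
    res, = exe.run(fluid.default_main_program(),
                   feed={"x": x}, fetch_list=[out])
    print(res.shape)  # expected (2, 3, 6, 7): 1 padded on every side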
@@ -356,7 +386,8 @@ template <typename T>
 class Pad2dGradCUDAKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
-    auto pads = context.Attr<std::vector<int>>("paddings");
+    int pads[4];
+    GetPaddings(pads, context);
     auto mode = context.Attr<std::string>("mode");
     auto data_format = context.Attr<std::string>("data_format");
     auto* d_out = context.Input<Tensor>(framework::GradVarName("Out"));
...
@@ -6924,7 +6924,7 @@ def pad2d(input,
     Args:
         input (Variable): The input image with [N, C, H, W] format or [N, H, W, C] format.
-        paddings (tuple|list): The padding size. If padding is a tuple, it must
+        paddings (tuple|list|Variable): The padding size. If padding is a tuple, it must
             contain four integers, (padding_top, padding_bottom, padding_left, padding_right).
             Default: padding = [0, 0, 0, 0].
         mode (str): Three modes: constant(default), reflect, edge. Default: constant
@@ -6949,16 +6949,17 @@ def pad2d(input,
     helper = LayerHelper('pad2d', **locals())
     dtype = helper.input_dtype(input_param_name='input')
     out = helper.create_variable_for_type_inference(dtype)
+    inputs = {'X': input}
+    attrs = {'mode': mode, 'pad_value': pad_value, 'data_format': data_format}
+
+    if isinstance(paddings, Variable):
+        inputs['Paddings'] = paddings
+        attrs['paddings'] = []
+    else:
+        attrs['paddings'] = paddings
+
     helper.append_op(
-        type='pad2d',
-        inputs={'X': input},
-        outputs={"Out": out},
-        attrs={
-            'paddings': paddings,
-            'mode': mode,
-            'pad_value': pad_value,
-            'data_frmat': data_format
-        })
+        type='pad2d', inputs=inputs, outputs={"Out": out}, attrs=attrs)
 
     return out
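The branch above is the whole Python-side change: a list/tuple still travels through the paddings attribute, while a Variable is wired to the new Paddings input with the attribute left empty (the old code also misspelled the attribute as 'data_frmat', which this change fixes in passing). A small hedged probe of the resulting op descs (the printed variable name is illustrative):

    import paddle.fluid as fluid
    import paddle.fluid.layers as layers

    main = fluid.Program()
    with fluid.program_guard(main):
        input = layers.data(name="input", shape=[3, 100, 100], dtype="float32")
        pads_var = layers.fill_constant(shape=[4], dtype='int32', value=1)
        layers.pad2d(input, paddings=[1, 2, 3, 4])  # attribute path
        layers.pad2d(input, paddings=pads_var)      # Paddings-input path

    for op in main.global_block().ops:
        if op.type == 'pad2d':
            print(op.attr('paddings'), op.input('Paddings'))
    # expected roughly:
    #   [1, 2, 3, 4] []
    #   []           ['fill_constant_0.tmp_0']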
...
@@ -636,13 +636,21 @@ class TestBook(unittest.TestCase):
         with program_guard(program):
             input = layers.data(
                 name="input", shape=[3, 100, 100], dtype="float32")
+            paddings = layers.fill_constant(shape=[4], dtype='int32', value=1)
             out = layers.pad2d(
                 input,
                 paddings=[1, 2, 3, 4],
                 mode='reflect',
                 data_format='NCHW',
                 name="shape")
+            out_1 = layers.pad2d(
+                input,
+                paddings=paddings,
+                mode='reflect',
+                data_format='NCHW',
+                name="shape")
             self.assertIsNotNone(out)
+            self.assertIsNotNone(out_1)
             print(str(program))
 
     def test_prelu(self):
...
@@ -20,11 +20,17 @@ from op_test import OpTest
 class TestPad2dOp(OpTest):
     def setUp(self):
         self.pad_value = 0.0
+        self.variable_paddings = False
         self.initTestCase()
         self.op_type = "pad2d"
         self.inputs = {'X': np.random.random(self.shape).astype("float32"), }
         self.attrs = {}
-        self.attrs['paddings'] = np.array(self.paddings).flatten()
+        if self.variable_paddings:
+            self.attrs['paddings'] = []
+            self.inputs['Paddings'] = np.array(self.paddings).flatten().astype(
+                "int32")
+        else:
+            self.attrs['paddings'] = np.array(self.paddings).flatten()
         self.attrs['pad_value'] = self.pad_value
         self.attrs['mode'] = self.mode
         self.attrs['data_format'] = self.data_format
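The rest of setUp (unchanged, hence elided above) builds self.outputs; the actual test presumably computes the expectation with numpy padding, along these lines (a hedged reconstruction, not the verbatim file):

    import numpy as np

    def expected_pad2d(x, paddings, mode, pad_value=0.0, data_format="NCHW"):
        # paddings = [top, bottom, left, right]
        if data_format == "NCHW":
            pads = ((0, 0), (0, 0),
                    (paddings[0], paddings[1]), (paddings[2], paddings[3]))
        else:  # NHWC
            pads = ((0, 0), (paddings[0], paddings[1]),
                    (paddings[2], paddings[3]), (0, 0))
        if mode == 'constant':
            return np.pad(x, pads, mode='constant', constant_values=pad_value)
        return np.pad(x, pads, mode=mode)  # 'reflect' / 'edge' map directly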
@@ -98,5 +104,24 @@ class TestCase5(TestPad2dOp):
         self.data_format = "NHWC"
 
 
+class TestCase6(TestPad2dOp):
+    def initTestCase(self):
+        self.shape = (2, 4, 4, 2)
+        self.paddings = [0, 1, 2, 3]
+        self.mode = "constant"
+        self.pad_value = 1.2
+        self.data_format = "NHWC"
+        self.variable_paddings = True
+
+
+class TestCase7(TestPad2dOp):
+    def initTestCase(self):
+        self.shape = (2, 3, 4, 4)
+        self.paddings = [0, 1, 2, 3]
+        self.mode = "reflect"
+        self.data_format = "NCHW"
+        self.variable_paddings = True
+
+
 if __name__ == '__main__':
     unittest.main()