Commit 5173b8d8 authored by chengduoZH

fix code format and doc

Parent 09ed5283
@@ -73,7 +73,7 @@ function(op_library TARGET)
   if ("${TARGET}" STREQUAL "conv_transpose_op")
     set(pybind_flag 1)
     # It's enough to just add one operator to pybind
-    file(APPEND ${pybind_file} "USE_OP(conv2dtranspose);\n")
+    file(APPEND ${pybind_file} "USE_OP(conv2d_transpose);\n")
   endif()
   # pool_cudnn_op contains several operators
...
@@ -46,9 +46,9 @@ void ConvTransposeOp::InferShape(framework::InferShapeContext* ctx) const {
   PADDLE_ENFORCE_EQ(paddings.size(), strides.size(),
                     "ConvTransposeOp paddings dimension and Conv strides "
                     "dimension should be the same.");
-  PADDLE_ENFORCE_EQ(
-      in_dims[1], filter_dims[0],
-      "ConvTransposeOp input and kernel input dimension should be equal.");
+  PADDLE_ENFORCE_EQ(in_dims[1], filter_dims[0],
+                    "In ConvTransposeOp, the input channel count should be "
+                    "the same as the number of filters.");
   std::vector<int64_t> output_shape({in_dims[0], filter_dims[1]});
   for (size_t i = 0; i < paddings.size(); ++i) {
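
The tightened check compares the input's channel dimension against the
filter's first dimension, since this operator lays the filter out as
(C_in, C_out, H_f, W_f). A standalone Python sketch of the rule (the concrete
shapes are made-up for illustration, not taken from the patch):

    # Filter layout for this transposed conv: (C_in, C_out, H_f, W_f).
    input_shape = (2, 3, 5, 5)   # (N, C_in, H_in, W_in), hypothetical
    filter_shape = (3, 6, 3, 3)  # (C_in, C_out, H_f, W_f), hypothetical

    # Mirrors the PADDLE_ENFORCE_EQ above: in_dims[1] == filter_dims[0].
    assert input_shape[1] == filter_shape[0], \
        "input channel count must equal filter_dims[0]"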
@@ -76,16 +76,33 @@ Conv2DTransposeOpMaker::Conv2DTransposeOpMaker(
   AddOutput("Output",
             "(Tensor) The output tensor of convolution transpose operator. "
            "The format of output tensor is also NCHW.");
-  AddAttr<std::vector<int>>("strides",
-                            "strides of convolution transpose operator.")
+  AddAttr<std::vector<int>>(
+      "strides",
+      "(vector default:{1, 1}), strides of convolution transpose operator.")
       .SetDefault({1, 1});
-  AddAttr<std::vector<int>>("paddings",
-                            "paddings of convolution transpose operator.")
+  AddAttr<std::vector<int>>(
+      "paddings",
+      "(vector default:{0, 0}), paddings of convolution transpose operator.")
       .SetDefault({0, 0});
   AddComment(R"DOC(
 The convolution transpose operation calculates the output based on the input,
 filter, and the strides, paddings, and groups parameters. The size of each
 dimension of the parameters is checked in the infer-shape.
+Input(Input, Filter) and output(Output) are in NCHW format, where N is batch
+size, C is the number of channels, and H and W are the height and width of
+the feature. The parameters (ksize, strides, paddings) have two elements
+each, representing height and width, respectively.
+The input(X) size and output(Out) size may be different.
+Example:
+  Input:
+    Input shape: (N, C_in, H_in, W_in)
+    Filter shape: (C_in, C_out, H_f, W_f)
+  Output:
+    Output shape: (N, C_out, H_out, W_out)
+  where
+    H_out = (H_in - 1) * strides[0] - 2 * paddings[0] + filter_size[0];
+    W_out = (W_in - 1) * strides[1] - 2 * paddings[1] + filter_size[1];
 )DOC");
 }
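
A worked check of the 2D output-size formula in the DOC block (a minimal
sketch; the numbers are illustrative, not from the patch):

    # H_out = (H_in - 1) * stride - 2 * padding + filter_size
    def out_size(in_size, filter_size, stride, padding):
        return (in_size - 1) * stride - 2 * padding + filter_size

    # A 5x5 input with a 3x3 filter, stride 2, padding 1:
    # (5 - 1) * 2 - 2 * 1 + 3 = 9, so the output is 9x9.
    assert out_size(5, 3, 2, 1) == 9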
@@ -111,16 +111,34 @@ Conv3DTransposeOpMaker::Conv3DTransposeOpMaker(
             "Where N is batch size, C is "
             "the number of channels, D, H and W are the depth, height and "
             "width of feature.");
-  AddAttr<std::vector<int>>("strides",
-                            "strides of convolution transpose operator.")
+  AddAttr<std::vector<int>>(
+      "strides",
+      "(vector default:{1, 1, 1}), strides of convolution transpose operator.")
       .SetDefault({1, 1, 1});
-  AddAttr<std::vector<int>>("paddings",
-                            "paddings of convolution transpose operator.")
+  AddAttr<std::vector<int>>(
+      "paddings",
+      "(vector default:{0, 0, 0}), paddings of convolution transpose operator.")
       .SetDefault({0, 0, 0});
   AddComment(R"DOC(
 The convolution transpose operation calculates the output based on the input,
 filter, and the strides, paddings, and groups parameters. The size of each
 dimension of the parameters is checked in the infer-shape.
+Input(Input, Filter) and output(Output) are in NCDHW format, where N is batch
+size, C is the number of channels, and D, H and W are the depth, height and
+width of the feature. The parameters (ksize, strides, paddings) have three
+elements each, representing depth, height and width, respectively.
+The input(X) size and output(Out) size may be different.
+Example:
+  Input:
+    Input shape: (N, C_in, D_in, H_in, W_in)
+    Filter shape: (C_in, C_out, D_f, H_f, W_f)
+  Output:
+    Output shape: (N, C_out, D_out, H_out, W_out)
+  where
+    D_out = (D_in - 1) * strides[0] - 2 * paddings[0] + filter_size[0];
+    H_out = (H_in - 1) * strides[1] - 2 * paddings[1] + filter_size[1];
+    W_out = (W_in - 1) * strides[2] - 2 * paddings[2] + filter_size[2];
 )DOC");
 }
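
The 3D case applies the same per-dimension rule to (D, H, W). A
dimension-agnostic sketch of the computation (a hypothetical helper, not part
of the patch):

    def conv_transpose_out_shape(in_sizes, filter_sizes, strides, paddings):
        # (in - 1) * stride - 2 * padding + filter, per spatial dimension.
        return [(i - 1) * s - 2 * p + f
                for i, f, s, p in zip(in_sizes, filter_sizes, strides, paddings)]

    # Depth/height/width of (4, 5, 5), a 3x3x3 filter, stride 1, padding 0:
    assert conv_transpose_out_shape([4, 5, 5], [3, 3, 3],
                                    [1, 1, 1], [0, 0, 0]) == [6, 7, 7]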
@@ -140,22 +140,22 @@ void ConvTransposeOpGrad::InferShape(framework::InferShapeContext* ctx) const {
 namespace ops = paddle::operators;
-REGISTER_OP(conv2dtranspose, ops::ConvTransposeOp, ops::Conv2DTransposeOpMaker,
-            conv2dtranspose_grad, ops::ConvTransposeOpGrad);
+REGISTER_OP(conv2d_transpose, ops::ConvTransposeOp, ops::Conv2DTransposeOpMaker,
+            conv2d_transpose_grad, ops::ConvTransposeOpGrad);
 REGISTER_OP_CPU_KERNEL(
-    conv2dtranspose,
+    conv2d_transpose,
     ops::GemmConv2DTransposeKernel<paddle::platform::CPUPlace, float>);
 REGISTER_OP_CPU_KERNEL(
-    conv2dtranspose_grad,
+    conv2d_transpose_grad,
     ops::GemmConv2DTransposeGradKernel<paddle::platform::CPUPlace, float>);
-REGISTER_OP(conv3dtranspose, ops::ConvTransposeOp, ops::Conv3DTransposeOpMaker,
-            conv3dtranspose_grad, ops::ConvTransposeOpGrad);
+REGISTER_OP(conv3d_transpose, ops::ConvTransposeOp, ops::Conv3DTransposeOpMaker,
+            conv3d_transpose_grad, ops::ConvTransposeOpGrad);
 REGISTER_OP_CPU_KERNEL(
-    conv3dtranspose,
+    conv3d_transpose,
     ops::GemmConv3DTransposeKernel<paddle::platform::CPUPlace, float>);
 REGISTER_OP_CPU_KERNEL(
-    conv3dtranspose_grad,
+    conv3d_transpose_grad,
     ops::GemmConv3DTransposeGradKernel<paddle::platform::CPUPlace, float>);
@@ -17,15 +17,15 @@
 namespace ops = paddle::operators;
 REGISTER_OP_GPU_KERNEL(
-    conv2dtranspose,
+    conv2d_transpose,
     ops::GemmConv2DTransposeKernel<paddle::platform::GPUPlace, float>);
 REGISTER_OP_GPU_KERNEL(
-    conv2dtranspose_grad,
+    conv2d_transpose_grad,
     ops::GemmConv2DTransposeGradKernel<paddle::platform::GPUPlace, float>);
 REGISTER_OP_GPU_KERNEL(
-    conv3dtranspose,
+    conv3d_transpose,
     ops::GemmConv3DTransposeKernel<paddle::platform::GPUPlace, float>);
 REGISTER_OP_GPU_KERNEL(
-    conv3dtranspose_grad,
+    conv3d_transpose_grad,
     ops::GemmConv3DTransposeGradKernel<paddle::platform::GPUPlace, float>);
@@ -26,7 +26,7 @@ def conv2dtranspose_forward_naive(input_, filter_, conv2dtranspose_param):
             for k in range(out_c):
                 tmp_out = np.sum(input_masked * filter_[:, k, :, :], axis=0)
                 i1, i2 = i * stride[0], i * stride[0] + f_h
-                j1, j2 = j * stride[0], j * stride[0] + f_w
+                j1, j2 = j * stride[1], j * stride[1] + f_w
                 out[n, k, i1:i2, j1:j2] += tmp_out
     return out
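
The one-line fix above corrects a bug where the horizontal window offsets
used the vertical stride, so any test with unequal strides would scatter
patches to the wrong columns. A self-contained NumPy sketch of the naive
reference with the corrected indexing (reconstructed for illustration; the
inner loop follows the test, the surrounding setup is assumed):

    import numpy as np

    def conv2d_transpose_naive(input_, filter_, stride, pad):
        # input_: (N, C_in, H_in, W_in); filter_: (C_in, C_out, f_h, f_w)
        n, in_c, h_in, w_in = input_.shape
        _, out_c, f_h, f_w = filter_.shape
        h_out = (h_in - 1) * stride[0] - 2 * pad[0] + f_h
        w_out = (w_in - 1) * stride[1] - 2 * pad[1] + f_w
        # Scatter onto a canvas that still includes the padding border.
        out = np.zeros((n, out_c, h_out + 2 * pad[0], w_out + 2 * pad[1]))
        for b in range(n):
            for i in range(h_in):
                for j in range(w_in):
                    # One input pixel across all input channels.
                    input_masked = input_[b, :, i, j].reshape(in_c, 1, 1)
                    for k in range(out_c):
                        tmp_out = np.sum(input_masked * filter_[:, k, :, :],
                                         axis=0)
                        i1, i2 = i * stride[0], i * stride[0] + f_h
                        # stride[1] on the width axis -- the fix in this commit
                        j1, j2 = j * stride[1], j * stride[1] + f_w
                        out[b, k, i1:i2, j1:j2] += tmp_out
        # Trim the padding border to get the (h_out, w_out) result.
        return out[:, :, pad[0]:pad[0] + h_out, pad[1]:pad[1] + w_out]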
@@ -86,7 +86,7 @@ class TestConv2dTransposeOp(OpTest):
         self.filter_size = [f_c, 6, 3, 3]
     def init_op_type(self):
-        self.op_type = "conv2dtranspose"
+        self.op_type = "conv2d_transpose"
     """
...
@@ -90,7 +90,7 @@ class TestConv3dTransposeOp(OpTest):
         self.filter_size = [f_c, 6, 3, 3, 3]
     def init_op_type(self):
-        self.op_type = "conv3dtranspose"
+        self.op_type = "conv3d_transpose"
 if __name__ == '__main__':
...