Commit 500e29a4 authored by W wanghaoshuang

1. Reduce attributes
2. Rename 'get_output_size' to 'OutputSize'
3. Remove redundant whitespace char.
Parent fe45f211
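At a glance, the change collapses six scalar attributes into three vector attributes. A condensed before/after sketch of the attribute declarations, reconstructed from the diff below (not a verbatim excerpt of either revision):

// Before: one scalar attribute per value.
AddAttr<int>("block_height", "(int)height of block.");
AddAttr<int>("block_width", "(int)width of block.");
AddAttr<int>("stride_height", "(int)height of stride.");
AddAttr<int>("stride_width", "(int)width of stride.");
AddAttr<int>("padding_height", "(int)height of padding.");
AddAttr<int>("padding_width", "(int)width of padding.");

// After: three vector attributes.
AddAttr<std::vector<int>>("kernels", "(vector<int>) kernel_height, kernel_width");
AddAttr<std::vector<int>>("strides", "(vector<int>) h_stride, w_stride")
    .SetDefault({1, 1});
AddAttr<std::vector<int>>("paddings",
                          "(vector<int>) up_pad, left_pad, down_pad, right_pad")
    .SetDefault({0, 0, 0, 0});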
......@@ -30,28 +30,24 @@ class Im2SequenceOp : public framework::OperatorWithKernel {
auto in_dim = ctx->GetInputDim("X");
PADDLE_ENFORCE_EQ(in_dim.size(), 4,
"Input(X) format must be 4D tensor, eg., NCHW.");
"Input(X) format must be 4D tensor, eg., NCHW.");
int block_height = ctx->Attrs().Get<int>("block_height");
int block_width = ctx->Attrs().Get<int>("block_width");
int stride_height = ctx->Attrs().Get<int>("stride_height");
int stride_width = ctx->Attrs().Get<int>("stride_width");
int padding_height = ctx->Attrs().Get<int>("padding_height");
int padding_width = ctx->Attrs().Get<int>("padding_width");
auto kernels = ctx->Attrs().Get<std::vector<int>>("kernels");
auto strides = ctx->Attrs().Get<std::vector<int>>("strides");
auto paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
int batch_size = in_dim[0];
int img_channels = in_dim[1];
int img_height = in_dim[2];
int img_width = in_dim[3];
int output_height = get_output_size(img_height, block_height, stride_height,
padding_height);
int output_height = OutputSize(img_height, kernels[0], paddings[0],
paddings[2], strides[0]);
int output_width =
get_output_size(img_width, block_width, stride_width, padding_width);
OutputSize(img_width, kernels[1], paddings[1], paddings[3], strides[1]);
ctx->SetOutputDim("Out", {batch_size * output_height * output_width,
img_channels * block_height * block_width});
// TODO(wanghaoshuang): calculate lod at compile time
img_channels * kernels[0] * kernels[1]});
}
};
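As a standalone illustration (not part of the commit), the following sketch reproduces the new shape computation with the hypothetical values used in the DOC example further down (x.dims = {2, 2, 3, 3}, kernels = [2, 2], strides = [1, 1], paddings = [0, 0, 0, 0]):

#include <cstdio>
#include <vector>

// Same formula as the renamed OutputSize helper in im2sequence_op.h.
static int OutputSize(int input_size, int filter_size, int padding_0,
                      int padding_1, int stride) {
  return (input_size + padding_0 + padding_1 - filter_size) / stride + 1;
}

int main() {
  std::vector<int> in_dim = {2, 2, 3, 3};    // N, C, H, W
  std::vector<int> kernels = {2, 2};         // kernel_height, kernel_width
  std::vector<int> strides = {1, 1};         // h_stride, w_stride
  std::vector<int> paddings = {0, 0, 0, 0};  // up_pad, left_pad, down_pad, right_pad
  int oh = OutputSize(in_dim[2], kernels[0], paddings[0], paddings[2], strides[0]);
  int ow = OutputSize(in_dim[3], kernels[1], paddings[1], paddings[3], strides[1]);
  // Out dims: {batch_size * output_height * output_width,
  //            img_channels * kernel_height * kernel_width}
  std::printf("{%d, %d}\n", in_dim[0] * oh * ow,
              in_dim[1] * kernels[0] * kernels[1]);  // prints {8, 8}
  return 0;
}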
......@@ -66,26 +62,30 @@ class Im2SequenceOpMaker : public framework::OpProtoAndCheckerMaker {
"H: height"
"W: width");
AddOutput("Out", "(LodTensor)The output data of im2sequence op,");
AddAttr<int>("block_height", "(int)height of block.");
AddAttr<int>("block_width", "(int)width of block.");
AddAttr<int>("stride_height", "(int)height of stride.");
AddAttr<int>("stride_width", "(int)width of stride.");
AddAttr<int>("padding_height", "(int)height of padding.");
AddAttr<int>("padding_width", "(int)width of padding.");
AddAttr<std::vector<int>>("kernels",
"(vector<int>), the "
"kernels(kernel_height, kernel_width)")
AddAttr<std::vector<int>>("strides",
"(vector<int> default:{1, 1}), the "
"strides(h_stride, w_stride)")
.SetDefault({1, 1});
AddAttr<std::vector<int>>("paddings",
"(vector<int> default:{0, 0, 0, 0}), the "
"paddings(up_pad, left_pad, down_pad, right_pad)")
.SetDefault({0, 0, 0, 0});
AddComment(R"DOC(
Convert feature map to minibatch matrix.
- matrix height is: output_height * output_width
- matrix width is: block_height * block_width * channels
This op uses kernels to scan images and converts these images to sequences.
After expanding, the number of time steps is output_height * output_width
and the dimension of each time step is kernel_height * kernel_width * channels,
in which:
output_height =
1 + (2 * padding_height + img_height - block_height + stride_height - 1) /
1 + (padding_up + padding_down + img_height - kernel_height + stride_height - 1) /
stride_height;
output_width =
1 + (2 * padding_width + img_width - block_width + stride_width - 1) /
1 + (padding_left + padding_right + img_width - kernel_width + stride_width - 1) /
stride_width;
After expanding, the number of time steps is output_height * output_width
and the dimension of each time step is block_height * block_width * channels.
This op can be used after a convolutional neural network, and before a recurrent neural network.
Given:
......@@ -109,12 +109,9 @@ x.dims = {2, 2, 3, 3}
And:
block_height = 2
block_width = 2
stride_height = 1
stride_width = 1
padding_height = 0
padding_width = 0
kernels = [2, 2]
strides = [1, 1]
paddings = [0, 0, 0, 0]
Then:
......
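Plugging the example's values into the new formulas (a worked check using the Given/And values above: img_height = img_width = 3, kernels = [2, 2], strides = [1, 1], paddings = [0, 0, 0, 0]):

output_height = 1 + (0 + 0 + 3 - 2 + 1 - 1) / 1 = 2
output_width  = 1 + (0 + 0 + 3 - 2 + 1 - 1) / 1 = 2

so each image expands into 2 * 2 = 4 time steps of dimension kernel_height * kernel_width * channels = 2 * 2 * 2 = 8, and Out has batch_size * 4 = 8 rows of 8 elements.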
......@@ -26,9 +26,11 @@ namespace operators {
using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor;
inline int get_output_size(int img_size, int block_size, int stride,
int padding) {
return (1 + (img_size + 2 * padding - block_size + stride - 1) / stride);
inline int OutputSize(int input_size, int filter_size, int padding_0,
int padding_1, int stride) {
const int output_size =
(input_size + padding_0 + padding_1 - filter_size) / stride + 1;
return output_size;
}
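Worth noting (an observation about the diff, not something stated in the commit message): the rename also changes the rounding. The old get_output_size added stride - 1 to the numerator, giving ceiling division, while the new OutputSize uses plain integer division, i.e. floor, so the two only differ when the stride does not divide the padded extent evenly; the DOC comment above still shows the ceiling form. A quick side-by-side with hypothetical values:

// img_size = 5, block/filter = 2, padding = 0, stride = 2
// old: 1 + (5 + 0 - 2 + 2 - 1) / 2 = 1 + 4 / 2 = 3   (ceil)
// new: (5 + 0 + 0 - 2) / 2 + 1     = 1 + 1     = 2   (floor)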
template <typename DeviceContext, typename T>
......@@ -47,32 +49,24 @@ class Im2SequenceKernel : public framework::OpKernel<T> {
int img_channels = in_dim[1];
int img_height = in_dim[2];
int img_width = in_dim[3];
int block_height = ctx.Attr<int>("block_height");
int block_width = ctx.Attr<int>("block_width");
int stride_height = ctx.Attr<int>("stride_height");
int stride_width = ctx.Attr<int>("stride_width");
int padding_height = ctx.Attr<int>("padding_height");
int padding_width = ctx.Attr<int>("padding_width");
int output_height = get_output_size(img_height, block_height, stride_height,
padding_height);
auto kernels = ctx.Attr<std::vector<int>>("kernels");
auto strides = ctx.Attr<std::vector<int>>("strides");
auto paddings = ctx.Attr<std::vector<int>>("paddings");
int output_height =
OutputSize(img_height, kernels[0], paddings[0], paddings[2], strides[0]);
int output_width =
get_output_size(img_width, block_width, stride_width, padding_width);
OutputSize(img_width, kernels[1], paddings[1], paddings[3], strides[1]);
const std::vector<int> dilations({1, 1});
const std::vector<int> strides(
{stride_height, stride_width, stride_height, stride_width});
const std::vector<int> paddings(
{padding_height, padding_width, padding_height, padding_width});
auto out_dims = out->dims();
out->Resize({batch_size, out->numel() / batch_size});
for (int i = 0; i < batch_size; i++) {
const Tensor src =
in->Slice(i, i + 1).Resize({img_channels, img_height, img_width});
Tensor dst = out->Slice(i, i + 1).Resize({output_height, output_width,
img_channels, block_height,
block_width});
Tensor dst = out->Slice(i, i + 1).Resize(
{output_height, output_width, img_channels, kernels[0], kernels[1]});
math::Im2ColFunctor<math::ColFormat::kOCF, DeviceContext, T> f;
auto& dev_ctx = ctx.template device_context<DeviceContext>();
......@@ -112,22 +106,15 @@ class Im2SequenceGradKernel : public framework::OpKernel<T> {
int img_height = in_dim[2];
int img_width = in_dim[3];
int block_height = ctx.Attr<int>("block_height");
int block_width = ctx.Attr<int>("block_width");
int stride_height = ctx.Attr<int>("stride_height");
int stride_width = ctx.Attr<int>("stride_width");
int padding_height = ctx.Attr<int>("padding_height");
int padding_width = ctx.Attr<int>("padding_width");
int output_height = get_output_size(img_height, block_height, stride_height,
padding_height);
auto kernels = ctx.Attr<std::vector<int>>("kernels");
auto strides = ctx.Attr<std::vector<int>>("strides");
auto paddings = ctx.Attr<std::vector<int>>("paddings");
int output_height = OutputSize(img_height, kernels[0], paddings[0],
paddings[2], strides[0]);
int output_width =
get_output_size(img_width, block_width, stride_width, padding_width);
OutputSize(img_width, kernels[1], paddings[1], paddings[3], strides[1]);
const std::vector<int> dilations({1, 1});
const std::vector<int> strides(
{stride_height, stride_width, stride_height, stride_width});
const std::vector<int> paddings(
{padding_height, padding_width, padding_height, padding_width});
auto d_out_dims = d_out->dims();
d_out->Resize({batch_size, d_out->numel() / batch_size});
......@@ -135,8 +122,7 @@ class Im2SequenceGradKernel : public framework::OpKernel<T> {
Tensor dst =
d_x->Slice(i, i + 1).Resize({img_channels, img_height, img_width});
const Tensor src = d_out->Slice(i, i + 1).Resize(
{output_height, output_width, img_channels, block_height,
block_width});
{output_height, output_width, img_channels, kernels[0], kernels[1]});
math::Col2ImFunctor<math::ColFormat::kOCF, DeviceContext, T> f;
auto& dev_ctx = ctx.template device_context<DeviceContext>();
f(dev_ctx, src, dilations, strides, paddings, &dst);
......
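The backward pass mirrors the forward pass: each slice of d_out is resized to {output_height, output_width, img_channels, kernels[0], kernels[1]} and Col2ImFunctor scatters it back into {img_channels, img_height, img_width}, reusing the same dilations, strides, and paddings vectors. With the running example's values, the per-sample shapes are (illustrative comment only):

// Forward  (Im2Col): {C=2, H=3, W=3}                     -> {oh=2, ow=2, C=2, kh=2, kw=2}
// Backward (Col2Im): {oh=2, ow=2, C=2, kh=2, kw=2}       -> {C=2, H=3, W=3}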