提交 6197c09b 编写于 作者: G gongweibao

modify styles

上级 f1ca3f7e
......@@ -33,32 +33,33 @@ class BlockExpandOp : public framework::OperatorWithKernel {
PADDLE_ENFORCE_EQ(in_dim.size(), 4, "Input format must be NCHW.");
PADDLE_ENFORCE_GE(in_dim[0], 1, "Input batchsize must >= 1.");
int blockHeight = ctx->Attrs().Get<int>("blockHeight");
int blockWidth = ctx->Attrs().Get<int>("blockWidth");
int strideHeight = ctx->Attrs().Get<int>("strideHeight");
int strideWidth = ctx->Attrs().Get<int>("strideWidth");
int paddingHeight = ctx->Attrs().Get<int>("paddingHeight");
int paddingWidth = ctx->Attrs().Get<int>("paddingWidth");
int block_height = ctx->Attrs().Get<int>("blockHeight");
int block_width = ctx->Attrs().Get<int>("blockWidth");
int stride_height = ctx->Attrs().Get<int>("strideHeight");
int stride_width = ctx->Attrs().Get<int>("strideWidth");
int padding_height = ctx->Attrs().Get<int>("paddingHeight");
int padding_width = ctx->Attrs().Get<int>("paddingWidth");
int N = in_dim[0];
int C = in_dim[1];
int imgHeight = in_dim[3];
int imgWidth = in_dim[4];
int img_height = in_dim[3];
int img_width = in_dim[4];
int outputHeight = 0;
int outputWidth = 0;
int output_height = 0;
int output_width = 0;
get_blockexpand_output_shape(imgHeight, imgWidth, blockHeight, blockWidth,
strideHeight, strideWidth, paddingHeight,
paddingWidth, outputHeight, outputWidth);
get_blockexpand_output_shape(img_height, img_width, block_height,
block_width, stride_height, stride_width,
padding_height, padding_width, output_height,
output_width);
// The result of im2col is [outputHeight, outputWidth,
// The result of im2col is [output_height, output_width,
// inputChannels, filterHeight, filterWidth], and it is easy to
// reshape into [seqLength, stepSize], where seqLength is equal
// outputHeight * outputWidth, stepSize is equal
// output_height * output_width, stepSize is equal
// input_channels * blockHeight * blockWidth
ctx->SetOutputDim(
"Out", {N, outputHeight, outputWidth, C, blockHeight, blockWidth});
"Out", {N, output_height, output_width, C, block_height, block_width});
// ctx->ShareLoD("X", /*->*/ "Out");
}
......@@ -85,18 +86,18 @@ class BlockExpandOpMaker : public framework::OpProtoAndCheckerMaker {
AddAttr<int>("paddingWidth", "(int)width of padding.");
AddComment(R"DOC(
Expand feature map to minibatch matrix.
- matirx height is: outputHeight * outputWidth
- matrix height is: output_height * output_width
- matrix width is: blockHeight * blockWidth * channels
outputHeight =
1 + (2 * paddingHeight + imgHeight - blockHeight + strideHeight - 1) /
output_height =
1 + (2 * paddingHeight + img_height - blockHeight + strideHeight - 1) /
strideHeight;
outputWidth =
1 + (2 * paddingWidth + imgWidth - blockWidth + strideWidth - 1) /
output_width =
1 + (2 * paddingWidth + img_width - blockWidth + strideWidth - 1) /
strideWidth;
The expand method is the same with ExpandConvLayer, but saved the transposed
value. After expanding, The number of time steps are outputHeight * outputWidth
value. After expanding, The number of time steps are output_height * output_width
and the dimension of each time step is blockHeight * blockWidth * channels.
This layer can be used after convolution neural network, and before recurrent neural network.
)DOC");
......
......@@ -18,24 +18,25 @@
#include "paddle/framework/eigen.h"
#include "paddle/framework/op_registry.h"
#include "paddle/operators/math/img2col.h"
#include "paddle/operators/math/im2col.h"
namespace paddle {
namespace operators {
// Computes the spatial output shape of the im2col-style block expansion.
//
// For each axis the number of sliding-window positions is
//   out = 1 + (img + 2 * padding - block + stride - 1) / stride
// i.e. a ceiling division over the strided window placements; per the op's
// DOC comment this matches ExpandConvLayer's output-size computation.
//
// Parameters:
//   img_height, img_width         input feature-map spatial size
//   block_height, block_width     size of each extracted block
//   stride_height, stride_width   sliding-window strides (must be > 0)
//   padding_height, padding_width zero padding applied on each side
//   output_height, output_width   [out] block positions along each axis
inline void get_blockexpand_output_shape(int img_height, int img_width,
                                         int block_height, int block_width,
                                         int stride_height, int stride_width,
                                         int padding_height, int padding_width,
                                         int& output_height,
                                         int& output_width) {
  output_height =
      1 +
      (img_height + 2 * padding_height - block_height + stride_height - 1) /
          stride_height;

  output_width =
      1 +
      (img_width + 2 * padding_width - block_width + stride_width - 1) /
          stride_width;
}
template <typename Place, typename T>
......@@ -50,30 +51,30 @@ class BlockExpandKernel : public framework::OpKernel<T> {
auto in_dim = in->dims();
int N = in_dim[0];
int C = in_dim[1];
int imgHeight = in_dim[2];
int imgWidth = in_dim[3];
int img_height = in_dim[2];
int img_width = in_dim[3];
int blockHeight = ctx.Attr<int>("blockHeight");
int blockWidth = ctx.Attr<int>("blockWidth");
int strideHeight = ctx.Attr<int>("strideHeight");
int strideWidth = ctx.Attr<int>("strideWidth");
int paddingHeight = ctx.Attr<int>("paddingHeight");
int paddingWidth = ctx.Attr<int>("paddingWidth");
int block_height = ctx.Attr<int>("blockHeight");
int block_width = ctx.Attr<int>("blockWidth");
int stride_height = ctx.Attr<int>("strideHeight");
int stride_width = ctx.Attr<int>("strideWidth");
int padding_height = ctx.Attr<int>("paddingHeight");
int padding_width = ctx.Attr<int>("paddingWidth");
int outputHeight = 0;
int outputWidth = 0;
get_blockexpand_output_shape(imgHeight, imgWidth, blockHeight, blockWidth,
strideHeight, strideWidth, paddingHeight,
paddingWidth, outputHeight, outputWidth);
get_blockexpand_output_shape(
img_height, img_width, block_height, block_width, stride_height,
stride_width, padding_height, padding_width, outputHeight, outputWidth);
for (int i = 0; i < N; i++) {
Tensor src = in->Slice<T>(i, i + 1).Resize(C, imgHeight, imgWidth);
Tensor src = in->Slice<T>(i, i + 1).Resize(C, img_height, img_width);
Tensor dst = out->Slice<T>(i, i + 1).Resize(outputHeight, outputWidth, C,
blockHeight, blockWidth);
math::Im2ColFunctor<kOCF, ctx->GetPlace(), T>(ctx, src, dst, strideHeight,
strideWidth, paddingHeight,
paddingWidth);
block_height, block_width);
math::Im2ColFunctor<math::ColFormat::kOCF, Place, T>(
ctx, src, dst, stride_height, stride_width, padding_height,
padding_width);
}
}
};
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册