Commit 6197c09b authored by gongweibao

modify styles

Parent f1ca3f7e
@@ -33,32 +33,33 @@ class BlockExpandOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE_EQ(in_dim.size(), 4, "Input format must be NCHW.");
     PADDLE_ENFORCE_GE(in_dim[0], 1, "Input batchsize must >= 1.");

-    int blockHeight = ctx->Attrs().Get<int>("blockHeight");
-    int blockWidth = ctx->Attrs().Get<int>("blockWidth");
-    int strideHeight = ctx->Attrs().Get<int>("strideHeight");
-    int strideWidth = ctx->Attrs().Get<int>("strideWidth");
-    int paddingHeight = ctx->Attrs().Get<int>("paddingHeight");
-    int paddingWidth = ctx->Attrs().Get<int>("paddingWidth");
+    int block_height = ctx->Attrs().Get<int>("blockHeight");
+    int block_width = ctx->Attrs().Get<int>("blockWidth");
+    int stride_height = ctx->Attrs().Get<int>("strideHeight");
+    int stride_width = ctx->Attrs().Get<int>("strideWidth");
+    int padding_height = ctx->Attrs().Get<int>("paddingHeight");
+    int padding_width = ctx->Attrs().Get<int>("paddingWidth");

     int N = in_dim[0];
     int C = in_dim[1];
-    int imgHeight = in_dim[3];
-    int imgWidth = in_dim[4];
+    int img_height = in_dim[3];
+    int img_width = in_dim[4];

-    int outputHeight = 0;
-    int outputWidth = 0;
+    int output_height = 0;
+    int output_width = 0;

-    get_blockexpand_output_shape(imgHeight, imgWidth, blockHeight, blockWidth,
-                                 strideHeight, strideWidth, paddingHeight,
-                                 paddingWidth, outputHeight, outputWidth);
+    get_blockexpand_output_shape(img_height, img_width, block_height,
+                                 block_width, stride_height, stride_width,
+                                 padding_height, padding_width, output_height,
+                                 output_width);

-    // The result of im2col is [outputHeight, outputWidth,
+    // The result of im2col is [output_height, output_width,
     // inputChannels, filterHeight, filterWidth], and it is easy to
     // reshape into [seqLength, stepSize], where seqLength is equal
-    // outputHeight * outputWidth, stepSize is equal
+    // output_height * output_width, stepSize is equal
     // input_channels * blockHeight * blockWidth
     ctx->SetOutputDim(
-        "Out", {N, outputHeight, outputWidth, C, blockHeight, blockWidth});
+        "Out", {N, output_height, output_width, C, block_height, block_width});

     // ctx->ShareLoD("X", /*->*/ "Out");
   }
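The comment in the hunk above describes how the {N, output_height, output_width, C, block_height, block_width} output folds into a [seqLength, stepSize] sequence view. The standalone sketch below (not part of the commit; the sizes are purely illustrative) spells that mapping out with concrete numbers:

#include <cstdio>

int main() {
  // Illustrative sizes: batch 2, 3 channels, an 8x8 feature map split into
  // 4x4 blocks with stride 4 and no padding -> output_height = output_width = 2.
  int N = 2, C = 3;
  int output_height = 2, output_width = 2;
  int block_height = 4, block_width = 4;

  // Shape written by SetOutputDim above.
  printf("Out dims: {%d, %d, %d, %d, %d, %d}\n", N, output_height, output_width,
         C, block_height, block_width);

  // Per-sample sequence view used by a downstream recurrent network:
  // seqLength = output_height * output_width, stepSize = C * block_height * block_width.
  printf("seqLength = %d, stepSize = %d\n", output_height * output_width,
         C * block_height * block_width);
  return 0;
}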
@@ -85,18 +86,18 @@ class BlockExpandOpMaker : public framework::OpProtoAndCheckerMaker {
     AddAttr<int>("paddingWidth", "(int)width of padding.");
     AddComment(R"DOC(
 Expand feature map to minibatch matrix.
-- matirx height is: outputHeight * outputWidth
+- matirx height is: output_height * output_width
 - matrix width is: blockHeight * blockWidth * channels

-outputHeight =
-    1 + (2 * paddingHeight + imgHeight - blockHeight + strideHeight - 1) /
+output_height =
+    1 + (2 * paddingHeight + img_height - blockHeight + strideHeight - 1) /
             strideHeight;
-outputWidth =
-    1 + (2 * paddingWidth + imgWidth - blockWidth + strideWidth - 1) /
+output_width =
+    1 + (2 * paddingWidth + img_width - blockWidth + strideWidth - 1) /
             strideWidth;

 The expand method is the same with ExpandConvLayer, but saved the transposed
-value. After expanding, The number of time steps are outputHeight * outputWidth
+value. After expanding, The number of time steps are output_height * output_width
 and the dimension of each time step is blockHeight * blockWidth * channels.
 This layer can be used after convolution neural network, and before recurrent neural network.
 )DOC");
......
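To make the output-size formula in the DOC string concrete, here is a minimal sketch (again not part of the commit; the helper name and sizes are made up for illustration) that evaluates it for one example:

#include <cstdio>

// Same arithmetic as the DOC string:
// out = 1 + (2 * padding + img - block + stride - 1) / stride
int expand_output_size(int img, int block, int stride, int padding) {
  return 1 + (2 * padding + img - block + stride - 1) / stride;
}

int main() {
  // An 8x8 feature map tiled by non-overlapping 4x4 blocks (stride 4, padding 0)
  // yields 2 blocks along each axis: 1 + (8 - 4 + 4 - 1) / 4 = 1 + 1 = 2.
  int output_height = expand_output_size(8, 4, 4, 0);
  int output_width = expand_output_size(8, 4, 4, 0);
  printf("output_height = %d, output_width = %d\n", output_height, output_width);
  return 0;
}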
@@ -18,24 +18,25 @@
 #include "paddle/framework/eigen.h"
 #include "paddle/framework/op_registry.h"
-#include "paddle/operators/math/img2col.h"
+#include "paddle/operators/math/im2col.h"

 namespace paddle {
 namespace operators {

-inline void get_blockexpand_output_shape(int imgHeight, int imgWidth,
-                                         int blockHeight, int blockWidth,
-                                         int strideHeight, int strideWidth,
-                                         int paddingHeight, int paddingWidth,
+inline void get_blockexpand_output_shape(int img_height, int img_width,
+                                         int block_height, int block_width,
+                                         int stride_height, int stride_width,
+                                         int padding_height, int padding_width,
                                          int& outputHeight, int& outputWidth) {
   outputHeight =
       1 +
-      (imgHeight + 2 * paddingHeight - blockHeight + strideHeight - 1) /
-          strideHeight;
-  outputWidth = 1 +
-                (imgWidth + 2 * paddingWidth - blockWidth + strideWidth - 1) /
-                    strideWidth;
+      (img_height + 2 * padding_height - block_height + stride_height - 1) /
+          stride_height;
+  outputWidth =
+      1 +
+      (img_width + 2 * padding_width - block_width + stride_width - 1) /
+          stride_width;
 }

 template <typename Place, typename T>
@@ -50,30 +51,30 @@ class BlockExpandKernel : public framework::OpKernel<T> {
     auto in_dim = in->dims();
     int N = in_dim[0];
     int C = in_dim[1];
-    int imgHeight = in_dim[2];
-    int imgWidth = in_dim[3];
+    int img_height = in_dim[2];
+    int img_width = in_dim[3];

-    int blockHeight = ctx.Attr<int>("blockHeight");
-    int blockWidth = ctx.Attr<int>("blockWidth");
-    int strideHeight = ctx.Attr<int>("strideHeight");
-    int strideWidth = ctx.Attr<int>("strideWidth");
-    int paddingHeight = ctx.Attr<int>("paddingHeight");
-    int paddingWidth = ctx.Attr<int>("paddingWidth");
+    int block_height = ctx.Attr<int>("blockHeight");
+    int block_width = ctx.Attr<int>("blockWidth");
+    int stride_height = ctx.Attr<int>("strideHeight");
+    int stride_width = ctx.Attr<int>("strideWidth");
+    int padding_height = ctx.Attr<int>("paddingHeight");
+    int padding_width = ctx.Attr<int>("paddingWidth");

     int outputHeight = 0;
     int outputWidth = 0;

-    get_blockexpand_output_shape(imgHeight, imgWidth, blockHeight, blockWidth,
-                                 strideHeight, strideWidth, paddingHeight,
-                                 paddingWidth, outputHeight, outputWidth);
+    get_blockexpand_output_shape(
+        img_height, img_width, block_height, block_width, stride_height,
+        stride_width, padding_height, padding_width, outputHeight, outputWidth);

     for (int i = 0; i < N; i++) {
-      Tensor src = in->Slice<T>(i, i + 1).Resize(C, imgHeight, imgWidth);
+      Tensor src = in->Slice<T>(i, i + 1).Resize(C, img_height, img_width);
       Tensor dst = out->Slice<T>(i, i + 1).Resize(outputHeight, outputWidth, C,
-                                                  blockHeight, blockWidth);
-      math::Im2ColFunctor<kOCF, ctx->GetPlace(), T>(ctx, src, dst, strideHeight,
-                                                    strideWidth, paddingHeight,
-                                                    paddingWidth);
+                                                  block_height, block_width);
+      math::Im2ColFunctor<math::ColFormat::kOCF, Place, T>(
+          ctx, src, dst, stride_height, stride_width, padding_height,
+          padding_width);
     }
   }
 };
......