未验证 提交 6566d7c7 编写于 作者: H helinwang 提交者: GitHub

Merge pull request #8458 from tonyyang-svail/rename_output

Rename the per-operator OutputSize helper functions to unambiguous, operator-specific names
...@@ -60,8 +60,9 @@ void ConvOp::InferShape(framework::InferShapeContext* ctx) const { ...@@ -60,8 +60,9 @@ void ConvOp::InferShape(framework::InferShapeContext* ctx) const {
"Due to the settings of paddings, filter_dims and " "Due to the settings of paddings, filter_dims and "
"dilations, the output size is less than 0, please check " "dilations, the output size is less than 0, please check "
"again."); "again.");
output_shape.push_back(OutputSize(in_dims[i + 2], filter_dims[i + 2], output_shape.push_back(ConvOutputSize(in_dims[i + 2], filter_dims[i + 2],
dilations[i], paddings[i], strides[i])); dilations[i], paddings[i],
strides[i]));
} }
ctx->SetOutputDim("Output", framework::make_ddim(output_shape)); ctx->SetOutputDim("Output", framework::make_ddim(output_shape));
ctx->ShareLoD("Input", "Output"); ctx->ShareLoD("Input", "Output");
......
...@@ -28,8 +28,8 @@ using Tensor = framework::Tensor; ...@@ -28,8 +28,8 @@ using Tensor = framework::Tensor;
// Base convolution operator definations for other conv // Base convolution operator definations for other conv
// like operators to reuse the implementation. // like operators to reuse the implementation.
inline int OutputSize(int input_size, int filter_size, int dilation, inline int ConvOutputSize(int input_size, int filter_size, int dilation,
int padding, int stride) { int padding, int stride) {
const int dkernel = dilation * (filter_size - 1) + 1; const int dkernel = dilation * (filter_size - 1) + 1;
const int output_size = (input_size + 2 * padding - dkernel) / stride + 1; const int output_size = (input_size + 2 * padding - dkernel) / stride + 1;
return output_size; return output_size;
......
...@@ -41,10 +41,10 @@ class Im2SequenceOp : public framework::OperatorWithKernel { ...@@ -41,10 +41,10 @@ class Im2SequenceOp : public framework::OperatorWithKernel {
int img_height = in_dim[2]; int img_height = in_dim[2];
int img_width = in_dim[3]; int img_width = in_dim[3];
int output_height = OutputSize(img_height, kernels[0], paddings[0], int output_height = Im2SeqOutputSize(img_height, kernels[0], paddings[0],
paddings[2], strides[0]); paddings[2], strides[0]);
int output_width = int output_width = Im2SeqOutputSize(img_width, kernels[1], paddings[1],
OutputSize(img_width, kernels[1], paddings[1], paddings[3], strides[1]); paddings[3], strides[1]);
ctx->SetOutputDim("Out", {batch_size * output_height * output_width, ctx->SetOutputDim("Out", {batch_size * output_height * output_width,
img_channels * kernels[0] * kernels[1]}); img_channels * kernels[0] * kernels[1]});
......
...@@ -26,8 +26,8 @@ namespace operators { ...@@ -26,8 +26,8 @@ namespace operators {
using Tensor = framework::Tensor; using Tensor = framework::Tensor;
using LoDTensor = framework::LoDTensor; using LoDTensor = framework::LoDTensor;
inline int OutputSize(int input_size, int filter_size, int padding_0, inline int Im2SeqOutputSize(int input_size, int filter_size, int padding_0,
int padding_1, int stride) { int padding_1, int stride) {
const int output_size = const int output_size =
(input_size + padding_0 + padding_1 - filter_size) / stride + 1; (input_size + padding_0 + padding_1 - filter_size) / stride + 1;
return output_size; return output_size;
...@@ -53,10 +53,10 @@ class Im2SequenceKernel : public framework::OpKernel<T> { ...@@ -53,10 +53,10 @@ class Im2SequenceKernel : public framework::OpKernel<T> {
auto kernels = ctx.Attr<std::vector<int>>("kernels"); auto kernels = ctx.Attr<std::vector<int>>("kernels");
auto strides = ctx.Attr<std::vector<int>>("strides"); auto strides = ctx.Attr<std::vector<int>>("strides");
auto paddings = ctx.Attr<std::vector<int>>("paddings"); auto paddings = ctx.Attr<std::vector<int>>("paddings");
int output_height = OutputSize(img_height, kernels[0], paddings[0], int output_height = Im2SeqOutputSize(img_height, kernels[0], paddings[0],
paddings[2], strides[0]); paddings[2], strides[0]);
int output_width = int output_width = Im2SeqOutputSize(img_width, kernels[1], paddings[1],
OutputSize(img_width, kernels[1], paddings[1], paddings[3], strides[1]); paddings[3], strides[1]);
const std::vector<int> dilations({1, 1}); const std::vector<int> dilations({1, 1});
...@@ -109,10 +109,10 @@ class Im2SequenceGradKernel : public framework::OpKernel<T> { ...@@ -109,10 +109,10 @@ class Im2SequenceGradKernel : public framework::OpKernel<T> {
auto kernels = ctx.Attr<std::vector<int>>("kernels"); auto kernels = ctx.Attr<std::vector<int>>("kernels");
auto strides = ctx.Attr<std::vector<int>>("strides"); auto strides = ctx.Attr<std::vector<int>>("strides");
auto paddings = ctx.Attr<std::vector<int>>("paddings"); auto paddings = ctx.Attr<std::vector<int>>("paddings");
int output_height = OutputSize(img_height, kernels[0], paddings[0], int output_height = Im2SeqOutputSize(img_height, kernels[0], paddings[0],
paddings[2], strides[0]); paddings[2], strides[0]);
int output_width = int output_width = Im2SeqOutputSize(img_width, kernels[1], paddings[1],
OutputSize(img_width, kernels[1], paddings[1], paddings[3], strides[1]); paddings[3], strides[1]);
const std::vector<int> dilations({1, 1}); const std::vector<int> dilations({1, 1});
......
...@@ -17,7 +17,7 @@ limitations under the License. */ ...@@ -17,7 +17,7 @@ limitations under the License. */
namespace paddle { namespace paddle {
namespace operators { namespace operators {
// Computes the output extent of a pooling window along one spatial
// dimension: floor((input_size + 2 * padding - filter_size) / stride) + 1.
//
// @param input_size  extent of the input along this dimension
// @param filter_size extent of the pooling window along this dimension
// @param padding     zero-padding added on each side of the input
// @param stride      step between consecutive window positions
// @return            number of valid window positions (output extent)
int PoolOutputSize(int input_size, int filter_size, int padding, int stride) {
  const int padded_extent = input_size + 2 * padding;
  return (padded_extent - filter_size) / stride + 1;
}
...@@ -55,7 +55,7 @@ void PoolOp::InferShape(framework::InferShapeContext *ctx) const { ...@@ -55,7 +55,7 @@ void PoolOp::InferShape(framework::InferShapeContext *ctx) const {
std::vector<int64_t> output_shape({in_x_dims[0], in_x_dims[1]}); std::vector<int64_t> output_shape({in_x_dims[0], in_x_dims[1]});
for (size_t i = 0; i < ksize.size(); ++i) { for (size_t i = 0; i < ksize.size(); ++i) {
output_shape.push_back( output_shape.push_back(
OutputSizePool(in_x_dims[i + 2], ksize[i], paddings[i], strides[i])); PoolOutputSize(in_x_dims[i + 2], ksize[i], paddings[i], strides[i]));
} }
ctx->SetOutputDim("Out", framework::make_ddim(output_shape)); ctx->SetOutputDim("Out", framework::make_ddim(output_shape));
ctx->ShareLoD("X", "Out"); ctx->ShareLoD("X", "Out");
......
...@@ -17,7 +17,7 @@ limitations under the License. */ ...@@ -17,7 +17,7 @@ limitations under the License. */
namespace paddle { namespace paddle {
namespace operators { namespace operators {
inline int OutputSizeMaxPool(int input_size, int filter_size, int padding, inline int MaxPoolOutputSize(int input_size, int filter_size, int padding,
int stride) { int stride) {
int output_size = (input_size - filter_size + 2 * padding) / stride + 1; int output_size = (input_size - filter_size + 2 * padding) / stride + 1;
return output_size; return output_size;
...@@ -61,7 +61,7 @@ class MaxPoolWithIndexOp : public framework::OperatorWithKernel { ...@@ -61,7 +61,7 @@ class MaxPoolWithIndexOp : public framework::OperatorWithKernel {
std::vector<int64_t> output_shape({in_x_dims[0], in_x_dims[1]}); std::vector<int64_t> output_shape({in_x_dims[0], in_x_dims[1]});
for (size_t i = 0; i < ksize.size(); ++i) { for (size_t i = 0; i < ksize.size(); ++i) {
output_shape.push_back(OutputSizeMaxPool(in_x_dims[i + 2], ksize[i], output_shape.push_back(MaxPoolOutputSize(in_x_dims[i + 2], ksize[i],
paddings[i], strides[i])); paddings[i], strides[i]));
} }
ctx->SetOutputDim("Out", framework::make_ddim(output_shape)); ctx->SetOutputDim("Out", framework::make_ddim(output_shape));
......
...@@ -64,7 +64,7 @@ Paper: http://www.matthewzeiler.com/wp-content/uploads/2017/07/iccv2011.pdf ...@@ -64,7 +64,7 @@ Paper: http://www.matthewzeiler.com/wp-content/uploads/2017/07/iccv2011.pdf
} }
}; };
// Computes the un-pooled output extent along one spatial dimension —
// the inverse of the pooling size formula:
//   (input_size - 1) * stride - 2 * padding + ksize.
//
// @param input_size  extent of the pooled input along this dimension
// @param ksize       extent of the pooling kernel along this dimension
// @param padding     zero-padding that was applied on each side
// @param stride      stride that was used when pooling
// @return            recovered (unpooled) output extent
int UnpoolOutputSize(int input_size, int ksize, int padding, int stride) {
  const int strided_extent = (input_size - 1) * stride;
  return strided_extent + ksize - 2 * padding;
}
...@@ -101,8 +101,8 @@ class UnpoolOp : public framework::OperatorWithKernel { ...@@ -101,8 +101,8 @@ class UnpoolOp : public framework::OperatorWithKernel {
PADDLE_ENFORCE_EQ(in_x_dims, in_y_dims); PADDLE_ENFORCE_EQ(in_x_dims, in_y_dims);
std::vector<int64_t> output_shape({in_x_dims[0], in_x_dims[1]}); std::vector<int64_t> output_shape({in_x_dims[0], in_x_dims[1]});
for (size_t i = 0; i < ksize.size(); ++i) { for (size_t i = 0; i < ksize.size(); ++i) {
output_shape.push_back( output_shape.push_back(UnpoolOutputSize(in_x_dims[i + 2], ksize[i],
OutputSize(in_x_dims[i + 2], ksize[i], paddings[i], strides[i])); paddings[i], strides[i]));
} }
ctx->SetOutputDim("Out", framework::make_ddim(output_shape)); ctx->SetOutputDim("Out", framework::make_ddim(output_shape));
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册