Commit 40688d22 authored by chengduoZH

refine im2col (up_pad,down_pad)

Parent 1e60c9b2
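
This commit changes the kOCF Im2ColFunctor/Col2ImFunctor interface from `(stride, pad, row_begin, row_end)` to `(stride_height, stride_width, up_pad, down_pad)`; the row range and the effective height padding are now derived inside the functors. The sketch below mirrors that derivation as it appears in the hunks that follow; the free-standing helper and struct are illustrative only, since the committed code runs the same logic inline.

```cpp
// Illustrative sketch (assumed helper, not part of the commit): how the new
// up_pad/down_pad arguments are turned into a row range and a symmetric
// padding_height inside the kOCF functors.
#include <algorithm>

struct RowRange {
  int row_begin;       // first output row written/read by the functor
  int row_end;         // one past the last output row
  int padding_height;  // symmetric height padding used by the inner loops
};

RowRange ComputeRowRange(int input_height, int filter_height,
                         int stride_height, int up_pad, int down_pad) {
  RowRange r;
  // The height padding applied on both sides is the larger of the two pads.
  r.padding_height = std::max(up_pad, down_pad);
  // If the bottom pad dominates, the first valid row is shifted down.
  r.row_begin = (up_pad >= down_pad) ? 0 : down_pad - up_pad;
  // Standard convolution output-size formula over the padded height.
  r.row_end = r.row_begin +
              (input_height + up_pad + down_pad - filter_height) /
                  stride_height +
              1;
  return r;
}
```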
......@@ -140,11 +140,8 @@ class Im2ColFunctor<paddle::operators::math::ColFormat::kOCF,
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& im, framework::Tensor& col,
int stride, int pad, int row_begin, int row_end) {
int stride_height = stride;
int stride_width = 0;
int padding_height = pad;
int padding_width = 0;
int stride_height, int stride_width, int up_pad,
int down_pad) {
PADDLE_ENFORCE(im.dims().size() == 3);
PADDLE_ENFORCE(col.dims().size() == 5);
int input_channels = im.dims()[0];
......@@ -155,6 +152,18 @@ class Im2ColFunctor<paddle::operators::math::ColFormat::kOCF,
// int output_height = col.dims()[0];
int output_width = col.dims()[1];
int row_begin, row_end;
int padding_height = std::max(up_pad, down_pad);
int padding_width = 0;
if (up_pad >= down_pad) {
row_begin = 0;
} else {
row_begin = down_pad - up_pad;
}
row_end = row_begin + ((input_height + up_pad + down_pad - filter_height) /
stride_height +
1);
const T* im_data = im.data<T>();
T* col_data = col.data<T>();
......@@ -204,12 +213,8 @@ class Col2ImFunctor<paddle::operators::math::ColFormat::kOCF,
platform::CPUPlace, T> {
public:
void operator()(const platform::DeviceContext& context, framework::Tensor& im,
const framework::Tensor& col, int stride, int pad,
int row_start, int row_end) {
int stride_height = stride;
int stride_width = 0;
int padding_height = pad;
int padding_width = 0;
const framework::Tensor& col, int stride_height,
int stride_width, int up_pad, int down_pad) {
PADDLE_ENFORCE(im.dims().size() == 3);
PADDLE_ENFORCE(col.dims().size() == 5);
int input_channels = im.dims()[0];
......@@ -220,10 +225,22 @@ class Col2ImFunctor<paddle::operators::math::ColFormat::kOCF,
// int output_height = col.dims()[0];
int output_width = col.dims()[1];
int row_begin, row_end;
int padding_height = std::max(up_pad, down_pad);
int padding_width = 0;
if (up_pad >= down_pad) {
row_begin = 0;
} else {
row_begin = down_pad - up_pad;
}
row_end = row_begin + ((input_height + up_pad + down_pad - filter_height) /
stride_height +
1);
T* im_data = im.data<T>();
const T* col_data = col.data<T>();
for (int col_row_idx = row_start; col_row_idx < row_end; ++col_row_idx) {
for (int col_row_idx = row_begin; col_row_idx < row_end; ++col_row_idx) {
for (int col_col_idx = 0; col_col_idx < output_width; ++col_col_idx) {
for (int channel = 0; channel < input_channels; ++channel) {
for (int filter_row_idx = 0; filter_row_idx < filter_height;
......@@ -235,7 +252,7 @@ class Col2ImFunctor<paddle::operators::math::ColFormat::kOCF,
int im_col_offset =
col_col_idx * stride_width + filter_col_idx - padding_width;
int col_offset =
((((col_row_idx - row_start) * output_width + col_col_idx) *
((((col_row_idx - row_begin) * output_width + col_col_idx) *
input_channels +
channel) *
filter_height +
......
......@@ -240,12 +240,8 @@ class Im2ColFunctor<paddle::operators::math::ColFormat::kOCF,
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& im, framework::Tensor& col,
int stride, int pad, int row_begin, int row_end) {
int stride_height = stride;
int stride_width = 0;
int padding_height = pad;
int padding_width = 0;
int stride_height, int stride_width, int up_pad,
int down_pad) {
PADDLE_ENFORCE(im.dims().size() == 3);
PADDLE_ENFORCE(col.dims().size() == 5);
int input_channels = im.dims()[0];
......@@ -253,6 +249,19 @@ class Im2ColFunctor<paddle::operators::math::ColFormat::kOCF,
int input_width = im.dims()[2];
int filter_height = col.dims()[3];
int filter_width = col.dims()[4];
int row_begin, row_end;
int padding_height = std::max(up_pad, down_pad);
int padding_width = 0;
if (up_pad >= down_pad) {
row_begin = 0;
} else {
row_begin = down_pad - up_pad;
}
row_end = row_begin + ((input_height + up_pad + down_pad - filter_height) /
stride_height +
1);
int output_height = row_end - row_begin; // col.dims()[0];
int output_width = col.dims()[1];
......@@ -295,7 +304,6 @@ __global__ void col2imOCF(T* im_data, const T* col_data, int input_channels,
int row_end) {
int swid = blockIdx.x;
int shid = blockIdx.y;
// if (shid < row_begin || shid > row_end) return;
for (int channelid = threadIdx.z; channelid < input_channels;
channelid += blockDim.z) {
for (int idy = threadIdx.y; idy < filter_height; idy += blockDim.y) {
......@@ -331,12 +339,8 @@ class Col2ImFunctor<paddle::operators::math::ColFormat::kOCF,
platform::GPUPlace, T> {
public:
void operator()(const platform::DeviceContext& context, framework::Tensor& im,
const framework::Tensor& col, int stride, int pad,
int row_begin, int row_end) {
int stride_height = stride;
int stride_width = 0;
int padding_height = pad;
int padding_width = 0;
const framework::Tensor& col, int stride_height,
int stride_width, int up_pad, int down_pad) {
PADDLE_ENFORCE(im.dims().size() == 3);
PADDLE_ENFORCE(col.dims().size() == 5);
int input_channels = im.dims()[0];
......@@ -344,6 +348,19 @@ class Col2ImFunctor<paddle::operators::math::ColFormat::kOCF,
int input_width = im.dims()[2];
int filter_height = col.dims()[3];
int filter_width = col.dims()[4];
int row_begin, row_end;
int padding_height = std::max(up_pad, down_pad);
int padding_width = 0;
if (up_pad >= down_pad) {
row_begin = 0;
} else {
row_begin = down_pad - up_pad;
}
row_end = row_begin + ((input_height + up_pad + down_pad - filter_height) /
stride_height +
1);
int output_height = row_end - row_begin; // col.dims()[0];
int output_width = col.dims()[1];
......
......@@ -35,6 +35,12 @@ void testIm2col() {
*
* output_ocf = [0, 1, 3, 4
* 1, 2, 4, 5]
*
* col2im_cfo = [0, 2, 2
* 3, 4, 5]
*
* col2im_ocf = [0, 2, 2
* 3, 4, 5]
*/
int input_height = 2;
int input_width = 3;
......@@ -59,7 +65,7 @@ void testIm2col() {
new paddle::platform::CUDADeviceContext(paddle::platform::GPUPlace());
#else
PADDLE_THROW("no GPU support");
#endif // PADDLE_ONLY_CPU
#endif // PADDLE_WITH_CUDA
}
if (paddle::platform::is_cpu_place(*place)) {
input = input_tmp;
......@@ -71,6 +77,7 @@ void testIm2col() {
output_ocf.mutable_data<float>(
{output_height, output_width, 1, filter_size, filter_size}, *place);
// Im2Col
paddle::operators::math::Im2ColFunctor<
paddle::operators::math::ColFormat::kCFO, Place, float>
im2col;
......@@ -79,8 +86,12 @@ void testIm2col() {
im2col_ocf;
im2col(*context, input, output_cfo, stride, stride, padding, padding);
im2col_ocf(*context, input, output_ocf, stride, padding, 0,
output_height * output_width);
im2col_ocf(*context, input, output_ocf, /*stride_height*/ stride,
/*stride_width*/ stride, /*up_pad*/ padding,
/*down_pad*/ padding);
float out_cfo_data[] = {0, 1, 1, 2, 3, 4, 4, 5};
float out_ocf_data[] = {0, 1, 3, 4, 1, 2, 4, 5};
float* out_cfo_ptr;
if (paddle::platform::is_cpu_place(*place)) {
......@@ -90,14 +101,9 @@ void testIm2col() {
*context);
out_cfo_ptr = output_tmp.data<float>();
}
EXPECT_EQ(out_cfo_ptr[0], 0);
EXPECT_EQ(out_cfo_ptr[1], 1);
EXPECT_EQ(out_cfo_ptr[2], 1);
EXPECT_EQ(out_cfo_ptr[3], 2);
EXPECT_EQ(out_cfo_ptr[4], 3);
EXPECT_EQ(out_cfo_ptr[5], 4);
EXPECT_EQ(out_cfo_ptr[6], 4);
EXPECT_EQ(out_cfo_ptr[7], 5);
for (int i = 0; i < 6; ++i) {
EXPECT_EQ(out_cfo_ptr[i], out_cfo_data[i]);
}
float* out_ocf_ptr;
if (paddle::platform::is_cpu_place(*place)) {
......@@ -107,14 +113,60 @@ void testIm2col() {
*context);
out_ocf_ptr = output_tmp.data<float>();
}
EXPECT_EQ(out_ocf_ptr[0], 0);
EXPECT_EQ(out_ocf_ptr[1], 1);
EXPECT_EQ(out_ocf_ptr[2], 3);
EXPECT_EQ(out_ocf_ptr[3], 4);
EXPECT_EQ(out_ocf_ptr[4], 1);
EXPECT_EQ(out_ocf_ptr[5], 2);
EXPECT_EQ(out_ocf_ptr[6], 4);
EXPECT_EQ(out_ocf_ptr[7], 5);
for (int i = 0; i < 6; ++i) {
EXPECT_EQ(out_ocf_ptr[i], out_ocf_data[i]);
}
// Col2Im: kCFO
paddle::operators::math::Col2ImFunctor<
paddle::operators::math::ColFormat::kCFO, Place, float>
col2im;
paddle::operators::math::Col2ImFunctor<
paddle::operators::math::ColFormat::kOCF, Place, float>
col2im_ocf;
float col2im_data[] = {0, 2, 2, 3, 8, 5};
memset(input_ptr, 0, 6 * sizeof(float));
if (paddle::platform::is_cpu_place(*place)) {
input = input_tmp;
} else {
input.CopyFrom<float>(input_tmp, *place, *context);
}
col2im(*context, input, output_cfo, stride, stride, padding, padding);
float* in_ptr;
if (paddle::platform::is_cpu_place(*place)) {
in_ptr = input.data<float>();
} else {
input_tmp.CopyFrom<float>(input, paddle::platform::CPUPlace(), *context);
in_ptr = input_tmp.data<float>();
}
for (int i = 0; i < 6; ++i) {
EXPECT_EQ(in_ptr[i], col2im_data[i]);
}
// Col2Im: kOCF
memset(input_ptr, 0, 6 * sizeof(float));
if (paddle::platform::is_cpu_place(*place)) {
input = input_tmp;
} else {
input.CopyFrom<float>(input_tmp, *place, *context);
}
col2im_ocf(*context, input, output_ocf, /*stride_height*/ stride,
/*stride_width*/ stride, /*up_pad*/ padding,
/*down_pad*/ padding);
if (paddle::platform::is_cpu_place(*place)) {
in_ptr = input.data<float>();
} else {
input_tmp.CopyFrom<float>(input, paddle::platform::CPUPlace(), *context);
in_ptr = input_tmp.data<float>();
}
for (int i = 0; i < 6; ++i) {
EXPECT_EQ(in_ptr[i], col2im_data[i]);
}
}
TEST(math, im2col) {
......
......@@ -87,24 +87,9 @@ class SequenceProjectKernel : public framework::OpKernel<T> {
sequence_width}); // input_channels, input_height, input_width
in_t.Resize(framework::make_ddim(input_shape));
for (int j = 0; j < context_length; ++j) {
int pad;
int row_start;
if (up_pad != 0) {
pad = up_pad;
row_start = 0;
} else if (down_pad != 0) {
pad = down_pad;
row_start = down_pad;
} else {
pad = 0;
row_start = 0;
}
im2col_ocf(context.device_context(), in_t, out_t,
/*stride*/ context_stride, /*pad*/ pad,
/*row_start*/ row_start,
/*row_end*/ row_start + sequence_height);
/*stride_height*/ context_stride, /*stride_width*/ 0, up_pad,
down_pad);
if (padding_trainable) {
// add up trainable data
out_t.Resize(framework::make_ddim(
......@@ -229,23 +214,9 @@ class SequenceProjectGradKernel : public framework::OpKernel<T> {
out_g_t.Resize(framework::make_ddim(
{sequence_height, 1, 1, context_length, sequence_width}));
int pad;
int row_start;
if (up_pad != 0) {
pad = up_pad;
row_start = 0;
} else if (down_pad != 0) {
pad = down_pad;
row_start = down_pad;
} else {
pad = 0;
row_start = 0;
}
col2im_ocf(context.device_context(), in_g_t, out_g_t,
/*stride*/ context_stride, /*pad*/ pad,
/*row_start*/ row_start,
/*row_end*/ row_start + sequence_height);
/*stride_height*/ context_stride, /*stride_width*/ 0, up_pad,
down_pad);
// out_g_t back to orign size
}
......
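
After this change, callers such as the im2col test and SequenceProjectKernel/SequenceProjectGradKernel no longer pick a single `pad` and `row_start` themselves; they pass both pads directly. A sketch of the new call shape follows; it is a fragment, not a complete program, and assumes the surrounding kernel already provides `context`, the input/output tensors, `Place`, `context_stride`, `up_pad`, and `down_pad`, as SequenceProjectKernel does.

```cpp
// Sketch of the new kOCF functor call shape after this commit (assumed
// surrounding kernel context; tensor names follow the diff above).
paddle::operators::math::Im2ColFunctor<
    paddle::operators::math::ColFormat::kOCF, Place, float>
    im2col_ocf;
paddle::operators::math::Col2ImFunctor<
    paddle::operators::math::ColFormat::kOCF, Place, float>
    col2im_ocf;

// Forward pass: height-only stride, explicit top/bottom padding.
im2col_ocf(context.device_context(), in_t, out_t,
           /*stride_height=*/context_stride, /*stride_width=*/0,
           up_pad, down_pad);

// Backward pass: same parameters, scattering columns back into the gradient.
col2im_ocf(context.device_context(), in_g_t, out_g_t,
           /*stride_height=*/context_stride, /*stride_width=*/0,
           up_pad, down_pad);
```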