Unverified commit bbad3414, authored by Zhong Hui, committed by GitHub

Enhance the error messages for files in operators/math

Parent d4f03dfb
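Every hunk below applies the same pattern: a bare PADDLE_ENFORCE_* check gains an explicit platform::errors::InvalidArgument message (or keeps its condition and only rewords the message). A minimal before/after sketch of that pattern, using the identifiers from the first hunk below:

// Before: the check carries no custom error message.
PADDLE_ENFORCE_EQ(input_a.dims(), dim_a);

// After: the same check also reports what was expected and what was received.
PADDLE_ENFORCE_EQ(input_a.dims(), dim_a,
                  paddle::platform::errors::InvalidArgument(
                      "The dims of Input tensor should be the same as the "
                      "declared dims. Tensor dims: [%s], declared dims: [%s]",
                      input_a.dims(), dim_a));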
@@ -79,8 +79,16 @@ void ConcatCase1(DeviceContext* context) {
   concat_functor(*context, input, 0, &out);
   // check the dim of input_a, input_b
-  PADDLE_ENFORCE_EQ(input_a.dims(), dim_a);
-  PADDLE_ENFORCE_EQ(input_b.dims(), dim_b);
+  PADDLE_ENFORCE_EQ(input_a.dims(), dim_a,
+                    paddle::platform::errors::InvalidArgument(
+                        "The dims of Input tensor should be the same as the "
+                        "declared dims. Tensor dims: [%s], declared dims: [%s]",
+                        input_a.dims(), dim_a));
+  PADDLE_ENFORCE_EQ(input_b.dims(), dim_b,
+                    paddle::platform::errors::InvalidArgument(
+                        "The dims of Input tensor should be the same as the "
+                        "declared dims. Tensor dims: [%s], declared dims: [%s]",
+                        input_b.dims(), dim_b));
   int* out_ptr = nullptr;
   if (paddle::platform::is_gpu_place(Place())) {
@@ -95,10 +103,14 @@ void ConcatCase1(DeviceContext* context) {
   int idx_a = 0, idx_b = 0;
   for (int j = 0; j < 5 * 3 * 4; ++j) {
     if (j >= cols) {
-      PADDLE_ENFORCE_EQ(out_ptr[j], b_ptr[idx_b]);
+      PADDLE_ENFORCE_EQ(out_ptr[j], b_ptr[idx_b],
+                        paddle::platform::errors::InvalidArgument(
+                            "Concat test failed, the result should be equal."));
       ++idx_b;
     } else {
-      PADDLE_ENFORCE_EQ(out_ptr[j], a_ptr[idx_a]);
+      PADDLE_ENFORCE_EQ(out_ptr[j], a_ptr[idx_a],
+                        paddle::platform::errors::InvalidArgument(
+                            "Concat test failed, the result should be equal."));
       ++idx_a;
     }
   }
@@ -166,8 +178,16 @@ void ConcatCase2(DeviceContext* context) {
   concat_functor(*context, input, 1, &out);
   // check the dim of input_a, input_b
-  PADDLE_ENFORCE_EQ(input_a.dims(), dim_a);
-  PADDLE_ENFORCE_EQ(input_b.dims(), dim_b);
+  PADDLE_ENFORCE_EQ(input_a.dims(), dim_a,
+                    paddle::platform::errors::InvalidArgument(
+                        "The dims of Input tensor should be the same as the "
+                        "declared dims. Tensor dims: [%s], declared dims: [%s]",
+                        input_a.dims(), dim_a));
+  PADDLE_ENFORCE_EQ(input_b.dims(), dim_b,
+                    paddle::platform::errors::InvalidArgument(
+                        "The dims of Input tensor should be the same as the "
+                        "declared dims. Tensor dims: [%s], declared dims: [%s]",
+                        input_b.dims(), dim_b));
   int* out_ptr = nullptr;
   if (paddle::platform::is_gpu_place(Place())) {
@@ -183,10 +203,16 @@ void ConcatCase2(DeviceContext* context) {
   for (int i = 0; i < 2; ++i) {
     for (int j = 0; j < 28; ++j) {
       if (j >= cols) {
-        PADDLE_ENFORCE_EQ(out_ptr[i * 28 + j], b_ptr[idx_b]);
+        PADDLE_ENFORCE_EQ(
+            out_ptr[i * 28 + j], b_ptr[idx_b],
+            paddle::platform::errors::InvalidArgument(
+                "Concat test failed, the result should be equal."));
         ++idx_b;
       } else {
-        PADDLE_ENFORCE_EQ(out_ptr[i * 28 + j], a_ptr[idx_a]);
+        PADDLE_ENFORCE_EQ(
+            out_ptr[i * 28 + j], a_ptr[idx_a],
+            paddle::platform::errors::InvalidArgument(
+                "Concat test failed, the result should be equal."));
         ++idx_a;
       }
     }
@@ -255,8 +281,16 @@ void ConcatCase3(DeviceContext* context) {
   concat_functor(*context, input, 2, &out);
   // check the dim of input_a, input_b
-  PADDLE_ENFORCE_EQ(input_a.dims(), dim_a);
-  PADDLE_ENFORCE_EQ(input_b.dims(), dim_b);
+  PADDLE_ENFORCE_EQ(input_a.dims(), dim_a,
+                    paddle::platform::errors::InvalidArgument(
+                        "The dims of Input tensor should be the same as the "
+                        "declared dims. Tensor dims: [%s], declared dims: [%s]",
+                        input_a.dims(), dim_a));
+  PADDLE_ENFORCE_EQ(input_b.dims(), dim_b,
+                    paddle::platform::errors::InvalidArgument(
+                        "The dims of Input tensor should be the same as the "
+                        "declared dims. Tensor dims: [%s], declared dims: [%s]",
+                        input_b.dims(), dim_b));
   int* out_ptr = nullptr;
   if (paddle::platform::is_gpu_place(Place())) {
@@ -273,10 +307,16 @@ void ConcatCase3(DeviceContext* context) {
   for (int i = 0; i < 6; ++i) {
     for (int j = 0; j < 9; ++j) {
      if (j >= cols) {
-        PADDLE_ENFORCE_EQ(out_ptr[i * 9 + j], b_ptr[idx_b]);
+        PADDLE_ENFORCE_EQ(
+            out_ptr[i * 9 + j], b_ptr[idx_b],
+            paddle::platform::errors::InvalidArgument(
+                "Concat test failed, the result should be equal."));
         ++idx_b;
       } else {
-        PADDLE_ENFORCE_EQ(out_ptr[i * 9 + j], a_ptr[idx_a]);
+        PADDLE_ENFORCE_EQ(
+            out_ptr[i * 9 + j], a_ptr[idx_a],
+            paddle::platform::errors::InvalidArgument(
+                "Concat test failed, the result should be equal."));
         ++idx_a;
       }
     }
@@ -347,8 +387,16 @@ void ConcatCase4(DeviceContext* context) {
   context->Wait();
   // check the dim of input_a, input_b
-  PADDLE_ENFORCE_EQ(input_a.dims(), dim_a);
-  PADDLE_ENFORCE_EQ(input_b.dims(), dim_b);
+  PADDLE_ENFORCE_EQ(input_a.dims(), dim_a,
+                    paddle::platform::errors::InvalidArgument(
+                        "The dims of Input tensor should be the same as the "
+                        "declared dims. Tensor dims: [%s], declared dims: [%s]",
+                        input_a.dims(), dim_a));
+  PADDLE_ENFORCE_EQ(input_b.dims(), dim_b,
+                    paddle::platform::errors::InvalidArgument(
+                        "The dims of Input tensor should be the same as the "
+                        "declared dims. Tensor dims: [%s], declared dims: [%s]",
+                        input_b.dims(), dim_b));
   int* out_ptr = nullptr;
   if (paddle::platform::is_gpu_place(Place())) {
@@ -365,10 +413,16 @@ void ConcatCase4(DeviceContext* context) {
   for (int i = 0; i < 2; ++i) {
     for (int j = 0; j < 24; ++j) {
       if (j >= cols) {
-        PADDLE_ENFORCE_EQ(out_ptr[i * 24 + j], b_ptr[idx_b]);
+        PADDLE_ENFORCE_EQ(
+            out_ptr[i * 24 + j], b_ptr[idx_b],
+            paddle::platform::errors::InvalidArgument(
+                "Concat test failed, the result should be equal."));
         ++idx_b;
       } else {
-        PADDLE_ENFORCE_EQ(out_ptr[i * 24 + j], a_ptr[idx_a]);
+        PADDLE_ENFORCE_EQ(
+            out_ptr[i * 24 + j], a_ptr[idx_a],
+            paddle::platform::errors::InvalidArgument(
+                "Concat test failed, the result should be equal."));
         ++idx_a;
       }
     }
...
@@ -134,7 +134,10 @@ class ContextProjectFunctor {
       }
     }
     if (padding_trainable) {
-      PADDLE_ENFORCE_NOT_NULL(padding_data);
+      PADDLE_ENFORCE_NOT_NULL(
+          padding_data,
+          platform::errors::InvalidArgument(
+              "The input tensor 'padding_data' should not be NULL."));
       for (int i = 0; i < static_cast<int>(lod_level_0.size()) - 1; ++i) {
         if (lod_level_0[i] == lod_level_0[i + 1]) continue;
...
@@ -621,7 +621,10 @@ class VecActivations {
     } else if (type == "identity" || type == "") {
       return vec_identity<T, isa>;
     }
-    PADDLE_THROW("Not support type: %s", type);
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "Expected type should be one of sigmod, relu, tanh, identity. But got "
+        "not support type: %s.",
+        type));
   }
 };
...
@@ -27,8 +27,8 @@ __global__ void CrossEntropyKernel(T* Y, const T* X, const int64_t* label,
                                    const int ignore_index) {
   CUDA_KERNEL_LOOP(i, N) {
     PADDLE_ENFORCE(label[i] >= 0 && label[i] < D || label[i] == ignore_index,
-                   "label[%d] expected >= 0 and < %ld, or == %ld, but got "
-                   "%ld. Please check input value.",
+                   "The value of label[%d] expected >= 0 and < %ld, or == %ld, "
+                   "but got %ld. Please check input value.",
                    i, D, ignore_index, label[i]);
     Y[i] = ignore_index == label[i]
                ? static_cast<T>(0)
...
@@ -34,9 +34,16 @@ class Im2ColFunctor<paddle::operators::math::ColFormat::kCFO,
                   const std::vector<int>& stride,
                   const std::vector<int>& padding, framework::Tensor* col,
                   const DataLayout data_layout) {
-    PADDLE_ENFORCE_EQ(im.dims().size(), 3, "The dimension of im should be 3.");
+    PADDLE_ENFORCE_EQ(im.dims().size(), 3,
+                      platform::errors::InvalidArgument(
+                          "The dimension of tensor 'im' should be 3. But got "
+                          "the dims of tensor 'im' is [%s].",
+                          im.dims()));
     PADDLE_ENFORCE_EQ(col->dims().size(), 5,
-                      "The dimension of col should be 5.");
+                      platform::errors::InvalidArgument(
+                          "The dimension of tensor 'col' should be 5. But got "
+                          "the dims of tensor 'col' is [%s].",
+                          col->dims()));
     if (stride[0] == 1 && stride[1] == 1 && dilation[0] == 1 &&
         dilation[1] == 1) {
@@ -70,9 +77,16 @@ class Col2ImFunctor<paddle::operators::math::ColFormat::kCFO,
                   const std::vector<int>& stride,
                   const std::vector<int>& padding, framework::Tensor* im,
                   const DataLayout data_layout) {
-    PADDLE_ENFORCE_EQ(im->dims().size(), 3, "The dimension of im should be 3.");
+    PADDLE_ENFORCE_EQ(im->dims().size(), 3,
+                      platform::errors::InvalidArgument(
+                          "The dimension of tensor 'im' should be 3. But got "
+                          "the dims of tensor 'im' is [%s].",
+                          im->dims()));
     PADDLE_ENFORCE_EQ(col.dims().size(), 5,
-                      "The dimension of col should be 5.");
+                      platform::errors::InvalidArgument(
+                          "The dimension of tensor 'col' should be 5. But got "
+                          "the dims of tensor 'col' is [%s].",
+                          col.dims()));
     int im_channels =
         (data_layout != DataLayout::kNHWC ? im->dims()[0] : im->dims()[2]);
     int im_height =
@@ -88,16 +102,16 @@ class Col2ImFunctor<paddle::operators::math::ColFormat::kCFO,
                           ((dilation[0] * (filter_height - 1) + 1))) /
                               stride[0] +
                           1,
-                      col_height,
-                      "Output_height and padding(padding_up, padding_down) are "
-                      "inconsistent.");
+                      col_height, platform::errors::InvalidArgument(
+                                      "Output_height and padding(padding_up, "
+                                      "padding_down) are inconsistent."));
     PADDLE_ENFORCE_EQ((im_width + padding[1] + padding[3] -
                           ((dilation[1] * (filter_width - 1) + 1))) /
                               stride[1] +
                           1,
-                      col_width,
-                      "Output_height and padding(padding_up, padding_down) are "
-                      "inconsistent.");
+                      col_width, platform::errors::InvalidArgument(
+                                      "Output_height and padding(padding_up, "
+                                      "padding_down) are inconsistent."));
     int channels_col = im_channels * filter_height * filter_width;
@@ -154,9 +168,16 @@ class Im2ColFunctor<paddle::operators::math::ColFormat::kOCF,
                   const std::vector<int>& stride,
                   const std::vector<int>& padding, framework::Tensor* col,
                   const DataLayout data_layout) {
-    PADDLE_ENFORCE_EQ(im.dims().size(), 3, "The dimension of im should be 3.");
+    PADDLE_ENFORCE_EQ(im.dims().size(), 3,
+                      platform::errors::InvalidArgument(
+                          "The dimension of tensor 'im' should be 3. But got "
+                          "the dims of tensor 'im' is [%s].",
+                          im.dims()));
     PADDLE_ENFORCE_EQ(col->dims().size(), 5,
-                      "The dimension of col should be 5.");
+                      platform::errors::InvalidArgument(
+                          "The dimension of tensor 'col' should be 5. But got "
+                          "the dims of tensor 'col' is [%s].",
+                          col->dims()));
     int im_channels = im.dims()[0];
     int im_height = im.dims()[1];
     int im_width = im.dims()[2];
@@ -218,9 +239,16 @@ class Col2ImFunctor<paddle::operators::math::ColFormat::kOCF,
                   const std::vector<int>& stride,
                   const std::vector<int>& padding, framework::Tensor* im,
                   const DataLayout data_layout) {
-    PADDLE_ENFORCE_EQ(im->dims().size(), 3, "The dimension of im should be 3.");
+    PADDLE_ENFORCE_EQ(im->dims().size(), 3,
+                      platform::errors::InvalidArgument(
+                          "The dimension of tensor 'im' should be 3. But got "
+                          "the dims of tensor 'im' is [%s].",
+                          im->dims()));
     PADDLE_ENFORCE_EQ(col.dims().size(), 5,
-                      "The dimension of col should be 5.");
+                      platform::errors::InvalidArgument(
+                          "The dimension of tensor 'col' should be 5. But got "
+                          "the dims of tensor 'col' is [%s].",
+                          col.dims()));
     int im_channels = im->dims()[0];
     int im_height = im->dims()[1];
     int im_width = im->dims()[2];
@@ -231,14 +259,14 @@ class Col2ImFunctor<paddle::operators::math::ColFormat::kOCF,
     PADDLE_ENFORCE_EQ(
         (im_height + padding[0] + padding[2] - filter_height) / stride[0] + 1,
-        col_height,
-        "Output_height and padding(padding_up, padding_down) are "
-        "inconsistent.");
+        col_height, platform::errors::InvalidArgument(
+                        "Output_height and padding(padding_up, padding_down) "
+                        "are inconsistent."));
     PADDLE_ENFORCE_EQ(
         (im_width + padding[1] + padding[3] - filter_width) / stride[1] + 1,
         col_width,
-        "col_width and padding(padding_left, padding_right) are "
-        "inconsistent.");
+        platform::errors::InvalidArgument("col_width and padding(padding_left, "
+                                          "padding_right) are inconsistent."));
     T* im_data = im->data<T>();
     const T* col_data = col.data<T>();
...
@@ -81,9 +81,16 @@ class Im2ColFunctor<paddle::operators::math::ColFormat::kCFO,
                   const std::vector<int>& stride,
                   const std::vector<int>& padding, framework::Tensor* col,
                   const DataLayout data_layout) {
-    PADDLE_ENFORCE_EQ(im.dims().size(), 3, "The dimension of im should be 3.");
+    PADDLE_ENFORCE_EQ(im.dims().size(), 3,
+                      platform::errors::InvalidArgument(
+                          "The dimension of tensor 'im' should be 3. But got "
+                          "the dims of tensor 'im' is [%s].",
+                          im.dims()));
     PADDLE_ENFORCE_EQ(col->dims().size(), 5,
-                      "The dimension of col should be 5.");
+                      platform::errors::InvalidArgument(
+                          "The dimension of tensor 'col' should be 5. But got "
+                          "the dims of tensor 'col' is [%s].",
+                          col->dims()));
     int im_channels =
         (data_layout != DataLayout::kNHWC ? im.dims()[0] : im.dims()[2]);
@@ -182,9 +189,16 @@ class Col2ImFunctor<paddle::operators::math::ColFormat::kCFO,
                   const std::vector<int>& stride,
                   const std::vector<int>& padding, framework::Tensor* im,
                   const DataLayout data_layout) {
-    PADDLE_ENFORCE_EQ(im->dims().size(), 3, "The dimension of im should be 3.");
+    PADDLE_ENFORCE_EQ(im->dims().size(), 3,
+                      platform::errors::InvalidArgument(
+                          "The dimension of tensor 'im' should be 3. But got "
+                          "the dims of tensor 'im' is [%s].",
+                          im->dims()));
     PADDLE_ENFORCE_EQ(col.dims().size(), 5,
-                      "The dimension of col should be 5.");
+                      platform::errors::InvalidArgument(
+                          "The dimension of tensor 'col' should be 5. But got "
+                          "the dims of tensor 'col' is [%s].",
+                          col.dims()));
     int im_channels =
         (data_layout != DataLayout::kNHWC ? im->dims()[0] : im->dims()[2]);
@@ -201,16 +215,16 @@ class Col2ImFunctor<paddle::operators::math::ColFormat::kCFO,
                          (dilation[0] * (filter_height - 1) + 1)) /
                             stride[0] +
                         1,
-                      col_height,
-                      "Output_height and padding(padding_up, padding_down) are "
-                      "inconsistent.");
+                      col_height, platform::errors::InvalidArgument(
+                                      "Output_height and padding(padding_up, "
+                                      "padding_down) are inconsistent."));
     PADDLE_ENFORCE_EQ((im_width + padding[1] + padding[3] -
                          (dilation[1] * (filter_width - 1) + 1)) /
                             stride[1] +
                         1,
-                      col_width,
-                      "col_width and padding(padding_left, padding_right) are "
-                      "inconsistent.");
+                      col_width, platform::errors::InvalidArgument(
+                                      "col_width and padding(padding_left, "
+                                      "padding_right) are inconsistent."));
     size_t num_kernels = im_channels * im_height * im_width;
@@ -285,9 +299,16 @@ class Im2ColFunctor<paddle::operators::math::ColFormat::kOCF,
                   const std::vector<int>& stride,
                   const std::vector<int>& padding, framework::Tensor* col,
                   const DataLayout data_layout) {
-    PADDLE_ENFORCE_EQ(im.dims().size(), 3, "The dimension of im should be 3.");
+    PADDLE_ENFORCE_EQ(im.dims().size(), 3,
+                      platform::errors::InvalidArgument(
+                          "The dimension of tensor 'im' should be 3. But got "
+                          "the dims of tensor 'im' is [%s].",
+                          im.dims()));
     PADDLE_ENFORCE_EQ(col->dims().size(), 5,
-                      "The dimension of col should be 5.");
+                      platform::errors::InvalidArgument(
+                          "The dimension of tensor 'col' should be 5. But got "
+                          "the dims of tensor 'col' is [%s].",
+                          col->dims()));
     int im_channels = im.dims()[0];
     int im_height = im.dims()[1];
@@ -370,9 +391,16 @@ class Col2ImFunctor<paddle::operators::math::ColFormat::kOCF,
                   const std::vector<int>& stride,
                   const std::vector<int>& padding, framework::Tensor* im,
                   const DataLayout data_layout) {
-    PADDLE_ENFORCE_EQ(im->dims().size(), 3, "The dimension of im should be 3.");
+    PADDLE_ENFORCE_EQ(im->dims().size(), 3,
+                      platform::errors::InvalidArgument(
+                          "The dimension of tensor 'im' should be 3. But got "
+                          "the dims of tensor 'im' is [%s].",
+                          im->dims()));
     PADDLE_ENFORCE_EQ(col.dims().size(), 5,
-                      "The dimension of col should be 5.");
+                      platform::errors::InvalidArgument(
+                          "The dimension of tensor 'col' should be 5. But got "
+                          "the dims of tensor 'col' is [%s].",
+                          col.dims()));
     int im_channels = im->dims()[0];
     int im_height = im->dims()[1];
@@ -386,16 +414,16 @@ class Col2ImFunctor<paddle::operators::math::ColFormat::kOCF,
                          (dilation[0] * (filter_height - 1) + 1)) /
                             stride[0] +
                         1,
-                      col_height,
-                      "Output_height and padding(padding_up, padding_down) are "
-                      "inconsistent.");
+                      col_height, platform::errors::InvalidArgument(
+                                      "Output_height and padding(padding_up, "
+                                      "padding_down) are inconsistent."));
     PADDLE_ENFORCE_EQ((im_width + padding[1] + padding[3] -
                          (dilation[1] * (filter_width - 1) + 1)) /
                             stride[1] +
                         1,
-                      col_width,
-                      "col_width and padding(padding_left, padding_right) are "
-                      "inconsistent.");
+                      col_width, platform::errors::InvalidArgument(
+                                      "col_width and padding(padding_left, "
+                                      "padding_right) are inconsistent."));
     int block_dim_x = 0;
     int block_dim_y = 0;
...