diff --git a/paddle/fluid/operators/math/math_function.cc b/paddle/fluid/operators/math/math_function.cc index 824e66b1eb4ae05cc74dc1cd8c21f16f286592e6..f44b33fcf2fc23f79483909046dd9e292fd8dde8 100644 --- a/paddle/fluid/operators/math/math_function.cc +++ b/paddle/fluid/operators/math/math_function.cc @@ -128,9 +128,23 @@ struct RowwiseAdd { const framework::Tensor& input, const framework::Tensor& vector, framework::Tensor* output) { auto in_dims = input.dims(); + auto out_dims = output->dims(); auto size = input.numel() / in_dims[0]; - PADDLE_ENFORCE_EQ(vector.numel(), size); - PADDLE_ENFORCE_EQ(output->dims(), in_dims); + PADDLE_ENFORCE_EQ( + vector.numel(), size, + platform::errors::InvalidArgument( + "The input vector size" + " should be equal to the size of each row of input tensor." + " Expected vector size=%d, but received %d", + size, vector.numel())); + std::string in_dims_cstr = in_dims.to_str(); + std::string out_dims_cstr = out_dims.to_str(); + PADDLE_ENFORCE_EQ(out_dims, in_dims, + platform::errors::InvalidArgument( + "The output tensor shape should be the same as the input" + " tensor shape. 
Expected output tensor shape: %s," + " but received %s", + in_dims_cstr, out_dims_cstr)); auto in = framework::EigenMatrix::From(input); auto vec = framework::EigenVector::Flatten(vector); diff --git a/paddle/fluid/operators/math/math_function.cu b/paddle/fluid/operators/math/math_function.cu index fba143d017deb4b4814ad8b10e614357a7ebee23..1c519d226ebfe5ff19876f17b79fd36aa12c4130 100644 --- a/paddle/fluid/operators/math/math_function.cu +++ b/paddle/fluid/operators/math/math_function.cu @@ -88,9 +88,24 @@ struct RowwiseAdd { const framework::Tensor& input, const framework::Tensor& vector, framework::Tensor* output) { auto in_dims = input.dims(); + auto out_dims = output->dims(); auto size = input.numel() / in_dims[0]; - PADDLE_ENFORCE_EQ(vector.numel(), size); - PADDLE_ENFORCE_EQ(output->dims(), in_dims); + PADDLE_ENFORCE_EQ( + vector.numel(), size, + platform::errors::InvalidArgument( + "The input vector size" + " should be equal to the size of each row of input tensor." + " Expected vector size=%d, but received %d", + size, vector.numel())); + std::string in_dims_cstr = in_dims.to_str(); + std::string out_dims_cstr = out_dims.to_str(); + PADDLE_ENFORCE_EQ( + out_dims, in_dims, + platform::errors::InvalidArgument( + "The output tensor shape should be the same as the input tensor" + " shape. Expected output tensor shape: %s," + " but received %s", + in_dims_cstr, out_dims_cstr)); int blocks = 512; int grids = (input.numel() + blocks - 1) / blocks; RowwiseAddKernel<<>>( @@ -113,7 +128,12 @@ void ColwiseSum::operator()( framework::Tensor* vector) { auto in_dims = input.dims(); auto size = input.numel() / in_dims[0]; - PADDLE_ENFORCE_EQ(vector->numel(), size); + PADDLE_ENFORCE_EQ(vector->numel(), size, + platform::errors::InvalidArgument( + "The size of input vector" + " should be equal to the size of input tensor column" + " dimension. 
Expected vector size=%d, but received %d", + size, vector->numel())); framework::Tensor one; one.mutable_data({in_dims[0]}, context.GetPlace()); SetConstant set; @@ -134,7 +154,12 @@ void RowwiseSum::operator()( framework::Tensor* vector) { auto in_dims = input.dims(); auto size = input.numel() / in_dims[0]; - PADDLE_ENFORCE_EQ(vector->numel(), in_dims[0]); + PADDLE_ENFORCE_EQ(vector->numel(), in_dims[0], + platform::errors::InvalidArgument( + "The size of input vector" + " should be equal to the size of input tensor row" + " dimension. Expected vector size=%d, but received %d", + in_dims[0], vector->numel())); framework::Tensor one; one.mutable_data({size}, context.GetPlace()); SetConstant set; diff --git a/paddle/fluid/operators/math/math_function_impl.h b/paddle/fluid/operators/math/math_function_impl.h index 693d5620460e1fe6f6d82bd0749b0780b64841f5..869a3054598da9cd2223ca0e705c0f910ba043ec 100644 --- a/paddle/fluid/operators/math/math_function_impl.h +++ b/paddle/fluid/operators/math/math_function_impl.h @@ -59,7 +59,12 @@ void ColwiseSum::operator()(const DeviceContext& context, framework::Tensor* out) { auto in_dims = input.dims(); auto size = input.numel() / in_dims[0]; - PADDLE_ENFORCE_EQ(out->numel(), size); + PADDLE_ENFORCE_EQ(out->numel(), size, + platform::errors::InvalidArgument( + "The size of output tensor " + "should be equal to the size of input tensor column" + " dimension. Expected output size=%d, but received %d", + size, out->numel())); auto in = framework::EigenMatrix::From(input); auto vec = framework::EigenVector::Flatten(*out); @@ -78,7 +83,13 @@ class ColwiseSum { auto& in_dims = input.dims(); auto height = in_dims[0]; auto size = in_dims[1]; - PADDLE_ENFORCE_EQ(out->numel(), size); + PADDLE_ENFORCE_EQ( + out->numel(), size, + platform::errors::InvalidArgument( + "The size of output tensor " + "should be equal to the size of input tensor column" + " dimension. 
Expected output size=%d, but received %d", + size, out->numel())); T* out_buf = out->mutable_data(out->place()); const T* in_buf = input.data(); @@ -100,8 +111,16 @@ void RowwiseMean::operator()(const DeviceContext& context, const framework::Tensor& input, framework::Tensor* out) { auto in_dims = input.dims(); - PADDLE_ENFORCE_EQ(in_dims.size(), 2U); - PADDLE_ENFORCE_EQ(out->numel(), in_dims[0]); + PADDLE_ENFORCE_EQ(in_dims.size(), 2U, platform::errors::InvalidArgument( + "The rank of input tensor " + "should be 2, but received %d", + in_dims.size())); + PADDLE_ENFORCE_EQ(out->numel(), in_dims[0], + platform::errors::InvalidArgument( + "The size of output tensor " + "should be equal to the size of input tensor row" + " dimension. Expected output size=%d, but received %d", + in_dims[0], out->numel())); auto in = framework::EigenMatrix::From(input); auto vec = framework::EigenVector::Flatten(*out); @@ -118,10 +137,19 @@ class RowwiseMean { void operator()(const platform::CPUDeviceContext& context, const framework::Tensor& input, framework::Tensor* out) { auto& in_dims = input.dims(); - PADDLE_ENFORCE_EQ(in_dims.size(), 2U); + PADDLE_ENFORCE_EQ(in_dims.size(), 2U, platform::errors::InvalidArgument( + "The rank of input tensor " + "should be 2, but received %d", + in_dims.size())); auto height = in_dims[0]; auto size = in_dims[1]; - PADDLE_ENFORCE_EQ(out->numel(), height); + PADDLE_ENFORCE_EQ( + out->numel(), height, + platform::errors::InvalidArgument( + "The size of output tensor " + "should be equal to the size of input tensor row" + " dimension. 
Expected output size=%d, but received %d", + height, out->numel())); auto inv_size = 1.0 / size; T* out_buf = out->mutable_data(out->place()); const T* in_buf = input.data(); @@ -141,8 +169,16 @@ void RowwiseSum::operator()(const DeviceContext& context, const framework::Tensor& input, framework::Tensor* out) { auto in_dims = input.dims(); - PADDLE_ENFORCE_EQ(in_dims.size(), 2U); - PADDLE_ENFORCE_EQ(out->numel(), in_dims[0]); + PADDLE_ENFORCE_EQ(in_dims.size(), 2U, platform::errors::InvalidArgument( + "The rank of input tensor " + "should be 2, but received %d", + in_dims.size())); + PADDLE_ENFORCE_EQ(out->numel(), in_dims[0], + platform::errors::InvalidArgument( + "The size of output tensor " + "should be equal to the size of input tensor row" + " dimension. Expected output size=%d, but received %d", + in_dims[0], out->numel())); auto in = framework::EigenMatrix::From(input); auto vec = framework::EigenVector::Flatten(*out); @@ -159,10 +195,19 @@ class RowwiseSum { void operator()(const platform::CPUDeviceContext& context, const framework::Tensor& input, framework::Tensor* out) { auto& in_dims = input.dims(); - PADDLE_ENFORCE_EQ(in_dims.size(), 2U); + PADDLE_ENFORCE_EQ(in_dims.size(), 2U, platform::errors::InvalidArgument( + "The rank of input tensor " + "should be 2, but received %d", + in_dims.size())); auto height = in_dims[0]; auto size = in_dims[1]; - PADDLE_ENFORCE_EQ(out->numel(), height); + PADDLE_ENFORCE_EQ( + out->numel(), height, + platform::errors::InvalidArgument( + "The size of output tensor " + "should be equal to the size of input tensor row" + " dimension. 
Expected output size=%d, but received %d", + height, out->numel())); T* out_buf = out->mutable_data(out->place()); const T* in_buf = input.data(); diff --git a/paddle/fluid/operators/math/math_function_test.cc b/paddle/fluid/operators/math/math_function_test.cc index 2343e0ee965303c9fdb2ad3faf9ddf6e5bb7782f..587823e535ac67f926fd469d2f43df536c8c88b6 100644 --- a/paddle/fluid/operators/math/math_function_test.cc +++ b/paddle/fluid/operators/math/math_function_test.cc @@ -224,7 +224,11 @@ TEST(math_funciton, set_constant) { auto* ctx = new paddle::platform::CPUDeviceContext(); paddle::operators::math::set_constant(*ctx, &t, 10); for (int64_t i = 0; i < t.numel(); ++i) { - PADDLE_ENFORCE_EQ(10, t.data()[i]); + PADDLE_ENFORCE_EQ(10, t.data()[i], + paddle::platform::errors::InvalidArgument( + "Each value of input" + " tensor should be 10, but received %d.", + t.data()[i])); } delete ctx; } diff --git a/paddle/fluid/operators/math/math_function_test.cu b/paddle/fluid/operators/math/math_function_test.cu index bcbb4a8274f149240b9f0990f38d9f38bdd0e5b1..44b1ee45a4fe9b6f2ea7ba5e09c7cbc60c1aff28 100644 --- a/paddle/fluid/operators/math/math_function_test.cu +++ b/paddle/fluid/operators/math/math_function_test.cu @@ -18,7 +18,12 @@ void fill_fp16_data(paddle::platform::float16* in_ptr, size_t size, const std::vector& data) { - PADDLE_ENFORCE_EQ(size, data.size()); + PADDLE_ENFORCE_EQ( + size, data.size(), + paddle::platform::errors::InvalidArgument( + "The size of argument data should" + " be equal to the argument size. 
Expected %d, but received %d.", + size, data.size())); for (size_t i = 0; i < data.size(); ++i) { in_ptr[i] = paddle::platform::float16(data[i]); } diff --git a/paddle/fluid/operators/math/padding.h b/paddle/fluid/operators/math/padding.h index 63f793433de07ea2e43ad03ea3ccae1a259f7ae2..379b21c3c18888989663221052e6e99df80e7e9d 100644 --- a/paddle/fluid/operators/math/padding.h +++ b/paddle/fluid/operators/math/padding.h @@ -85,8 +85,9 @@ void PaddingFunctor(int rank, const framework::ExecutionContext& context, PadFunction(context, pads, src, pad_value, out); break; default: - PADDLE_THROW( - "PadOp only support tensors with no more than 6 dimensions."); + PADDLE_THROW(platform::errors::Unimplemented( + "PadOp only support tensors with no more" + " than 6 dimensions currently.")); } } @@ -114,8 +115,9 @@ void PaddingGradFunctor(int rank, const framework::ExecutionContext& context, PadGradFunction(context, pads, src, out); break; default: - PADDLE_THROW( - "PadOp only support tensors with no more than 6 dimensions."); + PADDLE_THROW(platform::errors::Unimplemented( + "PadOp only support tensors with no more" + " than 6 dimensions currently.")); } } diff --git a/paddle/fluid/operators/math/sampler.h b/paddle/fluid/operators/math/sampler.h index 480576ef9dc8c21811a1a867d553ccc6d97fa22a..de9113f2bb616b489747d8d960154f55bb988847 100644 --- a/paddle/fluid/operators/math/sampler.h +++ b/paddle/fluid/operators/math/sampler.h @@ -19,6 +19,8 @@ limitations under the License. 
*/ #include #include +#include "paddle/fluid/platform/enforce.h" + namespace paddle { namespace operators { namespace math { @@ -31,7 +33,10 @@ namespace math { class Sampler { public: explicit Sampler(int64_t range, unsigned int seed = 0UL) : range_(range) { - // PADDLE_ENFORCE_GT(range, 0, "Range should be greater than 0."); + PADDLE_ENFORCE_GT(range, 0, platform::errors::InvalidArgument( + "Range should be" + " greater than 0, but received %d.", + range)); if (seed == 0) { std::random_device r; seed_ = r(); diff --git a/paddle/fluid/operators/math/vol2col.cc b/paddle/fluid/operators/math/vol2col.cc index 01f50727b442579fa62059560d0c75d329d6e288..c05da0062f2bab66746feb9d8ebedeca0c0f9688 100644 --- a/paddle/fluid/operators/math/vol2col.cc +++ b/paddle/fluid/operators/math/vol2col.cc @@ -34,10 +34,16 @@ class Vol2ColFunctor { const std::vector& strides, const std::vector& paddings, framework::Tensor* col, const DataLayout data_layout) const { - PADDLE_ENFORCE_EQ(vol.dims().size(), 4, - "The dimension of vol should be 4."); - PADDLE_ENFORCE_EQ(col->dims().size(), 7, - "The dimension of col should be 7."); + PADDLE_ENFORCE_EQ( + vol.dims().size(), 4, + platform::errors::InvalidArgument("The dimension of" + " vol should be 4, but received %d.", + vol.dims().size())); + PADDLE_ENFORCE_EQ( + col->dims().size(), 7, + platform::errors::InvalidArgument("The dimension of" + " col should be 7, but received %d.", + col->dims().size())); int input_channels = (data_layout != DataLayout::kNHWC ? vol.dims()[0] : vol.dims()[3]); @@ -65,27 +71,33 @@ class Vol2ColFunctor { int pad_w_left = paddings_size_is_6 ? paddings[4] : paddings[2]; int pad_w_right = paddings_size_is_6 ? 
paddings[5] : paddings[2]; - PADDLE_ENFORCE_EQ((input_depth + pad_d_forth + pad_d_back - - ((dilations[0] * (filter_depth - 1) + 1))) / - strides[0] + - 1, - output_depth, - "input_depth and output_depth are " - "mismatching."); - PADDLE_ENFORCE_EQ((input_height + pad_h_up + pad_h_down - - ((dilations[1] * (filter_height - 1) + 1))) / - strides[1] + - 1, - output_height, - "input_height and output_height are " - "mismatching."); - PADDLE_ENFORCE_EQ((input_width + pad_w_left + pad_w_right - - ((dilations[2] * (filter_width - 1) + 1))) / - strides[2] + - 1, - output_width, - "input_width and output_width are " - "mismatching."); + auto input_depth_tmp = (input_depth + pad_d_forth + pad_d_back - + ((dilations[0] * (filter_depth - 1) + 1))) / + strides[0] + + 1; + PADDLE_ENFORCE_EQ( + input_depth_tmp, output_depth, + platform::errors::InvalidArgument( + "input_depth(%d) and output_depth(%d) are mismatching.", + input_depth_tmp, output_depth)); + auto input_height_tmp = (input_height + pad_h_up + pad_h_down - + ((dilations[1] * (filter_height - 1) + 1))) / + strides[1] + + 1; + PADDLE_ENFORCE_EQ( + input_height_tmp, output_height, + platform::errors::InvalidArgument( + "input_height(%d) and output_height(%d) are mismatching.", + input_height_tmp, output_height)); + auto input_width_tmp = (input_width + pad_w_left + pad_w_right - + ((dilations[2] * (filter_width - 1) + 1))) / + strides[2] + + 1; + PADDLE_ENFORCE_EQ( + input_width_tmp, output_width, + platform::errors::InvalidArgument( + "input_width(%d) and output_width(%d) are mismatching.", + input_width_tmp, output_width)); const T* vol_data = vol.data(); T* col_data = col->data(); @@ -140,10 +152,16 @@ class Col2VolFunctor { const std::vector& strides, const std::vector& paddings, framework::Tensor* vol, const DataLayout data_layout) const { - PADDLE_ENFORCE_EQ(vol->dims().size(), 4, - "The dimension of vol should be 4."); - PADDLE_ENFORCE_EQ(col.dims().size(), 7, - "The dimension of col should be 7."); + 
PADDLE_ENFORCE_EQ( + vol->dims().size(), 4, + platform::errors::InvalidArgument("The dimension of vol" + " should be 4, but received %d.", + vol->dims().size())); + PADDLE_ENFORCE_EQ( + col.dims().size(), 7, + platform::errors::InvalidArgument("The dimension of col" + " should be 7, but received %d.", + col.dims().size())); int input_channels = (data_layout != DataLayout::kNHWC ? vol->dims()[0] : vol->dims()[3]); @@ -170,27 +188,33 @@ class Col2VolFunctor { int pad_w_left = paddings_size_is_6 ? paddings[4] : paddings[2]; int pad_w_right = paddings_size_is_6 ? paddings[5] : paddings[2]; - PADDLE_ENFORCE_EQ((input_depth + pad_d_forth + pad_d_back - - ((dilations[0] * (filter_depth - 1) + 1))) / - strides[0] + - 1, - output_depth, - "input_depth and output_depth are " - "mismatching."); - PADDLE_ENFORCE_EQ((input_height + pad_h_up + pad_h_down - - ((dilations[1] * (filter_height - 1) + 1))) / - strides[1] + - 1, - output_height, - "input_height and output_height are " - "mismatching."); - PADDLE_ENFORCE_EQ((input_width + pad_w_left + pad_w_right - - ((dilations[2] * (filter_width - 1) + 1))) / - strides[2] + - 1, - output_width, - "input_width and output_width are " - "mismatching."); + auto input_depth_tmp = (input_depth + pad_d_forth + pad_d_back - + ((dilations[0] * (filter_depth - 1) + 1))) / + strides[0] + + 1; + PADDLE_ENFORCE_EQ(input_depth_tmp, output_depth, + platform::errors::InvalidArgument( + "input_depth(%d)" + " and output_depth(%d) are mismatching.", + input_depth_tmp, output_depth)); + auto input_height_tmp = (input_height + pad_h_up + pad_h_down - + ((dilations[1] * (filter_height - 1) + 1))) / + strides[1] + + 1; + PADDLE_ENFORCE_EQ(input_height_tmp, output_height, + platform::errors::InvalidArgument( + "input_height(%d)" + " and output_height(%d) are mismatching.", + input_height_tmp, output_height)); + auto input_width_tmp = (input_width + pad_w_left + pad_w_right - + ((dilations[2] * (filter_width - 1) + 1))) / + strides[2] + + 1; + 
PADDLE_ENFORCE_EQ(input_width_tmp, output_width, + platform::errors::InvalidArgument( + "input_width(%d)" + " and output_width(%d) are mismatching.", + input_width_tmp, output_width)); T* vol_data = vol->data(); const T* col_data = col.data(); diff --git a/paddle/fluid/operators/math/vol2col.cu b/paddle/fluid/operators/math/vol2col.cu index 9de9051f512348f2567bfc35ae775b1852ed25fc..fe5a600909893b8313d470923ef4d43eae155e76 100644 --- a/paddle/fluid/operators/math/vol2col.cu +++ b/paddle/fluid/operators/math/vol2col.cu @@ -90,10 +90,16 @@ class Vol2ColFunctor { const std::vector& strides, const std::vector& paddings, framework::Tensor* col, const DataLayout data_layout) const { - PADDLE_ENFORCE_EQ(vol.dims().size(), 4, - "The dimension of vol should be 4."); - PADDLE_ENFORCE_EQ(col->dims().size(), 7, - "The dimension of col should be 7."); + PADDLE_ENFORCE_EQ( + vol.dims().size(), 4, + platform::errors::InvalidArgument("The dimension of" + " vol should be 4, but received %d.", + vol.dims().size())); + PADDLE_ENFORCE_EQ( + col->dims().size(), 7, + platform::errors::InvalidArgument("The dimension of" + " col should be 7, but received %d.", + col->dims().size())); int input_channels = (data_layout != DataLayout::kNHWC ? vol.dims()[0] : vol.dims()[3]); @@ -117,27 +123,33 @@ class Vol2ColFunctor { int pad_h_down = paddings_size_is_6 ? paddings[3] : paddings[1]; int pad_w_left = paddings_size_is_6 ? paddings[4] : paddings[2]; int pad_w_right = paddings_size_is_6 ? 
paddings[5] : paddings[2]; - PADDLE_ENFORCE_EQ((input_depth + pad_d_forth + pad_d_back - - ((dilations[0] * (filter_depth - 1) + 1))) / - strides[0] + - 1, - output_depth, - "input_depth and output_depth are " - "mismatching."); - PADDLE_ENFORCE_EQ((input_height + pad_h_up + pad_h_down - - ((dilations[1] * (filter_height - 1) + 1))) / - strides[1] + - 1, - output_height, - "input_height and output_height are " - "mismatching."); - PADDLE_ENFORCE_EQ((input_width + pad_w_left + pad_w_right - - ((dilations[2] * (filter_width - 1) + 1))) / - strides[2] + - 1, - output_width, - "input_width and output_width are " - "mismatching."); + auto input_depth_tmp = (input_depth + pad_d_forth + pad_d_back - + ((dilations[0] * (filter_depth - 1) + 1))) / + strides[0] + + 1; + PADDLE_ENFORCE_EQ( + input_depth_tmp, output_depth, + platform::errors::InvalidArgument( + "input_depth(%d) and output_depth(%d) are mismatching.", + input_depth_tmp, output_depth)); + auto input_height_tmp = (input_height + pad_h_up + pad_h_down - + ((dilations[1] * (filter_height - 1) + 1))) / + strides[1] + + 1; + PADDLE_ENFORCE_EQ( + input_height_tmp, output_height, + platform::errors::InvalidArgument( + "input_height(%d) and output_height(%d) are mismatching.", + input_height_tmp, output_height)); + auto input_width_tmp = (input_width + pad_w_left + pad_w_right - + ((dilations[2] * (filter_width - 1) + 1))) / + strides[2] + + 1; + PADDLE_ENFORCE_EQ( + input_width_tmp, output_width, + platform::errors::InvalidArgument( + "input_width(%d) and output_width(%d) are mismatching.", + input_width_tmp, output_width)); int num_outputs = input_channels * output_depth * output_height * output_width; @@ -241,10 +253,16 @@ class Col2VolFunctor { const std::vector& strides, const std::vector& paddings, framework::Tensor* vol, const DataLayout data_layout) const { - PADDLE_ENFORCE_EQ(vol->dims().size(), 4, - "The dimension of vol should be 4."); - PADDLE_ENFORCE_EQ(col.dims().size(), 7, - "The dimension of col should 
be 7."); + PADDLE_ENFORCE_EQ( + vol->dims().size(), 4, + platform::errors::InvalidArgument("The dimension of vol" + " should be 4, but received %d.", + vol->dims().size())); + PADDLE_ENFORCE_EQ( + col.dims().size(), 7, + platform::errors::InvalidArgument("The dimension of col" + " should be 7, but received %d.", + col.dims().size())); int input_channels = (data_layout != DataLayout::kNHWC ? vol->dims()[0] : vol->dims()[3]); @@ -269,27 +287,33 @@ class Col2VolFunctor { int pad_w_left = paddings_size_is_6 ? paddings[4] : paddings[2]; int pad_w_right = paddings_size_is_6 ? paddings[5] : paddings[2]; - PADDLE_ENFORCE_EQ((input_depth + pad_d_forth + pad_d_back - - ((dilations[0] * (filter_depth - 1) + 1))) / - strides[0] + - 1, - output_depth, - "input_depth and output_depth are " - "mismatching."); - PADDLE_ENFORCE_EQ((input_height + pad_h_up + pad_h_down - - ((dilations[1] * (filter_height - 1) + 1))) / - strides[1] + - 1, - output_height, - "input_height and output_height are " - "mismatching."); - PADDLE_ENFORCE_EQ((input_width + pad_w_left + pad_w_right - - ((dilations[2] * (filter_width - 1) + 1))) / - strides[2] + - 1, - output_width, - "input_width and output_width are " - "mismatching."); + auto input_depth_tmp = (input_depth + pad_d_forth + pad_d_back - + ((dilations[0] * (filter_depth - 1) + 1))) / + strides[0] + + 1; + PADDLE_ENFORCE_EQ(input_depth_tmp, output_depth, + platform::errors::InvalidArgument( + "input_depth(%d)" + " and output_depth(%d) are mismatching.", + input_depth_tmp, output_depth)); + auto input_height_tmp = (input_height + pad_h_up + pad_h_down - + ((dilations[1] * (filter_height - 1) + 1))) / + strides[1] + + 1; + PADDLE_ENFORCE_EQ(input_height_tmp, output_height, + platform::errors::InvalidArgument( + "input_height(%d)" + " and output_height(%d) are mismatching.", + input_height_tmp, output_height)); + auto input_width_tmp = (input_width + pad_w_left + pad_w_right - + ((dilations[2] * (filter_width - 1) + 1))) / + strides[2] + + 1; + 
PADDLE_ENFORCE_EQ(input_width_tmp, output_width, + platform::errors::InvalidArgument( + "input_width(%d)" + " and output_width(%d) are mismatching.", + input_width_tmp, output_width)); int num_kernels = input_channels * input_depth * input_height * input_width;