Unverified commit ed292695, authored by kinghuin, committed by GitHub

optimize the error message for math dir

Parent: eb276632
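
Every hunk below applies the same pattern: the bare message string passed to a PADDLE_ENFORCE_* macro is wrapped in a typed error built by platform::errors::InvalidArgument, and printf-style placeholders splice the offending runtime values into the message. A condensed before/after sketch of the pattern, using the first check touched by this commit:

    // Before: the failed check reports a static string with no runtime context.
    PADDLE_ENFORCE_EQ(src_dims.size(), 2UL,
                      "The src must be matrix with rank 2.");

    // After: a typed InvalidArgument error whose message embeds the actual rank.
    PADDLE_ENFORCE_EQ(src_dims.size(), 2UL,
                      platform::errors::InvalidArgument(
                          "The source tensor must be a matrix with rank 2, but "
                          "got the source tensor rank is %lu.",
                          src_dims.size()));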
@@ -29,11 +29,24 @@ class CopyMatrixRowsFunctor<platform::CPUDeviceContext, T> {
     auto src_dims = src.dims();
     auto dst_dims = dst->dims();
     PADDLE_ENFORCE_EQ(src_dims.size(), 2UL,
-                      "The src must be matrix with rank 2.");
+                      platform::errors::InvalidArgument(
+                          "The source tensor must be a matrix with rank 2, but "
+                          "got the source tensor rank is %lu. "
+                          "Please check the rank of the source tensor.",
+                          src_dims.size()));
     PADDLE_ENFORCE_EQ(dst_dims.size(), 2UL,
-                      "The dst must be matrix with rank 2.");
+                      platform::errors::InvalidArgument(
+                          "The destination tensor must be a matrix with rank "
+                          "2, but got the destination tensor rank is %lu. "
+                          "Please check the rank of the destination tensor.",
+                          dst_dims.size()));
-    PADDLE_ENFORCE_EQ(src_dims[1], dst_dims[1],
-                      "The width of src and dst must be same.");
+    PADDLE_ENFORCE_EQ(
+        src_dims[1], dst_dims[1],
+        platform::errors::InvalidArgument(
+            "The width of the source tensor and the destination tensor must "
+            "be the same. But got %ld != %ld. Please check the width of the "
+            "source tensor and the destination tensor.",
+            src_dims[1], dst_dims[1]));
     auto height = dst_dims[0];
     auto width = dst_dims[1];
     auto* src_data = src.data<T>();
......
@@ -46,11 +46,24 @@ class CopyMatrixRowsFunctor<platform::CUDADeviceContext, T> {
     auto src_dims = src.dims();
     auto dst_dims = dst->dims();
     PADDLE_ENFORCE_EQ(src_dims.size(), 2,
-                      "The src must be matrix with rank 2.");
+                      platform::errors::InvalidArgument(
+                          "The source tensor must be a matrix with rank 2, but "
+                          "got the source tensor rank is %lu. "
+                          "Please check the rank of the source tensor.",
+                          src_dims.size()));
     PADDLE_ENFORCE_EQ(dst_dims.size(), 2,
-                      "The dst must be matrix with rank 2.");
+                      platform::errors::InvalidArgument(
+                          "The destination tensor must be a matrix with rank "
+                          "2, but got the destination tensor rank is %lu. "
+                          "Please check the rank of the destination tensor.",
+                          dst_dims.size()));
-    PADDLE_ENFORCE_EQ(src_dims[1], dst_dims[1],
-                      "The width of src and dst must be same.");
+    PADDLE_ENFORCE_EQ(
+        src_dims[1], dst_dims[1],
+        platform::errors::InvalidArgument(
+            "The width of the source tensor and the destination tensor must "
+            "be the same. But got %ld != %ld. Please check the width of the "
+            "source tensor and the destination tensor.",
+            src_dims[1], dst_dims[1]));
     auto height = dst_dims[0];
     auto width = dst_dims[1];
     auto* src_data = src.data<T>();
......
@@ -64,19 +64,30 @@ class LoDTensor2BatchFunctor {
                   bool is_reverse = false) const {
     if (!is_cal_batch_lod) {
       auto lods = batch->lod();
-      PADDLE_ENFORCE_GT(lods.size(), 2UL,
-                        "The LoD of LoDTensor should inlcude at least 2-level "
-                        "sequence information.");
+      PADDLE_ENFORCE_GT(
+          lods.size(), 2UL,
+          platform::errors::InvalidArgument(
+              "The LoD of LoDTensor should include at least 2-level "
+              "sequence information, but got the LoD level is %lu. Please "
+              "check the input value.",
+              lods.size()));
       PADDLE_ENFORCE_EQ(
           lods[1].size(), static_cast<size_t>(lod_tensor.dims()[0]),
-          "The LoD information should be consistent with the dims.");
+          platform::errors::InvalidArgument(
+              "The LoD information should be consistent with the dims, but "
+              "got %lu != %lu. Please check the input value.",
+              lods[1].size(), static_cast<size_t>(lod_tensor.dims()[0])));
       CopyMatrixRowsFunctor<DeviceContext, T> to_batch;
       to_batch(context, lod_tensor, lods[1], batch, true);
       return;
     }
     auto lods = lod_tensor.lod();
-    PADDLE_ENFORCE_EQ(lods.size(), 1UL, "Only support one level sequence now.");
+    PADDLE_ENFORCE_EQ(lods.size(), 1UL,
+                      platform::errors::InvalidArgument(
+                          "Only support one level sequence now, but got the "
+                          "LoD level is %lu. Please check the input value.",
+                          lods.size()));
     const auto& lod = lods[0];
@@ -161,12 +172,19 @@ class Batch2LoDTensorFunctor {
                   const framework::LoDTensor& batch,
                   framework::LoDTensor* lod_tensor) const {
     auto in_lod = batch.lod();
-    PADDLE_ENFORCE_GT(in_lod.size(), 2UL,
-                      "The LoD of LoDTensor should inlcude at least 2-level "
-                      "sequence information.");
+    PADDLE_ENFORCE_GT(
+        in_lod.size(), 2UL,
+        platform::errors::InvalidArgument(
+            "The LoD of LoDTensor should include at least 2-level "
+            "sequence information, but got the LoD level is %lu. Please check "
+            "the input value.",
+            in_lod.size()));
     PADDLE_ENFORCE_EQ(
         in_lod[1].size(), static_cast<size_t>(lod_tensor->dims()[0]),
-        "The LoD information should be consistent with the dims.");
+        platform::errors::InvalidArgument(
+            "The LoD information should be consistent with the dims, but got "
+            "%lu != %lu. Please check the input value.",
+            in_lod[1].size(), static_cast<size_t>(lod_tensor->dims()[0])));
     CopyMatrixRowsFunctor<DeviceContext, T> to_seq;
     to_seq(context, batch, in_lod[1], lod_tensor, false);
   }
......
@@ -35,7 +35,11 @@ void CopyValidData(framework::Tensor* dst_tensor,
     int valid_seq_len = seq_offsets[seq_idx + 1] - seq_offsets[seq_idx];
     PADDLE_ENFORCE_GE(
         pad_seq_len, valid_seq_len,
-        "The padded sequence length can not be less than its original length.");
+        platform::errors::InvalidArgument(
+            "The padded sequence length can not "
+            "be less than its original length. Expected %ld >= %ld, but got "
+            "%ld < %ld. Please check the input value.",
+            pad_seq_len, valid_seq_len, pad_seq_len, valid_seq_len));
     int seq_data_offset = seq_offsets[seq_idx] * step_width;
     int pad_data_offset = layout == kBatchLengthWidth
                               ? seq_idx * pad_seq_len * step_width
@@ -95,9 +99,14 @@ class PaddingLoDTensorFunctor<platform::CPUDeviceContext, T> {
     CheckDims(seq_tensor_dims, pad_tensor_dims, seq_offsets, pad_seq_len,
               step_width, layout);
-    PADDLE_ENFORCE(pad_value.numel() == 1 || pad_value.numel() == step_width,
-                   "The numel of 'pad_value' can only be 1 or be equal to the "
-                   "'step_width'.");
+
+    PADDLE_ENFORCE_EQ(
+        pad_value.numel() == 1 || pad_value.numel() == step_width, true,
+        platform::errors::InvalidArgument(
+            "The numel of 'pad_value' can only be 1 or be equal to the "
+            "'step_width', but got %ld != 1 and %ld. Please check the input "
+            "value.",
+            pad_value.numel(), step_width));
     // fill padding value
     T* pad_data = pad_tensor->data<T>();
......
@@ -66,17 +66,25 @@ class PaddingLoDTensorFunctor<platform::CUDADeviceContext, T> {
     if (pad_seq_len == -1) {
       pad_seq_len = max_seq_len;
     }
-    PADDLE_ENFORCE_GE(pad_seq_len, max_seq_len,
-                      "The pad_seq_len must be equal to or greater than the "
-                      "original max sequence length.");
+    PADDLE_ENFORCE_GE(
+        pad_seq_len, max_seq_len,
+        platform::errors::InvalidArgument(
+            "The pad_seq_len must be equal to or greater than the "
+            "original max sequence length. Expected %ld >= %ld, but got %ld < "
+            "%ld. Please check the input value.",
+            pad_seq_len, max_seq_len, pad_seq_len, max_seq_len));
     int step_width = seq_tensor.numel() / seq_tensor_dims[0];
     int seq_num = seq_offsets.size() - 1;
     CheckDims(seq_tensor_dims, pad_tensor_dims, seq_offsets, pad_seq_len,
               step_width, layout);
-    PADDLE_ENFORCE(pad_value.numel() == 1 || pad_value.numel() == step_width,
-                   "The numel of 'pad_value' can only be 1 or be equal to the "
-                   "'step_width'.");
+    PADDLE_ENFORCE_EQ(
+        pad_value.numel() == 1 || pad_value.numel() == step_width, true,
+        platform::errors::InvalidArgument(
+            "The numel of 'pad_value' can only be 1 or be equal to "
+            "the 'step_width', but got %ld != 1 and %ld. Please check the "
+            "input value.",
+            pad_value.numel(), step_width));

     const int kBlockSize = 512;
......
@@ -52,14 +52,25 @@ inline static void CheckDims(const framework::DDim& seq_tensor_dims,
                              const framework::Vector<size_t>& seq_offset,
                              int64_t padded_seq_len, int64_t step_width,
                              const PadLayout& layout) {
-  PADDLE_ENFORCE_EQ(static_cast<size_t>(seq_tensor_dims[0]), seq_offset.back(),
-                    "Value of 1st dimension of the sequence tensor should be "
-                    "equal to sum of lengths of all sequences.");
+  PADDLE_ENFORCE_EQ(
+      static_cast<size_t>(seq_tensor_dims[0]), seq_offset.back(),
+      platform::errors::InvalidArgument(
+          "Value of 1st dimension of the sequence tensor should be "
+          "equal to sum of lengths of all sequences. Expected %ld == %ld, but "
+          "got %ld != %ld. Please check the input value.",
+          static_cast<size_t>(seq_tensor_dims[0]), seq_offset.back(),
+          static_cast<size_t>(seq_tensor_dims[0]), seq_offset.back()));

-  PADDLE_ENFORCE(seq_tensor_dims.size() + 1 == pad_tensor_dims.size() ||
-                     seq_tensor_dims.size() == pad_tensor_dims.size(),
-                 "pad_tensor's rank should be 1 greater than seq_tensor's "
-                 "rank, or be equal with it.");
+  PADDLE_ENFORCE_EQ(
+      seq_tensor_dims.size() + 1 == pad_tensor_dims.size() ||
+          seq_tensor_dims.size() == pad_tensor_dims.size(),
+      true, platform::errors::InvalidArgument(
+                "pad_tensor's rank should be 1 greater than seq_tensor's "
+                "rank, or be equal with it. The pad_tensor's rank is %ld, "
+                "expected the seq_tensor's rank is %ld or %ld, but got %ld. "
+                "Please check the input value.",
+                pad_tensor_dims.size(), pad_tensor_dims.size(),
+                pad_tensor_dims.size() - 1, seq_tensor_dims.size()));
 }

 /*
......
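
Compound boolean conditions have no dedicated comparison macro, so the padding-related hunks above also rewrite plain PADDLE_ENFORCE(cond, msg) as PADDLE_ENFORCE_EQ(cond, true, ...). A condensed sketch of that conversion, using the pad_value check from the diff:

    // Before: a generic enforce on an or-condition, with a static message.
    PADDLE_ENFORCE(pad_value.numel() == 1 || pad_value.numel() == step_width,
                   "The numel of 'pad_value' can only be 1 or be equal to the "
                   "'step_width'.");

    // After: the whole condition is compared against true, and the message
    // reports the values that were actually seen.
    PADDLE_ENFORCE_EQ(
        pad_value.numel() == 1 || pad_value.numel() == step_width, true,
        platform::errors::InvalidArgument(
            "The numel of 'pad_value' can only be 1 or be equal to the "
            "'step_width', but got %ld != 1 and %ld.",
            pad_value.numel(), step_width));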
@@ -42,15 +42,29 @@ class MaxSeqPoolFunctor {
     auto out_dims = output->dims();
     auto idx_dims = index->dims();
     PADDLE_ENFORCE_GT(in_dims.size(), 1,
-                      "The rank of input shall be greater than 1.");
+                      platform::errors::InvalidArgument(
+                          "The rank of input shall be greater than 1, but got "
+                          "the rank is %ld. Please check the input value.",
+                          in_dims.size()));
     PADDLE_ENFORCE_GT(out_dims.size(), 1,
-                      "The rank of output shall be greater than 1.");
+                      platform::errors::InvalidArgument(
+                          "The rank of output shall be greater than 1, but "
+                          "got the rank is %ld. Please check the input value.",
+                          out_dims.size()));
     for (int64_t i = 1; i < in_dims.size(); ++i) {
-      PADDLE_ENFORCE_EQ(in_dims[i], out_dims[i],
-                        "The dimension of input and output shall be same.");
+      PADDLE_ENFORCE_EQ(
+          in_dims[i], out_dims[i],
+          platform::errors::InvalidArgument(
+              "The dimension of input and output shall be same. Expected %ld "
+              "== %ld, but got %ld != %ld. Please check the input value.",
+              in_dims[i], out_dims[i], in_dims[i], out_dims[i]));
     }
-    PADDLE_ENFORCE_EQ(idx_dims, out_dims,
-                      "The dimension of index and output shall be same.");
+    PADDLE_ENFORCE_EQ(
+        idx_dims, out_dims,
+        platform::errors::InvalidArgument(
+            "The dimension of index and output shall be same. Expected %ld == "
+            "%ld, but got %ld != %ld. Please check the input value.",
+            idx_dims, out_dims, idx_dims, out_dims));

     auto lod_level = input.lod().size();
     auto starts = input.lod()[lod_level - 1];
@@ -94,12 +108,22 @@ class MaxSeqPoolFunctor<T, true> {
     auto in_dims = input.dims();
     auto out_dims = output->dims();
     PADDLE_ENFORCE_GT(in_dims.size(), 1,
-                      "The rank of input shall be greater than 1.");
+                      platform::errors::InvalidArgument(
+                          "The rank of input shall be greater than 1, but got "
+                          "%ld <= 1. Please check the input value.",
+                          in_dims.size()));
     PADDLE_ENFORCE_GT(out_dims.size(), 1,
-                      "The rank of output shall be greater than 1.");
+                      platform::errors::InvalidArgument(
+                          "The rank of output shall be greater than 1, but got "
+                          "%ld <= 1. Please check the input value.",
+                          out_dims.size()));
     for (int64_t i = 1; i < in_dims.size(); ++i) {
-      PADDLE_ENFORCE_EQ(in_dims[i], out_dims[i],
-                        "The dimension of input and output shall be same.");
+      PADDLE_ENFORCE_EQ(
+          in_dims[i], out_dims[i],
+          platform::errors::InvalidArgument(
+              "The dimension of input and output shall be same. Expected %ld "
+              "== %ld, but got %ld != %ld. Please check the input value.",
+              in_dims[i], out_dims[i], in_dims[i], out_dims[i]));
     }

     auto lod_level = input.lod().size();
@@ -139,16 +163,29 @@ class MaxSeqPoolGradFunctor {
     auto ig_dims = in_grad->dims();
     auto idx_dims = index.dims();
     PADDLE_ENFORCE_GT(og_dims.size(), 1,
-                      "The rank of output@Grad shall be greater than 1.");
+                      platform::errors::InvalidArgument(
+                          "The rank of output@Grad shall be greater than 1, "
+                          "but got %ld <= 1. Please check the input value.",
+                          og_dims.size()));
     PADDLE_ENFORCE_GT(ig_dims.size(), 1,
-                      "The rank of input@Grad shall be greater than 1.");
+                      platform::errors::InvalidArgument(
+                          "The rank of input@Grad shall be greater than 1, but "
+                          "got %ld <= 1. Please check the input value.",
+                          ig_dims.size()));
     for (int64_t i = 1; i < og_dims.size(); ++i) {
-      PADDLE_ENFORCE_EQ(
-          og_dims[i], ig_dims[i],
-          "The dimension of input@Grad and output@Grad shall be same.");
+      PADDLE_ENFORCE_EQ(og_dims[i], ig_dims[i],
+                        platform::errors::InvalidArgument(
+                            "The dimension of input@Grad and output@Grad shall "
+                            "be same. Expected %ld == %ld, but got %ld != %ld. "
+                            "Please check the input value.",
+                            og_dims[i], ig_dims[i], og_dims[i], ig_dims[i]));
     }
-    PADDLE_ENFORCE_EQ(idx_dims, og_dims,
-                      "The dimension of index and output@Grad shall be same.");
+    PADDLE_ENFORCE_EQ(
+        idx_dims, og_dims,
+        platform::errors::InvalidArgument(
+            "The dimension of index and output@Grad shall be same. Expected "
+            "%ld == %ld, but got %ld != %ld. Please check the input value.",
+            idx_dims, og_dims, idx_dims, og_dims));

     const T* og_data = out_grad.data<T>();
     const int* max_index = index.data<int>();
@@ -244,9 +281,12 @@ class SumSeqPoolGradFunctor {
     auto lod = in_grad->lod()[lod_level - 1];
     int64_t out_w = out_grad.numel() / out_grad.dims()[0];
     int64_t in_w = in_grad->numel() / in_grad->dims()[0];
-    PADDLE_ENFORCE_EQ(
-        in_w, out_w,
-        "The feature size of input@Grad and output@Grad shall be same.");
+    PADDLE_ENFORCE_EQ(in_w, out_w,
+                      platform::errors::InvalidArgument(
+                          "The feature size of input@Grad and output@Grad "
+                          "shall be same. Expected %ld == %ld, but got %ld != "
+                          "%ld. Please check the input value.",
+                          in_w, out_w, in_w, out_w));
     const T* out_g_data = out_grad.data<T>();
     T* in_g_data = in_grad->mutable_data<T>(context.GetPlace());
     auto blas = math::GetBlas<platform::CPUDeviceContext, T>(context);
@@ -298,7 +338,8 @@ class SequencePoolFunctor<platform::CPUDeviceContext, T> {
       auto place = context.GetPlace();
       PADDLE_ENFORCE_EQ(
           platform::is_cpu_place(place), true,
-          "Sequence_pool should run on CPU Device when pooltype is SUM");
+          platform::errors::InvalidArgument(
+              "Sequence_pool should run on CPU Device when pooltype is SUM"));
       const T* src = input.data<T>();
       T* dst = output->mutable_data<T>(place);
       jit::seq_pool_attr_t attr(
@@ -342,7 +383,10 @@ class SequencePoolFunctor<platform::CPUDeviceContext, T> {
           out_e.device(place) = in_e.sum(Eigen::array<int, 1>({{0}})) /
                                 std::sqrt(static_cast<T>(h));
         } else {
-          PADDLE_THROW("unsupported pooling pooltype");
+          PADDLE_THROW(platform::errors::InvalidArgument(
+              "unsupported pooling pooltype: %s. Only support \"AVERAGE\" and "
+              "\"SQRT\"",
+              pooltype));
         }
       }
     }
@@ -400,7 +444,10 @@ class SequencePoolGradFunctor<platform::CPUDeviceContext, T> {
       } else if (pooltype == "FIRST") {
         in_g_e.chip(0, 0).device(place) = out_g_e_v;
       } else {
-        PADDLE_THROW("unsupported pooling pooltype");
+        PADDLE_THROW(platform::errors::InvalidArgument(
+            "unsupported pooling pooltype: %s. Only support \"AVERAGE\", "
+            "\"SQRT\", \"LAST\" and \"FIRST\"",
+            pooltype));
       }
     }
   }
......
@@ -205,7 +205,10 @@ class SequencePoolFunctor<platform::CUDADeviceContext, T> {
           lod.CUDAData(context.GetPlace()), lod.size(), item_dim,
           output->mutable_data<T>(context.GetPlace()), nullptr);
     } else {
-      PADDLE_THROW("unsupported pooling pooltype");
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "unsupported pooling pooltype: %s. Only support \"MAX\", "
+          "\"AVERAGE\", \"SUM\", \"SQRT\", \"LAST\" and \"FIRST\"",
+          pooltype));
     }
   }
 };
@@ -370,7 +373,10 @@ class SequencePoolGradFunctor<platform::CUDADeviceContext, T> {
           in_grad->mutable_data<T>(context.GetPlace()), nullptr);
     } else {
-      PADDLE_THROW("unsupported pooling pooltype");
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "unsupported pooling pooltype: %s. Only support \"MAX\", "
+          "\"AVERAGE\", \"SUM\", \"SQRT\", \"LAST\" and \"FIRST\"",
+          pooltype));
     }
   }
 };
......
@@ -50,9 +50,21 @@ void TestSequencePoolingSum(const DeviceContext &context,
   in_grad.mutable_data<T>(in_dims, place);

   // check tensor construction result
-  PADDLE_ENFORCE_EQ(in_grad.dims().size(), out_grad.dims().size());
+  PADDLE_ENFORCE_EQ(
+      in_grad.dims().size(), out_grad.dims().size(),
+      paddle::platform::errors::InvalidArgument(
+          "The rank of input and output shall be same. Expected %ld == %ld, "
+          "but got %ld != %ld. Please check the input value.",
+          in_grad.dims().size(), out_grad.dims().size(), in_grad.dims().size(),
+          out_grad.dims().size()));
   for (int64_t i = 1; i < out_grad.dims().size(); ++i) {
-    PADDLE_ENFORCE_EQ(in_grad.dims()[i], out_grad.dims()[i]);
+    PADDLE_ENFORCE_EQ(
+        in_grad.dims()[i], out_grad.dims()[i],
+        paddle::platform::errors::InvalidArgument(
+            "The dimension of input and output shall be same. Expected %ld == "
+            "%ld, but got %ld != %ld. Please check the input value.",
+            in_grad.dims()[i], out_grad.dims()[i], in_grad.dims()[i],
+            out_grad.dims()[i]));
   }

   // call functor
......
@@ -55,7 +55,11 @@ void Tree2ColUtil::construct_tree(const paddle::Tensor &EdgeSet,
                                   std::vector<std::vector<int>> *tr,
                                   size_t *node_count) {
   auto edge_set_dims = EdgeSet.dims();
-  PADDLE_ENFORCE_EQ(edge_set_dims[1], 2);
+  PADDLE_ENFORCE_EQ(edge_set_dims[1], 2,
+                    platform::errors::InvalidArgument(
+                        "The second dimension of the EdgeSet shall be 2, but "
+                        "got %ld != 2. Please check the input value.",
+                        edge_set_dims[1]));
   int64_t edge_count = EdgeSet.numel();
   const int *edge_data = EdgeSet.data<int>();
......
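
For readers unfamiliar with the macros, the following is a minimal, self-contained C++ mimic of the mechanism. EnforceEq and InvalidArgument here are illustrative stand-ins, not Paddle's real implementation: the real PADDLE_ENFORCE_EQ lives in paddle/fluid/platform/enforce.h and, unlike this sketch, avoids building the message when the check passes.

    #include <cstdarg>
    #include <cstdio>
    #include <stdexcept>
    #include <string>

    // Stand-in for platform::errors::InvalidArgument: printf-style formatting
    // into a tagged message string.
    static std::string InvalidArgument(const char* fmt, ...) {
      char buf[512];
      va_list args;
      va_start(args, fmt);
      std::vsnprintf(buf, sizeof(buf), fmt, args);
      va_end(args);
      return std::string("InvalidArgumentError: ") + buf;
    }

    // Stand-in for PADDLE_ENFORCE_EQ: throw the prepared message on mismatch.
    template <typename A, typename B>
    static void EnforceEq(const A& a, const B& b, const std::string& msg) {
      if (!(a == b)) throw std::invalid_argument(msg);
    }

    int main() {
      long src_rank = 3;  // a rank-3 tensor where a matrix is required
      try {
        EnforceEq(src_rank, 2L,
                  InvalidArgument(
                      "The source tensor must be a matrix with rank 2, but "
                      "got the source tensor rank is %ld. "
                      "Please check the rank of the source tensor.",
                      src_rank));
      } catch (const std::exception& e) {
        std::puts(e.what());  // prints the message with the offending rank
      }
      return 0;
    }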