未验证 提交 4e8582fe 编写于 作者: W wawltor 提交者: GitHub

update the error message check for some ops

update the error message check for some ops
上级 d003573f
......@@ -33,9 +33,12 @@ namespace operators {
static void Memcpy(void *dst, const void *src, size_t n, bool copy_to_gpu) {
if (copy_to_gpu) {
#ifdef PADDLE_WITH_CUDA
PADDLE_ENFORCE(cudaMemcpy(dst, src, n, cudaMemcpyHostToDevice));
PADDLE_ENFORCE_CUDA_SUCCESS(
cudaMemcpy(dst, src, n, cudaMemcpyHostToDevice));
#else
PADDLE_THROW("Not compiled with cuda");
PADDLE_THROW(
platform::errors::InvalidArgument("Check your paddle version, current "
"version is not compiled with cuda"));
#endif
} else {
std::memcpy(dst, src, n);
......@@ -88,11 +91,22 @@ bool TestMain(const platform::Place &place, const framework::DDim &dims,
framework::LoDTensor cpu_out;
auto &out_tensor = scope.FindVar(out_name)->Get<framework::LoDTensor>();
PADDLE_ENFORCE(scope.kids().empty());
PADDLE_ENFORCE_EQ(scope.kids().empty(), true,
platform::errors::InvalidArgument(
"The scope can not have the child scopes,"
"please check your code."));
if (inplace) {
PADDLE_ENFORCE_EQ(&out_tensor, x);
PADDLE_ENFORCE_EQ(
&out_tensor, x,
platform::errors::InvalidArgument(
"The output tensor should be same as input x in inplace mode,"
" but now is not same."));
} else {
PADDLE_ENFORCE_EQ(&out_tensor, z);
PADDLE_ENFORCE_EQ(
&out_tensor, z,
platform::errors::InvalidArgument(
"The output tensor should be same as output z in normal mode,"
" but now is not same."));
}
if (is_gpu_place) {
......
......@@ -92,7 +92,9 @@ class TestElementwiseOpGradGrad {
auto dst_place = BOOST_GET_CONST(platform::CUDAPlace, place_);
memory::Copy(dst_place, dst, src_place, src, bytes, nullptr);
#else
PADDLE_THROW("Not compiled with cuda");
PADDLE_THROW(platform::errors::InvalidArgument(
"Check your paddle version, current version is not compiled with "
"cuda"));
#endif
}
}
......@@ -107,7 +109,10 @@ class TestElementwiseOpGradGrad {
op->Run(scope_, place_);
platform::DeviceContextPool::Instance().Get(place_)->Wait();
framework::LoDTensor cpu_out;
PADDLE_ENFORCE_EQ(scope_.kids().empty(), true, "scope has child scopes");
PADDLE_ENFORCE_EQ(scope_.kids().empty(), true,
platform::errors::InvalidArgument(
"The scope can not have the child scopes,"
"please check your code."));
// get outputs from scope and compare them with expected_outs
bool all_equal = true;
......
......@@ -186,10 +186,17 @@ class SumOp : public framework::OperatorWithKernel {
}
}
}
PADDLE_THROW("Cannot find the input data type by all input data");
PADDLE_THROW(platform::errors::InvalidArgument(
"Expected each tensor in Input(x) in sum op has be initialized, but "
"some tensor in Input(x) is not be initialized, please check your "
"code.",
framework::ToTypeName(x_vars[0]->Type())));
}
PADDLE_THROW("Unexpected branch. Input type is %s",
framework::ToTypeName(x_vars[0]->Type()));
PADDLE_THROW(platform::errors::InvalidArgument(
"Expected type of Input(X) must be Tensor, SelectedRows or "
"LodTensorArray. But got "
"unsupport type: %s.",
framework::ToTypeName(x_vars[0]->Type())));
}
};
......
......@@ -169,8 +169,18 @@ void SumToLoDTensor(const framework::ExecutionContext &context) {
auto row_numel = sr_value.numel() / sr_rows.size();
auto out_dims = out->dims();
PADDLE_ENFORCE_EQ(sr.height(), out_dims[0]);
PADDLE_ENFORCE_EQ(row_numel, out->numel() / sr.height());
PADDLE_ENFORCE_EQ(sr.height(), out_dims[0],
platform::errors::InvalidArgument(
"The table height of input must be same as output, "
"but received input height is %d"
", output height is %d",
sr.height(), out_dims[0]));
PADDLE_ENFORCE_EQ(row_numel, out->numel() / sr.height(),
platform::errors::InvalidArgument(
"The table width of input must be same as output, "
"but received input width is %d"
", output width is %d",
row_numel, out->numel() / sr.height()));
auto *sr_data = sr_value.data<T>();
auto *sr_out_data = out->data<T>();
......@@ -231,8 +241,11 @@ class SumKernel<platform::CUDADeviceContext, T>
} else if (out_var->IsType<framework::LoDTensorArray>()) {
LodTensorArrayCompute<platform::CUDADeviceContext, T>(context);
} else {
PADDLE_THROW("Unexpected branch, output variable type is %s",
framework::ToTypeName(out_var->Type()));
PADDLE_THROW(platform::errors::InvalidArgument(
"Expected type of Ouput(out) must be Tensor, SelectedRows or "
"LodTensorArray. But got "
"unsupport type: %s.",
framework::ToTypeName(out_var->Type())));
}
}
};
......
......@@ -182,7 +182,11 @@ class SumKernel : public framework::OpKernel<T> {
auto &in_t = in_vars[i]->Get<framework::SelectedRows>();
functor(context.template device_context<DeviceContext>(), in_t, out);
} else {
PADDLE_THROW("Variable type must be LoDTensor/SelectedRows.");
PADDLE_THROW(platform::errors::InvalidArgument(
"Expected type of Input(X) of %d-th must be Tensor, "
"SelectedRows. But got "
"unsupport type: %s.",
framework::ToTypeName(in_vars[i]->Type())));
}
}
} else if (out_var->IsType<framework::SelectedRows>()) {
......@@ -190,8 +194,11 @@ class SumKernel : public framework::OpKernel<T> {
} else if (out_var->IsType<framework::LoDTensorArray>()) {
LodTensorArrayCompute<DeviceContext, T>(context);
} else {
PADDLE_THROW("Unexpected branch, output variable type is %s",
framework::ToTypeName(out_var->Type()));
PADDLE_THROW(platform::errors::InvalidArgument(
"Expected type of Output(out) must be Tensor, SelectedRows, "
"LoDTensorArray. But got "
"unsupport type: %s.",
framework::ToTypeName(out_var->Type())));
}
}
};
......
......@@ -54,9 +54,11 @@ class CPUUniformRandomKernel : public framework::OpKernel<T> {
tensor = out_var->GetMutable<framework::LoDTensor>();
if (!new_shape.empty()) tensor->Resize(framework::make_ddim(new_shape));
} else {
PADDLE_THROW(
"uniform_random_op's output only"
"supports SelectedRows and LoDTensor");
PADDLE_THROW(platform::errors::InvalidArgument(
"Expected type of Output(out) in uniform_random_op must be Tensor, "
"SelectedRows. But got "
"unsupport type: %s.",
framework::ToTypeName(out_var->Type())));
}
T *data = tensor->mutable_data<T>(ctx.GetPlace());
......
......@@ -116,9 +116,11 @@ class GPUUniformRandomKernel : public framework::OpKernel<T> {
tensor = out_var->GetMutable<framework::LoDTensor>();
if (!new_shape.empty()) tensor->Resize(framework::make_ddim(new_shape));
} else {
PADDLE_THROW(
"uniform_random_op's output only"
"supports SelectedRows and LoDTensor");
PADDLE_THROW(platform::errors::InvalidArgument(
"Expected type of Output(out) in uniform_random_op must be Tensor, "
"SelectedRows. But got "
"unsupport type: %s.",
framework::ToTypeName(out_var->Type())));
}
T* data = tensor->mutable_data<T>(context.GetPlace());
unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
......
......@@ -50,7 +50,10 @@ inline std::vector<int64_t> GetNewDataFromShapeTensor(
}
return vec_new_data;
} else {
PADDLE_THROW("The dtype of shape tensor must be int32 or int64.");
PADDLE_THROW(platform::errors::InvalidArgument(
"Expected dtype of ShapeTensor must be int32, int64. But got "
"unsupport dtype: %s.",
paddle::framework::DataTypeToString(new_data_tensor->type())));
}
}
......@@ -84,7 +87,11 @@ inline std::vector<int64_t> GetNewDataFromShapeTensorList(
vec_new_shape.push_back(*tensor->data<int64_t>());
}
} else {
PADDLE_THROW("The dtype of shape tensor must be int32 or int64.");
PADDLE_THROW(platform::errors::InvalidArgument(
"Expected dtype of ShapeTensorList of %d-th must be int32, int64. "
"But got "
"unsupport dtype: %s.",
i, paddle::framework::DataTypeToString(tensor->type())));
}
}
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册