Unverified commit 6190023a, authored by Leo Chen, committed by GitHub

Refine error message in pybind folder (#24886)

* refine err_msg of pybind.cc, test=develop

* refine err_msg in tensor_py.h, test=develop

* refine error msg, test=develop

* fix test_exception, test=develop

* follow comments, test=develop
Parent 4058e736
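Every hunk below follows one pattern: a bare printf-style message passed to PADDLE_THROW / PADDLE_ENFORCE_* is wrapped in a typed error from platform::errors (Unimplemented, InvalidArgument, AlreadyExists, PreconditionNotMet, PermissionDenied, NotFound, Unavailable, Fatal). The following is a minimal, self-contained sketch of that idea; EnforceGt and InvalidArgument here are illustrative stand-ins, not Paddle's real PADDLE_ENFORCE_GT macro and error types, which additionally record an error code and the file/line of the failing check.

#include <cstddef>
#include <iostream>
#include <stdexcept>
#include <string>

// Illustrative stand-in for platform::errors::InvalidArgument.
struct InvalidArgument : std::invalid_argument {
  explicit InvalidArgument(const std::string &msg)
      : std::invalid_argument("InvalidArgumentError: " + msg) {}
};

// Illustrative stand-in for PADDLE_ENFORCE_GT(value, lower, error):
// check the comparison and throw the supplied typed error on failure.
template <typename T, typename Error>
void EnforceGt(const T &value, const T &lower, const Error &error) {
  if (!(value > lower)) throw error;
}

int main() {
  std::size_t device_num = 0;  // e.g. an empty place list, as guarded below
  try {
    // Old style (before this commit): a bare message string, no error class:
    //   PADDLE_ENFORCE_GT(device_num, 0, "thread_num must be larger than 0");
    // New style (after this commit): a typed, self-describing error object:
    EnforceGt(device_num, std::size_t{0},
              InvalidArgument("The number of devices must be larger than 0"));
  } catch (const InvalidArgument &e) {
    std::cout << e.what() << "\n";
    // Prints: InvalidArgumentError: The number of devices must be larger than 0
  }
  return 0;
}

The typed wrapper is what lets callers distinguish, say, an InvalidArgument from a PermissionDenied by error class instead of by parsing message text.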
@@ -55,13 +55,19 @@ class IterableDatasetWrapper {
batch_size_(batch_size),
drop_last_(drop_last) {
#if defined _WIN32
PADDLE_THROW("Dataset is not supported on Windows");
PADDLE_THROW(
platform::errors::Unimplemented("Dataset is not supported on Windows"));
#elif defined __APPLE__
PADDLE_THROW("Dataset is not supported on MAC");
PADDLE_THROW(
platform::errors::Unimplemented("Dataset is not supported on MAC"));
#else
size_t device_num = places_.size();
PADDLE_ENFORCE_GT(device_num, 0, "thread_num must be larger than 0");
PADDLE_ENFORCE_GT(slots_.size(), 0, "slot_num cannot be 0");
PADDLE_ENFORCE_GT(device_num, 0,
platform::errors::InvalidArgument(
"The number of devices must be larger than 0"));
PADDLE_ENFORCE_GT(slots_.size(), 0,
platform::errors::InvalidArgument(
"The number of slots must be larger than 0"));
scopes_.reserve(device_num);
tensors_.reserve(device_num);
for (size_t i = 0; i < device_num; ++i) {
@@ -80,14 +86,19 @@ class IterableDatasetWrapper {
}
void Start() {
PADDLE_ENFORCE_EQ(is_started_, false, "Reader has been started");
PADDLE_ENFORCE_EQ(
is_started_, false,
platform::errors::AlreadyExists("Reader has been started already"));
data_feeds_ = dataset_->GetReaders();
PADDLE_ENFORCE_EQ(data_feeds_.size(), places_.size(),
"Device number does not match reader number");
platform::errors::InvalidArgument(
"Device number does not match reader number"));
for (size_t i = 0; i < places_.size(); ++i) {
data_feeds_[i]->AssignFeedVar(*scopes_[i]);
data_feeds_[i]->SetPlace(platform::CPUPlace());
PADDLE_ENFORCE_EQ(data_feeds_[i]->Start(), true, "Reader start failed");
PADDLE_ENFORCE_EQ(data_feeds_[i]->Start(), true,
platform::errors::Unavailable(
"Failed to start the reader on device %d.", i));
}
is_started_ = true;
@@ -96,7 +107,10 @@ class IterableDatasetWrapper {
}
std::vector<std::unordered_map<std::string, framework::LoDTensor>> Next() {
PADDLE_ENFORCE_EQ(is_started_, true, "Reader must be started");
PADDLE_ENFORCE_EQ(
is_started_, true,
platform::errors::PreconditionNotMet(
"Reader must be started when getting next batch data."));
size_t device_num = places_.size();
std::vector<std::unordered_map<std::string, framework::LoDTensor>> result(
@@ -154,7 +168,9 @@ class IterableDatasetWrapper {
private:
bool IsValidLoDTensor(const framework::LoDTensor &tensor) const {
auto &lod = tensor.lod();
PADDLE_ENFORCE_LE(lod.size(), 1, "lod level must be not larger than 1");
PADDLE_ENFORCE_LE(lod.size(), 1,
platform::errors::InvalidArgument(
"LoD level must be not larger than 1"));
if (!drop_last_) return true;
if (lod.empty()) {
@@ -30,8 +30,10 @@ void BindException(pybind11::module* m) {
}
});
m->def("__unittest_throw_exception__",
[] { PADDLE_THROW("test exception"); });
m->def("__unittest_throw_exception__", [] {
PADDLE_THROW(
platform::errors::PermissionDenied("This is a test of exception"));
});
}
} // namespace pybind
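To see how a throw like the PermissionDenied above reaches the Python test at the bottom of this diff, here is a minimal, self-contained pybind11 sketch; EnforceNotMet is a hypothetical stand-in for Paddle's real exception class (the one the test catches as core.EnforceNotMet), and the module name core_sketch is invented for illustration.

#include <pybind11/pybind11.h>

#include <exception>
#include <stdexcept>

namespace py = pybind11;

// Hypothetical stand-in for Paddle's EnforceNotMet exception; the real
// class also carries the error category and call-site information.
struct EnforceNotMet : std::runtime_error {
  using std::runtime_error::runtime_error;
};

PYBIND11_MODULE(core_sketch, m) {
  // Translate the C++ exception so Python sees a catchable exception
  // whose message can be asserted on, as test_exception.py does below.
  py::register_exception_translator([](std::exception_ptr p) {
    try {
      if (p) std::rethrow_exception(p);
    } catch (const EnforceNotMet &e) {
      PyErr_SetString(PyExc_RuntimeError, e.what());
    }
  });

  // Mirrors m->def("__unittest_throw_exception__", ...) in the hunk above.
  m.def("__unittest_throw_exception__",
        [] { throw EnforceNotMet("This is a test of exception"); });
}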
@@ -175,7 +175,9 @@ static T PyObjectCast(PyObject *obj) {
try {
return py::cast<T>(py::handle(obj));
} catch (py::cast_error &) {
PADDLE_THROW("Python object is not type of %s", typeid(T).name());
PADDLE_THROW(platform::errors::InvalidArgument(
"Python object is not type of %s, the real type is %s",
typeid(T).name(), obj->ob_type->tp_name));
}
}
@@ -189,7 +191,8 @@ static std::vector<std::shared_ptr<imperative::VarBase>> GetVarBaseList(
for (auto &para : state_dict) {
PyObject *py_obj = para.second.ptr();
if (!py_obj || py_obj == Py_None) {
PADDLE_THROW("Save parameter [%s] is None", para.first);
PADDLE_THROW(platform::errors::InvalidArgument(
"The parameter [%s] to save is None", para.first));
}
vec_res.emplace_back(
PyObjectCast<std::shared_ptr<imperative::VarBase>>(py_obj));
@@ -205,7 +208,8 @@ static std::vector<std::string> inline GetNameList(
PyObject *py_obj = py_handle.ptr(); // get underlying PyObject
// Python None is not nullptr in C++!
if (!py_obj || py_obj == Py_None) {
PADDLE_THROW("Save parameter list is None");
PADDLE_THROW(platform::errors::InvalidArgument(
"The parameter list to save is None"));
}
if (PyList_Check(py_obj)) {
@@ -218,14 +222,16 @@ static std::vector<std::string> inline GetNameList(
for (size_t i = 0; i < len; ++i) {
PyObject *py_name =
PyObject_GetAttrString(PyList_GET_ITEM(py_obj, i), kNameField);
PADDLE_ENFORCE_NOT_NULL(py_name);
PADDLE_ENFORCE_NOT_NULL(py_name,
platform::errors::InvalidArgument(
"The name of parameter to save is None"));
vec_res.emplace_back(PyObjectCast<std::string>(py_name));
Py_DECREF(py_name);
}
} else {
PADDLE_THROW("Set parameter should be a list");
PADDLE_THROW(platform::errors::InvalidArgument(
"The parameters to save is not a list"));
}
return vec_res;
}
@@ -237,7 +243,8 @@ static void inline CreateVariableIfNotExit(
PyObject *py_obj = py_handle.ptr(); // get underlying PyObject
// Python None is not nullptr in C++!
if (!py_obj || py_obj == Py_None) {
PADDLE_THROW("Save parameter list is None");
PADDLE_THROW(
platform::errors::InvalidArgument("The parameter list to set is None"));
}
if (PyList_Check(py_obj)) {
@@ -251,19 +258,24 @@ static void inline CreateVariableIfNotExit(
for (size_t i = 0; i < len; ++i) {
PyObject *py_name =
PyObject_GetAttrString(PyList_GET_ITEM(py_obj, i), kNameField);
PADDLE_ENFORCE_NOT_NULL(py_name);
PADDLE_ENFORCE_NOT_NULL(py_name,
platform::errors::InvalidArgument(
"The name of parameter to set is None"));
auto para_name = PyObjectCast<std::string>(py_name);
Py_DECREF(py_name);
auto var = scope.FindVar(para_name);
if (var == nullptr) {
PADDLE_ENFORCE_NE(exe, nullptr,
PADDLE_ENFORCE_NOT_NULL(exe,
platform::errors::InvalidArgument(
"Parameter not Initialized, "
"Please set argument [executor] not None "
"or run startup program first");
"or run startup program first"));
PyObject *py_var_desc =
PyObject_GetAttrString(PyList_GET_ITEM(py_obj, i), kVarDescField);
PADDLE_ENFORCE_NOT_NULL(py_var_desc);
PADDLE_ENFORCE_NOT_NULL(
py_var_desc, platform::errors::InvalidArgument(
"The var_desc of parameter to set is None"));
auto var_desc = PyObjectCast<framework::VarDesc>(py_var_desc);
Py_DECREF(py_var_desc);
var = const_cast<framework::Scope *>(&scope)->Var(para_name);
@@ -273,7 +285,8 @@ static void inline CreateVariableIfNotExit(
}
}
} else {
PADDLE_THROW("Set parameter should be a list");
PADDLE_THROW(platform::errors::InvalidArgument(
"The parameters to set is not a list"));
}
return;
@@ -670,7 +683,10 @@ PYBIND11_MODULE(core_noavx, m) {
LoD new_offset_lod = ConvertToOffsetBasedLoD(new_lod);
PADDLE_ENFORCE_EQ(
CheckLoD(new_offset_lod, -1), true,
"the provided recursive_sequence_lengths info is invalid");
platform::errors::InvalidArgument(
"The provided recursive_sequence_lengths info is invalid, "
"the LoD converted by recursive_sequence_lengths is %s",
new_lod));
new (&instance) LoDTensor(new_offset_lod);
})
.def("__init__", [](LoDTensor &instance) { new (&instance) LoDTensor(); })
@@ -688,7 +704,8 @@ PYBIND11_MODULE(core_noavx, m) {
std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
PADDLE_ENFORCE_EQ(
CheckLoD(new_lod, vectorize(self.dims()).front()), true,
"the provided lod info is invalid");
platform::errors::InvalidArgument(
"The provided LoD is invalid, the LoD is %s", new_lod));
self.set_lod(new_lod);
},
py::arg("lod"), R"DOC(
@@ -724,7 +741,11 @@ PYBIND11_MODULE(core_noavx, m) {
LoD new_offset_lod = ConvertToOffsetBasedLoD(new_lod);
PADDLE_ENFORCE_EQ(
CheckLoD(new_offset_lod, vectorize(self.dims()).front()), true,
"the provided recursive_sequence_lengths info is invalid");
platform::errors::InvalidArgument(
"The provided recursive_sequence_lengths info is invalid, "
"the LoD converted by recursive_sequence_lengths is "
"%s",
new_lod));
self.set_lod(new_offset_lod);
},
py::arg("recursive_sequence_lengths"), R"DOC(
@@ -988,7 +1009,10 @@ All parameter, weight, gradient are variables in Paddle.
#endif
.def("get_reader",
[](Variable &self) -> framework::ReaderHolder * {
PADDLE_ENFORCE_EQ(self.IsType<framework::ReaderHolder>(), true);
PADDLE_ENFORCE_EQ(
self.IsType<framework::ReaderHolder>(), true,
platform::errors::InvalidArgument(
"The variable is not type of ReaderHolder."));
return self.GetMutable<framework::ReaderHolder>();
},
py::return_value_policy::reference)
@@ -1091,7 +1115,8 @@ All parameter, weight, gradient are variables in Paddle.
std::string str;
PADDLE_ENFORCE_EQ(
info.Proto().SerializeToString(&str), true,
"Serialize OpProto Error. This could be a bug of Paddle.");
platform::errors::Fatal(
"Serialize OpProto Error. This could be a bug of Paddle."));
ret_values.emplace_back(str);
}
}
@@ -1204,7 +1229,10 @@ All parameter, weight, gradient are variables in Paddle.
[](paddle::platform::CUDAPlace& place)
-> paddle::platform::DeviceContext* {
#ifndef PADDLE_WITH_CUDA
PADDLE_THROW("CUDAPlace is not supported in CPU device.");
PADDLE_THROW(
platform::errors::PermissionDenied(
"Cannot use CUDAPlace in CPU only version, "
"Please recompile or reinstall Paddle with CUDA support."));
#else
return new paddle::platform::CUDADeviceContext(place);
#endif
@@ -1214,7 +1242,9 @@ All parameter, weight, gradient are variables in Paddle.
-> paddle::platform::DeviceContext* {
#ifndef PADDLE_WITH_CUDA
PADDLE_THROW(
"CUDAPinnedPlace is not supported in CPU device.");
platform::errors::PermissionDenied(
"Cannot use CUDAPinnedPlace in CPU only version, "
"Please recompile or reinstall Paddle with CUDA support."));
#else
return new paddle::platform::CUDAPinnedDeviceContext(place);
#endif
@@ -1335,7 +1365,9 @@ All parameter, weight, gradient are variables in Paddle.
.def("__init__",
[](platform::CUDAPinnedPlace &self) {
#ifndef PADDLE_WITH_CUDA
PADDLE_THROW("Cannot use CUDAPinnedPlace in CPU only version");
PADDLE_THROW(platform::errors::PermissionDenied(
"Cannot use CUDAPinnedPlace in CPU only version, "
"Please recompile or reinstall Paddle with CUDA support."));
#endif
new (&self) platform::CUDAPinnedPlace();
})
@@ -1389,10 +1421,13 @@ All parameter, weight, gradient are variables in Paddle.
[](py::bytes protobin) {
proto::OpDesc desc;
PADDLE_ENFORCE_EQ(desc.ParsePartialFromString(protobin), true,
"Cannot parse user input to OpDesc");
PADDLE_ENFORCE_EQ(desc.IsInitialized(), true,
"User OpDesc is not initialized, reason %s",
desc.InitializationErrorString());
platform::errors::InvalidArgument(
"Cannot parse user input to OpDesc"));
PADDLE_ENFORCE_EQ(
desc.IsInitialized(), true,
platform::errors::InvalidArgument(
"The provided OpDesc is not initialized, the reason is: %s",
desc.InitializationErrorString()));
return OpRegistry::CreateOp(desc);
})
.def("run",
@@ -1564,7 +1599,10 @@ All parameter, weight, gradient are variables in Paddle.
.def("__len__", [](LoDTensorArray &self) { return self.size(); })
.def("__setitem__",
[](LoDTensorArray &self, size_t i, const LoDTensor &t) {
PADDLE_ENFORCE_LT(i, self.size());
PADDLE_ENFORCE_LT(i, self.size(),
platform::errors::InvalidArgument(
"The index to set is larger than the size "
"of LoDTensorArray."));
self[i].ShareDataWith(t);
self[i].set_lod(t.lod());
})
@@ -2099,7 +2137,7 @@ All parameter, weight, gradient are variables in Paddle.
[](BuildStrategy &self, int num_trainers) {
#ifdef WIN32
PADDLE_THROW(platform::errors::Unavailable(
"Windows has NO support to distribute mode."));
"Distribution mode is not supported on Windows platform."));
#endif
self.num_trainers_ = num_trainers;
})
@@ -2324,7 +2362,7 @@ All parameter, weight, gradient are variables in Paddle.
#ifdef WIN32
if (b) {
PADDLE_THROW(platform::errors::Unavailable(
"Windows has NO support to distribute mode."));
"Distribution mode is not supported on Windows platform."));
}
#else
self.is_distribution_ = b;
@@ -160,8 +160,8 @@ class MultiDeviceFeedReader {
reader, p, 2));
} else {
if (platform::is_gpu_place(p)) {
PADDLE_THROW(
"Place cannot be CUDAPlace when use_double_buffer is False");
PADDLE_THROW(platform::errors::PermissionDenied(
"Place cannot be CUDAPlace when use_double_buffer is False"));
}
holder->Reset(reader);
}
@@ -233,7 +233,11 @@ class MultiDeviceFeedReader {
auto each_status = futures_[i].get();
if (UNLIKELY(each_status != Status::kSuccess)) {
if (UNLIKELY(each_status == Status::kException)) {
PADDLE_ENFORCE_NOT_NULL(exceptions_[i]);
PADDLE_ENFORCE_NOT_NULL(
exceptions_[i],
platform::errors::NotFound("exceptions_[%d] is NULL, but the "
"result status is Status::kException",
i));
*excep = exceptions_[i];
exceptions_[i] = nullptr;
}
@@ -280,7 +284,10 @@ class MultiDeviceFeedReader {
Status status = WaitFutures(&excep);
if (UNLIKELY(excep)) {
PADDLE_ENFORCE_EQ(status, Status::kException);
PADDLE_ENFORCE_EQ(status, Status::kException,
platform::errors::NotFound(
"The exception raised is not NULL, but "
"the result status is not Status::kException"));
std::rethrow_exception(excep);
}
@@ -290,7 +297,10 @@ class MultiDeviceFeedReader {
throw py::stop_iteration();
}
PADDLE_ENFORCE_EQ(status, Status::kSuccess);
PADDLE_ENFORCE_EQ(status, Status::kSuccess,
platform::errors::NotFound(
"The function executed sucessfully, but "
"the result status is not Status::kSuccess"));
}
std::shared_ptr<QueueType> queue_;
@@ -20,6 +20,7 @@ limitations under the License. */
#include <string>
#include <tuple>
#include <vector>
#include "paddle/fluid/framework/data_type.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/operators/math/concat_and_split.h"
@@ -119,22 +120,28 @@ inline std::string TensorDTypeToPyDTypeStr(
return "e"; \
} else { \
constexpr auto kIsValidDType = ValidDTypeToPyArrayChecker<T>::kValue; \
PADDLE_ENFORCE_EQ(kIsValidDType, true, \
"This type of tensor cannot be expose to Python"); \
PADDLE_ENFORCE_EQ( \
kIsValidDType, true, \
platform::errors::Unimplemented( \
"This type [%s] of tensor cannot be expose to Python", \
typeid(T).name())); \
return py::format_descriptor<T>::format(); \
} \
}
_ForEachDataType_(TENSOR_DTYPE_TO_PY_DTYPE);
#undef TENSOR_DTYPE_TO_PY_DTYPE
PADDLE_THROW("Unsupported data type %d", static_cast<int>(type));
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported tensor data type: %s", framework::DataTypeToString(type)));
}
} // namespace details
template <typename T>
T TensorGetElement(const framework::Tensor &self, size_t offset) {
PADDLE_ENFORCE_LT(offset, self.numel());
PADDLE_ENFORCE_LT(offset, self.numel(),
platform::errors::InvalidArgument(
"The offset exceeds the size of tensor."));
T b = static_cast<T>(0);
if (platform::is_cpu_place(self.place())) {
b = self.data<T>()[offset];
@@ -151,7 +158,9 @@ T TensorGetElement(const framework::Tensor &self, size_t offset) {
template <typename T>
void TensorSetElement(framework::Tensor *self, size_t offset, T elem) {
PADDLE_ENFORCE_LT(offset, self->numel());
PADDLE_ENFORCE_LT(offset, self->numel(),
platform::errors::InvalidArgument(
"The offset exceeds the size of tensor."));
if (platform::is_cpu_place(self->place())) {
self->mutable_data<T>(self->place())[offset] = elem;
#ifdef PADDLE_WITH_CUDA
@@ -194,13 +203,16 @@ void SetTensorFromPyArrayT(
paddle::platform::GpuMemcpySync(dst, array.data(), array.nbytes(),
cudaMemcpyHostToDevice);
} else {
PADDLE_THROW(
"Incompatible place type: Tensor.set() supports CPUPlace, CUDAPlace "
PADDLE_THROW(platform::errors::InvalidArgument(
"Incompatible place type: Tensor.set() supports "
"CPUPlace, CUDAPlace "
"and CUDAPinnedPlace, but got %s!",
place);
place));
}
#else
PADDLE_THROW("Not supported GPU, please compile WITH_GPU option");
PADDLE_THROW(platform::errors::PermissionDenied(
"Cannot use CUDAPlace in CPU only version, "
"Please recompile or reinstall Paddle with CUDA support."));
#endif
}
}
@@ -234,12 +246,12 @@ void SetTensorFromPyArray(framework::Tensor *self, const py::object &obj,
} else if (py::isinstance<py::array_t<bool>>(array)) {
SetTensorFromPyArrayT<bool, P>(self, array, place, zero_copy);
} else {
PADDLE_THROW(
"Incompatible data or style type: tensor.set() supports bool, float16, "
PADDLE_THROW(platform::errors::InvalidArgument(
"Incompatible data type: tensor.set() supports bool, float16, "
"float32, "
"float64, "
"int8, int16, int32, int64 and uint8, uint16, but got %s!",
array.dtype());
array.dtype()));
}
}
@@ -389,7 +401,8 @@ void _sliceDapper(const framework::Tensor *in, framework::Tensor *out,
_sliceCompute<T, 9>(in, out, ctx, axes, starts);
break;
default:
PADDLE_THROW("dim size not exepected, current is %d", size);
PADDLE_THROW(platform::errors::InvalidArgument(
"The dim size should be 1 to 9, current is %d", size));
break;
}
}
@@ -454,7 +467,9 @@ inline framework::Tensor *_sliceTensor(const framework::Tensor &self,
case framework::proto::VarType::UINT8:
return _sliceAndConcat<uint8_t>(self, obj, dim);
default:
PADDLE_THROW("Not support type %d", src_type);
PADDLE_THROW(platform::errors::InvalidArgument(
"Not support tensor type: %s",
framework::DataTypeToString(src_type)));
}
}
@@ -525,13 +540,15 @@ inline py::array TensorToPyArray(const framework::Tensor &tensor,
static_cast<size_t>(tensor.dims().size()), py_dims, py_strides));
} else {
py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides);
PADDLE_ENFORCE_EQ(py_arr.writeable(), true,
PADDLE_ENFORCE_EQ(
py_arr.writeable(), true,
platform::errors::InvalidArgument(
"PyArray must be writable, otherwise memory leak "
"PyArray is not writable, in which case memory leak "
"or double free would occur"));
PADDLE_ENFORCE_EQ(py_arr.owndata(), true,
PADDLE_ENFORCE_EQ(
py_arr.owndata(), true,
platform::errors::InvalidArgument(
"PyArray must own data, otherwise memory leak "
"PyArray does not own data, in which case memory leak "
"or double free would occur"));
platform::CPUPlace place;
size_t copy_bytes = sizeof_dtype * numel;
@@ -543,16 +560,23 @@
#ifdef PADDLE_WITH_CUDA
py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides);
PADDLE_ENFORCE(py_arr.writeable() && py_arr.owndata(),
"PyArray must be writable and own data, otherwise memory leak "
"or double free would occur");
PADDLE_ENFORCE_EQ(py_arr.writeable(), true,
platform::errors::InvalidArgument(
"PyArray is not writable, in which case memory leak "
"or double free would occur"));
PADDLE_ENFORCE_EQ(py_arr.owndata(), true,
platform::errors::InvalidArgument(
"PyArray does not own data, in which case memory leak "
"or double free would occur"));
size_t copy_bytes = sizeof_dtype * numel;
paddle::platform::GpuMemcpySync(py_arr.mutable_data(), tensor_buf_ptr,
copy_bytes, cudaMemcpyDeviceToHost);
return py_arr;
#else
PADDLE_THROW("CUDAPlace is not supported when not compiled with CUDA");
PADDLE_THROW(platform::errors::PermissionDenied(
"Cannot use CUDAPlace in CPU only version, "
"Please recompile or reinstall Paddle with CUDA support."));
#endif
}
@@ -25,7 +25,8 @@ class TestException(unittest.TestCase):
try:
core.__unittest_throw_exception__()
except core.EnforceNotMet as ex:
self.assertIn("test exception", cpt.get_exception_message(ex))
self.assertIn("This is a test of exception",
cpt.get_exception_message(ex))
exception = ex
self.assertIsNotNone(exception)