Unverified commit 6190023a, authored by Leo Chen, committed by GitHub

Refine error message in pybind folder (#24886)

* refine err_msg of pybind.cc, test=develop

* refine err_msg in tensor_py.h, test=develop

* refine error msg, test=develop

* fix test_exception, test=develop

* follow comments, test=develop
Parent 4058e736
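The pattern applied throughout this commit is the same everywhere: a bare message string passed to PADDLE_THROW / PADDLE_ENFORCE_* is replaced by a typed error built with platform::errors::* (InvalidArgument, Unimplemented, PermissionDenied, and so on). The following is a minimal, self-contained sketch of that pattern; the Error type, the errors::InvalidArgument builder, and the MY_ENFORCE_GT macro are simplified stand-ins for illustration, not Paddle's real enforce machinery.

#include <cstddef>
#include <cstdio>
#include <stdexcept>
#include <string>

namespace errors {
// Stand-in for platform::errors::*: the error carries a type tag so callers
// can distinguish, e.g., argument errors from unimplemented features.
struct Error : std::runtime_error {
  Error(const char* type, const std::string& msg)
      : std::runtime_error(std::string(type) + ": " + msg) {}
};
inline Error InvalidArgument(const std::string& msg) {
  return Error("InvalidArgument", msg);
}
}  // namespace errors

// Stand-in for PADDLE_ENFORCE_GT: check a condition and throw the typed
// error built at the call site instead of a bare string.
#define MY_ENFORCE_GT(a, b, err)   \
  do {                             \
    if (!((a) > (b))) throw (err); \
  } while (0)

int main() {
  std::size_t device_num = 0;
  try {
    // Old style (before this commit): an untyped message string, e.g.
    //   PADDLE_ENFORCE_GT(device_num, 0, "thread_num must be larger than 0");
    // New style: a typed error with a clearer, user-facing message.
    MY_ENFORCE_GT(device_num, 0,
                  errors::InvalidArgument(
                      "The number of devices must be larger than 0"));
  } catch (const errors::Error& e) {
    std::printf("%s\n", e.what());  // InvalidArgument: The number of ...
  }
  return 0;
}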
@@ -55,13 +55,19 @@ class IterableDatasetWrapper {
         batch_size_(batch_size),
         drop_last_(drop_last) {
 #if defined _WIN32
-    PADDLE_THROW("Dataset is not supported on Windows");
+    PADDLE_THROW(
+        platform::errors::Unimplemented("Dataset is not supported on Windows"));
 #elif defined __APPLE__
-    PADDLE_THROW("Dataset is not supported on MAC");
+    PADDLE_THROW(
+        platform::errors::Unimplemented("Dataset is not supported on MAC"));
 #else
     size_t device_num = places_.size();
-    PADDLE_ENFORCE_GT(device_num, 0, "thread_num must be larger than 0");
-    PADDLE_ENFORCE_GT(slots_.size(), 0, "slot_num cannot be 0");
+    PADDLE_ENFORCE_GT(device_num, 0,
+                      platform::errors::InvalidArgument(
+                          "The number of devices must be larger than 0"));
+    PADDLE_ENFORCE_GT(slots_.size(), 0,
+                      platform::errors::InvalidArgument(
+                          "The number of slots must be larger than 0"));
     scopes_.reserve(device_num);
     tensors_.reserve(device_num);
     for (size_t i = 0; i < device_num; ++i) {
@@ -80,14 +86,19 @@ class IterableDatasetWrapper {
   }
   void Start() {
-    PADDLE_ENFORCE_EQ(is_started_, false, "Reader has been started");
+    PADDLE_ENFORCE_EQ(
+        is_started_, false,
+        platform::errors::AlreadyExists("Reader has been started already"));
     data_feeds_ = dataset_->GetReaders();
-    PADDLE_ENFORCE_EQ(data_feeds_.size(), places_.size(),
-                      "Device number does not match reader number");
+    PADDLE_ENFORCE_EQ(data_feeds_.size(), places_.size(),
+                      platform::errors::InvalidArgument(
+                          "Device number does not match reader number"));
     for (size_t i = 0; i < places_.size(); ++i) {
       data_feeds_[i]->AssignFeedVar(*scopes_[i]);
       data_feeds_[i]->SetPlace(platform::CPUPlace());
-      PADDLE_ENFORCE_EQ(data_feeds_[i]->Start(), true, "Reader start failed");
+      PADDLE_ENFORCE_EQ(data_feeds_[i]->Start(), true,
+                        platform::errors::Unavailable(
+                            "Failed to start the reader on device %d.", i));
     }
     is_started_ = true;
@@ -96,7 +107,10 @@ class IterableDatasetWrapper {
   }
   std::vector<std::unordered_map<std::string, framework::LoDTensor>> Next() {
-    PADDLE_ENFORCE_EQ(is_started_, true, "Reader must be started");
+    PADDLE_ENFORCE_EQ(
+        is_started_, true,
+        platform::errors::PreconditionNotMet(
+            "Reader must be started when getting next batch data."));
     size_t device_num = places_.size();
     std::vector<std::unordered_map<std::string, framework::LoDTensor>> result(
@@ -154,7 +168,9 @@ class IterableDatasetWrapper {
  private:
   bool IsValidLoDTensor(const framework::LoDTensor &tensor) const {
     auto &lod = tensor.lod();
-    PADDLE_ENFORCE_LE(lod.size(), 1, "lod level must be not larger than 1");
+    PADDLE_ENFORCE_LE(lod.size(), 1,
+                      platform::errors::InvalidArgument(
+                          "LoD level must be not larger than 1"));
     if (!drop_last_) return true;
     if (lod.empty()) {
......
@@ -30,8 +30,10 @@ void BindException(pybind11::module* m) {
     }
   });
-  m->def("__unittest_throw_exception__",
-         [] { PADDLE_THROW("test exception"); });
+  m->def("__unittest_throw_exception__", [] {
+    PADDLE_THROW(
+        platform::errors::PermissionDenied("This is a test of exception"));
+  });
 }
 }  // namespace pybind
......
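For context on the hunk above: __unittest_throw_exception__ exists only so the Python unit test (see the test_exception change at the end of this diff) can exercise the C++ exception-translation path. Below is a minimal pybind11 sketch of that idea; the module name demo and function name throw_for_test are hypothetical, and plain std::runtime_error stands in for Paddle's enforce machinery.

#include <pybind11/pybind11.h>
#include <stdexcept>

PYBIND11_MODULE(demo, m) {
  // Expose a function whose only job is to throw, so a Python-side test can
  // assert on the translated exception type and message. pybind11 converts
  // std::runtime_error into Python's RuntimeError.
  m.def("throw_for_test", [] {
    throw std::runtime_error("This is a test of exception");
  });
}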
@@ -175,7 +175,9 @@ static T PyObjectCast(PyObject *obj) {
   try {
     return py::cast<T>(py::handle(obj));
   } catch (py::cast_error &) {
-    PADDLE_THROW("Python object is not type of %s", typeid(T).name());
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "Python object is not type of %s, the real type is %s",
+        typeid(T).name(), obj->ob_type->tp_name));
   }
 }
@@ -189,7 +191,8 @@ static std::vector<std::shared_ptr<imperative::VarBase>> GetVarBaseList(
   for (auto &para : state_dict) {
     PyObject *py_obj = para.second.ptr();
     if (!py_obj || py_obj == Py_None) {
-      PADDLE_THROW("Save parameter [%s] is None", para.first);
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "The parameter [%s] to save is None", para.first));
     }
     vec_res.emplace_back(
         PyObjectCast<std::shared_ptr<imperative::VarBase>>(py_obj));
@@ -205,7 +208,8 @@ static std::vector<std::string> inline GetNameList(
   PyObject *py_obj = py_handle.ptr();  // get underlying PyObject
   // Python None is not nullptr in C++!
   if (!py_obj || py_obj == Py_None) {
-    PADDLE_THROW("Save parameter list is None");
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "The parameter list to save is None"));
   }
   if (PyList_Check(py_obj)) {
@@ -218,14 +222,16 @@ static std::vector<std::string> inline GetNameList(
     for (size_t i = 0; i < len; ++i) {
       PyObject *py_name =
           PyObject_GetAttrString(PyList_GET_ITEM(py_obj, i), kNameField);
-      PADDLE_ENFORCE_NOT_NULL(py_name);
+      PADDLE_ENFORCE_NOT_NULL(py_name,
+                              platform::errors::InvalidArgument(
+                                  "The name of parameter to save is None"));
       vec_res.emplace_back(PyObjectCast<std::string>(py_name));
       Py_DECREF(py_name);
     }
   } else {
-    PADDLE_THROW("Set parameter should be a list");
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "The parameters to save is not a list"));
   }
   return vec_res;
 }
@@ -237,7 +243,8 @@ static void inline CreateVariableIfNotExit(
   PyObject *py_obj = py_handle.ptr();  // get underlying PyObject
   // Python None is not nullptr in C++!
   if (!py_obj || py_obj == Py_None) {
-    PADDLE_THROW("Save parameter list is None");
+    PADDLE_THROW(
+        platform::errors::InvalidArgument("The parameter list to set is None"));
   }
   if (PyList_Check(py_obj)) {
@@ -251,19 +258,24 @@ static void inline CreateVariableIfNotExit(
     for (size_t i = 0; i < len; ++i) {
       PyObject *py_name =
           PyObject_GetAttrString(PyList_GET_ITEM(py_obj, i), kNameField);
-      PADDLE_ENFORCE_NOT_NULL(py_name);
+      PADDLE_ENFORCE_NOT_NULL(py_name,
+                              platform::errors::InvalidArgument(
+                                  "The name of parameter to set is None"));
       auto para_name = PyObjectCast<std::string>(py_name);
       Py_DECREF(py_name);
       auto var = scope.FindVar(para_name);
       if (var == nullptr) {
-        PADDLE_ENFORCE_NE(exe, nullptr,
-                          "Parameter not Initialized, "
-                          "Please set argument [executor] not None "
-                          "or run startup program first");
+        PADDLE_ENFORCE_NOT_NULL(exe,
+                                platform::errors::InvalidArgument(
+                                    "Parameter not Initialized, "
+                                    "Please set argument [executor] not None "
+                                    "or run startup program first"));
         PyObject *py_var_desc =
             PyObject_GetAttrString(PyList_GET_ITEM(py_obj, i), kVarDescField);
-        PADDLE_ENFORCE_NOT_NULL(py_var_desc);
+        PADDLE_ENFORCE_NOT_NULL(
+            py_var_desc, platform::errors::InvalidArgument(
+                             "The var_desc of parameter to set is None"));
         auto var_desc = PyObjectCast<framework::VarDesc>(py_var_desc);
         Py_DECREF(py_var_desc);
         var = const_cast<framework::Scope *>(&scope)->Var(para_name);
@@ -273,7 +285,8 @@ static void inline CreateVariableIfNotExit(
       }
     }
   } else {
-    PADDLE_THROW("Set parameter should be a list");
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "The parameters to set is not a list"));
   }
   return;
@@ -670,7 +683,10 @@ PYBIND11_MODULE(core_noavx, m) {
             LoD new_offset_lod = ConvertToOffsetBasedLoD(new_lod);
             PADDLE_ENFORCE_EQ(
                 CheckLoD(new_offset_lod, -1), true,
-                "the provided recursive_sequence_lengths info is invalid");
+                platform::errors::InvalidArgument(
+                    "The provided recursive_sequence_lengths info is invalid, "
+                    "the LoD converted by recursive_sequence_lengths is %s",
+                    new_lod));
             new (&instance) LoDTensor(new_offset_lod);
           })
       .def("__init__", [](LoDTensor &instance) { new (&instance) LoDTensor(); })
@@ -688,7 +704,8 @@ PYBIND11_MODULE(core_noavx, m) {
             std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod));
             PADDLE_ENFORCE_EQ(
                 CheckLoD(new_lod, vectorize(self.dims()).front()), true,
-                "the provided lod info is invalid");
+                platform::errors::InvalidArgument(
+                    "The provided LoD is invalid, the LoD is %s", new_lod));
             self.set_lod(new_lod);
           },
           py::arg("lod"), R"DOC(
@@ -724,7 +741,11 @@ PYBIND11_MODULE(core_noavx, m) {
             LoD new_offset_lod = ConvertToOffsetBasedLoD(new_lod);
             PADDLE_ENFORCE_EQ(
                 CheckLoD(new_offset_lod, vectorize(self.dims()).front()), true,
-                "the provided recursive_sequence_lengths info is invalid");
+                platform::errors::InvalidArgument(
+                    "The provided recursive_sequence_lengths info is invalid, "
+                    "the LoD converted by recursive_sequence_lengths is "
+                    "%s",
+                    new_lod));
             self.set_lod(new_offset_lod);
           },
           py::arg("recursive_sequence_lengths"), R"DOC(
@@ -988,7 +1009,10 @@ All parameter, weight, gradient are variables in Paddle.
 #endif
       .def("get_reader",
            [](Variable &self) -> framework::ReaderHolder * {
-             PADDLE_ENFORCE_EQ(self.IsType<framework::ReaderHolder>(), true);
+             PADDLE_ENFORCE_EQ(
+                 self.IsType<framework::ReaderHolder>(), true,
+                 platform::errors::InvalidArgument(
+                     "The variable is not type of ReaderHolder."));
              return self.GetMutable<framework::ReaderHolder>();
            },
            py::return_value_policy::reference)
@@ -1091,7 +1115,8 @@ All parameter, weight, gradient are variables in Paddle.
         std::string str;
         PADDLE_ENFORCE_EQ(
             info.Proto().SerializeToString(&str), true,
-            "Serialize OpProto Error. This could be a bug of Paddle.");
+            platform::errors::Fatal(
+                "Serialize OpProto Error. This could be a bug of Paddle."));
         ret_values.emplace_back(str);
       }
     }
@@ -1204,7 +1229,10 @@ All parameter, weight, gradient are variables in Paddle.
           [](paddle::platform::CUDAPlace& place)
               -> paddle::platform::DeviceContext* {
 #ifndef PADDLE_WITH_CUDA
-            PADDLE_THROW("CUDAPlace is not supported in CPU device.");
+            PADDLE_THROW(
+                platform::errors::PermissionDenied(
+                    "Cannot use CUDAPlace in CPU only version, "
+                    "Please recompile or reinstall Paddle with CUDA support."));
 #else
             return new paddle::platform::CUDADeviceContext(place);
 #endif
@@ -1213,8 +1241,10 @@ All parameter, weight, gradient are variables in Paddle.
           [](paddle::platform::CUDAPinnedPlace& place)
               -> paddle::platform::DeviceContext* {
 #ifndef PADDLE_WITH_CUDA
             PADDLE_THROW(
-                "CUDAPinnedPlace is not supported in CPU device.");
+                platform::errors::PermissionDenied(
+                    "Cannot use CUDAPinnedPlace in CPU only version, "
+                    "Please recompile or reinstall Paddle with CUDA support."));
 #else
             return new paddle::platform::CUDAPinnedDeviceContext(place);
 #endif
@@ -1335,7 +1365,9 @@ All parameter, weight, gradient are variables in Paddle.
       .def("__init__",
            [](platform::CUDAPinnedPlace &self) {
 #ifndef PADDLE_WITH_CUDA
-             PADDLE_THROW("Cannot use CUDAPinnedPlace in CPU only version");
+             PADDLE_THROW(platform::errors::PermissionDenied(
+                 "Cannot use CUDAPinnedPlace in CPU only version, "
+                 "Please recompile or reinstall Paddle with CUDA support."));
 #endif
              new (&self) platform::CUDAPinnedPlace();
            })
@@ -1389,10 +1421,13 @@ All parameter, weight, gradient are variables in Paddle.
           [](py::bytes protobin) {
             proto::OpDesc desc;
             PADDLE_ENFORCE_EQ(desc.ParsePartialFromString(protobin), true,
-                              "Cannot parse user input to OpDesc");
-            PADDLE_ENFORCE_EQ(desc.IsInitialized(), true,
-                              "User OpDesc is not initialized, reason %s",
-                              desc.InitializationErrorString());
+                              platform::errors::InvalidArgument(
+                                  "Cannot parse user input to OpDesc"));
+            PADDLE_ENFORCE_EQ(
+                desc.IsInitialized(), true,
+                platform::errors::InvalidArgument(
+                    "The provided OpDesc is not initialized, the reason is: %s",
+                    desc.InitializationErrorString()));
             return OpRegistry::CreateOp(desc);
           })
       .def("run",
@@ -1564,7 +1599,10 @@ All parameter, weight, gradient are variables in Paddle.
       .def("__len__", [](LoDTensorArray &self) { return self.size(); })
       .def("__setitem__",
            [](LoDTensorArray &self, size_t i, const LoDTensor &t) {
-             PADDLE_ENFORCE_LT(i, self.size());
+             PADDLE_ENFORCE_LT(i, self.size(),
+                               platform::errors::InvalidArgument(
+                                   "The index to set is larger than the size "
+                                   "of LoDTensorArray."));
              self[i].ShareDataWith(t);
             self[i].set_lod(t.lod());
           })
@@ -2099,7 +2137,7 @@ All parameter, weight, gradient are variables in Paddle.
          [](BuildStrategy &self, int num_trainers) {
 #ifdef WIN32
            PADDLE_THROW(platform::errors::Unavailable(
-               "Windows has NO support to distribute mode."));
+               "Distribution mode is not supported on Windows platform."));
 #endif
            self.num_trainers_ = num_trainers;
          })
@@ -2324,7 +2362,7 @@ All parameter, weight, gradient are variables in Paddle.
 #ifdef WIN32
            if (b) {
              PADDLE_THROW(platform::errors::Unavailable(
-                 "Windows has NO support to distribute mode."));
+                 "Distribution mode is not supported on Windows platform."));
            }
 #else
            self.is_distribution_ = b;
......
@@ -160,8 +160,8 @@ class MultiDeviceFeedReader {
             reader, p, 2));
       } else {
         if (platform::is_gpu_place(p)) {
-          PADDLE_THROW(
-              "Place cannot be CUDAPlace when use_double_buffer is False");
+          PADDLE_THROW(platform::errors::PermissionDenied(
+              "Place cannot be CUDAPlace when use_double_buffer is False"));
         }
         holder->Reset(reader);
       }
@@ -233,7 +233,11 @@ class MultiDeviceFeedReader {
       auto each_status = futures_[i].get();
       if (UNLIKELY(each_status != Status::kSuccess)) {
         if (UNLIKELY(each_status == Status::kException)) {
-          PADDLE_ENFORCE_NOT_NULL(exceptions_[i]);
+          PADDLE_ENFORCE_NOT_NULL(
+              exceptions_[i],
+              platform::errors::NotFound("exceptions_[%d] is NULL, but the "
+                                         "result status is Status::kException",
+                                         i));
           *excep = exceptions_[i];
           exceptions_[i] = nullptr;
         }
@@ -280,7 +284,10 @@ class MultiDeviceFeedReader {
     Status status = WaitFutures(&excep);
     if (UNLIKELY(excep)) {
-      PADDLE_ENFORCE_EQ(status, Status::kException);
+      PADDLE_ENFORCE_EQ(status, Status::kException,
+                        platform::errors::NotFound(
+                            "The exception raised is not NULL, but "
+                            "the result status is not Status::kException"));
       std::rethrow_exception(excep);
     }
@@ -290,7 +297,10 @@ class MultiDeviceFeedReader {
       throw py::stop_iteration();
     }
-    PADDLE_ENFORCE_EQ(status, Status::kSuccess);
+    PADDLE_ENFORCE_EQ(status, Status::kSuccess,
+                      platform::errors::NotFound(
+                          "The function executed successfully, but "
+                          "the result status is not Status::kSuccess"));
   }
   std::shared_ptr<QueueType> queue_;
......
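The tensor_py.h changes that follow add typed bounds checks to TensorGetElement / TensorSetElement. Reduced to a plain std::vector (Tensor, numel(), and the PADDLE_* macros are not reproduced here), the guarded-access pattern is roughly the following sketch.

#include <cstddef>
#include <cstdio>
#include <stdexcept>
#include <vector>

template <typename T>
T GetElement(const std::vector<T>& data, std::size_t offset) {
  // Validate the offset up front and raise a descriptive error instead of
  // silently reading out of bounds.
  if (offset >= data.size()) {
    throw std::out_of_range("The offset exceeds the size of tensor.");
  }
  return data[offset];
}

int main() {
  std::vector<float> t = {1.0f, 2.0f, 3.0f};
  std::printf("%f\n", GetElement(t, 1));  // in range: prints 2.000000
  try {
    GetElement(t, 7);                     // out of range: throws
  } catch (const std::out_of_range& e) {
    std::printf("%s\n", e.what());        // prints the descriptive message
  }
  return 0;
}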
@@ -20,6 +20,7 @@ limitations under the License. */
 #include <string>
 #include <tuple>
 #include <vector>
+#include "paddle/fluid/framework/data_type.h"
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/memory/memcpy.h"
 #include "paddle/fluid/operators/math/concat_and_split.h"
@@ -119,22 +120,28 @@ inline std::string TensorDTypeToPyDTypeStr(
       return "e"; \
     } else { \
       constexpr auto kIsValidDType = ValidDTypeToPyArrayChecker<T>::kValue; \
-      PADDLE_ENFORCE_EQ(kIsValidDType, true, \
-                        "This type of tensor cannot be expose to Python"); \
+      PADDLE_ENFORCE_EQ( \
+          kIsValidDType, true, \
+          platform::errors::Unimplemented( \
+              "This type [%s] of tensor cannot be exposed to Python", \
+              typeid(T).name())); \
       return py::format_descriptor<T>::format(); \
     } \
   }
 _ForEachDataType_(TENSOR_DTYPE_TO_PY_DTYPE);
 #undef TENSOR_DTYPE_TO_PY_DTYPE
-  PADDLE_THROW("Unsupported data type %d", static_cast<int>(type));
+  PADDLE_THROW(platform::errors::Unimplemented(
+      "Unsupported tensor data type: %s", framework::DataTypeToString(type)));
 }
 }  // namespace details
 template <typename T>
 T TensorGetElement(const framework::Tensor &self, size_t offset) {
-  PADDLE_ENFORCE_LT(offset, self.numel());
+  PADDLE_ENFORCE_LT(offset, self.numel(),
+                    platform::errors::InvalidArgument(
+                        "The offset exceeds the size of tensor."));
   T b = static_cast<T>(0);
   if (platform::is_cpu_place(self.place())) {
     b = self.data<T>()[offset];
@@ -151,7 +158,9 @@ T TensorGetElement(const framework::Tensor &self, size_t offset) {
 template <typename T>
 void TensorSetElement(framework::Tensor *self, size_t offset, T elem) {
-  PADDLE_ENFORCE_LT(offset, self->numel());
+  PADDLE_ENFORCE_LT(offset, self->numel(),
+                    platform::errors::InvalidArgument(
+                        "The offset exceeds the size of tensor."));
   if (platform::is_cpu_place(self->place())) {
     self->mutable_data<T>(self->place())[offset] = elem;
 #ifdef PADDLE_WITH_CUDA
@@ -194,13 +203,16 @@ void SetTensorFromPyArrayT(
       paddle::platform::GpuMemcpySync(dst, array.data(), array.nbytes(),
                                       cudaMemcpyHostToDevice);
     } else {
-      PADDLE_THROW(
-          "Incompatible place type: Tensor.set() supports CPUPlace, CUDAPlace "
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "Incompatible place type: Tensor.set() supports "
+          "CPUPlace, CUDAPlace "
           "and CUDAPinnedPlace, but got %s!",
-          place);
+          place));
     }
 #else
-    PADDLE_THROW("Not supported GPU, please compile WITH_GPU option");
+    PADDLE_THROW(platform::errors::PermissionDenied(
+        "Cannot use CUDAPlace in CPU only version, "
+        "Please recompile or reinstall Paddle with CUDA support."));
 #endif
   }
 }
@@ -234,12 +246,12 @@ void SetTensorFromPyArray(framework::Tensor *self, const py::object &obj,
   } else if (py::isinstance<py::array_t<bool>>(array)) {
     SetTensorFromPyArrayT<bool, P>(self, array, place, zero_copy);
   } else {
-    PADDLE_THROW(
-        "Incompatible data or style type: tensor.set() supports bool, float16, "
+    PADDLE_THROW(platform::errors::InvalidArgument(
+        "Incompatible data type: tensor.set() supports bool, float16, "
         "float32, "
         "float64, "
        "int8, int16, int32, int64 and uint8, uint16, but got %s!",
-        array.dtype());
+        array.dtype()));
   }
 }
@@ -389,7 +401,8 @@ void _sliceDapper(const framework::Tensor *in, framework::Tensor *out,
       _sliceCompute<T, 9>(in, out, ctx, axes, starts);
       break;
     default:
-      PADDLE_THROW("dim size not exepected, current is %d", size);
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "The dim size should be 1 to 9, current is %d", size));
       break;
   }
 }
@@ -454,7 +467,9 @@ inline framework::Tensor *_sliceTensor(const framework::Tensor &self,
     case framework::proto::VarType::UINT8:
       return _sliceAndConcat<uint8_t>(self, obj, dim);
     default:
-      PADDLE_THROW("Not support type %d", src_type);
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "Not support tensor type: %s",
+          framework::DataTypeToString(src_type)));
   }
 }
@@ -525,14 +540,16 @@ inline py::array TensorToPyArray(const framework::Tensor &tensor,
         static_cast<size_t>(tensor.dims().size()), py_dims, py_strides));
   } else {
     py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides);
-    PADDLE_ENFORCE_EQ(py_arr.writeable(), true,
-                      platform::errors::InvalidArgument(
-                          "PyArray must be writable, otherwise memory leak "
-                          "or double free would occur"));
-    PADDLE_ENFORCE_EQ(py_arr.owndata(), true,
-                      platform::errors::InvalidArgument(
-                          "PyArray must own data, otherwise memory leak "
-                          "or double free would occur"));
+    PADDLE_ENFORCE_EQ(
+        py_arr.writeable(), true,
+        platform::errors::InvalidArgument(
+            "PyArray is not writable, in which case memory leak "
+            "or double free would occur"));
+    PADDLE_ENFORCE_EQ(
+        py_arr.owndata(), true,
+        platform::errors::InvalidArgument(
+            "PyArray does not own data, in which case memory leak "
+            "or double free would occur"));
     platform::CPUPlace place;
     size_t copy_bytes = sizeof_dtype * numel;
     paddle::memory::Copy(place, py_arr.mutable_data(), place, tensor_buf_ptr,
@@ -543,16 +560,23 @@ inline py::array TensorToPyArray(const framework::Tensor &tensor,
 #ifdef PADDLE_WITH_CUDA
     py::array py_arr(py::dtype(py_dtype_str.c_str()), py_dims, py_strides);
-    PADDLE_ENFORCE(py_arr.writeable() && py_arr.owndata(),
-                   "PyArray must be writable and own data, otherwise memory leak "
-                   "or double free would occur");
+    PADDLE_ENFORCE_EQ(py_arr.writeable(), true,
+                      platform::errors::InvalidArgument(
+                          "PyArray is not writable, in which case memory leak "
+                          "or double free would occur"));
+    PADDLE_ENFORCE_EQ(py_arr.owndata(), true,
+                      platform::errors::InvalidArgument(
+                          "PyArray does not own data, in which case memory leak "
+                          "or double free would occur"));
     size_t copy_bytes = sizeof_dtype * numel;
     paddle::platform::GpuMemcpySync(py_arr.mutable_data(), tensor_buf_ptr,
                                     copy_bytes, cudaMemcpyDeviceToHost);
     return py_arr;
 #else
-    PADDLE_THROW("CUDAPlace is not supported when not compiled with CUDA");
+    PADDLE_THROW(platform::errors::PermissionDenied(
+        "Cannot use CUDAPlace in CPU only version, "
+        "Please recompile or reinstall Paddle with CUDA support."));
 #endif
   }
......
@@ -25,7 +25,8 @@ class TestException(unittest.TestCase):
         try:
             core.__unittest_throw_exception__()
         except core.EnforceNotMet as ex:
-            self.assertIn("test exception", cpt.get_exception_message(ex))
+            self.assertIn("This is a test of exception",
+                          cpt.get_exception_message(ex))
             exception = ex
         self.assertIsNotNone(exception)
......