Unverified commit 1dcb9961, authored by Jiaying Zhao, committed by GitHub

Catch mobile exceptions test=develop (#1867)

* Catch mobile exceptions test=develop

* code style format test=develop
Parent d85fcc1f
@@ -43,7 +43,6 @@ struct PaddleMobileException : public std::exception {
   {                                                                        \
     char buffer[1000];                                                     \
     snprintf(buffer, sizeof(buffer), __VA_ARGS__);                         \
-    std::string detail(buffer);                                            \
     throw paddle_mobile::PaddleMobileException("Custom Exception", buffer, \
                                                __FILE__, __LINE__);        \
   }                                                                        \
@@ -55,7 +54,6 @@ struct PaddleMobileException : public std::exception {
     } else {                                                                  \
       char buffer[1000];                                                      \
       snprintf(buffer, sizeof(buffer), __VA_ARGS__);                          \
-      std::string detail(buffer);                                             \
       throw paddle_mobile::PaddleMobileException("paddle-mobile enforce",     \
                                                  buffer, __FILE__, __LINE__); \
     }                                                                         \
......
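Both hunks above drop `std::string detail(buffer);`: the message is formatted with snprintf into a fixed-size stack buffer and the exception is constructed directly from that buffer, so the intermediate string was an unused local. The standalone sketch below illustrates the same throw-macro pattern outside the repository; `DemoException` and `DEMO_ENFORCE` are simplified stand-ins written for this note, not the library's actual `PaddleMobileException` / `PADDLE_MOBILE_ENFORCE` definitions, and the message layout inside the constructor is an assumption.

// Simplified, self-contained sketch of the throw-macro pattern (illustrative only).
#include <cstdio>
#include <exception>
#include <string>

struct DemoException : public std::exception {
  std::string message;
  DemoException(const char *header, const char *detail, const char *file, int line) {
    char buf[1500];
    // Fold the header, source location, and formatted detail into one string.
    snprintf(buf, sizeof(buf), "%s\n[%s:%d] %s", header, file, line, detail);
    message = buf;
  }
  const char *what() const noexcept override { return message.c_str(); }
};

#define DEMO_ENFORCE(stat, ...)                                        \
  do {                                                                 \
    if (stat) {                                                        \
    } else {                                                           \
      char buffer[1000];                                               \
      snprintf(buffer, sizeof(buffer), __VA_ARGS__);                   \
      throw DemoException("demo enforce", buffer, __FILE__, __LINE__); \
    }                                                                  \
  } while (0)

int main() {
  try {
    void *ptr = nullptr;
    DEMO_ENFORCE(ptr != nullptr, "pointer %s must not be null", "input");
  } catch (DemoException &e) {
    printf("%s\n", e.what());  // prints the header, file:line, and detail
  }
  return 0;
}

Whatever `what()` returns for the real exception is the string that the executor change below stores in `exception_msg_` and later exposes through `GetExceptionMsg()`.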
@@ -83,7 +83,8 @@ enum PMStatus {
   PMOutOfAuthority = 0x05, /*!< Try to modified data not your own*/
   PMOutOfMem = 0x06,       /*!< OOM error*/
   PMUnImplError = 0x07,    /*!< Unimplement error. */
-  PMWrongDevice = 0x08     /*!< un-correct device. */
+  PMWrongDevice = 0x08,    /*!< un-correct device. */
+  PMException = 0x09       /*!< throw exception. */
 };
 enum RoundType {
......
@@ -480,44 +480,52 @@ const CLImage *Executor<Device, T>::GetOutputImage(
 template <typename Device, typename T>
 PMStatus Executor<Device, T>::Predict() {
+  try {
 #if _OPENMP
   omp_set_num_threads(CPUContext::Context()->get_thread_num());
 #endif
   // clear all no persistable tensor array since write_to_array
   // is always push back a new tensor in the array
   ClearNoPersistableTensorArray(program_desc_.get(), program_.scope.get());
 #ifdef PADDLE_MOBILE_PROFILE
   std::vector<ProfInfo> profile(ops_of_block0_.size());
   struct timespec ts;
   int op_index = 0;
 #endif
   for (int i = 0; i < ops_of_block0_.size(); ++i) {
     auto &op_handler = ops_of_block0_[i];
 #ifdef PADDLE_MOBILE_PROFILE
     clock_gettime(CLOCK_MONOTONIC, &ts);
     profile[op_index].runBegin = (uint64_t)ts.tv_sec * 1e9 + ts.tv_nsec;
 #endif
     DLOG << i << "th, "
          << "run op: " << op_handler->Type();
     if (lod_mode_ && input_dim_has_changed_) {
       op_handler->InferShape();
     }
     op_handler->Run();
 #ifdef PADDLE_MOBILE_PROFILE
     clock_gettime(CLOCK_MONOTONIC, &ts);
     profile[op_index].runEnd = (uint64_t)ts.tv_sec * 1e9 + ts.tv_nsec;
     ++op_index;
 #endif
   }
   if (feed_indices_.size() == 1) {
     input_dim_has_changed_ = false;
   }
 #ifdef PADDLE_MOBILE_PROFILE
   PrintProfile(profile);
 #endif
   return PMSuccess;
+  } catch (PaddleMobileException &e) {
+    exception_msg_ = e.what();
+    return PMException;
+  } catch (std::exception &e) {
+    exception_msg_ = e.what();
+    return PMException;
+  }
 }
 #ifdef PADDLE_MOBILE_PROFILE
@@ -588,6 +596,11 @@ void Executor<Device, T>::GetTensorResults(
   }
 }

+template <typename Device, typename T>
+std::string Executor<Device, T>::GetExceptionMsg() {
+  return exception_msg_;
+}
+
 #ifdef PADDLE_MOBILE_FPGA
 template <typename Device, typename T>
 void Executor<Device, T>::InjectVariable(const Tensor &t,
......
@@ -60,6 +60,7 @@ class Executor {
   void FeedTensorData(const std::vector<framework::Tensor> &v);
   void GetTensorResults(std::vector<framework::Tensor *> *v);
+  std::string GetExceptionMsg();
 #ifdef PADDLE_MOBILE_FPGA
   void InjectVariable(const Tensor &t, std::string var_name);
@@ -100,6 +101,7 @@ class Executor {
   std::vector<std::shared_ptr<OperatorBase<Device>>> ops_of_block0_;
   std::unordered_map<std::string, int> feed_indices_;
   std::unordered_map<std::string, int> fetch_indices_;
+  std::string exception_msg_;
   // for super resoltion
   DDim input_dim_last_;
......
@@ -20,7 +20,7 @@ limitations under the License. */
 #endif  // _OPENMP
 #ifdef PADDLE_MOBILE_CL
 #include <CL/cl.h>
-#include <mutex>
+#include <mutex>  // NOLINT
 #include "framework/cl/cl_engine.h"
 #include "framework/cl/cl_tensor.h"
 #endif
@@ -64,6 +64,7 @@ PMStatus PaddleMobile<Device, T>::Load(const std::string &model_path,
     loader_ = std::make_shared<framework::Loader<Device, T>>();
   } else {
     LOG(kLOG_INFO) << "loader inited";
+    LOG(kLOG_INFO) << "loader inited";
   }
   if (executor_.get() == nullptr) {
@@ -187,6 +188,14 @@ void PaddleMobile<Device, T>::Clear() {
 template <typename Device, typename T>
 double PaddleMobile<Device, T>::GetPredictTime() {}

+template <typename Device, typename T>
+std::string PaddleMobile<Device, T>::GetExceptionMsg() {
+  if (executor_.get() != nullptr) {
+    return executor_->GetExceptionMsg();
+  }
+  return "";
+}
+
 #ifdef PADDLE_MOBILE_CPU
 template <>
 double PaddleMobile<CPU, float>::GetPredictTime() {
......
@@ -90,6 +90,7 @@ class PaddleMobile {
                 PowerMode power_mode = PERFORMANCE_PRIORITY);
   void Clear();
   double GetPredictTime();
+  std::string GetExceptionMsg();
 #ifdef PADDLE_MOBILE_FPGA
   void InjectVariable(const framework::Tensor &t, std::string var_name);
......
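Taken together, these changes mean a failing operator no longer propagates a C++ exception out of Predict() on mobile: Executor::Predict() catches it, returns the new PMException status, and keeps the message, which PaddleMobile::GetExceptionMsg() forwards to the caller. Below is a minimal caller-side sketch; the header path, template arguments, and the exact Load()/Predict() overloads are assumptions based only on the fragments visible in this diff, so treat it as illustrative rather than the library's documented API.

// Hypothetical usage sketch (not part of this commit).
#include <iostream>
#include <string>
#include "io/paddle_mobile.h"  // assumed header location

int main() {
  // Assumed template arguments and default constructor.
  paddle_mobile::PaddleMobile<paddle_mobile::CPU, float> engine;
  // Assumed Load overload: model directory plus an optimize flag.
  if (engine.Load("./mobilenet", /*optimize=*/true) != paddle_mobile::PMSuccess) {
    std::cerr << "model load failed" << std::endl;
    return 1;
  }
  // ... feed an input tensor here ...
  paddle_mobile::PMStatus status = engine.Predict();  // assumed no-arg overload
  if (status == paddle_mobile::PMException) {
    // The exception was caught inside Executor::Predict(); read its message.
    std::cerr << "predict failed: " << engine.GetExceptionMsg() << std::endl;
    return 1;
  }
  std::cout << "predict ok" << std::endl;
  return 0;
}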