Unverified commit 1dcb9961 authored by Jiaying Zhao, committed by GitHub

Catch mobile exceptions test=develop (#1867)

* Catch mobile exceptions test=develop

* code style format test=develop
Parent d85fcc1f
......@@ -43,7 +43,6 @@ struct PaddleMobileException : public std::exception {
{ \
char buffer[1000]; \
snprintf(buffer, sizeof(buffer), __VA_ARGS__); \
std::string detail(buffer); \
throw paddle_mobile::PaddleMobileException("Custom Exception", buffer, \
__FILE__, __LINE__); \
} \
......@@ -55,7 +54,6 @@ struct PaddleMobileException : public std::exception {
} else { \
char buffer[1000]; \
snprintf(buffer, sizeof(buffer), __VA_ARGS__); \
std::string detail(buffer); \
throw paddle_mobile::PaddleMobileException("paddle-mobile enforce", \
buffer, __FILE__, __LINE__); \
} \
......
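
The std::string detail(buffer); statements removed above were dead code: the formatted buffer is handed straight to the PaddleMobileException constructor and detail was never read. For context, here is a minimal sketch of how the enforce macro is typically invoked; the include path and the surrounding function are assumptions for illustration, not part of this commit:

// Sketch only (not from this commit); assumes the macro is available via the
// project's enforce header.
#include "common/enforce.h"

void CheckInput(const float *input, int len) {
  // Throws paddle_mobile::PaddleMobileException with the snprintf-formatted
  // message when the condition is false; e.what() later reports that message.
  PADDLE_MOBILE_ENFORCE(input != nullptr, "input pointer is null");
  PADDLE_MOBILE_ENFORCE(len > 0, "invalid input length: %d", len);
}
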
......@@ -83,7 +83,8 @@ enum PMStatus {
PMOutOfAuthority = 0x05, /*!< Tried to modify data not your own. */
PMOutOfMem = 0x06, /*!< OOM error. */
PMUnImplError = 0x07, /*!< Unimplemented error. */
PMWrongDevice = 0x08 /*!< Incorrect device. */
PMWrongDevice = 0x08, /*!< Incorrect device. */
PMException = 0x09 /*!< Exception was thrown. */
};
enum RoundType {
......
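
With PMException added to PMStatus, a caller can tell "an exception was caught during prediction" apart from the other failure codes. A small caller-side sketch using only enum values that appear in this commit; the include path is an assumption about the project layout:

// Sketch: map the extended status codes to human-readable text on the caller
// side. Values not shown in this commit fall through to the default branch.
#include "common/types.h"  // assumed location of the PMStatus enum

const char *StatusToString(paddle_mobile::PMStatus s) {
  switch (s) {
    case paddle_mobile::PMSuccess:        return "success";
    case paddle_mobile::PMOutOfAuthority: return "no authority to modify this data";
    case paddle_mobile::PMOutOfMem:       return "out of memory";
    case paddle_mobile::PMUnImplError:    return "not implemented";
    case paddle_mobile::PMWrongDevice:    return "incorrect device";
    case paddle_mobile::PMException:      return "exception caught, see GetExceptionMsg()";
    default:                              return "other failure";
  }
}
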
......@@ -480,6 +480,7 @@ const CLImage *Executor<Device, T>::GetOutputImage(
template <typename Device, typename T>
PMStatus Executor<Device, T>::Predict() {
try {
#if _OPENMP
omp_set_num_threads(CPUContext::Context()->get_thread_num());
#endif
......@@ -518,6 +519,13 @@ PMStatus Executor<Device, T>::Predict() {
PrintProfile(profile);
#endif
return PMSuccess;
} catch (PaddleMobileException &e) {
exception_msg_ = e.what();
return PMException;
} catch (std::exception &e) {
exception_msg_ = e.what();
return PMException;
}
}
#ifdef PADDLE_MOBILE_PROFILE
......@@ -588,6 +596,11 @@ void Executor<Device, T>::GetTensorResults(
}
}
template <typename Device, typename T>
std::string Executor<Device, T>::GetExceptionMsg() {
return exception_msg_;
}
#ifdef PADDLE_MOBILE_FPGA
template <typename Device, typename T>
void Executor<Device, T>::InjectVariable(const Tensor &t,
......
......@@ -60,6 +60,7 @@ class Executor {
void FeedTensorData(const std::vector<framework::Tensor> &v);
void GetTensorResults(std::vector<framework::Tensor *> *v);
std::string GetExceptionMsg();
#ifdef PADDLE_MOBILE_FPGA
void InjectVariable(const Tensor &t, std::string var_name);
......@@ -100,6 +101,7 @@ class Executor {
std::vector<std::shared_ptr<OperatorBase<Device>>> ops_of_block0_;
std::unordered_map<std::string, int> feed_indices_;
std::unordered_map<std::string, int> fetch_indices_;
std::string exception_msg_;
// for super resolution
DDim input_dim_last_;
......
......@@ -20,7 +20,7 @@ limitations under the License. */
#endif // _OPENMP
#ifdef PADDLE_MOBILE_CL
#include <CL/cl.h>
#include <mutex>
#include <mutex> // NOLINT
#include "framework/cl/cl_engine.h"
#include "framework/cl/cl_tensor.h"
#endif
......@@ -64,6 +64,7 @@ PMStatus PaddleMobile<Device, T>::Load(const std::string &model_path,
loader_ = std::make_shared<framework::Loader<Device, T>>();
} else {
LOG(kLOG_INFO) << "loader inited";
}
if (executor_.get() == nullptr) {
......@@ -187,6 +188,14 @@ void PaddleMobile<Device, T>::Clear() {
template <typename Device, typename T>
double PaddleMobile<Device, T>::GetPredictTime() {}
template <typename Device, typename T>
std::string PaddleMobile<Device, T>::GetExceptionMsg() {
if (executor_.get() != nullptr) {
return executor_->GetExceptionMsg();
}
return "";
}
#ifdef PADDLE_MOBILE_CPU
template <>
double PaddleMobile<CPU, float>::GetPredictTime() {
......
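
End to end, a caller that gets PMException back from Predict can now ask PaddleMobile for the message recorded by the catch blocks in Executor::Predict instead of having the exception terminate the app. The sketch below is illustrative only: the include path, model path, and the exact Load/Predict signatures are assumptions to be checked against paddle_mobile.h, while GetExceptionMsg and PMException are the names introduced in this commit.

// Sketch only (not from this commit).
#include <iostream>

#include "io/paddle_mobile.h"  // assumed public header for PaddleMobile

int main() {
  paddle_mobile::PaddleMobile<paddle_mobile::CPU, float> mobile;
  // Hypothetical model directory; Load also has combined model/param overloads.
  if (mobile.Load("./mobilenet", /*optimize=*/true) != paddle_mobile::PMSuccess) {
    std::cerr << "load failed" << std::endl;
    return 1;
  }
  // ... feed the input tensor here ...
  if (mobile.Predict() == paddle_mobile::PMException) {
    // New in this commit: the what() text captured in Executor::Predict() is
    // surfaced to the caller through GetExceptionMsg().
    std::cerr << "predict threw: " << mobile.GetExceptionMsg() << std::endl;
    return 1;
  }
  return 0;
}
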
......@@ -90,6 +90,7 @@ class PaddleMobile {
PowerMode power_mode = PERFORMANCE_PRIORITY);
void Clear();
double GetPredictTime();
std::string GetExceptionMsg();
#ifdef PADDLE_MOBILE_FPGA
void InjectVariable(const framework::Tensor &t, std::string var_name);
......