未验证 提交 d1062d52 编写于 作者: C Chen Weihang 提交者: GitHub

Replace all errors thrown by LOG(FATAL) with PADDLE_THROW (#24759)

* remove REPLACE_ENFORCE_GLOG compile option & add ci rule prohibit LOG(FATAL) using, test=develop

* remove ci test case, test=develop

* replace all LOG(FATAL) & polish message, test=develop

* fix typo, test=develop

* polish error info detail, test=develop
上级 a4f60034
...@@ -80,7 +80,6 @@ option(WITH_PSLIB "Compile with pslib support" OFF) ...@@ -80,7 +80,6 @@ option(WITH_PSLIB "Compile with pslib support" OFF)
option(WITH_BOX_PS "Compile with box_ps support" OFF) option(WITH_BOX_PS "Compile with box_ps support" OFF)
option(WITH_XBYAK "Compile with xbyak support" ON) option(WITH_XBYAK "Compile with xbyak support" ON)
option(WITH_CONTRIB "Compile the third-party contributation" OFF) option(WITH_CONTRIB "Compile the third-party contributation" OFF)
option(REPLACE_ENFORCE_GLOG "Replace PADDLE_ENFORCE with glog/CHECK for better debug." OFF)
option(WITH_GRPC "Use grpc as the default rpc framework" ${WITH_DISTRIBUTE}) option(WITH_GRPC "Use grpc as the default rpc framework" ${WITH_DISTRIBUTE})
option(WITH_INFERENCE_API_TEST "Test fluid inference C++ high-level api interface" OFF) option(WITH_INFERENCE_API_TEST "Test fluid inference C++ high-level api interface" OFF)
option(PY_VERSION "Compile PaddlePaddle with python3 support" ${PY_VERSION}) option(PY_VERSION "Compile PaddlePaddle with python3 support" ${PY_VERSION})
...@@ -103,11 +102,6 @@ if(NOT CMAKE_BUILD_TYPE) ...@@ -103,11 +102,6 @@ if(NOT CMAKE_BUILD_TYPE)
FORCE) FORCE)
endif() endif()
# Replace PADDLE_ENFORCE with glog/CHECK for better debug
if(REPLACE_ENFORCE_GLOG)
add_definitions("-DREPLACE_ENFORCE_GLOG")
endif()
# the type of sanitizer, options are: Address, Leak, Memory, Thread, Undefined. Default: OFF # the type of sanitizer, options are: Address, Leak, Memory, Thread, Undefined. Default: OFF
if(SANITIZER_TYPE AND NOT "${SANITIZER_TYPE}" MATCHES "^(Address|Leak|Memory|Thread|Undefined)$") if(SANITIZER_TYPE AND NOT "${SANITIZER_TYPE}" MATCHES "^(Address|Leak|Memory|Thread|Undefined)$")
message("Choose the correct type of sanitizer") message("Choose the correct type of sanitizer")
......
...@@ -32,6 +32,7 @@ ...@@ -32,6 +32,7 @@
#include <valarray> #include <valarray>
#include <vector> #include <vector>
#include "paddle/fluid/framework/expect.h" #include "paddle/fluid/framework/expect.h"
#include "paddle/fluid/platform/enforce.h"
namespace paddle { namespace paddle {
namespace framework { namespace framework {
...@@ -43,7 +44,10 @@ class ArchiveBase { ...@@ -43,7 +44,10 @@ class ArchiveBase {
// Archive is not copyable. But to allow move capture by function objects, // Archive is not copyable. But to allow move capture by function objects,
// check it at runtime rather than at compile time. // check it at runtime rather than at compile time.
ArchiveBase(const ArchiveBase&) { LOG(FATAL) << "Not supported"; } ArchiveBase(const ArchiveBase&) {
PADDLE_THROW(platform::errors::Unavailable(
"ArchiveBase class does not support copy construction."));
}
ArchiveBase(ArchiveBase&& other) ArchiveBase(ArchiveBase&& other)
: buffer_(other.buffer_), : buffer_(other.buffer_),
...@@ -62,7 +66,8 @@ class ArchiveBase { ...@@ -62,7 +66,8 @@ class ArchiveBase {
public: public:
ArchiveBase& operator=(const ArchiveBase&) { ArchiveBase& operator=(const ArchiveBase&) {
LOG(FATAL) << "Not supported"; PADDLE_THROW(platform::errors::Unavailable(
"ArchiveBase class does not support copy assignment."));
return *this; return *this;
} }
......
...@@ -34,7 +34,7 @@ paddle::framework::DataFeedDesc load_datafeed_param_from_file( ...@@ -34,7 +34,7 @@ paddle::framework::DataFeedDesc load_datafeed_param_from_file(
const char* filename) { const char* filename) {
paddle::framework::DataFeedDesc data_feed_desc; paddle::framework::DataFeedDesc data_feed_desc;
int file_descriptor = open(filename, O_RDONLY); int file_descriptor = open(filename, O_RDONLY);
PADDLE_ENFORCE_NE(file_descriptor, -1, platform::errors::Unavaliable( PADDLE_ENFORCE_NE(file_descriptor, -1, platform::errors::Unavailable(
"Cannot open file %s.", filename)); "Cannot open file %s.", filename));
google::protobuf::io::FileInputStream fileInput(file_descriptor); google::protobuf::io::FileInputStream fileInput(file_descriptor);
google::protobuf::TextFormat::Parse(&fileInput, &data_feed_desc); google::protobuf::TextFormat::Parse(&fileInput, &data_feed_desc);
...@@ -45,7 +45,7 @@ paddle::framework::DataFeedDesc load_datafeed_param_from_file( ...@@ -45,7 +45,7 @@ paddle::framework::DataFeedDesc load_datafeed_param_from_file(
const std::vector<std::string> load_filelist_from_file(const char* filename) { const std::vector<std::string> load_filelist_from_file(const char* filename) {
std::vector<std::string> filelist; std::vector<std::string> filelist;
std::ifstream fin(filename); std::ifstream fin(filename);
PADDLE_ENFORCE_EQ(fin.good(), true, platform::errors::Unavaliable( PADDLE_ENFORCE_EQ(fin.good(), true, platform::errors::Unavailable(
"Cannot open file %s.", filename)); "Cannot open file %s.", filename));
std::string line; std::string line;
while (getline(fin, line)) { while (getline(fin, line)) {
......
...@@ -187,16 +187,8 @@ void AllReduceOpHandle::SyncNCCLAllReduce() { ...@@ -187,16 +187,8 @@ void AllReduceOpHandle::SyncNCCLAllReduce() {
nccl_ctxs_->GetRunEnvNCCLCtx(run_order_, use_hierarchical_allreduce_); nccl_ctxs_->GetRunEnvNCCLCtx(run_order_, use_hierarchical_allreduce_);
auto &nccl_ctx = nccl_ctxs->at(dev_id); auto &nccl_ctx = nccl_ctxs->at(dev_id);
auto stream = nccl_ctx.stream(); auto stream = nccl_ctx.stream();
cudaError_t e_sync = cudaStreamSynchronize(stream); PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream));
if (e_sync != 0) { PADDLE_ENFORCE_CUDA_SUCCESS(cudaGetLastError());
LOG(FATAL) << "cudaStreamSynchronize " << cudaGetErrorString(e_sync);
}
cudaError_t e_get = cudaGetLastError();
if (e_get != 0) {
LOG(FATAL) << "cudaGetLastError " << cudaGetErrorString(e_get)
<< " errno:" << e_get;
}
} }
} }
} }
......
...@@ -34,9 +34,10 @@ class ExceptionHolder { ...@@ -34,9 +34,10 @@ class ExceptionHolder {
} catch (platform::EnforceNotMet& exp) { } catch (platform::EnforceNotMet& exp) {
Catch(exp); Catch(exp);
} catch (std::exception& ex) { } catch (std::exception& ex) {
LOG(FATAL) << "std::exception caught, " << ex.what(); PADDLE_THROW(platform::errors::Fatal(
"Unknown std::exception caught:\n%s.", ex.what()));
} catch (...) { } catch (...) {
LOG(FATAL) << "Unknown exception caught"; PADDLE_THROW(platform::errors::Fatal("Unknown exception caught."));
} }
} }
......
...@@ -104,7 +104,8 @@ void DensePullThread::wait_all() { ...@@ -104,7 +104,8 @@ void DensePullThread::wait_all() {
} }
if (_pull_dense_fail_times > 20) { if (_pull_dense_fail_times > 20) {
LOG(FATAL) << "pull dense failed times more than 20 times"; PADDLE_THROW(
platform::errors::Fatal("Pull dense failed more than 20 times."));
exit(-1); exit(-1);
} }
......
...@@ -214,35 +214,35 @@ class AfsManager { ...@@ -214,35 +214,35 @@ class AfsManager {
int fd_read[2]; int fd_read[2];
int fd_write[2]; int fd_write[2];
if (read) { if (read) {
if (pipe(fd_read) != 0) { PADDLE_ENFORCE_EQ(
LOG(FATAL) << "create read pipe failed"; pipe(fd_read), 0,
return -1; platform::errors::External("Create read pipe failed in AfsManager."));
}
} }
if (write) { if (write) {
if (pipe(fd_write) != 0) { PADDLE_ENFORCE_EQ(pipe(fd_write), 0,
LOG(FATAL) << "create write pipe failed"; platform::errors::External(
return -1; "Create write pipe failed in AfsManager."));
}
} }
pid = vfork(); pid = vfork();
if (pid < 0) { PADDLE_ENFORCE_GE(
LOG(FATAL) << "fork failed"; pid, 0,
return -1; platform::errors::External(
} "Failed to create a child process via fork in AfsManager."));
if (pid == 0) { if (pid == 0) {
if (read) { if (read) {
if (-1 == dup2(fd_read[1], STDOUT_FILENO)) { PADDLE_ENFORCE_NE(
LOG(FATAL) << "dup2 failed"; dup2(fd_read[1], STDOUT_FILENO), -1,
} platform::errors::External(
"Failed to duplicate file descriptor via dup2 in AfsManager."));
close(fd_read[1]); close(fd_read[1]);
close(fd_read[0]); close(fd_read[0]);
} }
if (write) { if (write) {
if (-1 == dup2(fd_write[0], STDIN_FILENO)) { PADDLE_ENFORCE_NE(
LOG(FATAL) << "dup2 failed"; dup2(fd_write[0], STDIN_FILENO), -1,
} platform::errors::External(
"Failed to duplicate file descriptor via dup2 in AfsManager."));
close(fd_write[0]); close(fd_write[0]);
close(fd_write[1]); close(fd_write[1]);
} }
...@@ -265,20 +265,20 @@ class AfsManager { ...@@ -265,20 +265,20 @@ class AfsManager {
close(fd_read[1]); close(fd_read[1]);
fcntl(fd_read[0], F_SETFD, FD_CLOEXEC); fcntl(fd_read[0], F_SETFD, FD_CLOEXEC);
fp_read = fdopen(fd_read[0], "r"); fp_read = fdopen(fd_read[0], "r");
if (0 == fp_read) { PADDLE_ENFORCE_NE(
LOG(FATAL) << "fdopen failed."; fp_read, 0,
return -1; platform::errors::External(
} "Failed to open file descriptor via fdopen in AfsManager."));
} }
if (write) { if (write) {
close(fd_write[0]); close(fd_write[0]);
fcntl(fd_write[1], F_SETFD, FD_CLOEXEC); fcntl(fd_write[1], F_SETFD, FD_CLOEXEC);
fp_write = fdopen(fd_write[1], "w"); fp_write = fdopen(fd_write[1], "w");
if (0 == fp_write) { PADDLE_ENFORCE_NE(
LOG(FATAL) << "fdopen failed."; fp_write, 0,
return -1; platform::errors::External(
} "Failed to open file descriptor via fdopen in AfsManager."));
} }
return 0; return 0;
} }
......
...@@ -1085,7 +1085,8 @@ void FleetWrapper::ShrinkDenseTable(int table_id, Scope* scope, ...@@ -1085,7 +1085,8 @@ void FleetWrapper::ShrinkDenseTable(int table_id, Scope* scope,
push_status.wait(); push_status.wait();
auto status = push_status.get(); auto status = push_status.get();
if (status != 0) { if (status != 0) {
LOG(FATAL) << "push shrink dense param failed, status[" << status << "]"; PADDLE_THROW(platform::errors::Fatal(
"push shrink dense param failed, status is [%d].", status));
sleep(sleep_seconds_before_fail_exit_); sleep(sleep_seconds_before_fail_exit_);
exit(-1); exit(-1);
} }
......
...@@ -13,8 +13,11 @@ See the License for the specific language governing permissions and ...@@ -13,8 +13,11 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include "paddle/fluid/framework/io/fs.h" #include "paddle/fluid/framework/io/fs.h"
#include <memory> #include <memory>
#include "paddle/fluid/platform/enforce.h"
namespace paddle { namespace paddle {
namespace framework { namespace framework {
...@@ -127,7 +130,8 @@ std::shared_ptr<FILE> localfs_open_write(std::string path, ...@@ -127,7 +130,8 @@ std::shared_ptr<FILE> localfs_open_write(std::string path,
int64_t localfs_file_size(const std::string& path) { int64_t localfs_file_size(const std::string& path) {
struct stat buf; struct stat buf;
if (0 != stat(path.c_str(), &buf)) { if (0 != stat(path.c_str(), &buf)) {
LOG(FATAL) << "file stat not zero"; PADDLE_THROW(platform::errors::External(
"Failed to get file status via stat function."));
return -1; return -1;
} }
return (int64_t)buf.st_size; return (int64_t)buf.st_size;
...@@ -365,7 +369,9 @@ std::shared_ptr<FILE> fs_open_read(const std::string& path, int* err_no, ...@@ -365,7 +369,9 @@ std::shared_ptr<FILE> fs_open_read(const std::string& path, int* err_no,
return hdfs_open_read(path, err_no, converter); return hdfs_open_read(path, err_no, converter);
default: default:
LOG(FATAL) << "Not supported"; PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported file system. Now only supports local file system and "
"HDFS."));
} }
return {}; return {};
...@@ -381,7 +387,9 @@ std::shared_ptr<FILE> fs_open_write(const std::string& path, int* err_no, ...@@ -381,7 +387,9 @@ std::shared_ptr<FILE> fs_open_write(const std::string& path, int* err_no,
return hdfs_open_write(path, err_no, converter); return hdfs_open_write(path, err_no, converter);
default: default:
LOG(FATAL) << "Not supported"; PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported file system. Now only supports local file system and "
"HDFS."));
} }
return {}; return {};
...@@ -397,7 +405,9 @@ std::shared_ptr<FILE> fs_open(const std::string& path, const std::string& mode, ...@@ -397,7 +405,9 @@ std::shared_ptr<FILE> fs_open(const std::string& path, const std::string& mode,
return fs_open_write(path, err_no, converter); return fs_open_write(path, err_no, converter);
} }
LOG(FATAL) << "Unknown mode: " << mode; PADDLE_THROW(platform::errors::Unavailable(
"Unsupported file open mode: %s. Only supports 'r', 'rb', 'w' or 'wb'.",
mode));
return {}; return {};
} }
...@@ -407,7 +417,8 @@ int64_t fs_file_size(const std::string& path) { ...@@ -407,7 +417,8 @@ int64_t fs_file_size(const std::string& path) {
return localfs_file_size(path); return localfs_file_size(path);
default: default:
LOG(FATAL) << "Not supported"; PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported file system. Now only supports local file system."));
} }
return 0; return 0;
...@@ -422,7 +433,9 @@ void fs_remove(const std::string& path) { ...@@ -422,7 +433,9 @@ void fs_remove(const std::string& path) {
return hdfs_remove(path); return hdfs_remove(path);
default: default:
LOG(FATAL) << "Not supported"; PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported file system. Now only supports local file system and "
"HDFS."));
} }
} }
...@@ -435,7 +448,9 @@ std::vector<std::string> fs_list(const std::string& path) { ...@@ -435,7 +448,9 @@ std::vector<std::string> fs_list(const std::string& path) {
return hdfs_list(path); return hdfs_list(path);
default: default:
LOG(FATAL) << "Not supported"; PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported file system. Now only supports local file system and "
"HDFS."));
} }
return {}; return {};
...@@ -450,7 +465,9 @@ std::string fs_tail(const std::string& path) { ...@@ -450,7 +465,9 @@ std::string fs_tail(const std::string& path) {
return hdfs_tail(path); return hdfs_tail(path);
default: default:
LOG(FATAL) << "Not supported"; PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported file system. Now only supports local file system and "
"HDFS."));
} }
return ""; return "";
...@@ -465,7 +482,9 @@ bool fs_exists(const std::string& path) { ...@@ -465,7 +482,9 @@ bool fs_exists(const std::string& path) {
return hdfs_exists(path); return hdfs_exists(path);
default: default:
LOG(FATAL) << "Not supported"; PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported file system. Now only supports local file system and "
"HDFS."));
} }
return false; return false;
...@@ -480,7 +499,9 @@ void fs_mkdir(const std::string& path) { ...@@ -480,7 +499,9 @@ void fs_mkdir(const std::string& path) {
return hdfs_mkdir(path); return hdfs_mkdir(path);
default: default:
LOG(FATAL) << "Not supported"; PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported file system. Now only supports local file system and "
"HDFS."));
} }
} }
......
...@@ -29,14 +29,16 @@ std::shared_ptr<FILE> shell_fopen(const std::string& path, ...@@ -29,14 +29,16 @@ std::shared_ptr<FILE> shell_fopen(const std::string& path,
} }
FILE* fp; FILE* fp;
if (!(fp = fopen(path.c_str(), mode.c_str()))) { if (!(fp = fopen(path.c_str(), mode.c_str()))) {
LOG(FATAL) << "fopen fail, path[" << path << "], mode[" << mode << "]"; PADDLE_THROW(platform::errors::Unavailable(
"Failed to open file, path[%s], mode[%s].", path, mode));
} }
return {fp, [path](FILE* fp) { return {fp, [path](FILE* fp) {
if (shell_verbose()) { if (shell_verbose()) {
LOG(INFO) << "Closing file[" << path << "]"; LOG(INFO) << "Closing file[" << path << "]";
} }
if (0 != fclose(fp)) { if (0 != fclose(fp)) {
LOG(FATAL) << "fclose fail, path[" << path << "]"; PADDLE_THROW(platform::errors::Unavailable(
"Failed to close file, path[%s].", path));
} }
}}; }};
#endif #endif
...@@ -58,7 +60,7 @@ static int close_open_fds_internal() { ...@@ -58,7 +60,7 @@ static int close_open_fds_internal() {
int dir_fd = -1; int dir_fd = -1;
if ((dir_fd = open("/proc/self/fd", O_RDONLY)) < 0) { if ((dir_fd = open("/proc/self/fd", O_RDONLY)) < 0) {
LOG(FATAL) << "proc/self/fd open fail"; PADDLE_THROW(platform::errors::Unavailable("Failed to open proc/self/fd."));
return -1; return -1;
} }
char buffer[sizeof(linux_dirent)]; char buffer[sizeof(linux_dirent)];
...@@ -68,7 +70,8 @@ static int close_open_fds_internal() { ...@@ -68,7 +70,8 @@ static int close_open_fds_internal() {
if ((bytes = syscall(SYS_getdents, dir_fd, if ((bytes = syscall(SYS_getdents, dir_fd,
reinterpret_cast<linux_dirent*>(buffer), reinterpret_cast<linux_dirent*>(buffer),
sizeof(buffer))) < 0) { sizeof(buffer))) < 0) {
LOG(FATAL) << "syscall fail"; PADDLE_THROW(platform::errors::Unavailable(
"System call failed via syscall function."));
return -1; return -1;
} }
......
...@@ -281,7 +281,9 @@ void MultiDevSSAGraphBuilderBase::InsertScaleLossGradOp( ...@@ -281,7 +281,9 @@ void MultiDevSSAGraphBuilderBase::InsertScaleLossGradOp(
loss_scale = 0; loss_scale = 0;
break; break;
default: default:
LOG(FATAL) << "Unknown gradient scale strategy."; PADDLE_THROW(platform::errors::Unimplemented(
"Unknown gradient scale strategy. Now only supports One, "
"CoeffNumDevice and Customized strategies."));
break; break;
} }
...@@ -1054,7 +1056,9 @@ void DistSSAGraphBuilder::InsertCollectiveOp(ir::Graph *result, ...@@ -1054,7 +1056,9 @@ void DistSSAGraphBuilder::InsertCollectiveOp(ir::Graph *result,
} }
break; break;
default: default:
LOG(FATAL) << "Unknown reduce strategy."; PADDLE_THROW(platform::errors::Unimplemented(
"Unknown reduce strategy. Now only supports Reduce and AllReduce "
"strategies."));
break; break;
} }
} }
......
...@@ -126,7 +126,8 @@ class Pass { ...@@ -126,7 +126,8 @@ class Pass {
protected: protected:
virtual void ApplyImpl(Graph *graph) const { virtual void ApplyImpl(Graph *graph) const {
LOG(FATAL) << "Calling virtual Pass not implemented."; PADDLE_THROW(platform::errors::Unimplemented(
"The virtual Pass called is not implemented."));
} }
// Some Pass must be placed before this Pass, and some // Some Pass must be placed before this Pass, and some
......
...@@ -70,8 +70,8 @@ void PullDenseWorker::Wait(std::vector<::std::future<int32_t>>* status_vec) { ...@@ -70,8 +70,8 @@ void PullDenseWorker::Wait(std::vector<::std::future<int32_t>>* status_vec) {
size_t MAX_FAIL_NUM = 20; size_t MAX_FAIL_NUM = 20;
if (pull_dense_fail_times_ > MAX_FAIL_NUM) { if (pull_dense_fail_times_ > MAX_FAIL_NUM) {
LOG(FATAL) << "Pull Dense Failed Times More Than " << MAX_FAIL_NUM PADDLE_THROW(platform::errors::Fatal(
<< " Times"; "Pull dense failed more than %d times.", MAX_FAIL_NUM));
exit(-1); exit(-1);
} }
status_vec->resize(0); status_vec->resize(0);
......
...@@ -38,10 +38,11 @@ struct ExceptionHandler { ...@@ -38,10 +38,11 @@ struct ExceptionHandler {
void operator()() const { void operator()() const {
auto ex = this->future_.get(); auto ex = this->future_.get();
if (ex != nullptr) { if (ex != nullptr) {
LOG(FATAL) << "The exception is thrown inside the thread pool. You " PADDLE_THROW(platform::errors::Fatal(
"should use RunAndGetException to handle the exception.\n" "The exception is thrown inside the thread pool. You "
"The default exception handler is LOG(FATAL)." "should use RunAndGetException to handle the exception. "
<< ex->what(); "The exception is:\n %s.",
ex->what()));
} }
} }
}; };
...@@ -78,9 +79,11 @@ class ThreadPool { ...@@ -78,9 +79,11 @@ class ThreadPool {
return std::unique_ptr<platform::EnforceNotMet>( return std::unique_ptr<platform::EnforceNotMet>(
new platform::EnforceNotMet(ex)); new platform::EnforceNotMet(ex));
} catch (const std::exception& e) { } catch (const std::exception& e) {
LOG(FATAL) << "Unexpected exception is catched in thread pool. All " PADDLE_THROW(platform::errors::Fatal(
"throwable exception in Fluid should be an EnforceNotMet." "Unexpected exception is caught in thread pool. All "
<< e.what(); "throwable exception in Paddle should be an EnforceNotMet. "
"The exception is:\n %s.",
e.what()));
} }
return nullptr; return nullptr;
}); });
......
...@@ -579,11 +579,12 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor< ...@@ -579,11 +579,12 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
if (config.thread_local_stream_enabled() && if (config.thread_local_stream_enabled() &&
process_level_allocator_enabled) { process_level_allocator_enabled) {
LOG(FATAL) << " When binding threads and streams, the use of " PADDLE_THROW(platform::errors::Fatal(
"When binding threads and streams, the use of "
"process-level allocators will result in undefined result " "process-level allocators will result in undefined result "
"errors due to memory asynchronous operations." "errors due to memory asynchronous operations."
"The thread and stream binding configuration of all " "The thread and stream binding configuration of all "
"predictors should be the same in a single process."; "predictors should be the same in a single process."));
} }
} }
...@@ -917,8 +918,9 @@ std::string AnalysisPredictor::GetSerializedProgram() const { ...@@ -917,8 +918,9 @@ std::string AnalysisPredictor::GetSerializedProgram() const {
bool AnalysisPredictor::CheckOperatorCompatible() { bool AnalysisPredictor::CheckOperatorCompatible() {
if (!inference_program_) { if (!inference_program_) {
LOG(FATAL) << "Inference program version check failed because the program " PADDLE_THROW(platform::errors::PreconditionNotMet(
"does not exist."; "Inference program version check failed because the program does not "
"exist."));
return false; return false;
} }
bool res = true; bool res = true;
......
...@@ -46,7 +46,8 @@ PaddleTensor LodTensorToPaddleTensor(framework::LoDTensor* t) { ...@@ -46,7 +46,8 @@ PaddleTensor LodTensorToPaddleTensor(framework::LoDTensor* t) {
pt.data.Reset(t->data<void>(), t->numel() * sizeof(int32_t)); pt.data.Reset(t->data<void>(), t->numel() * sizeof(int32_t));
pt.dtype = PaddleDType::INT32; pt.dtype = PaddleDType::INT32;
} else { } else {
LOG(FATAL) << "unsupported type."; PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported tensor data type. Now only supports INT64, FP32, INT32."));
} }
pt.shape = framework::vectorize<int>(t->dims()); pt.shape = framework::vectorize<int>(t->dims());
return pt; return pt;
......
...@@ -47,7 +47,9 @@ platform::Place GetNativePlace(const TargetType& type, int id = 0) { ...@@ -47,7 +47,9 @@ platform::Place GetNativePlace(const TargetType& type, int id = 0) {
case TargetType::kCUDA: case TargetType::kCUDA:
return platform::CUDAPlace(id); return platform::CUDAPlace(id);
default: default:
LOG(FATAL) << "Error target type."; PADDLE_THROW(
platform::errors::Unavailable("Unsupported target type. Now only "
"supports Host, x86, CUDA target."));
return platform::Place(); return platform::Place();
} }
} }
...@@ -70,7 +72,9 @@ PrecisionType GetLitePrecisionType(framework::proto::VarType::Type type) { ...@@ -70,7 +72,9 @@ PrecisionType GetLitePrecisionType(framework::proto::VarType::Type type) {
case framework::proto::VarType_Type_INT64: case framework::proto::VarType_Type_INT64:
return PrecisionType::kInt64; return PrecisionType::kInt64;
default: default:
LOG(FATAL) << "Error precision type."; PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported precision type. Now only supports FP32, INT8, INT32 and "
"INT64."));
return PrecisionType::kUnk; return PrecisionType::kUnk;
} }
} }
...@@ -87,7 +91,9 @@ framework::proto::VarType::Type GetNativePrecisionType( ...@@ -87,7 +91,9 @@ framework::proto::VarType::Type GetNativePrecisionType(
case PrecisionType::kInt64: case PrecisionType::kInt64:
return framework::proto::VarType_Type_INT64; return framework::proto::VarType_Type_INT64;
default: default:
LOG(FATAL) << "Error precision type."; PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported precision type. Now only supports FP32, INT8, INT32 and "
"INT64."));
return static_cast<framework::proto::VarType::Type>(-1); return static_cast<framework::proto::VarType::Type>(-1);
} }
} }
...@@ -97,7 +103,8 @@ framework::DataLayout GetNativeLayoutType(const DataLayoutType& type) { ...@@ -97,7 +103,8 @@ framework::DataLayout GetNativeLayoutType(const DataLayoutType& type) {
case DataLayoutType::kNCHW: case DataLayoutType::kNCHW:
return framework::DataLayout::kNCHW; return framework::DataLayout::kNCHW;
default: default:
LOG(FATAL) << "Error layout type."; PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported layout type. Now only supports NCHW."));
return static_cast<framework::DataLayout>(-1); return static_cast<framework::DataLayout>(-1);
} }
} }
...@@ -112,10 +119,12 @@ void MemoryCopyAsync(const platform::Place& dst_place, void* dst_data, ...@@ -112,10 +119,12 @@ void MemoryCopyAsync(const platform::Place& dst_place, void* dst_data,
#ifdef PADDLE_WITH_CUDA #ifdef PADDLE_WITH_CUDA
if (platform::is_cpu_place(dst_place) && if (platform::is_cpu_place(dst_place) &&
platform::is_gpu_place(src_place)) { platform::is_gpu_place(src_place)) {
LOG(FATAL) << "lite::MemoryCopy GPU->CPU is not yet implemented."; PADDLE_THROW(platform::errors::Unimplemented(
"Lite::MemoryCopy GPU->CPU is not yet implemented."));
} else if (platform::is_gpu_place(dst_place) && } else if (platform::is_gpu_place(dst_place) &&
platform::is_cpu_place(src_place)) { platform::is_cpu_place(src_place)) {
LOG(FATAL) << "lite::MemoryCopy CPU->GPU is not yet implemented."; PADDLE_THROW(platform::errors::Unimplemented(
"Lite::MemoryCopy CPU->GPU is not yet implemented."));
} else if (platform::is_gpu_place(dst_place) && } else if (platform::is_gpu_place(dst_place) &&
platform::is_gpu_place(src_place)) { platform::is_gpu_place(src_place)) {
auto gpu_place = BOOST_GET_CONST(platform::CUDAPlace, src_place); auto gpu_place = BOOST_GET_CONST(platform::CUDAPlace, src_place);
...@@ -124,7 +133,8 @@ void MemoryCopyAsync(const platform::Place& dst_place, void* dst_data, ...@@ -124,7 +133,8 @@ void MemoryCopyAsync(const platform::Place& dst_place, void* dst_data,
static_cast<const platform::CUDADeviceContext&>(ctx).stream()); static_cast<const platform::CUDADeviceContext&>(ctx).stream());
} }
#else #else
LOG(FATAL) << "You must define PADDLE_WITH_CUDA for using CUDAPlace."; PADDLE_THROW(platform::errors::PreconditionNotMet(
"You must define PADDLE_WITH_CUDA for using CUDAPlace."));
#endif #endif
} }
} }
......
...@@ -78,8 +78,9 @@ bool TRTInt8Calibrator::setBatch( ...@@ -78,8 +78,9 @@ bool TRTInt8Calibrator::setBatch(
for (const auto& it : data) { for (const auto& it : data) {
auto dataptr = data_buffers_.find(it.first); auto dataptr = data_buffers_.find(it.first);
if (dataptr == data_buffers_.end()) { if (dataptr == data_buffers_.end()) {
LOG(FATAL) << "FATAL " << engine_name_ << " input name '" << it.first PADDLE_THROW(platform::errors::Fatal(
<< "' does not match with the buffer names"; "%s input name '%s' does not match with the buffer names.",
engine_name_, it.first));
} }
const auto& d = dataptr->second; const auto& d = dataptr->second;
PADDLE_ENFORCE( PADDLE_ENFORCE(
...@@ -109,8 +110,10 @@ bool TRTInt8Calibrator::getBatch(void** bindings, const char** names, ...@@ -109,8 +110,10 @@ bool TRTInt8Calibrator::getBatch(void** bindings, const char** names,
for (int i = 0; i < num_bindings; i++) { for (int i = 0; i < num_bindings; i++) {
auto it = data_buffers_.find(names[i]); auto it = data_buffers_.find(names[i]);
if (it == data_buffers_.end()) { if (it == data_buffers_.end()) {
LOG(FATAL) << "Calibration engine asked for unknown tensor name '" PADDLE_THROW(
<< names[i] << "' at position " << i; platform::errors::Fatal("Calibration engine asked for unknown tensor "
"name '%s' at position %d.",
names[i], i));
} }
bindings[i] = it->second.first; bindings[i] = it->second.first;
} }
......
...@@ -180,15 +180,14 @@ void *Alloc<platform::CUDAPlace>(const platform::CUDAPlace &place, ...@@ -180,15 +180,14 @@ void *Alloc<platform::CUDAPlace>(const platform::CUDAPlace &place,
platform::CUDADeviceGuard(place.device); platform::CUDADeviceGuard(place.device);
size_t avail, total; size_t avail, total;
platform::GpuMemoryUsage(&avail, &total); platform::GpuMemoryUsage(&avail, &total);
LOG(FATAL) << "Cannot allocate " << string::HumanReadableSize(size) PADDLE_THROW(platform::errors::ResourceExhausted(
<< " in GPU " << place.device << ", available " "Cannot allocate %s in GPU %d, available %s, total %s, GpuMinChunkSize "
<< string::HumanReadableSize(avail) << ", total " "%s, GpuMaxChunkSize %s, GPU memory used: %s.",
<< string::HumanReadableSize(total) << ", GpuMinChunkSize " string::HumanReadableSize(size), place.device,
<< string::HumanReadableSize(buddy_allocator->GetMinChunkSize()) string::HumanReadableSize(avail), string::HumanReadableSize(total),
<< ", GpuMaxChunkSize " string::HumanReadableSize(buddy_allocator->GetMinChunkSize()),
<< string::HumanReadableSize(buddy_allocator->GetMaxChunkSize()) string::HumanReadableSize(buddy_allocator->GetMaxChunkSize()),
<< ", GPU memory used: " string::HumanReadableSize(Used<platform::CUDAPlace>(place))));
<< string::HumanReadableSize(Used<platform::CUDAPlace>(place));
} else { } else {
if (FLAGS_init_allocated_mem) { if (FLAGS_init_allocated_mem) {
cudaMemset(ptr, 0xEF, size); cudaMemset(ptr, 0xEF, size);
......
...@@ -27,7 +27,8 @@ ThreadLocalAllocatorImpl::ThreadLocalAllocatorImpl(const platform::Place& p) ...@@ -27,7 +27,8 @@ ThreadLocalAllocatorImpl::ThreadLocalAllocatorImpl(const platform::Place& p)
BOOST_GET_CONST(platform::CUDAPlace, place_).device)), BOOST_GET_CONST(platform::CUDAPlace, place_).device)),
platform::GpuMinChunkSize(), platform::GpuMaxChunkSize())); platform::GpuMinChunkSize(), platform::GpuMaxChunkSize()));
} else { } else {
LOG(FATAL) << "Thread local allocator only supports CUDAPlace now."; PADDLE_THROW(platform::errors::Unavailable(
"Thread local allocator only supports CUDAPlace now."));
} }
} }
......
...@@ -47,7 +47,8 @@ void OpTester::Init(const OpTesterConfig &config) { ...@@ -47,7 +47,8 @@ void OpTester::Init(const OpTesterConfig &config) {
CreateInputVarDesc(); CreateInputVarDesc();
CreateOutputVarDesc(); CreateOutputVarDesc();
} else { } else {
LOG(FATAL) << "Op \"" << config_.op_type << "\" is not registered."; PADDLE_THROW(platform::errors::NotFound("Operator '%s' is not registered.",
config_.op_type));
} }
if (config_.device_id >= 0) { if (config_.device_id >= 0) {
...@@ -169,10 +170,10 @@ void OpTester::CreateInputVarDesc() { ...@@ -169,10 +170,10 @@ void OpTester::CreateInputVarDesc() {
std::vector<std::string> input_names = GetOpProtoInputNames(); std::vector<std::string> input_names = GetOpProtoInputNames();
for (auto &name : input_names) { for (auto &name : input_names) {
const OpInputConfig *input = config_.GetInput(name); const OpInputConfig *input = config_.GetInput(name);
if (input == nullptr) { PADDLE_ENFORCE_NOT_NULL(
LOG(FATAL) << "The input " << name << " of op " << config_.op_type input, platform::errors::NotFound(
<< " is not correctlly provided."; "The input %s of operator %s is not correctly provided.",
} name, config_.op_type));
std::string var_name = config_.op_type + "." + name; std::string var_name = config_.op_type + "." + name;
framework::VarDesc *var = Var(var_name); framework::VarDesc *var = Var(var_name);
...@@ -207,9 +208,10 @@ void OpTester::CreateOpDesc() { ...@@ -207,9 +208,10 @@ void OpTester::CreateOpDesc() {
GetOpProtoAttrNames(); GetOpProtoAttrNames();
for (auto item : config_.attrs) { for (auto item : config_.attrs) {
const std::string &name = item.first; const std::string &name = item.first;
if (attr_types.find(name) == attr_types.end()) { PADDLE_ENFORCE_NE(
LOG(FATAL) << "Operator " << type_ << " do not have attr " << name; attr_types.find(name), attr_types.end(),
} platform::errors::NotFound("Operator %s does not have attribute %s.",
type_, name));
const std::string &value_str = item.second; const std::string &value_str = item.second;
const framework::proto::AttrType &type = attr_types[name]; const framework::proto::AttrType &type = attr_types[name];
...@@ -231,7 +233,8 @@ void OpTester::CreateOpDesc() { ...@@ -231,7 +233,8 @@ void OpTester::CreateOpDesc() {
case framework::proto::AttrType::INTS: case framework::proto::AttrType::INTS:
case framework::proto::AttrType::FLOATS: case framework::proto::AttrType::FLOATS:
case framework::proto::AttrType::STRINGS: case framework::proto::AttrType::STRINGS:
LOG(FATAL) << "Not supported yet."; PADDLE_THROW(
platform::errors::Unimplemented("Not supported STRINGS type yet."));
break; break;
case framework::proto::AttrType::LONG: { case framework::proto::AttrType::LONG: {
int64_t value = StringTo<int64_t>(value_str); int64_t value = StringTo<int64_t>(value_str);
......
...@@ -43,10 +43,7 @@ class CSyncCalcStreamOp : public framework::OperatorBase { ...@@ -43,10 +43,7 @@ class CSyncCalcStreamOp : public framework::OperatorBase {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32) #if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
auto dev_ctx = static_cast<platform::CUDADeviceContext*>( auto dev_ctx = static_cast<platform::CUDADeviceContext*>(
platform::DeviceContextPool::Instance().Get(place)); platform::DeviceContextPool::Instance().Get(place));
cudaError_t e_sync = cudaStreamSynchronize(dev_ctx->stream()); PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(dev_ctx->stream()));
if (e_sync != 0) {
LOG(FATAL) << "Fail to sync cuda stream: " << cudaGetErrorString(e_sync);
}
#else #else
PADDLE_THROW("PaddlePaddle should compile with GPU."); PADDLE_THROW("PaddlePaddle should compile with GPU.");
#endif #endif
......
...@@ -45,10 +45,7 @@ class CSyncCommStreamOp : public framework::OperatorBase { ...@@ -45,10 +45,7 @@ class CSyncCommStreamOp : public framework::OperatorBase {
int ring_id = Attr<int>("ring_id"); int ring_id = Attr<int>("ring_id");
auto stream = auto stream =
platform::NCCLCommContext::Instance().Get(ring_id, place)->stream(); platform::NCCLCommContext::Instance().Get(ring_id, place)->stream();
cudaError_t e_sync = cudaStreamSynchronize(stream); PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream));
if (e_sync != 0) {
LOG(FATAL) << "Fail to sync nccl stream: " << cudaGetErrorString(e_sync);
}
#else #else
PADDLE_THROW("PaddlePaddle should compile with GPU."); PADDLE_THROW("PaddlePaddle should compile with GPU.");
#endif #endif
......
...@@ -192,11 +192,7 @@ class DataNormGradKernel<platform::CUDADeviceContext, T> ...@@ -192,11 +192,7 @@ class DataNormGradKernel<platform::CUDADeviceContext, T>
reinterpret_cast<const void *>(d_batch_square_sum), reinterpret_cast<const void *>(d_batch_square_sum),
reinterpret_cast<void *>(d_batch_square_sum), C, reinterpret_cast<void *>(d_batch_square_sum), C,
platform::ToNCCLDataType(x->type()), ncclSum, comm->comm(), stream)); platform::ToNCCLDataType(x->type()), ncclSum, comm->comm(), stream));
cudaError_t e_sync = cudaStreamSynchronize(stream); PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream));
if (e_sync != 0) {
LOG(FATAL) << "Fail to sync nccl stream: "
<< cudaGetErrorString(e_sync);
}
#else #else
PADDLE_THROW(platform::errors::PreconditionNotMet( PADDLE_THROW(platform::errors::PreconditionNotMet(
"PaddlePaddle should compile with GPU, and need_sync_stats connot be " "PaddlePaddle should compile with GPU, and need_sync_stats connot be "
......
...@@ -471,7 +471,9 @@ class DetectionMAPOpKernel : public framework::OpKernel<T> { ...@@ -471,7 +471,9 @@ class DetectionMAPOpKernel : public framework::OpKernel<T> {
mAP += average_precisions; mAP += average_precisions;
++count; ++count;
} else { } else {
LOG(FATAL) << "Unkown ap version: " << ap_type; PADDLE_THROW(platform::errors::Unimplemented(
"Unkown ap version %s. Now only supports integral and l1point.",
ap_type));
} }
} }
if (count != 0) mAP /= count; if (count != 0) mAP /= count;
......
...@@ -182,7 +182,9 @@ class DGCOpKernel : public framework::OpKernel<T> { ...@@ -182,7 +182,9 @@ class DGCOpKernel : public framework::OpKernel<T> {
static_cast<void*>(encode_grad_out_data), k, v_out_data, static_cast<void*>(encode_grad_out_data), k, v_out_data,
static_cast<int>(v_out->numel()), buf, dev_ctx.stream(), static_cast<int>(v_out->numel()), buf, dev_ctx.stream(),
u_out_data)) { u_out_data)) {
LOG(FATAL) << "v_out numel:" << v_out->numel(); // TODO(weihang): owner should polish this error message
PADDLE_THROW(platform::errors::InvalidArgument(
"V_out numel error, V_out numel is %d.", v_out->numel()));
} }
math::SetConstant<DeviceContext, T> tset; math::SetConstant<DeviceContext, T> tset;
......
...@@ -37,8 +37,9 @@ void HandleSendResponse(brpc::Controller* cntl, sendrecv::VoidMessage* response, ...@@ -37,8 +37,9 @@ void HandleSendResponse(brpc::Controller* cntl, sendrecv::VoidMessage* response,
ch_ptr->Push(ch_ctx); ch_ptr->Push(ch_ctx);
if (cntl->Failed()) { if (cntl->Failed()) {
LOG(FATAL) << "Fail to send SendVar: " << var_h->name() PADDLE_THROW(platform::errors::Unavailable(
<< ", error text: " << cntl->ErrorText(); "Failed to send variable %s, error text is %s.", var_h->name(),
cntl->ErrorText()));
var_h->Finish(false); var_h->Finish(false);
cls->DecreaseReqCount(); cls->DecreaseReqCount();
return; return;
...@@ -104,8 +105,9 @@ void HandleFetchBarrierResponse(brpc::Controller* cntl, ...@@ -104,8 +105,9 @@ void HandleFetchBarrierResponse(brpc::Controller* cntl,
ch_ptr->Push(ch_ctx); ch_ptr->Push(ch_ctx);
if (cntl->Failed()) { if (cntl->Failed()) {
LOG(FATAL) << "Fail to get HandleFetchBarrierResponse: " << var_h->name() PADDLE_THROW(platform::errors::Unavailable(
<< ", error text: " << cntl->ErrorText(); "Failed to get HandleFetchBarrierResponse %s, error text is %s.",
var_h->name(), cntl->ErrorText()));
var_h->Finish(false); var_h->Finish(false);
cls->DecreaseReqCount(); cls->DecreaseReqCount();
return; return;
...@@ -131,8 +133,9 @@ void HandleGetResponse(brpc::Controller* cntl, ...@@ -131,8 +133,9 @@ void HandleGetResponse(brpc::Controller* cntl,
ch_ptr->Push(ch_ctx); ch_ptr->Push(ch_ctx);
if (cntl->Failed()) { if (cntl->Failed()) {
LOG(FATAL) << "Fail to GetVar: " << var_h->name() PADDLE_THROW(platform::errors::Unavailable(
<< ", error text: " << cntl->ErrorText(); "Failed to get variable %s, error text is %s.", var_h->name(),
cntl->ErrorText()));
cls->DecreaseReqCount(); cls->DecreaseReqCount();
var_h->Finish(false); var_h->Finish(false);
return; return;
...@@ -368,7 +371,8 @@ ChannelQueuePtr BRPCClient::GetChannel(const std::string& ep) { ...@@ -368,7 +371,8 @@ ChannelQueuePtr BRPCClient::GetChannel(const std::string& ep) {
for (int i = 0; i < brpc_channel_num_per_server_; ++i) { for (int i = 0; i < brpc_channel_num_per_server_; ++i) {
std::shared_ptr<ChannelContext> c(new ChannelContext()); std::shared_ptr<ChannelContext> c(new ChannelContext());
if (c->channel.Init(ep.c_str(), &options) != 0) { if (c->channel.Init(ep.c_str(), &options) != 0) {
LOG(FATAL) << "Fail to initialize channel"; PADDLE_THROW(
platform::errors::Unavailable("Failed to initialize channel."));
return nullptr; return nullptr;
} }
......
...@@ -69,8 +69,10 @@ void RdmaMemPool::Register(const std::string& varname, void* data, ...@@ -69,8 +69,10 @@ void RdmaMemPool::Register(const std::string& varname, void* data,
pthread_rwlock_unlock(&access_); pthread_rwlock_unlock(&access_);
if (brpc::rdma::RegisterMemoryForRdma(data, data_size)) { if (brpc::rdma::RegisterMemoryForRdma(data, data_size)) {
LOG(FATAL) << "register " << varname << " data:" << data PADDLE_THROW(platform::errors::Unavailable(
<< " data_size:" << data_size << " error"; "Register memory for RDMA failed. Register %s data: %s data size %d "
"error.",
varname, data, data_size));
} }
VLOG(4) << "register on rdma:" << varname << " data:" << data VLOG(4) << "register on rdma:" << varname << " data:" << data
......
...@@ -36,7 +36,9 @@ class IOBufWriter { ...@@ -36,7 +36,9 @@ class IOBufWriter {
static void Append(const std::string& varname, butil::IOBuf* iobuf, int k, static void Append(const std::string& varname, butil::IOBuf* iobuf, int k,
const char* v, int64_t vlen) { const char* v, int64_t vlen) {
if (vlen >= std::numeric_limits<int>::max() || vlen < 0) { if (vlen >= std::numeric_limits<int>::max() || vlen < 0) {
LOG(FATAL) << "AppendZeroCopy varname:" << varname << ", vlen:" << vlen; PADDDLE_THROW(platform::errors::Unavailable(
"Variable lenght is invalid. Variable name is %s, length is %d.",
varname, vlen));
} }
iobuf->append(reinterpret_cast<char*>(&k), 4); iobuf->append(reinterpret_cast<char*>(&k), 4);
...@@ -95,7 +97,9 @@ class IOBufWriter { ...@@ -95,7 +97,9 @@ class IOBufWriter {
bool in_cuda_pinned, void (*destroy)(void*), bool in_cuda_pinned, void (*destroy)(void*),
void* user_data) { void* user_data) {
if (vlen >= std::numeric_limits<int>::max() || vlen < 0) { if (vlen >= std::numeric_limits<int>::max() || vlen < 0) {
LOG(FATAL) << "AppendZeroCopy varname:" << varname << ", vlen:" << vlen; PADDDLE_THROW(platform::errors::Unavailable(
"Variable lenght is invalid. Variable name is %s, length is %d.",
varname, vlen));
} }
#ifdef PADDLE_WITH_BRPC_RDMA #ifdef PADDLE_WITH_BRPC_RDMA
......
...@@ -364,7 +364,8 @@ void AsyncBRPCServer::StartServer() { ...@@ -364,7 +364,8 @@ void AsyncBRPCServer::StartServer() {
// service is put on stack, we don't want server to delete it, otherwise // service is put on stack, we don't want server to delete it, otherwise
// use brpc::SERVER_OWNS_SERVICE. // use brpc::SERVER_OWNS_SERVICE.
if (server_.AddService(&service_impl, brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { if (server_.AddService(&service_impl, brpc::SERVER_DOESNT_OWN_SERVICE) != 0) {
LOG(FATAL) << "Fail to add service"; PADDDLE_THROW(platform::errors::Unavailable(
"Failed to add service into BRPC server."));
return; return;
} }
...@@ -375,7 +376,8 @@ void AsyncBRPCServer::StartServer() { ...@@ -375,7 +376,8 @@ void AsyncBRPCServer::StartServer() {
options.idle_timeout_sec = idle_timeout_s_; options.idle_timeout_sec = idle_timeout_s_;
options.max_concurrency = max_concurrency_; options.max_concurrency = max_concurrency_;
if (server_.Start(bind_address_.c_str(), &options) != 0) { if (server_.Start(bind_address_.c_str(), &options) != 0) {
LOG(FATAL) << "Fail to start EchoServer" << bind_address_; PADDDLE_THROW(platform::errors::Unavailable(
"Failed to start EchoServer %s.", bind_address_));
return; return;
} }
......
...@@ -501,10 +501,11 @@ void GRPCClient::Proceed() { ...@@ -501,10 +501,11 @@ void GRPCClient::Proceed() {
VLOG(3) << c->GetVarHandlePtr()->String() << " process"; VLOG(3) << c->GetVarHandlePtr()->String() << " process";
c->Process(); c->Process();
} else if (c->status_.error_code() == grpc::StatusCode::DEADLINE_EXCEEDED) { } else if (c->status_.error_code() == grpc::StatusCode::DEADLINE_EXCEEDED) {
LOG(FATAL) << c->GetVarHandlePtr()->String() PADDLE_THROW(platform::errors::External(
<< " meets grpc error, error_code:" << c->status_.error_code() "%s meets grpc error, error_code is %d, error message is %s, error "
<< " error_message:" << c->status_.error_message() "details is %s.",
<< " error_details:" << c->status_.error_details(); c->GetVarHandlePtr()->String(), c->status_.error_code(),
c->status_.error_message(), c->status_.error_details()));
{ {
std::lock_guard<std::mutex> lk(sync_mutex_); std::lock_guard<std::mutex> lk(sync_mutex_);
ok_ = false; ok_ = false;
...@@ -519,11 +520,11 @@ void GRPCClient::Proceed() { ...@@ -519,11 +520,11 @@ void GRPCClient::Proceed() {
c->GetVarHandlePtr()->should_retry = true; c->GetVarHandlePtr()->should_retry = true;
c->Finish(false); c->Finish(false);
} else { } else {
LOG(FATAL) << c->GetVarHandlePtr()->String() PADDLE_THROW(platform::errors::External(
<< " meets grpc error, error_code:" << c->status_.error_code() "%s meets grpc error, error_code is %d, error message is %s, error "
<< " error_message:" << c->status_.error_message() "details is %s.",
<< " error_details:" << c->status_.error_details(); c->GetVarHandlePtr()->String(), c->status_.error_code(),
c->status_.error_message(), c->status_.error_details()));
c->Finish(false); c->Finish(false);
} }
......
...@@ -105,10 +105,9 @@ void SerializeToByteBuffer(const std::string& name, framework::Variable* var, ...@@ -105,10 +105,9 @@ void SerializeToByteBuffer(const std::string& name, framework::Variable* var,
e.WriteVarlengthBeginning(VarMsg::kSerializedFieldNumber, e.WriteVarlengthBeginning(VarMsg::kSerializedFieldNumber,
payload->memory_size()); payload->memory_size());
if (payload->memory_size() >= std::numeric_limits<int>::max()) { if (payload->memory_size() >= std::numeric_limits<int>::max()) {
LOG(FATAL) << "FATAL error: varname:" << name PADDLE_THROW(platform::errors::InvalidArgument(
<< ", vlen:" << payload->memory_size() "Variable %s length %d should less than %d.", name,
<< " >= std::numeric_limits<int>::max():" payload->memory_size(), std::numeric_limits<int>::max()));
<< std::numeric_limits<int>::max() << ", so exit!";
} }
// steal reference of tensor data // steal reference of tensor data
::grpc::Slice slices[4]; // metadata, tensor, rows meta, rows ::grpc::Slice slices[4]; // metadata, tensor, rows meta, rows
......
...@@ -131,10 +131,12 @@ void ParameterRecv<T>::operator()(const RpcContext &rpc_ctx, ...@@ -131,10 +131,12 @@ void ParameterRecv<T>::operator()(const RpcContext &rpc_ctx,
} }
} }
auto numel = recv_tensor->numel(); auto numel = recv_tensor->numel();
if (recv_numel != numel) { PADDLE_ENFORCE_EQ(
LOG(FATAL) << "recv_numel: " << recv_numel << " acture numel: " << numel; recv_numel, numel,
} platform::errors::InvalidArgument(
PADDLE_ENFORCE_EQ(recv_numel, numel); "The number of receive tensor's elements are not valid. The "
"recevie tensor numel is %d, the actual tensor numel is %d.",
recv_numel, numel));
} else if (recv_var->IsType<framework::SelectedRows>()) { } else if (recv_var->IsType<framework::SelectedRows>()) {
auto cpu_place = platform::CPUPlace(); auto cpu_place = platform::CPUPlace();
auto *slr = recv_var->GetMutable<framework::SelectedRows>(); auto *slr = recv_var->GetMutable<framework::SelectedRows>();
......
...@@ -81,14 +81,7 @@ class ProtoEncodeHelper { ...@@ -81,14 +81,7 @@ class ProtoEncodeHelper {
ProtoEncodeHelper(char* buf, int max_size) ProtoEncodeHelper(char* buf, int max_size)
: base_(buf), p_(buf), limit_(base_ + max_size) {} : base_(buf), p_(buf), limit_(base_ + max_size) {}
~ProtoEncodeHelper() { ~ProtoEncodeHelper() {}
#define REPLACE_ENFORCE_GLOG 1
// Make sure callers didn't do operations that went over max_size promised
if (paddle::platform::is_error(p_ <= limit_)) {
paddle::platform::throw_on_error(p_ <= limit_, "");
}
#undef REPLACE_ENFORCE_GLOG
}
const char* data() const { return base_; } const char* data() const { return base_; }
size_t size() const { return p_ - base_; } size_t size() const { return p_ - base_; }
......
...@@ -96,11 +96,9 @@ bool RequestSendHandler::Handle(const std::string& varname, ...@@ -96,11 +96,9 @@ bool RequestSendHandler::Handle(const std::string& varname,
} else { // sync } else { // sync
rpc_server_->WaitCond(kRequestSend); rpc_server_->WaitCond(kRequestSend);
VLOG(3) << "sync: processing received var: " << varname; VLOG(3) << "sync: processing received var: " << varname;
PADDLE_ENFORCE_NOT_NULL(
if (invar == nullptr) { invar, platform::errors::NotFound(
LOG(FATAL) << "sync: Can not find server side var: " << varname; "sync: Can not find server side var %s.", varname));
return false;
}
} }
} }
return true; return true;
......
...@@ -73,10 +73,7 @@ class AllReduceOpKernel : public framework::OpKernel<T> { ...@@ -73,10 +73,7 @@ class AllReduceOpKernel : public framework::OpKernel<T> {
sendbuff, recvbuff, numel, static_cast<ncclDataType_t>(dtype), red_type, sendbuff, recvbuff, numel, static_cast<ncclDataType_t>(dtype), red_type,
comm, stream)); comm, stream));
if (ctx.Attr<bool>("sync_mode")) { if (ctx.Attr<bool>("sync_mode")) {
cudaError_t e_sync = cudaStreamSynchronize(stream); PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream));
if (e_sync != 0) {
LOG(FATAL) << "cudaStreamSynchronize " << cudaGetErrorString(e_sync);
}
} }
#else #else
PADDLE_THROW("PaddlePaddle should compile with GPU."); PADDLE_THROW("PaddlePaddle should compile with GPU.");
......
...@@ -71,7 +71,9 @@ static void FlParallelExecuteBlocks( ...@@ -71,7 +71,9 @@ static void FlParallelExecuteBlocks(
<< "pointer: " << prepared[run_block].get(); << "pointer: " << prepared[run_block].get();
executor->RunPreparedContext(prepared[run_block].get(), scope); executor->RunPreparedContext(prepared[run_block].get(), scope);
} catch (const std::exception &e) { } catch (const std::exception &e) {
LOG(FATAL) << "run sub program:" << idx << " error " << e.what(); PADDLE_THROW(platform::errors::Fatal(
"Run %d-th sub program failed. The exception is:\n%s.", idx,
e.what()));
} }
})); }));
} }
......
...@@ -74,7 +74,9 @@ static void ParallelExecuteBlocks( ...@@ -74,7 +74,9 @@ static void ParallelExecuteBlocks(
<< "pointer: " << prepared[run_block].get(); << "pointer: " << prepared[run_block].get();
executor->RunPreparedContext(prepared[run_block].get(), scope); executor->RunPreparedContext(prepared[run_block].get(), scope);
} catch (const std::exception &e) { } catch (const std::exception &e) {
LOG(FATAL) << "run sub program:" << idx << " error " << e.what(); PADDLE_THROW(platform::errors::Fatal(
"Run %d-th sub program failed. The exception is:\n%s.", idx,
e.what()));
} }
})); }));
} }
......
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/operators/jit/kernels.h" #include "paddle/fluid/operators/jit/kernels.h"
#include "paddle/fluid/platform/device_tracer.h" #include "paddle/fluid/platform/device_tracer.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/place.h" #include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/port.h" #include "paddle/fluid/platform/port.h"
#include "paddle/fluid/platform/variant.h" // for UNUSED #include "paddle/fluid/platform/variant.h" // for UNUSED
...@@ -119,7 +120,8 @@ void BenchAllImpls(const typename KernelTuple::attr_type& attr, Args... args) { ...@@ -119,7 +120,8 @@ void BenchAllImpls(const typename KernelTuple::attr_type& attr, Args... args) {
// Test result from Get function // Test result from Get function
auto tgt = jit::KernelFuncs<KernelTuple, PlaceType>::Cache().At(attr); auto tgt = jit::KernelFuncs<KernelTuple, PlaceType>::Cache().At(attr);
if (!tgt) { if (!tgt) {
LOG(FATAL) << "Target can not be empty!"; PADDLE_THROW(
paddle::platform::errors::Fatal("Benchmark target can not be empty."));
} }
infos.push_back(std::make_pair("Target", benchmark(tgt, args...))); infos.push_back(std::make_pair("Target", benchmark(tgt, args...)));
......
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
#include <string> #include <string>
#include "glog/logging.h" #include "glog/logging.h"
#include "paddle/fluid/operators/jit/gen/jitcode.h" #include "paddle/fluid/operators/jit/gen/jitcode.h"
#include "paddle/fluid/platform/enforce.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
...@@ -249,7 +250,8 @@ class VActFunc : public JitCode { ...@@ -249,7 +250,8 @@ class VActFunc : public JitCode {
identity_jmm<JMM>(dst, src, 15); identity_jmm<JMM>(dst, src, 15);
break; break;
default: default:
LOG(FATAL) << "Do not support this operand type: " << type; PADDLE_THROW(platform::errors::Unimplemented(
"Do not support operand type code: %d.", type));
break; break;
} }
} }
...@@ -263,7 +265,8 @@ class VActJitCode : public VActFunc { ...@@ -263,7 +265,8 @@ class VActJitCode : public VActFunc {
if (!(type_ == operand_type::RELU || type_ == operand_type::EXP || if (!(type_ == operand_type::RELU || type_ == operand_type::EXP ||
type_ == operand_type::SIGMOID || type_ == operand_type::TANH || type_ == operand_type::SIGMOID || type_ == operand_type::TANH ||
type_ == operand_type::IDENTITY || type_ == operand_type::SQUARE)) { type_ == operand_type::IDENTITY || type_ == operand_type::SQUARE)) {
LOG(FATAL) << "Do not support this operand type: " << type_; PADDLE_THROW(platform::errors::Unimplemented(
"Do not support operand type code: %d.", type));
} }
this->genCode(); this->genCode();
} }
......
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
#include <string> #include <string>
#include "glog/logging.h" #include "glog/logging.h"
#include "paddle/fluid/operators/jit/gen/jitcode.h" #include "paddle/fluid/operators/jit/gen/jitcode.h"
#include "paddle/fluid/platform/enforce.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
...@@ -36,7 +37,8 @@ class VXXJitCode : public JitCode { ...@@ -36,7 +37,8 @@ class VXXJitCode : public JitCode {
with_relu_(with_relu) { with_relu_(with_relu) {
if (!(type_ == operand_type::MUL || type_ == operand_type::ADD || if (!(type_ == operand_type::MUL || type_ == operand_type::ADD ||
type_ == operand_type::SUB)) { type_ == operand_type::SUB)) {
LOG(FATAL) << "Do not support this operand type: " << type_; PADDLE_THROW(platform::errors::Unimplemented(
"Do not support operand type code: %d.", type));
} }
this->genCode(); this->genCode();
} }
......
...@@ -33,7 +33,8 @@ class EmbSeqPoolJitCode : public JitCode { ...@@ -33,7 +33,8 @@ class EmbSeqPoolJitCode : public JitCode {
tbl_w_(attr.table_width), tbl_w_(attr.table_width),
type_(attr.pool_type) { type_(attr.pool_type) {
if (type_ != SeqPoolType::kSum) { if (type_ != SeqPoolType::kSum) {
LOG(FATAL) << "Only support sum pool yet "; PADDLE_THROW(
platform::errors::Unimplemented("Only supports sum pool yet."));
} }
this->genCode(); this->genCode();
} }
......
...@@ -39,7 +39,8 @@ class GRUJitCode : public VActFunc { ...@@ -39,7 +39,8 @@ class GRUJitCode : public VActFunc {
} else if (type == KernelType::kVIdentity) { } else if (type == KernelType::kVIdentity) {
return operand_type::IDENTITY; return operand_type::IDENTITY;
} else { } else {
LOG(FATAL) << "Do not support this jit::KernelType: " << type; PADDLE_THROW(platform::errors::Unimplemented(
"Do not support jit::KernelType code: %d.", type));
} }
return operand_type::IDENTITY; return operand_type::IDENTITY;
}; };
......
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
#include <string> #include <string>
#include "glog/logging.h" #include "glog/logging.h"
#include "paddle/fluid/operators/jit/gen/jitcode.h" #include "paddle/fluid/operators/jit/gen/jitcode.h"
#include "paddle/fluid/platform/enforce.h"
namespace paddle { namespace paddle {
namespace operators { namespace operators {
...@@ -30,7 +31,8 @@ class HOPVJitCode : public JitCode { ...@@ -30,7 +31,8 @@ class HOPVJitCode : public JitCode {
void* code_ptr = nullptr) void* code_ptr = nullptr)
: JitCode(code_size, code_ptr), num_(d), type_(type) { : JitCode(code_size, code_ptr), num_(d), type_(type) {
if (!(type_ == operand_type::MAX || type_ == operand_type::ADD)) { if (!(type_ == operand_type::MAX || type_ == operand_type::ADD)) {
LOG(FATAL) << "Do not support this operand type: " << type_; PADDLE_THROW(platform::errors::Unimplemented(
"Do not support operand type code: %d.", type));
} }
this->genCode(); this->genCode();
} }
......
...@@ -42,7 +42,8 @@ class LSTMJitCode : public VActFunc { ...@@ -42,7 +42,8 @@ class LSTMJitCode : public VActFunc {
} else if (type == KernelType::kVIdentity) { } else if (type == KernelType::kVIdentity) {
return operand_type::IDENTITY; return operand_type::IDENTITY;
} else { } else {
LOG(FATAL) << "Do not support this jit::KernelType: " << type; PADDLE_THROW(platform::errors::Unimplemented(
"Do not support jit::KernelType code: %d.", type));
} }
return operand_type::IDENTITY; return operand_type::IDENTITY;
}; };
......
...@@ -32,7 +32,8 @@ class SeqPoolJitCode : public JitCode { ...@@ -32,7 +32,8 @@ class SeqPoolJitCode : public JitCode {
: JitCode(code_size, code_ptr), w_(attr.w), type_(attr.type) { : JitCode(code_size, code_ptr), w_(attr.w), type_(attr.type) {
if (!(type_ == SeqPoolType::kSum || type_ == SeqPoolType::kAvg || if (!(type_ == SeqPoolType::kSum || type_ == SeqPoolType::kAvg ||
type_ == SeqPoolType::kSqrt)) { type_ == SeqPoolType::kSqrt)) {
LOG(FATAL) << "Only supported pool type: sum, avg and sqrt."; PADDLE_THROW(platform::errors::Unimplemented(
"Only supports sum, average and sqrt pool type."));
} }
fp_h_[0] = 1.f; fp_h_[0] = 1.f;
this->genCode(); this->genCode();
......
...@@ -98,7 +98,8 @@ void CreateTensor(framework::Scope* scope, const std::string& name, ...@@ -98,7 +98,8 @@ void CreateTensor(framework::Scope* scope, const std::string& name,
#ifdef PADDLE_WITH_CUDA #ifdef PADDLE_WITH_CUDA
place = platform::CUDAPlace(0); place = platform::CUDAPlace(0);
#else #else
LOG(FATAL) << "You must define PADDLE_WITH_CUDA for using CUDAPlace."; PADDLE_THROW(platform::errors::PreconditionNetMet(
"You must define PADDLE_WITH_CUDA for using CUDAPlace."));
#endif #endif
} else { } else {
place = platform::CPUPlace(); place = platform::CPUPlace();
......
...@@ -394,7 +394,8 @@ class BeamSearchFunctor<platform::CUDADeviceContext, T> { ...@@ -394,7 +394,8 @@ class BeamSearchFunctor<platform::CUDADeviceContext, T> {
end_id, is_accumulated, num_used_threads)); end_id, is_accumulated, num_used_threads));
} }
} else { } else {
LOG(FATAL) << "Not implemented."; PADDLE_THROW(platform::errors::Unimplemented(
"Not implemented other number of sequences yet."));
} }
context.Wait(); context.Wait();
......
...@@ -419,8 +419,9 @@ size_t MKLDNNDeviceContext::GetShapeBlobSize() const { ...@@ -419,8 +419,9 @@ size_t MKLDNNDeviceContext::GetShapeBlobSize() const {
BlobMap* pMap = p_blobmap_.get(); BlobMap* pMap = p_blobmap_.get();
auto map_it = pMap->find(tls().cur_mkldnn_session_id); auto map_it = pMap->find(tls().cur_mkldnn_session_id);
if (map_it == pMap->end()) { if (map_it == pMap->end()) {
LOG(FATAL) << "MKLDNNDeviceContext don't find cur_mkldnn_session_id : " PADDLE_THROW(platform::errors::NotFound(
<< tls().cur_mkldnn_session_id; "MKLDNNDeviceContext don't find cur_mkldnn_session_id: %d.",
tls().cur_mkldnn_session_id));
} }
return map_it->second->size(); return map_it->second->size();
} }
......
...@@ -265,11 +265,7 @@ inline std::string GetTraceBackString(StrType&& what, const char* file, ...@@ -265,11 +265,7 @@ inline std::string GetTraceBackString(StrType&& what, const char* file,
inline bool is_error(bool stat) { return !stat; } inline bool is_error(bool stat) { return !stat; }
inline void throw_on_error(bool stat, const std::string& msg) { inline void throw_on_error(bool stat, const std::string& msg) {
#ifndef REPLACE_ENFORCE_GLOG
throw std::runtime_error(msg); throw std::runtime_error(msg);
#else
LOG(FATAL) << msg;
#endif
} }
// Note: This Macro can only be used within enforce.h // Note: This Macro can only be used within enforce.h
...@@ -660,11 +656,7 @@ inline std::string build_nvidia_error_msg(cudaError_t e) { ...@@ -660,11 +656,7 @@ inline std::string build_nvidia_error_msg(cudaError_t e) {
} }
inline void throw_on_error(cudaError_t e, const std::string& msg) { inline void throw_on_error(cudaError_t e, const std::string& msg) {
#ifndef REPLACE_ENFORCE_GLOG
throw std::runtime_error(msg); throw std::runtime_error(msg);
#else
LOG(FATAL) << msg;
#endif
} }
/** curand ERROR **/ /** curand ERROR **/
...@@ -711,12 +703,8 @@ inline std::string build_nvidia_error_msg(curandStatus_t stat) { ...@@ -711,12 +703,8 @@ inline std::string build_nvidia_error_msg(curandStatus_t stat) {
} }
inline void throw_on_error(curandStatus_t stat, const std::string& msg) { inline void throw_on_error(curandStatus_t stat, const std::string& msg) {
#ifndef REPLACE_ENFORCE_GLOG
throw thrust::system_error(cudaErrorLaunchFailure, thrust::cuda_category(), throw thrust::system_error(cudaErrorLaunchFailure, thrust::cuda_category(),
msg); msg);
#else
LOG(FATAL) << msg;
#endif
} }
/***** CUDNN ERROR *****/ /***** CUDNN ERROR *****/
...@@ -730,11 +718,7 @@ inline std::string build_nvidia_error_msg(cudnnStatus_t stat) { ...@@ -730,11 +718,7 @@ inline std::string build_nvidia_error_msg(cudnnStatus_t stat) {
} }
inline void throw_on_error(cudnnStatus_t stat, const std::string& msg) { inline void throw_on_error(cudnnStatus_t stat, const std::string& msg) {
#ifndef REPLACE_ENFORCE_GLOG
throw std::runtime_error(msg); throw std::runtime_error(msg);
#else
LOG(FATAL) << msg;
#endif
} }
/***** CUBLAS ERROR *****/ /***** CUBLAS ERROR *****/
...@@ -773,11 +757,7 @@ inline std::string build_nvidia_error_msg(cublasStatus_t stat) { ...@@ -773,11 +757,7 @@ inline std::string build_nvidia_error_msg(cublasStatus_t stat) {
} }
inline void throw_on_error(cublasStatus_t stat, const std::string& msg) { inline void throw_on_error(cublasStatus_t stat, const std::string& msg) {
#ifndef REPLACE_ENFORCE_GLOG
throw std::runtime_error(msg); throw std::runtime_error(msg);
#else
LOG(FATAL) << msg;
#endif
} }
/***** CUSOLVER ERROR *****/ /***** CUSOLVER ERROR *****/
...@@ -811,11 +791,7 @@ inline std::string build_nvidia_error_msg(cusolverStatus_t stat) { ...@@ -811,11 +791,7 @@ inline std::string build_nvidia_error_msg(cusolverStatus_t stat) {
} }
inline void throw_on_error(cusolverStatus_t stat, const std::string& msg) { inline void throw_on_error(cusolverStatus_t stat, const std::string& msg) {
#ifndef REPLACE_ENFORCE_GLOG
throw std::runtime_error(msg); throw std::runtime_error(msg);
#else
LOG(FATAL) << msg;
#endif
} }
/****** NCCL ERROR ******/ /****** NCCL ERROR ******/
...@@ -830,11 +806,7 @@ inline std::string build_nvidia_error_msg(ncclResult_t nccl_result) { ...@@ -830,11 +806,7 @@ inline std::string build_nvidia_error_msg(ncclResult_t nccl_result) {
} }
inline void throw_on_error(ncclResult_t nccl_result, const std::string& msg) { inline void throw_on_error(ncclResult_t nccl_result, const std::string& msg) {
#ifndef REPLACE_ENFORCE_GLOG
throw std::runtime_error(msg); throw std::runtime_error(msg);
#else
LOG(FATAL) << msg;
#endif
} }
#endif // not(__APPLE__) and PADDLE_WITH_NCCL #endif // not(__APPLE__) and PADDLE_WITH_NCCL
......
...@@ -185,11 +185,11 @@ void InitDevices(bool init_p2p, const std::vector<int> devices) { ...@@ -185,11 +185,11 @@ void InitDevices(bool init_p2p, const std::vector<int> devices) {
// Throw some informations when CPU instructions mismatch. // Throw some informations when CPU instructions mismatch.
#define AVX_GUIDE(compiletime, runtime) \ #define AVX_GUIDE(compiletime, runtime) \
LOG(FATAL) \ PADDLE_THROW(platform::errors::Unavailable( \
<< "This version is compiled on higher instruction(" #compiletime \ "This version is compiled on higher instruction(" #compiletime \
") system, you may encounter illegal instruction error running on" \ ") system, you may encounter illegal instruction error running on" \
" your local CPU machine. Please reinstall the " #runtime \ " your local CPU machine. Please reinstall the " #runtime \
" version or compile from source code." " version or compile from source code."))
#ifdef __AVX512F__ #ifdef __AVX512F__
if (!platform::MayIUse(platform::avx512f)) { if (!platform::MayIUse(platform::avx512f)) {
......
...@@ -117,7 +117,9 @@ py::dtype PaddleDTypeToNumpyDType(PaddleDType dtype) { ...@@ -117,7 +117,9 @@ py::dtype PaddleDTypeToNumpyDType(PaddleDType dtype) {
dt = py::dtype::of<float>(); dt = py::dtype::of<float>();
break; break;
default: default:
LOG(FATAL) << "unsupported dtype"; PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported data type. Now only supports INT32, INT64 and "
"FLOAT32."));
} }
return dt; return dt;
...@@ -150,7 +152,9 @@ size_t PaddleGetDTypeSize(PaddleDType dt) { ...@@ -150,7 +152,9 @@ size_t PaddleGetDTypeSize(PaddleDType dt) {
size = sizeof(float); size = sizeof(float);
break; break;
default: default:
LOG(FATAL) << "unsupported dtype"; PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported data type. Now only supports INT32, INT64 and "
"FLOAT32."));
} }
return size; return size;
} }
...@@ -172,7 +176,9 @@ py::array ZeroCopyTensorToNumpy(ZeroCopyTensor &tensor) { // NOLINT ...@@ -172,7 +176,9 @@ py::array ZeroCopyTensorToNumpy(ZeroCopyTensor &tensor) { // NOLINT
tensor.copy_to_cpu<float>(static_cast<float *>(array.mutable_data())); tensor.copy_to_cpu<float>(static_cast<float *>(array.mutable_data()));
break; break;
default: default:
LOG(FATAL) << "unsupported dtype"; PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported data type. Now only supports INT32, INT64 and "
"FLOAT32."));
} }
return array; return array;
} }
...@@ -252,7 +258,9 @@ void BindPaddleBuf(py::module *m) { ...@@ -252,7 +258,9 @@ void BindPaddleBuf(py::module *m) {
auto size = self.length() / sizeof(float); auto size = self.length() / sizeof(float);
l = py::cast(std::vector<float>(data, data + size)); l = py::cast(std::vector<float>(data, data + size));
} else { } else {
LOG(FATAL) << "unsupported dtype"; PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported data type. Now only supports INT32, INT64 and "
"FLOAT32."));
} }
return l; return l;
}) })
......
...@@ -158,7 +158,13 @@ fi ...@@ -158,7 +158,13 @@ fi
HAS_BOOST_GET=`git diff -U0 upstream/$BRANCH |grep "^+" |grep -o -m 1 "boost::get" || true` HAS_BOOST_GET=`git diff -U0 upstream/$BRANCH |grep "^+" |grep -o -m 1 "boost::get" || true`
if [ ${HAS_BOOST_GET} ] && [ "${GIT_PR_ID}" != "" ]; then if [ ${HAS_BOOST_GET} ] && [ "${GIT_PR_ID}" != "" ]; then
echo_line="boost::get is not recommended, because it may throw an bad_get exception without any stack information, so please use BOOST_GET(_**)(dtype, value) series macros here. If these macros cannot meet your needs, please use try-catch to handle boost::get and specify chenwhql (Recommend), luotao1 or lanxianghit review and approve.\n" echo_line="boost::get is not recommended, because it may throw an bad_get exception without any stack information, so please use BOOST_GET(_**)(dtype, value) series macros here. If these macros cannot meet your needs, please use try-catch to handle boost::get and request chenwhql (Recommend), luotao1 or lanxianghit review and approve.\n"
check_approval 1 6836917 47554610 22561442
fi
HAS_LOG_FATAL=`git diff -U0 upstream/$BRANCH |grep "^+" |grep -o -m 1 "LOG(FATAL)" || true`
if [ ${HAS_LOG_FATAL} ] && [ "${GIT_PR_ID}" != "" ]; then
echo_line="LOG(FATAL) is not recommended, because it will throw exception without standard stack information, so please use PADDLE_THROW macro here. If you have to use LOG(FATAL) here, please request chenwhql (Recommend), luotao1 or lanxianghit review and approve.\n"
check_approval 1 6836917 47554610 22561442 check_approval 1 6836917 47554610 22561442
fi fi
...@@ -190,7 +196,7 @@ ALL_PADDLE_CHECK=`git diff -U0 upstream/$BRANCH |grep "^+" |grep -zoE "(PADDLE_E ...@@ -190,7 +196,7 @@ ALL_PADDLE_CHECK=`git diff -U0 upstream/$BRANCH |grep "^+" |grep -zoE "(PADDLE_E
VALID_PADDLE_CHECK=`echo "$ALL_PADDLE_CHECK" | grep -zoE '(PADDLE_ENFORCE[A-Z_]{0,9}|PADDLE_THROW)\((.[^,;]+,)*.[^";]*(errors::).[^"]*".[^";]{20,}.[^;]*\);\s' || true` VALID_PADDLE_CHECK=`echo "$ALL_PADDLE_CHECK" | grep -zoE '(PADDLE_ENFORCE[A-Z_]{0,9}|PADDLE_THROW)\((.[^,;]+,)*.[^";]*(errors::).[^"]*".[^";]{20,}.[^;]*\);\s' || true`
INVALID_PADDLE_CHECK=`echo "$ALL_PADDLE_CHECK" |grep -vxF "$VALID_PADDLE_CHECK" || true` INVALID_PADDLE_CHECK=`echo "$ALL_PADDLE_CHECK" |grep -vxF "$VALID_PADDLE_CHECK" || true`
if [ "${INVALID_PADDLE_CHECK}" != "" ] && [ "${GIT_PR_ID}" != "" ]; then if [ "${INVALID_PADDLE_CHECK}" != "" ] && [ "${GIT_PR_ID}" != "" ]; then
echo_line="The error message you wrote in PADDLE_ENFORCE{_**} or PADDLE_THROW does not meet our error message writing specification. Possible errors include 1. the error message is empty / 2. the error message is too short / 3. the error type is not specified. Please read the specification [ https://github.com/PaddlePaddle/Paddle/wiki/Paddle-Error-Message-Writing-Specification ], then refine the error message. If it is a mismatch, please specify chenwhql (Recommend), luotao1 or lanxianghit review and approve.\nThe PADDLE_ENFORCE{_**} or PADDLE_THROW entries that do not meet the specification are as follows:\n${INVALID_PADDLE_CHECK}\n" echo_line="The error message you wrote in PADDLE_ENFORCE{_**} or PADDLE_THROW does not meet our error message writing specification. Possible errors include 1. the error message is empty / 2. the error message is too short / 3. the error type is not specified. Please read the specification [ https://github.com/PaddlePaddle/Paddle/wiki/Paddle-Error-Message-Writing-Specification ], then refine the error message. If it is a mismatch, please request chenwhql (Recommend), luotao1 or lanxianghit review and approve.\nThe PADDLE_ENFORCE{_**} or PADDLE_THROW entries that do not meet the specification are as follows:\n${INVALID_PADDLE_CHECK}\n"
check_approval 1 6836917 47554610 22561442 check_approval 1 6836917 47554610 22561442
fi fi
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册