未验证 提交 d1062d52 编写于 作者: C Chen Weihang 提交者: GitHub

Replace all errors thrown by LOG(FATAL) with PADDLE_THROW (#24759)

* remove REPLACE_ENFORCE_GLOG compile option & add ci rule prohibit LOG(FATAL) using, test=develop

* remove ci test case, test=develop

* replace all LOG(FATAL) & polish message, test=develop

* fix typo, test=develop

* polish error info detail, test=develop
上级 a4f60034
......@@ -80,7 +80,6 @@ option(WITH_PSLIB "Compile with pslib support" OFF)
option(WITH_BOX_PS "Compile with box_ps support" OFF)
option(WITH_XBYAK "Compile with xbyak support" ON)
option(WITH_CONTRIB "Compile the third-party contributation" OFF)
option(REPLACE_ENFORCE_GLOG "Replace PADDLE_ENFORCE with glog/CHECK for better debug." OFF)
option(WITH_GRPC "Use grpc as the default rpc framework" ${WITH_DISTRIBUTE})
option(WITH_INFERENCE_API_TEST "Test fluid inference C++ high-level api interface" OFF)
option(PY_VERSION "Compile PaddlePaddle with python3 support" ${PY_VERSION})
......@@ -103,11 +102,6 @@ if(NOT CMAKE_BUILD_TYPE)
FORCE)
endif()
# Replace PADDLE_ENFORCE with glog/CHECK for better debug
if(REPLACE_ENFORCE_GLOG)
add_definitions("-DREPLACE_ENFORCE_GLOG")
endif()
# the type of sanitizer, options are: Address, Leak, Memory, Thread, Undefined. Default: OFF
if(SANITIZER_TYPE AND NOT "${SANITIZER_TYPE}" MATCHES "^(Address|Leak|Memory|Thread|Undefined)$")
message("Choose the correct type of sanitizer")
......
......@@ -32,6 +32,7 @@
#include <valarray>
#include <vector>
#include "paddle/fluid/framework/expect.h"
#include "paddle/fluid/platform/enforce.h"
namespace paddle {
namespace framework {
......@@ -43,7 +44,10 @@ class ArchiveBase {
// Archive is not copyable. But to allow move capture by function objects,
// check it at runtime rather than at compile time.
ArchiveBase(const ArchiveBase&) { LOG(FATAL) << "Not supported"; }
ArchiveBase(const ArchiveBase&) {
PADDLE_THROW(platform::errors::Unavailable(
"ArchiveBase class does not support copy construction."));
}
ArchiveBase(ArchiveBase&& other)
: buffer_(other.buffer_),
......@@ -62,7 +66,8 @@ class ArchiveBase {
public:
ArchiveBase& operator=(const ArchiveBase&) {
LOG(FATAL) << "Not supported";
PADDLE_THROW(platform::errors::Unavailable(
"ArchiveBase class does not support assignment construction."));
return *this;
}
......
......@@ -34,7 +34,7 @@ paddle::framework::DataFeedDesc load_datafeed_param_from_file(
const char* filename) {
paddle::framework::DataFeedDesc data_feed_desc;
int file_descriptor = open(filename, O_RDONLY);
PADDLE_ENFORCE_NE(file_descriptor, -1, platform::errors::Unavaliable(
PADDLE_ENFORCE_NE(file_descriptor, -1, platform::errors::Unavailable(
"Cannot open file %s.", filename));
google::protobuf::io::FileInputStream fileInput(file_descriptor);
google::protobuf::TextFormat::Parse(&fileInput, &data_feed_desc);
......@@ -45,7 +45,7 @@ paddle::framework::DataFeedDesc load_datafeed_param_from_file(
const std::vector<std::string> load_filelist_from_file(const char* filename) {
std::vector<std::string> filelist;
std::ifstream fin(filename);
PADDLE_ENFORCE_EQ(fin.good(), true, platform::errors::Unavaliable(
PADDLE_ENFORCE_EQ(fin.good(), true, platform::errors::Unavailable(
"Cannot open file %s.", filename));
std::string line;
while (getline(fin, line)) {
......
......@@ -187,16 +187,8 @@ void AllReduceOpHandle::SyncNCCLAllReduce() {
nccl_ctxs_->GetRunEnvNCCLCtx(run_order_, use_hierarchical_allreduce_);
auto &nccl_ctx = nccl_ctxs->at(dev_id);
auto stream = nccl_ctx.stream();
cudaError_t e_sync = cudaStreamSynchronize(stream);
if (e_sync != 0) {
LOG(FATAL) << "cudaStreamSynchronize " << cudaGetErrorString(e_sync);
}
cudaError_t e_get = cudaGetLastError();
if (e_get != 0) {
LOG(FATAL) << "cudaGetLastError " << cudaGetErrorString(e_get)
<< " errno:" << e_get;
}
PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream));
PADDLE_ENFORCE_CUDA_SUCCESS(cudaGetLastError());
}
}
}
......
......@@ -34,9 +34,10 @@ class ExceptionHolder {
} catch (platform::EnforceNotMet& exp) {
Catch(exp);
} catch (std::exception& ex) {
LOG(FATAL) << "std::exception caught, " << ex.what();
PADDLE_THROW(platform::errors::Fatal(
"Unknown std::exception caught:\n%s.", ex.what()));
} catch (...) {
LOG(FATAL) << "Unknown exception caught";
PADDLE_THROW(platform::errors::Fatal("Unknown exception caught."));
}
}
......
......@@ -104,7 +104,8 @@ void DensePullThread::wait_all() {
}
if (_pull_dense_fail_times > 20) {
LOG(FATAL) << "pull dense failed times more than 20 times";
PADDLE_THROW(
platform::errors::Fatal("Pull dense failed more than 20 times."));
exit(-1);
}
......
......@@ -214,35 +214,35 @@ class AfsManager {
int fd_read[2];
int fd_write[2];
if (read) {
if (pipe(fd_read) != 0) {
LOG(FATAL) << "create read pipe failed";
return -1;
}
PADDLE_ENFORCE_EQ(
pipe(fd_read), 0,
platform::errors::External("Create read pipe failed in AfsManager."));
}
if (write) {
if (pipe(fd_write) != 0) {
LOG(FATAL) << "create write pipe failed";
return -1;
}
PADDLE_ENFORCE_EQ(pipe(fd_write), 0,
platform::errors::External(
"Create write pipe failed in AfsManager."));
}
pid = vfork();
if (pid < 0) {
LOG(FATAL) << "fork failed";
return -1;
}
PADDLE_ENFORCE_GE(
pid, 0,
platform::errors::External(
"Failed to create a child process via fork in AfsManager."));
if (pid == 0) {
if (read) {
if (-1 == dup2(fd_read[1], STDOUT_FILENO)) {
LOG(FATAL) << "dup2 failed";
}
PADDLE_ENFORCE_NE(
dup2(fd_read[1], STDOUT_FILENO), -1,
platform::errors::External(
"Failed to duplicate file descriptor via dup2 in AfsManager."));
close(fd_read[1]);
close(fd_read[0]);
}
if (write) {
if (-1 == dup2(fd_write[0], STDIN_FILENO)) {
LOG(FATAL) << "dup2 failed";
}
PADDLE_ENFORCE_NE(
dup2(fd_write[0], STDIN_FILENO), -1,
platform::errors::External(
"Failed to duplicate file descriptor via dup2 in AfsManager."));
close(fd_write[0]);
close(fd_write[1]);
}
......@@ -265,20 +265,20 @@ class AfsManager {
close(fd_read[1]);
fcntl(fd_read[0], F_SETFD, FD_CLOEXEC);
fp_read = fdopen(fd_read[0], "r");
if (0 == fp_read) {
LOG(FATAL) << "fdopen failed.";
return -1;
}
PADDLE_ENFORCE_NE(
fp_read, 0,
platform::errors::External(
"Failed to open file descriptor via fdopen in AfsManager."));
}
if (write) {
close(fd_write[0]);
fcntl(fd_write[1], F_SETFD, FD_CLOEXEC);
fp_write = fdopen(fd_write[1], "w");
if (0 == fp_write) {
LOG(FATAL) << "fdopen failed.";
return -1;
}
PADDLE_ENFORCE_NE(
fp_write, 0,
platform::errors::External(
"Failed to open file descriptor via fdopen in AfsManager."));
}
return 0;
}
......
......@@ -1085,7 +1085,8 @@ void FleetWrapper::ShrinkDenseTable(int table_id, Scope* scope,
push_status.wait();
auto status = push_status.get();
if (status != 0) {
LOG(FATAL) << "push shrink dense param failed, status[" << status << "]";
PADDLE_THROW(platform::errors::Fatal(
"push shrink dense param failed, status is [%d].", status));
sleep(sleep_seconds_before_fail_exit_);
exit(-1);
}
......
......@@ -13,8 +13,11 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/io/fs.h"
#include <memory>
#include "paddle/fluid/platform/enforce.h"
namespace paddle {
namespace framework {
......@@ -127,7 +130,8 @@ std::shared_ptr<FILE> localfs_open_write(std::string path,
int64_t localfs_file_size(const std::string& path) {
struct stat buf;
if (0 != stat(path.c_str(), &buf)) {
LOG(FATAL) << "file stat not zero";
PADDLE_THROW(platform::errors::External(
"Failed to get file status via stat function."));
return -1;
}
return (int64_t)buf.st_size;
......@@ -365,7 +369,9 @@ std::shared_ptr<FILE> fs_open_read(const std::string& path, int* err_no,
return hdfs_open_read(path, err_no, converter);
default:
LOG(FATAL) << "Not supported";
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupport file system. Now only supports local file system and "
"HDFS."));
}
return {};
......@@ -381,7 +387,9 @@ std::shared_ptr<FILE> fs_open_write(const std::string& path, int* err_no,
return hdfs_open_write(path, err_no, converter);
default:
LOG(FATAL) << "Not supported";
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupport file system. Now only supports local file system and "
"HDFS."));
}
return {};
......@@ -397,7 +405,9 @@ std::shared_ptr<FILE> fs_open(const std::string& path, const std::string& mode,
return fs_open_write(path, err_no, converter);
}
LOG(FATAL) << "Unknown mode: " << mode;
PADDLE_THROW(platform::errors::Unavailable(
"Unsupport file open mode: %s. Only supports 'r', 'rb', 'w' or 'wb'.",
mode));
return {};
}
......@@ -407,7 +417,8 @@ int64_t fs_file_size(const std::string& path) {
return localfs_file_size(path);
default:
LOG(FATAL) << "Not supported";
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupport file system. Now only supports local file system."));
}
return 0;
......@@ -422,7 +433,9 @@ void fs_remove(const std::string& path) {
return hdfs_remove(path);
default:
LOG(FATAL) << "Not supported";
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupport file system. Now only supports local file system and "
"HDFS."));
}
}
......@@ -435,7 +448,9 @@ std::vector<std::string> fs_list(const std::string& path) {
return hdfs_list(path);
default:
LOG(FATAL) << "Not supported";
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupport file system. Now only supports local file system and "
"HDFS."));
}
return {};
......@@ -450,7 +465,9 @@ std::string fs_tail(const std::string& path) {
return hdfs_tail(path);
default:
LOG(FATAL) << "Not supported";
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupport file system. Now only supports local file system and "
"HDFS."));
}
return "";
......@@ -465,7 +482,9 @@ bool fs_exists(const std::string& path) {
return hdfs_exists(path);
default:
LOG(FATAL) << "Not supported";
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupport file system. Now only supports local file system and "
"HDFS."));
}
return false;
......@@ -480,7 +499,9 @@ void fs_mkdir(const std::string& path) {
return hdfs_mkdir(path);
default:
LOG(FATAL) << "Not supported";
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupport file system. Now only supports local file system and "
"HDFS."));
}
}
......
......@@ -29,14 +29,16 @@ std::shared_ptr<FILE> shell_fopen(const std::string& path,
}
FILE* fp;
if (!(fp = fopen(path.c_str(), mode.c_str()))) {
LOG(FATAL) << "fopen fail, path[" << path << "], mode[" << mode << "]";
PADDLE_THROW(platform::errors::Unavailable(
"Failed to open file, path[%s], mode[%s].", path, mode));
}
return {fp, [path](FILE* fp) {
if (shell_verbose()) {
LOG(INFO) << "Closing file[" << path << "]";
}
if (0 != fclose(fp)) {
LOG(FATAL) << "fclose fail, path[" << path << "]";
PADDLE_THROW(platform::errors::Unavailable(
"Failed to close file, path[%s].", path));
}
}};
#endif
......@@ -58,7 +60,7 @@ static int close_open_fds_internal() {
int dir_fd = -1;
if ((dir_fd = open("/proc/self/fd", O_RDONLY)) < 0) {
LOG(FATAL) << "proc/self/fd open fail";
PADDLE_THROW(platform::errors::Unavailable("Failed to open proc/self/fd."));
return -1;
}
char buffer[sizeof(linux_dirent)];
......@@ -68,7 +70,8 @@ static int close_open_fds_internal() {
if ((bytes = syscall(SYS_getdents, dir_fd,
reinterpret_cast<linux_dirent*>(buffer),
sizeof(buffer))) < 0) {
LOG(FATAL) << "syscall fail";
PADDLE_THROW(platform::errors::Unavailable(
"System call failed via syscall function."));
return -1;
}
......
......@@ -281,7 +281,9 @@ void MultiDevSSAGraphBuilderBase::InsertScaleLossGradOp(
loss_scale = 0;
break;
default:
LOG(FATAL) << "Unknown gradient scale strategy.";
PADDLE_THROW(platform::errors::Unimplemented(
"Unknown gradient scale strategy. Now only supports One, "
"CoeffNumDevice and Customized strategies."));
break;
}
......@@ -1054,7 +1056,9 @@ void DistSSAGraphBuilder::InsertCollectiveOp(ir::Graph *result,
}
break;
default:
LOG(FATAL) << "Unknown reduce strategy.";
PADDLE_THROW(platform::errors::Unimplemented(
"Unknown reduce strategy. Now only supports Reduce and AllReduce "
"strategies."));
break;
}
}
......
......@@ -126,7 +126,8 @@ class Pass {
protected:
virtual void ApplyImpl(Graph *graph) const {
LOG(FATAL) << "Calling virtual Pass not implemented.";
PADDLE_THROW(platform::errors::Unimplemented(
"The virtual Pass called is not implemented."));
}
// Some Pass must be placed before this Pass, and some
......
......@@ -70,8 +70,8 @@ void PullDenseWorker::Wait(std::vector<::std::future<int32_t>>* status_vec) {
size_t MAX_FAIL_NUM = 20;
if (pull_dense_fail_times_ > MAX_FAIL_NUM) {
LOG(FATAL) << "Pull Dense Failed Times More Than " << MAX_FAIL_NUM
<< " Times";
PADDLE_THROW(platform::errors::Fatal(
"Pull dense failed more than %d times.", MAX_FAIL_NUM));
exit(-1);
}
status_vec->resize(0);
......
......@@ -38,10 +38,11 @@ struct ExceptionHandler {
void operator()() const {
auto ex = this->future_.get();
if (ex != nullptr) {
LOG(FATAL) << "The exception is thrown inside the thread pool. You "
"should use RunAndGetException to handle the exception.\n"
"The default exception handler is LOG(FATAL)."
<< ex->what();
PADDLE_THROW(platform::errors::Fatal(
"The exception is thrown inside the thread pool. You "
"should use RunAndGetException to handle the exception."
"The exception is:\n %s.",
ex->what()));
}
}
};
......@@ -78,9 +79,11 @@ class ThreadPool {
return std::unique_ptr<platform::EnforceNotMet>(
new platform::EnforceNotMet(ex));
} catch (const std::exception& e) {
LOG(FATAL) << "Unexpected exception is catched in thread pool. All "
"throwable exception in Fluid should be an EnforceNotMet."
<< e.what();
PADDLE_THROW(platform::errors::Fatal(
"Unexpected exception is caught in thread pool. All "
"throwable exception in Paddle should be an EnforceNotMet. "
"The exception is:\n %s.",
e.what()));
}
return nullptr;
});
......
......@@ -579,11 +579,12 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
if (config.thread_local_stream_enabled() &&
process_level_allocator_enabled) {
LOG(FATAL) << " When binding threads and streams, the use of "
"process-level allocators will result in undefined result "
"errors due to memory asynchronous operations."
"The thread and stream binding configuration of all "
"predictors should be the same in a single process.";
PADDLE_THROW(platform::errors::Fatal(
"When binding threads and streams, the use of "
"process-level allocators will result in undefined result "
"errors due to memory asynchronous operations."
"The thread and stream binding configuration of all "
"predictors should be the same in a single process."));
}
}
......@@ -917,8 +918,9 @@ std::string AnalysisPredictor::GetSerializedProgram() const {
bool AnalysisPredictor::CheckOperatorCompatible() {
if (!inference_program_) {
LOG(FATAL) << "Inference program version check failed because the program "
"does not exist.";
PADDLE_THROW(platform::errors::PreconditionNotMet(
"Inference program version check failed because the program does not "
"exist."));
return false;
}
bool res = true;
......
......@@ -46,7 +46,8 @@ PaddleTensor LodTensorToPaddleTensor(framework::LoDTensor* t) {
pt.data.Reset(t->data<void>(), t->numel() * sizeof(int32_t));
pt.dtype = PaddleDType::INT32;
} else {
LOG(FATAL) << "unsupported type.";
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported tensor data type. Now only supports INT64, FP32, INT32."));
}
pt.shape = framework::vectorize<int>(t->dims());
return pt;
......
......@@ -47,7 +47,9 @@ platform::Place GetNativePlace(const TargetType& type, int id = 0) {
case TargetType::kCUDA:
return platform::CUDAPlace(id);
default:
LOG(FATAL) << "Error target type.";
PADDLE_THROW(
platform::errors::Unavailable("Unsupported target type. Now only "
"supports Host, x86, CUDA target."));
return platform::Place();
}
}
......@@ -70,7 +72,9 @@ PrecisionType GetLitePrecisionType(framework::proto::VarType::Type type) {
case framework::proto::VarType_Type_INT64:
return PrecisionType::kInt64;
default:
LOG(FATAL) << "Error precision type.";
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported precision type. Now only supports FP32, INT8, INT32 and "
"INT64."));
return PrecisionType::kUnk;
}
}
......@@ -87,7 +91,9 @@ framework::proto::VarType::Type GetNativePrecisionType(
case PrecisionType::kInt64:
return framework::proto::VarType_Type_INT64;
default:
LOG(FATAL) << "Error precision type.";
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported precision type. Now only supports FP32, INT8, INT32 and "
"INT64."));
return static_cast<framework::proto::VarType::Type>(-1);
}
}
......@@ -97,7 +103,8 @@ framework::DataLayout GetNativeLayoutType(const DataLayoutType& type) {
case DataLayoutType::kNCHW:
return framework::DataLayout::kNCHW;
default:
LOG(FATAL) << "Error layout type.";
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported layout type. Now only supports NCHW."));
return static_cast<framework::DataLayout>(-1);
}
}
......@@ -112,10 +119,12 @@ void MemoryCopyAsync(const platform::Place& dst_place, void* dst_data,
#ifdef PADDLE_WITH_CUDA
if (platform::is_cpu_place(dst_place) &&
platform::is_gpu_place(src_place)) {
LOG(FATAL) << "lite::MemoryCopy GPU->CPU is not yet implemented.";
PADDLE_THROW(platform::errors::Unimplemented(
"Lite::MemoryCopy GPU->CPU is not yet implemented."));
} else if (platform::is_gpu_place(dst_place) &&
platform::is_cpu_place(src_place)) {
LOG(FATAL) << "lite::MemoryCopy CPU->GPU is not yet implemented.";
PADDLE_THROW(platform::errors::Unimplemented(
"Lite::MemoryCopy CPU->GPU is not yet implemented."));
} else if (platform::is_gpu_place(dst_place) &&
platform::is_gpu_place(src_place)) {
auto gpu_place = BOOST_GET_CONST(platform::CUDAPlace, src_place);
......@@ -124,7 +133,8 @@ void MemoryCopyAsync(const platform::Place& dst_place, void* dst_data,
static_cast<const platform::CUDADeviceContext&>(ctx).stream());
}
#else
LOG(FATAL) << "You must define PADDLE_WITH_CUDA for using CUDAPlace.";
PADDLE_THROW(platform::errors::PreconditionNotMet(
"You must define PADDLE_WITH_CUDA for using CUDAPlace."));
#endif
}
}
......
......@@ -78,8 +78,9 @@ bool TRTInt8Calibrator::setBatch(
for (const auto& it : data) {
auto dataptr = data_buffers_.find(it.first);
if (dataptr == data_buffers_.end()) {
LOG(FATAL) << "FATAL " << engine_name_ << " input name '" << it.first
<< "' does not match with the buffer names";
PADDLE_THROW(platform::errors::Fatal(
"%s input name '%s' does not match with the buffer names.",
engine_name_, it.first));
}
const auto& d = dataptr->second;
PADDLE_ENFORCE(
......@@ -109,8 +110,10 @@ bool TRTInt8Calibrator::getBatch(void** bindings, const char** names,
for (int i = 0; i < num_bindings; i++) {
auto it = data_buffers_.find(names[i]);
if (it == data_buffers_.end()) {
LOG(FATAL) << "Calibration engine asked for unknown tensor name '"
<< names[i] << "' at position " << i;
PADDLE_THROW(
platform::errors::Fatal("Calibration engine asked for unknown tensor "
"name '%s' at position %d.",
names[i], i));
}
bindings[i] = it->second.first;
}
......
......@@ -180,15 +180,14 @@ void *Alloc<platform::CUDAPlace>(const platform::CUDAPlace &place,
platform::CUDADeviceGuard(place.device);
size_t avail, total;
platform::GpuMemoryUsage(&avail, &total);
LOG(FATAL) << "Cannot allocate " << string::HumanReadableSize(size)
<< " in GPU " << place.device << ", available "
<< string::HumanReadableSize(avail) << ", total "
<< string::HumanReadableSize(total) << ", GpuMinChunkSize "
<< string::HumanReadableSize(buddy_allocator->GetMinChunkSize())
<< ", GpuMaxChunkSize "
<< string::HumanReadableSize(buddy_allocator->GetMaxChunkSize())
<< ", GPU memory used: "
<< string::HumanReadableSize(Used<platform::CUDAPlace>(place));
PADDLE_THROW(platform::errors::ResourceExhausted(
"Cannot allocate %s in GPU %d, available %s, total %s, GpuMinChunkSize "
"%s, GpuMaxChunkSize %s, GPU memory used: %s.",
string::HumanReadableSize(size), place.device,
string::HumanReadableSize(avail), string::HumanReadableSize(total),
string::HumanReadableSize(buddy_allocator->GetMinChunkSize()),
string::HumanReadableSize(buddy_allocator->GetMaxChunkSize()),
string::HumanReadableSize(Used<platform::CUDAPlace>(place))));
} else {
if (FLAGS_init_allocated_mem) {
cudaMemset(ptr, 0xEF, size);
......
......@@ -27,7 +27,8 @@ ThreadLocalAllocatorImpl::ThreadLocalAllocatorImpl(const platform::Place& p)
BOOST_GET_CONST(platform::CUDAPlace, place_).device)),
platform::GpuMinChunkSize(), platform::GpuMaxChunkSize()));
} else {
LOG(FATAL) << "Thread local allocator only supports CUDAPlace now.";
PADDLE_THROW(platform::errors::Unavailable(
"Thread local allocator only supports CUDAPlace now."));
}
}
......
......@@ -47,7 +47,8 @@ void OpTester::Init(const OpTesterConfig &config) {
CreateInputVarDesc();
CreateOutputVarDesc();
} else {
LOG(FATAL) << "Op \"" << config_.op_type << "\" is not registered.";
PADDLE_THROW(platform::errors::NotFound("Operator '%s' is not registered.",
config_.op_type));
}
if (config_.device_id >= 0) {
......@@ -169,10 +170,10 @@ void OpTester::CreateInputVarDesc() {
std::vector<std::string> input_names = GetOpProtoInputNames();
for (auto &name : input_names) {
const OpInputConfig *input = config_.GetInput(name);
if (input == nullptr) {
LOG(FATAL) << "The input " << name << " of op " << config_.op_type
<< " is not correctlly provided.";
}
PADDLE_ENFORCE_NOT_NULL(
input, platform::errors::NotFound(
"The input %s of operator %s is not correctly provided.",
name, config_.op_type));
std::string var_name = config_.op_type + "." + name;
framework::VarDesc *var = Var(var_name);
......@@ -207,9 +208,10 @@ void OpTester::CreateOpDesc() {
GetOpProtoAttrNames();
for (auto item : config_.attrs) {
const std::string &name = item.first;
if (attr_types.find(name) == attr_types.end()) {
LOG(FATAL) << "Operator " << type_ << " do not have attr " << name;
}
PADDLE_ENFORCE_NE(
attr_types.find(name), attr_types.end(),
platform::errors::NotFound("Operator %s does not have attribute %s.",
type_, name));
const std::string &value_str = item.second;
const framework::proto::AttrType &type = attr_types[name];
......@@ -231,7 +233,8 @@ void OpTester::CreateOpDesc() {
case framework::proto::AttrType::INTS:
case framework::proto::AttrType::FLOATS:
case framework::proto::AttrType::STRINGS:
LOG(FATAL) << "Not supported yet.";
PADDLE_THROW(
platform::errors::Unimplemented("Not supported INTS, FLOATS or STRINGS type yet."));
break;
case framework::proto::AttrType::LONG: {
int64_t value = StringTo<int64_t>(value_str);
......
......@@ -43,10 +43,7 @@ class CSyncCalcStreamOp : public framework::OperatorBase {
#if defined(PADDLE_WITH_CUDA) && !defined(_WIN32)
auto dev_ctx = static_cast<platform::CUDADeviceContext*>(
platform::DeviceContextPool::Instance().Get(place));
cudaError_t e_sync = cudaStreamSynchronize(dev_ctx->stream());
if (e_sync != 0) {
LOG(FATAL) << "Fail to sync cuda stream: " << cudaGetErrorString(e_sync);
}
PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(dev_ctx->stream()));
#else
PADDLE_THROW("PaddlePaddle should compile with GPU.");
#endif
......
......@@ -45,10 +45,7 @@ class CSyncCommStreamOp : public framework::OperatorBase {
int ring_id = Attr<int>("ring_id");
auto stream =
platform::NCCLCommContext::Instance().Get(ring_id, place)->stream();
cudaError_t e_sync = cudaStreamSynchronize(stream);
if (e_sync != 0) {
LOG(FATAL) << "Fail to sync nccl stream: " << cudaGetErrorString(e_sync);
}
PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream));
#else
PADDLE_THROW("PaddlePaddle should compile with GPU.");
#endif
......
......@@ -192,11 +192,7 @@ class DataNormGradKernel<platform::CUDADeviceContext, T>
reinterpret_cast<const void *>(d_batch_square_sum),
reinterpret_cast<void *>(d_batch_square_sum), C,
platform::ToNCCLDataType(x->type()), ncclSum, comm->comm(), stream));
cudaError_t e_sync = cudaStreamSynchronize(stream);
if (e_sync != 0) {
LOG(FATAL) << "Fail to sync nccl stream: "
<< cudaGetErrorString(e_sync);
}
PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream));
#else
PADDLE_THROW(platform::errors::PreconditionNotMet(
"PaddlePaddle should compile with GPU, and need_sync_stats cannot be "
......
......@@ -471,7 +471,9 @@ class DetectionMAPOpKernel : public framework::OpKernel<T> {
mAP += average_precisions;
++count;
} else {
LOG(FATAL) << "Unkown ap version: " << ap_type;
PADDLE_THROW(platform::errors::Unimplemented(
"Unknown ap version %s. Now only supports integral and l1point.",
ap_type));
}
}
if (count != 0) mAP /= count;
......
......@@ -182,7 +182,9 @@ class DGCOpKernel : public framework::OpKernel<T> {
static_cast<void*>(encode_grad_out_data), k, v_out_data,
static_cast<int>(v_out->numel()), buf, dev_ctx.stream(),
u_out_data)) {
LOG(FATAL) << "v_out numel:" << v_out->numel();
// TODO(weihang): owner should polish this error message
PADDLE_THROW(platform::errors::InvalidArgument(
"V_out numel error, V_out numel is %d.", v_out->numel()));
}
math::SetConstant<DeviceContext, T> tset;
......
......@@ -37,8 +37,9 @@ void HandleSendResponse(brpc::Controller* cntl, sendrecv::VoidMessage* response,
ch_ptr->Push(ch_ctx);
if (cntl->Failed()) {
LOG(FATAL) << "Fail to send SendVar: " << var_h->name()
<< ", error text: " << cntl->ErrorText();
PADDLE_THROW(platform::errors::Unavailable(
"Failed to send variable %s, error text is %s.", var_h->name(),
cntl->ErrorText()));
var_h->Finish(false);
cls->DecreaseReqCount();
return;
......@@ -104,8 +105,9 @@ void HandleFetchBarrierResponse(brpc::Controller* cntl,
ch_ptr->Push(ch_ctx);
if (cntl->Failed()) {
LOG(FATAL) << "Fail to get HandleFetchBarrierResponse: " << var_h->name()
<< ", error text: " << cntl->ErrorText();
PADDLE_THROW(platform::errors::Unavailable(
"Failed to get HandleFetchBarrierResponse %s, error text is %s.",
var_h->name(), cntl->ErrorText()));
var_h->Finish(false);
cls->DecreaseReqCount();
return;
......@@ -131,8 +133,9 @@ void HandleGetResponse(brpc::Controller* cntl,
ch_ptr->Push(ch_ctx);
if (cntl->Failed()) {
LOG(FATAL) << "Fail to GetVar: " << var_h->name()
<< ", error text: " << cntl->ErrorText();
PADDLE_THROW(platform::errors::Unavailable(
"Failed to get variable %s, error text is %s.", var_h->name(),
cntl->ErrorText()));
cls->DecreaseReqCount();
var_h->Finish(false);
return;
......@@ -368,7 +371,8 @@ ChannelQueuePtr BRPCClient::GetChannel(const std::string& ep) {
for (int i = 0; i < brpc_channel_num_per_server_; ++i) {
std::shared_ptr<ChannelContext> c(new ChannelContext());
if (c->channel.Init(ep.c_str(), &options) != 0) {
LOG(FATAL) << "Fail to initialize channel";
PADDLE_THROW(
platform::errors::Unavailable("Failed to initialize channel."));
return nullptr;
}
......
......@@ -69,8 +69,10 @@ void RdmaMemPool::Register(const std::string& varname, void* data,
pthread_rwlock_unlock(&access_);
if (brpc::rdma::RegisterMemoryForRdma(data, data_size)) {
LOG(FATAL) << "register " << varname << " data:" << data
<< " data_size:" << data_size << " error";
PADDLE_THROW(platform::errors::Unavailable(
"Register memory for RDMA failed. Register %s data: %s data size %d "
"error.",
varname, data, data_size));
}
VLOG(4) << "register on rdma:" << varname << " data:" << data
......
......@@ -36,7 +36,9 @@ class IOBufWriter {
static void Append(const std::string& varname, butil::IOBuf* iobuf, int k,
const char* v, int64_t vlen) {
if (vlen >= std::numeric_limits<int>::max() || vlen < 0) {
LOG(FATAL) << "AppendZeroCopy varname:" << varname << ", vlen:" << vlen;
PADDLE_THROW(platform::errors::Unavailable(
"Variable length is invalid. Variable name is %s, length is %d.",
varname, vlen));
}
iobuf->append(reinterpret_cast<char*>(&k), 4);
......@@ -95,7 +97,9 @@ class IOBufWriter {
bool in_cuda_pinned, void (*destroy)(void*),
void* user_data) {
if (vlen >= std::numeric_limits<int>::max() || vlen < 0) {
LOG(FATAL) << "AppendZeroCopy varname:" << varname << ", vlen:" << vlen;
PADDLE_THROW(platform::errors::Unavailable(
"Variable length is invalid. Variable name is %s, length is %d.",
varname, vlen));
}
#ifdef PADDLE_WITH_BRPC_RDMA
......
......@@ -364,7 +364,8 @@ void AsyncBRPCServer::StartServer() {
// service is put on stack, we don't want server to delete it, otherwise
// use brpc::SERVER_OWNS_SERVICE.
if (server_.AddService(&service_impl, brpc::SERVER_DOESNT_OWN_SERVICE) != 0) {
LOG(FATAL) << "Fail to add service";
PADDLE_THROW(platform::errors::Unavailable(
"Failed to add service into BRPC server."));
return;
}
......@@ -375,7 +376,8 @@ void AsyncBRPCServer::StartServer() {
options.idle_timeout_sec = idle_timeout_s_;
options.max_concurrency = max_concurrency_;
if (server_.Start(bind_address_.c_str(), &options) != 0) {
LOG(FATAL) << "Fail to start EchoServer" << bind_address_;
PADDLE_THROW(platform::errors::Unavailable(
"Failed to start EchoServer %s.", bind_address_));
return;
}
......
......@@ -501,10 +501,11 @@ void GRPCClient::Proceed() {
VLOG(3) << c->GetVarHandlePtr()->String() << " process";
c->Process();
} else if (c->status_.error_code() == grpc::StatusCode::DEADLINE_EXCEEDED) {
LOG(FATAL) << c->GetVarHandlePtr()->String()
<< " meets grpc error, error_code:" << c->status_.error_code()
<< " error_message:" << c->status_.error_message()
<< " error_details:" << c->status_.error_details();
PADDLE_THROW(platform::errors::External(
"%s meets grpc error, error_code is %d, error message is %s, error "
"details is %s.",
c->GetVarHandlePtr()->String(), c->status_.error_code(),
c->status_.error_message(), c->status_.error_details()));
{
std::lock_guard<std::mutex> lk(sync_mutex_);
ok_ = false;
......@@ -519,11 +520,11 @@ void GRPCClient::Proceed() {
c->GetVarHandlePtr()->should_retry = true;
c->Finish(false);
} else {
LOG(FATAL) << c->GetVarHandlePtr()->String()
<< " meets grpc error, error_code:" << c->status_.error_code()
<< " error_message:" << c->status_.error_message()
<< " error_details:" << c->status_.error_details();
PADDLE_THROW(platform::errors::External(
"%s meets grpc error, error_code is %d, error message is %s, error "
"details is %s.",
c->GetVarHandlePtr()->String(), c->status_.error_code(),
c->status_.error_message(), c->status_.error_details()));
c->Finish(false);
}
......
......@@ -105,10 +105,9 @@ void SerializeToByteBuffer(const std::string& name, framework::Variable* var,
e.WriteVarlengthBeginning(VarMsg::kSerializedFieldNumber,
payload->memory_size());
if (payload->memory_size() >= std::numeric_limits<int>::max()) {
LOG(FATAL) << "FATAL error: varname:" << name
<< ", vlen:" << payload->memory_size()
<< " >= std::numeric_limits<int>::max():"
<< std::numeric_limits<int>::max() << ", so exit!";
PADDLE_THROW(platform::errors::InvalidArgument(
"Variable %s length %d should less than %d.", name,
payload->memory_size(), std::numeric_limits<int>::max()));
}
// steal reference of tensor data
::grpc::Slice slices[4]; // metadata, tensor, rows meta, rows
......
......@@ -131,10 +131,12 @@ void ParameterRecv<T>::operator()(const RpcContext &rpc_ctx,
}
}
auto numel = recv_tensor->numel();
if (recv_numel != numel) {
LOG(FATAL) << "recv_numel: " << recv_numel << " acture numel: " << numel;
}
PADDLE_ENFORCE_EQ(recv_numel, numel);
PADDLE_ENFORCE_EQ(
recv_numel, numel,
platform::errors::InvalidArgument(
"The number of receive tensor's elements are not valid. The "
"recevie tensor numel is %d, the actual tensor numel is %d.",
recv_numel, numel));
} else if (recv_var->IsType<framework::SelectedRows>()) {
auto cpu_place = platform::CPUPlace();
auto *slr = recv_var->GetMutable<framework::SelectedRows>();
......
......@@ -81,14 +81,7 @@ class ProtoEncodeHelper {
ProtoEncodeHelper(char* buf, int max_size)
: base_(buf), p_(buf), limit_(base_ + max_size) {}
~ProtoEncodeHelper() {
#define REPLACE_ENFORCE_GLOG 1
// Make sure callers didn't do operations that went over max_size promised
if (paddle::platform::is_error(p_ <= limit_)) {
paddle::platform::throw_on_error(p_ <= limit_, "");
}
#undef REPLACE_ENFORCE_GLOG
}
~ProtoEncodeHelper() {}
const char* data() const { return base_; }
size_t size() const { return p_ - base_; }
......
......@@ -96,11 +96,9 @@ bool RequestSendHandler::Handle(const std::string& varname,
} else { // sync
rpc_server_->WaitCond(kRequestSend);
VLOG(3) << "sync: processing received var: " << varname;
if (invar == nullptr) {
LOG(FATAL) << "sync: Can not find server side var: " << varname;
return false;
}
PADDLE_ENFORCE_NOT_NULL(
invar, platform::errors::NotFound(
"sync: Can not find server side var %s.", varname));
}
}
return true;
......
......@@ -73,10 +73,7 @@ class AllReduceOpKernel : public framework::OpKernel<T> {
sendbuff, recvbuff, numel, static_cast<ncclDataType_t>(dtype), red_type,
comm, stream));
if (ctx.Attr<bool>("sync_mode")) {
cudaError_t e_sync = cudaStreamSynchronize(stream);
if (e_sync != 0) {
LOG(FATAL) << "cudaStreamSynchronize " << cudaGetErrorString(e_sync);
}
PADDLE_ENFORCE_CUDA_SUCCESS(cudaStreamSynchronize(stream));
}
#else
PADDLE_THROW("PaddlePaddle should compile with GPU.");
......
......@@ -71,7 +71,9 @@ static void FlParallelExecuteBlocks(
<< "pointer: " << prepared[run_block].get();
executor->RunPreparedContext(prepared[run_block].get(), scope);
} catch (const std::exception &e) {
LOG(FATAL) << "run sub program:" << idx << " error " << e.what();
PADDLE_THROW(platform::errors::Fatal(
"Run %d-th sub program failed. The exception is:\n%s.", idx,
e.what()));
}
}));
}
......
......@@ -74,7 +74,9 @@ static void ParallelExecuteBlocks(
<< "pointer: " << prepared[run_block].get();
executor->RunPreparedContext(prepared[run_block].get(), scope);
} catch (const std::exception &e) {
LOG(FATAL) << "run sub program:" << idx << " error " << e.what();
PADDLE_THROW(platform::errors::Fatal(
"Run %d-th sub program failed. The exception is:\n%s.", idx,
e.what()));
}
}));
}
......
......@@ -21,6 +21,7 @@
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/operators/jit/kernels.h"
#include "paddle/fluid/platform/device_tracer.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/port.h"
#include "paddle/fluid/platform/variant.h" // for UNUSED
......@@ -119,7 +120,8 @@ void BenchAllImpls(const typename KernelTuple::attr_type& attr, Args... args) {
// Test result from Get function
auto tgt = jit::KernelFuncs<KernelTuple, PlaceType>::Cache().At(attr);
if (!tgt) {
LOG(FATAL) << "Target can not be empty!";
PADDLE_THROW(
paddle::platform::errors::Fatal("Benchmark target can not be empty."));
}
infos.push_back(std::make_pair("Target", benchmark(tgt, args...)));
......
......@@ -17,6 +17,7 @@
#include <string>
#include "glog/logging.h"
#include "paddle/fluid/operators/jit/gen/jitcode.h"
#include "paddle/fluid/platform/enforce.h"
namespace paddle {
namespace operators {
......@@ -249,7 +250,8 @@ class VActFunc : public JitCode {
identity_jmm<JMM>(dst, src, 15);
break;
default:
LOG(FATAL) << "Do not support this operand type: " << type;
PADDLE_THROW(platform::errors::Unimplemented(
"Do not support operand type code: %d.", type));
break;
}
}
......@@ -263,7 +265,8 @@ class VActJitCode : public VActFunc {
if (!(type_ == operand_type::RELU || type_ == operand_type::EXP ||
type_ == operand_type::SIGMOID || type_ == operand_type::TANH ||
type_ == operand_type::IDENTITY || type_ == operand_type::SQUARE)) {
LOG(FATAL) << "Do not support this operand type: " << type_;
PADDLE_THROW(platform::errors::Unimplemented(
"Do not support operand type code: %d.", type));
}
this->genCode();
}
......
......@@ -17,6 +17,7 @@
#include <string>
#include "glog/logging.h"
#include "paddle/fluid/operators/jit/gen/jitcode.h"
#include "paddle/fluid/platform/enforce.h"
namespace paddle {
namespace operators {
......@@ -36,7 +37,8 @@ class VXXJitCode : public JitCode {
with_relu_(with_relu) {
if (!(type_ == operand_type::MUL || type_ == operand_type::ADD ||
type_ == operand_type::SUB)) {
LOG(FATAL) << "Do not support this operand type: " << type_;
PADDLE_THROW(platform::errors::Unimplemented(
"Do not support operand type code: %d.", type));
}
this->genCode();
}
......
......@@ -33,7 +33,8 @@ class EmbSeqPoolJitCode : public JitCode {
tbl_w_(attr.table_width),
type_(attr.pool_type) {
if (type_ != SeqPoolType::kSum) {
LOG(FATAL) << "Only support sum pool yet ";
PADDLE_THROW(
platform::errors::Unimplemented("Only supports sum pool yet."));
}
this->genCode();
}
......
......@@ -39,7 +39,8 @@ class GRUJitCode : public VActFunc {
} else if (type == KernelType::kVIdentity) {
return operand_type::IDENTITY;
} else {
LOG(FATAL) << "Do not support this jit::KernelType: " << type;
PADDLE_THROW(platform::errors::Unimplemented(
"Do not support jit::KernelType code: %d.", type));
}
return operand_type::IDENTITY;
};
......
......@@ -17,6 +17,7 @@
#include <string>
#include "glog/logging.h"
#include "paddle/fluid/operators/jit/gen/jitcode.h"
#include "paddle/fluid/platform/enforce.h"
namespace paddle {
namespace operators {
......@@ -30,7 +31,8 @@ class HOPVJitCode : public JitCode {
void* code_ptr = nullptr)
: JitCode(code_size, code_ptr), num_(d), type_(type) {
if (!(type_ == operand_type::MAX || type_ == operand_type::ADD)) {
LOG(FATAL) << "Do not support this operand type: " << type_;
PADDLE_THROW(platform::errors::Unimplemented(
"Do not support operand type code: %d.", type));
}
this->genCode();
}
......
......@@ -42,7 +42,8 @@ class LSTMJitCode : public VActFunc {
} else if (type == KernelType::kVIdentity) {
return operand_type::IDENTITY;
} else {
LOG(FATAL) << "Do not support this jit::KernelType: " << type;
PADDLE_THROW(platform::errors::Unimplemented(
"Do not support jit::KernelType code: %d.", type));
}
return operand_type::IDENTITY;
};
......
......@@ -32,7 +32,8 @@ class SeqPoolJitCode : public JitCode {
: JitCode(code_size, code_ptr), w_(attr.w), type_(attr.type) {
if (!(type_ == SeqPoolType::kSum || type_ == SeqPoolType::kAvg ||
type_ == SeqPoolType::kSqrt)) {
LOG(FATAL) << "Only supported pool type: sum, avg and sqrt.";
PADDLE_THROW(platform::errors::Unimplemented(
"Only supports sum, average and sqrt pool type."));
}
fp_h_[0] = 1.f;
this->genCode();
......
......@@ -98,7 +98,8 @@ void CreateTensor(framework::Scope* scope, const std::string& name,
#ifdef PADDLE_WITH_CUDA
place = platform::CUDAPlace(0);
#else
LOG(FATAL) << "You must define PADDLE_WITH_CUDA for using CUDAPlace.";
    PADDLE_THROW(platform::errors::PreconditionNotMet(
"You must define PADDLE_WITH_CUDA for using CUDAPlace."));
#endif
} else {
place = platform::CPUPlace();
......
......@@ -394,7 +394,8 @@ class BeamSearchFunctor<platform::CUDADeviceContext, T> {
end_id, is_accumulated, num_used_threads));
}
} else {
LOG(FATAL) << "Not implemented.";
PADDLE_THROW(platform::errors::Unimplemented(
"Not implemented other number of sequences yet."));
}
context.Wait();
......
......@@ -419,8 +419,9 @@ size_t MKLDNNDeviceContext::GetShapeBlobSize() const {
BlobMap* pMap = p_blobmap_.get();
auto map_it = pMap->find(tls().cur_mkldnn_session_id);
if (map_it == pMap->end()) {
LOG(FATAL) << "MKLDNNDeviceContext don't find cur_mkldnn_session_id : "
<< tls().cur_mkldnn_session_id;
PADDLE_THROW(platform::errors::NotFound(
"MKLDNNDeviceContext don't find cur_mkldnn_session_id: %d.",
tls().cur_mkldnn_session_id));
}
return map_it->second->size();
}
......
......@@ -265,11 +265,7 @@ inline std::string GetTraceBackString(StrType&& what, const char* file,
inline bool is_error(bool stat) { return !stat; }
inline void throw_on_error(bool stat, const std::string& msg) {
#ifndef REPLACE_ENFORCE_GLOG
throw std::runtime_error(msg);
#else
LOG(FATAL) << msg;
#endif
}
// Note: This Macro can only be used within enforce.h
......@@ -660,11 +656,7 @@ inline std::string build_nvidia_error_msg(cudaError_t e) {
}
inline void throw_on_error(cudaError_t e, const std::string& msg) {
#ifndef REPLACE_ENFORCE_GLOG
throw std::runtime_error(msg);
#else
LOG(FATAL) << msg;
#endif
}
/** curand ERROR **/
......@@ -711,12 +703,8 @@ inline std::string build_nvidia_error_msg(curandStatus_t stat) {
}
inline void throw_on_error(curandStatus_t stat, const std::string& msg) {
#ifndef REPLACE_ENFORCE_GLOG
throw thrust::system_error(cudaErrorLaunchFailure, thrust::cuda_category(),
msg);
#else
LOG(FATAL) << msg;
#endif
}
/***** CUDNN ERROR *****/
......@@ -730,11 +718,7 @@ inline std::string build_nvidia_error_msg(cudnnStatus_t stat) {
}
inline void throw_on_error(cudnnStatus_t stat, const std::string& msg) {
#ifndef REPLACE_ENFORCE_GLOG
throw std::runtime_error(msg);
#else
LOG(FATAL) << msg;
#endif
}
/***** CUBLAS ERROR *****/
......@@ -773,11 +757,7 @@ inline std::string build_nvidia_error_msg(cublasStatus_t stat) {
}
inline void throw_on_error(cublasStatus_t stat, const std::string& msg) {
#ifndef REPLACE_ENFORCE_GLOG
throw std::runtime_error(msg);
#else
LOG(FATAL) << msg;
#endif
}
/***** CUSOLVER ERROR *****/
......@@ -811,11 +791,7 @@ inline std::string build_nvidia_error_msg(cusolverStatus_t stat) {
}
inline void throw_on_error(cusolverStatus_t stat, const std::string& msg) {
#ifndef REPLACE_ENFORCE_GLOG
throw std::runtime_error(msg);
#else
LOG(FATAL) << msg;
#endif
}
/****** NCCL ERROR ******/
......@@ -830,11 +806,7 @@ inline std::string build_nvidia_error_msg(ncclResult_t nccl_result) {
}
inline void throw_on_error(ncclResult_t nccl_result, const std::string& msg) {
#ifndef REPLACE_ENFORCE_GLOG
throw std::runtime_error(msg);
#else
LOG(FATAL) << msg;
#endif
}
#endif // not(__APPLE__) and PADDLE_WITH_NCCL
......
......@@ -184,12 +184,12 @@ void InitDevices(bool init_p2p, const std::vector<int> devices) {
}
// Throw some information when CPU instructions mismatch.
#define AVX_GUIDE(compiletime, runtime) \
LOG(FATAL) \
<< "This version is compiled on higher instruction(" #compiletime \
") system, you may encounter illegal instruction error running on" \
" your local CPU machine. Please reinstall the " #runtime \
" version or compile from source code."
#define AVX_GUIDE(compiletime, runtime) \
PADDLE_THROW(platform::errors::Unavailable( \
"This version is compiled on higher instruction(" #compiletime \
") system, you may encounter illegal instruction error running on" \
" your local CPU machine. Please reinstall the " #runtime \
" version or compile from source code."))
#ifdef __AVX512F__
if (!platform::MayIUse(platform::avx512f)) {
......
......@@ -117,7 +117,9 @@ py::dtype PaddleDTypeToNumpyDType(PaddleDType dtype) {
dt = py::dtype::of<float>();
break;
default:
LOG(FATAL) << "unsupported dtype";
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported data type. Now only supports INT32, INT64 and "
"FLOAT32."));
}
return dt;
......@@ -150,7 +152,9 @@ size_t PaddleGetDTypeSize(PaddleDType dt) {
size = sizeof(float);
break;
default:
LOG(FATAL) << "unsupported dtype";
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported data type. Now only supports INT32, INT64 and "
"FLOAT32."));
}
return size;
}
......@@ -172,7 +176,9 @@ py::array ZeroCopyTensorToNumpy(ZeroCopyTensor &tensor) { // NOLINT
tensor.copy_to_cpu<float>(static_cast<float *>(array.mutable_data()));
break;
default:
LOG(FATAL) << "unsupported dtype";
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported data type. Now only supports INT32, INT64 and "
"FLOAT32."));
}
return array;
}
......@@ -252,7 +258,9 @@ void BindPaddleBuf(py::module *m) {
auto size = self.length() / sizeof(float);
l = py::cast(std::vector<float>(data, data + size));
} else {
LOG(FATAL) << "unsupported dtype";
PADDLE_THROW(platform::errors::Unimplemented(
"Unsupported data type. Now only supports INT32, INT64 and "
"FLOAT32."));
}
return l;
})
......
......@@ -158,7 +158,13 @@ fi
HAS_BOOST_GET=`git diff -U0 upstream/$BRANCH |grep "^+" |grep -o -m 1 "boost::get" || true`
if [ ${HAS_BOOST_GET} ] && [ "${GIT_PR_ID}" != "" ]; then
echo_line="boost::get is not recommended, because it may throw an bad_get exception without any stack information, so please use BOOST_GET(_**)(dtype, value) series macros here. If these macros cannot meet your needs, please use try-catch to handle boost::get and specify chenwhql (Recommend), luotao1 or lanxianghit review and approve.\n"
    echo_line="boost::get is not recommended, because it may throw a bad_get exception without any stack information, so please use BOOST_GET(_**)(dtype, value) series macros here. If these macros cannot meet your needs, please use try-catch to handle boost::get and request chenwhql (Recommend), luotao1 or lanxianghit review and approve.\n"
check_approval 1 6836917 47554610 22561442
fi
HAS_LOG_FATAL=`git diff -U0 upstream/$BRANCH |grep "^+" |grep -o -m 1 "LOG(FATAL)" || true`
if [ ${HAS_LOG_FATAL} ] && [ "${GIT_PR_ID}" != "" ]; then
echo_line="LOG(FATAL) is not recommended, because it will throw exception without standard stack information, so please use PADDLE_THROW macro here. If you have to use LOG(FATAL) here, please request chenwhql (Recommend), luotao1 or lanxianghit review and approve.\n"
check_approval 1 6836917 47554610 22561442
fi
......@@ -190,7 +196,7 @@ ALL_PADDLE_CHECK=`git diff -U0 upstream/$BRANCH |grep "^+" |grep -zoE "(PADDLE_E
VALID_PADDLE_CHECK=`echo "$ALL_PADDLE_CHECK" | grep -zoE '(PADDLE_ENFORCE[A-Z_]{0,9}|PADDLE_THROW)\((.[^,;]+,)*.[^";]*(errors::).[^"]*".[^";]{20,}.[^;]*\);\s' || true`
INVALID_PADDLE_CHECK=`echo "$ALL_PADDLE_CHECK" |grep -vxF "$VALID_PADDLE_CHECK" || true`
if [ "${INVALID_PADDLE_CHECK}" != "" ] && [ "${GIT_PR_ID}" != "" ]; then
echo_line="The error message you wrote in PADDLE_ENFORCE{_**} or PADDLE_THROW does not meet our error message writing specification. Possible errors include 1. the error message is empty / 2. the error message is too short / 3. the error type is not specified. Please read the specification [ https://github.com/PaddlePaddle/Paddle/wiki/Paddle-Error-Message-Writing-Specification ], then refine the error message. If it is a mismatch, please specify chenwhql (Recommend), luotao1 or lanxianghit review and approve.\nThe PADDLE_ENFORCE{_**} or PADDLE_THROW entries that do not meet the specification are as follows:\n${INVALID_PADDLE_CHECK}\n"
echo_line="The error message you wrote in PADDLE_ENFORCE{_**} or PADDLE_THROW does not meet our error message writing specification. Possible errors include 1. the error message is empty / 2. the error message is too short / 3. the error type is not specified. Please read the specification [ https://github.com/PaddlePaddle/Paddle/wiki/Paddle-Error-Message-Writing-Specification ], then refine the error message. If it is a mismatch, please request chenwhql (Recommend), luotao1 or lanxianghit review and approve.\nThe PADDLE_ENFORCE{_**} or PADDLE_THROW entries that do not meet the specification are as follows:\n${INVALID_PADDLE_CHECK}\n"
check_approval 1 6836917 47554610 22561442
fi
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册