未验证 提交 1e4f627d 编写于 作者: Z Zhenghai Zhang 提交者: GitHub

[clang-tidy] NO.12 enable modernize-use-nullptr check (#55800)

上级 b67715a4
...@@ -184,7 +184,7 @@ modernize-redundant-void-arg, ...@@ -184,7 +184,7 @@ modernize-redundant-void-arg,
-modernize-use-equals-default, -modernize-use-equals-default,
-modernize-use-equals-delete, -modernize-use-equals-delete,
-modernize-use-noexcept, -modernize-use-noexcept,
-modernize-use-nullptr, modernize-use-nullptr,
modernize-use-override, modernize-use-override,
-modernize-use-transparent-functors, -modernize-use-transparent-functors,
-modernize-use-uncaught-exceptions, -modernize-use-uncaught-exceptions,
......
...@@ -59,8 +59,8 @@ class BufferedLineFileReader { ...@@ -59,8 +59,8 @@ class BufferedLineFileReader {
int read_lines(T* reader, LineFunc func, int skip_lines) { int read_lines(T* reader, LineFunc func, int skip_lines) {
int lines = 0; int lines = 0;
size_t ret = 0; size_t ret = 0;
char* ptr = NULL; char* ptr = nullptr;
char* eol = NULL; char* eol = nullptr;
total_len_ = 0; total_len_ = 0;
error_line_ = 0; error_line_ = 0;
...@@ -70,7 +70,7 @@ class BufferedLineFileReader { ...@@ -70,7 +70,7 @@ class BufferedLineFileReader {
total_len_ += ret; total_len_ += ret;
ptr = buff_; ptr = buff_;
eol = reinterpret_cast<char*>(memchr(ptr, '\n', ret)); eol = reinterpret_cast<char*>(memchr(ptr, '\n', ret));
while (eol != NULL) { while (eol != nullptr) {
int size = static_cast<int>((eol - ptr) + 1); int size = static_cast<int>((eol - ptr) + 1);
x.append(ptr, size - 1); x.append(ptr, size - 1);
++lines; ++lines;
...@@ -1106,13 +1106,13 @@ void MultiSlotInMemoryDataFeed::GetMsgFromLogKey(const std::string& log_key, ...@@ -1106,13 +1106,13 @@ void MultiSlotInMemoryDataFeed::GetMsgFromLogKey(const std::string& log_key,
uint32_t* cmatch, uint32_t* cmatch,
uint32_t* rank) { uint32_t* rank) {
std::string searchid_str = log_key.substr(16, 16); std::string searchid_str = log_key.substr(16, 16);
*search_id = (uint64_t)strtoull(searchid_str.c_str(), NULL, 16); *search_id = (uint64_t)strtoull(searchid_str.c_str(), nullptr, 16);
std::string cmatch_str = log_key.substr(11, 3); std::string cmatch_str = log_key.substr(11, 3);
*cmatch = (uint32_t)strtoul(cmatch_str.c_str(), NULL, 16); *cmatch = (uint32_t)strtoul(cmatch_str.c_str(), nullptr, 16);
std::string rank_str = log_key.substr(14, 2); std::string rank_str = log_key.substr(14, 2);
*rank = (uint32_t)strtoul(rank_str.c_str(), NULL, 16); *rank = (uint32_t)strtoul(rank_str.c_str(), nullptr, 16);
} }
int MultiSlotInMemoryDataFeed::ParseInstanceFromSo( int MultiSlotInMemoryDataFeed::ParseInstanceFromSo(
...@@ -1657,8 +1657,8 @@ bool MultiSlotFileInstantDataFeed::Preprocess(const std::string& filename) { ...@@ -1657,8 +1657,8 @@ bool MultiSlotFileInstantDataFeed::Preprocess(const std::string& filename) {
fstat(fd_, &sb); fstat(fd_, &sb);
end_ = static_cast<size_t>(sb.st_size); end_ = static_cast<size_t>(sb.st_size);
buffer_ = buffer_ = reinterpret_cast<char*>(
reinterpret_cast<char*>(mmap(NULL, end_, PROT_READ, MAP_PRIVATE, fd_, 0)); mmap(nullptr, end_, PROT_READ, MAP_PRIVATE, fd_, 0));
PADDLE_ENFORCE_NE( PADDLE_ENFORCE_NE(
buffer_, buffer_,
MAP_FAILED, MAP_FAILED,
...@@ -2401,11 +2401,12 @@ static void parser_log_key(const std::string& log_key, ...@@ -2401,11 +2401,12 @@ static void parser_log_key(const std::string& log_key,
uint32_t* cmatch, uint32_t* cmatch,
uint32_t* rank) { uint32_t* rank) {
std::string searchid_str = log_key.substr(16, 16); std::string searchid_str = log_key.substr(16, 16);
*search_id = static_cast<uint64_t>(strtoull(searchid_str.c_str(), NULL, 16)); *search_id =
static_cast<uint64_t>(strtoull(searchid_str.c_str(), nullptr, 16));
std::string cmatch_str = log_key.substr(11, 3); std::string cmatch_str = log_key.substr(11, 3);
*cmatch = static_cast<uint32_t>(strtoul(cmatch_str.c_str(), NULL, 16)); *cmatch = static_cast<uint32_t>(strtoul(cmatch_str.c_str(), nullptr, 16));
std::string rank_str = log_key.substr(14, 2); std::string rank_str = log_key.substr(14, 2);
*rank = static_cast<uint32_t>(strtoul(rank_str.c_str(), NULL, 16)); *rank = static_cast<uint32_t>(strtoul(rank_str.c_str(), nullptr, 16));
} }
bool SlotRecordInMemoryDataFeed::ParseOneInstance(const std::string& line, bool SlotRecordInMemoryDataFeed::ParseOneInstance(const std::string& line,
......
...@@ -187,38 +187,42 @@ void AESCipher::BuildCipher( ...@@ -187,38 +187,42 @@ void AESCipher::BuildCipher(
m_cipher->reset(new CryptoPP::ECB_Mode<CryptoPP::AES>::Encryption); m_cipher->reset(new CryptoPP::ECB_Mode<CryptoPP::AES>::Encryption);
m_filter->reset(new CryptoPP::StreamTransformationFilter( m_filter->reset(new CryptoPP::StreamTransformationFilter(
*(*m_cipher).get(), *(*m_cipher).get(),
NULL, nullptr,
CryptoPP::BlockPaddingSchemeDef::PKCS_PADDING)); CryptoPP::BlockPaddingSchemeDef::PKCS_PADDING));
} else if (aes_cipher_name_ == "AES_ECB_PKCSPadding" && !for_encrypt) { } else if (aes_cipher_name_ == "AES_ECB_PKCSPadding" && !for_encrypt) {
m_cipher->reset(new CryptoPP::ECB_Mode<CryptoPP::AES>::Decryption); m_cipher->reset(new CryptoPP::ECB_Mode<CryptoPP::AES>::Decryption);
m_filter->reset(new CryptoPP::StreamTransformationFilter( m_filter->reset(new CryptoPP::StreamTransformationFilter(
*(*m_cipher).get(), *(*m_cipher).get(),
NULL, nullptr,
CryptoPP::BlockPaddingSchemeDef::PKCS_PADDING)); CryptoPP::BlockPaddingSchemeDef::PKCS_PADDING));
} else if (aes_cipher_name_ == "AES_CBC_PKCSPadding" && for_encrypt) { } else if (aes_cipher_name_ == "AES_CBC_PKCSPadding" && for_encrypt) {
m_cipher->reset(new CryptoPP::CBC_Mode<CryptoPP::AES>::Encryption); m_cipher->reset(new CryptoPP::CBC_Mode<CryptoPP::AES>::Encryption);
*need_iv = true; *need_iv = true;
m_filter->reset(new CryptoPP::StreamTransformationFilter( m_filter->reset(new CryptoPP::StreamTransformationFilter(
*(*m_cipher).get(), *(*m_cipher).get(),
NULL, nullptr,
CryptoPP::BlockPaddingSchemeDef::PKCS_PADDING)); CryptoPP::BlockPaddingSchemeDef::PKCS_PADDING));
} else if (aes_cipher_name_ == "AES_CBC_PKCSPadding" && !for_encrypt) { } else if (aes_cipher_name_ == "AES_CBC_PKCSPadding" && !for_encrypt) {
m_cipher->reset(new CryptoPP::CBC_Mode<CryptoPP::AES>::Decryption); m_cipher->reset(new CryptoPP::CBC_Mode<CryptoPP::AES>::Decryption);
*need_iv = true; *need_iv = true;
m_filter->reset(new CryptoPP::StreamTransformationFilter( m_filter->reset(new CryptoPP::StreamTransformationFilter(
*(*m_cipher).get(), *(*m_cipher).get(),
NULL, nullptr,
CryptoPP::BlockPaddingSchemeDef::PKCS_PADDING)); CryptoPP::BlockPaddingSchemeDef::PKCS_PADDING));
} else if (aes_cipher_name_ == "AES_CTR_NoPadding" && for_encrypt) { } else if (aes_cipher_name_ == "AES_CTR_NoPadding" && for_encrypt) {
m_cipher->reset(new CryptoPP::CTR_Mode<CryptoPP::AES>::Encryption); m_cipher->reset(new CryptoPP::CTR_Mode<CryptoPP::AES>::Encryption);
*need_iv = true; *need_iv = true;
m_filter->reset(new CryptoPP::StreamTransformationFilter( m_filter->reset(new CryptoPP::StreamTransformationFilter(
*(*m_cipher).get(), NULL, CryptoPP::BlockPaddingSchemeDef::NO_PADDING)); *(*m_cipher).get(),
nullptr,
CryptoPP::BlockPaddingSchemeDef::NO_PADDING));
} else if (aes_cipher_name_ == "AES_CTR_NoPadding" && !for_encrypt) { } else if (aes_cipher_name_ == "AES_CTR_NoPadding" && !for_encrypt) {
m_cipher->reset(new CryptoPP::CTR_Mode<CryptoPP::AES>::Decryption); m_cipher->reset(new CryptoPP::CTR_Mode<CryptoPP::AES>::Decryption);
*need_iv = true; *need_iv = true;
m_filter->reset(new CryptoPP::StreamTransformationFilter( m_filter->reset(new CryptoPP::StreamTransformationFilter(
*(*m_cipher).get(), NULL, CryptoPP::BlockPaddingSchemeDef::NO_PADDING)); *(*m_cipher).get(),
nullptr,
CryptoPP::BlockPaddingSchemeDef::NO_PADDING));
} else { } else {
PADDLE_THROW(paddle::platform::errors::Unimplemented( PADDLE_THROW(paddle::platform::errors::Unimplemented(
"Create cipher error. " "Create cipher error. "
...@@ -236,7 +240,7 @@ void AESCipher::BuildAuthEncCipher( ...@@ -236,7 +240,7 @@ void AESCipher::BuildAuthEncCipher(
*need_iv = true; *need_iv = true;
m_filter->reset(new CryptoPP::AuthenticatedEncryptionFilter( m_filter->reset(new CryptoPP::AuthenticatedEncryptionFilter(
*(*m_cipher).get(), *(*m_cipher).get(),
NULL, nullptr,
false, false,
tag_size_ / 8, tag_size_ / 8,
CryptoPP::DEFAULT_CHANNEL, CryptoPP::DEFAULT_CHANNEL,
...@@ -258,7 +262,7 @@ void AESCipher::BuildAuthDecCipher( ...@@ -258,7 +262,7 @@ void AESCipher::BuildAuthDecCipher(
*need_iv = true; *need_iv = true;
m_filter->reset(new CryptoPP::AuthenticatedDecryptionFilter( m_filter->reset(new CryptoPP::AuthenticatedDecryptionFilter(
*(*m_cipher).get(), *(*m_cipher).get(),
NULL, nullptr,
CryptoPP::AuthenticatedDecryptionFilter::DEFAULT_FLAGS, CryptoPP::AuthenticatedDecryptionFilter::DEFAULT_FLAGS,
tag_size_ / 8, tag_size_ / 8,
CryptoPP::BlockPaddingSchemeDef::NO_PADDING)); CryptoPP::BlockPaddingSchemeDef::NO_PADDING));
......
...@@ -60,7 +60,7 @@ static std::shared_ptr<FILE> fs_open_internal(const std::string& path, ...@@ -60,7 +60,7 @@ static std::shared_ptr<FILE> fs_open_internal(const std::string& path,
bool is_pipe, bool is_pipe,
const std::string& mode, const std::string& mode,
size_t buffer_size, size_t buffer_size,
int* err_no = 0) { int* err_no = nullptr) {
std::shared_ptr<FILE> fp = nullptr; std::shared_ptr<FILE> fp = nullptr;
if (!is_pipe) { if (!is_pipe) {
......
...@@ -82,7 +82,7 @@ static int close_open_fds_internal() { ...@@ -82,7 +82,7 @@ static int close_open_fds_internal() {
break; break;
} }
linux_dirent* entry = NULL; linux_dirent* entry = nullptr;
for (int offset = 0; offset < bytes; offset += entry->d_reclen) { for (int offset = 0; offset < bytes; offset += entry->d_reclen) {
entry = reinterpret_cast<linux_dirent*>(buffer + offset); entry = reinterpret_cast<linux_dirent*>(buffer + offset);
...@@ -140,9 +140,9 @@ static int shell_popen_fork_internal(const char* real_cmd, ...@@ -140,9 +140,9 @@ static int shell_popen_fork_internal(const char* real_cmd,
close_open_fds_internal(); close_open_fds_internal();
#if defined(PADDLE_WITH_MUSL) #if defined(PADDLE_WITH_MUSL)
PCHECK(execl("/bin/sh", "sh", "-c", real_cmd, NULL) >= 0); PCHECK(execl("/bin/sh", "sh", "-c", real_cmd, nullptr) >= 0);
#else #else
PCHECK(execl("/bin/bash", "bash", "-c", real_cmd, NULL) >= 0); PCHECK(execl("/bin/bash", "bash", "-c", real_cmd, nullptr) >= 0);
#endif #endif
// Note: just for compilation. the child don't run this line. // Note: just for compilation. the child don't run this line.
_exit(0); _exit(0);
...@@ -179,7 +179,7 @@ std::shared_ptr<FILE> shell_popen(const std::string& cmd, ...@@ -179,7 +179,7 @@ std::shared_ptr<FILE> shell_popen(const std::string& cmd,
bool do_write = mode == "w"; bool do_write = mode == "w";
if (!(do_read || do_write)) { if (!(do_read || do_write)) {
*err_no = -1; *err_no = -1;
return NULL; return nullptr;
} }
VLOG(3) << "Opening pipe[" << cmd << "] with mode[" << mode << "]"; VLOG(3) << "Opening pipe[" << cmd << "] with mode[" << mode << "]";
...@@ -189,7 +189,7 @@ std::shared_ptr<FILE> shell_popen(const std::string& cmd, ...@@ -189,7 +189,7 @@ std::shared_ptr<FILE> shell_popen(const std::string& cmd,
int pipe_fds[2]; int pipe_fds[2];
if (pipe(pipe_fds) != 0) { if (pipe(pipe_fds) != 0) {
*err_no = -1; *err_no = -1;
return NULL; return nullptr;
} }
int parent_end = 0; int parent_end = 0;
int child_end = 0; int child_end = 0;
...@@ -212,11 +212,11 @@ std::shared_ptr<FILE> shell_popen(const std::string& cmd, ...@@ -212,11 +212,11 @@ std::shared_ptr<FILE> shell_popen(const std::string& cmd,
close(child_end); close(child_end);
FILE* fp = NULL; FILE* fp = nullptr;
if ((fp = fdopen(parent_end, mode.c_str())) == NULL) { if ((fp = fdopen(parent_end, mode.c_str())) == nullptr) {
*err_no = -1; *err_no = -1;
signal(SIGCHLD, old_handler); signal(SIGCHLD, old_handler);
return NULL; return nullptr;
} }
return {fp, [cmd, child_pid, old_handler, err_no, status](FILE* fp) { return {fp, [cmd, child_pid, old_handler, err_no, status](FILE* fp) {
...@@ -281,7 +281,7 @@ static int shell_p2open_fork_internal(const char* real_cmd, ...@@ -281,7 +281,7 @@ static int shell_p2open_fork_internal(const char* real_cmd,
} }
close_open_fds_internal(); close_open_fds_internal();
if (execl("/bin/sh", "sh", "-c", real_cmd, NULL) < 0) { if (execl("/bin/sh", "sh", "-c", real_cmd, nullptr) < 0) {
return -1; return -1;
} }
exit(127); exit(127);
...@@ -302,10 +302,10 @@ std::pair<std::shared_ptr<FILE>, std::shared_ptr<FILE>> shell_p2open( ...@@ -302,10 +302,10 @@ std::pair<std::shared_ptr<FILE>, std::shared_ptr<FILE>> shell_p2open(
int pipein_fds[2]; int pipein_fds[2];
int pipeout_fds[2]; int pipeout_fds[2];
if (pipe(pipein_fds) != 0) { if (pipe(pipein_fds) != 0) {
return {NULL, NULL}; return {nullptr, nullptr};
} }
if (pipe(pipeout_fds) != 0) { if (pipe(pipeout_fds) != 0) {
return {NULL, NULL}; return {nullptr, nullptr};
} }
int child_pid = int child_pid =
...@@ -317,7 +317,7 @@ std::pair<std::shared_ptr<FILE>, std::shared_ptr<FILE>> shell_p2open( ...@@ -317,7 +317,7 @@ std::pair<std::shared_ptr<FILE>, std::shared_ptr<FILE>> shell_p2open(
fcntl(pipeout_fds[1], F_SETFD, FD_CLOEXEC); fcntl(pipeout_fds[1], F_SETFD, FD_CLOEXEC);
std::shared_ptr<int> child_life = { std::shared_ptr<int> child_life = {
NULL, [child_pid, cmd](void*) { nullptr, [child_pid, cmd](void*) {
if (shell_verbose()) { if (shell_verbose()) {
LOG(INFO) << "Closing bidirectional pipe[" << cmd << "]"; LOG(INFO) << "Closing bidirectional pipe[" << cmd << "]";
} }
...@@ -340,9 +340,9 @@ std::pair<std::shared_ptr<FILE>, std::shared_ptr<FILE>> shell_p2open( ...@@ -340,9 +340,9 @@ std::pair<std::shared_ptr<FILE>, std::shared_ptr<FILE>> shell_p2open(
}}; }};
FILE* in_fp; FILE* in_fp;
PCHECK((in_fp = fdopen(pipein_fds[0], "r")) != NULL); PCHECK((in_fp = fdopen(pipein_fds[0], "r")) != nullptr);
FILE* out_fp; FILE* out_fp;
PCHECK((out_fp = fdopen(pipeout_fds[1], "w")) != NULL); PCHECK((out_fp = fdopen(pipeout_fds[1], "w")) != nullptr);
return {{in_fp, [child_life](FILE* fp) { PCHECK(fclose(fp) == 0); }}, return {{in_fp, [child_life](FILE* fp) { PCHECK(fclose(fp) == 0); }},
{out_fp, [child_life](FILE* fp) { PCHECK(fclose(fp) == 0); }}}; {out_fp, [child_life](FILE* fp) { PCHECK(fclose(fp) == 0); }}};
#endif #endif
......
...@@ -291,7 +291,7 @@ class FuseAllReduceOpPass : public ir::Pass { ...@@ -291,7 +291,7 @@ class FuseAllReduceOpPass : public ir::Pass {
const platform::BKCLCommunicator *multi_bkcl_ctxs, const platform::BKCLCommunicator *multi_bkcl_ctxs,
#endif #endif
ir::Graph *result) const { ir::Graph *result) const {
details::FusedAllReduceOpHandle *op_handle = NULL; details::FusedAllReduceOpHandle *op_handle = nullptr;
if (is_grad_merge) { if (is_grad_merge) {
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
op_handle = new details::FusedGradMergeAllReduceOpHandle( op_handle = new details::FusedGradMergeAllReduceOpHandle(
......
...@@ -55,7 +55,7 @@ __pd_give PD_Config* PD_ConfigCreate() { ...@@ -55,7 +55,7 @@ __pd_give PD_Config* PD_ConfigCreate() {
} }
void PD_ConfigDestroy(__pd_take PD_Config* pd_config) { void PD_ConfigDestroy(__pd_take PD_Config* pd_config) {
if (pd_config != NULL) { if (pd_config != nullptr) {
delete reinterpret_cast<Config*>(pd_config); delete reinterpret_cast<Config*>(pd_config);
} }
} }
......
...@@ -68,7 +68,7 @@ __pd_give PD_IOInfos* PD_PredictorGetInputInfos( ...@@ -68,7 +68,7 @@ __pd_give PD_IOInfos* PD_PredictorGetInputInfos(
PD_IOInfos* input_infos = new PD_IOInfos; PD_IOInfos* input_infos = new PD_IOInfos;
input_infos->size = names.size(); input_infos->size = names.size();
input_infos->io_info = names.empty() ? NULL : new PD_IOInfo*[names.size()]; input_infos->io_info = names.empty() ? nullptr : new PD_IOInfo*[names.size()];
for (size_t i = 0; i < names.size(); i++) { for (size_t i = 0; i < names.size(); i++) {
const std::string& name = names[i]; const std::string& name = names[i];
input_infos->io_info[i] = new PD_IOInfo; input_infos->io_info[i] = new PD_IOInfo;
...@@ -99,7 +99,8 @@ __pd_give PD_IOInfos* PD_PredictorGetOutputInfos( ...@@ -99,7 +99,8 @@ __pd_give PD_IOInfos* PD_PredictorGetOutputInfos(
PD_IOInfos* output_infos = new PD_IOInfos; PD_IOInfos* output_infos = new PD_IOInfos;
output_infos->size = names.size(); output_infos->size = names.size();
output_infos->io_info = names.empty() ? NULL : new PD_IOInfo*[names.size()]; output_infos->io_info =
names.empty() ? nullptr : new PD_IOInfo*[names.size()];
for (size_t i = 0; i < names.size(); i++) { for (size_t i = 0; i < names.size(); i++) {
const std::string& name = names[i]; const std::string& name = names[i];
output_infos->io_info[i] = new PD_IOInfo; output_infos->io_info[i] = new PD_IOInfo;
......
...@@ -20,7 +20,7 @@ ...@@ -20,7 +20,7 @@
#define DESTROY_ONE_DIM_ARRAY(type) \ #define DESTROY_ONE_DIM_ARRAY(type) \
void PD_OneDimArray##type##Destroy(__pd_take PD_OneDimArray##type* array) { \ void PD_OneDimArray##type##Destroy(__pd_take PD_OneDimArray##type* array) { \
if (array != NULL) { \ if (array != nullptr) { \
delete[] array->data; \ delete[] array->data; \
delete array; \ delete array; \
} \ } \
...@@ -30,7 +30,7 @@ ...@@ -30,7 +30,7 @@
const std::vector<vec_type>& vec) { \ const std::vector<vec_type>& vec) { \
PD_OneDimArray##Type* array = new PD_OneDimArray##Type; \ PD_OneDimArray##Type* array = new PD_OneDimArray##Type; \
array->size = vec.size(); \ array->size = vec.size(); \
array->data = vec.empty() ? NULL : new type[vec.size()]; \ array->data = vec.empty() ? nullptr : new type[vec.size()]; \
for (size_t index = 0; index < vec.size(); ++index) { \ for (size_t index = 0; index < vec.size(); ++index) { \
array->data[index] = vec[index]; \ array->data[index] = vec[index]; \
} \ } \
...@@ -40,7 +40,7 @@ ...@@ -40,7 +40,7 @@
std::vector<vec_type> CvtOneDimArrayToVec##Type( \ std::vector<vec_type> CvtOneDimArrayToVec##Type( \
__pd_keep const PD_OneDimArray##Type* array) { \ __pd_keep const PD_OneDimArray##Type* array) { \
std::vector<vec_type> vec; \ std::vector<vec_type> vec; \
if (array != NULL) { \ if (array != nullptr) { \
vec.resize(array->size); \ vec.resize(array->size); \
for (size_t index = 0; index < array->size; ++index) { \ for (size_t index = 0; index < array->size; ++index) { \
vec[index] = array->data[index]; \ vec[index] = array->data[index]; \
...@@ -68,7 +68,7 @@ ONE_DIM_ARRAY_UTILS_FUNC_IMPL(int64_t, Int64, int64_t) ...@@ -68,7 +68,7 @@ ONE_DIM_ARRAY_UTILS_FUNC_IMPL(int64_t, Int64, int64_t)
#undef DESTROY_ONE_DIM_ARRAY #undef DESTROY_ONE_DIM_ARRAY
void PD_OneDimArrayCstrDestroy(__pd_take PD_OneDimArrayCstr* array) { void PD_OneDimArrayCstrDestroy(__pd_take PD_OneDimArrayCstr* array) {
if (array != NULL) { if (array != nullptr) {
if (array->size != 0) { if (array->size != 0) {
for (size_t index = 0; index < array->size; ++index) { for (size_t index = 0; index < array->size; ++index) {
delete[] array->data[index]; delete[] array->data[index];
...@@ -80,11 +80,11 @@ void PD_OneDimArrayCstrDestroy(__pd_take PD_OneDimArrayCstr* array) { ...@@ -80,11 +80,11 @@ void PD_OneDimArrayCstrDestroy(__pd_take PD_OneDimArrayCstr* array) {
} }
void PD_CstrDestroy(__pd_take PD_Cstr* cstr) { void PD_CstrDestroy(__pd_take PD_Cstr* cstr) {
if (cstr != NULL) { if (cstr != nullptr) {
if (cstr->size != 0) { if (cstr->size != 0) {
cstr->size = 0; cstr->size = 0;
delete[] cstr->data; delete[] cstr->data;
cstr->data = NULL; cstr->data = nullptr;
} }
delete cstr; delete cstr;
} }
...@@ -95,7 +95,7 @@ __pd_give PD_OneDimArrayCstr* CvtVecToOneDimArrayCstr( ...@@ -95,7 +95,7 @@ __pd_give PD_OneDimArrayCstr* CvtVecToOneDimArrayCstr(
const std::vector<std::string>& vec) { const std::vector<std::string>& vec) {
PD_OneDimArrayCstr* array = new PD_OneDimArrayCstr; PD_OneDimArrayCstr* array = new PD_OneDimArrayCstr;
array->size = vec.size(); array->size = vec.size();
array->data = vec.empty() ? NULL : new char*[vec.size()]; array->data = vec.empty() ? nullptr : new char*[vec.size()];
for (size_t index = 0u; index < vec.size(); ++index) { for (size_t index = 0u; index < vec.size(); ++index) {
array->data[index] = new char[vec[index].size() + 1]; array->data[index] = new char[vec[index].size() + 1];
memcpy(array->data[index], vec[index].c_str(), vec[index].size() + 1); memcpy(array->data[index], vec[index].c_str(), vec[index].size() + 1);
...@@ -116,7 +116,7 @@ __pd_give PD_Cstr* CvtStrToCstr(const std::string& str) { ...@@ -116,7 +116,7 @@ __pd_give PD_Cstr* CvtStrToCstr(const std::string& str) {
PD_Cstr* cstr = new PD_Cstr; PD_Cstr* cstr = new PD_Cstr;
if (str.empty()) { if (str.empty()) {
cstr->size = 0; cstr->size = 0;
cstr->data = NULL; cstr->data = nullptr;
} else { } else {
cstr->size = str.length() + 1; cstr->size = str.length() + 1;
cstr->data = new char[str.length() + 1]; cstr->data = new char[str.length() + 1];
...@@ -128,7 +128,7 @@ __pd_give PD_Cstr* CvtStrToCstr(const std::string& str) { ...@@ -128,7 +128,7 @@ __pd_give PD_Cstr* CvtStrToCstr(const std::string& str) {
#define DESTROY_TWO_DIM_ARRAY(type) \ #define DESTROY_TWO_DIM_ARRAY(type) \
void PD_TwoDimArray##type##Destroy(__pd_take PD_TwoDimArray##type* array) { \ void PD_TwoDimArray##type##Destroy(__pd_take PD_TwoDimArray##type* array) { \
if (array != NULL) { \ if (array != nullptr) { \
if (array->size != 0) { \ if (array->size != 0) { \
for (size_t index = 0; index < array->size; ++index) { \ for (size_t index = 0; index < array->size; ++index) { \
PD_OneDimArray##type##Destroy(array->data[index]); \ PD_OneDimArray##type##Destroy(array->data[index]); \
...@@ -143,7 +143,8 @@ __pd_give PD_Cstr* CvtStrToCstr(const std::string& str) { ...@@ -143,7 +143,8 @@ __pd_give PD_Cstr* CvtStrToCstr(const std::string& str) {
const std::vector<std::vector<vec_type>>& vec) { \ const std::vector<std::vector<vec_type>>& vec) { \
PD_TwoDimArray##Type* array = new PD_TwoDimArray##Type; \ PD_TwoDimArray##Type* array = new PD_TwoDimArray##Type; \
array->size = vec.size(); \ array->size = vec.size(); \
array->data = vec.empty() ? NULL : new PD_OneDimArray##Type*[vec.size()]; \ array->data = \
vec.empty() ? nullptr : new PD_OneDimArray##Type*[vec.size()]; \
for (size_t index = 0; index < vec.size(); ++index) { \ for (size_t index = 0; index < vec.size(); ++index) { \
array->data[index] = CvtVecToOneDimArray##Type(vec[index]); \ array->data[index] = CvtVecToOneDimArray##Type(vec[index]); \
} \ } \
...@@ -153,7 +154,7 @@ __pd_give PD_Cstr* CvtStrToCstr(const std::string& str) { ...@@ -153,7 +154,7 @@ __pd_give PD_Cstr* CvtStrToCstr(const std::string& str) {
std::vector<std::vector<vec_type>> CvtTwoDimArrayToVec##Type( \ std::vector<std::vector<vec_type>> CvtTwoDimArrayToVec##Type( \
__pd_keep const PD_TwoDimArray##Type* array) { \ __pd_keep const PD_TwoDimArray##Type* array) { \
std::vector<std::vector<vec_type>> vec; \ std::vector<std::vector<vec_type>> vec; \
if (array != NULL && array->size != 0) { \ if (array != nullptr && array->size != 0) { \
vec.resize(array->size); \ vec.resize(array->size); \
for (size_t index = 0; index < array->size; ++index) { \ for (size_t index = 0; index < array->size; ++index) { \
vec[index] = CvtOneDimArrayToVec##Type((array->data)[index]); \ vec[index] = CvtOneDimArrayToVec##Type((array->data)[index]); \
...@@ -182,17 +183,17 @@ extern "C" { ...@@ -182,17 +183,17 @@ extern "C" {
#endif #endif
void PD_IOInfoDestroy(__pd_take PD_IOInfo* io_info) { void PD_IOInfoDestroy(__pd_take PD_IOInfo* io_info) {
if (io_info != NULL) { if (io_info != nullptr) {
PD_CstrDestroy(io_info->name); PD_CstrDestroy(io_info->name);
io_info->name = NULL; io_info->name = nullptr;
PD_OneDimArrayInt64Destroy(io_info->shape); PD_OneDimArrayInt64Destroy(io_info->shape);
io_info->shape = NULL; io_info->shape = nullptr;
delete io_info; delete io_info;
} }
} }
void PD_IOInfosDestroy(__pd_take PD_IOInfos* io_infos) { void PD_IOInfosDestroy(__pd_take PD_IOInfos* io_infos) {
if (io_infos != NULL) { if (io_infos != nullptr) {
if (io_infos->size != 0) { if (io_infos->size != 0) {
for (size_t index = 0; index < io_infos->size; ++index) { for (size_t index = 0; index < io_infos->size; ++index) {
PD_IOInfoDestroy(io_infos->io_info[index]); PD_IOInfoDestroy(io_infos->io_info[index]);
...@@ -200,7 +201,7 @@ void PD_IOInfosDestroy(__pd_take PD_IOInfos* io_infos) { ...@@ -200,7 +201,7 @@ void PD_IOInfosDestroy(__pd_take PD_IOInfos* io_infos) {
io_infos->size = 0; io_infos->size = 0;
} }
delete[] io_infos->io_info; delete[] io_infos->io_info;
io_infos->io_info = NULL; io_infos->io_info = nullptr;
delete io_infos; delete io_infos;
} }
} }
......
...@@ -30,7 +30,7 @@ namespace allocation { ...@@ -30,7 +30,7 @@ namespace allocation {
class StubAllocation : public Allocation { class StubAllocation : public Allocation {
public: public:
explicit StubAllocation(size_t size) explicit StubAllocation(size_t size)
: Allocation(0, size, platform::CPUPlace()) {} : Allocation(nullptr, size, platform::CPUPlace()) {}
}; };
TEST(BestFitAllocator, test_allocation) { TEST(BestFitAllocator, test_allocation) {
......
...@@ -269,7 +269,7 @@ std::shared_ptr<MemoryMapWriterAllocation> AllocateMemoryMapWriterAllocation( ...@@ -269,7 +269,7 @@ std::shared_ptr<MemoryMapWriterAllocation> AllocateMemoryMapWriterAllocation(
platform::errors::Unavailable( platform::errors::Unavailable(
"Fruncate a file to a specified length failed!")); "Fruncate a file to a specified length failed!"));
void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); void *ptr = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
PADDLE_ENFORCE_NE(ptr, PADDLE_ENFORCE_NE(ptr,
MAP_FAILED, MAP_FAILED,
platform::errors::Unavailable( platform::errors::Unavailable(
......
...@@ -307,8 +307,8 @@ class CPUPyramidHashOPKernel : public framework::OpKernel<T> { ...@@ -307,8 +307,8 @@ class CPUPyramidHashOPKernel : public framework::OpKernel<T> {
top_offset.resize(offset.size()); top_offset.resize(offset.size());
top_offset[0] = 0; top_offset[0] = 0;
math::bloomfilter* _filter = NULL; math::bloomfilter* _filter = nullptr;
math::bloomfilter* _black_filter = NULL; math::bloomfilter* _black_filter = nullptr;
if (use_filter) { if (use_filter) {
if (white_list_len != 0) { if (white_list_len != 0) {
_filter = (math::bloomfilter*)_blobs_1->data<float>(); _filter = (math::bloomfilter*)_blobs_1->data<float>();
......
...@@ -259,13 +259,13 @@ static int ConnectAddr(const std::string& ep, const CommHead head) { ...@@ -259,13 +259,13 @@ static int ConnectAddr(const std::string& ep, const CommHead head) {
server_addr.sin_family = AF_INET; server_addr.sin_family = AF_INET;
server_addr.sin_port = htons(port); server_addr.sin_port = htons(port);
char* ip = NULL; char* ip = nullptr;
struct hostent* hp = NULL; struct hostent* hp = nullptr;
// sleep for get_host_by_name_time seconds. // sleep for get_host_by_name_time seconds.
for (int i = 0; 2 * i < FLAGS_get_host_by_name_time; i++) { for (int i = 0; 2 * i < FLAGS_get_host_by_name_time; i++) {
hp = gethostbyname(host.c_str()); hp = gethostbyname(host.c_str());
if (hp != NULL) { if (hp != nullptr) {
break; break;
} }
std::this_thread::sleep_for(std::chrono::seconds(2)); std::this_thread::sleep_for(std::chrono::seconds(2));
...@@ -276,7 +276,7 @@ static int ConnectAddr(const std::string& ep, const CommHead head) { ...@@ -276,7 +276,7 @@ static int ConnectAddr(const std::string& ep, const CommHead head) {
platform::errors::InvalidArgument("Fail to get host by name %s.", host)); platform::errors::InvalidArgument("Fail to get host by name %s.", host));
int i = 0; int i = 0;
while (hp->h_addr_list[i] != NULL) { while (hp->h_addr_list[i] != nullptr) {
ip = inet_ntoa(*(struct in_addr*)hp->h_addr_list[i]); ip = inet_ntoa(*(struct in_addr*)hp->h_addr_list[i]);
VLOG(3) << "gethostbyname host:" << host << " ->ip: " << ip; VLOG(3) << "gethostbyname host:" << host << " ->ip: " << ip;
break; break;
......
...@@ -348,7 +348,7 @@ void DisableSignalHandler() { ...@@ -348,7 +348,7 @@ void DisableSignalHandler() {
memset(&sig_action, 0, sizeof(sig_action)); memset(&sig_action, 0, sizeof(sig_action));
sigemptyset(&sig_action.sa_mask); sigemptyset(&sig_action.sa_mask);
sig_action.sa_handler = SIG_DFL; sig_action.sa_handler = SIG_DFL;
sigaction(signal_number, &sig_action, NULL); sigaction(signal_number, &sig_action, nullptr);
} }
#endif #endif
} }
...@@ -367,10 +367,10 @@ void CreateDumpFile(LPCSTR lpstrDumpFilePathName, ...@@ -367,10 +367,10 @@ void CreateDumpFile(LPCSTR lpstrDumpFilePathName,
HANDLE hDumpFile = CreateFile(lpstrDumpFilePathName, HANDLE hDumpFile = CreateFile(lpstrDumpFilePathName,
GENERIC_WRITE, GENERIC_WRITE,
0, 0,
NULL, nullptr,
CREATE_ALWAYS, CREATE_ALWAYS,
FILE_ATTRIBUTE_NORMAL, FILE_ATTRIBUTE_NORMAL,
NULL); nullptr);
MINIDUMP_EXCEPTION_INFORMATION dumpInfo; MINIDUMP_EXCEPTION_INFORMATION dumpInfo;
dumpInfo.ExceptionPointers = pException; dumpInfo.ExceptionPointers = pException;
dumpInfo.ThreadId = GetCurrentThreadId(); dumpInfo.ThreadId = GetCurrentThreadId();
...@@ -384,8 +384,8 @@ void CreateDumpFile(LPCSTR lpstrDumpFilePathName, ...@@ -384,8 +384,8 @@ void CreateDumpFile(LPCSTR lpstrDumpFilePathName,
hDumpFile, hDumpFile,
MiniDumpWithPrivateReadWriteMemory, MiniDumpWithPrivateReadWriteMemory,
&dumpInfo, &dumpInfo,
NULL, nullptr,
NULL); nullptr);
CloseHandle(hDumpFile); CloseHandle(hDumpFile);
} }
......
...@@ -41,7 +41,7 @@ void Timer::Pause() { ...@@ -41,7 +41,7 @@ void Timer::Pause() {
} }
void Timer::Resume() { void Timer::Resume() {
gettimeofday(&_start, NULL); gettimeofday(&_start, nullptr);
_paused = false; _paused = false;
} }
...@@ -54,7 +54,7 @@ double Timer::ElapsedMS() { return _elapsed / 1000.0; } ...@@ -54,7 +54,7 @@ double Timer::ElapsedMS() { return _elapsed / 1000.0; }
double Timer::ElapsedSec() { return _elapsed / 1000000.0; } double Timer::ElapsedSec() { return _elapsed / 1000000.0; }
int64_t Timer::Tickus() { int64_t Timer::Tickus() {
gettimeofday(&_now, NULL); gettimeofday(&_now, nullptr);
return (_now.tv_sec - _start.tv_sec) * 1000 * 1000L + return (_now.tv_sec - _start.tv_sec) * 1000 * 1000L +
(_now.tv_usec - _start.tv_usec); (_now.tv_usec - _start.tv_usec);
} }
......
...@@ -376,7 +376,7 @@ py::object ParsePyArray( ...@@ -376,7 +376,7 @@ py::object ParsePyArray(
numpy_value = py::object( numpy_value = py::object(
py::handle(PyTuple_GET_ITEM(args, kw_order_map["value"] - 1)), true); py::handle(PyTuple_GET_ITEM(args, kw_order_map["value"] - 1)), true);
} else { } else {
if (flag_kwargs && kws_map["value"] != NULL) { if (flag_kwargs && kws_map["value"] != nullptr) {
numpy_value = py::object(py::handle(kws_map["value"]), true); numpy_value = py::object(py::handle(kws_map["value"]), true);
} else { } else {
PADDLE_THROW(platform::errors::InvalidArgument( PADDLE_THROW(platform::errors::InvalidArgument(
...@@ -403,7 +403,7 @@ paddle::platform::Place ParsePlace( ...@@ -403,7 +403,7 @@ paddle::platform::Place ParsePlace(
place = CastPyArg2Place(PyTuple_GET_ITEM(args, kw_order_map["place"] - 1), place = CastPyArg2Place(PyTuple_GET_ITEM(args, kw_order_map["place"] - 1),
kw_order_map["place"] - 1); kw_order_map["place"] - 1);
} else { } else {
if (flag_kwargs && kws_map["place"] != NULL) { if (flag_kwargs && kws_map["place"] != nullptr) {
place = CastPyArg2Place(kws_map["place"], 0); place = CastPyArg2Place(kws_map["place"], 0);
} else { } else {
// default // default
...@@ -425,7 +425,7 @@ std::shared_ptr<TensorDistAttr> ParseDistAttrArgs( ...@@ -425,7 +425,7 @@ std::shared_ptr<TensorDistAttr> ParseDistAttrArgs(
dist_attr = CastPyArg2DistAttr( dist_attr = CastPyArg2DistAttr(
PyTuple_GET_ITEM(args, kw_order_map["dist_attr"] - 1), PyTuple_GET_ITEM(args, kw_order_map["dist_attr"] - 1),
kw_order_map["dist_attr"] - 1); kw_order_map["dist_attr"] - 1);
} else if (flag_kwargs && kws_map["dist_attr"] != NULL) { } else if (flag_kwargs && kws_map["dist_attr"] != nullptr) {
dist_attr = CastPyArg2DistAttr(kws_map["dist_attr"], 0); dist_attr = CastPyArg2DistAttr(kws_map["dist_attr"], 0);
} }
return dist_attr; return dist_attr;
...@@ -445,7 +445,7 @@ int ParseBooleanArgs(std::string key, ...@@ -445,7 +445,7 @@ int ParseBooleanArgs(std::string key,
res = static_cast<int>(CastPyArg2AttrBoolean( res = static_cast<int>(CastPyArg2AttrBoolean(
PyTuple_GET_ITEM(args, kw_order_map[key] - 1), kw_order_map[key] - 1)); PyTuple_GET_ITEM(args, kw_order_map[key] - 1), kw_order_map[key] - 1));
} else { } else {
if (flag_kwargs && kws_map[key] != NULL) { if (flag_kwargs && kws_map[key] != nullptr) {
res = static_cast<int>(CastPyArg2AttrBoolean(kws_map[key], 0)); res = static_cast<int>(CastPyArg2AttrBoolean(kws_map[key], 0));
} }
} }
...@@ -469,7 +469,7 @@ std::string ParseName(std::unordered_map<std::string, PyObject*> kws_map, ...@@ -469,7 +469,7 @@ std::string ParseName(std::unordered_map<std::string, PyObject*> kws_map,
} }
} else { } else {
if (flag_kwargs) { if (flag_kwargs) {
if ((kws_map["name"] == NULL) || (kws_map["name"] == Py_None)) { if ((kws_map["name"] == nullptr) || (kws_map["name"] == Py_None)) {
act_name = act_name =
egr::Controller::Instance().GenerateUniqueName(unique_name_prefix); egr::Controller::Instance().GenerateUniqueName(unique_name_prefix);
} else { } else {
...@@ -581,7 +581,7 @@ void AutoInitTensorByTensor(TensorObject* py_tensor_ptr, ...@@ -581,7 +581,7 @@ void AutoInitTensorByTensor(TensorObject* py_tensor_ptr,
CastPyArg2Tensor(PyTuple_GET_ITEM(args, kw_order_map["value"] - 1), CastPyArg2Tensor(PyTuple_GET_ITEM(args, kw_order_map["value"] - 1),
kw_order_map["value"] - 1); kw_order_map["value"] - 1);
} else { } else {
if (flag_kwargs && kws_map["value"] != NULL) { if (flag_kwargs && kws_map["value"] != nullptr) {
src_tensor = CastPyArg2Tensor(kws_map["value"], 0); src_tensor = CastPyArg2Tensor(kws_map["value"], 0);
} else { } else {
PADDLE_THROW(platform::errors::InvalidArgument( PADDLE_THROW(platform::errors::InvalidArgument(
...@@ -610,7 +610,7 @@ void AutoInitTensorByTensor(TensorObject* py_tensor_ptr, ...@@ -610,7 +610,7 @@ void AutoInitTensorByTensor(TensorObject* py_tensor_ptr,
PyTuple_GET_ITEM(args, kw_order_map["value"] - 1), PyTuple_GET_ITEM(args, kw_order_map["value"] - 1),
kw_order_map["value"] - 1); kw_order_map["value"] - 1);
} else { } else {
if (flag_kwargs && kws_map["value"] != NULL) { if (flag_kwargs && kws_map["value"] != nullptr) {
src_tensor = CastPyArg2FrameworkTensor(kws_map["value"], 0); src_tensor = CastPyArg2FrameworkTensor(kws_map["value"], 0);
} else { } else {
PADDLE_THROW(platform::errors::InvalidArgument( PADDLE_THROW(platform::errors::InvalidArgument(
...@@ -687,7 +687,7 @@ void AutoInitStringTensorByStringTensor( ...@@ -687,7 +687,7 @@ void AutoInitStringTensorByStringTensor(
CastPyArg2Tensor(PyTuple_GET_ITEM(args, kw_order_map["value"] - 1), CastPyArg2Tensor(PyTuple_GET_ITEM(args, kw_order_map["value"] - 1),
kw_order_map["value"] - 1); kw_order_map["value"] - 1);
} else { } else {
if (flag_kwargs && kws_map["value"] != NULL) { if (flag_kwargs && kws_map["value"] != nullptr) {
src_tensor = CastPyArg2Tensor(kws_map["value"], 0); src_tensor = CastPyArg2Tensor(kws_map["value"], 0);
} else { } else {
PADDLE_THROW(platform::errors::InvalidArgument( PADDLE_THROW(platform::errors::InvalidArgument(
...@@ -764,17 +764,17 @@ int TensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { ...@@ -764,17 +764,17 @@ int TensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
if (kwargs) flag_kwargs = true; if (kwargs) flag_kwargs = true;
// all kwargs // all kwargs
PyObject* kw_zero_copy = NULL; PyObject* kw_zero_copy = nullptr;
PyObject* kw_persistable = NULL; PyObject* kw_persistable = nullptr;
PyObject* kw_stop_gradient = NULL; PyObject* kw_stop_gradient = nullptr;
PyObject* kw_value = NULL; // receive PyArray or Tensor PyObject* kw_value = nullptr; // receive PyArray or Tensor
PyObject* kw_place = NULL; PyObject* kw_place = nullptr;
PyObject* kw_name = NULL; PyObject* kw_name = nullptr;
PyObject* kw_dims = NULL; PyObject* kw_dims = nullptr;
PyObject* kw_dtype = NULL; PyObject* kw_dtype = nullptr;
PyObject* kw_type = NULL; PyObject* kw_type = nullptr;
PyObject* kw_dist_attr = NULL; PyObject* kw_dist_attr = nullptr;
// the keywords argument // the keywords argument
static char* kwlist[] = {const_cast<char*>("value"), static char* kwlist[] = {const_cast<char*>("value"),
...@@ -787,7 +787,7 @@ int TensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { ...@@ -787,7 +787,7 @@ int TensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
const_cast<char*>("dtype"), const_cast<char*>("dtype"),
const_cast<char*>("type"), const_cast<char*>("type"),
const_cast<char*>("dist_attr"), const_cast<char*>("dist_attr"),
NULL}; nullptr};
// 'O' Store a Python object (without any conversion) in a C object pointer, // 'O' Store a Python object (without any conversion) in a C object pointer,
// '|' Indicates that the remaining arguments in the Python argument list are // '|' Indicates that the remaining arguments in the Python argument list are
...@@ -856,7 +856,7 @@ int TensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { ...@@ -856,7 +856,7 @@ int TensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
egr::Controller::Instance().GetExpectedPlace()); egr::Controller::Instance().GetExpectedPlace());
return 0; return 0;
} else { // no position args, all arguments are kwargs } else { // no position args, all arguments are kwargs
if (kw_value != NULL) { if (kw_value != nullptr) {
if (pybind11::detail::npy_api::get().PyArray_Check_(kw_value)) { if (pybind11::detail::npy_api::get().PyArray_Check_(kw_value)) {
VLOG(6) << "Calling case3's or case4's initializer"; VLOG(6) << "Calling case3's or case4's initializer";
AutoInitTensorByPyArray( AutoInitTensorByPyArray(
...@@ -884,7 +884,7 @@ int TensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { ...@@ -884,7 +884,7 @@ int TensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
"Please check your input first and make sure you are on the " "Please check your input first and make sure you are on the "
"right way.")); "right way."));
} }
} else if (kw_dtype != NULL && } else if (kw_dtype != nullptr &&
PyObject_TypeCheck(kw_dtype, g_vartype_pytype)) { PyObject_TypeCheck(kw_dtype, g_vartype_pytype)) {
VLOG(6) << "Calling case2's initializer"; VLOG(6) << "Calling case2's initializer";
...@@ -1122,18 +1122,18 @@ int StringTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { ...@@ -1122,18 +1122,18 @@ int StringTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
if (kwargs) flag_kwargs = true; if (kwargs) flag_kwargs = true;
// all kwargs // all kwargs
PyObject* kw_zero_copy = NULL; PyObject* kw_zero_copy = nullptr;
PyObject* kw_value = NULL; // receive PyArray or Tensor PyObject* kw_value = nullptr; // receive PyArray or Tensor
PyObject* kw_name = NULL; PyObject* kw_name = nullptr;
PyObject* kw_dims = NULL; PyObject* kw_dims = nullptr;
// the keywords argument // the keywords argument
static char* kwlist[] = {const_cast<char*>("value"), static char* kwlist[] = {const_cast<char*>("value"),
const_cast<char*>("zero_copy"), const_cast<char*>("zero_copy"),
const_cast<char*>("name"), const_cast<char*>("name"),
const_cast<char*>("dims"), const_cast<char*>("dims"),
NULL}; nullptr};
// 'O' Store a Python object (without any conversion) in a C object pointer, // 'O' Store a Python object (without any conversion) in a C object pointer,
// '|' Indicates that the remaining arguments in the Python argument list are // '|' Indicates that the remaining arguments in the Python argument list are
// optional. // optional.
...@@ -1188,7 +1188,7 @@ int StringTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { ...@@ -1188,7 +1188,7 @@ int StringTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
egr::Controller::Instance().GetExpectedPlace()); egr::Controller::Instance().GetExpectedPlace());
return 0; return 0;
} else { } else {
if (kw_value != NULL) { if (kw_value != nullptr) {
if (pybind11::detail::npy_api::get().PyArray_Check_(kw_value)) { if (pybind11::detail::npy_api::get().PyArray_Check_(kw_value)) {
VLOG(6) << "Calling case3's or case4's string initializer"; VLOG(6) << "Calling case3's or case4's string initializer";
AutoInitStringTensorByPyArray( AutoInitStringTensorByPyArray(
...@@ -1207,7 +1207,7 @@ int StringTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) { ...@@ -1207,7 +1207,7 @@ int StringTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
"Please check your input first and make sure you are on the " "Please check your input first and make sure you are on the "
"right way.")); "right way."));
} }
} else if (kw_dims != NULL) { } else if (kw_dims != nullptr) {
VLOG(6) << "Calling case2's string initializer."; VLOG(6) << "Calling case2's string initializer.";
std::unordered_map<std::string, Py_ssize_t> kw_order_map{{"dims", 1}, std::unordered_map<std::string, Py_ssize_t> kw_order_map{{"dims", 1},
{"name", 2}}; {"name", 2}};
...@@ -1311,7 +1311,7 @@ void AddPyMethodDefs(std::vector<PyMethodDef>* vector, PyMethodDef* methods) { ...@@ -1311,7 +1311,7 @@ void AddPyMethodDefs(std::vector<PyMethodDef>* vector, PyMethodDef* methods) {
} }
static void TensorDealloc(TensorObject* self) { static void TensorDealloc(TensorObject* self) {
if (self->weakrefs != NULL) if (self->weakrefs != nullptr)
PyObject_ClearWeakRefs(reinterpret_cast<PyObject*>(self)); PyObject_ClearWeakRefs(reinterpret_cast<PyObject*>(self));
self->tensor.~Tensor(); self->tensor.~Tensor();
Py_TYPE(self)->tp_free(reinterpret_cast<PyObject*>(self)); Py_TYPE(self)->tp_free(reinterpret_cast<PyObject*>(self));
......
...@@ -1258,7 +1258,7 @@ static PyObject* eager_api_set_master_grads(PyObject* self, ...@@ -1258,7 +1258,7 @@ static PyObject* eager_api_set_master_grads(PyObject* self,
PADDLE_ENFORCE_NE(grad, PADDLE_ENFORCE_NE(grad,
nullptr, nullptr,
paddle::platform::errors::Fatal( paddle::platform::errors::Fatal(
"Detected NULL grad" "Detected nullptr grad"
"Please check if you have manually cleared" "Please check if you have manually cleared"
"the grad inside autograd_meta")); "the grad inside autograd_meta"));
if ((*grad).initialized() && ((*grad).dtype() == phi::DataType::FLOAT16 || if ((*grad).initialized() && ((*grad).dtype() == phi::DataType::FLOAT16 ||
...@@ -1278,90 +1278,90 @@ PyMethodDef variable_functions[] = { ...@@ -1278,90 +1278,90 @@ PyMethodDef variable_functions[] = {
{"scale", {"scale",
(PyCFunction)(void (*)())eager_api_scale, (PyCFunction)(void (*)())eager_api_scale,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"_add_backward_final_hook", {"_add_backward_final_hook",
(PyCFunction)(void (*)())eager_api__add_backward_final_hook, (PyCFunction)(void (*)())eager_api__add_backward_final_hook,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"run_backward", {"run_backward",
(PyCFunction)(void (*)())eager_api_run_backward, (PyCFunction)(void (*)())eager_api_run_backward,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"run_partial_grad", {"run_partial_grad",
(PyCFunction)(void (*)())eager_api_run_partial_grad, (PyCFunction)(void (*)())eager_api_run_partial_grad,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"_get_custom_operator_inplace_map", {"_get_custom_operator_inplace_map",
(PyCFunction)(void (*)( (PyCFunction)(void (*)(
void))eager_api__get_custom_operator_inplace_reverse_idx, void))eager_api__get_custom_operator_inplace_reverse_idx,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"_run_custom_op", {"_run_custom_op",
(PyCFunction)(void (*)())eager_api_run_custom_op, (PyCFunction)(void (*)())eager_api_run_custom_op,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"tensor_copy", {"tensor_copy",
(PyCFunction)(void (*)())eager_api_tensor_copy, (PyCFunction)(void (*)())eager_api_tensor_copy,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"get_all_grads", {"get_all_grads",
(PyCFunction)(void (*)())eager_api_get_all_grads, (PyCFunction)(void (*)())eager_api_get_all_grads,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"get_grads_lists", {"get_grads_lists",
(PyCFunction)(void (*)())eager_api_get_grads_lists, (PyCFunction)(void (*)())eager_api_get_grads_lists,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"get_grads_types", {"get_grads_types",
(PyCFunction)(void (*)())eager_api_get_grads_types, (PyCFunction)(void (*)())eager_api_get_grads_types,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"read_next_tensor_list", {"read_next_tensor_list",
(PyCFunction)(void (*)())eager_api_read_next_tensor_list, (PyCFunction)(void (*)())eager_api_read_next_tensor_list,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"jit_function_call", {"jit_function_call",
(PyCFunction)(void (*)())eager_api_jit_function_call, (PyCFunction)(void (*)())eager_api_jit_function_call,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
/**sparse functions**/ /**sparse functions**/
{"sparse_coo_tensor", {"sparse_coo_tensor",
(PyCFunction)(void (*)())eager_api_sparse_coo_tensor, (PyCFunction)(void (*)())eager_api_sparse_coo_tensor,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"sparse_csr_tensor", {"sparse_csr_tensor",
(PyCFunction)(void (*)())eager_api_sparse_csr_tensor, (PyCFunction)(void (*)())eager_api_sparse_csr_tensor,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"register_saved_tensors_hooks", {"register_saved_tensors_hooks",
(PyCFunction)(void (*)())eager_api_register_saved_tensors_hooks, (PyCFunction)(void (*)())eager_api_register_saved_tensors_hooks,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"reset_saved_tensors_hooks", {"reset_saved_tensors_hooks",
(PyCFunction)(void (*)())eager_api_reset_saved_tensors_hooks, (PyCFunction)(void (*)())eager_api_reset_saved_tensors_hooks,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
/**amp functions**/ /**amp functions**/
{"set_master_grads", {"set_master_grads",
(PyCFunction)(void (*)())eager_api_set_master_grads, (PyCFunction)(void (*)())eager_api_set_master_grads,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
/**sparse functions**/ /**sparse functions**/
#if defined(PADDLE_WITH_CUDA) #if defined(PADDLE_WITH_CUDA)
{"async_read", {"async_read",
(PyCFunction)(void (*)())eager_api_async_read, (PyCFunction)(void (*)())eager_api_async_read,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"async_write", {"async_write",
(PyCFunction)(void (*)())eager_api_async_write, (PyCFunction)(void (*)())eager_api_async_write,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"to_uva_tensor", {"to_uva_tensor",
(PyCFunction)(void (*)())eager_api_to_uva_tensor, (PyCFunction)(void (*)())eager_api_to_uva_tensor,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
#endif #endif
{NULL, NULL, 0, NULL}}; {nullptr, nullptr, 0, nullptr}};
void BindFunctions(PyObject* module) { void BindFunctions(PyObject* module) {
if (PyModule_AddFunctions(module, variable_functions) < 0) { if (PyModule_AddFunctions(module, variable_functions) < 0) {
......
...@@ -1837,88 +1837,88 @@ PyMethodDef math_op_patch_methods[] = { ...@@ -1837,88 +1837,88 @@ PyMethodDef math_op_patch_methods[] = {
{"__add__", {"__add__",
(PyCFunction)(void (*)())tensor__add__method, (PyCFunction)(void (*)())tensor__add__method,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"__radd__", {"__radd__",
(PyCFunction)(void (*)())tensor__add__method, (PyCFunction)(void (*)())tensor__add__method,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"__sub__", {"__sub__",
(PyCFunction)(void (*)())tensor__sub__method, (PyCFunction)(void (*)())tensor__sub__method,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"__rsub__", {"__rsub__",
(PyCFunction)(void (*)())tensor__rsub__method, (PyCFunction)(void (*)())tensor__rsub__method,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"__mul__", {"__mul__",
(PyCFunction)(void (*)())tensor__mul__method, (PyCFunction)(void (*)())tensor__mul__method,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"__rmul__", {"__rmul__",
(PyCFunction)(void (*)())tensor__mul__method, (PyCFunction)(void (*)())tensor__mul__method,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"__div__", {"__div__",
(PyCFunction)(void (*)())tensor__div__method, (PyCFunction)(void (*)())tensor__div__method,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"__truediv__", {"__truediv__",
(PyCFunction)(void (*)())tensor__div__method, (PyCFunction)(void (*)())tensor__div__method,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"__rdiv__", {"__rdiv__",
(PyCFunction)(void (*)())tensor__rdiv__method, (PyCFunction)(void (*)())tensor__rdiv__method,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"__rtruediv__", {"__rtruediv__",
(PyCFunction)(void (*)())tensor__rdiv__method, (PyCFunction)(void (*)())tensor__rdiv__method,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"__floordiv__", {"__floordiv__",
(PyCFunction)(void (*)())tensor__floordiv__method, (PyCFunction)(void (*)())tensor__floordiv__method,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"__pow__", {"__pow__",
(PyCFunction)(void (*)())tensor__pow__method, (PyCFunction)(void (*)())tensor__pow__method,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"__rpow__", {"__rpow__",
(PyCFunction)(void (*)())tensor__rpow__method, (PyCFunction)(void (*)())tensor__rpow__method,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"__mod__", {"__mod__",
(PyCFunction)(void (*)())tensor__mod__method, (PyCFunction)(void (*)())tensor__mod__method,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"__matmul__", {"__matmul__",
(PyCFunction)(void (*)())tensor__matmul__method, (PyCFunction)(void (*)())tensor__matmul__method,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"__gt__", {"__gt__",
(PyCFunction)(void (*)())tensor__gt__method, (PyCFunction)(void (*)())tensor__gt__method,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"__ge__", {"__ge__",
(PyCFunction)(void (*)())tensor__ge__method, (PyCFunction)(void (*)())tensor__ge__method,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"__lt__", {"__lt__",
(PyCFunction)(void (*)())tensor__lt__method, (PyCFunction)(void (*)())tensor__lt__method,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"__le__", {"__le__",
(PyCFunction)(void (*)())tensor__le__method, (PyCFunction)(void (*)())tensor__le__method,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"__eq__", {"__eq__",
(PyCFunction)(void (*)())tensor__eq__method, (PyCFunction)(void (*)())tensor__eq__method,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"__ne__", {"__ne__",
(PyCFunction)(void (*)())tensor__ne__method, (PyCFunction)(void (*)())tensor__ne__method,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{NULL, NULL, 0, NULL}}; {nullptr, nullptr, 0, nullptr}};
} // namespace pybind } // namespace pybind
} // namespace paddle } // namespace paddle
...@@ -716,7 +716,7 @@ static PyObject* tensor_clear_gradient(TensorObject* self, ...@@ -716,7 +716,7 @@ static PyObject* tensor_clear_gradient(TensorObject* self,
grad = egr::EagerUtils::mutable_grad(self->tensor); grad = egr::EagerUtils::mutable_grad(self->tensor);
PADDLE_ENFORCE(grad != nullptr, PADDLE_ENFORCE(grad != nullptr,
paddle::platform::errors::Fatal( paddle::platform::errors::Fatal(
"Detected NULL grad" "Detected nullptr grad"
"Please check if you have manually cleared" "Please check if you have manually cleared"
"the grad inside autograd_meta")); "the grad inside autograd_meta"));
} else { } else {
...@@ -773,7 +773,7 @@ static PyObject* tensor__zero_grads(TensorObject* self, ...@@ -773,7 +773,7 @@ static PyObject* tensor__zero_grads(TensorObject* self,
paddle::Tensor* grad = egr::EagerUtils::mutable_grad(self->tensor); paddle::Tensor* grad = egr::EagerUtils::mutable_grad(self->tensor);
PADDLE_ENFORCE(grad != nullptr, PADDLE_ENFORCE(grad != nullptr,
paddle::platform::errors::Fatal( paddle::platform::errors::Fatal(
"Detected NULL grad" "Detected nullptr grad"
"Please check if you have manually cleared" "Please check if you have manually cleared"
"the grad inside autograd_meta")); "the grad inside autograd_meta"));
if (grad->initialized()) { if (grad->initialized()) {
...@@ -1570,7 +1570,7 @@ static PyObject* tensor_register_grad_hook(TensorObject* self, ...@@ -1570,7 +1570,7 @@ static PyObject* tensor_register_grad_hook(TensorObject* self,
if (autograd_meta && !autograd_meta->StopGradient()) { if (autograd_meta && !autograd_meta->StopGradient()) {
if (!autograd_meta->GetMutableGradNode()) { if (!autograd_meta->GetMutableGradNode()) {
VLOG(6) << "Detected NULL grad_node, Leaf tensor should have had " VLOG(6) << "Detected nullptr grad_node, Leaf tensor should have had "
"grad_node with type: GradNodeAccumulation."; "grad_node with type: GradNodeAccumulation.";
autograd_meta->SetGradNode( autograd_meta->SetGradNode(
std::make_shared<egr::GradNodeAccumulation>(autograd_meta)); std::make_shared<egr::GradNodeAccumulation>(autograd_meta));
...@@ -1666,7 +1666,7 @@ static PyObject* tensor_register_reduce_hook(TensorObject* self, ...@@ -1666,7 +1666,7 @@ static PyObject* tensor_register_reduce_hook(TensorObject* self,
"gradient.")); "gradient."));
PADDLE_ENFORCE( PADDLE_ENFORCE(
grad_node.get() != nullptr, grad_node.get() != nullptr,
paddle::platform::errors::Fatal("Detected NULL grad_node," paddle::platform::errors::Fatal("Detected nullptr grad_node,"
"Leaf tensor should have had grad_node " "Leaf tensor should have had grad_node "
"with type: GradNodeAccumulation.")); "with type: GradNodeAccumulation."));
PyObject* hook_func = PyTuple_GET_ITEM(args, 0); PyObject* hook_func = PyTuple_GET_ITEM(args, 0);
...@@ -2171,10 +2171,11 @@ static PyObject* tensor__grad_name(TensorObject* self, ...@@ -2171,10 +2171,11 @@ static PyObject* tensor__grad_name(TensorObject* self,
PyObject* kwargs) { PyObject* kwargs) {
EAGER_TRY EAGER_TRY
paddle::Tensor* grad = egr::EagerUtils::mutable_grad(self->tensor); paddle::Tensor* grad = egr::EagerUtils::mutable_grad(self->tensor);
PADDLE_ENFORCE_EQ(grad != nullptr, PADDLE_ENFORCE_EQ(
grad != nullptr,
true, true,
platform::errors::InvalidArgument( platform::errors::InvalidArgument(
"Detected NULL grad. Please check if you have manually " "Detected nullptr grad. Please check if you have manually "
"cleared the grad inside autograd_meta")); "cleared the grad inside autograd_meta"));
return ToPyObject(grad->name()); return ToPyObject(grad->name());
EAGER_CATCH_AND_THROW_RETURN_NULL EAGER_CATCH_AND_THROW_RETURN_NULL
...@@ -2185,10 +2186,11 @@ static PyObject* tensor__grad_value(TensorObject* self, ...@@ -2185,10 +2186,11 @@ static PyObject* tensor__grad_value(TensorObject* self,
PyObject* kwargs) { PyObject* kwargs) {
EAGER_TRY EAGER_TRY
paddle::Tensor* grad = egr::EagerUtils::mutable_grad(self->tensor); paddle::Tensor* grad = egr::EagerUtils::mutable_grad(self->tensor);
PADDLE_ENFORCE_EQ(grad != nullptr, PADDLE_ENFORCE_EQ(
grad != nullptr,
true, true,
platform::errors::InvalidArgument( platform::errors::InvalidArgument(
"Detected NULL grad. Please check if you have manually " "Detected nullptr grad. Please check if you have manually "
"cleared the grad inside autograd_meta")); "cleared the grad inside autograd_meta"));
if (!grad->defined()) { if (!grad->defined()) {
...@@ -2210,10 +2212,11 @@ static PyObject* tensor__unset_fake_empty(TensorObject* self, ...@@ -2210,10 +2212,11 @@ static PyObject* tensor__unset_fake_empty(TensorObject* self,
PyObject* kwargs) { PyObject* kwargs) {
EAGER_TRY EAGER_TRY
paddle::Tensor* grad = egr::EagerUtils::mutable_grad(self->tensor); paddle::Tensor* grad = egr::EagerUtils::mutable_grad(self->tensor);
PADDLE_ENFORCE_EQ(grad != nullptr, PADDLE_ENFORCE_EQ(
grad != nullptr,
true, true,
platform::errors::InvalidArgument( platform::errors::InvalidArgument(
"Detected NULL grad. Please check if you have manually " "Detected nullptr grad. Please check if you have manually "
"cleared the grad inside autograd_meta")); "cleared the grad inside autograd_meta"));
bool is_leaf = egr::EagerUtils::IsLeafTensor(self->tensor); bool is_leaf = egr::EagerUtils::IsLeafTensor(self->tensor);
...@@ -2357,20 +2360,20 @@ PyMethodDef variable_methods[] = { ...@@ -2357,20 +2360,20 @@ PyMethodDef variable_methods[] = {
{"_is_initialized", {"_is_initialized",
(PyCFunction)(void (*)())tensor_method__is_initialized, (PyCFunction)(void (*)())tensor_method__is_initialized,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"_is_dense_tensor_hold_allocation", {"_is_dense_tensor_hold_allocation",
(PyCFunction)(void (*)( (PyCFunction)(void (*)(
void))tensor_method__is_dense_tensor_hold_allocation, void))tensor_method__is_dense_tensor_hold_allocation,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"_copy_to", {"_copy_to",
(PyCFunction)(void (*)())tensor_method__copy_to, (PyCFunction)(void (*)())tensor_method__copy_to,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"copy_", {"copy_",
(PyCFunction)(void (*)())tensor_method_copy_, (PyCFunction)(void (*)())tensor_method_copy_,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"clone", {"clone",
(PyCFunction)(void (*)())tensor_method_clone, (PyCFunction)(void (*)())tensor_method_clone,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
...@@ -2378,11 +2381,11 @@ PyMethodDef variable_methods[] = { ...@@ -2378,11 +2381,11 @@ PyMethodDef variable_methods[] = {
{"reconstruct_from_", {"reconstruct_from_",
(PyCFunction)(void (*)())tensor_method_reconstruct_from_, (PyCFunction)(void (*)())tensor_method_reconstruct_from_,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"retain_grads", {"retain_grads",
(PyCFunction)(void (*)())tensor_retain_grads, (PyCFunction)(void (*)())tensor_retain_grads,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"clear_gradient", {"clear_gradient",
(PyCFunction)(void (*)())tensor_clear_gradient, (PyCFunction)(void (*)())tensor_clear_gradient,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
...@@ -2390,31 +2393,31 @@ PyMethodDef variable_methods[] = { ...@@ -2390,31 +2393,31 @@ PyMethodDef variable_methods[] = {
{"is_dense", {"is_dense",
(PyCFunction)(void (*)())tensor_method_is_dense, (PyCFunction)(void (*)())tensor_method_is_dense,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"is_dist", {"is_dist",
(PyCFunction)(void (*)())tensor_method_is_dist, (PyCFunction)(void (*)())tensor_method_is_dist,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"_zero_grads", {"_zero_grads",
(PyCFunction)(void (*)())tensor__zero_grads, (PyCFunction)(void (*)())tensor__zero_grads,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"_share_buffer_to", {"_share_buffer_to",
(PyCFunction)(void (*)())tensor__share_buffer_to, (PyCFunction)(void (*)())tensor__share_buffer_to,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"_is_shared_buffer_with", {"_is_shared_buffer_with",
(PyCFunction)(void (*)())tensor__is_shared_buffer_with, (PyCFunction)(void (*)())tensor__is_shared_buffer_with,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"_share_underline_tensor_to", {"_share_underline_tensor_to",
(PyCFunction)(void (*)())tensor__share_underline_tensor_to, (PyCFunction)(void (*)())tensor__share_underline_tensor_to,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"_is_shared_underline_tensor_with", {"_is_shared_underline_tensor_with",
(PyCFunction)(void (*)())tensor__is_shared_underline_tensor_with, (PyCFunction)(void (*)())tensor__is_shared_underline_tensor_with,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"detach", {"detach",
(PyCFunction)(void (*)())tensor_method_detach, (PyCFunction)(void (*)())tensor_method_detach,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
...@@ -2422,39 +2425,39 @@ PyMethodDef variable_methods[] = { ...@@ -2422,39 +2425,39 @@ PyMethodDef variable_methods[] = {
{"detach_", {"detach_",
(PyCFunction)(void (*)(void))tensor_method_detach_, (PyCFunction)(void (*)(void))tensor_method_detach_,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"get_tensor", {"get_tensor",
(PyCFunction)(void (*)())tensor_method_get_underline_tensor, (PyCFunction)(void (*)())tensor_method_get_underline_tensor,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"get_selected_rows", {"get_selected_rows",
(PyCFunction)(void (*)())tensor_method_get_underline_selected_rows, (PyCFunction)(void (*)())tensor_method_get_underline_selected_rows,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"_get_tensor_from_selected_rows", {"_get_tensor_from_selected_rows",
(PyCFunction)(void (*)())tensor_method__get_tensor_from_selected_rows, (PyCFunction)(void (*)())tensor_method__get_tensor_from_selected_rows,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"_getitem_index_not_tensor", {"_getitem_index_not_tensor",
(PyCFunction)(void (*)())tensor__getitem_index_not_tensor, (PyCFunction)(void (*)())tensor__getitem_index_not_tensor,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"_getitem_from_offset", {"_getitem_from_offset",
(PyCFunction)(void (*)())tensor__getitem_from_offset, (PyCFunction)(void (*)())tensor__getitem_from_offset,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"__setitem_eager_tensor__", {"__setitem_eager_tensor__",
(PyCFunction)(void (*)())tensor_method__setitem_eager_tensor, (PyCFunction)(void (*)())tensor_method__setitem_eager_tensor,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"_register_grad_hook", {"_register_grad_hook",
(PyCFunction)(void (*)())tensor_register_grad_hook, (PyCFunction)(void (*)())tensor_register_grad_hook,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"_remove_grad_hook", {"_remove_grad_hook",
(PyCFunction)(void (*)())tensor_remove_grad_hook, (PyCFunction)(void (*)())tensor_remove_grad_hook,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"_register_backward_hook", {"_register_backward_hook",
(PyCFunction)(void (*)())tensor_register_reduce_hook, (PyCFunction)(void (*)())tensor_register_reduce_hook,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
...@@ -2462,77 +2465,77 @@ PyMethodDef variable_methods[] = { ...@@ -2462,77 +2465,77 @@ PyMethodDef variable_methods[] = {
{"_set_grad_type", {"_set_grad_type",
(PyCFunction)(void (*)())tensor__set_grad_type, (PyCFunction)(void (*)())tensor__set_grad_type,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"_clear", {"_clear",
(PyCFunction)(void (*)())tensor__clear, (PyCFunction)(void (*)())tensor__clear,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"_clear_dataptr", {"_clear_dataptr",
(PyCFunction)(void (*)())tensor__clear_dataptr, (PyCFunction)(void (*)())tensor__clear_dataptr,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"_copy_gradient_from", {"_copy_gradient_from",
(PyCFunction)(void (*)())tensor__copy_gradient_from, (PyCFunction)(void (*)())tensor__copy_gradient_from,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"_tensor_use_gpudnn", {"_tensor_use_gpudnn",
(PyCFunction)(void (*)())tensor__use_gpudnn, (PyCFunction)(void (*)())tensor__use_gpudnn,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
/** the methods to adapt old dygraph, will be removed in the future **/ /** the methods to adapt old dygraph, will be removed in the future **/
{"set_string_list", {"set_string_list",
(PyCFunction)(void (*)())tensor_method_set_string_list, (PyCFunction)(void (*)())tensor_method_set_string_list,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"set_vocab", {"set_vocab",
(PyCFunction)(void (*)())tensor_method_set_vocab, (PyCFunction)(void (*)())tensor_method_set_vocab,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"get_map_tensor", {"get_map_tensor",
(PyCFunction)(void (*)())tensor_method_get_map_tensor, (PyCFunction)(void (*)())tensor_method_get_map_tensor,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
/***the method of sparse tensor****/ /***the method of sparse tensor****/
{"nnz", {"nnz",
(PyCFunction)(void (*)())tensor_method_get_non_zero_nums, (PyCFunction)(void (*)())tensor_method_get_non_zero_nums,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"indices", {"indices",
(PyCFunction)(void (*)())tensor_method_get_non_zero_indices, (PyCFunction)(void (*)())tensor_method_get_non_zero_indices,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"values", {"values",
(PyCFunction)(void (*)())tensor_method_get_non_zero_elements, (PyCFunction)(void (*)())tensor_method_get_non_zero_elements,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"crows", {"crows",
(PyCFunction)(void (*)())tensor_method_get_non_zero_crows, (PyCFunction)(void (*)())tensor_method_get_non_zero_crows,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"cols", {"cols",
(PyCFunction)(void (*)())tensor_method_get_non_zero_cols, (PyCFunction)(void (*)())tensor_method_get_non_zero_cols,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"is_sparse", {"is_sparse",
(PyCFunction)(void (*)())tensor_method_is_sparse, (PyCFunction)(void (*)())tensor_method_is_sparse,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"is_sparse_coo", {"is_sparse_coo",
(PyCFunction)(void (*)())tensor_method_is_sparse_coo, (PyCFunction)(void (*)())tensor_method_is_sparse_coo,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"is_sparse_csr", {"is_sparse_csr",
(PyCFunction)(void (*)())tensor_method_is_sparse_csr, (PyCFunction)(void (*)())tensor_method_is_sparse_csr,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"is_same_shape", {"is_same_shape",
(PyCFunction)(void (*)())tensor_method_is_same_shape, (PyCFunction)(void (*)())tensor_method_is_same_shape,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"to_sparse_csr", {"to_sparse_csr",
(PyCFunction)(void (*)())tensor_method_to_sparse_csr, (PyCFunction)(void (*)())tensor_method_to_sparse_csr,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"element_size", {"element_size",
(PyCFunction)(void (*)())tensor_method_element_size, (PyCFunction)(void (*)())tensor_method_element_size,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
...@@ -2541,7 +2544,7 @@ PyMethodDef variable_methods[] = { ...@@ -2541,7 +2544,7 @@ PyMethodDef variable_methods[] = {
{"_inplace_version", {"_inplace_version",
(PyCFunction)(void (*)())tensor__inplace_version, (PyCFunction)(void (*)())tensor__inplace_version,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"_bump_inplace_version", {"_bump_inplace_version",
(PyCFunction)(void (*)())tensor__bump_inplace_version, (PyCFunction)(void (*)())tensor__bump_inplace_version,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
...@@ -2549,80 +2552,80 @@ PyMethodDef variable_methods[] = { ...@@ -2549,80 +2552,80 @@ PyMethodDef variable_methods[] = {
{"is_selected_rows", {"is_selected_rows",
(PyCFunction)(void (*)())tensor_method_is_selected_rows, (PyCFunction)(void (*)())tensor_method_is_selected_rows,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"rows", {"rows",
(PyCFunction)(void (*)())tensor_method_get_rows, (PyCFunction)(void (*)())tensor_method_get_rows,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"_reset_grad_inplace_version", {"_reset_grad_inplace_version",
(PyCFunction)(void (*)())tensor__reset_grad_inplace_version, (PyCFunction)(void (*)())tensor__reset_grad_inplace_version,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"_share_memory", {"_share_memory",
(PyCFunction)(void (*)())tensor_method__share_memory, (PyCFunction)(void (*)())tensor_method__share_memory,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"_offset", {"_offset",
(PyCFunction)(void (*)())tensor__offset, (PyCFunction)(void (*)())tensor__offset,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"_grad_name", {"_grad_name",
(PyCFunction)(void (*)())tensor__grad_name, (PyCFunction)(void (*)())tensor__grad_name,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"_grad_value", {"_grad_value",
(PyCFunction)(void (*)())tensor__grad_value, (PyCFunction)(void (*)())tensor__grad_value,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"_unset_fake_empty", {"_unset_fake_empty",
(PyCFunction)(void (*)())tensor__unset_fake_empty, (PyCFunction)(void (*)())tensor__unset_fake_empty,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"data_ptr", {"data_ptr",
(PyCFunction)(void (*)())tensor_data_ptr, (PyCFunction)(void (*)())tensor_data_ptr,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"_grad_ivar", {"_grad_ivar",
(PyCFunction)(void (*)())tensor__grad_ivar, (PyCFunction)(void (*)())tensor__grad_ivar,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"contiguous", {"contiguous",
(PyCFunction)(void (*)(void))tensor_contiguous, (PyCFunction)(void (*)(void))tensor_contiguous,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"is_contiguous", {"is_contiguous",
(PyCFunction)(void (*)(void))tensor_is_contiguous, (PyCFunction)(void (*)(void))tensor_is_contiguous,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"get_strides", {"get_strides",
(PyCFunction)(void (*)(void))tensor_method_strides, (PyCFunction)(void (*)(void))tensor_method_strides,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
#if defined(PADDLE_WITH_CUDA) #if defined(PADDLE_WITH_CUDA)
{"_tensor_uva", {"_tensor_uva",
(PyCFunction)(void (*)())tensor_method__uva, (PyCFunction)(void (*)())tensor_method__uva,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
#endif #endif
{NULL, NULL, 0, NULL}}; {nullptr, nullptr, 0, nullptr}};
// variable_methods for core.eager.StringTensor // variable_methods for core.eager.StringTensor
PyMethodDef string_tensor_variable_methods[] = { PyMethodDef string_tensor_variable_methods[] = {
{"numpy", {"numpy",
(PyCFunction)(void (*)())tensor_method_numpy_for_string_tensor, (PyCFunction)(void (*)())tensor_method_numpy_for_string_tensor,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"_is_initialized", {"_is_initialized",
(PyCFunction)(void (*)())tensor_method__is_initialized, (PyCFunction)(void (*)())tensor_method__is_initialized,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{"_is_string_tensor_hold_allocation", {"_is_string_tensor_hold_allocation",
(PyCFunction)(void (*)( (PyCFunction)(void (*)(
void))tensor_method__is_string_tensor_hold_allocation, void))tensor_method__is_string_tensor_hold_allocation,
METH_VARARGS | METH_KEYWORDS, METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
// TODO(zhoushunjie): Need to add _copy_to, copy_ for StringTensor. // TODO(zhoushunjie): Need to add _copy_to, copy_ for StringTensor.
{NULL, NULL, 0, NULL}}; {nullptr, nullptr, 0, nullptr}};
} // namespace pybind } // namespace pybind
} // namespace paddle } // namespace paddle
...@@ -662,13 +662,15 @@ int tensor_properties_set_materialize_grads(PyLayerObject* self, ...@@ -662,13 +662,15 @@ int tensor_properties_set_materialize_grads(PyLayerObject* self,
EAGER_CATCH_AND_THROW_RETURN_NEG EAGER_CATCH_AND_THROW_RETURN_NEG
} }
PyMethodDef pylayer_methods[] = { PyMethodDef pylayer_methods[] = {{"name",
{"name", (PyCFunction)(void (*)())pylayer_method_name, METH_NOARGS, NULL}, (PyCFunction)(void (*)())pylayer_method_name,
METH_NOARGS,
nullptr},
{"apply", {"apply",
(PyCFunction)(void (*)())pylayer_method_apply, (PyCFunction)(void (*)())pylayer_method_apply,
METH_CLASS | METH_VARARGS | METH_KEYWORDS, METH_CLASS | METH_VARARGS | METH_KEYWORDS,
NULL}, nullptr},
{NULL, NULL, 0, NULL}}; {nullptr, nullptr, 0, nullptr}};
struct PyGetSetDef pylayer_properties[] { struct PyGetSetDef pylayer_properties[] {
{"container", {"container",
......
...@@ -87,9 +87,9 @@ void BindException(pybind11::module* m) { ...@@ -87,9 +87,9 @@ void BindException(pybind11::module* m) {
void ThrowExceptionToPython(std::exception_ptr p) { void ThrowExceptionToPython(std::exception_ptr p) {
static PyObject* EOFExceptionException = static PyObject* EOFExceptionException =
PyErr_NewException("paddle.EOFException", PyExc_Exception, NULL); PyErr_NewException("paddle.EOFException", PyExc_Exception, nullptr);
static PyObject* EnforceNotMetException = static PyObject* EnforceNotMetException =
PyErr_NewException("paddle.EnforceNotMet", PyExc_Exception, NULL); PyErr_NewException("paddle.EnforceNotMet", PyExc_Exception, nullptr);
try { try {
if (p) std::rethrow_exception(p); if (p) std::rethrow_exception(p);
} catch (const platform::EOFException& e) { } catch (const platform::EOFException& e) {
......
...@@ -120,7 +120,7 @@ static Py_tss_t eval_frame_callback_key = {0, 0}; ...@@ -120,7 +120,7 @@ static Py_tss_t eval_frame_callback_key = {0, 0};
inline static PyObject *eval_frame_callback_get() { inline static PyObject *eval_frame_callback_get() {
void *result = PyThread_tss_get(&eval_frame_callback_key); void *result = PyThread_tss_get(&eval_frame_callback_key);
if (unlikely(result == NULL)) { if (unlikely(result == nullptr)) {
Py_RETURN_NONE; Py_RETURN_NONE;
} else { } else {
return reinterpret_cast<PyObject *>(result); return reinterpret_cast<PyObject *>(result);
...@@ -136,7 +136,7 @@ inline static PyObject *eval_frame_default(PyThreadState *tstate, ...@@ -136,7 +136,7 @@ inline static PyObject *eval_frame_default(PyThreadState *tstate,
FrameObject *frame, FrameObject *frame,
int throw_flag) { int throw_flag) {
#if PY_VERSION_HEX >= 0x03090000 #if PY_VERSION_HEX >= 0x03090000
if (tstate == NULL) { if (tstate == nullptr) {
tstate = PyThreadState_GET(); tstate = PyThreadState_GET();
} }
return _PyEval_EvalFrameDefault(tstate, frame, throw_flag); return _PyEval_EvalFrameDefault(tstate, frame, throw_flag);
...@@ -164,9 +164,9 @@ inline static PyObject *eval_custom_code(PyThreadState *tstate, ...@@ -164,9 +164,9 @@ inline static PyObject *eval_custom_code(PyThreadState *tstate,
nfrees = PyTuple_GET_SIZE(code->co_freevars); nfrees = PyTuple_GET_SIZE(code->co_freevars);
#endif #endif
PyFrameObject *shadow = PyFrame_New(tstate, code, frame->f_globals, NULL); PyFrameObject *shadow = PyFrame_New(tstate, code, frame->f_globals, nullptr);
if (shadow == NULL) { if (shadow == nullptr) {
return NULL; return nullptr;
} }
#if PY_VERSION_HEX >= 0x030b0000 #if PY_VERSION_HEX >= 0x030b0000
...@@ -210,7 +210,7 @@ static PyObject *_custom_eval_frame(PyThreadState *tstate, ...@@ -210,7 +210,7 @@ static PyObject *_custom_eval_frame(PyThreadState *tstate,
#else #else
if (PyFrame_FastToLocalsWithError(frame) < 0) { if (PyFrame_FastToLocalsWithError(frame) < 0) {
#endif #endif
return NULL; return nullptr;
} }
// NOTE:(xiongkun): Handle GeneratorExit exception: (Spend a day) // NOTE:(xiongkun): Handle GeneratorExit exception: (Spend a day)
...@@ -241,10 +241,10 @@ static PyObject *_custom_eval_frame(PyThreadState *tstate, ...@@ -241,10 +241,10 @@ static PyObject *_custom_eval_frame(PyThreadState *tstate,
Py_DECREF(args); Py_DECREF(args);
VLOG(7) << "After call eval_frame_function and decrease frame."; VLOG(7) << "After call eval_frame_function and decrease frame.";
// result: GuardedCode // result: GuardedCode
if (result == NULL) { if (result == nullptr) {
// internal exception // internal exception
VLOG(7) << "Error happened."; VLOG(7) << "Error happened.";
return NULL; return nullptr;
} else if (result != Py_None) { } else if (result != Py_None) {
// NOTE: Cache is not supported now // NOTE: Cache is not supported now
PyCodeObject *code = reinterpret_cast<PyCodeObject *>( PyCodeObject *code = reinterpret_cast<PyCodeObject *>(
...@@ -354,7 +354,7 @@ PyMODINIT_FUNC PyInit__eval_frame() { ...@@ -354,7 +354,7 @@ PyMODINIT_FUNC PyInit__eval_frame() {
Py_INCREF(Py_None); Py_INCREF(Py_None);
eval_frame_callback_set(Py_None); eval_frame_callback_set(Py_None);
return NULL; return nullptr;
} }
PyTypeObject *g_jit_function_pytype = nullptr; PyTypeObject *g_jit_function_pytype = nullptr;
......
...@@ -100,13 +100,13 @@ typename std::enable_if<std::is_floating_point<T>::value>::type LapackEigvals( ...@@ -100,13 +100,13 @@ typename std::enable_if<std::is_floating_point<T>::value>::type LapackEigvals(
a.template data<T>(), a.template data<T>(),
static_cast<int>(n_dim), static_cast<int>(n_dim),
w_data, w_data,
NULL, nullptr,
1, 1,
NULL, nullptr,
1, 1,
work->template data<T>(), work->template data<T>(),
static_cast<int>(work_mem / sizeof(T)), static_cast<int>(work_mem / sizeof(T)),
static_cast<T*>(NULL), static_cast<T*>(nullptr),
&info); &info);
std::string name = "phi::backend::dynload::dgeev_"; std::string name = "phi::backend::dynload::dgeev_";
...@@ -165,9 +165,9 @@ LapackEigvals(const Context& ctx, ...@@ -165,9 +165,9 @@ LapackEigvals(const Context& ctx,
a.template data<T>(), a.template data<T>(),
static_cast<int>(n_dim), static_cast<int>(n_dim),
output->template data<T>(), output->template data<T>(),
NULL, nullptr,
1, 1,
NULL, nullptr,
1, 1,
work->template data<T>(), work->template data<T>(),
static_cast<int>(work_mem / sizeof(T)), static_cast<int>(work_mem / sizeof(T)),
...@@ -222,14 +222,14 @@ void EigvalsKernel(const Context& ctx, const DenseTensor& x, DenseTensor* out) { ...@@ -222,14 +222,14 @@ void EigvalsKernel(const Context& ctx, const DenseTensor& x, DenseTensor* out) {
static_cast<int>(n_dim), static_cast<int>(n_dim),
x_matrices[0].template data<T>(), x_matrices[0].template data<T>(),
static_cast<int>(n_dim), static_cast<int>(n_dim),
NULL, nullptr,
NULL, nullptr,
1, 1,
NULL, nullptr,
1, 1,
&qwork, &qwork,
-1, -1,
static_cast<dtype::Real<T>*>(NULL), static_cast<dtype::Real<T>*>(nullptr),
&info); &info);
int64_t lwork = static_cast<int64_t>(qwork); int64_t lwork = static_cast<int64_t>(qwork);
......
...@@ -66,7 +66,7 @@ void FCFunctor<DeviceContext, T>::operator()(const DeviceContext& context, ...@@ -66,7 +66,7 @@ void FCFunctor<DeviceContext, T>::operator()(const DeviceContext& context,
} else { } else {
blas.MatMul(M, N, K, X, W, Y); blas.MatMul(M, N, K, X, W, Y);
} }
if (B == NULL) { if (B == nullptr) {
if (padding_weights) { if (padding_weights) {
#ifdef PADDLE_WITH_MKLML #ifdef PADDLE_WITH_MKLML
#pragma omp parallel for #pragma omp parallel for
......
此差异已折叠。
...@@ -83,7 +83,7 @@ void SgdJitCode::genCode() { ...@@ -83,7 +83,7 @@ void SgdJitCode::genCode() {
Label inner_loop; Label inner_loop;
Label escape_loop; Label escape_loop;
mov(rax, 0); mov(rax, 0); // NOLINT
L(inner_loop); L(inner_loop);
{ {
cmp(rax, num_groups); cmp(rax, num_groups);
......
...@@ -407,7 +407,7 @@ class MulPrimitiveFactory { ...@@ -407,7 +407,7 @@ class MulPrimitiveFactory {
memory Reorder(const memory::desc &src_desc, memory Reorder(const memory::desc &src_desc,
const memory::desc &dst_desc, const memory::desc &dst_desc,
void *src_data, void *src_data,
void *dst_data = NULL) { void *dst_data = nullptr) {
auto src_mem = memory(src_desc, engine_, src_data); auto src_mem = memory(src_desc, engine_, src_data);
auto dst_mem = dst_data ? memory(dst_desc, engine_, dst_data) auto dst_mem = dst_data ? memory(dst_desc, engine_, dst_data)
: memory(dst_desc, engine_); : memory(dst_desc, engine_);
......
...@@ -77,7 +77,7 @@ char* LineFileReader::getdelim(FILE* f, char delim) { ...@@ -77,7 +77,7 @@ char* LineFileReader::getdelim(FILE* f, char delim) {
int code = feof(f); int code = feof(f);
(void)code; (void)code;
assert(code); assert(code);
return NULL; return nullptr;
} }
#else #else
return NULL; return NULL;
......
...@@ -42,7 +42,7 @@ TEST(Gather, GatherData) { ...@@ -42,7 +42,7 @@ TEST(Gather, GatherData) {
phi::CPUContext ctx(*cpu_place); phi::CPUContext ctx(*cpu_place);
phi::funcs::CPUGather<int>(ctx, *src, *index, output); phi::funcs::CPUGather<int>(ctx, *src, *index, output);
delete cpu_place; delete cpu_place;
cpu_place = NULL; cpu_place = nullptr;
for (int i = 0; i < 4; ++i) EXPECT_EQ(p_output[i], i + 4); for (int i = 0; i < 4; ++i) EXPECT_EQ(p_output[i], i + 4);
for (int i = 4; i < 8; ++i) EXPECT_EQ(p_output[i], i - 4); for (int i = 4; i < 8; ++i) EXPECT_EQ(p_output[i], i - 4);
......
...@@ -362,7 +362,7 @@ void benchIm2col(int ic, int ih, int iw, int fh, int fw, int ph, int pw) { ...@@ -362,7 +362,7 @@ void benchIm2col(int ic, int ih, int iw, int fh, int fw, int ph, int pw) {
constexpr int repeat = 100; constexpr int repeat = 100;
auto GetCurrentMs = []() -> double { auto GetCurrentMs = []() -> double {
struct timeval time; struct timeval time;
gettimeofday(&time, NULL); gettimeofday(&time, nullptr);
return 1e+3 * time.tv_sec + 1e-3 * time.tv_usec; return 1e+3 * time.tv_sec + 1e-3 * time.tv_usec;
}; };
auto t1 = GetCurrentMs(); auto t1 = GetCurrentMs();
......
...@@ -328,9 +328,9 @@ TEST(test_layer, test_varbase_basic) { ...@@ -328,9 +328,9 @@ TEST(test_layer, test_varbase_basic) {
new imperative::VarBase(true, "vin")); new imperative::VarBase(true, "vin"));
ASSERT_ANY_THROW(vin->MutableGradVar()); ASSERT_ANY_THROW(vin->MutableGradVar());
ASSERT_NO_THROW(ASSERT_TRUE(dynamic_cast<framework::Variable*>( ASSERT_NO_THROW(ASSERT_TRUE(dynamic_cast<framework::Variable*>(
vin_with_grad->MutableGradVar()) != 0)); vin_with_grad->MutableGradVar()) != nullptr));
ASSERT_TRUE( ASSERT_TRUE(dynamic_cast<framework::Variable*>(
dynamic_cast<framework::Variable*>(vin_with_grad->MutableGradVar()) != 0); vin_with_grad->MutableGradVar()) != nullptr);
vin_with_grad->SetOverridedStopGradient(false); vin_with_grad->SetOverridedStopGradient(false);
ASSERT_FALSE(vin_with_grad->OverridedStopGradient()); ASSERT_FALSE(vin_with_grad->OverridedStopGradient());
ASSERT_NO_FATAL_FAILURE(vin_with_grad->SetPersistable(true)); ASSERT_NO_FATAL_FAILURE(vin_with_grad->SetPersistable(true));
......
...@@ -25,7 +25,7 @@ namespace tests { ...@@ -25,7 +25,7 @@ namespace tests {
inline double GetCurrentUS() { inline double GetCurrentUS() {
struct timeval time; struct timeval time;
gettimeofday(&time, NULL); gettimeofday(&time, nullptr);
return 1e+6 * time.tv_sec + time.tv_usec; return 1e+6 * time.tv_sec + time.tv_usec;
} }
constexpr int repeat = 1000; constexpr int repeat = 1000;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册