Commit 1e4f627d (unverified)
Authored by Zhenghai Zhang on Aug 04, 2023; committed via GitHub on Aug 04, 2023.

[clang-tidy] NO.12 enable modernize-use-nullptr check (#55800)

Parent: b67715a4
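For context on why this check matters: NULL in C++ is an integer constant (commonly 0 or 0L), so it participates in overload resolution as an integer, while nullptr has its own type, std::nullptr_t, which converts only to pointer types. A minimal illustration (the f overloads below are hypothetical, not from this commit):

    #include <iostream>

    void f(int)         { std::cout << "f(int)\n"; }
    void f(const char*) { std::cout << "f(const char*)\n"; }

    int main() {
      f(0);        // an integer literal binds to f(int), even if a null pointer was meant
      f(nullptr);  // std::nullptr_t converts only to pointer types: f(const char*) is chosen
      // f(NULL) picks f(int) or is ambiguous, depending on how the platform
      // defines NULL -- exactly the trap modernize-use-nullptr removes.
      return 0;
    }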
Showing 32 changed files with 433 additions and 421 deletions.
.clang-tidy  +1 -1
paddle/fluid/framework/data_feed.cc  +12 -11
paddle/fluid/framework/io/crypto/aes_cipher.cc  +12 -8
paddle/fluid/framework/io/fs.cc  +1 -1
paddle/fluid/framework/io/shell.cc  +14 -14
paddle/fluid/framework/ir/multi_devices_graph_pass/fuse_all_reduce_op_pass.cc  +1 -1
paddle/fluid/inference/capi_exp/pd_config.cc  +1 -1
paddle/fluid/inference/capi_exp/pd_predictor.cc  +3 -2
paddle/fluid/inference/capi_exp/pd_utils.cc  +35 -34
paddle/fluid/memory/allocation/best_fit_allocator_test.cc  +1 -1
paddle/fluid/memory/allocation/mmap_allocator.cc  +1 -1
paddle/fluid/operators/pyramid_hash_op.cc  +2 -2
paddle/fluid/platform/gen_comm_id_helper.cc  +4 -4
paddle/fluid/platform/init.cc  +5 -5
paddle/fluid/platform/timer.cc  +2 -2
paddle/fluid/pybind/eager.cc  +30 -30
paddle/fluid/pybind/eager_functions.cc  +22 -22
paddle/fluid/pybind/eager_math_op_patch.cc  +22 -22
paddle/fluid/pybind/eager_method.cc  +82 -79
paddle/fluid/pybind/eager_py_layer.cc  +9 -7
paddle/fluid/pybind/exception.cc  +2 -2
paddle/fluid/pybind/jit.cc  +9 -9
paddle/phi/kernels/cpu/eigvals_kernel.cc  +9 -9
paddle/phi/kernels/funcs/fc_functor.cc  +1 -1
paddle/phi/kernels/funcs/gpc.cc  +143 -143
paddle/phi/kernels/funcs/jit/gen/sgd.cc  +1 -1
paddle/phi/kernels/onednn/matmul_kernel.cc  +1 -1
paddle/utils/string/string_helper.cc  +1 -1
test/cpp/fluid/gather_test.cc  +1 -1
test/cpp/fluid/math/im2col_test.cc  +1 -1
test/cpp/imperative/test_layer.cc  +3 -3
test/cpp/phi/kernels/test_cpu_vec.cc  +1 -1
.clang-tidy
@@ -184,7 +184,7 @@ modernize-redundant-void-arg,
 -modernize-use-equals-default,
 -modernize-use-equals-delete,
 -modernize-use-noexcept,
--modernize-use-nullptr,
+modernize-use-nullptr,
 modernize-use-override,
 -modernize-use-transparent-functors,
 -modernize-use-uncaught-exceptions,
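To reproduce the fixes locally, the check can be run with stock clang-tidy; the file path and -std flag below are illustrative, and Paddle's CI wires this through its own build setup:

    clang-tidy -checks='-*,modernize-use-nullptr' -fix \
        paddle/fluid/framework/data_feed.cc -- -std=c++17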
paddle/fluid/framework/data_feed.cc
@@ -59,8 +59,8 @@ class BufferedLineFileReader {
   int read_lines(T* reader, LineFunc func, int skip_lines) {
     int lines = 0;
     size_t ret = 0;
-    char* ptr = NULL;
-    char* eol = NULL;
+    char* ptr = nullptr;
+    char* eol = nullptr;
     total_len_ = 0;
     error_line_ = 0;
@@ -70,7 +70,7 @@ class BufferedLineFileReader {
       total_len_ += ret;
       ptr = buff_;
       eol = reinterpret_cast<char*>(memchr(ptr, '\n', ret));
-      while (eol != NULL) {
+      while (eol != nullptr) {
         int size = static_cast<int>((eol - ptr) + 1);
         x.append(ptr, size - 1);
         ++lines;
@@ -1106,13 +1106,13 @@ void MultiSlotInMemoryDataFeed::GetMsgFromLogKey(const std::string& log_key,
                                                  uint32_t* cmatch,
                                                  uint32_t* rank) {
   std::string searchid_str = log_key.substr(16, 16);
-  *search_id = (uint64_t)strtoull(searchid_str.c_str(), NULL, 16);
+  *search_id = (uint64_t)strtoull(searchid_str.c_str(), nullptr, 16);
   std::string cmatch_str = log_key.substr(11, 3);
-  *cmatch = (uint32_t)strtoul(cmatch_str.c_str(), NULL, 16);
+  *cmatch = (uint32_t)strtoul(cmatch_str.c_str(), nullptr, 16);
   std::string rank_str = log_key.substr(14, 2);
-  *rank = (uint32_t)strtoul(rank_str.c_str(), NULL, 16);
+  *rank = (uint32_t)strtoul(rank_str.c_str(), nullptr, 16);
 }

 int MultiSlotInMemoryDataFeed::ParseInstanceFromSo(
@@ -1657,8 +1657,8 @@ bool MultiSlotFileInstantDataFeed::Preprocess(const std::string& filename) {
   fstat(fd_, &sb);
   end_ = static_cast<size_t>(sb.st_size);
-  buffer_ = reinterpret_cast<char*>(
-      mmap(NULL, end_, PROT_READ, MAP_PRIVATE, fd_, 0));
+  buffer_ = reinterpret_cast<char*>(
+      mmap(nullptr, end_, PROT_READ, MAP_PRIVATE, fd_, 0));
   PADDLE_ENFORCE_NE(buffer_,
                     MAP_FAILED,
@@ -2401,11 +2401,12 @@ static void parser_log_key(const std::string& log_key,
                            uint32_t* cmatch,
                            uint32_t* rank) {
   std::string searchid_str = log_key.substr(16, 16);
-  *search_id = static_cast<uint64_t>(strtoull(searchid_str.c_str(), NULL, 16));
+  *search_id =
+      static_cast<uint64_t>(strtoull(searchid_str.c_str(), nullptr, 16));
   std::string cmatch_str = log_key.substr(11, 3);
-  *cmatch = static_cast<uint32_t>(strtoul(cmatch_str.c_str(), NULL, 16));
+  *cmatch = static_cast<uint32_t>(strtoul(cmatch_str.c_str(), nullptr, 16));
   std::string rank_str = log_key.substr(14, 2);
-  *rank = static_cast<uint32_t>(strtoul(rank_str.c_str(), NULL, 16));
+  *rank = static_cast<uint32_t>(strtoul(rank_str.c_str(), nullptr, 16));
 }

 bool SlotRecordInMemoryDataFeed::ParseOneInstance(const std::string& line,
paddle/fluid/framework/io/crypto/aes_cipher.cc
@@ -187,38 +187,42 @@ void AESCipher::BuildCipher(
     m_cipher->reset(new CryptoPP::ECB_Mode<CryptoPP::AES>::Encryption);
     m_filter->reset(new CryptoPP::StreamTransformationFilter(
         *(*m_cipher).get(),
-        NULL,
+        nullptr,
         CryptoPP::BlockPaddingSchemeDef::PKCS_PADDING));
   } else if (aes_cipher_name_ == "AES_ECB_PKCSPadding" && !for_encrypt) {
     m_cipher->reset(new CryptoPP::ECB_Mode<CryptoPP::AES>::Decryption);
     m_filter->reset(new CryptoPP::StreamTransformationFilter(
         *(*m_cipher).get(),
-        NULL,
+        nullptr,
         CryptoPP::BlockPaddingSchemeDef::PKCS_PADDING));
   } else if (aes_cipher_name_ == "AES_CBC_PKCSPadding" && for_encrypt) {
     m_cipher->reset(new CryptoPP::CBC_Mode<CryptoPP::AES>::Encryption);
     *need_iv = true;
     m_filter->reset(new CryptoPP::StreamTransformationFilter(
         *(*m_cipher).get(),
-        NULL,
+        nullptr,
         CryptoPP::BlockPaddingSchemeDef::PKCS_PADDING));
   } else if (aes_cipher_name_ == "AES_CBC_PKCSPadding" && !for_encrypt) {
     m_cipher->reset(new CryptoPP::CBC_Mode<CryptoPP::AES>::Decryption);
     *need_iv = true;
     m_filter->reset(new CryptoPP::StreamTransformationFilter(
         *(*m_cipher).get(),
-        NULL,
+        nullptr,
         CryptoPP::BlockPaddingSchemeDef::PKCS_PADDING));
   } else if (aes_cipher_name_ == "AES_CTR_NoPadding" && for_encrypt) {
     m_cipher->reset(new CryptoPP::CTR_Mode<CryptoPP::AES>::Encryption);
     *need_iv = true;
     m_filter->reset(new CryptoPP::StreamTransformationFilter(
-        *(*m_cipher).get(), NULL, CryptoPP::BlockPaddingSchemeDef::NO_PADDING));
+        *(*m_cipher).get(),
+        nullptr,
+        CryptoPP::BlockPaddingSchemeDef::NO_PADDING));
   } else if (aes_cipher_name_ == "AES_CTR_NoPadding" && !for_encrypt) {
     m_cipher->reset(new CryptoPP::CTR_Mode<CryptoPP::AES>::Decryption);
     *need_iv = true;
     m_filter->reset(new CryptoPP::StreamTransformationFilter(
-        *(*m_cipher).get(), NULL, CryptoPP::BlockPaddingSchemeDef::NO_PADDING));
+        *(*m_cipher).get(),
+        nullptr,
+        CryptoPP::BlockPaddingSchemeDef::NO_PADDING));
   } else {
     PADDLE_THROW(paddle::platform::errors::Unimplemented(
         "Create cipher error. "
@@ -236,7 +240,7 @@ void AESCipher::BuildAuthEncCipher(
   *need_iv = true;
   m_filter->reset(new CryptoPP::AuthenticatedEncryptionFilter(
       *(*m_cipher).get(),
-      NULL,
+      nullptr,
      false,
      tag_size_ / 8,
      CryptoPP::DEFAULT_CHANNEL,
@@ -258,7 +262,7 @@ void AESCipher::BuildAuthDecCipher(
   *need_iv = true;
   m_filter->reset(new CryptoPP::AuthenticatedDecryptionFilter(
       *(*m_cipher).get(),
-      NULL,
+      nullptr,
      CryptoPP::AuthenticatedDecryptionFilter::DEFAULT_FLAGS,
      tag_size_ / 8,
      CryptoPP::BlockPaddingSchemeDef::NO_PADDING));
paddle/fluid/framework/io/fs.cc
@@ -60,7 +60,7 @@ static std::shared_ptr<FILE> fs_open_internal(const std::string& path,
                                               bool is_pipe,
                                               const std::string& mode,
                                               size_t buffer_size,
-                                              int* err_no = 0) {
+                                              int* err_no = nullptr) {
   std::shared_ptr<FILE> fp = nullptr;
   if (!is_pipe) {
paddle/fluid/framework/io/shell.cc
@@ -82,7 +82,7 @@ static int close_open_fds_internal() {
       break;
     }

-    linux_dirent* entry = NULL;
+    linux_dirent* entry = nullptr;

     for (int offset = 0; offset < bytes; offset += entry->d_reclen) {
       entry = reinterpret_cast<linux_dirent*>(buffer + offset);
@@ -140,9 +140,9 @@ static int shell_popen_fork_internal(const char* real_cmd,
   close_open_fds_internal();
 #if defined(PADDLE_WITH_MUSL)
-  PCHECK(execl("/bin/sh", "sh", "-c", real_cmd, NULL) >= 0);
+  PCHECK(execl("/bin/sh", "sh", "-c", real_cmd, nullptr) >= 0);
 #else
-  PCHECK(execl("/bin/bash", "bash", "-c", real_cmd, NULL) >= 0);
+  PCHECK(execl("/bin/bash", "bash", "-c", real_cmd, nullptr) >= 0);
 #endif
   // Note: just for compilation. the child don't run this line.
   _exit(0);
@@ -179,7 +179,7 @@ std::shared_ptr<FILE> shell_popen(const std::string& cmd,
   bool do_write = mode == "w";
   if (!(do_read || do_write)) {
     *err_no = -1;
-    return NULL;
+    return nullptr;
   }

   VLOG(3) << "Opening pipe[" << cmd << "] with mode[" << mode << "]";
@@ -189,7 +189,7 @@ std::shared_ptr<FILE> shell_popen(const std::string& cmd,
   int pipe_fds[2];
   if (pipe(pipe_fds) != 0) {
     *err_no = -1;
-    return NULL;
+    return nullptr;
   }
   int parent_end = 0;
   int child_end = 0;
@@ -212,11 +212,11 @@ std::shared_ptr<FILE> shell_popen(const std::string& cmd,
   close(child_end);

-  FILE* fp = NULL;
-  if ((fp = fdopen(parent_end, mode.c_str())) == NULL) {
+  FILE* fp = nullptr;
+  if ((fp = fdopen(parent_end, mode.c_str())) == nullptr) {
     *err_no = -1;
     signal(SIGCHLD, old_handler);
-    return NULL;
+    return nullptr;
   }
   return {fp, [cmd, child_pid, old_handler, err_no, status](FILE* fp) {
@@ -281,7 +281,7 @@ static int shell_p2open_fork_internal(const char* real_cmd,
   }
   close_open_fds_internal();
-  if (execl("/bin/sh", "sh", "-c", real_cmd, NULL) < 0) {
+  if (execl("/bin/sh", "sh", "-c", real_cmd, nullptr) < 0) {
     return -1;
   }
   exit(127);
@@ -302,10 +302,10 @@ std::pair<std::shared_ptr<FILE>, std::shared_ptr<FILE>> shell_p2open(
   int pipein_fds[2];
   int pipeout_fds[2];
   if (pipe(pipein_fds) != 0) {
-    return {NULL, NULL};
+    return {nullptr, nullptr};
   }
   if (pipe(pipeout_fds) != 0) {
-    return {NULL, NULL};
+    return {nullptr, nullptr};
   }
   int child_pid =
@@ -317,7 +317,7 @@ std::pair<std::shared_ptr<FILE>, std::shared_ptr<FILE>> shell_p2open(
   fcntl(pipeout_fds[1], F_SETFD, FD_CLOEXEC);

   std::shared_ptr<int> child_life = {
-      NULL, [child_pid, cmd](void*) {
+      nullptr, [child_pid, cmd](void*) {
         if (shell_verbose()) {
           LOG(INFO) << "Closing bidirectional pipe[" << cmd << "]";
         }
@@ -340,9 +340,9 @@ std::pair<std::shared_ptr<FILE>, std::shared_ptr<FILE>> shell_p2open(
   }};

   FILE* in_fp;
-  PCHECK((in_fp = fdopen(pipein_fds[0], "r")) != NULL);
+  PCHECK((in_fp = fdopen(pipein_fds[0], "r")) != nullptr);
   FILE* out_fp;
-  PCHECK((out_fp = fdopen(pipeout_fds[1], "w")) != NULL);
+  PCHECK((out_fp = fdopen(pipeout_fds[1], "w")) != nullptr);
   return {{in_fp, [child_life](FILE* fp) { PCHECK(fclose(fp) == 0); }},
           {out_fp, [child_life](FILE* fp) { PCHECK(fclose(fp) == 0); }}};
 #endif
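One detail worth noting in the shell.cc hunks: execl() is variadic and must be terminated by a null pointer. nullptr is a sound sentinel because the default argument conversions pass std::nullptr_t through "..." as void*, whereas a bare integer 0 would be passed as an int. A minimal sketch (the run_shell wrapper is hypothetical):

    #include <unistd.h>

    int run_shell(const char* cmd) {
      // The final execl argument terminates the variadic list and must be a
      // null pointer; nullptr is converted to void* when passed through "...".
      return execl("/bin/sh", "sh", "-c", cmd, nullptr);
    }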
paddle/fluid/framework/ir/multi_devices_graph_pass/fuse_all_reduce_op_pass.cc
@@ -291,7 +291,7 @@ class FuseAllReduceOpPass : public ir::Pass {
       const platform::BKCLCommunicator* multi_bkcl_ctxs,
 #endif
       ir::Graph* result) const {
-    details::FusedAllReduceOpHandle* op_handle = NULL;
+    details::FusedAllReduceOpHandle* op_handle = nullptr;
     if (is_grad_merge) {
 #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
       op_handle = new details::FusedGradMergeAllReduceOpHandle(
paddle/fluid/inference/capi_exp/pd_config.cc
@@ -55,7 +55,7 @@ __pd_give PD_Config* PD_ConfigCreate() {
 }

 void PD_ConfigDestroy(__pd_take PD_Config* pd_config) {
-  if (pd_config != NULL) {
+  if (pd_config != nullptr) {
     delete reinterpret_cast<Config*>(pd_config);
   }
 }
paddle/fluid/inference/capi_exp/pd_predictor.cc
@@ -68,7 +68,7 @@ __pd_give PD_IOInfos* PD_PredictorGetInputInfos(
   PD_IOInfos* input_infos = new PD_IOInfos;
   input_infos->size = names.size();
-  input_infos->io_info = names.empty() ? NULL : new PD_IOInfo*[names.size()];
+  input_infos->io_info = names.empty() ? nullptr : new PD_IOInfo*[names.size()];
   for (size_t i = 0; i < names.size(); i++) {
     const std::string& name = names[i];
     input_infos->io_info[i] = new PD_IOInfo;
@@ -99,7 +99,8 @@ __pd_give PD_IOInfos* PD_PredictorGetOutputInfos(
   PD_IOInfos* output_infos = new PD_IOInfos;
   output_infos->size = names.size();
-  output_infos->io_info = names.empty() ? NULL : new PD_IOInfo*[names.size()];
+  output_infos->io_info =
+      names.empty() ? nullptr : new PD_IOInfo*[names.size()];
   for (size_t i = 0; i < names.size(); i++) {
     const std::string& name = names[i];
     output_infos->io_info[i] = new PD_IOInfo;
paddle/fluid/inference/capi_exp/pd_utils.cc
@@ -20,27 +20,27 @@
 #define DESTROY_ONE_DIM_ARRAY(type) \
   void PD_OneDimArray##type##Destroy(__pd_take PD_OneDimArray##type* array) { \
-    if (array != NULL) { \
+    if (array != nullptr) { \
       delete[] array->data; \
       delete array; \
     } \
   }

 #define CONVERT_VEC_TO_ONE_DIM_ARRAY(type, Type, vec_type) \
   __pd_give PD_OneDimArray##Type* CvtVecToOneDimArray##Type( \
       const std::vector<vec_type>& vec) { \
     PD_OneDimArray##Type* array = new PD_OneDimArray##Type; \
     array->size = vec.size(); \
-    array->data = vec.empty() ? NULL : new type[vec.size()]; \
+    array->data = vec.empty() ? nullptr : new type[vec.size()]; \
     for (size_t index = 0; index < vec.size(); ++index) { \
       array->data[index] = vec[index]; \
     } \
     return array; \
   }

 #define CONVERT_ONE_DIM_ARRAY_TO_VEC(type, Type, vec_type) \
   std::vector<vec_type> CvtOneDimArrayToVec##Type( \
       __pd_keep const PD_OneDimArray##Type* array) { \
     std::vector<vec_type> vec; \
-    if (array != NULL) { \
+    if (array != nullptr) { \
       vec.resize(array->size); \
       for (size_t index = 0; index < array->size; ++index) { \
         vec[index] = array->data[index]; \
@@ -68,7 +68,7 @@ ONE_DIM_ARRAY_UTILS_FUNC_IMPL(int64_t, Int64, int64_t)
 #undef DESTROY_ONE_DIM_ARRAY

 void PD_OneDimArrayCstrDestroy(__pd_take PD_OneDimArrayCstr* array) {
-  if (array != NULL) {
+  if (array != nullptr) {
     if (array->size != 0) {
       for (size_t index = 0; index < array->size; ++index) {
         delete[] array->data[index];
@@ -80,11 +80,11 @@ void PD_OneDimArrayCstrDestroy(__pd_take PD_OneDimArrayCstr* array) {
 }

 void PD_CstrDestroy(__pd_take PD_Cstr* cstr) {
-  if (cstr != NULL) {
+  if (cstr != nullptr) {
     if (cstr->size != 0) {
       cstr->size = 0;
       delete[] cstr->data;
-      cstr->data = NULL;
+      cstr->data = nullptr;
     }
     delete cstr;
   }
@@ -95,7 +95,7 @@ __pd_give PD_OneDimArrayCstr* CvtVecToOneDimArrayCstr(
     const std::vector<std::string>& vec) {
   PD_OneDimArrayCstr* array = new PD_OneDimArrayCstr;
   array->size = vec.size();
-  array->data = vec.empty() ? NULL : new char*[vec.size()];
+  array->data = vec.empty() ? nullptr : new char*[vec.size()];
   for (size_t index = 0u; index < vec.size(); ++index) {
     array->data[index] = new char[vec[index].size() + 1];
     memcpy(array->data[index], vec[index].c_str(), vec[index].size() + 1);
@@ -116,7 +116,7 @@ __pd_give PD_Cstr* CvtStrToCstr(const std::string& str) {
   PD_Cstr* cstr = new PD_Cstr;
   if (str.empty()) {
     cstr->size = 0;
-    cstr->data = NULL;
+    cstr->data = nullptr;
   } else {
     cstr->size = str.length() + 1;
     cstr->data = new char[str.length() + 1];
@@ -128,7 +128,7 @@ __pd_give PD_Cstr* CvtStrToCstr(const std::string& str) {
 #define DESTROY_TWO_DIM_ARRAY(type) \
   void PD_TwoDimArray##type##Destroy(__pd_take PD_TwoDimArray##type* array) { \
-    if (array != NULL) { \
+    if (array != nullptr) { \
       if (array->size != 0) { \
         for (size_t index = 0; index < array->size; ++index) { \
           PD_OneDimArray##type##Destroy(array->data[index]); \
@@ -138,22 +138,23 @@ __pd_give PD_Cstr* CvtStrToCstr(const std::string& str) {
       delete array; \
     } \
   }

 #define CONVERT_VEC_TO_TWO_DIM_ARRAY(type, Type, vec_type) \
   __pd_give PD_TwoDimArray##Type* CvtVecToTwoDimArray##Type( \
       const std::vector<std::vector<vec_type>>& vec) { \
     PD_TwoDimArray##Type* array = new PD_TwoDimArray##Type; \
     array->size = vec.size(); \
-    array->data = vec.empty() ? NULL : new PD_OneDimArray##Type*[vec.size()]; \
+    array->data = \
+        vec.empty() ? nullptr : new PD_OneDimArray##Type*[vec.size()]; \
     for (size_t index = 0; index < vec.size(); ++index) { \
       array->data[index] = CvtVecToOneDimArray##Type(vec[index]); \
     } \
     return array; \
   }

 #define CONVERT_TWO_DIM_ARRAY_TO_VEC(type, Type, vec_type) \
   std::vector<std::vector<vec_type>> CvtTwoDimArrayToVec##Type( \
       __pd_keep const PD_TwoDimArray##Type* array) { \
     std::vector<std::vector<vec_type>> vec; \
-    if (array != NULL && array->size != 0) { \
+    if (array != nullptr && array->size != 0) { \
       vec.resize(array->size); \
       for (size_t index = 0; index < array->size; ++index) { \
         vec[index] = CvtOneDimArrayToVec##Type((array->data)[index]); \
@@ -182,17 +183,17 @@ extern "C" {
 #endif

 void PD_IOInfoDestroy(__pd_take PD_IOInfo* io_info) {
-  if (io_info != NULL) {
+  if (io_info != nullptr) {
     PD_CstrDestroy(io_info->name);
-    io_info->name = NULL;
+    io_info->name = nullptr;
     PD_OneDimArrayInt64Destroy(io_info->shape);
-    io_info->shape = NULL;
+    io_info->shape = nullptr;
     delete io_info;
   }
 }

 void PD_IOInfosDestroy(__pd_take PD_IOInfos* io_infos) {
-  if (io_infos != NULL) {
+  if (io_infos != nullptr) {
     if (io_infos->size != 0) {
       for (size_t index = 0; index < io_infos->size; ++index) {
         PD_IOInfoDestroy(io_infos->io_info[index]);
@@ -200,7 +201,7 @@ void PD_IOInfosDestroy(__pd_take PD_IOInfos* io_infos) {
       io_infos->size = 0;
     }
     delete[] io_infos->io_info;
-    io_infos->io_info = NULL;
+    io_infos->io_info = nullptr;
     delete io_infos;
   }
 }
paddle/fluid/memory/allocation/best_fit_allocator_test.cc
@@ -30,7 +30,7 @@ namespace allocation {
 class StubAllocation : public Allocation {
  public:
   explicit StubAllocation(size_t size)
-      : Allocation(0, size, platform::CPUPlace()) {}
+      : Allocation(nullptr, size, platform::CPUPlace()) {}
 };

 TEST(BestFitAllocator, test_allocation) {
paddle/fluid/memory/allocation/mmap_allocator.cc
@@ -269,7 +269,7 @@ std::shared_ptr<MemoryMapWriterAllocation> AllocateMemoryMapWriterAllocation(
       platform::errors::Unavailable(
           "Fruncate a file to a specified length failed!"));

-  void* ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+  void* ptr = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
   PADDLE_ENFORCE_NE(ptr,
                     MAP_FAILED,
                     platform::errors::Unavailable(
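A reminder relevant to the mmap hunks above: a null first argument asks the kernel to choose the mapping address, and failure is reported as MAP_FAILED ((void*)-1) rather than a null pointer, which is why the surrounding code checks against MAP_FAILED. A small sketch (the Map helper is hypothetical):

    #include <sys/mman.h>
    #include <cstddef>

    void* Map(std::size_t size, int fd) {
      // nullptr means "kernel picks the address"; compare the result against
      // MAP_FAILED, not nullptr, because mmap never returns a null pointer.
      return mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    }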
paddle/fluid/operators/pyramid_hash_op.cc
@@ -307,8 +307,8 @@ class CPUPyramidHashOPKernel : public framework::OpKernel<T> {
     top_offset.resize(offset.size());
     top_offset[0] = 0;

-    math::bloomfilter* _filter = NULL;
-    math::bloomfilter* _black_filter = NULL;
+    math::bloomfilter* _filter = nullptr;
+    math::bloomfilter* _black_filter = nullptr;
     if (use_filter) {
       if (white_list_len != 0) {
         _filter = (math::bloomfilter*)_blobs_1->data<float>();
paddle/fluid/platform/gen_comm_id_helper.cc
@@ -259,13 +259,13 @@ static int ConnectAddr(const std::string& ep, const CommHead head) {
   server_addr.sin_family = AF_INET;
   server_addr.sin_port = htons(port);

-  char* ip = NULL;
-  struct hostent* hp = NULL;
+  char* ip = nullptr;
+  struct hostent* hp = nullptr;

   // sleep for get_host_by_name_time seconds.
   for (int i = 0; 2 * i < FLAGS_get_host_by_name_time; i++) {
     hp = gethostbyname(host.c_str());
-    if (hp != NULL) {
+    if (hp != nullptr) {
       break;
     }
     std::this_thread::sleep_for(std::chrono::seconds(2));
@@ -276,7 +276,7 @@ static int ConnectAddr(const std::string& ep, const CommHead head) {
       platform::errors::InvalidArgument("Fail to get host by name %s.", host));

   int i = 0;
-  while (hp->h_addr_list[i] != NULL) {
+  while (hp->h_addr_list[i] != nullptr) {
     ip = inet_ntoa(*(struct in_addr*)hp->h_addr_list[i]);
     VLOG(3) << "gethostbyname host:" << host << "  ->ip: " << ip;
     break;
paddle/fluid/platform/init.cc
@@ -348,7 +348,7 @@ void DisableSignalHandler() {
     memset(&sig_action, 0, sizeof(sig_action));
     sigemptyset(&sig_action.sa_mask);
     sig_action.sa_handler = SIG_DFL;
-    sigaction(signal_number, &sig_action, NULL);
+    sigaction(signal_number, &sig_action, nullptr);
   }
 #endif
 }
@@ -367,10 +367,10 @@ void CreateDumpFile(LPCSTR lpstrDumpFilePathName,
   HANDLE hDumpFile = CreateFile(lpstrDumpFilePathName,
                                 GENERIC_WRITE,
                                 0,
-                                NULL,
+                                nullptr,
                                 CREATE_ALWAYS,
                                 FILE_ATTRIBUTE_NORMAL,
-                                NULL);
+                                nullptr);
   MINIDUMP_EXCEPTION_INFORMATION dumpInfo;
   dumpInfo.ExceptionPointers = pException;
   dumpInfo.ThreadId = GetCurrentThreadId();
@@ -384,8 +384,8 @@ void CreateDumpFile(LPCSTR lpstrDumpFilePathName,
                     hDumpFile,
                     MiniDumpWithPrivateReadWriteMemory,
                     &dumpInfo,
-                    NULL,
-                    NULL);
+                    nullptr,
+                    nullptr);
   CloseHandle(hDumpFile);
 }
paddle/fluid/platform/timer.cc
@@ -41,7 +41,7 @@ void Timer::Pause() {
 }

 void Timer::Resume() {
-  gettimeofday(&_start, NULL);
+  gettimeofday(&_start, nullptr);
   _paused = false;
 }
@@ -54,7 +54,7 @@ double Timer::ElapsedMS() { return _elapsed / 1000.0; }
 double Timer::ElapsedSec() { return _elapsed / 1000000.0; }

 int64_t Timer::Tickus() {
-  gettimeofday(&_now, NULL);
+  gettimeofday(&_now, nullptr);
   return (_now.tv_sec - _start.tv_sec) * 1000 * 1000L +
          (_now.tv_usec - _start.tv_usec);
 }
paddle/fluid/pybind/eager.cc
@@ -376,7 +376,7 @@ py::object ParsePyArray(
     numpy_value = py::object(
         py::handle(PyTuple_GET_ITEM(args, kw_order_map["value"] - 1)), true);
   } else {
-    if (flag_kwargs && kws_map["value"] != NULL) {
+    if (flag_kwargs && kws_map["value"] != nullptr) {
       numpy_value = py::object(py::handle(kws_map["value"]), true);
     } else {
       PADDLE_THROW(platform::errors::InvalidArgument(
@@ -403,7 +403,7 @@ paddle::platform::Place ParsePlace(
     place = CastPyArg2Place(PyTuple_GET_ITEM(args, kw_order_map["place"] - 1),
                             kw_order_map["place"] - 1);
   } else {
-    if (flag_kwargs && kws_map["place"] != NULL) {
+    if (flag_kwargs && kws_map["place"] != nullptr) {
       place = CastPyArg2Place(kws_map["place"], 0);
     } else {
       // default
@@ -425,7 +425,7 @@ std::shared_ptr<TensorDistAttr> ParseDistAttrArgs(
     dist_attr = CastPyArg2DistAttr(
         PyTuple_GET_ITEM(args, kw_order_map["dist_attr"] - 1),
         kw_order_map["dist_attr"] - 1);
-  } else if (flag_kwargs && kws_map["dist_attr"] != NULL) {
+  } else if (flag_kwargs && kws_map["dist_attr"] != nullptr) {
     dist_attr = CastPyArg2DistAttr(kws_map["dist_attr"], 0);
   }
   return dist_attr;
@@ -445,7 +445,7 @@ int ParseBooleanArgs(std::string key,
     res = static_cast<int>(CastPyArg2AttrBoolean(
         PyTuple_GET_ITEM(args, kw_order_map[key] - 1), kw_order_map[key] - 1));
   } else {
-    if (flag_kwargs && kws_map[key] != NULL) {
+    if (flag_kwargs && kws_map[key] != nullptr) {
       res = static_cast<int>(CastPyArg2AttrBoolean(kws_map[key], 0));
     }
   }
@@ -469,7 +469,7 @@ std::string ParseName(std::unordered_map<std::string, PyObject*> kws_map,
     }
   } else {
     if (flag_kwargs) {
-      if ((kws_map["name"] == NULL) || (kws_map["name"] == Py_None)) {
+      if ((kws_map["name"] == nullptr) || (kws_map["name"] == Py_None)) {
         act_name =
             egr::Controller::Instance().GenerateUniqueName(unique_name_prefix);
       } else {
@@ -581,7 +581,7 @@ void AutoInitTensorByTensor(TensorObject* py_tensor_ptr,
         CastPyArg2Tensor(PyTuple_GET_ITEM(args, kw_order_map["value"] - 1),
                          kw_order_map["value"] - 1);
   } else {
-    if (flag_kwargs && kws_map["value"] != NULL) {
+    if (flag_kwargs && kws_map["value"] != nullptr) {
       src_tensor = CastPyArg2Tensor(kws_map["value"], 0);
     } else {
       PADDLE_THROW(platform::errors::InvalidArgument(
@@ -610,7 +610,7 @@ void AutoInitTensorByTensor(TensorObject* py_tensor_ptr,
             PyTuple_GET_ITEM(args, kw_order_map["value"] - 1),
             kw_order_map["value"] - 1);
   } else {
-    if (flag_kwargs && kws_map["value"] != NULL) {
+    if (flag_kwargs && kws_map["value"] != nullptr) {
       src_tensor = CastPyArg2FrameworkTensor(kws_map["value"], 0);
     } else {
       PADDLE_THROW(platform::errors::InvalidArgument(
@@ -687,7 +687,7 @@ void AutoInitStringTensorByStringTensor(
         CastPyArg2Tensor(PyTuple_GET_ITEM(args, kw_order_map["value"] - 1),
                          kw_order_map["value"] - 1);
   } else {
-    if (flag_kwargs && kws_map["value"] != NULL) {
+    if (flag_kwargs && kws_map["value"] != nullptr) {
       src_tensor = CastPyArg2Tensor(kws_map["value"], 0);
     } else {
       PADDLE_THROW(platform::errors::InvalidArgument(
@@ -764,17 +764,17 @@ int TensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
   if (kwargs) flag_kwargs = true;

   // all kwargs
-  PyObject* kw_zero_copy = NULL;
-  PyObject* kw_persistable = NULL;
-  PyObject* kw_stop_gradient = NULL;
-  PyObject* kw_value = NULL;  // receive PyArray or Tensor
-  PyObject* kw_place = NULL;
-  PyObject* kw_name = NULL;
-  PyObject* kw_dims = NULL;
-  PyObject* kw_dtype = NULL;
-  PyObject* kw_type = NULL;
-  PyObject* kw_dist_attr = NULL;
+  PyObject* kw_zero_copy = nullptr;
+  PyObject* kw_persistable = nullptr;
+  PyObject* kw_stop_gradient = nullptr;
+  PyObject* kw_value = nullptr;  // receive PyArray or Tensor
+  PyObject* kw_place = nullptr;
+  PyObject* kw_name = nullptr;
+  PyObject* kw_dims = nullptr;
+  PyObject* kw_dtype = nullptr;
+  PyObject* kw_type = nullptr;
+  PyObject* kw_dist_attr = nullptr;

   // the keywords argument
   static char* kwlist[] = {const_cast<char*>("value"),
@@ -787,7 +787,7 @@ int TensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
                            const_cast<char*>("dtype"),
                            const_cast<char*>("type"),
                            const_cast<char*>("dist_attr"),
-                           NULL};
+                           nullptr};

   // 'O' Store a Python object (without any conversion) in a C object pointer,
   // '|' Indicates that the remaining arguments in the Python argument list are
@@ -856,7 +856,7 @@ int TensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
           egr::Controller::Instance().GetExpectedPlace());
       return 0;
     } else {  // no position args, all arguments are kwargs
-      if (kw_value != NULL) {
+      if (kw_value != nullptr) {
         if (pybind11::detail::npy_api::get().PyArray_Check_(kw_value)) {
           VLOG(6) << "Calling case3's or case4's initializer";
           AutoInitTensorByPyArray(
@@ -884,7 +884,7 @@ int TensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
               "Please check your input first and make sure you are on the "
               "right way."));
         }
-      } else if (kw_dtype != NULL &&
+      } else if (kw_dtype != nullptr &&
                  PyObject_TypeCheck(kw_dtype, g_vartype_pytype)) {
         VLOG(6) << "Calling case2's initializer";
@@ -1122,18 +1122,18 @@ int StringTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
   if (kwargs) flag_kwargs = true;

   // all kwargs
-  PyObject* kw_zero_copy = NULL;
-  PyObject* kw_value = NULL;  // receive PyArray or Tensor
-  PyObject* kw_name = NULL;
-  PyObject* kw_dims = NULL;
+  PyObject* kw_zero_copy = nullptr;
+  PyObject* kw_value = nullptr;  // receive PyArray or Tensor
+  PyObject* kw_name = nullptr;
+  PyObject* kw_dims = nullptr;

   // the keywords argument
   static char* kwlist[] = {const_cast<char*>("value"),
                            const_cast<char*>("zero_copy"),
                            const_cast<char*>("name"),
                            const_cast<char*>("dims"),
-                           NULL};
+                           nullptr};

   // 'O' Store a Python object (without any conversion) in a C object pointer,
   // '|' Indicates that the remaining arguments in the Python argument list are
   // optional.
@@ -1188,7 +1188,7 @@ int StringTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
           egr::Controller::Instance().GetExpectedPlace());
       return 0;
     } else {
-      if (kw_value != NULL) {
+      if (kw_value != nullptr) {
         if (pybind11::detail::npy_api::get().PyArray_Check_(kw_value)) {
           VLOG(6) << "Calling case3's or case4's string initializer";
           AutoInitStringTensorByPyArray(
@@ -1207,7 +1207,7 @@ int StringTensorInit(PyObject* self, PyObject* args, PyObject* kwargs) {
               "Please check your input first and make sure you are on the "
               "right way."));
         }
-      } else if (kw_dims != NULL) {
+      } else if (kw_dims != nullptr) {
         VLOG(6) << "Calling case2's string initializer.";
         std::unordered_map<std::string, Py_ssize_t> kw_order_map{{"dims", 1},
                                                                  {"name", 2}};
@@ -1311,7 +1311,7 @@ void AddPyMethodDefs(std::vector<PyMethodDef>* vector, PyMethodDef* methods) {
 }

 static void TensorDealloc(TensorObject* self) {
-  if (self->weakrefs != NULL)
+  if (self->weakrefs != nullptr)
     PyObject_ClearWeakRefs(reinterpret_cast<PyObject*>(self));
   self->tensor.~Tensor();
   Py_TYPE(self)->tp_free(reinterpret_cast<PyObject*>(self));
paddle/fluid/pybind/eager_functions.cc
@@ -1258,7 +1258,7 @@ static PyObject* eager_api_set_master_grads(PyObject* self,
     PADDLE_ENFORCE_NE(grad,
                       nullptr,
                       paddle::platform::errors::Fatal(
-                          "Detected NULL grad"
+                          "Detected nullptr grad"
                           "Please check if you have manually cleared"
                           "the grad inside autograd_meta"));
     if ((*grad).initialized() &&
         ((*grad).dtype() == phi::DataType::FLOAT16 ||
@@ -1278,90 +1278,90 @@ PyMethodDef variable_functions[] = {
     {"scale",
      (PyCFunction)(void (*)())eager_api_scale,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"_add_backward_final_hook",
      (PyCFunction)(void (*)())eager_api__add_backward_final_hook,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"run_backward",
      (PyCFunction)(void (*)())eager_api_run_backward,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"run_partial_grad",
      (PyCFunction)(void (*)())eager_api_run_partial_grad,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"_get_custom_operator_inplace_map",
      (PyCFunction)(void (*)(
          void))eager_api__get_custom_operator_inplace_reverse_idx,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"_run_custom_op",
      (PyCFunction)(void (*)())eager_api_run_custom_op,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"tensor_copy",
      (PyCFunction)(void (*)())eager_api_tensor_copy,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"get_all_grads",
      (PyCFunction)(void (*)())eager_api_get_all_grads,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"get_grads_lists",
      (PyCFunction)(void (*)())eager_api_get_grads_lists,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"get_grads_types",
      (PyCFunction)(void (*)())eager_api_get_grads_types,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"read_next_tensor_list",
      (PyCFunction)(void (*)())eager_api_read_next_tensor_list,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"jit_function_call",
      (PyCFunction)(void (*)())eager_api_jit_function_call,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     /**sparse functions**/
     {"sparse_coo_tensor",
      (PyCFunction)(void (*)())eager_api_sparse_coo_tensor,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"sparse_csr_tensor",
      (PyCFunction)(void (*)())eager_api_sparse_csr_tensor,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"register_saved_tensors_hooks",
      (PyCFunction)(void (*)())eager_api_register_saved_tensors_hooks,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"reset_saved_tensors_hooks",
      (PyCFunction)(void (*)())eager_api_reset_saved_tensors_hooks,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     /**amp functions**/
     {"set_master_grads",
      (PyCFunction)(void (*)())eager_api_set_master_grads,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     /**sparse functions**/
 #if defined(PADDLE_WITH_CUDA)
     {"async_read",
      (PyCFunction)(void (*)())eager_api_async_read,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"async_write",
      (PyCFunction)(void (*)())eager_api_async_write,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"to_uva_tensor",
      (PyCFunction)(void (*)())eager_api_to_uva_tensor,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
 #endif
-    {NULL, NULL, 0, NULL}};
+    {nullptr, nullptr, 0, nullptr}};

 void BindFunctions(PyObject* module) {
   if (PyModule_AddFunctions(module, variable_functions) < 0) {
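The PyMethodDef tables in this file (and the ones below) all end in a zero-filled entry because CPython scans the array until it finds an entry whose name slot is a null pointer; the commit accordingly rewrites those sentinels from {NULL, NULL, 0, NULL} to {nullptr, nullptr, 0, nullptr}. A self-contained sketch of the pattern (demo_methods and hello are hypothetical, not from this commit):

    #include <Python.h>

    static PyObject* hello(PyObject* self, PyObject* args) {
      Py_RETURN_NONE;  // a no-op method body, for illustration only
    }

    // CPython walks the table until ml_name is a null pointer, so the
    // zero-filled last entry acts as the terminator.
    static PyMethodDef demo_methods[] = {
        {"hello", hello, METH_VARARGS, nullptr},
        {nullptr, nullptr, 0, nullptr}};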
paddle/fluid/pybind/eager_math_op_patch.cc
@@ -1837,88 +1837,88 @@ PyMethodDef math_op_patch_methods[] = {
     {"__add__",
      (PyCFunction)(void (*)())tensor__add__method,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"__radd__",
      (PyCFunction)(void (*)())tensor__add__method,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"__sub__",
      (PyCFunction)(void (*)())tensor__sub__method,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"__rsub__",
      (PyCFunction)(void (*)())tensor__rsub__method,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"__mul__",
      (PyCFunction)(void (*)())tensor__mul__method,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"__rmul__",
      (PyCFunction)(void (*)())tensor__mul__method,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"__div__",
      (PyCFunction)(void (*)())tensor__div__method,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"__truediv__",
      (PyCFunction)(void (*)())tensor__div__method,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"__rdiv__",
      (PyCFunction)(void (*)())tensor__rdiv__method,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"__rtruediv__",
      (PyCFunction)(void (*)())tensor__rdiv__method,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"__floordiv__",
      (PyCFunction)(void (*)())tensor__floordiv__method,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"__pow__",
      (PyCFunction)(void (*)())tensor__pow__method,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"__rpow__",
      (PyCFunction)(void (*)())tensor__rpow__method,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"__mod__",
      (PyCFunction)(void (*)())tensor__mod__method,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"__matmul__",
      (PyCFunction)(void (*)())tensor__matmul__method,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"__gt__",
      (PyCFunction)(void (*)())tensor__gt__method,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"__ge__",
      (PyCFunction)(void (*)())tensor__ge__method,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"__lt__",
      (PyCFunction)(void (*)())tensor__lt__method,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"__le__",
      (PyCFunction)(void (*)())tensor__le__method,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"__eq__",
      (PyCFunction)(void (*)())tensor__eq__method,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"__ne__",
      (PyCFunction)(void (*)())tensor__ne__method,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
-    {NULL, NULL, 0, NULL}};
+     nullptr},
+    {nullptr, nullptr, 0, nullptr}};

 }  // namespace pybind
 }  // namespace paddle
paddle/fluid/pybind/eager_method.cc
@@ -716,7 +716,7 @@ static PyObject* tensor_clear_gradient(TensorObject* self,
     grad = egr::EagerUtils::mutable_grad(self->tensor);
     PADDLE_ENFORCE(grad != nullptr,
                    paddle::platform::errors::Fatal(
-                       "Detected NULL grad"
+                       "Detected nullptr grad"
                        "Please check if you have manually cleared"
                        "the grad inside autograd_meta"));
   } else {
@@ -773,7 +773,7 @@ static PyObject* tensor__zero_grads(TensorObject* self,
     paddle::Tensor* grad = egr::EagerUtils::mutable_grad(self->tensor);
     PADDLE_ENFORCE(grad != nullptr,
                    paddle::platform::errors::Fatal(
-                       "Detected NULL grad"
+                       "Detected nullptr grad"
                        "Please check if you have manually cleared"
                        "the grad inside autograd_meta"));
     if (grad->initialized()) {
@@ -1570,7 +1570,7 @@ static PyObject* tensor_register_grad_hook(TensorObject* self,
   if (autograd_meta && !autograd_meta->StopGradient()) {
     if (!autograd_meta->GetMutableGradNode()) {
-      VLOG(6) << "Detected NULL grad_node, Leaf tensor should have had "
+      VLOG(6) << "Detected nullptr grad_node, Leaf tensor should have had "
                  "grad_node with type: GradNodeAccumulation.";
       autograd_meta->SetGradNode(
           std::make_shared<egr::GradNodeAccumulation>(autograd_meta));
@@ -1666,7 +1666,7 @@ static PyObject* tensor_register_reduce_hook(TensorObject* self,
                      "gradient."));
   PADDLE_ENFORCE(
       grad_node.get() != nullptr,
-      paddle::platform::errors::Fatal("Detected NULL grad_node,"
+      paddle::platform::errors::Fatal("Detected nullptr grad_node,"
                                       "Leaf tensor should have had grad_node "
                                       "with type: GradNodeAccumulation."));
   PyObject* hook_func = PyTuple_GET_ITEM(args, 0);
@@ -2171,11 +2171,12 @@ static PyObject* tensor__grad_name(TensorObject* self,
                                    PyObject* kwargs) {
   EAGER_TRY
   paddle::Tensor* grad = egr::EagerUtils::mutable_grad(self->tensor);
-  PADDLE_ENFORCE_EQ(grad != nullptr,
-                    true,
-                    platform::errors::InvalidArgument(
-                        "Detected NULL grad. Please check if you have manually "
-                        "cleared the grad inside autograd_meta"));
+  PADDLE_ENFORCE_EQ(
+      grad != nullptr,
+      true,
+      platform::errors::InvalidArgument(
+          "Detected nullptr grad. Please check if you have manually "
+          "cleared the grad inside autograd_meta"));
   return ToPyObject(grad->name());
   EAGER_CATCH_AND_THROW_RETURN_NULL
 }
@@ -2185,11 +2186,12 @@ static PyObject* tensor__grad_value(TensorObject* self,
                                     PyObject* kwargs) {
   EAGER_TRY
   paddle::Tensor* grad = egr::EagerUtils::mutable_grad(self->tensor);
-  PADDLE_ENFORCE_EQ(grad != nullptr,
-                    true,
-                    platform::errors::InvalidArgument(
-                        "Detected NULL grad. Please check if you have manually "
-                        "cleared the grad inside autograd_meta"));
+  PADDLE_ENFORCE_EQ(
+      grad != nullptr,
+      true,
+      platform::errors::InvalidArgument(
+          "Detected nullptr grad. Please check if you have manually "
+          "cleared the grad inside autograd_meta"));
   if (!grad->defined()) {
     RETURN_PY_NONE
@@ -2210,11 +2212,12 @@ static PyObject* tensor__unset_fake_empty(TensorObject* self,
                                           PyObject* kwargs) {
   EAGER_TRY
   paddle::Tensor* grad = egr::EagerUtils::mutable_grad(self->tensor);
-  PADDLE_ENFORCE_EQ(grad != nullptr,
-                    true,
-                    platform::errors::InvalidArgument(
-                        "Detected NULL grad. Please check if you have manually "
-                        "cleared the grad inside autograd_meta"));
+  PADDLE_ENFORCE_EQ(
+      grad != nullptr,
+      true,
+      platform::errors::InvalidArgument(
+          "Detected nullptr grad. Please check if you have manually "
+          "cleared the grad inside autograd_meta"));
   bool is_leaf = egr::EagerUtils::IsLeafTensor(self->tensor);
   if (is_leaf) {
@@ -2357,20 +2360,20 @@ PyMethodDef variable_methods[] = {
     {"_is_initialized",
      (PyCFunction)(void (*)())tensor_method__is_initialized,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"_is_dense_tensor_hold_allocation",
      (PyCFunction)(void (*)(
          void))tensor_method__is_dense_tensor_hold_allocation,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"_copy_to",
      (PyCFunction)(void (*)())tensor_method__copy_to,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"copy_",
      (PyCFunction)(void (*)())tensor_method_copy_,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"clone",
      (PyCFunction)(void (*)())tensor_method_clone,
      METH_VARARGS | METH_KEYWORDS,
@@ -2378,11 +2381,11 @@ PyMethodDef variable_methods[] = {
     {"reconstruct_from_",
      (PyCFunction)(void (*)())tensor_method_reconstruct_from_,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"retain_grads",
      (PyCFunction)(void (*)())tensor_retain_grads,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"clear_gradient",
      (PyCFunction)(void (*)())tensor_clear_gradient,
      METH_VARARGS | METH_KEYWORDS,
@@ -2390,31 +2393,31 @@ PyMethodDef variable_methods[] = {
     {"is_dense",
      (PyCFunction)(void (*)())tensor_method_is_dense,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"is_dist",
      (PyCFunction)(void (*)())tensor_method_is_dist,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"_zero_grads",
      (PyCFunction)(void (*)())tensor__zero_grads,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"_share_buffer_to",
      (PyCFunction)(void (*)())tensor__share_buffer_to,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"_is_shared_buffer_with",
      (PyCFunction)(void (*)())tensor__is_shared_buffer_with,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"_share_underline_tensor_to",
      (PyCFunction)(void (*)())tensor__share_underline_tensor_to,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"_is_shared_underline_tensor_with",
      (PyCFunction)(void (*)())tensor__is_shared_underline_tensor_with,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"detach",
      (PyCFunction)(void (*)())tensor_method_detach,
      METH_VARARGS | METH_KEYWORDS,
@@ -2422,39 +2425,39 @@ PyMethodDef variable_methods[] = {
     {"detach_",
      (PyCFunction)(void (*)(void))tensor_method_detach_,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"get_tensor",
      (PyCFunction)(void (*)())tensor_method_get_underline_tensor,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"get_selected_rows",
      (PyCFunction)(void (*)())tensor_method_get_underline_selected_rows,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"_get_tensor_from_selected_rows",
      (PyCFunction)(void (*)())tensor_method__get_tensor_from_selected_rows,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"_getitem_index_not_tensor",
      (PyCFunction)(void (*)())tensor__getitem_index_not_tensor,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"_getitem_from_offset",
      (PyCFunction)(void (*)())tensor__getitem_from_offset,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"__setitem_eager_tensor__",
      (PyCFunction)(void (*)())tensor_method__setitem_eager_tensor,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"_register_grad_hook",
      (PyCFunction)(void (*)())tensor_register_grad_hook,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"_remove_grad_hook",
      (PyCFunction)(void (*)())tensor_remove_grad_hook,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"_register_backward_hook",
      (PyCFunction)(void (*)())tensor_register_reduce_hook,
      METH_VARARGS | METH_KEYWORDS,
@@ -2462,77 +2465,77 @@ PyMethodDef variable_methods[] = {
     {"_set_grad_type",
      (PyCFunction)(void (*)())tensor__set_grad_type,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"_clear",
      (PyCFunction)(void (*)())tensor__clear,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"_clear_dataptr",
      (PyCFunction)(void (*)())tensor__clear_dataptr,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"_copy_gradient_from",
      (PyCFunction)(void (*)())tensor__copy_gradient_from,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"_tensor_use_gpudnn",
      (PyCFunction)(void (*)())tensor__use_gpudnn,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     /** the methods to adapt old dygraph, will be removed in the future **/
     {"set_string_list",
      (PyCFunction)(void (*)())tensor_method_set_string_list,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"set_vocab",
      (PyCFunction)(void (*)())tensor_method_set_vocab,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"get_map_tensor",
      (PyCFunction)(void (*)())tensor_method_get_map_tensor,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     /***the method of sparse tensor****/
     {"nnz",
      (PyCFunction)(void (*)())tensor_method_get_non_zero_nums,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"indices",
      (PyCFunction)(void (*)())tensor_method_get_non_zero_indices,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"values",
      (PyCFunction)(void (*)())tensor_method_get_non_zero_elements,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"crows",
      (PyCFunction)(void (*)())tensor_method_get_non_zero_crows,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"cols",
      (PyCFunction)(void (*)())tensor_method_get_non_zero_cols,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"is_sparse",
      (PyCFunction)(void (*)())tensor_method_is_sparse,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"is_sparse_coo",
      (PyCFunction)(void (*)())tensor_method_is_sparse_coo,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"is_sparse_csr",
      (PyCFunction)(void (*)())tensor_method_is_sparse_csr,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"is_same_shape",
      (PyCFunction)(void (*)())tensor_method_is_same_shape,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"to_sparse_csr",
      (PyCFunction)(void (*)())tensor_method_to_sparse_csr,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"element_size",
      (PyCFunction)(void (*)())tensor_method_element_size,
      METH_VARARGS | METH_KEYWORDS,
@@ -2541,7 +2544,7 @@ PyMethodDef variable_methods[] = {
     {"_inplace_version",
      (PyCFunction)(void (*)())tensor__inplace_version,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"_bump_inplace_version",
      (PyCFunction)(void (*)())tensor__bump_inplace_version,
      METH_VARARGS | METH_KEYWORDS,
@@ -2549,80 +2552,80 @@ PyMethodDef variable_methods[] = {
     {"is_selected_rows",
      (PyCFunction)(void (*)())tensor_method_is_selected_rows,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"rows",
      (PyCFunction)(void (*)())tensor_method_get_rows,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"_reset_grad_inplace_version",
      (PyCFunction)(void (*)())tensor__reset_grad_inplace_version,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"_share_memory",
      (PyCFunction)(void (*)())tensor_method__share_memory,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"_offset",
      (PyCFunction)(void (*)())tensor__offset,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"_grad_name",
      (PyCFunction)(void (*)())tensor__grad_name,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"_grad_value",
      (PyCFunction)(void (*)())tensor__grad_value,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"_unset_fake_empty",
      (PyCFunction)(void (*)())tensor__unset_fake_empty,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"data_ptr",
      (PyCFunction)(void (*)())tensor_data_ptr,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"_grad_ivar",
      (PyCFunction)(void (*)())tensor__grad_ivar,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"contiguous",
      (PyCFunction)(void (*)(void))tensor_contiguous,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"is_contiguous",
      (PyCFunction)(void (*)(void))tensor_is_contiguous,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"get_strides",
      (PyCFunction)(void (*)(void))tensor_method_strides,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
 #if defined(PADDLE_WITH_CUDA)
     {"_tensor_uva",
      (PyCFunction)(void (*)())tensor_method__uva,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
 #endif
-    {NULL, NULL, 0, NULL}};
+    {nullptr, nullptr, 0, nullptr}};

 // variable_methods for core.eager.StringTensor
 PyMethodDef string_tensor_variable_methods[] = {
     {"numpy",
      (PyCFunction)(void (*)())tensor_method_numpy_for_string_tensor,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"_is_initialized",
      (PyCFunction)(void (*)())tensor_method__is_initialized,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     {"_is_string_tensor_hold_allocation",
      (PyCFunction)(void (*)(
          void))tensor_method__is_string_tensor_hold_allocation,
      METH_VARARGS | METH_KEYWORDS,
-     NULL},
+     nullptr},
     // TODO(zhoushunjie): Need to add _copy_to, copy_ for StringTensor.
-    {NULL, NULL, 0, NULL}};
+    {nullptr, nullptr, 0, nullptr}};

 }  // namespace pybind
 }  // namespace paddle
paddle/fluid/pybind/eager_py_layer.cc
@@ -662,13 +662,15 @@ int tensor_properties_set_materialize_grads(PyLayerObject* self,
   EAGER_CATCH_AND_THROW_RETURN_NEG
 }

-PyMethodDef pylayer_methods[] = {
-    {"name", (PyCFunction)(void (*)())pylayer_method_name, METH_NOARGS, NULL},
-    {"apply",
-     (PyCFunction)(void (*)())pylayer_method_apply,
-     METH_CLASS | METH_VARARGS | METH_KEYWORDS,
-     NULL},
-    {NULL, NULL, 0, NULL}};
+PyMethodDef pylayer_methods[] = {{"name",
+                                  (PyCFunction)(void (*)())pylayer_method_name,
+                                  METH_NOARGS,
+                                  nullptr},
+                                 {"apply",
+                                  (PyCFunction)(void (*)())pylayer_method_apply,
+                                  METH_CLASS | METH_VARARGS | METH_KEYWORDS,
+                                  nullptr},
+                                 {nullptr, nullptr, 0, nullptr}};

 struct PyGetSetDef pylayer_properties[] {
     {"container",
paddle/fluid/pybind/exception.cc
@@ -87,9 +87,9 @@ void BindException(pybind11::module* m) {
 void ThrowExceptionToPython(std::exception_ptr p) {
   static PyObject* EOFExceptionException =
-      PyErr_NewException("paddle.EOFException", PyExc_Exception, NULL);
+      PyErr_NewException("paddle.EOFException", PyExc_Exception, nullptr);
   static PyObject* EnforceNotMetException =
-      PyErr_NewException("paddle.EnforceNotMet", PyExc_Exception, NULL);
+      PyErr_NewException("paddle.EnforceNotMet", PyExc_Exception, nullptr);
   try {
     if (p) std::rethrow_exception(p);
   } catch (const platform::EOFException& e) {
paddle/fluid/pybind/jit.cc
@@ -120,7 +120,7 @@ static Py_tss_t eval_frame_callback_key = {0, 0};
 inline static PyObject *eval_frame_callback_get() {
   void *result = PyThread_tss_get(&eval_frame_callback_key);
-  if (unlikely(result == NULL)) {
+  if (unlikely(result == nullptr)) {
     Py_RETURN_NONE;
   } else {
     return reinterpret_cast<PyObject *>(result);
@@ -136,7 +136,7 @@ inline static PyObject *eval_frame_default(PyThreadState *tstate,
                                            FrameObject *frame,
                                            int throw_flag) {
 #if PY_VERSION_HEX >= 0x03090000
-  if (tstate == NULL) {
+  if (tstate == nullptr) {
     tstate = PyThreadState_GET();
   }
   return _PyEval_EvalFrameDefault(tstate, frame, throw_flag);
@@ -164,9 +164,9 @@ inline static PyObject *eval_custom_code(PyThreadState *tstate,
   nfrees = PyTuple_GET_SIZE(code->co_freevars);
 #endif

-  PyFrameObject *shadow = PyFrame_New(tstate, code, frame->f_globals, NULL);
-  if (shadow == NULL) {
-    return NULL;
+  PyFrameObject *shadow = PyFrame_New(tstate, code, frame->f_globals, nullptr);
+  if (shadow == nullptr) {
+    return nullptr;
   }

 #if PY_VERSION_HEX >= 0x030b0000
@@ -210,7 +210,7 @@ static PyObject *_custom_eval_frame(PyThreadState *tstate,
 #else
   if (PyFrame_FastToLocalsWithError(frame) < 0) {
 #endif
-    return NULL;
+    return nullptr;
   }

   // NOTE:(xiongkun): Handle GeneratorExit exception: (Spend a day)
@@ -241,10 +241,10 @@ static PyObject *_custom_eval_frame(PyThreadState *tstate,
   Py_DECREF(args);
   VLOG(7) << "After call eval_frame_function and decrease frame.";
   // result: GuardedCode
-  if (result == NULL) {
+  if (result == nullptr) {
     // internal exception
     VLOG(7) << "Error happened.";
-    return NULL;
+    return nullptr;
   } else if (result != Py_None) {
     // NOTE: Cache is not supported now
     PyCodeObject *code = reinterpret_cast<PyCodeObject *>(
@@ -354,7 +354,7 @@ PyMODINIT_FUNC PyInit__eval_frame() {
   Py_INCREF(Py_None);
   eval_frame_callback_set(Py_None);

-  return NULL;
+  return nullptr;
 }

 PyTypeObject *g_jit_function_pytype = nullptr;
paddle/phi/kernels/cpu/eigvals_kernel.cc
@@ -100,13 +100,13 @@ typename std::enable_if<std::is_floating_point<T>::value>::type LapackEigvals(
                           a.template data<T>(),
                           static_cast<int>(n_dim),
                           w_data,
-                          NULL,
+                          nullptr,
                           1,
-                          NULL,
+                          nullptr,
                           1,
                           work->template data<T>(),
                           static_cast<int>(work_mem / sizeof(T)),
-                          static_cast<T*>(NULL),
+                          static_cast<T*>(nullptr),
                           &info);

   std::string name = "phi::backend::dynload::dgeev_";
@@ -165,9 +165,9 @@ LapackEigvals(const Context& ctx,
                           a.template data<T>(),
                           static_cast<int>(n_dim),
                           output->template data<T>(),
-                          NULL,
+                          nullptr,
                           1,
-                          NULL,
+                          nullptr,
                           1,
                           work->template data<T>(),
                           static_cast<int>(work_mem / sizeof(T)),
@@ -222,14 +222,14 @@ void EigvalsKernel(const Context& ctx, const DenseTensor& x, DenseTensor* out) {
         static_cast<int>(n_dim),
         x_matrices[0].template data<T>(),
         static_cast<int>(n_dim),
-        NULL,
-        NULL,
+        nullptr,
+        nullptr,
         1,
-        NULL,
+        nullptr,
         1,
         &qwork,
         -1,
-        static_cast<dtype::Real<T>*>(NULL),
+        static_cast<dtype::Real<T>*>(nullptr),
         &info);
     int64_t lwork = static_cast<int64_t>(qwork);
paddle/phi/kernels/funcs/fc_functor.cc
@@ -66,7 +66,7 @@ void FCFunctor<DeviceContext, T>::operator()(const DeviceContext& context,
   } else {
     blas.MatMul(M, N, K, X, W, Y);
   }
-  if (B == NULL) {
+  if (B == nullptr) {
     if (padding_weights) {
 #ifdef PADDLE_WITH_MKLML
 #pragma omp parallel for
paddle/phi/kernels/funcs/gpc.cc
(Diff collapsed on the original page and not expanded; +143 -143.)
paddle/phi/kernels/funcs/jit/gen/sgd.cc
@@ -83,7 +83,7 @@ void SgdJitCode::genCode() {
   Label inner_loop;
   Label escape_loop;
-  mov(rax, 0);
+  mov(rax, 0);  // NOLINT
   L(inner_loop);
   {
     cmp(rax, num_groups);
paddle/phi/kernels/onednn/matmul_kernel.cc
@@ -407,7 +407,7 @@ class MulPrimitiveFactory {
   memory Reorder(const memory::desc& src_desc,
                  const memory::desc& dst_desc,
                  void* src_data,
-                 void* dst_data = NULL) {
+                 void* dst_data = nullptr) {
     auto src_mem = memory(src_desc, engine_, src_data);
     auto dst_mem = dst_data ? memory(dst_desc, engine_, dst_data)
                             : memory(dst_desc, engine_);
paddle/utils/string/string_helper.cc
@@ -77,7 +77,7 @@ char* LineFileReader::getdelim(FILE* f, char delim) {
     int code = feof(f);
     (void)code;
     assert(code);
-    return NULL;
+    return nullptr;
   }
 #else
   return NULL;
test/cpp/fluid/gather_test.cc
@@ -42,7 +42,7 @@ TEST(Gather, GatherData) {
   phi::CPUContext ctx(*cpu_place);
   phi::funcs::CPUGather<int>(ctx, *src, *index, output);
   delete cpu_place;
-  cpu_place = NULL;
+  cpu_place = nullptr;
   for (int i = 0; i < 4; ++i) EXPECT_EQ(p_output[i], i + 4);
   for (int i = 4; i < 8; ++i) EXPECT_EQ(p_output[i], i - 4);
test/cpp/fluid/math/im2col_test.cc
@@ -362,7 +362,7 @@ void benchIm2col(int ic, int ih, int iw, int fh, int fw, int ph, int pw) {
   constexpr int repeat = 100;
   auto GetCurrentMs = []() -> double {
     struct timeval time;
-    gettimeofday(&time, NULL);
+    gettimeofday(&time, nullptr);
     return 1e+3 * time.tv_sec + 1e-3 * time.tv_usec;
   };
   auto t1 = GetCurrentMs();
test/cpp/imperative/test_layer.cc
@@ -328,9 +328,9 @@ TEST(test_layer, test_varbase_basic) {
       new imperative::VarBase(true, "vin"));
   ASSERT_ANY_THROW(vin->MutableGradVar());
   ASSERT_NO_THROW(ASSERT_TRUE(dynamic_cast<framework::Variable*>(
-                      vin_with_grad->MutableGradVar()) != 0));
-  ASSERT_TRUE(dynamic_cast<framework::Variable*>(
-                  vin_with_grad->MutableGradVar()) != 0);
+                      vin_with_grad->MutableGradVar()) != nullptr));
+  ASSERT_TRUE(dynamic_cast<framework::Variable*>(
+                  vin_with_grad->MutableGradVar()) != nullptr);
   vin_with_grad->SetOverridedStopGradient(false);
   ASSERT_FALSE(vin_with_grad->OverridedStopGradient());
   ASSERT_NO_FATAL_FAILURE(vin_with_grad->SetPersistable(true));
test/cpp/phi/kernels/test_cpu_vec.cc
@@ -25,7 +25,7 @@ namespace tests {
 inline double GetCurrentUS() {
   struct timeval time;
-  gettimeofday(&time, NULL);
+  gettimeofday(&time, nullptr);
   return 1e+6 * time.tv_sec + time.tv_usec;
 }

 constexpr int repeat = 1000;