Commit df23c7c3 (unverified), authored on Nov 24, 2022 by PuQing and committed via GitHub on Nov 24, 2022.
[PHI decoupling] remove "paddle/fluid/platform/enforce.h" in phi (#48049)
Parent: e29c50c2

Showing 32 changed files with 633 additions and 574 deletions (+633, -574)
paddle/fluid/inference/check_symbol.sh  (+1, -1)
paddle/fluid/platform/CMakeLists.txt  (+0, -3)
paddle/fluid/platform/enforce.h  (+0, -530)
paddle/phi/backends/callback_manager.cc  (+1, -1)
paddle/phi/backends/dynload/cudnn.cc  (+1, -1)
paddle/phi/backends/dynload/cufft.cc  (+1, -1)
paddle/phi/backends/dynload/dynamic_loader.cc  (+1, -1)
paddle/phi/backends/dynload/miopen.cc  (+1, -1)
paddle/phi/backends/dynload/tensorrt.h  (+1, -1)
paddle/phi/backends/gpu/cuda/cuda_info.cc  (+1, -2)
paddle/phi/backends/gpu/gpu_context.cc  (+2, -2)
paddle/phi/backends/gpu/gpu_resources.cc  (+1, -2)
paddle/phi/backends/gpu/rocm/rocm_info.cc  (+2, -3)
paddle/phi/backends/xpu/enforce_xpu.h  (+1, -1)
paddle/phi/backends/xpu/xpu_header.h  (+1, -1)
paddle/phi/core/CMakeLists.txt  (+4, -0)
paddle/phi/core/cuda_stream.h  (+1, -2)
paddle/phi/core/enforce.h  (+595, -6)
paddle/phi/core/external_error.proto  (+1, -1)
paddle/phi/kernels/autotune/CMakeLists.txt  (+5, -2)
paddle/phi/kernels/funcs/concat_funcs.h  (+1, -1)
paddle/phi/kernels/funcs/cpu_vec.h  (+1, -1)
paddle/phi/kernels/funcs/cufft_util.h  (+1, -1)
paddle/phi/kernels/funcs/gru_compute.h  (+1, -1)
paddle/phi/kernels/funcs/hipfft_util.h  (+1, -1)
paddle/phi/kernels/funcs/lstm_compute.h  (+1, -1)
paddle/phi/kernels/funcs/math_function.h  (+1, -1)
paddle/phi/kernels/gpu/batch_norm_grad_kernel.cu  (+1, -1)
paddle/phi/kernels/gpu/batch_norm_kernel.cu  (+1, -1)
paddle/phi/kernels/gpu/cholesky_solve_kernel.cu  (+1, -1)
paddle/phi/kernels/gpu/class_center_sample_kernel.cu  (+1, -1)
tools/externalError/start.sh  (+1, -1)
paddle/fluid/inference/check_symbol.sh
@@ -18,7 +18,7 @@ lib=$1
 if [ $# -ne 1 ]; then echo "No input library"; exit -1 ; fi

 num_paddle_syms=$(nm -D "${lib}" | grep -c paddle)
-num_google_syms=$(nm -D "${lib}" | grep google | grep -v paddle | grep -v brpc | grep -c "T ")
+num_google_syms=$(nm -D "${lib}" | grep google | grep -v paddle | grep -v phi | grep -v brpc | grep -c "T ")

 if [ $num_paddle_syms -le 0 ]; then echo "Have no paddle symbols"; exit -1 ; fi
 if [ $num_google_syms -ge 1 ]; then echo "Have some google symbols"; exit -1 ; fi
paddle/fluid/platform/CMakeLists.txt
 proto_library(profiler_proto SRCS profiler.proto DEPS framework_proto
               simple_threadpool)
-if(WITH_GPU)
-  proto_library(external_error_proto SRCS external_error.proto)
-endif()

 if(WITH_PYTHON)
   py_proto_compile(profiler_py_proto SRCS profiler.proto)
   add_custom_target(profiler_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E
paddle/fluid/platform/enforce.h
@@ -36,8 +36,6 @@ limitations under the License. */
 #include <cusparse.h>
 #include <thrust/system/cuda/error.h>
 #include <thrust/system_error.h>
-
-#include "paddle/fluid/platform/external_error.pb.h"
 #endif  // PADDLE_WITH_CUDA

 #ifdef PADDLE_WITH_HIP

@@ -224,533 +222,5 @@ struct EOFException : public std::exception {
   END_HANDLE_THE_ERROR \
   } while (0)

Removed block (the NVIDIA/HIP external-error handling, which now lives with phi namespaces in paddle/phi/core/enforce.h):

/**************************************************************************/
/**************************** NVIDIA ERROR ********************************/
#ifdef PADDLE_WITH_CUDA

namespace details {

template <typename T>
struct ExternalApiType {};

#define DEFINE_EXTERNAL_API_TYPE(type, success_value, proto_type) \
  template <>                                                     \
  struct ExternalApiType<type> {                                  \
    using Type = type;                                            \
    static constexpr Type kSuccess = success_value;               \
    static constexpr const char* kTypeString = #proto_type;       \
    static constexpr platform::proto::ApiType kProtoType =        \
        platform::proto::ApiType::proto_type;                     \
  }

DEFINE_EXTERNAL_API_TYPE(cudaError_t, cudaSuccess, CUDA);
DEFINE_EXTERNAL_API_TYPE(curandStatus_t, CURAND_STATUS_SUCCESS, CURAND);
DEFINE_EXTERNAL_API_TYPE(cudnnStatus_t, CUDNN_STATUS_SUCCESS, CUDNN);
DEFINE_EXTERNAL_API_TYPE(cublasStatus_t, CUBLAS_STATUS_SUCCESS, CUBLAS);
DEFINE_EXTERNAL_API_TYPE(cusparseStatus_t, CUSPARSE_STATUS_SUCCESS, CUSPARSE);
DEFINE_EXTERNAL_API_TYPE(cusolverStatus_t, CUSOLVER_STATUS_SUCCESS, CUSOLVER);
DEFINE_EXTERNAL_API_TYPE(cufftResult_t, CUFFT_SUCCESS, CUFFT);
DEFINE_EXTERNAL_API_TYPE(CUresult, CUDA_SUCCESS, CU);

#if !defined(__APPLE__) && defined(PADDLE_WITH_NCCL)
DEFINE_EXTERNAL_API_TYPE(ncclResult_t, ncclSuccess, NCCL);
#endif

}  // namespace details

template <typename T>
inline const char* GetErrorMsgUrl(T status) {
  using __CUDA_STATUS_TYPE__ = decltype(status);
  platform::proto::ApiType proto_type =
      details::ExternalApiType<__CUDA_STATUS_TYPE__>::kProtoType;
  switch (proto_type) {
    case platform::proto::ApiType::CUDA:
    case platform::proto::ApiType::CU:
      return "https://docs.nvidia.com/cuda/cuda-runtime-api/"
             "group__CUDART__TYPES.html#group__CUDART__TYPES_"
             "1g3f51e3575c2178246db0a94a430e0038";
      break;
    case platform::proto::ApiType::CURAND:
      return "https://docs.nvidia.com/cuda/curand/"
             "group__HOST.html#group__HOST_1gb94a31d5c165858c96b6c18b70644437";
      break;
    case platform::proto::ApiType::CUDNN:
      return "https://docs.nvidia.com/deeplearning/cudnn/api/"
             "index.html#cudnnStatus_t";
      break;
    case platform::proto::ApiType::CUBLAS:
      return "https://docs.nvidia.com/cuda/cublas/index.html#cublasstatus_t";
      break;
    case platform::proto::ApiType::CUSOLVER:
      return "https://docs.nvidia.com/cuda/cusolver/"
             "index.html#cuSolverSPstatus";
      break;
    case platform::proto::ApiType::NCCL:
      return "https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/api/"
             "types.html#ncclresult-t";
      break;
    case platform::proto::ApiType::CUFFT:
      return "https://docs.nvidia.com/cuda/cufft/index.html#cufftresult";
    case platform::proto::ApiType::CUSPARSE:
      return "https://docs.nvidia.com/cuda/cusparse/"
             "index.html#cusparseStatus_t";
      break;
    default:
      return "Unknown type of External API, can't get error message URL!";
      break;
  }
}

template <typename T>
inline std::string GetExternalErrorMsg(T status) {
  std::ostringstream sout;
  bool _initSucceed = false;
  platform::proto::ExternalErrorDesc externalError;
  if (externalError.ByteSizeLong() == 0) {
    std::string filePath;
#if !defined(_WIN32)
    Dl_info info;
    if (dladdr(reinterpret_cast<void*>(GetCurrentTraceBackString), &info)) {
      std::string strModule(info.dli_fname);
      const size_t last_slash_idx = strModule.find_last_of("/");
      std::string compare_path = strModule.substr(strModule.length() - 6);
      if (std::string::npos != last_slash_idx) {
        strModule.erase(last_slash_idx, std::string::npos);
      }
      if (compare_path.compare("avx.so") == 0) {
        filePath =
            strModule +
            "/../include/third_party/externalError/data/externalErrorMsg.pb";
      } else {
        filePath = strModule +
                   "/../../third_party/externalError/data/externalErrorMsg.pb";
      }
    }
#else
    char buf[512];
    MEMORY_BASIC_INFORMATION mbi;
    HMODULE h_module =
        (::VirtualQuery(GetCurrentTraceBackString, &mbi, sizeof(mbi)) != 0)
            ? (HMODULE)mbi.AllocationBase
            : NULL;
    GetModuleFileName(h_module, buf, 512);
    std::string strModule(buf);
    const size_t last_slash_idx = strModule.find_last_of("\\");
    std::string compare_path = strModule.substr(strModule.length() - 7);
    if (std::string::npos != last_slash_idx) {
      strModule.erase(last_slash_idx, std::string::npos);
    }
    if (compare_path.compare("avx.pyd") == 0) {
      filePath = strModule +
                 "\\..\\include\\third_"
                 "party\\externalerror\\data\\externalErrorMsg.pb";
    } else {
      filePath =
          strModule +
          "\\..\\..\\third_party\\externalerror\\data\\externalErrorMsg.pb";
    }
#endif
    std::ifstream fin(filePath, std::ios::in | std::ios::binary);
    _initSucceed = externalError.ParseFromIstream(&fin);
  }
  using __CUDA_STATUS_TYPE__ = decltype(status);
  platform::proto::ApiType proto_type =
      details::ExternalApiType<__CUDA_STATUS_TYPE__>::kProtoType;
  if (_initSucceed) {
    for (int i = 0; i < externalError.errors_size(); ++i) {
      if (proto_type == externalError.errors(i).type()) {
        for (int j = 0; j < externalError.errors(i).messages_size(); ++j) {
          if (status == externalError.errors(i).messages(j).code()) {
            sout << "\n  [Hint: "
                 << externalError.errors(i).messages(j).message() << "]";
            return sout.str();
          }
        }
      }
    }
  }

  sout << "\n  [Hint: Please search for the error code(" << status
       << ") on website (" << GetErrorMsgUrl(status)
       << ") to get Nvidia's official solution and advice about "
       << details::ExternalApiType<__CUDA_STATUS_TYPE__>::kTypeString
       << " Error.]";
  return sout.str();
}

template std::string GetExternalErrorMsg<cudaError_t>(cudaError_t);
template std::string GetExternalErrorMsg<curandStatus_t>(curandStatus_t);
template std::string GetExternalErrorMsg<cudnnStatus_t>(cudnnStatus_t);
template std::string GetExternalErrorMsg<cublasStatus_t>(cublasStatus_t);
template std::string GetExternalErrorMsg<cusparseStatus_t>(cusparseStatus_t);
template std::string GetExternalErrorMsg<cusolverStatus_t>(cusolverStatus_t);
template std::string GetExternalErrorMsg<cufftResult_t>(cufftResult_t);
template std::string GetExternalErrorMsg<CUresult>(CUresult);
#if !defined(__APPLE__) && defined(PADDLE_WITH_NCCL)
template std::string GetExternalErrorMsg<ncclResult_t>(ncclResult_t);
#endif

/*************** CUDA ERROR ***************/
inline bool is_error(cudaError_t e) { return e != cudaSuccess; }

inline std::string build_nvidia_error_msg(cudaError_t e) {
  std::ostringstream sout;
  sout << "CUDA error(" << e << "), " << cudaGetErrorString(e) << ". "
       << GetExternalErrorMsg(e);
  return sout.str();
}

/*************** CURAND ERROR ***************/
inline bool is_error(curandStatus_t stat) {
  return stat != CURAND_STATUS_SUCCESS;
}

inline std::string build_nvidia_error_msg(curandStatus_t stat) {
  std::ostringstream sout;
  sout << "CURAND error(" << stat << "). " << GetExternalErrorMsg(stat);
  return sout.str();
}

/*************** CUDNN ERROR ***************/
inline bool is_error(cudnnStatus_t stat) {
  return stat != CUDNN_STATUS_SUCCESS;
}

inline std::string build_nvidia_error_msg(cudnnStatus_t stat) {
  std::ostringstream sout;
  sout << "CUDNN error(" << stat << "), "
       << phi::dynload::cudnnGetErrorString(stat) << ". "
       << GetExternalErrorMsg(stat);
  return sout.str();
}

/*************** CUBLAS ERROR ***************/
inline bool is_error(cublasStatus_t stat) {
  return stat != CUBLAS_STATUS_SUCCESS;
}

inline std::string build_nvidia_error_msg(cublasStatus_t stat) {
  std::ostringstream sout;
  sout << "CUBLAS error(" << stat << "). " << GetExternalErrorMsg(stat);
  return sout.str();
}

/*************** CUSPARSE ERROR ***************/
inline bool is_error(cusparseStatus_t stat) {
  return stat != CUSPARSE_STATUS_SUCCESS;
}

inline std::string build_nvidia_error_msg(cusparseStatus_t stat) {
  std::ostringstream sout;
  sout << "CUSparse error(" << stat << "). " << GetExternalErrorMsg(stat);
  return sout.str();
}

/*************** CUSOLVER ERROR ***************/
inline bool is_error(cusolverStatus_t stat) {
  return stat != CUSOLVER_STATUS_SUCCESS;
}

inline std::string build_nvidia_error_msg(cusolverStatus_t stat) {
  std::ostringstream sout;
  sout << "CUSOLVER error(" << stat << "). " << GetExternalErrorMsg(stat);
  return sout.str();
}

/*************** CUFFT ERROR ***************/
inline bool is_error(cufftResult_t stat) { return stat != CUFFT_SUCCESS; }

inline std::string build_nvidia_error_msg(cufftResult_t stat) {
  std::ostringstream sout;
  sout << "CUFFT error(" << stat << "). " << GetExternalErrorMsg(stat);
  return sout.str();
}

/*************** CUresult ERROR ***************/
inline bool is_error(CUresult stat) { return stat != CUDA_SUCCESS; }

inline std::string build_nvidia_error_msg(CUresult stat) {
  std::ostringstream sout;
  sout << "CU error(" << stat << "). " << GetExternalErrorMsg(stat);
  return sout.str();
}

/**************** NCCL ERROR ****************/
#if !defined(__APPLE__) && defined(PADDLE_WITH_NCCL)
inline bool is_error(ncclResult_t nccl_result) {
  return nccl_result != ncclSuccess;
}

inline std::string build_nvidia_error_msg(ncclResult_t nccl_result) {
  std::ostringstream sout;
  sout << "NCCL error(" << nccl_result << "), "
       << phi::dynload::ncclGetErrorString(nccl_result) << ". ";
  if (errno == ENOSPC || errno == EAGAIN) {
    std::string detail(strerror(errno));
    detail += "\nPlease try one of the following solutions:";
    detail += "\n1. export NCCL_SHM_DISABLE=1;";
    detail += "\n2. export NCCL_P2P_LEVEL=SYS;";
    detail +=
        "\n3. Increase shared memory by setting the -shm-size "
        "option when starting docker container, e.g., setting "
        " -shm-size=2g.\n";
    sout << " Detail: " + detail;
  }
  sout << GetExternalErrorMsg(nccl_result);
  return sout.str();
}
#endif  // not(__APPLE__) and PADDLE_WITH_NCCL

#define PADDLE_ENFORCE_GPU_SUCCESS(COND)                         \
  do {                                                           \
    auto __cond__ = (COND);                                      \
    using __CUDA_STATUS_TYPE__ = decltype(__cond__);             \
    constexpr auto __success_type__ =                            \
        ::paddle::platform::details::ExternalApiType<            \
            __CUDA_STATUS_TYPE__>::kSuccess;                     \
    if (UNLIKELY(__cond__ != __success_type__)) {                \
      auto __summary__ = phi::errors::External(                  \
          ::paddle::platform::build_nvidia_error_msg(__cond__)); \
      __THROW_ERROR_INTERNAL__(__summary__);                     \
    }                                                            \
  } while (0)

#define PADDLE_ENFORCE_CUDA_LAUNCH_SUCCESS(OP)                    \
  do {                                                            \
    auto res = cudaGetLastError();                                \
    if (UNLIKELY(res != cudaSuccess)) {                           \
      auto msg = ::paddle::platform::build_nvidia_error_msg(res); \
      PADDLE_THROW(platform::errors::Fatal(                       \
          "CUDA error after kernel (%s): %s", OP, msg));          \
    }                                                             \
  } while (0)

inline void retry_sleep(unsigned milliseconds) {
#ifdef _WIN32
  Sleep(milliseconds);
#else
  if (milliseconds < 1000) {
    // usleep argument must be less than 1,000,000. Reference:
    // https://pubs.opengroup.org/onlinepubs/7908799/xsh/usleep.html
    usleep(milliseconds * 1000);
  } else {
    // clip to sleep in seconds because we can not and don't have to
    // sleep for exact milliseconds
    sleep(milliseconds / 1000);
  }
#endif
}

#define PADDLE_RETRY_CUDA_SUCCESS(COND)                                 \
  do {                                                                  \
    auto __cond__ = (COND);                                             \
    int retry_count = 1;                                                \
    using __CUDA_STATUS_TYPE__ = decltype(__cond__);                    \
    constexpr auto __success_type__ =                                   \
        ::paddle::platform::details::ExternalApiType<                   \
            __CUDA_STATUS_TYPE__>::kSuccess;                            \
    while (UNLIKELY(__cond__ != __success_type__) && retry_count < 5) { \
      paddle::platform::retry_sleep(10000);                             \
      __cond__ = (COND);                                                \
      ++retry_count;                                                    \
    }                                                                   \
    if (UNLIKELY(__cond__ != __success_type__)) {                       \
      auto __summary__ = phi::errors::External(                         \
          ::paddle::platform::build_nvidia_error_msg(__cond__));        \
      __THROW_ERROR_INTERNAL__(__summary__);                            \
    }                                                                   \
  } while (0)

#undef DEFINE_EXTERNAL_API_TYPE
#endif  // PADDLE_WITH_CUDA

/**************************************************************************/
/***************************** HIP ERROR **********************************/
#ifdef PADDLE_WITH_HIP

/***** HIP ERROR *****/
inline bool is_error(hipError_t e) { return e != hipSuccess; }

inline std::string build_rocm_error_msg(hipError_t e) {
  std::ostringstream sout;
  sout << " Hip error(" << e << "), " << hipGetErrorString(e) << ".";
  return sout.str();
}

/***** HIPRAND ERROR *****/
inline bool is_error(hiprandStatus_t stat) {
  return stat != HIPRAND_STATUS_SUCCESS;
}

inline const char* hiprandGetErrorString(hiprandStatus_t stat) {
  switch (stat) {
    case HIPRAND_STATUS_SUCCESS:
      return "HIPRAND_STATUS_SUCCESS";
    case HIPRAND_STATUS_VERSION_MISMATCH:
      return "HIPRAND_STATUS_VERSION_MISMATCH";
    case HIPRAND_STATUS_NOT_INITIALIZED:
      return "HIPRAND_STATUS_NOT_INITIALIZED";
    case HIPRAND_STATUS_ALLOCATION_FAILED:
      return "HIPRAND_STATUS_ALLOCATION_FAILED";
    case HIPRAND_STATUS_TYPE_ERROR:
      return "HIPRAND_STATUS_TYPE_ERROR";
    case HIPRAND_STATUS_OUT_OF_RANGE:
      return "HIPRAND_STATUS_OUT_OF_RANGE";
    case HIPRAND_STATUS_LENGTH_NOT_MULTIPLE:
      return "HIPRAND_STATUS_LENGTH_NOT_MULTIPLE";
    case HIPRAND_STATUS_DOUBLE_PRECISION_REQUIRED:
      return "HIPRAND_STATUS_DOUBLE_PRECISION_REQUIRED";
    case HIPRAND_STATUS_LAUNCH_FAILURE:
      return "HIPRAND_STATUS_LAUNCH_FAILURE";
    case HIPRAND_STATUS_PREEXISTING_FAILURE:
      return "HIPRAND_STATUS_PREEXISTING_FAILURE";
    case HIPRAND_STATUS_INITIALIZATION_FAILED:
      return "HIPRAND_STATUS_INITIALIZATION_FAILED";
    case HIPRAND_STATUS_ARCH_MISMATCH:
      return "HIPRAND_STATUS_ARCH_MISMATCH";
    case HIPRAND_STATUS_INTERNAL_ERROR:
      return "HIPRAND_STATUS_INTERNAL_ERROR";
    case HIPRAND_STATUS_NOT_IMPLEMENTED:
      return "HIPRAND_STATUS_NOT_IMPLEMENTED";
    default:
      return "Unknown hiprand status";
  }
}

inline std::string build_rocm_error_msg(hiprandStatus_t stat) {
  std::string msg(" Hiprand error, ");
  return msg + hiprandGetErrorString(stat) + " ";
}

/***** MIOPEN ERROR *****/
inline bool is_error(miopenStatus_t stat) {
  return stat != miopenStatusSuccess;
}

inline std::string build_rocm_error_msg(miopenStatus_t stat) {
  std::string msg(" Miopen error, ");
  return msg + phi::dynload::miopenGetErrorString(stat) + " ";
}

/***** ROCBLAS ERROR *****/
inline bool is_error(rocblas_status stat) {
  return stat != rocblas_status_success;
}

inline const char* rocblasGetErrorString(rocblas_status stat) {
  switch (stat) {
    case rocblas_status_invalid_handle:
      return "rocblas_status_invalid_handle";
    case rocblas_status_memory_error:
      return "rocblas_status_memory_error";
    case rocblas_status_invalid_value:
      return "rocblas_status_invalid_value";
    case rocblas_status_not_implemented:
      return "rocblas_status_not_implemented";
    case rocblas_status_invalid_pointer:
      return "rocblas_status_invalid_pointer";
    case rocblas_status_invalid_size:
      return "rocblas_status_invalid_size";
    case rocblas_status_internal_error:
      return "rocblas_status_internal_error";
    default:
      return "Unknown cublas status";
  }
}

inline std::string build_rocm_error_msg(rocblas_status stat) {
  std::string msg(" Rocblas error, ");
  return msg + rocblasGetErrorString(stat) + " ";
}

/****** RCCL ERROR ******/
#if !defined(__APPLE__) && defined(PADDLE_WITH_RCCL)
inline bool is_error(ncclResult_t nccl_result) {
  return nccl_result != ncclSuccess;
}

inline std::string build_rocm_error_msg(ncclResult_t nccl_result) {
  std::string msg(" Rccl error, ");
  return msg + phi::dynload::ncclGetErrorString(nccl_result) + " ";
}
#endif  // not(__APPLE__) and PADDLE_WITH_NCCL

/***** HIPFFT ERROR *****/
inline bool is_error(hipfftResult_t stat) { return stat != HIPFFT_SUCCESS; }

inline std::string build_rocm_error_msg(hipfftResult_t stat) {
  std::string msg(" HIPFFT error, ");
  return msg + phi::dynload::hipfftGetErrorString(stat) + " ";
}

namespace details {

template <typename T>
struct ExternalApiType {};

#define DEFINE_EXTERNAL_API_TYPE(type, success_value) \
  template <>                                         \
  struct ExternalApiType<type> {                      \
    using Type = type;                                \
    static constexpr Type kSuccess = success_value;   \
  }

DEFINE_EXTERNAL_API_TYPE(hipError_t, hipSuccess);
DEFINE_EXTERNAL_API_TYPE(hiprandStatus_t, HIPRAND_STATUS_SUCCESS);
DEFINE_EXTERNAL_API_TYPE(miopenStatus_t, miopenStatusSuccess);
DEFINE_EXTERNAL_API_TYPE(rocblas_status, rocblas_status_success);
DEFINE_EXTERNAL_API_TYPE(hipfftResult_t, HIPFFT_SUCCESS);

#if !defined(__APPLE__) && defined(PADDLE_WITH_RCCL)
DEFINE_EXTERNAL_API_TYPE(ncclResult_t, ncclSuccess);
#endif

}  // namespace details

#define PADDLE_ENFORCE_GPU_SUCCESS(COND)                       \
  do {                                                         \
    auto __cond__ = (COND);                                    \
    using __CUDA_STATUS_TYPE__ = decltype(__cond__);           \
    constexpr auto __success_type__ =                          \
        ::paddle::platform::details::ExternalApiType<          \
            __CUDA_STATUS_TYPE__>::kSuccess;                   \
    if (UNLIKELY(__cond__ != __success_type__)) {              \
      auto __summary__ = phi::errors::External(                \
          ::paddle::platform::build_rocm_error_msg(__cond__)); \
      __THROW_ERROR_INTERNAL__(__summary__);                   \
    }                                                          \
  } while (0)

inline void retry_sleep(unsigned millisecond) {
#ifdef _WIN32
  Sleep(millisecond);
#else
  sleep(millisecond);
#endif
}

#define PADDLE_RETRY_CUDA_SUCCESS(COND)                                 \
  do {                                                                  \
    auto __cond__ = (COND);                                             \
    int retry_count = 1;                                                \
    using __CUDA_STATUS_TYPE__ = decltype(__cond__);                    \
    constexpr auto __success_type__ =                                   \
        ::paddle::platform::details::ExternalApiType<                   \
            __CUDA_STATUS_TYPE__>::kSuccess;                            \
    while (UNLIKELY(__cond__ != __success_type__) && retry_count < 5) { \
      ::paddle::platform::retry_sleep(10000);                           \
      __cond__ = (COND);                                                \
      ++retry_count;                                                    \
    }                                                                   \
    if (UNLIKELY(__cond__ != __success_type__)) {                       \
      auto __summary__ = phi::errors::External(                         \
          ::paddle::platform::build_rocm_error_msg(__cond__));          \
      __THROW_ERROR_INTERNAL__(__summary__);                            \
    }                                                                   \
  } while (0)

#undef DEFINE_EXTERNAL_API_TYPE
#endif  // PADDLE_WITH_HIP

 }  // namespace platform
 }  // namespace paddle
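The macros removed above keep their names and behavior; they are simply re-homed in paddle/phi/core/enforce.h (added further below). A minimal usage sketch, assuming a CUDA build; the helper function itself is illustrative and not part of this commit:

#include <cuda_runtime.h>

#include "paddle/phi/core/enforce.h"

// Hypothetical helper: allocate device memory and turn any non-cudaSuccess
// status into a phi::errors::External exception with a hint message.
void* AllocDevice(size_t bytes) {
  void* ptr = nullptr;
  // decltype(cudaMalloc(...)) is cudaError_t, so ExternalApiType<cudaError_t>
  // supplies kSuccess = cudaSuccess and the CUDA documentation URL.
  PADDLE_ENFORCE_GPU_SUCCESS(cudaMalloc(&ptr, bytes));
  return ptr;
}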
paddle/phi/backends/callback_manager.cc
@@ -17,8 +17,8 @@
 #include <ThreadPool.h>

 #include "paddle/fluid/platform/device/device_wrapper.h"
-#include "paddle/fluid/platform/enforce.h"
 #include "paddle/phi/backends/device_guard.h"
+#include "paddle/phi/core/enforce.h"

 namespace phi {
paddle/phi/backends/dynload/cudnn.cc
@@ -14,7 +14,7 @@ limitations under the License. */
 #include "paddle/phi/backends/dynload/cudnn.h"

-#include "paddle/fluid/platform/enforce.h"
+#include "paddle/phi/core/enforce.h"

 namespace phi {
 namespace dynload {
paddle/phi/backends/dynload/cufft.cc
@@ -14,7 +14,7 @@ limitations under the License. */
 #include "paddle/phi/backends/dynload/cufft.h"

-#include "paddle/fluid/platform/enforce.h"
+#include "paddle/phi/core/enforce.h"

 namespace phi {
 namespace dynload {
paddle/phi/backends/dynload/dynamic_loader.cc
@@ -17,8 +17,8 @@ limitations under the License. */
 #include <string>
 #include <vector>

-#include "paddle/fluid/platform/enforce.h"
 #include "paddle/phi/backends/dynload/cupti_lib_path.h"
+#include "paddle/phi/core/enforce.h"

 #if defined(_WIN32)
 #include <windows.h>
paddle/phi/backends/dynload/miopen.cc
@@ -14,7 +14,7 @@ limitations under the License. */
 #include "paddle/phi/backends/dynload/miopen.h"

-#include "paddle/fluid/platform/enforce.h"
+#include "paddle/phi/core/enforce.h"

 namespace phi {
 namespace dynload {
paddle/phi/backends/dynload/tensorrt.h
@@ -21,8 +21,8 @@ limitations under the License. */
 #include <mutex>  // NOLINT

-#include "paddle/fluid/platform/enforce.h"
 #include "paddle/phi/backends/dynload/dynamic_loader.h"
+#include "paddle/phi/core/enforce.h"

 namespace phi {
 namespace dynload {
paddle/phi/backends/gpu/cuda/cuda_info.cc
@@ -14,8 +14,7 @@
 #include "paddle/phi/backends/gpu/gpu_info.h"

-// TODO(phi): remove fluid headers.
-#include "paddle/fluid/platform/enforce.h"
+#include "paddle/phi/core/enforce.h"

 static std::once_flag g_device_props_size_init_flag;
 static std::vector<std::unique_ptr<std::once_flag>> g_device_props_init_flags;
paddle/phi/backends/gpu/gpu_context.cc
@@ -21,6 +21,7 @@ limitations under the License. */
 #include <future>
 #include <memory>
 #include <mutex>
+#include <unordered_map>

 #include "glog/logging.h"
 #include "paddle/phi/api/ext/exception.h"
@@ -54,8 +55,7 @@ limitations under the License. */
 // without eigen.
 #include "unsupported/Eigen/CXX11/Tensor"

-// TODO(phi): remove fluid header.
-#include "paddle/fluid/platform/enforce.h"
+#include "paddle/phi/core/enforce.h"

 namespace phi {
paddle/phi/backends/gpu/gpu_resources.cc
@@ -32,8 +32,7 @@
 #include "unsupported/Eigen/CXX11/Tensor"

-// TODO(phi): remove fluid header.
-#include "paddle/fluid/platform/enforce.h"
+#include "paddle/phi/core/enforce.h"

 namespace phi {
paddle/phi/backends/gpu/rocm/rocm_info.cc
@@ -16,12 +16,11 @@
 #include "paddle/phi/backends/gpu/gpu_info.h"

-// TODO(phi): remove fluid headers.
-#include "paddle/fluid/platform/enforce.h"
+#include "paddle/phi/core/enforce.h"

 static std::once_flag g_device_props_size_init_flag;
 static std::vector<std::unique_ptr<std::once_flag>> g_device_props_init_flags;
-static std::vector<paddle::gpuDeviceProp> g_device_props;
+static std::vector<phi::gpuDeviceProp> g_device_props;

 namespace phi {
 namespace backends {
paddle/phi/backends/xpu/enforce_xpu.h
@@ -14,8 +14,8 @@ limitations under the License. */
 #pragma once

-#include "paddle/fluid/platform/enforce.h"
 #include "paddle/phi/backends/xpu/xpu_header.h"
+#include "paddle/phi/core/enforce.h"

 #ifdef PADDLE_WITH_XPU_BKCL
 #include "xpu/bkcl.h"
 #endif
paddle/phi/backends/xpu/xpu_header.h
@@ -19,9 +19,9 @@ limitations under the License. */
 #include <string>
 #include <unordered_map>

-#include "paddle/fluid/platform/enforce.h"
 #include "paddle/phi/common/bfloat16.h"
 #include "paddle/phi/common/float16.h"
+#include "paddle/phi/core/enforce.h"
 #include "xpu/runtime.h"
 #include "xpu/runtime_ex.h"
 #include "xpu/xdnn.h"
paddle/phi/core/CMakeLists.txt
 # compatible utils used for fluid op system
 add_subdirectory(compat)

+if(WITH_GPU)
+  proto_library(external_error_proto SRCS external_error.proto)
+endif()
+
 cc_library(errors SRCS errors.cc)
 set(phi_enforce_deps errors flags)
 if(WITH_GPU)
paddle/phi/core/cuda_stream.h
@@ -28,8 +28,7 @@ using gpuStream_t = cudaStream_t;
 using gpuStream_t = hipStream_t;
 #endif

-// TODO(phi): remove fluid headers.
-#include "paddle/fluid/platform/enforce.h"
+#include "paddle/phi/core/enforce.h"

 namespace phi {
paddle/phi/core/enforce.h
 /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

@@ -28,21 +25,81 @@ limitations under the License. */
Resulting include block after the change:

#include <windows.h>  // GetModuleFileName, Sleep
#endif

#ifdef PADDLE_WITH_CUDA
#include <cublas_v2.h>
#include <cudnn.h>
#include <cufft.h>
#include <curand.h>
#include <cusparse.h>
#include <thrust/system/cuda/error.h>
#include <thrust/system_error.h>

#include "paddle/phi/core/external_error.pb.h"
#endif  // PADDLE_WITH_CUDA

#ifdef PADDLE_WITH_HIP
#include <hiprand.h>
#include <miopen/miopen.h>
#include <rocblas.h>
#include <thrust/system/hip/error.h>
#include <thrust/system_error.h>  // NOLINT
#endif

#include <fstream>
#include <iomanip>
#include <memory>
#include <sstream>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <utility>

#if !defined(_WIN32) && !defined(PADDLE_WITH_MUSL)
#include <execinfo.h>
#endif

#define GLOG_NO_ABBREVIATED_SEVERITIES  // msvc conflict logging with windows.h
#include "gflags/gflags.h"
#include "glog/logging.h"
#include "paddle/phi/core/errors.h"

#include "paddle/phi/backends/dynload/port.h"
#include "paddle/utils/string/printf.h"
#include "paddle/utils/string/to_string.h"

#ifdef PADDLE_WITH_CUDA
#include "paddle/phi/backends/dynload/cublas.h"
#include "paddle/phi/backends/dynload/cudnn.h"
#include "paddle/phi/backends/dynload/curand.h"
#include "paddle/phi/backends/dynload/cusolver.h"
#if !defined(__APPLE__) && defined(PADDLE_WITH_NCCL)
#include <error.h>

#include "paddle/phi/backends/dynload/nccl.h"
#endif  // __APPLE__
#endif  // PADDLE_WITH_CUDA

#ifdef PADDLE_WITH_HIP
#include "paddle/phi/backends/dynload/hipfft.h"
#include "paddle/phi/backends/dynload/hiprand.h"
#include "paddle/phi/backends/dynload/miopen.h"
#include "paddle/phi/backends/dynload/rocblas.h"
#if !defined(__APPLE__) && defined(PADDLE_WITH_RCCL)
#include <error.h>  // NOLINT

#include "paddle/phi/backends/dynload/rccl.h"
#endif  // __APPLE__
#endif  // PADDLE_WITH_HIP

// Note: these headers for simplify demangle type string
#include "paddle/phi/core/type_defs.h"
// Note: this header for simplify HIP and CUDA type string
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
#include "paddle/phi/backends/gpu/gpu_types.h"
#endif
#include "paddle/fluid/platform/flags.h"

#include "paddle/utils/variant.h"

DECLARE_int32(call_stack_level);

@@ -51,6 +108,10 @@ namespace phi {
 class ErrorSummary;
 }  // namespace phi

+namespace phi {
+namespace proto {}  // namespace proto
+}  // namespace phi
+
 namespace phi {
 namespace enforce {

@@ -420,7 +481,7 @@ struct EnforceNotMet : public std::exception {
 * the direct use of paddle::get by CI rule.
 *
 * Parameters:
 *     __TYPE: the target variable type
 *     __VALUE: the target variable to get
 *
 * Examples:

@@ -480,6 +541,534 @@ DEFINE_SAFE_PADDLE_GET(InputType&&,
     phi::enforce::details::SafeBoostGetMutable<__TYPE>( \
         __VALUE, #__VALUE, __FILE__, __LINE__)

Added block (the external-error handling previously in paddle/fluid/platform/enforce.h, with platform::proto replaced by phi::proto and the helpers now living in phi::enforce):

/**************************************************************************/
/**************************** NVIDIA ERROR ********************************/
#ifdef PADDLE_WITH_CUDA

namespace details {

template <typename T>
struct ExternalApiType {};

#define DEFINE_EXTERNAL_API_TYPE(type, success_value, proto_type) \
  template <>                                                     \
  struct ExternalApiType<type> {                                  \
    using Type = type;                                            \
    static constexpr Type kSuccess = success_value;               \
    static constexpr const char* kTypeString = #proto_type;       \
    static constexpr phi::proto::ApiType kProtoType =             \
        phi::proto::ApiType::proto_type;                          \
  }

DEFINE_EXTERNAL_API_TYPE(cudaError_t, cudaSuccess, CUDA);
DEFINE_EXTERNAL_API_TYPE(curandStatus_t, CURAND_STATUS_SUCCESS, CURAND);
DEFINE_EXTERNAL_API_TYPE(cudnnStatus_t, CUDNN_STATUS_SUCCESS, CUDNN);
DEFINE_EXTERNAL_API_TYPE(cublasStatus_t, CUBLAS_STATUS_SUCCESS, CUBLAS);
DEFINE_EXTERNAL_API_TYPE(cusparseStatus_t, CUSPARSE_STATUS_SUCCESS, CUSPARSE);
DEFINE_EXTERNAL_API_TYPE(cusolverStatus_t, CUSOLVER_STATUS_SUCCESS, CUSOLVER);
DEFINE_EXTERNAL_API_TYPE(cufftResult_t, CUFFT_SUCCESS, CUFFT);
DEFINE_EXTERNAL_API_TYPE(CUresult, CUDA_SUCCESS, CU);

#if !defined(__APPLE__) && defined(PADDLE_WITH_NCCL)
DEFINE_EXTERNAL_API_TYPE(ncclResult_t, ncclSuccess, NCCL);
#endif

}  // namespace details

template <typename T>
inline const char* GetErrorMsgUrl(T status) {
  using __CUDA_STATUS_TYPE__ = decltype(status);
  phi::proto::ApiType proto_type =
      details::ExternalApiType<__CUDA_STATUS_TYPE__>::kProtoType;
  switch (proto_type) {
    case phi::proto::ApiType::CUDA:
    case phi::proto::ApiType::CU:
      return "https://docs.nvidia.com/cuda/cuda-runtime-api/"
             "group__CUDART__TYPES.html#group__CUDART__TYPES_"
             "1g3f51e3575c2178246db0a94a430e0038";
      break;
    case phi::proto::ApiType::CURAND:
      return "https://docs.nvidia.com/cuda/curand/"
             "group__HOST.html#group__HOST_1gb94a31d5c165858c96b6c18b70644437";
      break;
    case phi::proto::ApiType::CUDNN:
      return "https://docs.nvidia.com/deeplearning/cudnn/api/"
             "index.html#cudnnStatus_t";
      break;
    case phi::proto::ApiType::CUBLAS:
      return "https://docs.nvidia.com/cuda/cublas/index.html#cublasstatus_t";
      break;
    case phi::proto::ApiType::CUSOLVER:
      return "https://docs.nvidia.com/cuda/cusolver/"
             "index.html#cuSolverSPstatus";
      break;
    case phi::proto::ApiType::NCCL:
      return "https://docs.nvidia.com/deeplearning/nccl/user-guide/docs/api/"
             "types.html#ncclresult-t";
      break;
    case phi::proto::ApiType::CUFFT:
      return "https://docs.nvidia.com/cuda/cufft/index.html#cufftresult";
    case phi::proto::ApiType::CUSPARSE:
      return "https://docs.nvidia.com/cuda/cusparse/"
             "index.html#cusparseStatus_t";
      break;
    default:
      return "Unknown type of External API, can't get error message URL!";
      break;
  }
}

template <typename T>
inline std::string GetExternalErrorMsg(T status) {
  std::ostringstream sout;
  bool _initSucceed = false;
  phi::proto::ExternalErrorDesc externalError;
  if (externalError.ByteSizeLong() == 0) {
    std::string filePath;
#if !defined(_WIN32)
    Dl_info info;
    if (dladdr(reinterpret_cast<void*>(GetCurrentTraceBackString), &info)) {
      std::string strModule(info.dli_fname);
      const size_t last_slash_idx = strModule.find_last_of("/");
      std::string compare_path = strModule.substr(strModule.length() - 6);
      if (std::string::npos != last_slash_idx) {
        strModule.erase(last_slash_idx, std::string::npos);
      }
      if (compare_path.compare("avx.so") == 0) {
        filePath =
            strModule +
            "/../include/third_party/externalError/data/externalErrorMsg.pb";
      } else {
        filePath = strModule +
                   "/../../third_party/externalError/data/externalErrorMsg.pb";
      }
    }
#else
    char buf[512];
    MEMORY_BASIC_INFORMATION mbi;
    HMODULE h_module =
        (::VirtualQuery(GetCurrentTraceBackString, &mbi, sizeof(mbi)) != 0)
            ? (HMODULE)mbi.AllocationBase
            : NULL;
    GetModuleFileName(h_module, buf, 512);
    std::string strModule(buf);
    const size_t last_slash_idx = strModule.find_last_of("\\");
    std::string compare_path = strModule.substr(strModule.length() - 7);
    if (std::string::npos != last_slash_idx) {
      strModule.erase(last_slash_idx, std::string::npos);
    }
    if (compare_path.compare("avx.pyd") == 0) {
      filePath = strModule +
                 "\\..\\include\\third_"
                 "party\\externalerror\\data\\externalErrorMsg.pb";
    } else {
      filePath =
          strModule +
          "\\..\\..\\third_party\\externalerror\\data\\externalErrorMsg.pb";
    }
#endif
    std::ifstream fin(filePath, std::ios::in | std::ios::binary);
    _initSucceed = externalError.ParseFromIstream(&fin);
  }
  using __CUDA_STATUS_TYPE__ = decltype(status);
  phi::proto::ApiType proto_type =
      details::ExternalApiType<__CUDA_STATUS_TYPE__>::kProtoType;
  if (_initSucceed) {
    for (int i = 0; i < externalError.errors_size(); ++i) {
      if (proto_type == externalError.errors(i).type()) {
        for (int j = 0; j < externalError.errors(i).messages_size(); ++j) {
          if (status == externalError.errors(i).messages(j).code()) {
            sout << "\n  [Hint: "
                 << externalError.errors(i).messages(j).message() << "]";
            return sout.str();
          }
        }
      }
    }
  }

  sout << "\n  [Hint: Please search for the error code(" << status
       << ") on website (" << GetErrorMsgUrl(status)
       << ") to get Nvidia's official solution and advice about "
       << details::ExternalApiType<__CUDA_STATUS_TYPE__>::kTypeString
       << " Error.]";
  return sout.str();
}

template std::string GetExternalErrorMsg<cudaError_t>(cudaError_t);
template std::string GetExternalErrorMsg<curandStatus_t>(curandStatus_t);
template std::string GetExternalErrorMsg<cudnnStatus_t>(cudnnStatus_t);
template std::string GetExternalErrorMsg<cublasStatus_t>(cublasStatus_t);
template std::string GetExternalErrorMsg<cusparseStatus_t>(cusparseStatus_t);
template std::string GetExternalErrorMsg<cusolverStatus_t>(cusolverStatus_t);
template std::string GetExternalErrorMsg<cufftResult_t>(cufftResult_t);
template std::string GetExternalErrorMsg<CUresult>(CUresult);
#if !defined(__APPLE__) && defined(PADDLE_WITH_NCCL)
template std::string GetExternalErrorMsg<ncclResult_t>(ncclResult_t);
#endif

/*************** CUDA ERROR ***************/
inline bool is_error(cudaError_t e) { return e != cudaSuccess; }

inline std::string build_nvidia_error_msg(cudaError_t e) {
  std::ostringstream sout;
  sout << "CUDA error(" << e << "), " << cudaGetErrorString(e) << ". "
       << GetExternalErrorMsg(e);
  return sout.str();
}

/*************** CURAND ERROR ***************/
inline bool is_error(curandStatus_t stat) {
  return stat != CURAND_STATUS_SUCCESS;
}

inline std::string build_nvidia_error_msg(curandStatus_t stat) {
  std::ostringstream sout;
  sout << "CURAND error(" << stat << "). " << GetExternalErrorMsg(stat);
  return sout.str();
}

/*************** CUDNN ERROR ***************/
inline bool is_error(cudnnStatus_t stat) {
  return stat != CUDNN_STATUS_SUCCESS;
}

inline std::string build_nvidia_error_msg(cudnnStatus_t stat) {
  std::ostringstream sout;
  sout << "CUDNN error(" << stat << "), "
       << phi::dynload::cudnnGetErrorString(stat) << ". "
       << GetExternalErrorMsg(stat);
  return sout.str();
}

/*************** CUBLAS ERROR ***************/
inline bool is_error(cublasStatus_t stat) {
  return stat != CUBLAS_STATUS_SUCCESS;
}

inline std::string build_nvidia_error_msg(cublasStatus_t stat) {
  std::ostringstream sout;
  sout << "CUBLAS error(" << stat << "). " << GetExternalErrorMsg(stat);
  return sout.str();
}

/*************** CUSPARSE ERROR ***************/
inline bool is_error(cusparseStatus_t stat) {
  return stat != CUSPARSE_STATUS_SUCCESS;
}

inline std::string build_nvidia_error_msg(cusparseStatus_t stat) {
  std::ostringstream sout;
  sout << "CUSparse error(" << stat << "). " << GetExternalErrorMsg(stat);
  return sout.str();
}

/*************** CUSOLVER ERROR ***************/
inline bool is_error(cusolverStatus_t stat) {
  return stat != CUSOLVER_STATUS_SUCCESS;
}

inline std::string build_nvidia_error_msg(cusolverStatus_t stat) {
  std::ostringstream sout;
  sout << "CUSOLVER error(" << stat << "). " << GetExternalErrorMsg(stat);
  return sout.str();
}

/*************** CUFFT ERROR ***************/
inline bool is_error(cufftResult_t stat) { return stat != CUFFT_SUCCESS; }

inline std::string build_nvidia_error_msg(cufftResult_t stat) {
  std::ostringstream sout;
  sout << "CUFFT error(" << stat << "). " << GetExternalErrorMsg(stat);
  return sout.str();
}

/*************** CUresult ERROR ***************/
inline bool is_error(CUresult stat) { return stat != CUDA_SUCCESS; }

inline std::string build_nvidia_error_msg(CUresult stat) {
  std::ostringstream sout;
  sout << "CU error(" << stat << "). " << GetExternalErrorMsg(stat);
  return sout.str();
}

/**************** NCCL ERROR ****************/
#if !defined(__APPLE__) && defined(PADDLE_WITH_NCCL)
inline bool is_error(ncclResult_t nccl_result) {
  return nccl_result != ncclSuccess;
}

inline std::string build_nvidia_error_msg(ncclResult_t nccl_result) {
  std::ostringstream sout;
  sout << "NCCL error(" << nccl_result << "), "
       << phi::dynload::ncclGetErrorString(nccl_result) << ". ";
  if (errno == ENOSPC || errno == EAGAIN) {
    std::string detail(strerror(errno));
    detail += "\nPlease try one of the following solutions:";
    detail += "\n1. export NCCL_SHM_DISABLE=1;";
    detail += "\n2. export NCCL_P2P_LEVEL=SYS;";
    detail +=
        "\n3. Increase shared memory by setting the -shm-size "
        "option when starting docker container, e.g., setting "
        " -shm-size=2g.\n";
    sout << " Detail: " + detail;
  }
  sout << GetExternalErrorMsg(nccl_result);
  return sout.str();
}
#endif  // not(__APPLE__) and PADDLE_WITH_NCCL

#define PADDLE_ENFORCE_GPU_SUCCESS(COND)                      \
  do {                                                        \
    auto __cond__ = (COND);                                   \
    using __CUDA_STATUS_TYPE__ = decltype(__cond__);          \
    constexpr auto __success_type__ =                         \
        ::phi::enforce::details::ExternalApiType<             \
            __CUDA_STATUS_TYPE__>::kSuccess;                  \
    if (UNLIKELY(__cond__ != __success_type__)) {             \
      auto __summary__ = phi::errors::External(               \
          ::phi::enforce::build_nvidia_error_msg(__cond__));  \
      __THROW_ERROR_INTERNAL__(__summary__);                  \
    }                                                         \
  } while (0)

#define PADDLE_ENFORCE_CUDA_LAUNCH_SUCCESS(OP)                              \
  do {                                                                      \
    auto res = cudaGetLastError();                                          \
    if (UNLIKELY(res != cudaSuccess)) {                                     \
      auto msg = ::phi::enforce::build_nvidia_error_msg(res);               \
      PADDLE_THROW(                                                         \
          phi::errors::Fatal("CUDA error after kernel (%s): %s", OP, msg)); \
    }                                                                       \
  } while (0)

inline void retry_sleep(unsigned milliseconds) {
#ifdef _WIN32
  Sleep(milliseconds);
#else
  if (milliseconds < 1000) {
    // usleep argument must be less than 1,000,000. Reference:
    // https://pubs.opengroup.org/onlinepubs/7908799/xsh/usleep.html
    usleep(milliseconds * 1000);
  } else {
    // clip to sleep in seconds because we can not and don't have to
    // sleep for exact milliseconds
    sleep(milliseconds / 1000);
  }
#endif
}

#define PADDLE_RETRY_CUDA_SUCCESS(COND)                                 \
  do {                                                                  \
    auto __cond__ = (COND);                                             \
    int retry_count = 1;                                                \
    using __CUDA_STATUS_TYPE__ = decltype(__cond__);                    \
    constexpr auto __success_type__ =                                   \
        ::phi::enforce::details::ExternalApiType<                       \
            __CUDA_STATUS_TYPE__>::kSuccess;                            \
    while (UNLIKELY(__cond__ != __success_type__) && retry_count < 5) { \
      phi::enforce::retry_sleep(10000);                                 \
      __cond__ = (COND);                                                \
      ++retry_count;                                                    \
    }                                                                   \
    if (UNLIKELY(__cond__ != __success_type__)) {                       \
      auto __summary__ = phi::errors::External(                         \
          ::phi::enforce::build_nvidia_error_msg(__cond__));            \
      __THROW_ERROR_INTERNAL__(__summary__);                            \
    }                                                                   \
  } while (0)

#undef DEFINE_EXTERNAL_API_TYPE
#endif  // PADDLE_WITH_CUDA

/**************************************************************************/
/***************************** HIP ERROR **********************************/
#ifdef PADDLE_WITH_HIP

/***** HIP ERROR *****/
inline bool is_error(hipError_t e) { return e != hipSuccess; }

inline std::string build_rocm_error_msg(hipError_t e) {
  std::ostringstream sout;
  sout << " Hip error(" << e << "), " << hipGetErrorString(e) << ".";
  return sout.str();
}

/***** HIPRAND ERROR *****/
inline bool is_error(hiprandStatus_t stat) {
  return stat != HIPRAND_STATUS_SUCCESS;
}

inline const char* hiprandGetErrorString(hiprandStatus_t stat) {
  switch (stat) {
    case HIPRAND_STATUS_SUCCESS:
      return "HIPRAND_STATUS_SUCCESS";
    case HIPRAND_STATUS_VERSION_MISMATCH:
      return "HIPRAND_STATUS_VERSION_MISMATCH";
    case HIPRAND_STATUS_NOT_INITIALIZED:
      return "HIPRAND_STATUS_NOT_INITIALIZED";
    case HIPRAND_STATUS_ALLOCATION_FAILED:
      return "HIPRAND_STATUS_ALLOCATION_FAILED";
    case HIPRAND_STATUS_TYPE_ERROR:
      return "HIPRAND_STATUS_TYPE_ERROR";
    case HIPRAND_STATUS_OUT_OF_RANGE:
      return "HIPRAND_STATUS_OUT_OF_RANGE";
    case HIPRAND_STATUS_LENGTH_NOT_MULTIPLE:
      return "HIPRAND_STATUS_LENGTH_NOT_MULTIPLE";
    case HIPRAND_STATUS_DOUBLE_PRECISION_REQUIRED:
      return "HIPRAND_STATUS_DOUBLE_PRECISION_REQUIRED";
    case HIPRAND_STATUS_LAUNCH_FAILURE:
      return "HIPRAND_STATUS_LAUNCH_FAILURE";
    case HIPRAND_STATUS_PREEXISTING_FAILURE:
      return "HIPRAND_STATUS_PREEXISTING_FAILURE";
    case HIPRAND_STATUS_INITIALIZATION_FAILED:
      return "HIPRAND_STATUS_INITIALIZATION_FAILED";
    case HIPRAND_STATUS_ARCH_MISMATCH:
      return "HIPRAND_STATUS_ARCH_MISMATCH";
    case HIPRAND_STATUS_INTERNAL_ERROR:
      return "HIPRAND_STATUS_INTERNAL_ERROR";
    case HIPRAND_STATUS_NOT_IMPLEMENTED:
      return "HIPRAND_STATUS_NOT_IMPLEMENTED";
    default:
      return "Unknown hiprand status";
  }
}

inline std::string build_rocm_error_msg(hiprandStatus_t stat) {
  std::string msg(" Hiprand error, ");
  return msg + hiprandGetErrorString(stat) + " ";
}

/***** MIOPEN ERROR *****/
inline bool is_error(miopenStatus_t stat) {
  return stat != miopenStatusSuccess;
}

inline std::string build_rocm_error_msg(miopenStatus_t stat) {
  std::string msg(" Miopen error, ");
  return msg + phi::dynload::miopenGetErrorString(stat) + " ";
}

/***** ROCBLAS ERROR *****/
inline bool is_error(rocblas_status stat) {
  return stat != rocblas_status_success;
}

inline const char* rocblasGetErrorString(rocblas_status stat) {
  switch (stat) {
    case rocblas_status_invalid_handle:
      return "rocblas_status_invalid_handle";
    case rocblas_status_memory_error:
      return "rocblas_status_memory_error";
    case rocblas_status_invalid_value:
      return "rocblas_status_invalid_value";
    case rocblas_status_not_implemented:
      return "rocblas_status_not_implemented";
    case rocblas_status_invalid_pointer:
      return "rocblas_status_invalid_pointer";
    case rocblas_status_invalid_size:
      return "rocblas_status_invalid_size";
    case rocblas_status_internal_error:
      return "rocblas_status_internal_error";
    default:
      return "Unknown cublas status";
  }
}

inline std::string build_rocm_error_msg(rocblas_status stat) {
  std::string msg(" Rocblas error, ");
  return msg + rocblasGetErrorString(stat) + " ";
}

/****** RCCL ERROR ******/
#if !defined(__APPLE__) && defined(PADDLE_WITH_RCCL)
inline bool is_error(ncclResult_t nccl_result) {
  return nccl_result != ncclSuccess;
}

inline std::string build_rocm_error_msg(ncclResult_t nccl_result) {
  std::string msg(" Rccl error, ");
  return msg + phi::dynload::ncclGetErrorString(nccl_result) + " ";
}
#endif  // not(__APPLE__) and PADDLE_WITH_NCCL

/***** HIPFFT ERROR *****/
inline bool is_error(hipfftResult_t stat) { return stat != HIPFFT_SUCCESS; }

inline std::string build_rocm_error_msg(hipfftResult_t stat) {
  std::string msg(" HIPFFT error, ");
  return msg + phi::dynload::hipfftGetErrorString(stat) + " ";
}

namespace details {

template <typename T>
struct ExternalApiType {};

#define DEFINE_EXTERNAL_API_TYPE(type, success_value) \
  template <>                                         \
  struct ExternalApiType<type> {                      \
    using Type = type;                                \
    static constexpr Type kSuccess = success_value;   \
  }

DEFINE_EXTERNAL_API_TYPE(hipError_t, hipSuccess);
DEFINE_EXTERNAL_API_TYPE(hiprandStatus_t, HIPRAND_STATUS_SUCCESS);
DEFINE_EXTERNAL_API_TYPE(miopenStatus_t, miopenStatusSuccess);
DEFINE_EXTERNAL_API_TYPE(rocblas_status, rocblas_status_success);
DEFINE_EXTERNAL_API_TYPE(hipfftResult_t, HIPFFT_SUCCESS);

#if !defined(__APPLE__) && defined(PADDLE_WITH_RCCL)
DEFINE_EXTERNAL_API_TYPE(ncclResult_t, ncclSuccess);
#endif

}  // namespace details

#define PADDLE_ENFORCE_GPU_SUCCESS(COND)                   \
  do {                                                     \
    auto __cond__ = (COND);                                \
    using __CUDA_STATUS_TYPE__ = decltype(__cond__);       \
    constexpr auto __success_type__ =                      \
        ::phi::enforce::details::ExternalApiType<          \
            __CUDA_STATUS_TYPE__>::kSuccess;               \
    if (UNLIKELY(__cond__ != __success_type__)) {          \
      auto __summary__ = phi::errors::External(            \
          ::phi::enforce::build_rocm_error_msg(__cond__)); \
      __THROW_ERROR_INTERNAL__(__summary__);               \
    }                                                      \
  } while (0)

inline void retry_sleep(unsigned millisecond) {
#ifdef _WIN32
  Sleep(millisecond);
#else
  sleep(millisecond);
#endif
}

#define PADDLE_RETRY_CUDA_SUCCESS(COND)                                 \
  do {                                                                  \
    auto __cond__ = (COND);                                             \
    int retry_count = 1;                                                \
    using __CUDA_STATUS_TYPE__ = decltype(__cond__);                    \
    constexpr auto __success_type__ =                                   \
        ::phi::enforce::details::ExternalApiType<                       \
            __CUDA_STATUS_TYPE__>::kSuccess;                            \
    while (UNLIKELY(__cond__ != __success_type__) && retry_count < 5) { \
      ::phi::enforce::retry_sleep(10000);                               \
      __cond__ = (COND);                                                \
      ++retry_count;                                                    \
    }                                                                   \
    if (UNLIKELY(__cond__ != __success_type__)) {                       \
      auto __summary__ = phi::errors::External(                         \
          ::phi::enforce::build_rocm_error_msg(__cond__));              \
      __THROW_ERROR_INTERNAL__(__summary__);                            \
    }                                                                   \
  } while (0)

#undef DEFINE_EXTERNAL_API_TYPE
#endif  // PADDLE_WITH_HIP

 }  // namespace enforce
 using namespace enforce;  // NOLINT
 }  // namespace phi
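A sketch of what the added ExternalApiType trait gives PADDLE_ENFORCE_GPU_SUCCESS: for each external status type it records the success value and the proto ApiType used to look up hint messages from externalErrorMsg.pb. The static_asserts below are illustrative checks written for this page, not code from the commit:

#include "paddle/phi/core/enforce.h"

#ifdef PADDLE_WITH_CUDA
static_assert(
    phi::enforce::details::ExternalApiType<cudaError_t>::kSuccess ==
        cudaSuccess,
    "ExternalApiType maps each external status type to its success value");
static_assert(
    phi::enforce::details::ExternalApiType<cudaError_t>::kProtoType ==
        phi::proto::ApiType::CUDA,
    "and to the proto ApiType used to look up hints in externalErrorMsg.pb");
#endif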
paddle/fluid/platform/external_error.proto → paddle/phi/core/external_error.proto

@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 syntax = "proto2";
-package paddle.platform.proto;
+package phi.proto;

 // (NOTE:zhouwei): ApiType describes which kind of external third party API
 // More external third party API can be added.
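Since protoc derives the generated C++ namespace from the proto package, moving this file and renaming the package also relocates the generated types. The comparison below is an assumption based on standard protoc C++ output, not text from this commit:

// Generated from the old file, paddle/fluid/platform/external_error.pb.h:
//   paddle::platform::proto::ExternalErrorDesc, paddle::platform::proto::ApiType
// Generated from the relocated file, paddle/phi/core/external_error.pb.h:
//   phi::proto::ExternalErrorDesc, phi::proto::ApiType
#include "paddle/phi/core/external_error.pb.h"

phi::proto::ExternalErrorDesc desc;  // parsed from externalErrorMsg.pb at runtime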
paddle/phi/kernels/autotune/CMakeLists.txt
@@ -2,9 +2,12 @@ if(WITH_CUDNN_FRONTEND)
   cc_library(
     cache
     SRCS cache.cc
-    DEPS cudnn-frontend)
+    DEPS cudnn-frontend phi_enforce)
 else()
-  cc_library(cache SRCS cache.cc)
+  cc_library(
+    cache
+    SRCS cache.cc
+    DEPS phi_enforce)
 endif()
 cc_library(
   switch_autotune
paddle/phi/kernels/funcs/concat_funcs.h
@@ -14,7 +14,7 @@
 #pragma once

-#include "paddle/fluid/platform/enforce.h"
+#include "paddle/phi/core/enforce.h"
 #include "paddle/phi/core/errors.h"

 namespace phi {
 namespace funcs {
paddle/phi/kernels/funcs/cpu_vec.h
@@ -18,7 +18,7 @@ limitations under the License. */
 #include <string>

 #include "paddle/fluid/platform/cpu_info.h"
-#include "paddle/fluid/platform/enforce.h"
+#include "paddle/phi/core/enforce.h"

 #ifdef PADDLE_WITH_MKLML
 #include "paddle/phi/backends/dynload/mklml.h"
paddle/phi/kernels/funcs/cufft_util.h
@@ -15,9 +15,9 @@
 #pragma once

 #include <vector>

-#include "paddle/fluid/platform/enforce.h"
 #include "paddle/phi/backends/dynload/cufft.h"
 #include "paddle/phi/core/ddim.h"
+#include "paddle/phi/core/enforce.h"
 #include "paddle/phi/kernels/funcs/fft.h"
 #include "paddle/phi/kernels/funcs/fft_key.h"
paddle/phi/kernels/funcs/gru_compute.h
@@ -12,7 +12,7 @@ limitations under the License. */
 #pragma once

 #include "paddle/fluid/platform/device_context.h"
-#include "paddle/fluid/platform/enforce.h"
+#include "paddle/phi/core/enforce.h"
 #include "paddle/phi/kernels/funcs/detail/activation_functions.h"

 namespace phi {
paddle/phi/kernels/funcs/hipfft_util.h
@@ -15,8 +15,8 @@
 #pragma once

 #include <vector>

-#include "paddle/fluid/platform/enforce.h"
 #include "paddle/phi/backends/dynload/hipfft.h"
+#include "paddle/phi/core/enforce.h"
 #include "paddle/phi/kernels/funcs/fft.h"
 #include "paddle/phi/kernels/funcs/fft_key.h"
paddle/phi/kernels/funcs/lstm_compute.h
@@ -15,7 +15,7 @@ limitations under the License. */
 #pragma once

 #include "paddle/fluid/platform/device_context.h"
-#include "paddle/fluid/platform/enforce.h"
+#include "paddle/phi/core/enforce.h"
 #include "paddle/phi/kernels/funcs/detail/activation_functions.h"

 namespace phi {
paddle/phi/kernels/funcs/math_function.h
@@ -21,8 +21,8 @@ limitations under the License. */
 #include "paddle/fluid/framework/tensor.h"
 #include "paddle/fluid/framework/tensor_util.h"
 #include "paddle/fluid/platform/device_context.h"
-#include "paddle/fluid/platform/enforce.h"
 #include "paddle/phi/core/dense_tensor.h"
+#include "paddle/phi/core/enforce.h"
 #include "paddle/phi/core/utils/data_type.h"
 #include "paddle/phi/kernels/funcs/eigen/common.h"
paddle/phi/kernels/gpu/batch_norm_grad_kernel.cu
@@ -15,10 +15,10 @@
 #include "paddle/fluid/operators/layout_utils.h"
 #include "paddle/fluid/operators/norm_utils.cu.h"
 #include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
-#include "paddle/fluid/platform/enforce.h"
 #include "paddle/fluid/platform/flags.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/common/layout.h"
+#include "paddle/phi/core/enforce.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/batch_norm_kernel.h"
 #include "paddle/phi/kernels/empty_kernel.h"
paddle/phi/kernels/gpu/batch_norm_kernel.cu
@@ -23,10 +23,10 @@ namespace cub = hipcub;
 #include "paddle/fluid/operators/layout_utils.h"
 #include "paddle/fluid/operators/norm_utils.cu.h"
 #include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
-#include "paddle/fluid/platform/enforce.h"
 #include "paddle/fluid/platform/flags.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/common/layout.h"
+#include "paddle/phi/core/enforce.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/batch_norm_kernel.h"
 #include "paddle/phi/kernels/funcs/batch_norm_utils.h"
paddle/phi/kernels/gpu/cholesky_solve_kernel.cu
@@ -15,10 +15,10 @@
 #ifndef PADDLE_WITH_HIP
 // HIP not support cusolver

-#include "paddle/fluid/platform/enforce.h"
 #include "paddle/phi/backends/dynload/cusolver.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/common/complex.h"
+#include "paddle/phi/core/enforce.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/funcs/lapack/lapack_function.h"
 #include "paddle/phi/kernels/impl/cholesky_solve_kernel_impl.h"
paddle/phi/kernels/gpu/class_center_sample_kernel.cu
@@ -30,7 +30,7 @@ namespace cub = hipcub;
 #include <random>

 #include "paddle/fluid/framework/tensor_util.h"
-#include "paddle/fluid/platform/enforce.h"
+#include "paddle/phi/core/enforce.h"

 #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
 #include "paddle/fluid/distributed/collective/ProcessGroup.h"
tools/externalError/start.sh
@@ -29,7 +29,7 @@ else
     echo "please run on Mac/Linux"
     exit 1
 fi

-protobuf/bin/protoc -I../../paddle/fluid/platform/ --python_out . ../../paddle/fluid/platform/external_error.proto
+protobuf/bin/protoc -I../../paddle/phi/core/ --python_out . ../../paddle/phi/core/external_error.proto

 python3.7 spider.py
 tar czvf externalErrorMsg_$(date +'%Y%m%d').tar.gz externalErrorMsg.pb