Commit d60d028a authored by Megvii Engine Team

feat(mge/device): enable to get cuda/cudnn/tensorrt version

GitOrigin-RevId: 5864c61d10972aa30923cf87e457e9819e8ea52d
Parent 8fed114a
......@@ -4,7 +4,10 @@ import re
from typing import Optional
from .core._imperative_rt.common import CompNode, DeviceType
from .core._imperative_rt.common import get_cuda_version as _get_cuda_version
from .core._imperative_rt.common import get_cudnn_version as _get_cudnn_version
from .core._imperative_rt.common import get_device_prop as _get_device_prop
from .core._imperative_rt.common import get_tensorrt_version as _get_tensorrt_version
from .core._imperative_rt.common import set_prealloc_config as _set_prealloc_config
from .core._imperative_rt.common import what_is_xpu as _what_is_xpu
from .core._imperative_rt.utils import _try_coalesce_all_free_memory
......@@ -17,6 +20,9 @@ __all__ = [
"get_mem_status_bytes",
"get_cuda_compute_capability",
"get_cuda_device_property",
"get_cuda_version",
"get_cudnn_version",
"get_tensorrt_version",
"get_allocated_memory",
"get_reserved_memory",
"get_max_reserved_memory",
......@@ -263,3 +269,30 @@ def coalesce_free_memory():
* This function may do nothing if there are no chunks that can be freed.
"""
return _try_coalesce_all_free_memory()


def get_cuda_version():
    r"""Get the CUDA version used when compiling MegEngine.

    Returns:
        a version number, indicating `CUDA_VERSION_MAJOR * 1000 + CUDA_VERSION_MINOR * 10`.
    """
    return _get_cuda_version()


def get_cudnn_version():
    r"""Get the cuDNN version used when compiling MegEngine.

    Returns:
        a version number, indicating `CUDNN_MAJOR * 1000 + CUDNN_MINOR * 100 + CUDNN_PATCHLEVEL`.
    """
    return _get_cudnn_version()


def get_tensorrt_version():
    r"""Get the TensorRT version used when compiling MegEngine.

    Returns:
        a version number, indicating `NV_TENSORRT_MAJOR * 1000 + NV_TENSORRT_MINOR * 100 + NV_TENSORRT_PATCH`.
    """
    return _get_tensorrt_version()
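The three getters return the compile-time version macros as packed integers, or -1 when MegEngine was built without CUDA (see the fallback branch further down in this diff), so callers have to decode them. The sketch below is a minimal usage illustration, not part of this commit: it assumes the functions are exposed through `megengine.device` as the hunk above suggests, and the `_split_version` helper and sample values are hypothetical.

    import megengine.device as device

    def _split_version(packed, minor_unit):
        # Hypothetical helper: undo the MAJOR * 1000 + MINOR * minor_unit + PATCH packing.
        major, rest = divmod(packed, 1000)
        minor, patch = divmod(rest, minor_unit)
        return major, minor, patch

    cuda = device.get_cuda_version()
    if cuda == -1:
        # Built without CUDA: all three getters return -1.
        print("MegEngine was compiled without CUDA support")
    else:
        print("CUDA:", _split_version(cuda, 10))                          # e.g. 11020 -> (11, 2, 0)
        print("cuDNN:", _split_version(device.get_cudnn_version(), 100))  # e.g. 8201  -> (8, 2, 1)
        print("TensorRT:", _split_version(device.get_tensorrt_version(), 100))

Note that these values describe the toolkit headers MegEngine was compiled against, not the driver or runtime libraries installed on the machine.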
......@@ -8,6 +8,7 @@
#include "megbrain/comp_node.h"
#include "megbrain/graph.h"
#include "megbrain/imperative/physical_tensor.h"
#include "megbrain/version.h"
#if MGB_ENABLE_OPR_MM
#include "megbrain/opr/mm_handler.h"
#endif
......@@ -264,6 +265,10 @@ void init_common(py::module m) {
return mge_gen_code;
});
m.def("get_cuda_version", []() { return mgb::get_cuda_version(); });
m.def("get_cudnn_version", []() { return mgb::get_cudnn_version(); });
m.def("get_tensorrt_version", []() { return mgb::get_tensorrt_version(); });
m.def("what_is_xpu",
[] { return CompNode::Locator::parse("xpux").to_physical().type; });
......
......@@ -26,4 +26,29 @@ Version mgb::get_version() {
#endif
}
#if MGB_CUDA
#include "NvInfer.h"
#include "cuda.h"
#include "cudnn.h"
int mgb::get_cuda_version() {
return CUDA_VERSION;
}
int mgb::get_cudnn_version() {
return CUDNN_VERSION;
}
int mgb::get_tensorrt_version() {
return NV_TENSORRT_VERSION;
}
#else
int mgb::get_cuda_version() {
return -1;
}
int mgb::get_cudnn_version() {
return -1;
}
int mgb::get_tensorrt_version() {
return -1;
}
#endif
// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
......@@ -20,6 +20,9 @@ struct Version {
};
MGE_WIN_DECLSPEC_FUC Version get_version();
MGE_WIN_DECLSPEC_FUC int get_cuda_version();
MGE_WIN_DECLSPEC_FUC int get_cudnn_version();
MGE_WIN_DECLSPEC_FUC int get_tensorrt_version();
} // namespace mgb
// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}