Commit b04c3d14 authored by Megvii Engine Team

feat(lite): add set address ptr pair interface

GitOrigin-RevId: 285dacb4da51cb1e23f411967d612bb520d611d8
Parent 17f2dffb
@@ -21,3 +21,6 @@ ci/resource/prof/model_with_err_assert.mdl filter=lfs diff=lfs merge=lfs -text
ci/resource/prof/test_mge.mge filter=lfs diff=lfs merge=lfs -text
lite/test/resource/lite/ax_models/64-58063ce2.axe filter=lfs diff=lfs merge=lfs -text
imperative/python/test/unit/module/MagicMindRuntimeOprTest.GraphShapeMutable.mlu filter=lfs diff=lfs merge=lfs -text
lite/test/resource/lite/ax_data_input.npy filter=lfs diff=lfs merge=lfs -text
lite/test/resource/lite/ax_data_output.npy filter=lfs diff=lfs merge=lfs -text
lite/test/resource/lite/ax_model.mge filter=lfs diff=lfs merge=lfs -text
@@ -154,6 +154,21 @@ LITE_API void set_tensor_rt_cache(std::string tensorrt_cache_path);
 */
LITE_API void dump_tensor_rt_cache();
/**
 * Register a pair of physical and virtual addresses with MGE; some devices
 * need the mapping from physical to virtual addresses.
 */
LITE_API bool register_memory_pair(
        void* vir_ptr, void* phy_ptr, size_t length, LiteDeviceType device,
        LiteBackend backend = LiteBackend::LITE_DEFAULT);

/**
 * Clear a registered pair of physical and virtual addresses in MGE.
 */
LITE_API bool clear_memory_pair(
        void* vir_ptr, void* phy_ptr, LiteDeviceType device,
        LiteBackend backend = LiteBackend::LITE_DEFAULT);
}  // namespace lite
// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
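For reviewers, a minimal C++ usage sketch of the new interface. The addresses below are placeholders (a real mapping would come from the device driver or an mmap'ed region), and note that in this commit both functions still throw "not implemented yet", so this only illustrates the intended call pattern:

```cpp
#include <cstddef>

#include "lite/global.h"

int main() {
    // Hypothetical addresses for illustration only; on a real device these
    // would come from the platform's mapping facility (e.g. mmap or a driver).
    void* vir_ptr = reinterpret_cast<void*>(0x10000000u);
    void* phy_ptr = reinterpret_cast<void*>(0x80000000u);
    size_t length = 4 * 1024 * 1024;  // size of the mapped region in bytes

    // Register the pair so the runtime can translate between the two views.
    bool ok = lite::register_memory_pair(
            vir_ptr, phy_ptr, length, LiteDeviceType::LITE_CPU);

    // ... create networks / run inference on buffers inside this region ...

    // Drop the mapping once the buffers are no longer in use.
    if (ok) {
        lite::clear_memory_pair(vir_ptr, phy_ptr, LiteDeviceType::LITE_CPU);
    }
    return 0;
}
```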
@@ -160,9 +160,24 @@ LITE_API int LITE_dump_persistent_cache(const char* cache_path);
 * \brief dump the tensorrt policy cache to file
 */
LITE_API int LITE_dump_tensor_rt_cache();
#endif
/**
 * \brief register a pair of physical and virtual addresses with MGE; some
 * devices need the mapping from physical to virtual addresses.
 */
LITE_API int LITE_register_memory_pair(
        void* vir_ptr, void* phy_ptr, size_t length, LiteDeviceType device,
        LiteBackend backend);
/**
 * \brief clear a registered pair of physical and virtual addresses in MGE.
 */
LITE_API int LITE_clear_memory_pair(
        void* vir_ptr, void* phy_ptr, LiteDeviceType device, LiteBackend backend);
#ifdef __cplusplus
}
#endif
#endif
// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
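The C entry points mirror the C++ ones but report failure through an integer return code; the sketch below assumes the zero-on-success convention that the other LITE_* functions follow (consistent with the LITE_CAPI_BEGIN/END wrappers in the implementation), with placeholder pointers supplied by the caller:

```cpp
#include <cstddef>
#include <cstdio>

#include "lite-c/global_c.h"

int use_memory_pair(void* vir_ptr, void* phy_ptr, size_t length) {
    // Non-zero return signals failure, matching the other LITE_* entry
    // points; the message can then be fetched via the library's
    // last-error helper.
    if (LITE_register_memory_pair(
                vir_ptr, phy_ptr, length, LITE_CPU, LITE_DEFAULT) != 0) {
        std::fprintf(stderr, "LITE_register_memory_pair failed\n");
        return -1;
    }
    // ... run inference against buffers inside the registered region ...
    if (LITE_clear_memory_pair(vir_ptr, phy_ptr, LITE_CPU, LITE_DEFAULT) != 0) {
        std::fprintf(stderr, "LITE_clear_memory_pair failed\n");
        return -1;
    }
    return 0;
}
```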
@@ -189,4 +189,19 @@ int LITE_dump_tensor_rt_cache() {
    LITE_CAPI_END();
}
int LITE_register_memory_pair(
        void* vir_ptr, void* phy_ptr, size_t length, LiteDeviceType device,
        LiteBackend backend) {
    LITE_CAPI_BEGIN();
    lite::register_memory_pair(vir_ptr, phy_ptr, length, device, backend);
    LITE_CAPI_END();
}

int LITE_clear_memory_pair(
        void* vir_ptr, void* phy_ptr, LiteDeviceType device, LiteBackend backend) {
    LITE_CAPI_BEGIN();
    lite::clear_memory_pair(vir_ptr, phy_ptr, device, backend);
    LITE_CAPI_END();
}
// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
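For context, LITE_CAPI_BEGIN/LITE_CAPI_END are the existing macros this file uses to bridge C++ exceptions to C return codes. A rough sketch of the pattern they implement, as an assumption about their shape rather than the actual macro source:

```cpp
// Not the actual MegEngine source -- a sketch (assumption) of the pattern:
// translate C++ exceptions into the C error-code convention at the ABI
// boundary, so exceptions never cross into C callers.
#define LITE_CAPI_BEGIN() try {
#define LITE_CAPI_END()                         \
    }                                           \
    catch (std::exception & e) {                \
        /* record e.what() for later queries */ \
        return -1;                              \
    }                                           \
    return 0;
```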
@@ -42,6 +42,8 @@ class _GlobalAPI(_LiteCObjBase):
        # ('LITE_set_tensor_rt_cache', [c_char_p]),
        ("LITE_dump_persistent_cache", [c_char_p]),
        ("LITE_dump_tensor_rt_cache", [c_char_p]),
("LITE_register_memory_pair", [c_void_p, c_void_p, c_size_t, c_int, c_int]),
("LITE_clear_memory_pair", [c_void_p, c_void_p, c_int, c_int]),
    ]
@@ -121,3 +123,21 @@ class LiteGlobal(object):
    @staticmethod
    def try_coalesce_all_free_memory():
        LiteGlobal._api.LITE_try_coalesce_all_free_memory()

    @staticmethod
    def register_memory_pair(
        vir_ptr, phy_ptr, length, device, backend=LiteBackend.LITE_DEFAULT
    ):
        assert isinstance(vir_ptr, c_void_p) and isinstance(
            phy_ptr, c_void_p
        ), "register_memory_pair only accepts c_void_p type."
        LiteGlobal._api.LITE_register_memory_pair(
            vir_ptr, phy_ptr, length, device, backend
        )

    @staticmethod
    def clear_memory_pair(vir_ptr, phy_ptr, device, backend=LiteBackend.LITE_DEFAULT):
        assert isinstance(vir_ptr, c_void_p) and isinstance(
            phy_ptr, c_void_p
        ), "clear_memory_pair only accepts c_void_p type."
        LiteGlobal._api.LITE_clear_memory_pair(vir_ptr, phy_ptr, device, backend)
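A usage sketch for the Python binding; both pointers must already be wrapped in ctypes c_void_p (the asserts above enforce this), and the addresses here are placeholders only:

```python
from ctypes import c_void_p

from megenginelite import LiteDeviceType, LiteGlobal

# Placeholder addresses for illustration only; real values would come from
# the device driver or a mapped region.
vir_ptr = c_void_p(0x10000000)
phy_ptr = c_void_p(0x80000000)
length = 4 * 1024 * 1024  # bytes

LiteGlobal.register_memory_pair(vir_ptr, phy_ptr, length, LiteDeviceType.LITE_CPU)
# ... run inference against buffers inside the registered region ...
LiteGlobal.clear_memory_pair(vir_ptr, phy_ptr, LiteDeviceType.LITE_CPU)
```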
@@ -212,6 +212,26 @@ void lite::dump_tensor_rt_cache() {
#endif
}
bool lite::register_memory_pair(
        void* vir_ptr, void* phy_ptr, size_t length, LiteDeviceType device,
        LiteBackend backend) {
    LITE_MARK_USED_VAR(vir_ptr);
    LITE_MARK_USED_VAR(phy_ptr);
    LITE_MARK_USED_VAR(length);
    LITE_MARK_USED_VAR(device);
    LITE_MARK_USED_VAR(backend);
    LITE_THROW("register_memory_pair is not implemented yet!");
}

bool lite::clear_memory_pair(
        void* vir_ptr, void* phy_ptr, LiteDeviceType device, LiteBackend backend) {
    LITE_MARK_USED_VAR(vir_ptr);
    LITE_MARK_USED_VAR(phy_ptr);
    LITE_MARK_USED_VAR(device);
    LITE_MARK_USED_VAR(backend);
    LITE_THROW("clear_memory_pair is not implemented yet!");
}
#else  // LITE_BUILD_WITH_MGE
void lite::try_coalesce_all_free_memory() {}
@@ -235,6 +255,17 @@ void lite::set_tensor_rt_cache(std::string) {
void lite::dump_tensor_rt_cache() {
    LITE_THROW("mge is disabled at build time, please build with mge");
}
bool lite::register_memory_pair(
        void* vir_ptr, void* phy_ptr, size_t length, LiteDeviceType device,
        LiteBackend backend) {
    LITE_THROW("register_memory_pair is not implemented yet!");
}

bool lite::clear_memory_pair(
        void* vir_ptr, void* phy_ptr, LiteDeviceType device, LiteBackend backend) {
    LITE_THROW("clear_memory_pair is not implemented yet!");
}
#endif

namespace lite {
REGIST_DECRYPTION_METHOD(
...
@@ -1357,5 +1357,6 @@ TEST(TestNetWork, CambriconDeviceID) {
    load_device_id(LiteDeviceType::LITE_CAMBRICON, 0, "./model_magicmind.mgb");
}
#endif
#endif #endif
// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}