diff --git a/.gitattributes b/.gitattributes index 458eb5aa254d0fc5cc08cd5e542daaf10029615a..0b84a4c2c803ba9c43bd0310c24812b32378f5cf 100644 --- a/.gitattributes +++ b/.gitattributes @@ -21,3 +21,6 @@ ci/resource/prof/model_with_err_assert.mdl filter=lfs diff=lfs merge=lfs -text ci/resource/prof/test_mge.mge filter=lfs diff=lfs merge=lfs -text lite/test/resource/lite/ax_models/64-58063ce2.axe filter=lfs diff=lfs merge=lfs -text imperative/python/test/unit/module/MagicMindRuntimeOprTest.GraphShapeMutable.mlu filter=lfs diff=lfs merge=lfs -text +lite/test/resource/lite/ax_data_input.npy filter=lfs diff=lfs merge=lfs -text +lite/test/resource/lite/ax_data_output.npy filter=lfs diff=lfs merge=lfs -text +lite/test/resource/lite/ax_model.mge filter=lfs diff=lfs merge=lfs -text diff --git a/lite/include/lite/global.h b/lite/include/lite/global.h index e681ee7ed8d384f83f61e27fef36708508690671..f9c70777c56112bc04cd2277cca7666cab7de8c3 100644 --- a/lite/include/lite/global.h +++ b/lite/include/lite/global.h @@ -154,6 +154,21 @@ LITE_API void set_tensor_rt_cache(std::string tensorrt_cache_path); */ LITE_API void dump_tensor_rt_cache(); +/** + * register the physical and virtual address pair to the mge, some device + * need the map from physical to virtual. + */ +LITE_API bool register_memory_pair( + void* vir_ptr, void* phy_ptr, size_t length, LiteDeviceType device, + LiteBackend backend = LiteBackend::LITE_DEFAULT); + +/** + * clear the physical and virtual address pair in mge. 
+ */ +LITE_API bool clear_memory_pair( + void* vir_ptr, void* phy_ptr, LiteDeviceType device, + LiteBackend backend = LiteBackend::LITE_DEFAULT); + } // namespace lite // vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}} diff --git a/lite/lite-c/include/lite-c/global_c.h b/lite/lite-c/include/lite-c/global_c.h index a895f28c8c31e077a06c048b88b3f74a703ddc3a..42eed593f3230d806ab9b9fa22a8b2f3f9878630 100644 --- a/lite/lite-c/include/lite-c/global_c.h +++ b/lite/lite-c/include/lite-c/global_c.h @@ -160,9 +160,24 @@ LITE_API int LITE_dump_persistent_cache(const char* cache_path); * \brief dump the tensorrt policy cache to file */ LITE_API int LITE_dump_tensor_rt_cache(); -#endif + +/** + * register the physical and virtual address pair to the mge, some device + * need the map from physical to virtual. + */ +LITE_API int LITE_register_memory_pair( + void* vir_ptr, void* phy_ptr, size_t length, LiteDeviceType device, + LiteBackend backend); + +/** + * clear the physical and virtual address pair in mge. 
+ */ +LITE_API int LITE_clear_memory_pair( + void* vir_ptr, void* phy_ptr, LiteDeviceType device, LiteBackend backend); + #ifdef __cplusplus } #endif +#endif // vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}} diff --git a/lite/lite-c/src/global.cpp b/lite/lite-c/src/global.cpp index c686b1f3f961a6b0e917e11e6d0870615f62cf7b..8be2644ce2d33ed71a47efa7646d7110eda0fa46 100644 --- a/lite/lite-c/src/global.cpp +++ b/lite/lite-c/src/global.cpp @@ -189,4 +189,19 @@ int LITE_dump_tensor_rt_cache() { LITE_CAPI_END(); } +int LITE_register_memory_pair( + void* vir_ptr, void* phy_ptr, size_t length, LiteDeviceType device, + LiteBackend backend) { + LITE_CAPI_BEGIN(); + lite::register_memory_pair(vir_ptr, phy_ptr, length, device, backend); + LITE_CAPI_END(); +} + +int LITE_clear_memory_pair( + void* vir_ptr, void* phy_ptr, LiteDeviceType device, LiteBackend backend) { + LITE_CAPI_BEGIN(); + lite::clear_memory_pair(vir_ptr, phy_ptr, device, backend); + LITE_CAPI_END(); +} + // vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}} diff --git a/lite/pylite/megenginelite/global_setting.py b/lite/pylite/megenginelite/global_setting.py index c39cdf62a9363520c9943fce0855293f074cc5ed..89615e6b3cc5d1ea3ef5ef979a1d8d727c45cbbc 100644 --- a/lite/pylite/megenginelite/global_setting.py +++ b/lite/pylite/megenginelite/global_setting.py @@ -42,6 +42,8 @@ class _GlobalAPI(_LiteCObjBase): # ('LITE_set_tensor_rt_cache', [c_char_p]), ("LITE_dump_persistent_cache", [c_char_p]), ("LITE_dump_tensor_rt_cache", [c_char_p]), + ("LITE_register_memory_pair", [c_void_p, c_void_p, c_size_t, c_int, c_int]), + ("LITE_clear_memory_pair", [c_void_p, c_void_p, c_int, c_int]), ] @@ -121,3 +123,21 @@ class LiteGlobal(object): @staticmethod def try_coalesce_all_free_memory(): LiteGlobal._api.LITE_try_coalesce_all_free_memory() + + @staticmethod + def register_memory_pair( + vir_ptr, phy_ptr, length, device, backend=LiteBackend.LITE_DEFAULT + ): + assert isinstance(vir_ptr, c_void_p) and
isinstance( + phy_ptr, c_void_p + ), "register memory pair only accept c_void_p type." + LiteGlobal._api.LITE_register_memory_pair( + vir_ptr, phy_ptr, length, device, backend + ) + + @staticmethod + def clear_memory_pair(vir_ptr, phy_ptr, device, backend=LiteBackend.LITE_DEFAULT): + assert isinstance(vir_ptr, c_void_p) and isinstance( + phy_ptr, c_void_p + ), "clear memory pair only accept c_void_p type." + LiteGlobal._api.LITE_clear_memory_pair(vir_ptr, phy_ptr, device, backend) diff --git a/lite/pylite/test/test_network_cuda.py b/lite/pylite/test/test_network_device.py similarity index 100% rename from lite/pylite/test/test_network_cuda.py rename to lite/pylite/test/test_network_device.py diff --git a/lite/src/global.cpp b/lite/src/global.cpp index 5aa973a7162bc5919d6e268ca3e4c1c103f76ffc..9f3e9fab86c27d530f6c67f0c60759cf5efdfae9 100644 --- a/lite/src/global.cpp +++ b/lite/src/global.cpp @@ -212,6 +212,26 @@ void lite::dump_tensor_rt_cache() { #endif } +bool lite::register_memory_pair( + void* vir_ptr, void* phy_ptr, size_t length, LiteDeviceType device, + LiteBackend backend) { + LITE_MARK_USED_VAR(vir_ptr); + LITE_MARK_USED_VAR(phy_ptr); + LITE_MARK_USED_VAR(length); + LITE_MARK_USED_VAR(device); + LITE_MARK_USED_VAR(backend); + LITE_THROW("register_memory_pair is not implemented yet!"); +} + +bool lite::clear_memory_pair( + void* vir_ptr, void* phy_ptr, LiteDeviceType device, LiteBackend backend) { + LITE_MARK_USED_VAR(vir_ptr); + LITE_MARK_USED_VAR(phy_ptr); + LITE_MARK_USED_VAR(device); + LITE_MARK_USED_VAR(backend); + LITE_THROW("clear_memory_pair is not implemented yet!"); +} + #else // LITE_BUILD_WITH_MGE void lite::try_coalesce_all_free_memory() {} @@ -235,6 +255,17 @@ void lite::set_tensor_rt_cache(std::string) { void lite::dump_tensor_rt_cache() { LITE_THROW("mge is disbale at build time, please build with mge"); } + +bool lite::register_memory_pair( + void* vir_ptr, void* phy_ptr, size_t length, LiteDeviceType device, + LiteBackend backend) { +
LITE_THROW("register_memory_pair is not implemented yet!"); +} + +bool lite::clear_memory_pair( + void* vir_ptr, void* phy_ptr, LiteDeviceType device, LiteBackend backend) { + LITE_THROW("clear_memory_pair is not implemented yet!"); +} #endif namespace lite { REGIST_DECRYPTION_METHOD( diff --git a/lite/test/test_network.cpp b/lite/test/test_network.cpp index c7cab766a807a705141ea3393be8c842bd27965e..8734e8ee7a7a98167bf3827775a21b95dbc54ae7 100644 --- a/lite/test/test_network.cpp +++ b/lite/test/test_network.cpp @@ -1357,5 +1357,6 @@ TEST(TestNetWork, CambriconDeviceID) { load_device_id(LiteDeviceType::LITE_CAMBRICON, 0, "./model_magicmind.mgb"); } #endif + #endif // vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}