Commit bde2efa3 authored by Megvii Engine Team

feat(lite/load_and_run): support put and get model redis cache

GitOrigin-RevId: 55c82e28c197197dc1762ce5e5550f9d88087ae6
Parent 48526abb
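The commit teaches load_and_run to put and get model/algo cache blobs through Redis, using the third_party cpp_redis client that the CMake changes below link in. A minimal sketch of that put/get mechanism, assuming a reachable Redis server; the class name and "category:key" scheme are illustrative, not the code added by this commit:

```cpp
// Hypothetical sketch (not the commit's actual implementation): a minimal
// category/key -> value cache backed by Redis via the cpp_redis client.
// Binary-safe key encoding and error handling are omitted.
#include <cpp_redis/cpp_redis>
#include <string>

class RedisModelCache {
public:
    RedisModelCache(const std::string& host = "127.0.0.1", std::size_t port = 6379) {
        m_client.connect(host, port);
    }

    // store one cache blob under "<category>:<key>"
    void put(const std::string& category, const std::string& key,
             const std::string& value) {
        m_client.set(category + ":" + key, value);
        m_client.sync_commit();
    }

    // fetch a previously stored blob; returns false on a cache miss
    bool get(const std::string& category, const std::string& key,
             std::string& value_out) {
        bool hit = false;
        m_client.get(category + ":" + key, [&](cpp_redis::reply& reply) {
            if (reply.is_string()) {
                hit = true;
                value_out = reply.as_string();
            }
        });
        m_client.sync_commit();
        return hit;
    }

private:
    cpp_redis::client m_client;
};
```

In this commit the cached payloads appear to be MegEngine persistent-cache blobs (fast-run algo profiling results), which is consistent with the headers pulled in below (infile_persistent_cache.h, algo_chooser.h) and the new InFilePersistentCache accessor further down.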
......@@ -30,11 +30,24 @@ if(UNIX)
endif()
endif()
if(LITE_BUILD_WITH_MGE
AND LITE_WITH_CUDA
AND NOT WIN32)
# FIXME: third_party cpp_redis does not support building with clang-cl
target_include_directories(load_and_run PRIVATE ${CPP_REDIS_INCLUDES})
endif()
install(
TARGETS load_and_run
EXPORT ${LITE_EXPORT_TARGETS}
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
if(BUILD_SHARED_LIBS)
if(LITE_BUILD_WITH_MGE
AND LITE_WITH_CUDA
AND NOT WIN32)
# FIXME: third_party cpp_redis does not support building with clang-cl
list(APPEND SOURCES ${CPP_REDIS_SRCS})
endif()
add_executable(load_and_run_depends_shared ${SOURCES})
target_link_libraries(load_and_run_depends_shared lite_shared)
target_link_libraries(load_and_run_depends_shared gflags)
......@@ -58,6 +71,14 @@ if(BUILD_SHARED_LIBS)
endif()
endif()
if(LITE_BUILD_WITH_MGE
AND LITE_WITH_CUDA
AND NOT WIN32)
# FIXME: third_party cpp_redis does not support building with clang-cl
target_include_directories(load_and_run_depends_shared
PRIVATE ${CPP_REDIS_INCLUDES})
endif()
install(
TARGETS load_and_run_depends_shared
EXPORT ${MGE_EXPORT_TARGETS}
......
......@@ -10,9 +10,12 @@
#include "model_options.h"
#include "device_options.h"
#include "lite/pack_model.h"
#include "megbrain/opr/search_policy/algo_chooser.h"
#include "megbrain/utils/infile_persistent_cache.h"
#include "misc.h"
#include "models/model_lite.h"
#include "models/model_mdl.h"
#include "network_impl_base.h"
namespace lar {
template <typename ModelImpl>
......
......@@ -9,6 +9,7 @@
#pragma once
#include <gflags/gflags.h>
#include "megbrain/graph/operator_node.h"
#include "models/model.h"
#include "option_base.h"
......
......@@ -176,6 +176,10 @@ public:
//! dump network after global layout transform optimization
void dump_layout_transform_model(std::string optimized_model_path);
mgb::serialization::GraphLoader::LoadResult get_load_result() {
return m_load_result;
}
private:
//! construct the outputspec according to the m_network_io, and set the
//! call_back to the outputspec
......
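The new get_load_result() accessor exposes the deserialized graph to option handling code. A hedged usage sketch; print_outputs is an illustrative helper, and the member access assumes the layout of mgb::serialization::GraphLoader::LoadResult:

```cpp
// Hedged sketch: list the output vars of a model after ModelMdl has loaded
// it, via the new get_load_result() accessor. Assumes LoadResult exposes
// `output_var_list` as in mgb::serialization::GraphLoader::LoadResult.
#include <cstdio>
#include "models/model_mdl.h"

void print_outputs(lar::ModelMdl& model) {
    auto load_result = model.get_load_result();
    for (auto&& var : load_result.output_var_list) {
        std::printf("model output: %s\n", var.node()->cname());
    }
}
```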
......@@ -78,6 +78,13 @@ public:
MGE_WIN_DECLSPEC_FUC void put(
const std::string& category, const Blob& key, const Blob& value) override;
bool support_dump_cache() override { return true; }
std::unordered_map<
std::string,
std::unordered_map<BlobStorage, BlobStorage, BlobStorage::Hash>>
get_cache() {
return std::move(m_cache);
}
};
} // namespace mgb
......
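get_cache() hands the entire in-memory map to the caller (note the std::move), which lets load_and_run serialize every category/blob pair into an external store. A hedged consumption sketch: dump_cache_to_redis is an illustrative helper, not the commit's code, and it assumes each BlobStorage entry exposes the base Blob members `ptr` and `size`:

```cpp
// Hedged sketch: walk the map returned by the new get_cache() accessor and
// push each key/value blob into Redis, namespaced by cache category.
#include <cpp_redis/cpp_redis>
#include <string>
#include "megbrain/utils/infile_persistent_cache.h"

static std::string blob_to_string(const void* ptr, size_t size) {
    return std::string(static_cast<const char*>(ptr), size);
}

void dump_cache_to_redis(mgb::InFilePersistentCache& cache,
                         cpp_redis::client& client) {
    // get_cache() moves the map out, so the in-memory cache is consumed here
    auto entries = cache.get_cache();
    for (auto&& category : entries) {
        for (auto&& kv : category.second) {
            auto redis_key = category.first + ":" +
                             blob_to_string(kv.first.ptr, kv.first.size);
            client.set(redis_key, blob_to_string(kv.second.ptr, kv.second.size));
        }
    }
    client.sync_commit();
}
```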
......@@ -28,8 +28,6 @@ using namespace mgb;
// timeout delta to be added with fastest known algorithm for new algos
constexpr double TIMEOUT_TOLERANCE = 2;
#define CACHE_KEY_VERSION "v5"
namespace {
template <class MegDNNOpr>
......
......@@ -6,6 +6,7 @@
#include "megbrain/utils/persistent_cache.h"
#include "megdnn/oprs/base.h"
#define CACHE_KEY_VERSION "v5"
namespace mgb {
namespace rdnn {
......