Commit d898838e authored by Megvii Engine Team

fix(src/gopt/test): remove device info in the cache_data

GitOrigin-RevId: 8e9758971d36bc2bdafad373577c19878628dd56
Parent a2a46b56
......@@ -77,9 +77,12 @@ void ProfilerCache::Key::build_category(CompNode cn) {
    switch (env.property().type) {
#if MGB_CUDA
        case CompNode::DeviceType::CUDA: {
            auto&& prop = env.cuda_env().device_prop;
            m_category += ssprintf(
                    "plat=cuda;dev=%s;cap=%d.%d", prop.name, prop.major, prop.minor);
            m_category += "plat=cuda";
            if (ProfilerCache::inst().enable_device_info()) {
                auto&& prop = env.cuda_env().device_prop;
                m_category += ssprintf(
                        ";dev=%s;cap=%d.%d", prop.name, prop.major, prop.minor);
            }
            break;
        }
#endif
......
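For reference, here is a minimal Python sketch of the category strings the key builder above produces with device info enabled versus disabled. The "layout_transform_profile:" prefix and the field layout follow the regex used in embed_cache.py further down; the device name and compute capability are made up for illustration.

def build_category(enable_device_info, dev="FakeGPU", major=7, minor=5):
    # always record the platform; append the device name and compute
    # capability only when device info is enabled
    category = "layout_transform_profile:plat=cuda"
    if enable_device_info:
        category += ";dev=%s;cap=%d.%d" % (dev, major, minor)
    return category

print(build_category(True))   # layout_transform_profile:plat=cuda;dev=FakeGPU;cap=7.5
print(build_category(False))  # layout_transform_profile:plat=cuda

With device info disabled, the cache key no longer depends on the GPU the profile was collected on, which is what lets a cache produced on one machine be replayed on another.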
......@@ -291,8 +291,14 @@ public:
    void put(const Key& key, Result& result);

    bool enable_device_info() { return m_enable_device_info; }
    void enable_device_info(bool flag) { m_enable_device_info = flag; }

private:
    std::unique_ptr<PersistentCache> m_impl;

    // whether to save platform information into the cache.
    bool m_enable_device_info = true;
};

class CachedProfiler final : public ProfilerImpl {
......
This diff is suppressed by .gitattributes.
......@@ -19,7 +19,7 @@
# 2. Build megbrain_test and run all the global graph optimization related tests:
# ./megbrain_test --gtest_filter="*LayoutTransform*"
# 3. Use this script to pack all the cache files together
# python3 embed_cache.py -o cache_data.h $(ls /path/to/cache/*.cache)
# python3 embed_cache.py -o cache_data.h -r $(ls /path/to/cache/*.cache)
# 4. Change the define statement from step 1 back to its original form, so the profiling pass will use the cached data.
# 5. Finally, rebuild megbrain_test to make sure the test results are correct.
import os.path
......@@ -30,6 +30,7 @@ import struct
import itertools
import sys
import subprocess
import re
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.WARNING, format='%(asctime)-15s %(message)s')
......@@ -42,8 +43,9 @@ def _u32(data):
class CacheDataGenerator:
    _cache_files = None

    def __init__(self, cache_files):
    def __init__(self, cache_files, remove_plat_info = True):
        self._cache_files = cache_files
        self._remove_plat_info = remove_plat_info

    def _get_hash(self):
        return _u32(self._hash.digest()[:4])
......@@ -52,6 +54,14 @@ class CacheDataGenerator:
        fname = os.path.basename(fpath)
        with open(fpath, 'rb') as fcache:
            cache_data = fcache.read()
        if self._remove_plat_info:
            for matched in re.finditer(
                    rb"(layout_transform_profile:plat=.*);dev=.*;cap=\d.\d",
                    cache_data
            ):
                plat_info = matched.group(1)
                cat_info = cache_data[matched.span()[0] - 4: matched.span()[1]]
                cache_data = re.sub(cat_info, struct.pack('I', len(plat_info)) + plat_info, cache_data)
        cache_data = struct.unpack(
            "<{}B".format(len(cache_data)), cache_data)
        ret = list(map(CHAR_MAP.__getitem__, cache_data))
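The four extra bytes captured in front of each match are not arbitrary: the cache file appears to store every category string as a native 4-byte length followed by the raw bytes, so dropping the ";dev=...;cap=x.y" suffix also requires rewriting that length prefix. A self-contained sketch of the same idea, with a made-up device entry:

import re
import struct

# hypothetical category as it might appear inside a .cache file:
# a native uint32 length prefix followed by the category bytes
cat = b"layout_transform_profile:plat=cuda;dev=FakeGPU;cap=7.5"
blob = struct.pack('I', len(cat)) + cat

m = re.search(rb"(layout_transform_profile:plat=.*);dev=.*;cap=\d.\d", blob)
plat_info = m.group(1)              # category with the device suffix stripped
old = blob[m.start() - 4:m.end()]   # old length prefix + full category
new = struct.pack('I', len(plat_info)) + plat_info
blob = blob.replace(old, new)       # shorter category, consistent length prefix

assert blob == struct.pack('I', len(plat_info)) + plat_info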
......@@ -89,7 +99,14 @@ if __name__ == '__main__':
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-o', '--output', help='output source file',
                        required=True)
    parser.add_argument(
        "-r",
        "--remove-plat-info",
        action='store_true',
        default=True,
        help="whether to remove platform information in the cache (default: True)"
    )
    parser.add_argument('cache', help='cache files to be embedded', nargs='+')
    args = parser.parse_args()
    cache_generator = CacheDataGenerator(args.cache)
    cache_generator = CacheDataGenerator(args.cache, args.remove_plat_info)
    cache_generator.invoke(args.output)
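For completeness, a small sketch of driving the generator from Python rather than the command line; the cache path is a placeholder and treating embed_cache.py as an importable module is an assumption.

from embed_cache import CacheDataGenerator  # assumes the script is importable as a module

gen = CacheDataGenerator(["/path/to/cache/example.cache"], remove_plat_info=True)
gen.invoke("cache_data.h")  # writes the embedded, platform-stripped cache header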
......@@ -90,6 +90,8 @@ public:
        mgb_assert(bin != nullptr);
        ProfilerCache::inst().set_impl(
                std::make_unique<InFilePersistentCache>(bin, size));
        // disable saving platform information to make CI stable.
        ProfilerCache::inst().enable_device_info(false);
    }
    ~ProfilerMock() {
        // reset in memory cache
......