MindSpore / mindinsight

Commit 5c421628
Authored May 23, 2020 by kouzhenzhong
lineagemgr: add test case for lineage decouple
Parent: f52b491a
Showing 1 changed file with 100 additions and 4 deletions.

tests/st/func/lineagemgr/collection/model/test_model_lineage.py  (+100, -4)
@@ -31,6 +31,7 @@ import pytest
 from mindinsight.lineagemgr import get_summary_lineage
 from mindinsight.lineagemgr.collection.model.model_lineage import TrainLineage, EvalLineage, \
     AnalyzeObject
+from mindinsight.lineagemgr.common.utils import make_directory
 from mindinsight.lineagemgr.common.exceptions.error_code import LineageErrors
 from mindinsight.lineagemgr.common.exceptions.exceptions import LineageParamRunContextError
 from mindinsight.utils.exceptions import MindInsightException
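The only functional change in this hunk is the new make_directory import; the tests added further down use it to build an isolated summary directory per test case and delete it again after the assertions pass. A minimal sketch of that setup/teardown pattern, assuming MindInsight is installed (the base directory value below is a placeholder, not the BASE_SUMMARY_DIR constant defined in the test module):

    import os
    import shutil

    from mindinsight.lineagemgr.common.utils import make_directory

    # Placeholder base directory for this sketch only.
    base_summary_dir = '/tmp/lineage_summary_demo'

    summary_dir = os.path.join(base_summary_dir, 'train_eval')
    make_directory(summary_dir)         # create an isolated summary directory for one test
    try:
        ...                             # run TrainLineage/EvalLineage callbacks against summary_dir
    finally:
        if os.path.exists(summary_dir):
            shutil.rmtree(summary_dir)  # remove it so later tests start from an empty workspace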
@@ -198,6 +199,7 @@ class TestModelLineage(TestCase):
     @pytest.mark.platform_x86_ascend_training
     @pytest.mark.platform_x86_cpu
     @pytest.mark.env_single
+    @mock.patch('mindinsight.lineagemgr.summary.summary_record.get_lineage_file_name')
     @mock.patch('os.path.getsize')
     def test_multiple_trains(self, *args):
         """
@@ -208,22 +210,116 @@ class TestModelLineage(TestCase):
         """
         args[0].return_value = 10
         for i in range(2):
-            summary_record = SummaryRecord(SUMMARY_DIR_2, create_time=int(time.time()))
-            eval_record = SummaryRecord(SUMMARY_DIR_2, create_time=int(time.time()) + 1)
+            summary_record = SummaryRecord(
+                SUMMARY_DIR_2, create_time=int(time.time()) + i)
+            eval_record = SummaryRecord(SUMMARY_DIR_2, create_time=int(time.time() + 10) + i)
+            args[1].return_value = os.path.join(
+                SUMMARY_DIR_2,
+                f'train_out.events.summary.{str(int(time.time()) + 2 * i)}.ubuntu_lineage')
             train_callback = TrainLineage(summary_record, True)
             train_callback.begin(RunContext(self.run_context))
             train_callback.end(RunContext(self.run_context))
-            time.sleep(1)
+            args[1].return_value = os.path.join(
+                SUMMARY_DIR_2,
+                f'eval_out.events.summary.{str(int(time.time()) + 2 * i + 1)}.ubuntu_lineage')
             eval_callback = EvalLineage(eval_record, True)
             eval_run_context = self.run_context
             eval_run_context['metrics'] = {'accuracy': 0.78 + i + 1}
             eval_run_context['valid_dataset'] = self.run_context['train_dataset']
             eval_run_context['step_num'] = 32
             eval_callback.end(RunContext(eval_run_context))
-            time.sleep(1)
         file_num = os.listdir(SUMMARY_DIR_2)
         assert len(file_num) == 8
 
+    @pytest.mark.scene_train(2)
+    @pytest.mark.level0
+    @pytest.mark.platform_arm_ascend_training
+    @pytest.mark.platform_x86_gpu_training
+    @pytest.mark.platform_x86_ascend_training
+    @pytest.mark.platform_x86_cpu
+    @pytest.mark.env_single
+    @mock.patch('mindinsight.lineagemgr.summary.summary_record.get_lineage_file_name')
+    @mock.patch('mindinsight.lineagemgr.collection.model.model_lineage.AnalyzeObject.get_file_size')
+    def test_train_eval(self, *args):
+        """Callback for train once and eval once."""
+        args[0].return_value = 10
+        summary_dir = os.path.join(BASE_SUMMARY_DIR, 'train_eval')
+        make_directory(summary_dir)
+        args[1].return_value = os.path.join(
+            summary_dir,
+            f'train_out.events.summary.{str(int(time.time()))}.ubuntu_lineage')
+        train_callback = TrainLineage(summary_dir)
+        train_callback.begin(RunContext(self.run_context))
+        train_callback.end(RunContext(self.run_context))
+        args[1].return_value = os.path.join(
+            summary_dir,
+            f'eval_out.events.summary.{str(int(time.time()) + 1)}.ubuntu_lineage')
+        eval_callback = EvalLineage(summary_dir)
+        eval_run_context = self.run_context
+        eval_run_context['metrics'] = {'accuracy': 0.78}
+        eval_run_context['valid_dataset'] = self.run_context['train_dataset']
+        eval_run_context['step_num'] = 32
+        eval_callback.end(RunContext(eval_run_context))
+        res = get_summary_lineage(summary_dir)
+        assert res.get('hyper_parameters', {}).get('loss_function') \
+            == 'SoftmaxCrossEntropyWithLogits'
+        assert res.get('algorithm', {}).get('network') == 'ResNet'
+        if os.path.exists(summary_dir):
+            shutil.rmtree(summary_dir)
+
+    @pytest.mark.scene_train(2)
+    @pytest.mark.level0
+    @pytest.mark.platform_arm_ascend_training
+    @pytest.mark.platform_x86_gpu_training
+    @pytest.mark.platform_x86_ascend_training
+    @pytest.mark.platform_x86_cpu
+    @pytest.mark.env_single
+    @mock.patch('mindinsight.lineagemgr.summary.summary_record.get_lineage_file_name')
+    @mock.patch('mindinsight.lineagemgr.collection.model.model_lineage.AnalyzeObject.get_file_size')
+    def test_train_multi_eval(self, *args):
+        """Callback for train once and eval twice."""
+        args[0].return_value = 10
+        summary_dir = os.path.join(BASE_SUMMARY_DIR, 'train_multi_eval')
+        make_directory(summary_dir)
+        args[1].return_value = os.path.join(
+            summary_dir, 'train_out.events.summary.1590107366.ubuntu_lineage')
+        train_callback = TrainLineage(summary_dir, True)
+        train_callback.begin(RunContext(self.run_context))
+        train_callback.end(RunContext(self.run_context))
+        args[1].return_value = os.path.join(
+            summary_dir, 'eval_out.events.summary.1590107367.ubuntu_lineage')
+        eval_callback = EvalLineage(summary_dir, True)
+        eval_run_context = self.run_context
+        eval_run_context['valid_dataset'] = self.run_context['train_dataset']
+        eval_run_context['metrics'] = {'accuracy': 0.79}
+        eval_callback.end(RunContext(eval_run_context))
+        res = get_summary_lineage(summary_dir)
+        assert res.get('metric', {}).get('accuracy') == 0.79
+        args[1].return_value = os.path.join(
+            summary_dir, 'eval_out.events.summary.1590107368.ubuntu_lineage')
+        eval_callback = EvalLineage(summary_dir, True)
+        eval_run_context = self.run_context
+        eval_run_context['valid_dataset'] = self.run_context['train_dataset']
+        eval_run_context['metrics'] = {'accuracy': 0.80}
+        eval_callback.end(RunContext(eval_run_context))
+        res = get_summary_lineage(summary_dir)
+        assert res.get('metric', {}).get('accuracy') == 0.80
+        if os.path.exists(summary_dir):
+            shutil.rmtree(summary_dir)
+
     @pytest.mark.scene_train(2)
     @pytest.mark.level0
     @pytest.mark.platform_arm_ascend_training
     ...
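The assertions in the new tests show the shape of what get_summary_lineage returns: a nested dict with 'hyper_parameters', 'algorithm', and 'metric' sections, populated from the events that TrainLineage and EvalLineage write. A hedged sketch of reading those same fields outside the test suite (the directory path is a placeholder, and only the keys exercised above are shown):

    from mindinsight.lineagemgr import get_summary_lineage

    # Placeholder path: any directory that already contains lineage event files.
    summary_dir = '/path/to/summary_dir'

    res = get_summary_lineage(summary_dir)
    # Only the sections asserted by the new tests are read here; the full
    # result contains additional lineage fields.
    loss_function = res.get('hyper_parameters', {}).get('loss_function')
    network = res.get('algorithm', {}).get('network')
    accuracy = res.get('metric', {}).get('accuracy')
    print(loss_function, network, accuracy)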