Commit 5c421628 authored by kouzhenzhong

lineagemgr: add test case for lineage decouple

Parent f52b491a
@@ -31,6 +31,7 @@ import pytest
from mindinsight.lineagemgr import get_summary_lineage
from mindinsight.lineagemgr.collection.model.model_lineage import TrainLineage, EvalLineage, \
    AnalyzeObject
from mindinsight.lineagemgr.common.utils import make_directory
from mindinsight.lineagemgr.common.exceptions.error_code import LineageErrors
from mindinsight.lineagemgr.common.exceptions.exceptions import LineageParamRunContextError
from mindinsight.utils.exceptions import MindInsightException
@@ -198,6 +199,7 @@ class TestModelLineage(TestCase):
    @pytest.mark.platform_x86_ascend_training
    @pytest.mark.platform_x86_cpu
    @pytest.mark.env_single
@mock.patch('mindinsight.lineagemgr.summary.summary_record.get_lineage_file_name')
    @mock.patch('os.path.getsize')
    def test_multiple_trains(self, *args):
        """
@@ -208,22 +210,116 @@ class TestModelLineage(TestCase):
        """
        args[0].return_value = 10
        for i in range(2):
            summary_record = SummaryRecord(
                SUMMARY_DIR_2,
create_time=int(time.time()) + i)
eval_record = SummaryRecord(
SUMMARY_DIR_2,
create_time=int(time.time() + 10) + i)
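            # Point the mocked lineage file name at a distinct train output file for this iteration.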
args[1].return_value = os.path.join(
SUMMARY_DIR_2,
f'train_out.events.summary.{str(int(time.time()) + 2*i)}.ubuntu_lineage'
)
            train_callback = TrainLineage(summary_record, True)
            train_callback.begin(RunContext(self.run_context))
            train_callback.end(RunContext(self.run_context))
time.sleep(1)
args[1].return_value = os.path.join(
SUMMARY_DIR_2,
f'eval_out.events.summary.{str(int(time.time())+ 2*i + 1)}.ubuntu_lineage'
)
            eval_callback = EvalLineage(eval_record, True)
            eval_run_context = self.run_context
            eval_run_context['metrics'] = {'accuracy': 0.78 + i + 1}
            eval_run_context['valid_dataset'] = self.run_context['train_dataset']
            eval_run_context['step_num'] = 32
            eval_callback.end(RunContext(eval_run_context))
time.sleep(1)
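        # After two train/eval rounds the summary directory is expected to hold eight files in total.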
        file_num = os.listdir(SUMMARY_DIR_2)
        assert len(file_num) == 8
@pytest.mark.scene_train(2)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_single
@mock.patch('mindinsight.lineagemgr.summary.summary_record.get_lineage_file_name')
@mock.patch('mindinsight.lineagemgr.collection.model.model_lineage.AnalyzeObject.get_file_size')
def test_train_eval(self, *args):
"""Callback for train once and eval once."""
args[0].return_value = 10
summary_dir = os.path.join(BASE_SUMMARY_DIR, 'train_eval')
make_directory(summary_dir)
args[1].return_value = os.path.join(
summary_dir,
f'train_out.events.summary.{str(int(time.time()))}.ubuntu_lineage'
)
train_callback = TrainLineage(summary_dir)
train_callback.begin(RunContext(self.run_context))
train_callback.end(RunContext(self.run_context))
args[1].return_value = os.path.join(
summary_dir,
f'eval_out.events.summary.{str(int(time.time())+1)}.ubuntu_lineage'
)
eval_callback = EvalLineage(summary_dir)
eval_run_context = self.run_context
eval_run_context['metrics'] = {'accuracy': 0.78}
eval_run_context['valid_dataset'] = self.run_context['train_dataset']
eval_run_context['step_num'] = 32
eval_callback.end(RunContext(eval_run_context))
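        # The collected lineage should expose the hyper-parameters and network recorded during training.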
res = get_summary_lineage(summary_dir)
assert res.get('hyper_parameters', {}).get('loss_function') \
== 'SoftmaxCrossEntropyWithLogits'
assert res.get('algorithm', {}).get('network') == 'ResNet'
if os.path.exists(summary_dir):
shutil.rmtree(summary_dir)
@pytest.mark.scene_train(2)
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_single
@mock.patch('mindinsight.lineagemgr.summary.summary_record.get_lineage_file_name')
@mock.patch('mindinsight.lineagemgr.collection.model.model_lineage.AnalyzeObject.get_file_size')
def test_train_multi_eval(self, *args):
"""Callback for train once and eval twice."""
args[0].return_value = 10
summary_dir = os.path.join(BASE_SUMMARY_DIR, 'train_multi_eval')
make_directory(summary_dir)
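        # Fixed timestamps keep the train lineage file and the two eval lineage files distinct.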
args[1].return_value = os.path.join(
summary_dir,
'train_out.events.summary.1590107366.ubuntu_lineage')
train_callback = TrainLineage(summary_dir, True)
train_callback.begin(RunContext(self.run_context))
train_callback.end(RunContext(self.run_context))
args[1].return_value = os.path.join(
summary_dir,
'eval_out.events.summary.1590107367.ubuntu_lineage')
eval_callback = EvalLineage(summary_dir, True)
eval_run_context = self.run_context
eval_run_context['valid_dataset'] = self.run_context['train_dataset']
eval_run_context['metrics'] = {'accuracy': 0.79}
eval_callback.end(RunContext(eval_run_context))
res = get_summary_lineage(summary_dir)
assert res.get('metric', {}).get('accuracy') == 0.79
args[1].return_value = os.path.join(
summary_dir,
'eval_out.events.summary.1590107368.ubuntu_lineage')
eval_callback = EvalLineage(summary_dir, True)
eval_run_context = self.run_context
eval_run_context['valid_dataset'] = self.run_context['train_dataset']
eval_run_context['metrics'] = {'accuracy': 0.80}
eval_callback.end(RunContext(eval_run_context))
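        # Querying again should return the accuracy of the most recent evaluation.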
res = get_summary_lineage(summary_dir)
assert res.get('metric', {}).get('accuracy') == 0.80
if os.path.exists(summary_dir):
shutil.rmtree(summary_dir)
    @pytest.mark.scene_train(2)
    @pytest.mark.level0
    @pytest.mark.platform_arm_ascend_training
......