Commit b07993f2 authored by mindspore-ci-bot, committed by Gitee


!486 1. add a limit on the number of tags in tensor visualization; 2. update the max step per tensor tag to 20; 3. support querying one train job in the train_jobs interface
Merge pull request !486 from wangshuide/wsd0727
@@ -66,12 +66,13 @@ def query_train_jobs():
"""Query train jobs."""
offset = request.args.get("offset", default=0)
limit = request.args.get("limit", default=10)
train_id = get_train_id(request)
offset = Validation.check_offset(offset=offset)
limit = Validation.check_limit(limit, min_value=1, max_value=SummaryWatcher.MAX_SUMMARY_DIR_COUNT)
processor = TrainTaskManager(DATA_MANAGER)
total, train_jobs = processor.query_train_jobs(offset, limit)
total, train_jobs = processor.query_train_jobs(offset, limit, train_id)
return jsonify({
'name': os.path.basename(os.path.realpath(settings.SUMMARY_BASE_DIR)),
......
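For context, this hunk is the handler side of item 3 in the commit message: `train_jobs` now accepts an optional train id. A minimal sketch of both query styles, assuming the usual MindInsight route `/v1/mindinsight/datavisual/train-jobs` on a local server at port 8080 (route and port are assumptions, not taken from this diff):

```python
import requests

# Assumed endpoint; adjust host/port/route to your deployment.
BASE = "http://127.0.0.1:8080/v1/mindinsight/datavisual/train-jobs"

# Paged listing, unchanged by this commit.
page = requests.get(BASE, params={"offset": 0, "limit": 10}).json()

# New: pass train_id to fetch exactly one job; the server answers with
# total=1 and a one-item list, or total=0 and [] if the id is unknown.
single = requests.get(BASE, params={"train_id": "./run_1"}).json()
```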
@@ -84,9 +84,10 @@ MAX_TAG_SIZE_PER_EVENTS_DATA = 300
 DEFAULT_STEP_SIZES_PER_TAG = 500
 MAX_GRAPH_TAG_SIZE = 10
+MAX_TENSOR_TAG_SIZE = 6
 MAX_IMAGE_STEP_SIZE_PER_TAG = 10
 MAX_SCALAR_STEP_SIZE_PER_TAG = 1000
 MAX_GRAPH_STEP_SIZE_PER_TAG = 1
 MAX_HISTOGRAM_STEP_SIZE_PER_TAG = 50
-MAX_TENSOR_STEP_SIZE_PER_TAG = 50
+MAX_TENSOR_STEP_SIZE_PER_TAG = 20
 MAX_TENSOR_RESPONSE_DATA_SIZE = 100000
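The tightened `MAX_TENSOR_STEP_SIZE_PER_TAG = 20` means at most 20 steps are retained per tensor tag. A minimal sketch of the fixed-size sampling such a cap implies, written as classic reservoir sampling (illustrative only; this class is not the project's actual reservoir implementation):

```python
import random

class StepReservoir:
    """Keep at most `size` samples per tag, sampled uniformly over all steps seen."""

    def __init__(self, size=20):  # 20 mirrors MAX_TENSOR_STEP_SIZE_PER_TAG
        self._size = size
        self._samples = []
        self._seen = 0

    def add(self, sample):
        """Offer one step's value; once full, old entries are evicted at random."""
        self._seen += 1
        if len(self._samples) < self._size:
            self._samples.append(sample)
            return
        # Keep the newcomer with probability size/seen, evicting a random slot.
        slot = random.randint(0, self._seen - 1)
        if slot < self._size:
            self._samples[slot] = sample
```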
@@ -35,6 +35,7 @@ CONFIG = {
     'max_tag_sizes_per_plugin':
         {
             PluginNameEnum.GRAPH.value: settings.MAX_GRAPH_TAG_SIZE,
+            PluginNameEnum.TENSOR.value: settings.MAX_TENSOR_TAG_SIZE
         },
     'max_step_sizes_per_tag':
         {
......
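The `CONFIG` entry above registers a tensor tag cap next to the existing graph cap. A hypothetical lookup helper showing how such a table is typically consumed (the name `get_tag_cap` and the fallback value are illustrative, not MindInsight API):

```python
def get_tag_cap(config, plugin_name, default=300):
    """Return the per-plugin tag cap, falling back to a general ceiling.

    default=300 echoes MAX_TAG_SIZE_PER_EVENTS_DATA above; plugins without
    a dedicated entry (scalar, image, histogram) fall back to it.
    """
    return config['max_tag_sizes_per_plugin'].get(plugin_name, default)
```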
@@ -83,17 +83,24 @@ class TrainTaskManager(BaseProcessor):
             plugins=plugins
         )

-    def query_train_jobs(self, offset=0, limit=10):
+    def query_train_jobs(self, offset=0, limit=10, request_train_id=None):
         """
         Query train jobs.

         Args:
             offset (int): Specify page number. Default is 0.
             limit (int): Specify page size. Default is 10.
+            request_train_id (str): Specify train id. Default is None.

         Returns:
             tuple, return quantity of total train jobs and list of train jobs specified by offset and limit.
         """
+        if request_train_id is not None:
+            train_job_item = self._get_train_job_item(request_train_id)
+            if train_job_item is None:
+                return 0, []
+            return 1, [train_job_item]
+
         brief_cache = self._data_manager.get_brief_cache()
         brief_train_jobs = list(brief_cache.get_train_jobs().values())
         brief_train_jobs.sort(key=lambda x: x.basic_info.update_time, reverse=True)
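A small usage sketch of the new early-return path in `query_train_jobs` (the processor wiring mirrors the handler in the first hunk; the train id is made up):

```python
processor = TrainTaskManager(DATA_MANAGER)

# Single-job lookup: (1, [item]) when the job exists, (0, []) otherwise.
# No paging or sorting work is done on this path.
total, jobs = processor.query_train_jobs(request_train_id='./run_1')

# Paged lookup behaves as before when no train id is supplied.
total, jobs = processor.query_train_jobs(offset=0, limit=10)
```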
@@ -106,37 +113,52 @@ class TrainTaskManager(BaseProcessor):
         train_ids = [train_job.basic_info.train_id for train_job in brief_train_jobs[start:end]]

         for train_id in train_ids:
-            try:
-                train_job = self._data_manager.get_train_job(train_id)
-            except exceptions.TrainJobNotExistError:
-                logger.warning('Train job %s not existed', train_id)
+            train_job_item = self._get_train_job_item(train_id)
+            if train_job_item is None:
                 continue
-            basic_info = train_job.get_basic_info()
-            train_job_item = dict(
-                train_id=basic_info.train_id,
-                relative_path=basic_info.train_id,
-                create_time=basic_info.create_time.strftime('%Y-%m-%d %H:%M:%S'),
-                update_time=basic_info.update_time.strftime('%Y-%m-%d %H:%M:%S'),
-                profiler_dir=basic_info.profiler_dir,
-                cache_status=train_job.cache_status.value,
-            )
-            if train_job.cache_status == CacheStatus.CACHED:
-                plugins = self.get_plugins(train_id)
-            else:
-                plugins = dict(plugins={
-                    'graph': [],
-                    'scalar': [],
-                    'image': [],
-                    'histogram': [],
-                })
-            train_job_item.update(plugins)
             train_jobs.append(train_job_item)

         return total, train_jobs

+    def _get_train_job_item(self, train_id):
+        """
+        Get train job item.
+
+        Args:
+            train_id (str): Specify train id.
+
+        Returns:
+            dict, a dict of train job item.
+        """
+        try:
+            train_job = self._data_manager.get_train_job(train_id)
+        except exceptions.TrainJobNotExistError:
+            logger.warning('Train job %s not existed', train_id)
+            return None
+
+        basic_info = train_job.get_basic_info()
+        train_job_item = dict(
+            train_id=basic_info.train_id,
+            relative_path=basic_info.train_id,
+            create_time=basic_info.create_time.strftime('%Y-%m-%d %H:%M:%S'),
+            update_time=basic_info.update_time.strftime('%Y-%m-%d %H:%M:%S'),
+            profiler_dir=basic_info.profiler_dir,
+            cache_status=train_job.cache_status.value,
+        )
+        if train_job.cache_status == CacheStatus.CACHED:
+            plugins = self.get_plugins(train_id)
+        else:
+            plugins = dict(plugins={
+                'graph': [],
+                'scalar': [],
+                'image': [],
+                'histogram': [],
+            })
+        train_job_item.update(plugins)
+        return train_job_item
+
     def cache_train_jobs(self, train_ids):
         """
         Cache train jobs.
......
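For reference, one item returned by `_get_train_job_item` (and hence by both `query_train_jobs` paths) has the following shape; all field values here are made up, and the exact `cache_status` strings depend on the `CacheStatus` enum:

```python
example_item = {
    'train_id': './run_1',
    'relative_path': './run_1',
    'create_time': '2020-07-27 10:00:00',
    'update_time': '2020-07-27 10:05:00',
    'profiler_dir': None,
    'cache_status': 'CACHED',  # value of train_job.cache_status
    # Empty lists are the not-yet-cached fallback; cached jobs list real tags.
    'plugins': {'graph': [], 'scalar': [], 'image': [], 'histogram': []},
}
```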