diff --git a/mindinsight/backend/datavisual/task_manager_api.py b/mindinsight/backend/datavisual/task_manager_api.py
index c07f231e2ccecdaaaa355e79b43fb3e9db5b4179..43a890a952b197ab396d5cf8504bf86f752cbaa6 100644
--- a/mindinsight/backend/datavisual/task_manager_api.py
+++ b/mindinsight/backend/datavisual/task_manager_api.py
@@ -66,12 +66,13 @@ def query_train_jobs():
     """Query train jobs."""
     offset = request.args.get("offset", default=0)
     limit = request.args.get("limit", default=10)
+    train_id = get_train_id(request)
 
     offset = Validation.check_offset(offset=offset)
     limit = Validation.check_limit(limit, min_value=1, max_value=SummaryWatcher.MAX_SUMMARY_DIR_COUNT)
 
     processor = TrainTaskManager(DATA_MANAGER)
-    total, train_jobs = processor.query_train_jobs(offset, limit)
+    total, train_jobs = processor.query_train_jobs(offset, limit, train_id)
 
     return jsonify({
         'name': os.path.basename(os.path.realpath(settings.SUMMARY_BASE_DIR)),
diff --git a/mindinsight/conf/constants.py b/mindinsight/conf/constants.py
index f304349fbd46d7439f3bd422d98eb0e8fe986e48..0ccb270071fc75af8060b1d543c761afeb5448c8 100644
--- a/mindinsight/conf/constants.py
+++ b/mindinsight/conf/constants.py
@@ -84,9 +84,10 @@
 MAX_TAG_SIZE_PER_EVENTS_DATA = 300
 DEFAULT_STEP_SIZES_PER_TAG = 500
 MAX_GRAPH_TAG_SIZE = 10
+MAX_TENSOR_TAG_SIZE = 6
 MAX_IMAGE_STEP_SIZE_PER_TAG = 10
 MAX_SCALAR_STEP_SIZE_PER_TAG = 1000
 MAX_GRAPH_STEP_SIZE_PER_TAG = 1
 MAX_HISTOGRAM_STEP_SIZE_PER_TAG = 50
-MAX_TENSOR_STEP_SIZE_PER_TAG = 50
+MAX_TENSOR_STEP_SIZE_PER_TAG = 20
 MAX_TENSOR_RESPONSE_DATA_SIZE = 100000
diff --git a/mindinsight/datavisual/data_transform/events_data.py b/mindinsight/datavisual/data_transform/events_data.py
index 07d054c345b2cf92eb2efa430d31ef0c932013a9..fb27d0a8fcd02013426a36007811555ab07964df 100644
--- a/mindinsight/datavisual/data_transform/events_data.py
+++ b/mindinsight/datavisual/data_transform/events_data.py
@@ -35,6 +35,7 @@
 CONFIG = {
     'max_tag_sizes_per_plugin': {
         PluginNameEnum.GRAPH.value:
             settings.MAX_GRAPH_TAG_SIZE,
+        PluginNameEnum.TENSOR.value: settings.MAX_TENSOR_TAG_SIZE
     },
     'max_step_sizes_per_tag': {
diff --git a/mindinsight/datavisual/processors/train_task_manager.py b/mindinsight/datavisual/processors/train_task_manager.py
index 540ea39bc7e9afd82088ff04e07df616e45f4c51..f0e780fa294e7bb2e0563a41e5d86b74a9e223e0 100644
--- a/mindinsight/datavisual/processors/train_task_manager.py
+++ b/mindinsight/datavisual/processors/train_task_manager.py
@@ -83,17 +83,24 @@ class TrainTaskManager(BaseProcessor):
             plugins=plugins
         )
 
-    def query_train_jobs(self, offset=0, limit=10):
+    def query_train_jobs(self, offset=0, limit=10, request_train_id=None):
         """
         Query train jobs.
 
         Args:
             offset (int): Specify page number. Default is 0.
             limit (int): Specify page size. Default is 10.
+            request_train_id (str): Specify train id. Default is None.
 
         Returns:
             tuple, return quantity of total train jobs and list of train jobs specified by offset and limit.
         """
+        if request_train_id is not None:
+            train_job_item = self._get_train_job_item(request_train_id)
+            if train_job_item is None:
+                return 0, []
+            return 1, [train_job_item]
+
         brief_cache = self._data_manager.get_brief_cache()
         brief_train_jobs = list(brief_cache.get_train_jobs().values())
         brief_train_jobs.sort(key=lambda x: x.basic_info.update_time, reverse=True)
@@ -106,37 +113,52 @@ class TrainTaskManager(BaseProcessor):
         train_ids = [train_job.basic_info.train_id
                      for train_job in brief_train_jobs[start:end]]
         for train_id in train_ids:
-            try:
-                train_job = self._data_manager.get_train_job(train_id)
-            except exceptions.TrainJobNotExistError:
-                logger.warning('Train job %s not existed', train_id)
+            train_job_item = self._get_train_job_item(train_id)
+            if train_job_item is None:
                 continue
-
-            basic_info = train_job.get_basic_info()
-            train_job_item = dict(
-                train_id=basic_info.train_id,
-                relative_path=basic_info.train_id,
-                create_time=basic_info.create_time.strftime('%Y-%m-%d %H:%M:%S'),
-                update_time=basic_info.update_time.strftime('%Y-%m-%d %H:%M:%S'),
-                profiler_dir=basic_info.profiler_dir,
-                cache_status=train_job.cache_status.value,
-            )
-
-            if train_job.cache_status == CacheStatus.CACHED:
-                plugins = self.get_plugins(train_id)
-            else:
-                plugins = dict(plugins={
-                    'graph': [],
-                    'scalar': [],
-                    'image': [],
-                    'histogram': [],
-                })
-
-            train_job_item.update(plugins)
             train_jobs.append(train_job_item)
 
         return total, train_jobs
 
+    def _get_train_job_item(self, train_id):
+        """
+        Get train job item.
+
+        Args:
+            train_id (str): Specify train id.
+
+        Returns:
+            dict, a dict of train job item.
+        """
+        try:
+            train_job = self._data_manager.get_train_job(train_id)
+        except exceptions.TrainJobNotExistError:
+            logger.warning('Train job %s not existed', train_id)
+            return None
+
+        basic_info = train_job.get_basic_info()
+        train_job_item = dict(
+            train_id=basic_info.train_id,
+            relative_path=basic_info.train_id,
+            create_time=basic_info.create_time.strftime('%Y-%m-%d %H:%M:%S'),
+            update_time=basic_info.update_time.strftime('%Y-%m-%d %H:%M:%S'),
+            profiler_dir=basic_info.profiler_dir,
+            cache_status=train_job.cache_status.value,
+        )
+
+        if train_job.cache_status == CacheStatus.CACHED:
+            plugins = self.get_plugins(train_id)
+        else:
+            plugins = dict(plugins={
+                'graph': [],
+                'scalar': [],
+                'image': [],
+                'histogram': [],
+            })
+
+        train_job_item.update(plugins)
+        return train_job_item
+
     def cache_train_jobs(self, train_ids):
         """
         Cache train jobs.