Unverified commit 8a2efa4d, authored by Ben Hoff, committed by GitHub

fix auto annotation to not eat all RAM (#1328)

Co-authored-by: Nikita Manovich <40690625+nmanovic@users.noreply.github.com>
Parent: 176dc718
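The patch replaces the one-shot run_inference_engine_annotation function, which buffered detections for every frame of a task before converting them, with an InferenceAnnotationRunner class that pulls frames from an iterator in fixed-size chunks (128 frames) and hands each chunk's annotations back to the caller, so memory stays bounded regardless of video length. Below is a minimal sketch of that chunking pattern, not code from the patch; the infer and commit callables are hypothetical stand-ins for the model call and the per-chunk result handling:

    def process_in_chunks(frames, infer, commit, chunk_size=128):
        # Pull frames from the iterator chunk_size at a time; each chunk's
        # results are handed off (and can be freed) before the next chunk.
        it = iter(frames)
        more = True
        while more:
            chunk = []
            for _ in range(chunk_size):
                try:
                    chunk.append(infer(next(it)))
                except StopIteration:
                    more = False  # source exhausted; flush what we have and stop
                    break
            if chunk:
                commit(chunk)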
@@ -55,6 +55,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - React UI is the primary UI
 
 ### Fixed
+- Cleaned up memory in Auto Annotation to enable long running tasks on videos
 - New shape is added when press ``esc`` when drawing instead of cancellation
 - Dextr segmentation doesn't work.
 - `FileNotFoundError` during dump after moving format files
+import itertools
+
 from .model_loader import ModelLoader
 from cvat.apps.engine.utils import import_modules, execute_python_code
-import itertools
 
 def _process_detections(detections, path_to_conv_script, restricted=True):
     results = Results()
@@ -31,6 +31,17 @@ def _process_detections(detections, path_to_conv_script, restricted=True):
 
     return results
+def _process_attributes(shape_attributes, label_attr_spec):
+    attributes = []
+    for attr_text, attr_value in shape_attributes.items():
+        if attr_text in label_attr_spec:
+            attributes.append({
+                "spec_id": label_attr_spec[attr_text],
+                "value": attr_value,
+            })
+
+    return attributes
+
 class Results():
     def __init__(self):
         self._results = {
@@ -84,25 +95,62 @@ class Results():
            "attributes": attributes or {},
        }
-def run_inference_engine_annotation(data, model_file, weights_file,
-    labels_mapping, attribute_spec, convertation_file, job=None, update_progress=None, restricted=True):
-    def process_attributes(shape_attributes, label_attr_spec):
-        attributes = []
-        for attr_text, attr_value in shape_attributes.items():
-            if attr_text in label_attr_spec:
-                attributes.append({
-                    "spec_id": label_attr_spec[attr_text],
-                    "value": attr_value,
-                })
-
-        return attributes
-
-    def add_shapes(shapes, target_container):
+class InferenceAnnotationRunner:
+    def __init__(self, data, model_file, weights_file, labels_mapping,
+                 attribute_spec, convertation_file):
+        self.data = iter(data)
+        self.data_len = len(data)
+        self.model = ModelLoader(model=model_file, weights=weights_file)
+        self.frame_counter = 0
+        self.attribute_spec = attribute_spec
+        self.convertation_file = convertation_file
+        self.iteration_size = 128
+        self.labels_mapping = labels_mapping
+
+    def run(self, job=None, update_progress=None, restricted=True):
+        result = {
+            "shapes": [],
+            "tracks": [],
+            "tags": [],
+            "version": 0
+        }
+
+        detections = []
+        for _ in range(self.iteration_size):
+            try:
+                frame = next(self.data)
+            except StopIteration:
+                break
+
+            orig_rows, orig_cols = frame.shape[:2]
+
+            detections.append({
+                "frame_id": self.frame_counter,
+                "frame_height": orig_rows,
+                "frame_width": orig_cols,
+                "detections": self.model.infer(frame),
+            })
+
+            self.frame_counter += 1
+            if job and update_progress and not update_progress(job, self.frame_counter * 100 / self.data_len):
+                return None, False
+
+        processed_detections = _process_detections(detections, self.convertation_file, restricted=restricted)
+
+        self._add_shapes(processed_detections.get_shapes(), result["shapes"])
+
+        more_items = self.frame_counter != self.data_len
+
+        return result, more_items
+
+    def _add_shapes(self, shapes, target_container):
         for shape in shapes:
-            if shape["label"] not in labels_mapping:
+            if shape["label"] not in self.labels_mapping:
                 continue
 
-            db_label = labels_mapping[shape["label"]]
-            label_attr_spec = attribute_spec.get(db_label)
+            db_label = self.labels_mapping[shape["label"]]
+            label_attr_spec = self.attribute_spec.get(db_label)
             target_container.append({
                 "label_id": db_label,
                 "frame": shape["frame"],
@@ -111,38 +159,5 @@ def run_inference_engine_annotation(data, model_file, weights_file,
                 "z_order": 0,
                 "group": None,
                 "occluded": False,
-                "attributes": process_attributes(shape["attributes"], label_attr_spec),
+                "attributes": _process_attributes(shape["attributes"], label_attr_spec),
             })
-
-    result = {
-        "shapes": [],
-        "tracks": [],
-        "tags": [],
-        "version": 0
-    }
-
-    data_len = len(data)
-    model = ModelLoader(model=model_file, weights=weights_file)
-    frame_counter = 0
-
-    detections = []
-    for frame in data:
-        orig_rows, orig_cols = frame.shape[:2]
-
-        detections.append({
-            "frame_id": frame_counter,
-            "frame_height": orig_rows,
-            "frame_width": orig_cols,
-            "detections": model.infer(frame),
-        })
-
-        frame_counter += 1
-        if job and update_progress and not update_progress(job, frame_counter * 100 / data_len):
-            return None
-
-    processed_detections = _process_detections(detections, convertation_file, restricted=restricted)
-
-    add_shapes(processed_detections.get_shapes(), result["shapes"])
-
-    return result
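Callers now drive the runner in a loop: each run() call processes at most iteration_size frames and returns a (result, more_items) pair, where more_items signals that frames remain. A sketch of the calling contract under those assumptions; the save helper is illustrative only, and the real caller serializes and stores the data as the last hunk below shows:

    more_data = True
    while more_data:
        result, more_data = runner.run(job=job,
                                       update_progress=update_progress,
                                       restricted=restricted)
        if result is None:  # update_progress reported a user cancellation
            return
        save(result)  # stand-in for LabeledDataSerializer + put/patch calls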
@@ -10,7 +10,6 @@ import platform
 
 _IE_PLUGINS_PATH = os.getenv("IE_PLUGINS_PATH", None)
 
-
 def _check_instruction(instruction):
     return instruction == str.strip(
         subprocess.check_output(
@@ -24,7 +23,7 @@ def make_plugin_or_core():
     use_core_openvino = False
     try:
         major, minor, reference = [int(x) for x in version.split('.')]
-        if major >= 2 and minor >= 1 and reference >= 37988:
+        if major >= 2 and minor >= 1:
             use_core_openvino = True
     except Exception:
         pass
@@ -23,7 +23,7 @@ from cvat.apps.engine.frame_provider import FrameProvider
 from .models import AnnotationModel, FrameworkChoice
 from .model_loader import load_labelmap
 from .image_loader import ImageLoader
-from .inference import run_inference_engine_annotation
+from .inference import InferenceAnnotationRunner
 
 def _remove_old_file(model_file_field):
@@ -44,15 +44,15 @@ def _update_dl_model_thread(dl_model_id, name, is_shared, model_file, weights_fi
     test_image = np.ones((1024, 1980, 3), np.uint8) * 255
     try:
         dummy_labelmap = {key: key for key in load_labelmap(labelmap_file).keys()}
-        run_inference_engine_annotation(
+        runner = InferenceAnnotationRunner(
             data=[test_image,],
             model_file=model_file,
             weights_file=weights_file,
             labels_mapping=dummy_labelmap,
             attribute_spec={},
-            convertation_file=interpretation_file,
-            restricted=restricted
-        )
+            convertation_file=interpretation_file)
+
+        runner.run(restricted=restricted)
     except Exception as e:
         return False, str(e)
@@ -227,30 +227,32 @@ def run_inference_thread(tid, model_file, weights_file, labels_mapping, attribut
         result = None
         slogger.glob.info("auto annotation with openvino toolkit for task {}".format(tid))
-        result = run_inference_engine_annotation(
+        more_data = True
+        runner = InferenceAnnotationRunner(
             data=ImageLoader(FrameProvider(db_task.data)),
             model_file=model_file,
             weights_file=weights_file,
             labels_mapping=labels_mapping,
             attribute_spec=attributes,
-            convertation_file= convertation_file,
-            job=job,
-            update_progress=update_progress,
-            restricted=restricted
-        )
-
-        if result is None:
-            slogger.glob.info("auto annotation for task {} canceled by user".format(tid))
-            return
-
-        serializer = LabeledDataSerializer(data = result)
-        if serializer.is_valid(raise_exception=True):
-            if reset:
-                put_task_data(tid, user, result)
-            else:
-                patch_task_data(tid, user, result, "create")
-
-        slogger.glob.info("auto annotation for task {} done".format(tid))
+            convertation_file= convertation_file)
+
+        while more_data:
+            result, more_data = runner.run(
+                job=job,
+                update_progress=update_progress,
+                restricted=restricted)
+
+            if result is None:
+                slogger.glob.info("auto annotation for task {} canceled by user".format(tid))
+                return
+
+            serializer = LabeledDataSerializer(data = result)
+            if serializer.is_valid(raise_exception=True):
+                if reset:
+                    put_task_data(tid, user, result)
+                else:
+                    patch_task_data(tid, user, result, "create")
+
+            slogger.glob.info("auto annotation for task {} done".format(tid))
     except Exception as e:
         try:
             slogger.task[tid].exception("exception was occurred during auto annotation of the task", exc_info=True)