Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Clean up memory for Auto Annotation #1328

Merged
merged 2 commits into from
Apr 27, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- React UI is the primary UI

### Fixed
- Cleaned up memory in Auto Annotation to enable long running tasks on videos
- A new shape is added when pressing ``esc`` while drawing, instead of cancelling the drawing
- Dextr segmentation doesn't work.
- `FileNotFoundError` during dump after moving format files
Expand Down
115 changes: 65 additions & 50 deletions cvat/apps/auto_annotation/inference.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import itertools
from .model_loader import ModelLoader
from cvat.apps.engine.utils import import_modules, execute_python_code
import itertools

def _process_detections(detections, path_to_conv_script, restricted=True):
results = Results()
Expand Down Expand Up @@ -31,6 +31,17 @@ def _process_detections(detections, path_to_conv_script, restricted=True):

return results

def _process_attributes(shape_attributes, label_attr_spec):
attributes = []
for attr_text, attr_value in shape_attributes.items():
if attr_text in label_attr_spec:
attributes.append({
"spec_id": label_attr_spec[attr_text],
"value": attr_value,
})

return attributes

class Results():
def __init__(self):
self._results = {
Expand Down Expand Up @@ -84,25 +95,62 @@ def _create_polyshape(points: list, label: int, frame_number: int, attributes: d
"attributes": attributes or {},
}

def run_inference_engine_annotation(data, model_file, weights_file,
labels_mapping, attribute_spec, convertation_file, job=None, update_progress=None, restricted=True):
def process_attributes(shape_attributes, label_attr_spec):
attributes = []
for attr_text, attr_value in shape_attributes.items():
if attr_text in label_attr_spec:
attributes.append({
"spec_id": label_attr_spec[attr_text],
"value": attr_value,
})
class InferenceAnnotationRunner:
def __init__(self, data, model_file, weights_file, labels_mapping,
attribute_spec, convertation_file):
self.data = iter(data)
self.data_len = len(data)
self.model = ModelLoader(model=model_file, weights=weights_file)
self.frame_counter = 0
self.attribute_spec = attribute_spec
self.convertation_file = convertation_file
self.iteration_size = 128
self.labels_mapping = labels_mapping


def run(self, job=None, update_progress=None, restricted=True):
result = {
"shapes": [],
"tracks": [],
"tags": [],
"version": 0
}

detections = []
for _ in range(self.iteration_size):
try:
frame = next(self.data)
except StopIteration:
break

orig_rows, orig_cols = frame.shape[:2]

detections.append({
"frame_id": self.frame_counter,
"frame_height": orig_rows,
"frame_width": orig_cols,
"detections": self.model.infer(frame),
})

self.frame_counter += 1
if job and update_progress and not update_progress(job, self.frame_counter * 100 / self.data_len):
return None, False

processed_detections = _process_detections(detections, self.convertation_file, restricted=restricted)

return attributes
self._add_shapes(processed_detections.get_shapes(), result["shapes"])

def add_shapes(shapes, target_container):
more_items = self.frame_counter != self.data_len

return result, more_items

def _add_shapes(self, shapes, target_container):
for shape in shapes:
if shape["label"] not in labels_mapping:
if shape["label"] not in self.labels_mapping:
continue
db_label = labels_mapping[shape["label"]]
label_attr_spec = attribute_spec.get(db_label)

db_label = self.labels_mapping[shape["label"]]
label_attr_spec = self.attribute_spec.get(db_label)
target_container.append({
"label_id": db_label,
"frame": shape["frame"],
Expand All @@ -111,38 +159,5 @@ def add_shapes(shapes, target_container):
"z_order": 0,
"group": None,
"occluded": False,
"attributes": process_attributes(shape["attributes"], label_attr_spec),
"attributes": _process_attributes(shape["attributes"], label_attr_spec),
})

result = {
"shapes": [],
"tracks": [],
"tags": [],
"version": 0
}

data_len = len(data)
model = ModelLoader(model=model_file, weights=weights_file)

frame_counter = 0

detections = []
for frame in data:
orig_rows, orig_cols = frame.shape[:2]

detections.append({
"frame_id": frame_counter,
"frame_height": orig_rows,
"frame_width": orig_cols,
"detections": model.infer(frame),
})

frame_counter += 1
if job and update_progress and not update_progress(job, frame_counter * 100 / data_len):
return None

processed_detections = _process_detections(detections, convertation_file, restricted=restricted)

add_shapes(processed_detections.get_shapes(), result["shapes"])

return result
3 changes: 1 addition & 2 deletions cvat/apps/auto_annotation/inference_engine.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,6 @@

_IE_PLUGINS_PATH = os.getenv("IE_PLUGINS_PATH", None)


def _check_instruction(instruction):
return instruction == str.strip(
subprocess.check_output(
Expand All @@ -24,7 +23,7 @@ def make_plugin_or_core():
use_core_openvino = False
try:
major, minor, reference = [int(x) for x in version.split('.')]
if major >= 2 and minor >= 1 and reference >= 37988:
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@benhoff, why isn't the condition important anymore?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The reference condition will continue to change. In order to test this, I had to compile dldt on my machine. My reference was completely different.

My concern was that when they roll the release to a 3.1.XXXXX, the reference number might be smaller than 37988, causing this line to malfunction.

This might also happen for a 2.2.XXXXX

if major >= 2 and minor >= 1:
use_core_openvino = True
except Exception:
pass
Expand Down
50 changes: 26 additions & 24 deletions cvat/apps/auto_annotation/model_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@
from .models import AnnotationModel, FrameworkChoice
from .model_loader import load_labelmap
from .image_loader import ImageLoader
from .inference import run_inference_engine_annotation
from .inference import InferenceAnnotationRunner


def _remove_old_file(model_file_field):
Expand All @@ -44,15 +44,15 @@ def _run_test(model_file, weights_file, labelmap_file, interpretation_file):
test_image = np.ones((1024, 1980, 3), np.uint8) * 255
try:
dummy_labelmap = {key: key for key in load_labelmap(labelmap_file).keys()}
run_inference_engine_annotation(
runner = InferenceAnnotationRunner(
data=[test_image,],
model_file=model_file,
weights_file=weights_file,
labels_mapping=dummy_labelmap,
attribute_spec={},
convertation_file=interpretation_file,
restricted=restricted
)
convertation_file=interpretation_file)

runner.run(restricted=restricted)
except Exception as e:
return False, str(e)

Expand Down Expand Up @@ -227,30 +227,32 @@ def update_progress(job, progress):

result = None
slogger.glob.info("auto annotation with openvino toolkit for task {}".format(tid))
result = run_inference_engine_annotation(
more_data = True
runner = InferenceAnnotationRunner(
data=ImageLoader(FrameProvider(db_task.data)),
model_file=model_file,
weights_file=weights_file,
labels_mapping=labels_mapping,
attribute_spec=attributes,
convertation_file= convertation_file,
job=job,
update_progress=update_progress,
restricted=restricted
)

if result is None:
slogger.glob.info("auto annotation for task {} canceled by user".format(tid))
return

serializer = LabeledDataSerializer(data = result)
if serializer.is_valid(raise_exception=True):
if reset:
put_task_data(tid, user, result)
else:
patch_task_data(tid, user, result, "create")

slogger.glob.info("auto annotation for task {} done".format(tid))
convertation_file= convertation_file)
while more_data:
result, more_data = runner.run(
job=job,
update_progress=update_progress,
restricted=restricted)

if result is None:
slogger.glob.info("auto annotation for task {} canceled by user".format(tid))
return

serializer = LabeledDataSerializer(data = result)
if serializer.is_valid(raise_exception=True):
if reset:
put_task_data(tid, user, result)
else:
patch_task_data(tid, user, result, "create")

slogger.glob.info("auto annotation for task {} done".format(tid))
except Exception as e:
try:
slogger.task[tid].exception("exception was occurred during auto annotation of the task", exc_info=True)
Expand Down