Skip to content

Commit 4465c36

Browse files
committed
Add Instance Segmentation (object counting) training tests.
1 parent d975d18 commit 4465c36

File tree

2 files changed

+231
-97
lines changed

2 files changed

+231
-97
lines changed

external/mmdetection/tests/expected_metrics/metrics_test_ote_training.yml

+44
Original file line numberDiff line numberDiff line change
@@ -349,3 +349,47 @@
349349
: "metrics.accuracy.f-measure":
350350
"base": "nncf_evaluation.metrics.accuracy.f-measure"
351351
"max_diff": 0.06
352+
353+
? "ACTION-training_evaluation,model-Custom_Counting_Instance_Segmentation_MaskRCNN_ResNet50,dataset-aeromonas,num_iters-CONFIG,batch-CONFIG,usecase-reallife"
354+
: "metrics.accuracy.f-measure":
355+
"target_value": 0.92
356+
"max_diff_if_less_threshold": 0.06
357+
"max_diff_if_greater_threshold": 0.06
358+
? "ACTION-export_evaluation,model-Custom_Counting_Instance_Segmentation_MaskRCNN_ResNet50,dataset-aeromonas,num_iters-CONFIG,batch-CONFIG,usecase-reallife"
359+
: "metrics.accuracy.f-measure":
360+
"base": "training_evaluation.metrics.accuracy.f-measure"
361+
"max_diff": 0.01
362+
? "ACTION-pot_evaluation,model-Custom_Counting_Instance_Segmentation_MaskRCNN_ResNet50,dataset-aeromonas,num_iters-CONFIG,batch-CONFIG,usecase-reallife"
363+
: "metrics.accuracy.f-measure":
364+
"base": "training_evaluation.metrics.accuracy.f-measure"
365+
"max_diff": 0.01
366+
? "ACTION-nncf_evaluation,model-Custom_Counting_Instance_Segmentation_MaskRCNN_ResNet50,dataset-aeromonas,num_iters-CONFIG,batch-CONFIG,usecase-reallife"
367+
: "metrics.accuracy.f-measure":
368+
"base": "training_evaluation.metrics.accuracy.f-measure"
369+
"max_diff_if_less_threshold": 0.01
370+
? "ACTION-nncf_export_evaluation,model-Custom_Counting_Instance_Segmentation_MaskRCNN_ResNet50,dataset-aeromonas,num_iters-CONFIG,batch-CONFIG,usecase-reallife"
371+
: "metrics.accuracy.f-measure":
372+
"base": "nncf_evaluation.metrics.accuracy.f-measure"
373+
"max_diff": 0.01
374+
375+
? "ACTION-training_evaluation,model-Custom_Counting_Instance_Segmentation_MaskRCNN_EfficientNetB2B,dataset-aeromonas,num_iters-CONFIG,batch-CONFIG,usecase-reallife"
376+
: "metrics.accuracy.f-measure":
377+
"target_value": 0.92
378+
"max_diff_if_less_threshold": 0.06
379+
"max_diff_if_greater_threshold": 0.06
380+
? "ACTION-export_evaluation,model-Custom_Counting_Instance_Segmentation_MaskRCNN_EfficientNetB2B,dataset-aeromonas,num_iters-CONFIG,batch-CONFIG,usecase-reallife"
381+
: "metrics.accuracy.f-measure":
382+
"base": "training_evaluation.metrics.accuracy.f-measure"
383+
"max_diff": 0.01
384+
? "ACTION-pot_evaluation,model-Custom_Counting_Instance_Segmentation_MaskRCNN_EfficientNetB2B,dataset-aeromonas,num_iters-CONFIG,batch-CONFIG,usecase-reallife"
385+
: "metrics.accuracy.f-measure":
386+
"base": "training_evaluation.metrics.accuracy.f-measure"
387+
"max_diff": 0.01
388+
? "ACTION-nncf_evaluation,model-Custom_Counting_Instance_Segmentation_MaskRCNN_EfficientNetB2B,dataset-aeromonas,num_iters-CONFIG,batch-CONFIG,usecase-reallife"
389+
: "metrics.accuracy.f-measure":
390+
"base": "training_evaluation.metrics.accuracy.f-measure"
391+
"max_diff_if_less_threshold": 0.01
392+
? "ACTION-nncf_export_evaluation,model-Custom_Counting_Instance_Segmentation_MaskRCNN_EfficientNetB2B,dataset-aeromonas,num_iters-CONFIG,batch-CONFIG,usecase-reallife"
393+
: "metrics.accuracy.f-measure":
394+
"base": 'nncf_evaluation.metrics.accuracy.f-measure'
395+
"max_diff": 0.01

external/mmdetection/tests/test_ote_training.py

+187-97
Original file line numberDiff line numberDiff line change
@@ -70,26 +70,26 @@ def _get_dataset_params_from_dataset_definitions(dataset_definitions, dataset_na
7070
return params
7171

7272

73-
def _create_object_detection_dataset_and_labels_schema(dataset_params):
73+
def _create_object_detection_dataset_and_labels_schema(dataset_params, domain: Domain):
7474
logger.debug(f'Using for train annotation file {dataset_params.annotations_train}')
7575
logger.debug(f'Using for val annotation file {dataset_params.annotations_val}')
7676
labels_list = []
7777
items = load_dataset_items_coco_format(
7878
ann_file_path=dataset_params.annotations_train,
7979
data_root_dir=dataset_params.images_train_dir,
80-
domain=Domain.DETECTION,
80+
domain=domain,
8181
subset=Subset.TRAINING,
8282
labels_list=labels_list)
8383
items.extend(load_dataset_items_coco_format(
8484
ann_file_path=dataset_params.annotations_val,
8585
data_root_dir=dataset_params.images_val_dir,
86-
domain=Domain.DETECTION,
86+
domain=domain,
8787
subset=Subset.VALIDATION,
8888
labels_list=labels_list))
8989
items.extend(load_dataset_items_coco_format(
9090
ann_file_path=dataset_params.annotations_test,
9191
data_root_dir=dataset_params.images_test_dir,
92-
domain=Domain.DETECTION,
92+
domain=domain,
9393
subset=Subset.TESTING,
9494
labels_list=labels_list))
9595
dataset = DatasetEntity(items=items)
@@ -98,7 +98,6 @@ def _create_object_detection_dataset_and_labels_schema(dataset_params):
9898

9999

100100
class ObjectDetectionTrainingTestParameters(DefaultOTETestCreationParametersInterface):
101-
102101
def test_bunches(self) -> List[Dict[str, Any]]:
103102
test_bunches = [
104103
dict(
@@ -133,6 +132,32 @@ def test_bunches(self) -> List[Dict[str, Any]]:
133132
return deepcopy(test_bunches)
134133

135134

135+
class InstanceSegmentationTrainingTestParameters(DefaultOTETestCreationParametersInterface):
136+
def test_bunches(self) -> List[Dict[str, Any]]:
137+
test_bunches = [
138+
dict(
139+
model_name=[
140+
'Custom_Counting_Instance_Segmentation_MaskRCNN_ResNet50',
141+
'Custom_Counting_Instance_Segmentation_MaskRCNN_EfficientNetB2B',
142+
],
143+
dataset_name='aeromonas_short',
144+
usecase='precommit',
145+
),
146+
dict(
147+
model_name=[
148+
'Custom_Counting_Instance_Segmentation_MaskRCNN_ResNet50',
149+
'Custom_Counting_Instance_Segmentation_MaskRCNN_EfficientNetB2B',
150+
],
151+
dataset_name='aeromonas',
152+
num_training_iters=KEEP_CONFIG_FIELD_VALUE,
153+
batch_size=KEEP_CONFIG_FIELD_VALUE,
154+
usecase=REALLIFE_USECASE_CONSTANT,
155+
),
156+
157+
]
158+
return deepcopy(test_bunches)
159+
160+
136161
def get_dummy_compressed_model(task):
137162
"""
138163
Return compressed model without initialization
@@ -156,6 +181,128 @@ def get_dummy_compressed_model(task):
156181
return compressed_model
157182

158183

184+
@pytest.fixture
185+
def params_factories_for_test_actions_fx(current_test_parameters_fx,
186+
dataset_definitions_fx, template_paths_fx,
187+
ote_current_reference_dir_fx) -> Dict[str, Callable[[], Dict]]:
188+
logger.debug('params_factories_for_test_actions_fx: begin')
189+
190+
test_parameters = deepcopy(current_test_parameters_fx)
191+
dataset_definitions = deepcopy(dataset_definitions_fx)
192+
template_paths = deepcopy(template_paths_fx)
193+
194+
def _training_params_factory() -> Dict:
195+
if dataset_definitions is None:
196+
pytest.skip('The parameter "--dataset-definitions" is not set')
197+
198+
model_name = test_parameters['model_name']
199+
if "Custom_Object_Detection" in model_name:
200+
domain = Domain.DETECTION
201+
elif "Custom_Counting_Instance_Segmentation" in model_name:
202+
domain = Domain.INSTANCE_SEGMENTATION
203+
else:
204+
domain = None
205+
dataset_name = test_parameters['dataset_name']
206+
num_training_iters = test_parameters['num_training_iters']
207+
batch_size = test_parameters['batch_size']
208+
209+
dataset_params = _get_dataset_params_from_dataset_definitions(dataset_definitions, dataset_name)
210+
211+
if model_name not in template_paths:
212+
raise ValueError(f'Model {model_name} is absent in template_paths, '
213+
f'template_paths.keys={list(template_paths.keys())}')
214+
template_path = make_path_be_abs(template_paths[model_name], template_paths[ROOT_PATH_KEY])
215+
216+
logger.debug('training params factory: Before creating dataset and labels_schema')
217+
dataset, labels_schema = _create_object_detection_dataset_and_labels_schema(
218+
dataset_params, domain)
219+
logger.debug('training params factory: After creating dataset and labels_schema')
220+
221+
return {
222+
'dataset': dataset,
223+
'labels_schema': labels_schema,
224+
'template_path': template_path,
225+
'num_training_iters': num_training_iters,
226+
'batch_size': batch_size,
227+
}
228+
229+
def _nncf_graph_params_factory() -> Dict:
230+
if dataset_definitions is None:
231+
pytest.skip('The parameter "--dataset-definitions" is not set')
232+
233+
model_name = test_parameters['model_name']
234+
if "Custom_Object_Detection" in model_name:
235+
domain = Domain.DETECTION
236+
elif "Custom_Counting_Instance_Segmentation" in model_name:
237+
domain = Domain.INSTANCE_SEGMENTATION
238+
else:
239+
domain = None
240+
dataset_name = test_parameters['dataset_name']
241+
242+
dataset_params = _get_dataset_params_from_dataset_definitions(dataset_definitions, dataset_name)
243+
244+
if model_name not in template_paths:
245+
raise ValueError(f'Model {model_name} is absent in template_paths, '
246+
f'template_paths.keys={list(template_paths.keys())}')
247+
template_path = make_path_be_abs(template_paths[model_name], template_paths[ROOT_PATH_KEY])
248+
249+
logger.debug('training params factory: Before creating dataset and labels_schema')
250+
dataset, labels_schema = _create_object_detection_dataset_and_labels_schema(
251+
dataset_params, domain)
252+
logger.debug('training params factory: After creating dataset and labels_schema')
253+
254+
return {
255+
'dataset': dataset,
256+
'labels_schema': labels_schema,
257+
'template_path': template_path,
258+
'reference_dir': ote_current_reference_dir_fx,
259+
'fn_get_compressed_model': get_dummy_compressed_model,
260+
}
261+
262+
params_factories_for_test_actions = {
263+
'training': _training_params_factory,
264+
'nncf_graph': _nncf_graph_params_factory,
265+
}
266+
logger.debug('params_factories_for_test_actions_fx: end')
267+
return params_factories_for_test_actions
268+
269+
270+
# TODO(lbeynens): move to common fixtures
271+
@pytest.fixture
272+
def data_collector_fx(request) -> DataCollector:
273+
setup = deepcopy(request.node.callspec.params)
274+
setup['environment_name'] = os.environ.get('TT_ENVIRONMENT_NAME', 'no-env')
275+
setup['test_type'] = os.environ.get('TT_TEST_TYPE', 'no-test-type') # TODO: get from e2e test type
276+
setup['scenario'] = 'api' # TODO(lbeynens): get from a fixture!
277+
setup['test'] = request.node.name
278+
setup['project'] = 'ote'
279+
if 'test_parameters' in setup:
280+
assert isinstance(setup['test_parameters'], dict)
281+
if 'dataset_name' not in setup:
282+
setup['dataset_name'] = setup['test_parameters'].get('dataset_name')
283+
if 'model_name' not in setup:
284+
setup['model_name'] = setup['test_parameters'].get('model_name')
285+
if 'test_stage' not in setup:
286+
setup['test_stage'] = setup['test_parameters'].get('test_stage')
287+
if 'usecase' not in setup:
288+
setup['usecase'] = setup['test_parameters'].get('usecase')
289+
model_name = setup['test_parameters'].get('model_name')
290+
if "Custom_Object_Detection" in model_name:
291+
subject = 'custom-object-detection'
292+
elif "Custom_Counting_Instance_Segmentation" in model_name:
293+
subject = 'custom-counting-instance-seg'
294+
else:
295+
subject = None
296+
setup['subject'] = subject
297+
logger.info(f'creating DataCollector: setup=\n{pformat(setup, width=140)}')
298+
data_collector = DataCollector(name='TestOTEIntegration',
299+
setup=setup)
300+
with data_collector:
301+
logger.info('data_collector is created')
302+
yield data_collector
303+
logger.info('data_collector is released')
304+
305+
159306
class TestOTEReallifeObjectDetection(OTETrainingTestInterface):
160307
"""
161308
The main class of running test in this file.
@@ -172,74 +319,45 @@ def get_list_of_tests(cls, usecase: Optional[str] = None):
172319
return cls.helper.get_list_of_tests(usecase)
173320

174321
@pytest.fixture
175-
def params_factories_for_test_actions_fx(self, current_test_parameters_fx,
176-
dataset_definitions_fx, template_paths_fx,
177-
ote_current_reference_dir_fx) -> Dict[str,Callable[[], Dict]]:
178-
logger.debug('params_factories_for_test_actions_fx: begin')
179-
180-
test_parameters = deepcopy(current_test_parameters_fx)
181-
dataset_definitions = deepcopy(dataset_definitions_fx)
182-
template_paths = deepcopy(template_paths_fx)
183-
def _training_params_factory() -> Dict:
184-
if dataset_definitions is None:
185-
pytest.skip('The parameter "--dataset-definitions" is not set')
186-
187-
model_name = test_parameters['model_name']
188-
dataset_name = test_parameters['dataset_name']
189-
num_training_iters = test_parameters['num_training_iters']
190-
batch_size = test_parameters['batch_size']
191-
192-
dataset_params = _get_dataset_params_from_dataset_definitions(dataset_definitions, dataset_name)
193-
194-
if model_name not in template_paths:
195-
raise ValueError(f'Model {model_name} is absent in template_paths, '
196-
f'template_paths.keys={list(template_paths.keys())}')
197-
template_path = make_path_be_abs(template_paths[model_name], template_paths[ROOT_PATH_KEY])
198-
199-
logger.debug('training params factory: Before creating dataset and labels_schema')
200-
dataset, labels_schema = _create_object_detection_dataset_and_labels_schema(dataset_params)
201-
logger.debug('training params factory: After creating dataset and labels_schema')
202-
203-
return {
204-
'dataset': dataset,
205-
'labels_schema': labels_schema,
206-
'template_path': template_path,
207-
'num_training_iters': num_training_iters,
208-
'batch_size': batch_size,
209-
}
210-
211-
def _nncf_graph_params_factory() -> Dict:
212-
if dataset_definitions is None:
213-
pytest.skip('The parameter "--dataset-definitions" is not set')
214-
215-
model_name = test_parameters['model_name']
216-
dataset_name = test_parameters['dataset_name']
217-
218-
dataset_params = _get_dataset_params_from_dataset_definitions(dataset_definitions, dataset_name)
322+
def test_case_fx(self, current_test_parameters_fx, params_factories_for_test_actions_fx):
323+
"""
324+
This fixture returns the test case class OTEIntegrationTestCase that should be used for the current test.
325+
Note that the cache from the test helper allows to store the instance of the class
326+
between the tests.
327+
If the main parameters used for this test are the same as the main parameters used for the previous test,
328+
the instance of the test case class will be kept and re-used. It is helpful for tests that can
329+
re-use the result of operations (model training, model optimization, etc) made for the previous tests,
330+
if these operations are time-consuming.
331+
If the main parameters used for this test differ w.r.t. the previous test, a new instance of
332+
test case class will be created.
333+
"""
334+
test_case = type(self).helper.get_test_case(current_test_parameters_fx,
335+
params_factories_for_test_actions_fx)
336+
return test_case
219337

220-
if model_name not in template_paths:
221-
raise ValueError(f'Model {model_name} is absent in template_paths, '
222-
f'template_paths.keys={list(template_paths.keys())}')
223-
template_path = make_path_be_abs(template_paths[model_name], template_paths[ROOT_PATH_KEY])
338+
@e2e_pytest_performance
339+
def test(self,
340+
test_parameters,
341+
test_case_fx, data_collector_fx,
342+
cur_test_expected_metrics_callback_fx):
343+
test_case_fx.run_stage(test_parameters['test_stage'], data_collector_fx,
344+
cur_test_expected_metrics_callback_fx)
224345

225-
logger.debug('training params factory: Before creating dataset and labels_schema')
226-
dataset, labels_schema = _create_object_detection_dataset_and_labels_schema(dataset_params)
227-
logger.debug('training params factory: After creating dataset and labels_schema')
228346

229-
return {
230-
'dataset': dataset,
231-
'labels_schema': labels_schema,
232-
'template_path': template_path,
233-
'reference_dir': ote_current_reference_dir_fx,
234-
'fn_get_compressed_model': get_dummy_compressed_model,
235-
}
347+
class TestInstanceSegmentation(OTETrainingTestInterface):
348+
"""
349+
Class for running the Instance Segmentation (object counting) training tests in this file.
350+
"""
351+
PERFORMANCE_RESULTS = None # it is required for e2e system
352+
helper = OTETestHelper(InstanceSegmentationTrainingTestParameters())
236353

237-
params_factories_for_test_actions = {
238-
'training': _training_params_factory,
239-
'nncf_graph': _nncf_graph_params_factory,
240-
}
241-
logger.debug('params_factories_for_test_actions_fx: end')
242-
return params_factories_for_test_actions
354+
@classmethod
355+
def get_list_of_tests(cls, usecase: Optional[str] = None):
356+
"""
357+
This method should be a classmethod. It is called before fixture initialization, during
358+
tests discovering.
359+
"""
360+
return cls.helper.get_list_of_tests(usecase)
243361

244362
@pytest.fixture
245363
def test_case_fx(self, current_test_parameters_fx, params_factories_for_test_actions_fx):
@@ -258,34 +376,6 @@ def test_case_fx(self, current_test_parameters_fx, params_factories_for_test_act
258376
params_factories_for_test_actions_fx)
259377
return test_case
260378

261-
# TODO(lbeynens): move to common fixtures
262-
@pytest.fixture
263-
def data_collector_fx(self, request) -> DataCollector:
264-
setup = deepcopy(request.node.callspec.params)
265-
setup['environment_name'] = os.environ.get('TT_ENVIRONMENT_NAME', 'no-env')
266-
setup['test_type'] = os.environ.get('TT_TEST_TYPE', 'no-test-type') # TODO: get from e2e test type
267-
setup['scenario'] = 'api' # TODO(lbeynens): get from a fixture!
268-
setup['test'] = request.node.name
269-
setup['subject'] = 'custom-object-detection'
270-
setup['project'] = 'ote'
271-
if 'test_parameters' in setup:
272-
assert isinstance(setup['test_parameters'], dict)
273-
if 'dataset_name' not in setup:
274-
setup['dataset_name'] = setup['test_parameters'].get('dataset_name')
275-
if 'model_name' not in setup:
276-
setup['model_name'] = setup['test_parameters'].get('model_name')
277-
if 'test_stage' not in setup:
278-
setup['test_stage'] = setup['test_parameters'].get('test_stage')
279-
if 'usecase' not in setup:
280-
setup['usecase'] = setup['test_parameters'].get('usecase')
281-
logger.info(f'creating DataCollector: setup=\n{pformat(setup, width=140)}')
282-
data_collector = DataCollector(name='TestOTEIntegration',
283-
setup=setup)
284-
with data_collector:
285-
logger.info('data_collector is created')
286-
yield data_collector
287-
logger.info('data_collector is released')
288-
289379
@e2e_pytest_performance
290380
def test(self,
291381
test_parameters,

0 commit comments

Comments
 (0)