diff --git a/.github/workflows/hadolint.yml b/.github/workflows/hadolint.yml
new file mode 100644
index 000000000000..3ba99d5a069c
--- /dev/null
+++ b/.github/workflows/hadolint.yml
@@ -0,0 +1,47 @@
+name: Linter
+on: pull_request
+jobs:
+  HadoLint:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: Run checks
+        env:
+          HADOLINT: "${{ github.workspace }}/hadolint"
+          HADOLINT_VER: "2.1.0"
+          VERIFICATION_LEVEL: "error"
+        run: |
+          URL="https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}/files"
+          PR_FILES=$(curl -s -X GET -G $URL | jq -r '.[] | select(.status != "removed") | .filename')
+          for file in $PR_FILES; do
+            if [[ ${file} =~ 'Dockerfile' ]]; then
+              changed_dockerfiles+=" ${file}"
+            fi
+          done
+
+          if [[ ! -z ${changed_dockerfiles} ]]; then
+            curl -sL -o ${HADOLINT} "https://github.com/hadolint/hadolint/releases/download/v${HADOLINT_VER}/hadolint-Linux-x86_64" && chmod 700 ${HADOLINT}
+            echo "HadoLint version: "`${HADOLINT} --version`
+            echo "The following files will be checked: "`echo ${changed_dockerfiles}`
+            mkdir -p hadolint_report
+
+            ${HADOLINT} --no-fail --format json ${changed_dockerfiles} > ./hadolint_report/hadolint_report.json
+            get_verification_level=`cat ./hadolint_report/hadolint_report.json | jq -r '.[] | .level'`
+            for line in ${get_verification_level}; do
+              if [[ ${line} =~ ${VERIFICATION_LEVEL} ]]; then
+                pip install json2html
+                python ./tests/json_to_html.py ./hadolint_report/hadolint_report.json
+                exit 1
+              fi
+            done
+          else
+            echo "No files with the \"Dockerfile*\" name found"
+          fi
+
+      - name: Upload artifacts
+        if: failure()
+        uses: actions/upload-artifact@v2
+        with:
+          name: hadolint_report
+          path: hadolint_report
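For reference, the failure gate this workflow encodes, parse hadolint's JSON report and fail if any finding reaches the configured level, can be sketched in Python. A minimal illustration only, assuming a `hadolint` binary on `PATH` and a `Dockerfile` in the working directory; it is not part of the patch:

```python
import json
import subprocess
import sys

# Run hadolint in no-fail mode so we always get a complete JSON report,
# then exit non-zero only if some finding reaches the "error" level.
result = subprocess.run(
    ['hadolint', '--no-fail', '--format', 'json', 'Dockerfile'],
    capture_output=True, text=True,
)
report = json.loads(result.stdout or '[]')
if any(finding.get('level') == 'error' for finding in report):
    sys.exit(1)
```

Note that every entry of the report is checked before deciding; exiting early on the first non-matching level would skip real errors later in the list.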
diff --git a/.github/workflows/remark.yml b/.github/workflows/remark.yml
new file mode 100644
index 000000000000..3550e5227087
--- /dev/null
+++ b/.github/workflows/remark.yml
@@ -0,0 +1,46 @@
+name: Linter
+on: pull_request
+jobs:
+  Remark:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-node@v2
+        with:
+          node-version: 12
+
+      - name: Run checks
+        run: |
+          URL="https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}/files"
+          PR_FILES=$(curl -s -X GET -G $URL | jq -r '.[] | select(.status != "removed") | .filename')
+          for files in $PR_FILES; do
+            extension="${files##*.}"
+            if [[ $extension == 'md' ]]; then
+              changed_files_remark+=" ${files}"
+            fi
+          done
+
+          if [[ ! -z ${changed_files_remark} ]]; then
+            npm ci
+            npm install remark-cli vfile-reporter-json
+            mkdir -p remark_report
+
+            echo "Remark version: "`npx remark --version`
+            echo "The following files will be checked: "`echo ${changed_files_remark}`
+            npx remark --report json --no-stdout ${changed_files_remark} 2> ./remark_report/remark_report.json
+            get_report=`cat ./remark_report/remark_report.json | jq -r '.[]'`
+            if [[ ! -z ${get_report} ]]; then
+              pip install json2html
+              python ./tests/json_to_html.py ./remark_report/remark_report.json
+              exit 1
+            fi
+          else
+            echo "No files with the \"md\" extension found"
+          fi
+
+      - name: Upload artifacts
+        if: failure()
+        uses: actions/upload-artifact@v2
+        with:
+          name: remark_report
+          path: remark_report
diff --git a/.github/workflows/stylelint.yml b/.github/workflows/stylelint.yml
new file mode 100644
index 000000000000..76634447c915
--- /dev/null
+++ b/.github/workflows/stylelint.yml
@@ -0,0 +1,42 @@
+name: Linter
+on: pull_request
+jobs:
+  StyleLint:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-node@v2
+        with:
+          node-version: 12
+
+      - name: Run checks
+        run: |
+          URL="https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}/files"
+          PR_FILES=$(curl -s -X GET -G $URL | jq -r '.[] | select(.status != "removed") | .filename')
+          for files in $PR_FILES; do
+            extension="${files##*.}"
+            if [[ $extension == 'css' || $extension == 'scss' ]]; then
+              changed_files_stylelint+=" ${files}"
+            fi
+          done
+
+          if [[ ! -z ${changed_files_stylelint} ]]; then
+            npm ci
+            mkdir -p stylelint_report
+
+            echo "StyleLint version: "`npx stylelint --version`
+            echo "The following files will be checked: "`echo ${changed_files_stylelint}`
+            npx stylelint --formatter json --output-file ./stylelint_report/stylelint_report.json ${changed_files_stylelint} || exit_code=$?
+            pip install json2html
+            python ./tests/json_to_html.py ./stylelint_report/stylelint_report.json
+            exit ${exit_code}
+          else
+            echo "No files with the \"css|scss\" extension found"
+          fi
+
+      - name: Upload artifacts
+        if: failure()
+        uses: actions/upload-artifact@v2
+        with:
+          name: stylelint_report
+          path: stylelint_report
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b6a6eaa280bf..62042ab38a58 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,6 +11,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Documentation on mask annotation ()
 - Hotkeys to switch a label of existing object or to change default label (for objects created with N) ()
+- A script to convert some kinds of DICOM files to regular images ()
 
 ### Changed
@@ -28,6 +29,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Export of instance masks with holes ()
 - Changing a label on canvas does not work when 'Show object details' enabled ()
+- Make sure the frame unzip web worker correctly terminates after unzipping all images in a requested chunk ()
 
 ### Security
diff --git a/cvat-core/src/frames.js b/cvat-core/src/frames.js
index 4f029a3fd147..bd1009ef7200 100644
--- a/cvat-core/src/frames.js
+++ b/cvat-core/src/frames.js
@@ -286,7 +286,7 @@
             if (nextChunkNumber * chunkSize < this.stopFrame) {
                 provider.setReadyToLoading(nextChunkNumber);
                 const nextStart = nextChunkNumber * chunkSize;
-                const nextStop = (nextChunkNumber + 1) * chunkSize - 1;
+                const nextStop = Math.min(this.stopFrame, (nextChunkNumber + 1) * chunkSize - 1);
                 if (!provider.isChunkCached(nextStart, nextStop)) {
                     if (!frameDataCache[this.tid].activeChunkRequest) {
                         frameDataCache[this.tid].activeChunkRequest = {
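The one-line `frames.js` change clamps the prefetched chunk's stop frame to `stopFrame`. A quick Python illustration of the boundary case it fixes (the numbers are made up):

```python
# For the last chunk of a task, the naive stop frame overshoots the real
# end of the task, so the provider was asked for frames that don't exist.
chunk_size, stop_frame, next_chunk = 10, 30, 3
naive_stop = (next_chunk + 1) * chunk_size - 1
clamped_stop = min(stop_frame, naive_stop)
print(naive_stop, clamped_stop)  # 39 30
```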
diff --git a/cvat/apps/dataset_manager/bindings.py b/cvat/apps/dataset_manager/bindings.py
index a8d2fcb98da3..b800e18ca3cf 100644
--- a/cvat/apps/dataset_manager/bindings.py
+++ b/cvat/apps/dataset_manager/bindings.py
@@ -435,8 +435,9 @@ def db_task(self):
     def _get_filename(path):
         return osp.splitext(path)[0]
 
-    def match_frame(self, path, root_hint=None):
-        path = self._get_filename(path)
+    def match_frame(self, path, root_hint=None, path_has_ext=True):
+        if path_has_ext:
+            path = self._get_filename(path)
         match = self._frame_mapping.get(path)
         if not match and root_hint and not path.startswith(root_hint):
             path = osp.join(root_hint, path)
@@ -611,7 +612,7 @@ def match_dm_item(item, task_data, root_hint=None):
     if frame_number is None and item.has_image:
         frame_number = task_data.match_frame(item.id + item.image.ext, root_hint)
     if frame_number is None:
-        frame_number = task_data.match_frame(item.id, root_hint)
+        frame_number = task_data.match_frame(item.id, root_hint, path_has_ext=False)
     if frame_number is None:
         frame_number = cast(item.attributes.get('frame', item.id), int)
     if frame_number is None and is_video:
diff --git a/cvat/apps/dataset_manager/formats/cvat.py b/cvat/apps/dataset_manager/formats/cvat.py
index 02025afc750a..786a5025e7c0 100644
--- a/cvat/apps/dataset_manager/formats/cvat.py
+++ b/cvat/apps/dataset_manager/formats/cvat.py
@@ -441,8 +441,9 @@ def load(file_object, annotations):
         elif el.tag == 'image':
             image_is_opened = True
             frame_id = annotations.abs_frame_id(match_dm_item(
-                DatasetItem(id=el.attrib['name'],
-                    attributes={'frame': el.attrib['id']}
+                DatasetItem(id=osp.splitext(el.attrib['name'])[0],
+                    attributes={'frame': el.attrib['id']},
+                    image=el.attrib['name']
                 ),
                 task_data=annotations
             ))
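The new `path_has_ext` flag and the `osp.splitext()` call above address the same pitfall: `splitext()` cannot tell a dot inside a filename from an extension separator once the real extension is already gone. A quick illustration:

```python
import os.path as osp

# With the extension present, splitext() strips exactly the extension;
# applied again to the bare name, it truncates the name itself.
print(osp.splitext('img0.0.0_1.jpg')[0])  # img0.0.0_1  (correct)
print(osp.splitext('img0.0.0_1')[0])      # img0.0.0    (name damaged)
```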
+ "name": "camera_id", + "mutable": False, + "input_type": "number", + "values": ["0", "1", "2", "3"] + }, + { + "name": "person_id", + "mutable": False, + "input_type": "number", + "values": ["1", "2", "3"] + }, + ] + }] + else: + labels = [ + { + "name": "car", + "attributes": [ + { + "name": "model", + "mutable": False, + "input_type": "select", + "default_value": "mazda", + "values": ["bmw", "mazda", "renault"] + }, + { + "name": "parked", + "mutable": True, + "input_type": "checkbox", + "default_value": False + } + ] + }, + {"name": "person"} + ] + + task = { + "name": "my task #1", + "overlap": 0, + "segment_size": 100, + "labels": labels + } + task.update(overrides) + return self._create_task(task, images) + + def _generate_annotations(self, task, annotation_format): + shapes = [] + tracks = [] + tags = [] + + if annotation_format in ["ICDAR Recognition 1.0", + "ICDAR Localization 1.0"]: + shapes = [{ + "frame": 0, + "label_id": task["labels"][0]["id"], + "group": 0, + "source": "manual", + "attributes": [ + { + "spec_id": task["labels"][0]["attributes"][0]["id"], + "value": task["labels"][0]["attributes"][0]["values"][0] + }, + ], + "points": [1.0, 2.1, 10.6, 53.22], + "type": "rectangle", + "occluded": False, + }] + elif annotation_format == "Market-1501 1.0": + tags = [{ + "frame": 1, + "label_id": task["labels"][0]["id"], + "group": 0, + "source": "manual", + "attributes": [ + { + "spec_id": task["labels"][0]["attributes"][0]["id"], + "value": task["labels"][0]["attributes"][0]["values"][1] + }, + { + "spec_id": task["labels"][0]["attributes"][1]["id"], + "value": task["labels"][0]["attributes"][1]["values"][2] + }, + { + "spec_id": task["labels"][0]["attributes"][2]["id"], + "value": task["labels"][0]["attributes"][2]["values"][0] + } + ], + }] + elif annotation_format == "ICDAR Segmentation 1.0": + shapes = [{ + "frame": 0, + "label_id": task["labels"][0]["id"], + "group": 0, + "source": "manual", + "attributes": [ + { + "spec_id": task["labels"][0]["attributes"][0]["id"], + "value": task["labels"][0]["attributes"][0]["values"][0] + }, + { + "spec_id": task["labels"][0]["attributes"][1]["id"], + "value": task["labels"][0]["attributes"][1]["values"][0] + }, + { + "spec_id": task["labels"][0]["attributes"][2]["id"], + "value": task["labels"][0]["attributes"][2]["values"][1] + }, + { + "spec_id": task["labels"][0]["attributes"][3]["id"], + "value": task["labels"][0]["attributes"][3]["values"][2] + } + ], + "points": [1.0, 2.1, 10.6, 53.22], + "type": "rectangle", + "occluded": False, + }] + elif annotation_format == "VGGFace2 1.0": + shapes = [{ + "frame": 1, + "label_id": task["labels"][1]["id"], + "group": None, + "source": "manual", + "attributes": [], + "points": [2.0, 2.1, 40, 50.7], + "type": "rectangle", + "occluded": False + }] + else: + rectangle_shape_wo_attrs = { + "frame": 1, + "label_id": task["labels"][1]["id"], + "group": 0, + "source": "manual", + "attributes": [], + "points": [2.0, 2.1, 40, 50.7], + "type": "rectangle", + "occluded": False, + } + + rectangle_shape_with_attrs = { + "frame": 0, + "label_id": task["labels"][0]["id"], + "group": 0, + "source": "manual", + "attributes": [ + { + "spec_id": task["labels"][0]["attributes"][0]["id"], + "value": task["labels"][0]["attributes"][0]["values"][0] + }, + { + "spec_id": task["labels"][0]["attributes"][1]["id"], + "value": task["labels"][0]["attributes"][1]["default_value"] + } + ], + "points": [1.0, 2.1, 10.6, 53.22], + "type": "rectangle", + "occluded": False, + } + + track_wo_attrs = { + "frame": 0, + 
"label_id": task["labels"][1]["id"], + "group": 0, + "source": "manual", + "attributes": [], + "shapes": [ + { + "frame": 0, + "attributes": [], + "points": [1.0, 2.1, 100, 300.222], + "type": "polygon", + "occluded": False, + "outside": False + } + ] + } + + tag_wo_attrs = { + "frame": 0, + "label_id": task["labels"][0]["id"], + "group": None, + "attributes": [] + } + + tag_with_attrs = { + "frame": 1, + "label_id": task["labels"][0]["id"], + "group": 3, + "source": "manual", + "attributes": [ + { + "spec_id": task["labels"][0]["attributes"][0]["id"], + "value": task["labels"][0]["attributes"][0]["values"][1] + }, + { + "spec_id": task["labels"][0]["attributes"][1]["id"], + "value": task["labels"][0]["attributes"][1]["default_value"] + } + ], + } + + if annotation_format == "VGGFace2 1.0": + shapes = rectangle_shape_wo_attrs + elif annotation_format == "CVAT 1.1": + shapes = [rectangle_shape_wo_attrs, + rectangle_shape_with_attrs] + tags = [tag_with_attrs, tag_wo_attrs] + elif annotation_format == "MOTS PNG 1.0": + tracks = [track_wo_attrs] + else: + shapes = [rectangle_shape_wo_attrs, + rectangle_shape_with_attrs] + tags = tag_wo_attrs + tracks = track_wo_attrs + + annotations = { + "version": 0, + "tags": tags, + "shapes": shapes, + "tracks": tracks + } + + return self._generate_custom_annotations(annotations, task) + + def _test_can_import_annotations(self, task, import_format): + with tempfile.TemporaryDirectory() as temp_dir: + file_path = osp.join(temp_dir, import_format) + + export_format = import_format + if import_format == "CVAT 1.1": + export_format = "CVAT for images 1.1" + + dm.task.export_task(task["id"], file_path, export_format) + expected_ann = TaskAnnotation(task["id"]) + expected_ann.init_from_db() + + dm.task.import_task_annotations(task["id"], + file_path, import_format) + actual_ann = TaskAnnotation(task["id"]) + actual_ann.init_from_db() + + self.assertEqual(len(expected_ann.data), len(actual_ann.data)) + + def test_can_import_annotations_for_image_with_dots_in_filename(self): + for f in dm.views.get_import_formats(): + format_name = f.DISPLAY_NAME + + images = self._generate_task_images(3, "img0.0.0") + task = self._generate_task(images, format_name) + self._generate_annotations(task, format_name) + + with self.subTest(format=format_name): + if not f.ENABLED: + self.skipTest("Format is disabled") + + self._test_can_import_annotations(task, format_name) \ No newline at end of file diff --git a/tests/json_to_html.py b/tests/json_to_html.py new file mode 100644 index 000000000000..901179559c14 --- /dev/null +++ b/tests/json_to_html.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python + +# Copyright (C) 2021 Intel Corporation +# +# SPDX-License-Identifier: MIT + +from json2html import * +import sys +import os +import json + +def json_to_html(path_to_json): + with open(path_to_json) as json_file: + data = json.load(json_file) + hadolint_html_report = json2html.convert(json = data) + + with open(os.path.splitext(path_to_json)[0] + '.html', 'w') as html_file: + html_file.write(hadolint_html_report) + + +if __name__ == '__main__': + json_to_html(sys.argv[1]) diff --git a/utils/cli/core/core.py b/utils/cli/core/core.py index 6f966ce732a4..5e7cbd7826ce 100644 --- a/utils/cli/core/core.py +++ b/utils/cli/core/core.py @@ -66,16 +66,20 @@ def tasks_create(self, name, labels, overlap, segment_size, bug, resource_type, completion_verification_period=20, git_completion_verification_period=2, dataset_repository_url='', + project_id=None, lfs=False, **kwargs): """ Create a new task with the 
diff --git a/utils/cli/core/core.py b/utils/cli/core/core.py
index 6f966ce732a4..5e7cbd7826ce 100644
--- a/utils/cli/core/core.py
+++ b/utils/cli/core/core.py
@@ -66,16 +66,20 @@ def tasks_create(self, name, labels, overlap, segment_size, bug, resource_type,
                      completion_verification_period=20,
                      git_completion_verification_period=2,
                      dataset_repository_url='',
+                     project_id=None,
                      lfs=False, **kwargs):
         """ Create a new task with the given name and labels JSON and
         add the files to it. """
         url = self.api.tasks
+        labels = [] if project_id is not None else labels
         data = {'name': name,
                 'labels': labels,
                 'overlap': overlap,
                 'segment_size': segment_size,
                 'bug_tracker': bug,
         }
+        if project_id:
+            data.update({'project_id': project_id})
         response = self.session.post(url, json=data)
         response.raise_for_status()
         response_json = response.json()
diff --git a/utils/cli/core/definition.py b/utils/cli/core/definition.py
index 76ecfa1d2fe7..11889a8bdfdf 100644
--- a/utils/cli/core/definition.py
+++ b/utils/cli/core/definition.py
@@ -112,6 +112,12 @@ def argparse(s):
     type=parse_label_arg,
     help='string or file containing JSON labels specification'
 )
+task_create_parser.add_argument(
+    '--project',
+    default=None,
+    type=int,
+    help='ID of an existing project to create the task in'
+)
 task_create_parser.add_argument(
     '--overlap',
     default=0,
@@ -175,6 +181,7 @@ def argparse(s):
     action='store_true',
     help='using lfs for dataset repository (default: %(default)s)'
 )
+
 #######################################################################
 # Delete
 #######################################################################
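A sketch of the payload logic behind the new `--project` option: when a project is given, labels are dropped from the request because the project already defines them (the project ID below is hypothetical):

```python
project_id = 42  # hypothetical existing project
labels = [{'name': 'car'}]

# Mirrors the two lines added to tasks_create().
data = {'name': 'my task #1',
        'labels': [] if project_id is not None else labels}
if project_id:
    data.update({'project_id': project_id})
print(data)  # {'name': 'my task #1', 'labels': [], 'project_id': 42}
```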
diff --git a/utils/dicom_converter/README.md b/utils/dicom_converter/README.md
new file mode 100644
index 000000000000..e4c5ed800659
--- /dev/null
+++ b/utils/dicom_converter/README.md
@@ -0,0 +1,21 @@
+# Description
+
+The script is used to convert some kinds of DICOM data to regular images.
+You can then annotate these images in CVAT and get a segmentation mask.
+The conversion script was tested on CT, MR and some multi-frame DICOM data.
+DICOM files with series (multi-frame) are saved under the same name with a numeric postfix: 001, 002, 003, etc.
+
+# Installation
+
+```bash
+python3 -m venv .env
+. .env/bin/activate
+pip install -r requirements.txt
+```
+
+# Running
+
+```bash
+. .env/bin/activate # if not activated
+python script.py input_data output_data
+```
diff --git a/utils/dicom_converter/requirements.txt b/utils/dicom_converter/requirements.txt
new file mode 100644
index 000000000000..9ed6b39b6afd
--- /dev/null
+++ b/utils/dicom_converter/requirements.txt
@@ -0,0 +1,4 @@
+numpy==1.20.2
+Pillow==8.2.0
+pydicom==2.1.2
+tqdm==4.60.0
diff --git a/utils/dicom_converter/script.py b/utils/dicom_converter/script.py
new file mode 100644
index 000000000000..5bfbba3af120
--- /dev/null
+++ b/utils/dicom_converter/script.py
@@ -0,0 +1,113 @@
+# Copyright (C) 2021 Intel Corporation
+#
+# SPDX-License-Identifier: MIT
+
+
+import os
+import argparse
+import logging
+from glob import glob
+
+import numpy as np
+from tqdm import tqdm
+from PIL import Image
+from pydicom import dcmread
+from pydicom.pixel_data_handlers.util import convert_color_space
+
+
+# Script configuration
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')
+parser = argparse.ArgumentParser(description='The script is used to convert some kinds of DICOM (.dcm) files to regular image files (.png)')
+parser.add_argument('input', type=str, help='A root directory with medical data files in DICOM format. The script finds all these files based on their extension')
+parser.add_argument('output', type=str, help='Where to save converted files. The script repeats the internal directory structure of the input root directory')
+args = parser.parse_args()
+
+
+class Converter:
+    def __init__(self, filename):
+        with dcmread(filename) as ds:
+            self._pixel_array = ds.pixel_array
+            self._photometric_interpretation = ds.PhotometricInterpretation
+            self._min_value = ds.pixel_array.min()
+            self._max_value = ds.pixel_array.max()
+            self._depth = ds.BitsStored
+
+            logging.debug('File: {}'.format(filename))
+            logging.debug('Photometric interpretation: {}'.format(self._photometric_interpretation))
+            logging.debug('Min value: {}'.format(self._min_value))
+            logging.debug('Max value: {}'.format(self._max_value))
+            logging.debug('Depth: {}'.format(self._depth))
+
+            try:
+                self._length = ds["NumberOfFrames"].value
+            except KeyError:
+                self._length = 1
+
+    def __len__(self):
+        return self._length
+
+    def __iter__(self):
+        if self._length == 1:
+            self._pixel_array = np.expand_dims(self._pixel_array, axis=0)
+
+        for pixel_array in self._pixel_array:
+            # Normalize to the output range (0..255 for 8 bit, 0..65535 for 16 bit)
+            pixel_array = pixel_array - self._min_value
+            pixel_array = pixel_array.astype(int) * (2 ** self._depth - 1)
+            pixel_array = pixel_array // max(self._max_value - self._min_value, 1)
+
+            # In some cases we need to convert colors additionally
+            if 'YBR' in self._photometric_interpretation:
+                pixel_array = convert_color_space(pixel_array, self._photometric_interpretation, 'RGB')
+
+            if self._depth == 8:
+                image = Image.fromarray(pixel_array.astype(np.uint8))
+            elif self._depth == 16:
+                image = Image.fromarray(pixel_array.astype(np.uint16))
+            else:
+                raise Exception('Unsupported depth {}'.format(self._depth))
+
+            yield image
+
+
+def main(root_dir, output_root_dir):
+    dicom_files = glob(os.path.join(root_dir, '**', '*.dcm'), recursive=True)
+    if not len(dicom_files):
+        logging.info('DICOM files are not found under the specified path')
+    else:
+        logging.info('Number of found DICOM files: ' + str(len(dicom_files)))
+
+        pbar = tqdm(dicom_files)
+        for input_filename in pbar:
+            pbar.set_description('Conversion: ' + input_filename)
+            input_basename = os.path.basename(input_filename)
+
+            output_subpath = os.path.relpath(os.path.dirname(input_filename), root_dir)
+            output_path = os.path.join(output_root_dir, output_subpath)
+            output_basename = '{}.png'.format(os.path.splitext(input_basename)[0])
+            output_filename = os.path.join(output_path, output_basename)
+
+            if not os.path.exists(output_path):
+                os.makedirs(output_path)
+
+            try:
+                iterated_converter = Converter(input_filename)
+                length = len(iterated_converter)
+                for i, image in enumerate(iterated_converter):
+                    if length == 1:
+                        image.save(output_filename)
+                    else:
+                        filename_index = str(i).zfill(len(str(length)))
+                        list_output_filename = '{}_{}.png'.format(os.path.splitext(output_filename)[0], filename_index)
+                        image.save(list_output_filename)
+            except Exception as ex:
+                logging.error('Error while processing ' + input_filename)
+                logging.error(ex)
+
+if __name__ == '__main__':
+    input_root_path = os.path.abspath(args.input.rstrip(os.sep))
+    output_root_path = os.path.abspath(args.output.rstrip(os.sep))
+
+    logging.info('From: {}'.format(input_root_path))
+    logging.info('To: {}'.format(output_root_path))
+    main(input_root_path, output_root_path)
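The core of the converter is the per-frame intensity rescaling in `Converter.__iter__`; a minimal sketch of that arithmetic, assuming a 16-bit grayscale frame:

```python
import numpy as np

# Map pixel values from [min, max] onto [0, 2**depth - 1] using integer
# math throughout, exactly as the script does frame by frame.
depth = 16
frame = np.array([[100, 500], [900, 1300]])
lo, hi = int(frame.min()), int(frame.max())
scaled = (frame - lo).astype(int) * (2 ** depth - 1) // max(hi - lo, 1)
print(scaled.min(), scaled.max())  # 0 65535
```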