Reorganized console output #350

Merged 1 commit on May 26, 2020
85 changes: 83 additions & 2 deletions README.md
@@ -33,8 +33,89 @@ It will run recommendation function for the model (model is located in [aibolit/
The model finds the pattern whose contribution to the Cyclomatic Complexity is the largest.
If anything is found, you will see all recommendations for the detected patterns.
You can see the list of all patterns in [Patterns.md](https://github.com/yegor256/aibolit/blob/master/PATTERNS.md).
The recommendation output will be saved to the `out.xml` file in the current directory.
You can change the output file using the `--output` parameter.
The recommendation output will be written to stdout.
An exit code of `0` means that none of the analyzed files have any issues.
An exit code of `1` means that at least one analyzed file has an issue.
An exit code of `2` means that the program crashed.
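
For example, a CI step could branch on the exit code after a run; a minimal sketch, where `src/java` stands in for your own source folder:

```bash
aibolit recommend --folder src/java --full
status=$?
if [ "$status" -eq 2 ]; then
  echo "aibolit crashed" >&2
  exit 2
elif [ "$status" -eq 1 ]; then
  echo "aibolit found issues" >&2
  exit 1
fi
```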

You can change the format using the `--format` parameter. The default is `--format=text`.
```bash
$ aibolit recommend --folder src/java --format=text --full
```

It will print a text report in which all data are sorted by pattern importance in descending order and grouped by pattern name:

```
Show all patterns
Filename /mnt/d/src/java/Configuration.java:
Score for file: 127.67642529949538
Some issues found
line 294: Null check (P13)
line 391: Null check (P13)
line 235: Non final attribute (P12)
line 3840: Var in the middle (P21)
line 3844: Var in the middle (P21)
line 3848: Var in the middle (P21)
line 2411: Null Assignment (P28)
Filename /mnt/d/src/java/ErrorExample.java:
Error when calculating patterns: Can't count P1 metric:
Filename /mnt/d/src/java/MavenSlice.java:
Your code is perfect in aibolit's opinion
Total score: 127.67642529949538

```

You can also choose the XML format. It carries the same information as the `text` mode, but as XML:

```xml
<report>
  <score>127.67642529949538</score>
  <!--Show all patterns-->
  <files>
    <file>
      <path>/mnt/d/src/java/Configuration.java</path>
      <summary>Some issues found</summary>
      <score>127.67642529949538</score>
      <patterns>
        <pattern code="P13">
          <details>Null check</details>
          <lines>
            <number>294</number>
            <number>391</number>
          </lines>
        </pattern>
        <pattern code="P12">
          <details>Non final attribute</details>
          <lines>
            <number>235</number>
          </lines>
        </pattern>
        <pattern code="P21">
          <details>Var in the middle</details>
          <lines>
            <number>3840</number>
            <number>3844</number>
            <number>3848</number>
          </lines>
        </pattern>
        <pattern code="P28">
          <details>Null Assignment</details>
          <lines>
            <number>2411</number>
          </lines>
        </pattern>
      </patterns>
    </file>
    <file>
      <path>/mnt/d/src/java/ErrorExample.java</path>
      <summary>Error when calculating patterns: Can't count P1 metric: </summary>
    </file>
    <file>
      <path>/mnt/d/src/java/MavenSlice.java</path>
      <summary>Your code is perfect in aibolit's opinion</summary>
    </file>
  </files>
</report>

```
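
Since the report is written to stdout, you can redirect it to a file yourself; a minimal sketch, reusing the `src/java` folder from the examples above:

```bash
$ aibolit recommend --folder src/java --format=xml --full > report.xml
```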

The model is automatically installed with the *aibolit* package, but you can also try your own model.
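
For instance, the `--model_file` option lets you point the recommender at a different model; a sketch, where `my_model.dat` is a hypothetical path to a model file you provide yourself:

```bash
$ aibolit recommend --folder src/java --model_file my_model.dat
```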

92 changes: 77 additions & 15 deletions aibolit/__main__.py
@@ -39,6 +39,7 @@
from pathlib import Path
import pickle
from aibolit.model.model import TwoFoldRankingModel, Dataset # type: ignore # noqa: F401
from sys import stdout

dir_path = os.path.dirname(os.path.realpath(__file__))

@@ -225,7 +226,6 @@ def run_recommend_for_file(file: str, args):
    :param args: different command line arguments
    :return: dict with code lines, filename and pattern name
    """
    print('Analyzing {}'.format(file))
    java_file = str(Path(os.getcwd(), file))
    input_params, code_lines_dict, error_string = calculate_patterns_and_metrics(java_file)
    results_list, importances = inference(input_params, code_lines_dict, args)
@@ -322,13 +322,48 @@ def get_exit_code(results):
    return exit_code


def create_text(results, full_report):
    importances_for_all_classes = []
    buffer = []
    if not full_report:
        buffer.append('Show pattern with the largest contribution to Cognitive Complexity')
    else:
        buffer.append('Show all patterns')
    for result_for_file in results:
        filename = result_for_file.get('filename')
        buffer.append('Filename {}: '.format(filename))
        results = result_for_file.get('results')
        errors_string = result_for_file.get('error_string')
        if not results and not errors_string:
            output_string = 'Your code is perfect in aibolit\'s opinion'
            buffer.append(output_string)
        elif not results and errors_string:
            output_string = 'Error when calculating patterns: {}'.format(str(errors_string))
            buffer.append(output_string)
        else:
            output_string = 'Some issues found'
            score = result_for_file['importances']
            importances_for_all_classes.append(score)
            buffer.append('Score for file: {}'.format(score))
            buffer.append(output_string)
            for pattern_item in result_for_file['results']:
                code = pattern_item.get('pattern_code')
                if code:
                    pattern_name_str = pattern_item.get('pattern_name')
                    buffer.append('line {}: {} ({})'.format(pattern_item.get('code_line'), pattern_name_str, code))
    if importances_for_all_classes:
        buffer.append('Total score: {}'.format(np.mean(importances_for_all_classes)))

    return buffer
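
As a rough illustration (not part of the diff) of how `create_text` consumes the flattened results produced by `format_converter_for_pattern` below, here is a made-up example:

```python
# A made-up usage sketch: feed one file with a single flattened pattern hit
# through create_text and print the report.
from aibolit.__main__ import create_text

sample = [{
    'filename': 'src/java/Foo.java',
    'results': [
        {'pattern_code': 'P13', 'pattern_name': 'Null check', 'code_line': 42},
    ],
    'importances': 12.5,
}]
print('\n'.join(create_text(sample, full_report=True)))
# Roughly expected output:
#   Show all patterns
#   Filename src/java/Foo.java:
#   Score for file: 12.5
#   Some issues found
#   line 42: Null check (P13)
#   Total score: 12.5
```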


def recommend():
    """Run recommendation pipeline."""

    parser = argparse.ArgumentParser(
        description='Get recommendations for Java code',
        usage='''
        aibolit recommend < --folder | --filenames > [--output] [--model_file] [--threshold] [--full]
        aibolit recommend < --folder | --filenames > [--output] [--model_file] [--threshold] [--full] [--format]
        ''')

    group_exclusive = parser.add_mutually_exclusive_group(required=True)
@@ -344,12 +379,6 @@ def recommend():
        nargs="*",
        default=False
    )
    parser.add_argument(
        '--output',
        help='output of xml file where all results will be saved, default is out.xml of the current directory',
        default=False
    )

    parser.add_argument(
        '--model_file',
        help='''file where pretrained model is located, the default path is located
@@ -368,6 +397,11 @@ def recommend():
        default=False,
        action='store_true'
    )
    parser.add_argument(
        '--format',
        default='text',
        help='text (by default) or xml. Usage: --format=xml'
    )

    args = parser.parse_args(sys.argv[2:])

@@ -382,18 +416,44 @@

    results = list(run_thread(files, args))

    if args.output:
        filename = args.output
    else:
        filename = 'out.xml'
    if args.format:
        new_results = format_converter_for_pattern(results)
        if args.format == 'text':
            text = create_text(new_results, args.full)
            print('\n'.join(text))
        elif args.format == 'xml':
            root = create_xml_tree(results, args.full)
            tree = root.getroottree()
            tree.write(stdout.buffer, pretty_print=True)
        else:
            raise Exception('Unknown format')

    root = create_xml_tree(results, args.full)
    tree = root.getroottree()
    tree.write(filename, pretty_print=True)
    exit_code = get_exit_code(results)
    return exit_code


def format_converter_for_pattern(results):
    """Reformat data where data are sorted by patterns importance
    (it is already sorted in the input).
    Then lines are sorted in ascending order."""

    def flatten(l):
        return [item for sublist in l for item in sublist]

    for file in results:
        items = file.get('results')
        if items:
            new_items = flatten([
                [{'pattern_code': x['pattern_code'],
                  'pattern_name': x['pattern_name'],
                  'code_line': line,
                  } for line in sorted(x['code_lines'])] for x in items
            ])
            file['results'] = new_items

    return results
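
To make the reshaping concrete, here is a made-up before/after example (not part of the diff):

```python
# A made-up sketch of the reshaping that format_converter_for_pattern performs
# on one file entry: nested, unsorted code_lines become flat, sorted entries.
from aibolit.__main__ import format_converter_for_pattern

before = [{
    'filename': 'src/java/Foo.java',
    'results': [
        {'pattern_code': 'P13', 'pattern_name': 'Null check', 'code_lines': [120, 7]},
    ],
    'importances': 3.2,
}]
after = format_converter_for_pattern(before)
# after[0]['results'] now holds one flat entry per line, lines in ascending order:
# [{'pattern_code': 'P13', 'pattern_name': 'Null check', 'code_line': 7},
#  {'pattern_code': 'P13', 'pattern_name': 'Null check', 'code_line': 120}]
```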


def version():
"""
Parses arguments and shows current version of program.
@@ -429,6 +489,8 @@ def main():
        }
        exit_code = run_parse_args(commands)
    except Exception:
        import traceback
        traceback.print_exc()
        sys.exit(2)
    else:
        sys.exit(exit_code)
48 changes: 47 additions & 1 deletion test/recommend/test_recommend_pipeline.py
@@ -28,7 +28,8 @@
from aibolit.config import Config
from lxml import etree

from aibolit.__main__ import list_dir, calculate_patterns_and_metrics, create_xml_tree
from aibolit.__main__ import list_dir, calculate_patterns_and_metrics, \
    create_xml_tree, create_text, format_converter_for_pattern


class TestRecommendPipeline(TestCase):
@@ -38,6 +39,38 @@ def __init__(self, *args, **kwargs):
        self.cur_file_dir = Path(os.path.realpath(__file__)).parent
        self.config = Config.get_patterns_config()

    def __create_mock_input(self):
        patterns = [x['code'] for x in self.config['patterns']]
        item = {
            'filename': '1.java',
            'results': [
                {'pattern_code': 'P23',
                 'pattern_name': 'Some patterns name',
                 'code_lines': [1, 2, 4]
                 }
            ],
            'importances': sum([0.1 + x for x in range(len(patterns))])
        }
        another_item = {
            'filename': 'hdd/home/jardani_jovonovich/John_wick.java',
            'results': [
                {'pattern_code': 'P2',
                 'pattern_name': 'Somebody please get this man a gun',
                 'code_lines': [10, 100, 15000]},
                {'pattern_code': 'P4',
                 'pattern_name': 'New item',
                 'code_lines': [5, 6]}
            ],
            'importances': sum([0.1 + 2 * x for x in range(len(patterns))])
        }
        error_file = {
            'error_string': "Error occured",
            'filename': 'hdd/home/Error.java',
            'results': []
        }
        mock_input = [item, another_item, error_file]
        return mock_input

    def test_calculate_patterns_and_metrics(self):
        file = Path(self.cur_file_dir, 'folder/LottieImageAsset.java')
        calculate_patterns_and_metrics(file)
@@ -93,3 +126,16 @@ def test_xml_empty_resutls(self):
        xml_string = create_xml_tree([], True)
        md5_hash = md5(etree.tostring(xml_string))
        self.assertEqual(md5_hash.hexdigest(), '7d55be99025f9d9bba410bdbd2c42cee')

    def test_text_format(self):
        mock_input = self.__create_mock_input()
        new_mock = format_converter_for_pattern(mock_input)
        text = create_text(new_mock, full_report=True)
        md5_hash = md5('\n'.join(text).encode('utf-8'))
        self.assertEqual(md5_hash.hexdigest(), 'e59a6eced350dc1320dffc2b99dcfecd')

    def test_empty_text_format(self):
        new_mock = format_converter_for_pattern([])
        text = create_text(new_mock, full_report=True)
        md5_hash = md5('\n'.join(text).encode('utf-8'))
        self.assertEqual(md5_hash.hexdigest(), 'bc22beda46ca18267a677eb32361a2aa')
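
To check these new tests locally, you would typically run the module with unittest; a sketch, assuming the `test` directories are importable as packages from the repository root:

```bash
$ python -m unittest test.recommend.test_recommend_pipeline -v
```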