Skip to content

Commit

Permalink
[web] Store multiple report directories with one command
Browse files Browse the repository at this point in the history
* Allow the user to store multiple report directories with one
`CodeChecker store` command.
* New metadata.json format.
* Tests for parsing and merging metadata.json files.

If multiple report directories created by different CodeChecker
versions are stored, there can be multiple results with OFF detection
status. If additional reports created with Cppcheck are stored, all
of the Cppcheck analyzer results would be marked with UNAVAILABLE
detection status. To solve this problem we return an empty checker
set. This way detection statuses are calculated properly, but the
OFF and UNAVAILABLE checker statuses will never be used.
  • Loading branch information
csordasmarton committed Mar 30, 2020
1 parent 9e33f21 commit bd775d6
Show file tree
Hide file tree
Showing 20 changed files with 1,060 additions and 81 deletions.
65 changes: 25 additions & 40 deletions analyzer/codechecker_analyzer/analysis_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,66 +35,52 @@
LOG = get_logger('analyzer')


def print_analyzer_statistic_summary(statistics, status, msg=None):
def print_analyzer_statistic_summary(metadata_analyzers, status, msg=None):
"""
Print analyzer statistic summary for the given status code with the given
section heading message.
"""
has_status = sum((res.get(status, 0) for res in
(statistics[i] for i in statistics)))
has_status = False
for _, analyzer in metadata_analyzers.items():
if analyzer.get('analyzer_statistics', {}).get(status):
has_status = True
break

if has_status and msg:
LOG.info(msg)

for analyzer_type, res in statistics.items():
successful = res[status]
if successful:
LOG.info(" %s: %s", analyzer_type, successful)


def worker_result_handler(results, metadata, output_path, analyzer_binaries):
"""
Print the analysis summary.
"""
for analyzer_type, analyzer in metadata_analyzers.items():
res = analyzer.get('analyzer_statistics', {}).get(status)
if res:
LOG.info(" %s: %s", analyzer_type, res)

if metadata is None:
metadata = {}

def worker_result_handler(results, metadata_tool, output_path,
analyzer_binaries):
""" Print the analysis summary. """
skipped_num = 0
reanalyzed_num = 0
statistics = {}

metadata_analyzers = metadata_tool['analyzers']
for res, skipped, reanalyzed, analyzer_type, _, sources in results:
statistics = metadata_analyzers[analyzer_type]['analyzer_statistics']
if skipped:
skipped_num += 1
else:
if reanalyzed:
reanalyzed_num += 1

if analyzer_type not in statistics:
analyzer_bin = analyzer_binaries[analyzer_type]
analyzer_version = \
metadata.get('versions', {}).get(analyzer_bin)

statistics[analyzer_type] = {
"failed": 0,
"failed_sources": [],
"successful": 0,
"version": analyzer_version
}

if res == 0:
statistics[analyzer_type]['successful'] += 1
statistics['successful'] += 1
else:
statistics[analyzer_type]['failed'] += 1
statistics[analyzer_type]['failed_sources'].append(sources)
statistics['failed'] += 1
statistics['failed_sources'].append(sources)

LOG.info("----==== Summary ====----")
print_analyzer_statistic_summary(statistics,
print_analyzer_statistic_summary(metadata_analyzers,
'successful',
'Successfully analyzed')

print_analyzer_statistic_summary(statistics,
print_analyzer_statistic_summary(metadata_analyzers,
'failed',
'Failed to analyze')

Expand All @@ -103,8 +89,7 @@ def worker_result_handler(results, metadata, output_path, analyzer_binaries):
if skipped_num:
LOG.info("Skipped compilation commands: %d", skipped_num)

metadata['skipped'] = skipped_num
metadata['analyzer_statistics'] = statistics
metadata_tool['skipped'] = skipped_num

# check() created the result .plist files and additional, per-analysis
# meta information in forms of .plist.source files.
Expand All @@ -121,9 +106,9 @@ def worker_result_handler(results, metadata, output_path, analyzer_binaries):
err_file, _ = os.path.splitext(f)
plist_file = os.path.basename(err_file) + ".plist"
plist_file = os.path.join(output_path, plist_file)
metadata['result_source_files'].pop(plist_file, None)
metadata_tool['result_source_files'].pop(plist_file, None)

metadata['result_source_files'].update(source_map)
metadata_tool['result_source_files'].update(source_map)


# Progress reporting.
Expand Down Expand Up @@ -680,7 +665,7 @@ def skip_cpp(compile_actions, skip_handler):


def start_workers(actions_map, actions, context, analyzer_config_map,
jobs, output_path, skip_handler, metadata,
jobs, output_path, skip_handler, metadata_tool,
quiet_analyze, capture_analysis_output, timeout,
ctu_reanalyze_on_failure, statistics_data, manager,
compile_cmd_count):
Expand Down Expand Up @@ -752,7 +737,7 @@ def signal_handler(signum, frame):
analyzed_actions,
1,
callback=lambda results: worker_result_handler(
results, metadata, output_path,
results, metadata_tool, output_path,
context.analyzer_binaries)
).get(31557600)

Expand Down
32 changes: 21 additions & 11 deletions analyzer/codechecker_analyzer/analyzer.py
Original file line number Diff line number Diff line change
Expand Up @@ -140,7 +140,7 @@ def __get_statistics_data(args, manager):
return statistics_data


def perform_analysis(args, skip_handler, context, actions, metadata,
def perform_analysis(args, skip_handler, context, actions, metadata_tool,
compile_cmd_count):
"""
Perform static analysis via the given (or if not, all) analyzers,
Expand Down Expand Up @@ -212,18 +212,28 @@ def perform_analysis(args, skip_handler, context, actions, metadata,
config_map[ClangSA.ANALYZER_NAME].set_checker_enabled(
ReturnValueCollector.checker_collect, False)

# Save some metadata information.
versions = __get_analyzer_version(context, config_map)
metadata['versions'].update(versions)
check_env = env.extend(context.path_env_extra,
context.ld_lib_path_extra)

metadata['checkers'] = {}
# Save some metadata information.
for analyzer in analyzers:
metadata['checkers'][analyzer] = {}
metadata_info = {
'checkers': {},
'analyzer_statistics': {
"failed": 0,
"failed_sources": [],
"successful": 0,
"version": None}}

for check, data in config_map[analyzer].checks().items():
state, _ = data
metadata['checkers'][analyzer].update(
{check: state == CheckerState.enabled})
metadata_info['checkers'].update({
check: state == CheckerState.enabled})

version = config_map[analyzer].get_version(check_env)
metadata_info['analyzer_statistics']['version'] = version

metadata_tool['analyzers'][analyzer] = metadata_info

if ctu_collect:
shutil.rmtree(ctu_dir, ignore_errors=True)
Expand Down Expand Up @@ -291,7 +301,7 @@ def perform_analysis(args, skip_handler, context, actions, metadata,
config_map, args.jobs,
args.output_path,
skip_handler,
metadata,
metadata_tool,
'quiet' in args,
'capture_analysis_output' in args,
args.timeout if 'timeout' in args
Expand All @@ -311,8 +321,8 @@ def perform_analysis(args, skip_handler, context, actions, metadata,
end_time = time.time()
LOG.info("Analysis length: %s sec.", end_time - start_time)

metadata['timestamps'] = {'begin': start_time,
'end': end_time}
metadata_tool['timestamps'] = {'begin': start_time,
'end': end_time}

if ctu_collect and ctu_analyze:
shutil.rmtree(ctu_dir, ignore_errors=True)
Expand Down
18 changes: 18 additions & 0 deletions analyzer/codechecker_analyzer/analyzers/config_handler.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
import collections
import os
import platform
import subprocess
import sys

from codechecker_common.logger import get_logger
Expand Down Expand Up @@ -73,6 +74,23 @@ def analyzer_plugins(self):
and f.endswith(".so")]
return analyzer_plugins

def get_version(self, env=None):
    """ Return the analyzer's version string, or None on failure.

    Runs `<analyzer_binary> --version` and returns the command's full
    output undecorated (trailing newline included).

    env -- optional environment mapping passed to the subprocess; None
           means inherit the current process environment.
    """
    version_cmd = [self.analyzer_binary, '--version']
    try:
        # encoding="utf-8" already switches the pipe to text mode, so the
        # redundant universal_newlines flag is dropped. Decoding errors
        # are ignored so an unusual locale cannot break version detection.
        return subprocess.check_output(version_cmd,
                                       env=env,
                                       encoding="utf-8",
                                       errors="ignore")
    except (subprocess.CalledProcessError, OSError) as oerr:
        LOG.warning("Failed to get analyzer version: %s",
                    ' '.join(version_cmd))
        LOG.warning(oerr)

    return None

def add_checker(self, checker_name, description=None, state=None):
"""
Add additional checker. If no state argument is given, the actual usage
Expand Down
46 changes: 32 additions & 14 deletions analyzer/codechecker_analyzer/cmd/analyze.py
Original file line number Diff line number Diff line change
Expand Up @@ -619,7 +619,7 @@ def __cleanup_metadata(metadata_prev, metadata):
if not metadata_prev:
return

result_src_files = metadata_prev['result_source_files']
result_src_files = __get_result_source_files(metadata_prev)
for plist_file, source_file in result_src_files.items():
if not os.path.exists(source_file):
try:
Expand All @@ -632,6 +632,19 @@ def __cleanup_metadata(metadata_prev, metadata):
LOG.warning("Failed to remove plist file: %s", plist_file)


def __get_result_source_files(metadata):
    """ Return the plist-file -> analyzed-source-file map from metadata.

    Supports both metadata.json formats:
      * v1: a top-level 'result_source_files' dictionary.
      * v2: a 'tools' *list*, where each tool entry may carry its own
        'result_source_files' dictionary; these are merged (later tools
        win on key collisions).
    """
    if 'result_source_files' in metadata:
        return metadata['result_source_files']

    result_src_files = {}
    # 'tools' is a list in the v2 format, so fall back to an empty list
    # (a dict fallback would iterate keys and break on tool.get()).
    for tool in metadata.get('tools', []):
        result_src_files.update(tool.get('result_source_files', {}))

    return result_src_files


def main(args):
"""
Perform analysis on the given logfiles and store the results in a machine-
Expand Down Expand Up @@ -762,26 +775,31 @@ def main(args):
json.dump(actions, f,
cls=log_parser.CompileCommandEncoder)

metadata = {'action_num': len(actions),
'command': sys.argv,
'versions': {
'codechecker': "{0} ({1})".format(
context.package_git_tag,
context.package_git_hash)},
'working_directory': os.getcwd(),
'output_path': args.output_path,
'result_source_files': {}}
metadata = {
'version': 2,
'tools': [{
'name': 'codechecker',
'action_num': len(actions),
'command': sys.argv,
'version': "{0} ({1})".format(context.package_git_tag,
context.package_git_hash),
'working_directory': os.getcwd(),
'output_path': args.output_path,
'result_source_files': {},
'analyzers': {}
}]}
metadata_tool = metadata['tools'][0]

if 'name' in args:
metadata['name'] = args.name
metadata_tool['run_name'] = args.name

# Update metadata dictionary with old values.
metadata_file = os.path.join(args.output_path, 'metadata.json')
metadata_prev = None
if os.path.exists(metadata_file):
metadata_prev = load_json_or_empty(metadata_file)
metadata['result_source_files'] = \
dict(metadata_prev['result_source_files'])
metadata_tool['result_source_files'] = \
__get_result_source_files(metadata_prev)

CompileCmdParseCount = \
collections.namedtuple('CompileCmdParseCount',
Expand Down Expand Up @@ -809,7 +827,7 @@ def main(args):
compile_cmd_count.analyze)

analyzer.perform_analysis(args, skip_handler, context, actions,
metadata,
metadata_tool,
compile_cmd_count)

__update_skip_file(args)
Expand Down
13 changes: 10 additions & 3 deletions analyzer/codechecker_analyzer/cmd/parse.py
Original file line number Diff line number Diff line change
Expand Up @@ -457,10 +457,17 @@ def parse(plist_file, metadata_dict, rh, file_report_map):

LOG.debug("Parsing input file '%s'", plist_file)

if 'result_source_files' in metadata_dict and \
plist_file in metadata_dict['result_source_files']:
result_source_files = {}
if 'result_source_files' in metadata_dict:
result_source_files = metadata_dict['result_source_files']
else:
for tool in metadata_dict.get('tools', {}):
result_src_files = tool.get('result_source_files', {})
result_source_files.update(result_src_files.items())

if plist_file in result_source_files:
analyzed_source_file = \
metadata_dict['result_source_files'][plist_file]
result_source_files[plist_file]

if analyzed_source_file not in file_report_map:
file_report_map[analyzed_source_file] = []
Expand Down
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
NORMAL#CodeChecker log --output $LOGFILE$ --build "make context_hash" --quiet
NORMAL#CodeChecker analyze $LOGFILE$ --output $OUTPUT$ --analyzers clang-tidy
NORMAL#CodeChecker parse $OUTPUT$ --print-steps
CHECK#CodeChecker check --build "make context_hash" --output $OUTPUT$ --quiet --print-steps --analyer clang-tidy
CHECK#CodeChecker check --build "make context_hash" --output $OUTPUT$ --quiet --print-steps --analyzers clang-tidy
--------------------------------------------------------------------------------
[] - Starting build ...
[] - Build finished successfully.
Expand Down
2 changes: 2 additions & 0 deletions docs/web/user_guide.md
Original file line number Diff line number Diff line change
Expand Up @@ -173,6 +173,8 @@ database.
positional arguments:
file/folder The analysis result files and/or folders containing
analysis results which should be parsed and printed.
If multiple report directories are given, OFF and
UNAVAILABLE detection statuses will not be available.
(default: /home/<username>/.codechecker/reports)
optional arguments:
Expand Down
Loading

0 comments on commit bd775d6

Please sign in to comment.