From e06208766598f0e186185ec2cd7c3c2e37aa133c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Csaba=20Osztrogon=C3=A1c?= <oszi@inf.u-szeged.hu>
Date: Fri, 30 Aug 2019 14:29:11 +0000
Subject: [PATCH] Make run-tests --unittests work on Windows too
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Changes:
* Bash-based unittest runner replaced with a Python runner
* Typo fixed in the doctest CMake build system (Python executable variable)
* run-tests.py prints an error message if a build fails

JerryScript-DCO-1.0-Signed-off-by: Csaba Osztrogonác oszi@inf.u-szeged.hu
---
 tests/unit-doc/CMakeLists.txt  |   2 +-
 tools/run-tests.py             |  26 ++++++-
 tools/runners/run-unittests.py |  94 +++++++++++++++++++++++++
 tools/runners/run-unittests.sh | 121 ---------------------------------
 tools/settings.py              |   2 +-
 5 files changed, 120 insertions(+), 125 deletions(-)
 create mode 100755 tools/runners/run-unittests.py
 delete mode 100755 tools/runners/run-unittests.sh
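
Usage sketch (editorial note in the git-am-ignored region; paths are
illustrative and assume an out-of-tree build directory named "build"):

    python tools/runners/run-unittests.py build/tests -q

On Windows the unittest binaries land in a per-configuration subdirectory,
so the path becomes build/tests/Debug or build/tests/MinSizeRel. The runner
also honors the RUNTIME environment variable, so cross-compiled binaries
can be run through an emulator wrapper, e.g.:

    RUNTIME=qemu-arm python tools/runners/run-unittests.py build/tests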
diff --git a/tests/unit-doc/CMakeLists.txt b/tests/unit-doc/CMakeLists.txt
index 1157dbc5de..152ef13560 100644
--- a/tests/unit-doc/CMakeLists.txt
+++ b/tests/unit-doc/CMakeLists.txt
@@ -28,7 +28,7 @@ endif()
 # file names that will be generated. This allows the definition of proper
 # dependencies between the MarkDown files and the generated sources.
 execute_process(
-  COMMAND ${Python_EXECUTABLE} ${GEN_DOCTEST} --dry -d ${CMAKE_CURRENT_BINARY_DIR} ${DOC_FILES}
+  COMMAND ${PYTHON_EXECUTABLE} ${GEN_DOCTEST} --dry -d ${CMAKE_CURRENT_BINARY_DIR} ${DOC_FILES}
   OUTPUT_VARIABLE DOCTEST_OUTPUT
   RESULT_VARIABLE GEN_DOCTEST_RESULT
 )
diff --git a/tools/run-tests.py b/tools/run-tests.py
index af80543237..4feaffea48 100755
--- a/tools/run-tests.py
+++ b/tools/run-tests.py
@@ -222,6 +222,7 @@ def get_arguments():
 TERM_NORMAL = '\033[0m'
 TERM_YELLOW = '\033[1;33m'
 TERM_BLUE = '\033[1;34m'
+TERM_RED = '\033[1;31m'
 
 def report_command(cmd_type, cmd, env=None):
     sys.stderr.write('%s%s%s\n' % (TERM_BLUE, cmd_type, TERM_NORMAL))
@@ -236,6 +237,11 @@ def report_skip(job):
         sys.stderr.write(' (%s)' % job.skip)
     sys.stderr.write('%s\n' % TERM_NORMAL)
 
+def get_platform_cmd_prefix():
+    if sys.platform == 'win32':
+        return ['cmd', '/S', '/C']
+    return []
+
 def create_binary(job, options):
     build_args = job.build_args[:]
     if options.buildoptions:
@@ -243,7 +249,9 @@ def create_binary(job, options):
         if option not in build_args:
             build_args.append(option)
 
-    build_cmd = [settings.BUILD_SCRIPT] + build_args
+    build_cmd = get_platform_cmd_prefix()
+    build_cmd.append(settings.BUILD_SCRIPT)
+    build_cmd.extend(build_args)
 
     build_dir_path = os.path.join(options.outdir, job.name)
     build_cmd.append('--builddir=%s' % build_dir_path)
@@ -329,6 +337,7 @@ def run_jerry_debugger_tests(options):
     for job in DEBUGGER_TEST_OPTIONS:
         ret_build, build_dir_path = create_binary(job, options)
         if ret_build:
+            print("\n%sBuild failed%s\n" % (TERM_RED, TERM_NORMAL))
             break
 
         for channel in ["websocket", "rawpacket"]:
@@ -413,6 +422,7 @@ def run_test262_test_suite(options):
     for job in TEST262_TEST_SUITE_OPTIONS:
         ret_build, build_dir_path = create_binary(job, options)
         if ret_build:
+            print("\n%sBuild failed%s\n" % (TERM_RED, TERM_NORMAL))
             break
 
         test_cmd = [
@@ -433,11 +443,22 @@ def run_unittests(options):
     for job in JERRY_UNITTESTS_OPTIONS:
         ret_build, build_dir_path = create_binary(job, options)
         if ret_build:
+            print("\n%sBuild failed%s\n" % (TERM_RED, TERM_NORMAL))
             break
 
+        if sys.platform == 'win32':
+            if "--debug" in job.build_args:
+                build_config = "Debug"
+            else:
+                build_config = "MinSizeRel"
+        else:
+            build_config = ""
+
+
         ret_test |= run_check(
+            get_platform_cmd_prefix() +
             [settings.UNITTEST_RUNNER_SCRIPT] +
-            [os.path.join(build_dir_path, 'tests')] +
+            [os.path.join(build_dir_path, 'tests', build_config)] +
             (["-q"] if options.quiet else [])
         )
 
@@ -451,6 +472,7 @@
         ret, _ = create_binary(job, options)
 
         if ret:
+            print("\n%sBuild failed%s\n" % (TERM_RED, TERM_NORMAL))
             break
 
     return ret
diff --git a/tools/runners/run-unittests.py b/tools/runners/run-unittests.py
new file mode 100755
index 0000000000..d0ef671944
--- /dev/null
+++ b/tools/runners/run-unittests.py
@@ -0,0 +1,94 @@
+#!/usr/bin/env python
+
+# Copyright JS Foundation and other contributors, http://js.foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+import argparse
+import glob
+import os
+import subprocess
+import sys
+
+TERM_NORMAL = '\033[0m'
+TERM_RED = '\033[1;31m'
+TERM_GREEN = '\033[1;32m'
+
+def get_arguments():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-q', '--quiet', action='store_true',
+                        help='Only print out failing tests')
+    parser.add_argument('path',
+                        help='Path of test binaries')
+
+    script_args = parser.parse_args()
+    return script_args
+
+
+def get_unittests(path):
+    unittests = []
+    files = glob.glob(os.path.join(path, 'unit-*'))
+    for testfile in files:
+        if os.path.isfile(testfile) and os.access(testfile, os.X_OK):
+            if sys.platform != 'win32' or testfile.endswith(".exe"):
+                unittests.append(testfile)
+    unittests.sort()
+    return unittests
+
+
+def main(args):
+    unittests = get_unittests(args.path)
+    total = len(unittests)
+    if total == 0:
+        print("%s: no unit-* test to execute" % args.path)
+        return 1
+
+    tested = 0
+    passed = 0
+    failed = 0
+
+    runtime = os.environ.get('RUNTIME')
+    test_cmd = [runtime] if runtime else []
+
+    for test in unittests:
+        tested += 1
+        testpath = os.path.relpath(test)
+        try:
+            subprocess.check_output(test_cmd + [test], stderr=subprocess.STDOUT, universal_newlines=True)
+            passed += 1
+            if not args.quiet:
+                print("[%4d/%4d] %sPASS: %s%s" % (tested, total, TERM_GREEN, testpath, TERM_NORMAL))
+        except subprocess.CalledProcessError as err:
+            failed += 1
+            print("[%4d/%4d] %sFAIL (%d): %s%s" % (tested, total, TERM_RED, err.returncode, testpath, TERM_NORMAL))
+            print("================================================")
+            print(err.output)
+            print("================================================")
+
+    print("\n[summary] %s\n" % os.path.join(os.path.relpath(args.path), "unit-*"))
+    print("TOTAL: %d" % total)
+    print("%sPASS: %d%s" % (TERM_GREEN, passed, TERM_NORMAL))
+    print("%sFAIL: %d%s\n" % (TERM_RED, failed, TERM_NORMAL))
+
+    success_color = TERM_GREEN if passed == total else TERM_RED
+    print("%sSuccess: %d%%%s" % (success_color, passed*100/total, TERM_NORMAL))
+
+    if failed > 0:
+        return 1
+
+    return 0
+
+
+if __name__ == "__main__":
+    sys.exit(main(get_arguments()))
diff --git a/tools/runners/run-unittests.sh b/tools/runners/run-unittests.sh
deleted file mode 100755
index 651b1a07d5..0000000000
--- a/tools/runners/run-unittests.sh
+++ /dev/null
@@ -1,121 +0,0 @@
-#!/bin/bash
-
-# Copyright JS Foundation and other contributors, http://js.foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-DIR="$1"
-shift
-
-VERBOSE=1
-if [ "$1" == "-q" ]
-then
-    unset VERBOSE
-    shift
-fi
-
-UNITTEST_ERROR=$DIR/unittests.failed
-UNITTEST_OK=$DIR/unittests.passed
-
-rm -f $UNITTEST_ERROR $UNITTEST_OK
-
-UNITTESTS=$(find $DIR -maxdepth 1 -type f -name 'unit-*')
-total=$(find $DIR -maxdepth 1 -type f -name 'unit-*' | wc -l)
-
-if [ "$total" -eq 0 ]
-then
-    echo "$0: $DIR: no unit-* test to execute"
-    exit 1
-fi
-
-ROOT_DIR=""
-CURRENT_DIR=`pwd`
-PATH_STEP=2
-while true
-do
-    TMP_ROOT_DIR=`(echo "$CURRENT_DIR"; echo "$0"; echo "$DIR") | cut -f1-$PATH_STEP -d/ | uniq -d`
-    if [ -z "$TMP_ROOT_DIR" ]
-    then
-        break
-    else
-        ROOT_DIR="$TMP_ROOT_DIR"
-    fi
-    PATH_STEP=$((PATH_STEP+1))
-done
-if [ -n "$ROOT_DIR" ]
-then
-    ROOT_DIR="$ROOT_DIR/"
-fi
-
-tested=1
-failed=0
-passed=0
-
-UNITTEST_TEMP=`mktemp unittest-out.XXXXXXXXXX`
-
-TERM_NORMAL="\033[0m"
-TERM_RED="\033[1;31m"
-TERM_GREEN="\033[1;32m"
-
-for unit_test in $UNITTESTS
-do
-    cmd_line="$RUNTIME ${unit_test#$ROOT_DIR}"
-    $RUNTIME $unit_test &>$UNITTEST_TEMP
-    status_code=$?
-
-    if [ $status_code -ne 0 ]
-    then
-        printf "[%4d/%4d] %bFAIL (%d): %s%b\n" "$tested" "$total" "$TERM_RED" "$status_code" "${unit_test#$ROOT_DIR}" "$TERM_NORMAL"
-        cat $UNITTEST_TEMP
-
-        echo "$status_code: $unit_test" >> $UNITTEST_ERROR
-        echo "============================================" >> $UNITTEST_ERROR
-        cat $UNITTEST_TEMP >> $UNITTEST_ERROR
-        echo "============================================" >> $UNITTEST_ERROR
-        echo >> $UNITTEST_ERROR
-        echo >> $UNITTEST_ERROR
-
-        failed=$((failed+1))
-    else
-        test $VERBOSE && printf "[%4d/%4d] %bPASS: %s%b\n" "$tested" "$total" "$TERM_GREEN" "${unit_test#$ROOT_DIR}" "$TERM_NORMAL"
-        echo "$unit_test" >> $UNITTEST_OK
-
-        passed=$((passed+1))
-    fi
-
-    tested=$((tested+1))
-done
-
-rm -f $UNITTEST_TEMP
-
-ratio=$(echo $passed*100/$total | bc)
-if [ $passed -eq $total ]
-then
-    success_color=$TERM_GREEN
-else
-    success_color=$TERM_RED
-fi
-
-echo -e "\n[summary] ${DIR#$ROOT_DIR}/unit-*\n"
-echo -e "TOTAL: $total"
-echo -e "${TERM_GREEN}PASS: $passed${TERM_NORMAL}"
-echo -e "${TERM_RED}FAIL: $failed${TERM_NORMAL}\n"
-echo -e "${success_color}Success: $ratio%${TERM_NORMAL}\n"
-
-if [ $failed -ne 0 ]
-then
-    echo "$0: see $UNITTEST_ERROR for details about failures"
-    exit 1
-fi
-
-exit 0
diff --git a/tools/settings.py b/tools/settings.py
index aa5cf92a38..89f225f8af 100755
--- a/tools/settings.py
+++ b/tools/settings.py
@@ -37,4 +37,4 @@
 TEST_RUNNER_SCRIPT = path.join(TOOLS_DIR, 'runners/run-test-suite.sh')
 TEST262_RUNNER_SCRIPT = path.join(TOOLS_DIR, 'runners/run-test-suite-test262.sh')
 VERA_SCRIPT = path.join(TOOLS_DIR, 'check-vera.sh')
-UNITTEST_RUNNER_SCRIPT = path.join(TOOLS_DIR, 'runners/run-unittests.sh')
+UNITTEST_RUNNER_SCRIPT = path.join(TOOLS_DIR, 'runners/run-unittests.py')
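
Editorial aside, not part of the applied diff: get_platform_cmd_prefix()
exists because CreateProcess on Windows cannot start a .py script directly,
so run-tests.py routes script invocations through cmd.exe, which resolves
them via file associations. A minimal Python sketch of the resulting call,
with the script path and --builddir value chosen only for illustration:

    import subprocess
    import sys

    def get_platform_cmd_prefix():
        # /C runs the given command line and exits; /S tightens cmd.exe's
        # quote handling so the command line is passed through unchanged.
        if sys.platform == 'win32':
            return ['cmd', '/S', '/C']
        return []

    # Roughly what create_binary() executes for a build job:
    subprocess.call(get_platform_cmd_prefix() + ['tools/build.py', '--builddir=build'])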