From e844f65134f4a5edbf1f778c47411c4115b5d6d8 Mon Sep 17 00:00:00 2001
From: John Cater
Date: Wed, 16 Aug 2023 12:02:42 -0700
Subject: [PATCH] Add a copy of bashunit.

Needed for tests of android_sdk_repository.

Part of #76.

PiperOrigin-RevId: 557560436
Change-Id: I1c904b2e3a3136819b3d6b3bc6ee49193aeebbce
---
 test/bashunit/BUILD             |  43 ++
 test/bashunit/unittest.bash     | 845 ++++++++++++++++++++++++++++++++
 test/bashunit/unittest_test.py  | 738 ++++++++++++++++++++++++++++
 test/bashunit/unittest_utils.sh | 181 +++++++
 4 files changed, 1807 insertions(+)
 create mode 100644 test/bashunit/BUILD
 create mode 100644 test/bashunit/unittest.bash
 create mode 100644 test/bashunit/unittest_test.py
 create mode 100644 test/bashunit/unittest_utils.sh

diff --git a/test/bashunit/BUILD b/test/bashunit/BUILD
new file mode 100644
index 00000000..3404758e
--- /dev/null
+++ b/test/bashunit/BUILD
@@ -0,0 +1,43 @@
+load("@rules_python//python:py_test.bzl", "py_test")
+
+package(
+    default_applicable_licenses = ["//:license"],
+    default_visibility = [
+        "//test:__subpackages__",
+    ],
+)
+
+licenses(["notice"])
+
+exports_files(
+    ["unittest.bash"],
+)
+
+sh_library(
+    name = "bashunit",
+    testonly = True,
+    srcs = [
+        "unittest.bash",
+        "unittest_utils.sh",
+    ],
+)
+
+# Test bashunit with python to avoid recursion.
+py_test(
+    name = "bashunit_test",
+    size = "medium",
+    srcs = ["unittest_test.py"],
+    data = [
+        ":bashunit",
+        # This test relies on writing shell scripts that use bash runfiles
+        # to load the actual copy of unittest.bash being tested.
+        "@bazel_tools//tools/bash/runfiles",
+    ],
+    main = "unittest_test.py",
+    python_version = "PY3",
+    srcs_version = "PY3",
+    tags = [
+        "manual",  # TODO(b/266084774): Re-enable this.
+        "no_windows",  # test runs bash scripts in a subprocess
+    ],
+)
diff --git a/test/bashunit/unittest.bash b/test/bashunit/unittest.bash
new file mode 100644
index 00000000..c88ba2ce
--- /dev/null
+++ b/test/bashunit/unittest.bash
@@ -0,0 +1,845 @@
+#!/bin/bash
+#
+# Copyright 2015 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Common utility file for Bazel shell tests
+#
+# unittest.bash: a unit test framework in Bash.
+#
+# A typical test suite looks like so:
+#
+#   ------------------------------------------------------------------------
+#   #!/bin/bash
+#
+#   source path/to/unittest.bash || exit 1
+#
+#   # Test that foo works.
+#   function test_foo() {
+#     foo >$TEST_log || fail "foo failed";
+#     expect_log "blah" "Expected to see 'blah' in output of 'foo'."
+#   }
+#
+#   # Test that bar works.
+#   function test_bar() {
+#     bar 2>$TEST_log || fail "bar failed";
+#     expect_not_log "ERROR" "Unexpected error from 'bar'."
+#     ...
+#     assert_equals $x $y
+#   }
+#
+#   run_suite "Test suite for blah"
+#   ------------------------------------------------------------------------
+#
+# Each test function is considered to pass iff fail() is not called
+# while it is active. fail() may be called directly, or indirectly
+# via other assertions such as expect_log(). run_suite must be called
+# at the very end.
+#
+# A test suite may redefine functions "set_up" and/or "tear_down";
+# these functions are executed before and after each test function,
+# respectively. Similarly, "cleanup" and "timeout" may be redefined,
+# and these functions are called upon exit (of any kind) or a timeout.
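+#
+# For example (an illustrative sketch; the fixture directory is made up):
+#
+#   function set_up() {
+#     mkdir -p "$TEST_TMPDIR/fixture" || fail "could not create fixture dir"
+#   }
+#
+#   function tear_down() {
+#     rm -rf "$TEST_TMPDIR/fixture"
+#   }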
+#
+# The user can pass --test_filter to blaze test to select specific tests
+# to run with Bash globs. A union of tests matching any of the provided
+# globs will be run. Additionally, the user may define TESTS=(test_foo
+# test_bar ...) to specify a subset of test functions to execute, for
+# example, a working set during debugging. By default, all functions
+# called test_* will be executed.
+#
+# This file provides utilities for assertions over the output of a
+# command. The output of the command under test is directed to the
+# file $TEST_log, and then the expect_log* assertions can be used to
+# test for the presence of certain regular expressions in that file.
+#
+# The test framework is responsible for restoring the original working
+# directory before each test.
+#
+# The order in which test functions are run is not defined, so it is
+# important that tests clean up after themselves.
+#
+# Each test will be run in a new subshell.
+#
+# Functions named __* are not intended for use by clients.
+#
+# This framework implements the "test sharding protocol".
+#
+
+[[ -n "$BASH_VERSION" ]] ||
+  { echo "unittest.bash only works with bash!" >&2; exit 1; }
+
+export BAZEL_SHELL_TEST=1
+
+DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
+
+# Load the environment support utilities.
+source "${DIR}/unittest_utils.sh" || { echo "unittest_utils.sh not found" >&2; exit 1; }
+
+#### Global variables:
+
+TEST_name=""                    # The name of the current test.
+
+TEST_log=$TEST_TMPDIR/log       # The log file over which the
+                                # expect_log* assertions work. Must
+                                # be absolute to be robust against
+                                # tests invoking 'cd'!
+
+TEST_passed="true"              # The result of the current test;
+                                # failed assertions cause this to
+                                # become false.
+
+# These variables may be overridden by the test suite:
+
+TESTS=()                        # A subset or "working set" of test
+                                # functions that should be run. By
+                                # default, all tests called test_* are
+                                # run.
+
+_TEST_FILTERS=()                # List of globs to use to filter the tests.
+                                # If non-empty, all tests matching at least
+                                # one of the globs are run, and the test list
+                                # provided in the arguments is ignored if
+                                # present.
+
+__in_tear_down=0                # Indicates whether we are in the `tear_down`
+                                # phase of a test. Used to avoid re-entering
+                                # `tear_down` on failures within it.
+
+if (( $# > 0 )); then
+  (
+    IFS=':'
+    echo "WARNING: Passing test names in arguments (--test_arg) is deprecated, please use --test_filter='$*' instead." >&2
+  )
+
+  # Legacy behavior is to ignore missing regexp, but with errexit
+  # the following line fails without || true.
+  # TODO(dmarting): maybe we should revisit the way of selecting
+  # tests with that framework (use Bazel's environment variable instead).
+  TESTS=($(for i in "$@"; do echo $i; done | grep ^test_ || true))
+  if (( ${#TESTS[@]} == 0 )); then
+    echo "WARNING: Arguments do not specify tests!" >&2
+  fi
+fi
+# TESTBRIDGE_TEST_ONLY contains the value of --test_filter, if any. We want to
+# preferentially use that instead of $@ to determine which tests to run.
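+#
+# For illustration (a sketch, not part of the framework): a filter value of
+# "test_foo*:test_bar" is split on ':' into two globs, and a test is selected
+# when its name matches at least one of them:
+#
+#   TESTBRIDGE_TEST_ONLY='test_foo*:test_bar'
+#   IFS=':' read -r -a filters <<< "$TESTBRIDGE_TEST_ONLY"
+#   for f in "${filters[@]}"; do
+#     # Unquoted $f makes this a glob match.
+#     [[ "test_foo_smoke" = $f ]] && echo "selected by '$f'"
+#   done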
+if [[ ${TESTBRIDGE_TEST_ONLY:-} != "" ]]; then
+  if (( ${#TESTS[@]} != 0 )); then
+    echo "WARNING: Both --test_arg and --test_filter specified, ignoring --test_arg" >&2
+    TESTS=()
+  fi
+  # Split TESTBRIDGE_TEST_ONLY on colon and store it in the `_TEST_FILTERS` array.
+  IFS=':' read -r -a _TEST_FILTERS <<< "$TESTBRIDGE_TEST_ONLY"
+fi
+
+TEST_verbose="true"             # Whether or not to be verbose. A
+                                # command; "true" or "false" are
+                                # acceptable. The default is: true.
+
+TEST_script="$0"                # Full path to the test script.
+# Check if the script path is absolute; if not, prefix the PWD.
+if [[ ! "$TEST_script" = /* ]]; then
+  TEST_script="${PWD}/$0"
+fi
+
+
+#### Internal functions
+
+function __show_log() {
+  echo "-- Test log: -----------------------------------------------------------"
+  [[ -e $TEST_log ]] && cat "$TEST_log" || echo "(Log file did not exist.)"
+  echo "------------------------------------------------------------------------"
+}
+
+# Usage: __pad <title> <pad-char>
+# Print $title padded to 80 columns with $pad_char.
+function __pad() {
+  local title=$1
+  local pad=$2
+  # Ignore the subshell error -- `head` closes the fd before reading to the
+  # end, therefore the subshell will get SIGPIPE while stuck in `write`.
+  {
+    echo -n "${pad}${pad} ${title} "
+    printf "%80s" " " | tr ' ' "$pad"
+  } | head -c 80 || true
+  echo
+}
+
+#### Exported functions
+
+# Usage: init_test ...
+# Deprecated. Has no effect.
+function init_test() {
+  :
+}
+
+
+# Usage: set_up
+# Called before every test function. May be redefined by the test suite.
+function set_up() {
+  :
+}
+
+# Usage: tear_down
+# Called after every test function. May be redefined by the test suite.
+function tear_down() {
+  :
+}
+
+# Usage: cleanup
+# Called upon eventual exit of the test suite. May be redefined by
+# the test suite.
+function cleanup() {
+  :
+}
+
+# Usage: timeout
+# Called upon early exit from a test due to timeout.
+function timeout() {
+  :
+}
+
+# Usage: testenv_set_up
+# Called prior to set_up. For use by testenv.sh.
+function testenv_set_up() {
+  :
+}
+
+# Usage: testenv_tear_down
+# Called after tear_down. For use by testenv.sh.
+function testenv_tear_down() {
+  :
+}
+
+# Usage: fail <message> [<message> ...]
+# Print a failure message with context information, and mark the test as
+# a failure. The context includes a stacktrace including the longest sequence
+# of calls outside this module. (We exclude the top and bottom portions of
+# the stack because they just add noise.) Also prints the contents of
+# $TEST_log.
+function fail() {
+  __show_log >&2
+  echo "${TEST_name} FAILED: $*." >&2
+  # Keep the original error message if we fail in `tear_down` after a failure.
+  [[ "${TEST_passed}" == "true" ]] && echo "$@" >"$TEST_TMPDIR"/__fail
+  TEST_passed="false"
+  __show_stack
+  # Clean up as we are leaving the subshell now.
+  __run_tear_down_after_failure
+  exit 1
+}
+
+function __run_tear_down_after_failure() {
+  # Skip `tear_down` after a failure in `tear_down` to prevent infinite
+  # recursion.
+  (( __in_tear_down )) && return
+  __in_tear_down=1
+  echo -e "\nTear down:\n" >&2
+  tear_down
+  testenv_tear_down
+}
+
+# Usage: warn <message>
+# Print a test warning with context information.
+# The context includes a stacktrace including the longest sequence
+# of calls outside this module. (We exclude the top and bottom portions of
+# the stack because they just add noise.)
+function warn() {
+  __show_log >&2
+  echo "${TEST_name} WARNING: $1." >&2
+  __show_stack
+
+  if [[ -n "${TEST_WARNINGS_OUTPUT_FILE:-}" ]]; then
+    echo "${TEST_name} WARNING: $1." >> "$TEST_WARNINGS_OUTPUT_FILE"
+  fi
+}
+
+# Usage: __show_stack
+# Prints the portion of the stack that does not belong to this module,
+# i.e. the user's code that called a failing assertion. Stack may not
+# be available if Bash is reading commands from stdin; an error is
+# printed in that case.
+__show_stack() {
+  local i=0
+  local trace_found=0
+
+  # Skip over active calls within this module:
+  while (( i < ${#FUNCNAME[@]} )) && [[ ${BASH_SOURCE[i]:-} == "${BASH_SOURCE[0]}" ]]; do
+    (( ++i ))
+  done
+
+  # Show all calls until the next one within this module (typically run_suite):
+  while (( i < ${#FUNCNAME[@]} )) && [[ ${BASH_SOURCE[i]:-} != "${BASH_SOURCE[0]}" ]]; do
+    # Read online docs for BASH_LINENO to understand the strange offset.
+    # Undefined can occur in the BASH_SOURCE stack apparently when one exits from a subshell
+    echo "${BASH_SOURCE[i]:-"Unknown"}:${BASH_LINENO[i - 1]:-"Unknown"}: in call to ${FUNCNAME[i]:-"Unknown"}" >&2
+    (( ++i ))
+    trace_found=1
+  done
+
+  (( trace_found )) || echo "[Stack trace not available]" >&2
+}
+
+# Usage: expect_log <regexp> [error-message]
+# Asserts that $TEST_log matches regexp. Prints the contents of
+# $TEST_log and the specified (optional) error message otherwise, and
+# returns non-zero.
+function expect_log() {
+  local pattern=$1
+  local message=${2:-Expected regexp "$pattern" not found}
+  grep -sq -- "$pattern" $TEST_log && return 0
+
+  fail "$message"
+  return 1
+}
+
+# Usage: expect_log_warn <regexp> [error-message]
+# Warns if $TEST_log does not match regexp. Prints the contents of
+# $TEST_log and the specified (optional) error message on mismatch.
+function expect_log_warn() {
+  local pattern=$1
+  local message=${2:-Expected regexp "$pattern" not found}
+  grep -sq -- "$pattern" $TEST_log && return 0
+
+  warn "$message"
+  return 1
+}
+
+# Usage: expect_log_once <regexp> [error-message]
+# Asserts that $TEST_log contains one line matching <regexp>.
+# Prints the contents of $TEST_log and the specified (optional)
+# error message otherwise, and returns non-zero.
+function expect_log_once() {
+  local pattern=$1
+  local message=${2:-Expected regexp "$pattern" not found exactly once}
+  expect_log_n "$pattern" 1 "$message"
+}
+
+# Usage: expect_log_n <regexp> <count> [error-message]
+# Asserts that $TEST_log contains <count> lines matching <regexp>.
+# Prints the contents of $TEST_log and the specified (optional)
+# error message otherwise, and returns non-zero.
+function expect_log_n() {
+  local pattern=$1
+  local expectednum=${2:-1}
+  local message=${3:-Expected regexp "$pattern" not found exactly $expectednum times}
+  local count=$(grep -sc -- "$pattern" $TEST_log)
+  (( count == expectednum )) && return 0
+  fail "$message"
+  return 1
+}
+
+# Usage: expect_not_log <regexp> [error-message]
+# Asserts that $TEST_log does not match regexp. Prints the contents
+# of $TEST_log and the specified (optional) error message otherwise, and
+# returns non-zero.
+function expect_not_log() {
+  local pattern=$1
+  local message=${2:-Unexpected regexp "$pattern" found}
+  grep -sq -- "$pattern" $TEST_log || return 0
+
+  fail "$message"
+  return 1
+}
+
+# Usage: expect_query_targets <arguments>
+# Checks that the log file contains exactly the targets in the argument list.
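+#
+# Example (a sketch with hypothetical targets; assumes a query result was
+# written to $TEST_log first):
+#
+#   bazel query 'deps(//foo)' >$TEST_log || fail "query failed"
+#   expect_query_targets //foo:foo //foo:bar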
+function expect_query_targets() {
+  for arg in "$@"; do
+    expect_log_once "^$arg$"
+  done
+
+  # Checks that the number of lines starting with '//' equals the number of
+  # arguments provided.
+  expect_log_n "^//[^ ]*$" $#
+}
+
+# Usage: expect_log_with_timeout <regexp> <timeout> [error-message]
+# Waits for the given regexp in the $TEST_log for up to timeout seconds.
+# Prints the contents of $TEST_log and the specified (optional)
+# error message otherwise, and returns non-zero.
+function expect_log_with_timeout() {
+  local pattern=$1
+  local timeout=$2
+  local message=${3:-Regexp "$pattern" not found in "$timeout" seconds}
+  local count=0
+  while (( count < timeout )); do
+    grep -sq -- "$pattern" "$TEST_log" && return 0
+    let count=count+1
+    sleep 1
+  done
+
+  grep -sq -- "$pattern" "$TEST_log" && return 0
+  fail "$message"
+  return 1
+}
+
+# Usage: expect_cmd_with_timeout <expected> <cmd> [timeout]
+# Repeats the command once a second for up to timeout seconds (10s by default),
+# until the output matches the expected value. Fails and returns 1 if
+# the command does not return the expected value in the end.
+function expect_cmd_with_timeout() {
+  local expected="$1"
+  local cmd="$2"
+  local timeout=${3:-10}
+  local count=0
+  while (( count < timeout )); do
+    local actual="$($cmd)"
+    [[ "$expected" == "$actual" ]] && return 0
+    (( ++count ))
+    sleep 1
+  done
+
+  [[ "$expected" == "$actual" ]] && return 0
+  fail "Expected '${expected}' within ${timeout}s, was '${actual}'"
+  return 1
+}
+
+# Usage: assert_one_of <expected_list>... <actual>
+# Asserts that actual is one of the items in expected_list
+#
+# Example:
+#     local expected=( "foo" "bar" "baz" )
+#     assert_one_of "${expected[@]}" "$actual"
+function assert_one_of() {
+  local args=("$@")
+  local last_arg_index=$((${#args[@]} - 1))
+  local actual=${args[last_arg_index]}
+  unset args[last_arg_index]
+  for expected_item in "${args[@]}"; do
+    [[ "$expected_item" == "$actual" ]] && return 0
+  done;
+
+  fail "Expected one of '${args[*]}', was '$actual'"
+  return 1
+}
+
+# Usage: assert_not_one_of <expected_list>... <actual>
+# Asserts that actual is not one of the items in expected_list
+#
+# Example:
+#     local unexpected=( "foo" "bar" "baz" )
+#     assert_not_one_of "${unexpected[@]}" "$actual"
+function assert_not_one_of() {
+  local args=("$@")
+  local last_arg_index=$((${#args[@]} - 1))
+  local actual=${args[last_arg_index]}
+  unset args[last_arg_index]
+  for expected_item in "${args[@]}"; do
+    if [[ "$expected_item" == "$actual" ]]; then
+      fail "'${args[*]}' contains '$actual'"
+      return 1
+    fi
+  done;
+
+  return 0
+}
+
+# Usage: assert_equals <expected> <actual>
+# Asserts [[ expected == actual ]].
+function assert_equals() {
+  local expected=$1 actual=$2
+  [[ "$expected" == "$actual" ]] && return 0
+
+  fail "Expected '$expected', was '$actual'"
+  return 1
+}
+
+# Usage: assert_not_equals <unexpected> <actual>
+# Asserts [[ unexpected != actual ]].
+function assert_not_equals() {
+  local unexpected=$1 actual=$2
+  [[ "$unexpected" != "$actual" ]] && return 0;
+
+  fail "Expected not '${unexpected}', was '${actual}'"
+  return 1
+}
+
+# Usage: assert_contains <regexp> <file> [error-message]
+# Asserts that file matches regexp. Prints the contents of
+# file and the specified (optional) error message otherwise, and
+# returns non-zero.
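+#
+# Example (a minimal sketch with made-up file contents):
+#
+#   echo "hello world" >"$TEST_TMPDIR/out.txt"
+#   assert_contains "^hello" "$TEST_TMPDIR/out.txt" "greeting missing"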
+function assert_contains() {
+  local pattern=$1
+  local file=$2
+  local message=${3:-Expected regexp "$pattern" not found in "$file"}
+  grep -sq -- "$pattern" "$file" && return 0
+
+  cat "$file" >&2
+  fail "$message"
+  return 1
+}
+
+# Usage: assert_not_contains <regexp> <file> [error-message]
+# Asserts that file does not match regexp. Prints the contents of
+# file and the specified (optional) error message otherwise, and
+# returns non-zero.
+function assert_not_contains() {
+  local pattern=$1
+  local file=$2
+  local message=${3:-Expected regexp "$pattern" found in "$file"}
+
+  if [[ -f "$file" ]]; then
+    grep -sq -- "$pattern" "$file" || return 0
+  else
+    fail "$file is not a file: $message"
+    return 1
+  fi
+
+  cat "$file" >&2
+  fail "$message"
+  return 1
+}
+
+# Usage: assert_contains_n <regexp> <count> <file> [error-message]
+# Asserts that file contains exactly <count> lines matching regexp.
+# Prints the contents of file and the specified (optional) error message
+# otherwise, and returns non-zero.
+function assert_contains_n() {
+  local pattern=$1
+  local expectednum=${2:-1}
+  local file=$3
+  local message=${4:-Expected regexp "$pattern" not found exactly $expectednum times}
+  local count
+  if [[ -f "$file" ]]; then
+    count=$(grep -sc -- "$pattern" "$file")
+  else
+    fail "$file is not a file: $message"
+    return 1
+  fi
+  (( count == expectednum )) && return 0
+
+  cat "$file" >&2
+  fail "$message"
+  return 1
+}
+
+# Updates the global variable TESTS if
+# sharding is enabled, i.e. ($TEST_TOTAL_SHARDS > 0).
+function __update_shards() {
+  [[ -z "${TEST_TOTAL_SHARDS-}" ]] && return 0
+
+  (( TEST_TOTAL_SHARDS > 0 )) ||
+    { echo "Invalid total shards ${TEST_TOTAL_SHARDS}" >&2; exit 1; }
+
+  (( TEST_SHARD_INDEX < 0 || TEST_SHARD_INDEX >= TEST_TOTAL_SHARDS )) &&
+    { echo "Invalid shard ${TEST_SHARD_INDEX}" >&2; exit 1; }
+
+  IFS=$'\n' read -rd $'\0' -a TESTS < <(
+    for test in "${TESTS[@]}"; do echo "$test"; done |
+      awk "NR % ${TEST_TOTAL_SHARDS} == ${TEST_SHARD_INDEX}" &&
+      echo -en '\0')
+
+  [[ -z "${TEST_SHARD_STATUS_FILE-}" ]] || touch "$TEST_SHARD_STATUS_FILE"
+}
+
+# Usage: __test_terminated <signal-number>
+# Handler that is called when the test terminates unexpectedly.
+function __test_terminated() {
+  __show_log >&2
+  echo "$TEST_name FAILED: terminated by signal $1." >&2
+  TEST_passed="false"
+  __show_stack
+  timeout
+  exit 1
+}
+
+# Usage: __test_terminated_err
+# Handler that is called when the test terminates unexpectedly due to "errexit".
+function __test_terminated_err() {
+  # When a subshell exits due to signal ERR, its parent shell also exits,
+  # thus the signal handler is called recursively and we print out the
+  # error message and stack trace multiple times. We're only interested
+  # in the first one though, as it contains the most information, so ignore
+  # all following.
+  if [[ -f $TEST_TMPDIR/__err_handled ]]; then
+    exit 1
+  fi
+  __show_log >&2
+  if [[ ! -z "$TEST_name" ]]; then
+    echo -n "$TEST_name " >&2
+  fi
+  echo "FAILED: terminated because this command returned a non-zero status:" >&2
+  touch $TEST_TMPDIR/__err_handled
+  TEST_passed="false"
+  __show_stack
+  # If $TEST_name is still empty, the test suite failed before we even started
+  # to run tests, so we shouldn't call tear_down.
+  if [[ -n "$TEST_name" ]]; then
+    __run_tear_down_after_failure
+  fi
+  exit 1
+}
+
+# Usage: __trap_with_arg <handler> <signals ...>
+# Helper to install a trap handler for several signals preserving the signal
+# number, so that the signal number is available to the trap handler.
+function __trap_with_arg() {
+  func="$1" ; shift
+  for sig ; do
+    trap "$func $sig" "$sig"
+  done
+}
+
+# Usage: __log_to_test_report <node> <block>
+# Adds the block to the given node in the report file. Quotes in the
+# arguments need to be escaped.
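+#
+# For example, run_suite (below) seeds the report and then appends each
+# testcase before the closing testsuite tag, roughly:
+#
+#   __log_to_test_report "<\/testsuites>" "<testsuite></testsuite>"
+#   __log_to_test_report "<\/testsuite>" "<testcase name=\"test_foo\" ...></testcase>"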
+function __log_to_test_report() {
+  local node="$1"
+  local block="$2"
+  if [[ ! -e "$XML_OUTPUT_FILE" ]]; then
+    local xml_header='<?xml version="1.0" encoding="UTF-8"?>'
+    echo "${xml_header}<testsuites></testsuites>" > "$XML_OUTPUT_FILE"
+  fi
+
+  # replace match on node with block and match
+  # replacement expression only needs escaping for quotes
+  perl -e "\
+\$input = @ARGV[0]; \
+\$/=undef; \
+open FILE, '+<$XML_OUTPUT_FILE'; \
+\$content = <FILE>; \
+if (\$content =~ /($node.*)\$/) { \
+  seek FILE, 0, 0; \
+  print FILE \$\` . \$input . \$1; \
+}; \
+close FILE" "$block"
+}
+
+# Usage: __finish_test_report <suite-name> <total> <passed>
+# Adds the test summaries to the xml nodes.
+function __finish_test_report() {
+  local suite_name="$1"
+  local total="$2"
+  local passed="$3"
+  local failed=$((total - passed))
+
+  # Update the xml output with the suite name and total number of
+  # passed/failed tests.
+  cat "$XML_OUTPUT_FILE" | \
+    sed \
+      "s/<testsuites>/<testsuites tests=\"$total\" failures=\"0\" errors=\"$failed\">/" | \
+    sed \
+      "s/<testsuite>/<testsuite name=\"${suite_name}\" tests=\"$total\" failures=\"0\" errors=\"$failed\">/" \
+    > "${XML_OUTPUT_FILE}.bak"
+
+  rm -f "$XML_OUTPUT_FILE"
+  mv "${XML_OUTPUT_FILE}.bak" "$XML_OUTPUT_FILE"
+}
+
+# Multi-platform timestamp function
+UNAME=$(uname -s | tr 'A-Z' 'a-z')
+if [[ "$UNAME" == "linux" ]] || [[ "$UNAME" =~ msys_nt* ]]; then
+  function timestamp() {
+    echo $(($(date +%s%N)/1000000))
+  }
+else
+  function timestamp() {
+    # macOS and BSDs do not have %N, so Python is the best we can do.
+    # LC_ALL=C works around python 3.8 and 3.9 crash on macOS when the
+    # filesystem encoding is unspecified (e.g. when LANG=en_US).
+    local PYTHON=python
+    command -v python3 &> /dev/null && PYTHON=python3
+    LC_ALL=C "${PYTHON}" -c 'import time; print(int(round(time.time() * 1000)))'
+  }
+fi
+
+function get_run_time() {
+  local ts_start=$1
+  local ts_end=$2
+  run_time_ms=$((ts_end - ts_start))
+  echo $((run_time_ms / 1000)).${run_time_ms: -3}
+}
+
+# Usage: run_suite <suite-comment>
+# Must be called from the end of the user's test suite.
+# Calls exit with zero on success, non-zero otherwise.
+function run_suite() {
+  local message="$1"
+  # The name of the suite should be the script being run, which
+  # will be the filename with the ".sh" extension removed.
+  local suite_name="$(basename "$0")"
+
+  echo >&2
+  echo "$message" >&2
+  echo >&2
+
+  __log_to_test_report "<\/testsuites>" "<testsuite></testsuite>"
+
+  local total=0
+  local passed=0
+
+  atexit "cleanup"
+
+  # If the user didn't specify an explicit list of tests (e.g. a
+  # working set), use them all.
+  if (( ${#TESTS[@]} == 0 )); then
+    # Even if there aren't any tests, this needs to succeed.
+    local all_tests=()
+    IFS=$'\n' read -d $'\0' -ra all_tests < <(
+      declare -F | awk '{print $3}' | grep ^test_ || true; echo -en '\0')
+
+    if (( "${#_TEST_FILTERS[@]}" == 0 )); then
+      # Use ${array[@]+"${array[@]}"} idiom to avoid errors when running with
+      # Bash version <= 4.4 with `nounset` when `all_tests` is empty (
+      # https://github.com/bminor/bash/blob/a0c0a00fc419b7bc08202a79134fcd5bc0427071/CHANGES#L62-L63).
+      TESTS=("${all_tests[@]+${all_tests[@]}}")
+    else
+      for t in "${all_tests[@]+${all_tests[@]}}"; do
+        local matches=0
+        for f in "${_TEST_FILTERS[@]}"; do
+          # We purposely want to glob match: the unquoted $f below makes `=`
+          # treat the filter as a pattern rather than a literal string.
+          # shellcheck disable=SC2053
+          [[ "$t" = $f ]] && matches=1 && break
+        done
+        if (( matches )); then
+          TESTS+=("$t")
+        fi
+      done
+    fi
+
+  elif [[ -n "${TEST_WARNINGS_OUTPUT_FILE:-}" ]]; then
+    if grep -q "TESTS=" "$TEST_script" ; then
+      echo "TESTS variable overridden in sh_test. Please remove before submitting" \
+        >> "$TEST_WARNINGS_OUTPUT_FILE"
+    fi
+  fi
+
+  # Reset TESTS in the common case where it contains a single empty string.
+  if [[ -z "${TESTS[*]-}" ]]; then
+    TESTS=()
+  fi
+  local original_tests_size=${#TESTS[@]}
+
+  __update_shards
+
+  if [[ "${#TESTS[@]}" -ne 0 ]]; then
+    for TEST_name in "${TESTS[@]}"; do
+      >"$TEST_log" # Reset the log.
+      TEST_passed="true"
+
+      (( ++total ))
+      if [[ "$TEST_verbose" == "true" ]]; then
+        date >&2
+        __pad "$TEST_name" '*' >&2
+      fi
+
+      local run_time="0.0"
+      rm -f "${TEST_TMPDIR}"/{__ts_start,__ts_end}
+
+      if [[ "$(type -t "$TEST_name")" == function ]]; then
+        # Save exit handlers eventually set.
+        local SAVED_ATEXIT="$ATEXIT";
+        ATEXIT=
+
+        # Run test in a subshell.
+        rm -f "${TEST_TMPDIR}"/__err_handled
+        __trap_with_arg __test_terminated INT KILL PIPE TERM ABRT FPE ILL QUIT SEGV
+
+        # Remember -o pipefail value and disable it for the subshell result
+        # collection.
+        if [[ "${SHELLOPTS}" =~ (^|:)pipefail(:|$) ]]; then
+          local __opt_switch=-o
+        else
+          local __opt_switch=+o
+        fi
+        set +o pipefail
+        (
+          set "${__opt_switch}" pipefail
+          # If errexit is enabled, make sure we run cleanup and collect the log.
+          if [[ "$-" = *e* ]]; then
+            set -E
+            trap __test_terminated_err ERR
+          fi
+          timestamp >"${TEST_TMPDIR}"/__ts_start
+          testenv_set_up
+          set_up
+          eval "$TEST_name"
+          __in_tear_down=1
+          tear_down
+          testenv_tear_down
+          timestamp >"${TEST_TMPDIR}"/__ts_end
+          test "$TEST_passed" == "true"
+        ) 2>&1 | tee "${TEST_TMPDIR}"/__log
+        # Note that tee will prevent the control flow from continuing if the
+        # test spawned any processes which are still running and have not
+        # closed their stdout.
+
+        test_subshell_status=${PIPESTATUS[0]}
+        set "${__opt_switch}" pipefail
+        if (( test_subshell_status != 0 )); then
+          TEST_passed="false"
+          # Ensure that an end time is recorded in case the test subshell
+          # terminated prematurely.
+          [[ -f "$TEST_TMPDIR"/__ts_end ]] || timestamp >"$TEST_TMPDIR"/__ts_end
+        fi
+
+        # Calculate run time for the testcase.
+        local ts_start
+        ts_start=$(<"${TEST_TMPDIR}"/__ts_start)
+        local ts_end
+        ts_end=$(<"${TEST_TMPDIR}"/__ts_end)
+        run_time=$(get_run_time $ts_start $ts_end)
+
+        # Eventually restore exit handlers.
+        if [[ -n "$SAVED_ATEXIT" ]]; then
+          ATEXIT="$SAVED_ATEXIT"
+          trap "$ATEXIT" EXIT
+        fi
+      else # Bad test explicitly specified in $TESTS.
+        fail "Not a function: '$TEST_name'"
+      fi
+
+      local testcase_tag=""
+
+      local red='\033[0;31m'
+      local green='\033[0;32m'
+      local no_color='\033[0m'
+
+      if [[ "$TEST_verbose" == "true" ]]; then
+        echo >&2
+      fi
+
+      if [[ "$TEST_passed" == "true" ]]; then
+        if [[ "$TEST_verbose" == "true" ]]; then
+          echo -e "${green}PASSED${no_color}: ${TEST_name}" >&2
+        fi
+        (( ++passed ))
+        testcase_tag="<testcase name=\"${TEST_name}\" status=\"run\" time=\"${run_time}\" classname=\"\"></testcase>"
+      else
+        echo -e "${red}FAILED${no_color}: ${TEST_name}" >&2
+        # end marker in CDATA cannot be escaped, we need to split the CDATA sections
+        log=$(sed 's/]]>/]]>]]><![CDATA[/g' "${TEST_TMPDIR}"/__log)
+        fail_msg=$(cat "${TEST_TMPDIR}"/__fail 2> /dev/null || echo "No failure message")
+        # Replacing '&' with '&amp;', '<' with '&lt;', '>' with '&gt;', and '"' with '&quot;'
+        escaped_fail_msg=$(echo "$fail_msg" | sed 's/&/\&amp;/g' | sed 's/</\&lt;/g' | sed 's/>/\&gt;/g' | sed 's/"/\&quot;/g')
+        testcase_tag="<testcase name=\"${TEST_name}\" status=\"run\" time=\"${run_time}\" classname=\"\"><error message=\"${escaped_fail_msg}\"><![CDATA[${log}]]></error></testcase>"
+      fi
+
+      if [[ "$TEST_verbose" == "true" ]]; then
+        echo >&2
+      fi
+      __log_to_test_report "<\/testsuite>" "$testcase_tag"
+    done
+  fi
+
+  __finish_test_report "$suite_name" $total $passed
+  __pad "${passed} / ${total} tests passed." '*' >&2
+  if (( original_tests_size == 0 )); then
+    __pad "No tests found." '*'
+    exit 1
+  elif (( total != passed )); then
+    __pad "There were errors." '*' >&2
+    exit 1
+  elif (( total == 0 )); then
+    __pad "No tests executed due to sharding. Check your test's shard_count." '*'
+    __pad "Succeeding anyway." '*'
+  fi
+
+  exit 0
+}
diff --git a/test/bashunit/unittest_test.py b/test/bashunit/unittest_test.py
new file mode 100644
index 00000000..2ecc17f1
--- /dev/null
+++ b/test/bashunit/unittest_test.py
@@ -0,0 +1,738 @@
+# Copyright 2020 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Tests for unittest.bash."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import re
+import shutil
+import stat
+import subprocess
+import tempfile
+import textwrap
+import unittest
+
+# The test setup for this external test is forwarded to the internal bash test.
+# This allows the internal test to use the same runfiles to load unittest.bash.
+_TEST_PREAMBLE = """
+#!/bin/bash
+# --- begin runfiles.bash initialization ---
+if [[ -f "${RUNFILES_DIR:-/dev/null}/bazel_tools/tools/bash/runfiles/runfiles.bash" ]]; then
+  source "${RUNFILES_DIR}/bazel_tools/tools/bash/runfiles/runfiles.bash"
+else
+  echo >&2 "ERROR: cannot find @bazel_tools//tools/bash/runfiles:runfiles.bash"
+  exit 1
+fi
+# --- end runfiles.bash initialization ---
+
+echo "Writing XML to ${XML_OUTPUT_FILE}"
+
+source "$(rlocation "build_bazel_rules_android/test/bashunit/unittest.bash")" \
+  || { echo "Could not source unittest.bash" >&2; exit 1; }
+"""
+
+ANSI_ESCAPE = re.compile(r"(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]")
+
+
+def remove_ansi(line):
+  """Remove ANSI-style escape sequences from the input."""
+  return ANSI_ESCAPE.sub("", line)
+
+
+class TestResult(object):
+  """Save test results for easy checking."""
+
+  def __init__(self, asserter, return_code, output, xmlfile):
+    self._asserter = asserter
+    self._return_code = return_code
+    self._output = remove_ansi(output)
+
+    # Read in the XML result file.
+    if os.path.isfile(xmlfile):
+      with open(xmlfile, "r") as f:
+        self._xml = f.read()
+    else:
+      # Unable to read the file, errors will be reported later.
+      self._xml = ""
+
+  # Methods to assert on the state of the results.
+
+  def assertLogMessage(self, message):
+    self.assertExactlyOneMatch(self._output, message)
+
+  def assertNotLogMessage(self, message):
+    self._asserter.assertNotRegex(self._output, message)
+
+  def assertXmlMessage(self, message):
+    self.assertExactlyOneMatch(self._xml, message)
+
+  def assertNotXmlMessage(self, message):
+    self._asserter.assertNotRegex(self._xml, message)
+
+  def assertSuccess(self, suite_name):
+    self._asserter.assertEqual(0, self._return_code,
+                               f"Script failed unexpectedly:\n{self._output}")
+    self.assertLogMessage(suite_name)
+    self.assertXmlMessage("<testsuites [^/]*failures=\"0\"")
+    self.assertXmlMessage("<testsuites [^/]*errors=\"0\"")
+
+  def assertNotSuccess(self, suite_name, failures=0, errors=0):
+    self._asserter.assertNotEqual(0, self._return_code)
+    self.assertLogMessage(suite_name)
+    if failures:
+      self.assertXmlMessage(f'<testsuites [^/]*failures="{failures}"')
+    if errors:
+      self.assertXmlMessage(f'<testsuites [^/]*errors="{errors}"')
+
+  def assertTestPassed(self, test_name):
+    self.assertLogMessage(f"PASSED: {test_name}")
+
+  def assertTestFailed(self, test_name, message=""):
+    self.assertLogMessage(f"{test_name} FAILED: {message}")
+
+  def assertExactlyOneMatch(self, text, pattern):
+    self._asserter.assertRegex(text, pattern)
+    self._asserter.assertEqual(
+        len(re.findall(pattern, text)),
+        1,
+        msg=f"Found more than 1 match of '{pattern}' in '{text}'")
+
+
+class UnittestTest(unittest.TestCase):
+
+  def setUp(self):
+    """Create a working directory under our temp dir."""
+    super(UnittestTest, self).setUp()
+    self.work_dir = tempfile.mkdtemp(dir=os.environ["TEST_TMPDIR"])
+
+  def tearDown(self):
+    """Clean up the working directory."""
+    super(UnittestTest, self).tearDown()
+    shutil.rmtree(self.work_dir)
+
+  def write_file(self, filename, contents=""):
+    """Write the contents to a file in the workdir."""
+
+    filepath = os.path.join(self.work_dir, filename)
+    with open(filepath, "w") as f:
+      f.write(_TEST_PREAMBLE.strip())
+      f.write(contents)
+    os.chmod(filepath, stat.S_IEXEC | stat.S_IWRITE | stat.S_IREAD)
+
+  def find_runfiles(self):
+    if "RUNFILES_DIR" in os.environ:
+      return os.environ["RUNFILES_DIR"]
+
+    # Fall back to being based on the srcdir.
+ if "TEST_SRCDIR" in os.environ: + return os.environ["TEST_SRCDIR"] + + # Base on the current dir + return f"{os.getcwd()}/.." + + def execute_test(self, filename, env=None, args=()): + """Executes the file and stores the results.""" + + filepath = os.path.join(self.work_dir, filename) + xmlfile = os.path.join(self.work_dir, "dummy-testlog.xml") + test_env = { + "TEST_TMPDIR": self.work_dir, + "RUNFILES_DIR": self.find_runfiles(), + "TEST_SRCDIR": os.environ["TEST_SRCDIR"], + "XML_OUTPUT_FILE": xmlfile, + } + # Add in env, forcing everything to be a string. + if env: + for k, v in env.items(): + test_env[k] = str(v) + completed = subprocess.run( + [filepath, *args], + env=test_env, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) + return TestResult(self, completed.returncode, + completed.stdout.decode("utf-8"), xmlfile) + + # Actual test cases. + + def test_success(self): + self.write_file( + "thing.sh", """ +function test_success() { + echo foo >&${TEST_log} || fail "expected echo to succeed" + expect_log "foo" +} + +run_suite "success tests" +""") + + result = self.execute_test("thing.sh") + result.assertSuccess("success tests") + result.assertTestPassed("test_success") + + def test_timestamp(self): + self.write_file( + "thing.sh", """ +function test_timestamp() { + local ts=$(timestamp) + [[ $ts =~ ^[0-9]{13}$ ]] || fail "timestamp wan't valid: $ts" + + local time_diff=$(get_run_time 100000 223456) + assert_equals $time_diff 123.456 +} + +run_suite "timestamp tests" +""") + + result = self.execute_test("thing.sh") + result.assertSuccess("timestamp tests") + result.assertTestPassed("test_timestamp") + + def test_failure(self): + self.write_file( + "thing.sh", """ +function test_failure() { + fail "I'm a failure with <>&\\" escaped symbols" +} + +run_suite "failure tests" +""") + + result = self.execute_test("thing.sh") + result.assertNotSuccess("failure tests", failures=0, errors=1) + result.assertTestFailed("test_failure") + result.assertXmlMessage( + "message=\"I'm a failure with <>&" escaped symbols\"") + result.assertXmlMessage("I'm a failure with <>&\" escaped symbols") + + def test_set_bash_errexit_prints_stack_trace(self): + self.write_file( + "thing.sh", """ +set -euo pipefail + +function helper() { + echo before + false + echo after +} + +function test_failure_in_helper() { + helper +} + +run_suite "bash errexit tests" +""") + + result = self.execute_test("thing.sh") + result.assertNotSuccess("bash errexit tests") + result.assertTestFailed("test_failure_in_helper") + result.assertLogMessage(r"./thing.sh:\d*: in call to helper") + result.assertLogMessage( + r"./thing.sh:\d*: in call to test_failure_in_helper") + + def test_set_bash_errexit_runs_tear_down(self): + self.write_file( + "thing.sh", """ +set -euo pipefail + +function tear_down() { + echo "Running tear_down" +} + +function testenv_tear_down() { + echo "Running testenv_tear_down" +} + +function test_failure_in_helper() { + wrong_command +} + +run_suite "bash errexit tests" +""") + + result = self.execute_test("thing.sh") + result.assertNotSuccess("bash errexit tests") + result.assertTestFailed("test_failure_in_helper") + result.assertLogMessage("Running tear_down") + result.assertLogMessage("Running testenv_tear_down") + + def test_set_bash_errexit_pipefail_propagates_failure_through_pipe(self): + self.write_file( + "thing.sh", """ +set -euo pipefail + +function test_pipefail() { + wrong_command | cat + echo after +} + +run_suite "bash errexit tests" +""") + + result = self.execute_test("thing.sh") + 
result.assertNotSuccess("bash errexit tests") + result.assertTestFailed("test_pipefail") + result.assertLogMessage("wrong_command: command not found") + result.assertNotLogMessage("after") + + def test_set_bash_errexit_no_pipefail_ignores_failure_before_pipe(self): + self.write_file( + "thing.sh", """ +set -eu +set +o pipefail + +function test_nopipefail() { + wrong_command | cat + echo after +} + +run_suite "bash errexit tests" +""") + + result = self.execute_test("thing.sh") + result.assertSuccess("bash errexit tests") + result.assertTestPassed("test_nopipefail") + result.assertLogMessage("wrong_command: command not found") + result.assertLogMessage("after") + + def test_set_bash_errexit_pipefail_long_testname_succeeds(self): + test_name = "x" * 1000 + self.write_file( + "thing.sh", """ +set -euo pipefail + +function test_%s() { + : +} + +run_suite "bash errexit tests" +""" % test_name) + + result = self.execute_test("thing.sh") + result.assertSuccess("bash errexit tests") + + def test_empty_test_fails(self): + self.write_file("thing.sh", """ +# No tests present. + +run_suite "empty test suite" +""") + + result = self.execute_test("thing.sh") + result.assertNotSuccess("empty test suite") + result.assertLogMessage("No tests found.") + + def test_empty_test_succeeds_sharding(self): + self.write_file( + "thing.sh", """ +# Only one test. +function test_thing() { + echo +} + +run_suite "empty test suite" +""") + + # First shard. + result = self.execute_test( + "thing.sh", env={ + "TEST_TOTAL_SHARDS": 2, + "TEST_SHARD_INDEX": 0, + }) + result.assertSuccess("empty test suite") + result.assertLogMessage("No tests executed due to sharding") + + # Second shard. + result = self.execute_test( + "thing.sh", env={ + "TEST_TOTAL_SHARDS": 2, + "TEST_SHARD_INDEX": 1, + }) + result.assertSuccess("empty test suite") + result.assertNotLogMessage("No tests") + + def test_filter_runs_only_matching_test(self): + self.write_file( + "thing.sh", + textwrap.dedent(""" + function test_abc() { + : + } + + function test_def() { + echo "running def" + } + + run_suite "tests to filter" + """)) + + result = self.execute_test( + "thing.sh", env={"TESTBRIDGE_TEST_ONLY": "test_a*"}) + + result.assertSuccess("tests to filter") + result.assertTestPassed("test_abc") + result.assertNotLogMessage("running def") + + def test_filter_prefix_match_only_skips_test(self): + self.write_file( + "thing.sh", + textwrap.dedent(""" + function test_abc() { + echo "running abc" + } + + run_suite "tests to filter" + """)) + + result = self.execute_test( + "thing.sh", env={"TESTBRIDGE_TEST_ONLY": "test_a"}) + + result.assertNotSuccess("tests to filter") + result.assertLogMessage("No tests found.") + + def test_filter_multiple_globs_runs_tests_matching_any(self): + self.write_file( + "thing.sh", + textwrap.dedent(""" + function test_abc() { + echo "running abc" + } + + function test_def() { + echo "running def" + } + + run_suite "tests to filter" + """)) + + result = self.execute_test( + "thing.sh", env={"TESTBRIDGE_TEST_ONLY": "donotmatch:*a*"}) + + result.assertSuccess("tests to filter") + result.assertTestPassed("test_abc") + result.assertNotLogMessage("running def") + + def test_filter_character_group_runs_only_matching_tests(self): + self.write_file( + "thing.sh", + textwrap.dedent(""" + function test_aaa() { + : + } + + function test_daa() { + : + } + + function test_zaa() { + echo "running zaa" + } + + run_suite "tests to filter" + """)) + + result = self.execute_test( + "thing.sh", env={"TESTBRIDGE_TEST_ONLY": "test_[a-f]aa"}) + + 
result.assertSuccess("tests to filter") + result.assertTestPassed("test_aaa") + result.assertTestPassed("test_daa") + result.assertNotLogMessage("running zaa") + + def test_filter_sharded_runs_subset_of_filtered_tests(self): + for index in range(2): + with self.subTest(index=index): + self.__filter_sharded_runs_subset_of_filtered_tests(index) + + def __filter_sharded_runs_subset_of_filtered_tests(self, index): + self.write_file( + "thing.sh", + textwrap.dedent(""" + function test_a0() { + echo "running a0" + } + + function test_a1() { + echo "running a1" + } + + function test_bb() { + echo "running bb" + } + + run_suite "tests to filter" + """)) + + result = self.execute_test( + "thing.sh", + env={ + "TESTBRIDGE_TEST_ONLY": "test_a*", + "TEST_TOTAL_SHARDS": 2, + "TEST_SHARD_INDEX": index + }) + + result.assertSuccess("tests to filter") + # The sharding logic is shifted by 1, starts with 2nd shard. + result.assertTestPassed("test_a" + str(index ^ 1)) + result.assertLogMessage("running a" + str(index ^ 1)) + result.assertNotLogMessage("running a" + str(index)) + result.assertNotLogMessage("running bb") + + def test_arg_runs_only_matching_test_and_issues_warning(self): + self.write_file( + "thing.sh", + textwrap.dedent(""" + function test_abc() { + : + } + + function test_def() { + echo "running def" + } + + run_suite "tests to filter" + """)) + + result = self.execute_test("thing.sh", args=["test_abc"]) + + result.assertSuccess("tests to filter") + result.assertTestPassed("test_abc") + result.assertNotLogMessage("running def") + result.assertLogMessage( + r"WARNING: Passing test names in arguments \(--test_arg\) is " + "deprecated, please use --test_filter='test_abc' instead.") + + def test_arg_multiple_tests_issues_warning_with_test_filter_command(self): + self.write_file( + "thing.sh", + textwrap.dedent(""" + function test_abc() { + : + } + + function test_def() { + : + } + + run_suite "tests to filter" + """)) + + result = self.execute_test("thing.sh", args=["test_abc", "test_def"]) + + result.assertSuccess("tests to filter") + result.assertTestPassed("test_abc") + result.assertTestPassed("test_def") + result.assertLogMessage( + r"WARNING: Passing test names in arguments \(--test_arg\) is " + "deprecated, please use --test_filter='test_abc:test_def' instead.") + + def test_arg_and_filter_ignores_arg(self): + self.write_file( + "thing.sh", + textwrap.dedent(""" + function test_abc() { + : + } + + function test_def() { + echo "running def" + } + + run_suite "tests to filter" + """)) + + result = self.execute_test( + "thing.sh", args=["test_def"], env={"TESTBRIDGE_TEST_ONLY": "test_a*"}) + + result.assertSuccess("tests to filter") + result.assertTestPassed("test_abc") + result.assertNotLogMessage("running def") + result.assertLogMessage( + "WARNING: Both --test_arg and --test_filter specified, ignoring --test_arg" + ) + + def test_custom_ifs_variable_finds_and_runs_test(self): + for sharded in (False, True): + for ifs in (r"\t", "t"): + with self.subTest(ifs=ifs, sharded=sharded): + self.__custom_ifs_variable_finds_and_runs_test(ifs, sharded) + + def __custom_ifs_variable_finds_and_runs_test(self, ifs, sharded): + self.write_file( + "thing.sh", + textwrap.dedent(r""" + set -euo pipefail + IFS=$'%s' + function test_foo() { + : + } + + run_suite "custom IFS test" + """ % ifs)) + + result = self.execute_test( + "thing.sh", + env={} if not sharded else { + "TEST_TOTAL_SHARDS": 2, + "TEST_SHARD_INDEX": 1 + }) + + result.assertSuccess("custom IFS test") + result.assertTestPassed("test_foo") + + 
+  def test_fail_in_teardown_reports_failure(self):
+    self.write_file(
+        "thing.sh",
+        textwrap.dedent(r"""
+        function tear_down() {
+          echo "tear_down log" >"${TEST_log}"
+          fail "tear_down failure"
+        }
+
+        function test_foo() {
+          :
+        }
+
+        run_suite "Failure in tear_down test"
+        """))
+
+    result = self.execute_test("thing.sh")
+
+    result.assertNotSuccess("Failure in tear_down test", errors=1)
+    result.assertTestFailed("test_foo", "tear_down failure")
+    result.assertXmlMessage('message="tear_down failure"')
+    result.assertLogMessage("tear_down log")
+
+  def test_fail_in_teardown_after_test_failure_reports_both_failures(self):
+    self.write_file(
+        "thing.sh",
+        textwrap.dedent(r"""
+        function tear_down() {
+          echo "tear_down log" >"${TEST_log}"
+          fail "tear_down failure"
+        }
+
+        function test_foo() {
+          echo "test_foo log" >"${TEST_log}"
+          fail "Test failure"
+        }
+
+        run_suite "Failure in tear_down test"
+        """))
+
+    result = self.execute_test("thing.sh")
+
+    result.assertNotSuccess("Failure in tear_down test", errors=1)
+    result.assertTestFailed("test_foo", "Test failure")
+    result.assertTestFailed("test_foo", "tear_down failure")
+    result.assertXmlMessage('message="Test failure"')
+    result.assertNotXmlMessage('message="tear_down failure"')
+    result.assertXmlMessage("test_foo log")
+    result.assertXmlMessage("tear_down log")
+    result.assertLogMessage("Test failure")
+    result.assertLogMessage("tear_down failure")
+    result.assertLogMessage("test_foo log")
+    result.assertLogMessage("tear_down log")
+
+  def test_errexit_in_teardown_reports_failure(self):
+    self.write_file(
+        "thing.sh",
+        textwrap.dedent(r"""
+        set -euo pipefail
+
+        function tear_down() {
+          invalid_command
+        }
+
+        function test_foo() {
+          :
+        }
+
+        run_suite "errexit in tear_down test"
+        """))
+
+    result = self.execute_test("thing.sh")
+
+    result.assertNotSuccess("errexit in tear_down test")
+    result.assertLogMessage("invalid_command: command not found")
+    result.assertXmlMessage('message="No failure message"')
+    result.assertXmlMessage("invalid_command: command not found")
+
+  def test_fail_in_tear_down_after_errexit_reports_both_failures(self):
+    self.write_file(
+        "thing.sh",
+        textwrap.dedent(r"""
+        set -euo pipefail
+
+        function tear_down() {
+          echo "tear_down log" >"${TEST_log}"
+          fail "tear_down failure"
+        }
+
+        function test_foo() {
+          invalid_command
+        }
+
+        run_suite "fail after failure"
+        """))
+
+    result = self.execute_test("thing.sh")
+
+    result.assertNotSuccess("fail after failure")
+    result.assertTestFailed(
+        "test_foo",
+        "terminated because this command returned a non-zero status")
+    result.assertTestFailed("test_foo", "tear_down failure")
+    result.assertLogMessage("invalid_command: command not found")
+    result.assertLogMessage("tear_down log")
+    result.assertXmlMessage('message="No failure message"')
+    result.assertXmlMessage("invalid_command: command not found")
+
+  def test_errexit_in_tear_down_after_errexit_reports_both_failures(self):
+    self.write_file(
+        "thing.sh",
+        textwrap.dedent(r"""
+        set -euo pipefail
+
+        function tear_down() {
+          invalid_command_tear_down
+        }
+
+        function test_foo() {
+          invalid_command_test
+        }
+
+        run_suite "fail after failure"
+        """))
+
+    result = self.execute_test("thing.sh")
+
+    result.assertNotSuccess("fail after failure")
+    result.assertTestFailed(
+        "test_foo",
+        "terminated because this command returned a non-zero status")
+    result.assertLogMessage("invalid_command_test: command not found")
+    result.assertLogMessage("invalid_command_tear_down: command not found")
+    result.assertXmlMessage('message="No failure message"')
+    result.assertXmlMessage("invalid_command_test: command not found")
+    result.assertXmlMessage("invalid_command_tear_down: command not found")
+
+
+if __name__ == "__main__":
+  unittest.main()
diff --git a/test/bashunit/unittest_utils.sh b/test/bashunit/unittest_utils.sh
new file mode 100644
index 00000000..be3409e2
--- /dev/null
+++ b/test/bashunit/unittest_utils.sh
@@ -0,0 +1,181 @@
+# Copyright 2020 The Bazel Authors. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Support for unittest.bash
+
+#### Set up the test environment.
+
+set -euo pipefail
+
+cat_jvm_log () {
+  local log_content="$1"
+  if [[ "$log_content" =~ \
+      "(error code:".*", error message: '".*"', log file: '"(.*)"')" ]]; then
+    echo >&2
+    echo "Content of ${BASH_REMATCH[1]}:" >&2
+    cat "${BASH_REMATCH[1]}" >&2
+  fi
+}
+
+# Print message in "$1" then exit with status "$2"
+die () {
+  # second argument is optional, defaulting to 1
+  local status_code=${2:-1}
+  # Stop capturing stdout/stderr, and dump captured output
+  if [[ "$CAPTURED_STD_ERR" -ne 0 || "$CAPTURED_STD_OUT" -ne 0 ]]; then
+    restore_outputs
+    if [[ "$CAPTURED_STD_OUT" -ne 0 ]]; then
+      cat "${TEST_TMPDIR}/captured.out"
+      CAPTURED_STD_OUT=0
+    fi
+    if [[ "$CAPTURED_STD_ERR" -ne 0 ]]; then
+      cat "${TEST_TMPDIR}/captured.err" 1>&2
+      cat_jvm_log "$(cat "${TEST_TMPDIR}/captured.err")"
+      CAPTURED_STD_ERR=0
+    fi
+  fi
+
+  if [[ -n "${1-}" ]] ; then
+    echo "$1" 1>&2
+  fi
+  if [[ -n "${BASH-}" ]]; then
+    local caller_n=0
+    while [[ $caller_n -lt 4 ]] && \
+        caller_out=$(caller $caller_n 2>/dev/null); do
+      test $caller_n -eq 0 && echo "CALLER stack (max 4):"
+      echo "  $caller_out"
+      let caller_n=caller_n+1
+    done 1>&2
+  fi
+  if [[ -n "${status_code}" && "${status_code}" -ne 0 ]]; then
+    exit "$status_code"
+  else
+    exit 1
+  fi
+}
+
+# Print message in "$1" then record that a non-fatal error occurred in
+# ERROR_COUNT
+ERROR_COUNT="${ERROR_COUNT:-0}"
+error () {
+  if [[ -n "$1" ]] ; then
+    echo "$1" 1>&2
+  fi
+  ERROR_COUNT=$(($ERROR_COUNT + 1))
+}
+
+# Die if "$1" != "$2", print $3 as death reason
+check_eq () {
+  [[ "$1" = "$2" ]] || die "Check failed: '$1' == '$2' ${3:+ ($3)}"
+}
+
+# Die if "$1" == "$2", print $3 as death reason
+check_ne () {
+  [[ "$1" != "$2" ]] || die "Check failed: '$1' != '$2' ${3:+ ($3)}"
+}
+
+# The structure of the following if statements is such that if '[[' fails
+# (e.g., a non-number was passed in) then the check will fail.
+
+# Die if "$1" > "$2", print $3 as death reason
+check_le () {
+  [[ "$1" -le "$2" ]] || die "Check failed: '$1' <= '$2' ${3:+ ($3)}"
+}
+
+# Die if "$1" >= "$2", print $3 as death reason
+check_lt () {
+  [[ "$1" -lt "$2" ]] || die "Check failed: '$1' < '$2' ${3:+ ($3)}"
+}
+
+# Die if "$1" < "$2", print $3 as death reason
+check_ge () {
+  [[ "$1" -ge "$2" ]] || die "Check failed: '$1' >= '$2' ${3:+ ($3)}"
+}
+
+# Die if "$1" <= "$2", print $3 as death reason
+check_gt () {
+  [[ "$1" -gt "$2" ]] || die "Check failed: '$1' > '$2' ${3:+ ($3)}"
+}
+
+# Die if $2 !~ $1; print $3 as death reason
+check_match ()
+{
+  expr match "$2" "$1" >/dev/null || \
+    die "Check failed: '$2' does not match regex '$1' ${3:+ ($3)}"
+}
+
+# Run command "$1" at exit. Like "trap" but multiple atexits don't
+# overwrite each other. Will break if someone does call trap
+# directly. So, don't do that.
+ATEXIT="${ATEXIT-}"
+atexit () {
+  if [[ -z "$ATEXIT" ]]; then
+    ATEXIT="$1"
+  else
+    ATEXIT="$1 ; $ATEXIT"
+  fi
+  trap "$ATEXIT" EXIT
+}
+
+## TEST_TMPDIR
+if [[ -z "${TEST_TMPDIR:-}" ]]; then
+  export TEST_TMPDIR="$(mktemp -d ${TMPDIR:-/tmp}/bazel-test.XXXXXXXX)"
+fi
+if [[ ! -e "${TEST_TMPDIR}" ]]; then
+  mkdir -p -m 0700 "${TEST_TMPDIR}"
+  # Clean TEST_TMPDIR on exit
+  atexit "rm -fr ${TEST_TMPDIR}"
+fi
+
+# Functions to compare the actual output of a test to the expected
+# (golden) output.
+#
+# Usage:
+#   capture_test_stdout
+#   ... do something ...
+#   diff_test_stdout "$TEST_SRCDIR/path/to/golden.out"
+
+# Redirect a file descriptor to a file.
+CAPTURED_STD_OUT="${CAPTURED_STD_OUT:-0}"
+CAPTURED_STD_ERR="${CAPTURED_STD_ERR:-0}"
+
+capture_test_stdout () {
+  exec 3>&1 # Save stdout as fd 3
+  exec 4>"${TEST_TMPDIR}/captured.out"
+  exec 1>&4
+  CAPTURED_STD_OUT=1
+}
+
+capture_test_stderr () {
+  exec 6>&2 # Save stderr as fd 6
+  exec 7>"${TEST_TMPDIR}/captured.err"
+  exec 2>&7
+  CAPTURED_STD_ERR=1
+}
+
+# Force XML_OUTPUT_FILE to an existing path
+if [[ -z "${XML_OUTPUT_FILE:-}" ]]; then
+  XML_OUTPUT_FILE=${TEST_TMPDIR}/output.xml
+fi
+
+# Functions to provide easy access to external repository outputs in the
+# sibling repository layout.
+#
+# Usage:
+#   bin_dir <repository name>
+#   genfiles_dir <repository name>
+#   testlogs_dir <repository name>
+
+testlogs_dir() {
+  echo $(bazel info bazel-testlogs | sed "s|bazel-out|bazel-out/$1|")
+}
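+
+# Example usage (an illustrative sketch; assumes a sibling external
+# repository named "my_repo" and a `bazel` binary on PATH):
+#
+#   logs="$(testlogs_dir my_repo)"
+#   echo "test logs for my_repo live under: $logs"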