diff --git a/ci/linux-direct-ubuntu20.04-gcc-release.jenkinsfile b/ci/linux-direct-ubuntu20.04-gcc-release.jenkinsfile index b8db5291..4bf12374 100644 --- a/ci/linux-direct-ubuntu20.04-gcc-release.jenkinsfile +++ b/ci/linux-direct-ubuntu20.04-gcc-release.jenkinsfile @@ -4,7 +4,7 @@ node(node_label) { try { stage('checkout'){ dir ('./') { - git url: 'https://github.com/jinengandhi-intel/graphene_local_ci.git' + git url: 'https://github.com/jinengandhi-intel/graphene_local_ci.git' } dir ("gramine") { diff --git a/ci/stage-test-direct.jenkinsfile b/ci/stage-test-direct.jenkinsfile index 33165bbf..0874a0bc 100644 --- a/ci/stage-test-direct.jenkinsfile +++ b/ci/stage-test-direct.jenkinsfile @@ -4,9 +4,15 @@ stage('test-direct') { timeout(time: 30, unit: 'MINUTES') { sh ''' cd LibOS/shim/test/ltp - make ${MAKEOPTS} -f Makefile.LTP all LTPCFG=$CFG LTPTESTFILE=$PWD$LTPSCENARIO - make -f Makefile.LTP ltp_results_1.xml LTPCFG=$CFG LTPTESTFILE=$PWD$LTPSCENARIO + cp -f toml_files/tests_direct.toml tests.toml + make ${MAKEOPTS} -f Makefile.LTP all ''' + // Run tests in a separate block, so that Jenkins measures build time and run time + // separately + sh ''' + cd LibOS/shim/test/ltp + python3 -m pytest -v -n4 --junit-xml=ltp.xml + ''' /* sh ''' cd LibOS/shim/test/ltp @@ -20,8 +26,8 @@ stage('test-direct') { env.build_ok = false sh 'echo "LTP Test Failed"' } finally { - archiveArtifacts 'LibOS/shim/test/ltp/ltp_results_*.xml' - junit 'LibOS/shim/test/ltp/ltp_results_*.xml' + archiveArtifacts 'LibOS/shim/test/ltp/ltp.xml' + junit 'LibOS/shim/test/ltp/ltp.xml' } try{ diff --git a/ci/stage-test-sgx.jenkinsfile b/ci/stage-test-sgx.jenkinsfile index 7d03099d..2ccb6038 100644 --- a/ci/stage-test-sgx.jenkinsfile +++ b/ci/stage-test-sgx.jenkinsfile @@ -5,11 +5,15 @@ stage('test-sgx') { timeout(time: 60, unit: 'MINUTES') { sh ''' cd LibOS/shim/test/ltp - make ${MAKEOPTS} -f Makefile.LTP SGX=1 all LTPCFG="$CFG" LTPTESTFILE=$PWD$LTPSCENARIO - bash update_manifest.sh - make ${MAKEOPTS} -f 
Makefile.LTP SGX=1 sgx-tokens LTPCFG="$CFG" LTPTESTFILE=$PWD$LTPSCENARIO - make -f Makefile.LTP ltp-sgx_results_1.xml SGX=1 LTPCFG="$CFG" LTPTESTFILE=$PWD$LTPSCENARIO + cp -f toml_files/tests_sgx.toml tests.toml + make ${MAKEOPTS} -f Makefile.LTP SGX=1 all ''' + // Run tests in a separate block, so that Jenkins measures build time and run time + // separately + sh ''' + cd LibOS/shim/test/ltp + python3 -m pytest -v --junit-xml=ltp-sgx.xml + ''' /* sh ''' cd LibOS/shim/test/ltp @@ -23,8 +27,8 @@ stage('test-sgx') { env.build_ok = false sh 'echo "LTP Test Failed"' } finally { - archiveArtifacts 'LibOS/shim/test/ltp/ltp-sgx_results_*.xml' - junit 'LibOS/shim/test/ltp/ltp-sgx_results_*.xml' + archiveArtifacts 'LibOS/shim/test/ltp/ltp-sgx.xml' + junit 'LibOS/shim/test/ltp/ltp-sgx.xml' } } diff --git a/ci/ubuntu18.04.dockerfile b/ci/ubuntu18.04.dockerfile index a2cce735..d7d6f7bd 100644 --- a/ci/ubuntu18.04.dockerfile +++ b/ci/ubuntu18.04.dockerfile @@ -61,6 +61,7 @@ RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install -y \ python3-protobuf \ python3-pyelftools \ python3-pytest \ + python3-pytest-xdist \ python3-scipy \ sqlite3 \ shellcheck \ diff --git a/ci/ubuntu20.04.dockerfile b/ci/ubuntu20.04.dockerfile index 660134b9..39da72e6 100644 --- a/ci/ubuntu20.04.dockerfile +++ b/ci/ubuntu20.04.dockerfile @@ -62,6 +62,7 @@ RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install -y \ python3-protobuf \ python3-pyelftools \ python3-pytest \ + python3-pytest-xdist \ python3-recommonmark \ python3-scipy \ python3-sphinx-rtd-theme \ diff --git a/ci/ubuntu21.04.dockerfile b/ci/ubuntu21.04.dockerfile index a6c159e9..fb10d538 100644 --- a/ci/ubuntu21.04.dockerfile +++ b/ci/ubuntu21.04.dockerfile @@ -61,6 +61,7 @@ RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install -y \ python3-protobuf \ python3-pyelftools \ python3-pytest \ + python3-pytest-xdist \ python3-recommonmark \ python3-scipy \ python3-sphinx-rtd-theme \ diff --git 
a/ltp_config/Makefile.LTP b/ltp_config/Makefile.LTP index ac3faa60..4c22e628 100644 --- a/ltp_config/Makefile.LTP +++ b/ltp_config/Makefile.LTP @@ -1,30 +1,26 @@ -include makevars.mk +ROOTDIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) -target = $(INSTALLDIR)/INSTALL_SUCCESS build-manifest -exec_target = - -clean-extra += clean-build +BUILDDIR = $(ROOTDIR)/build +INSTALLDIR = $(ROOTDIR)/install +TESTCASEDIR = $(INSTALLDIR)/testcases/bin +LTP_SCENARIO = "install/runtest/syscalls-new" SRCDIR = $(ROOTDIR)/ltp_src +clean-extra += clean-build +CFG = ltp_tests.cfg +XML_FILE = ltp.xml ifeq ($(SGX),1) BUILDDIR:=$(BUILDDIR)-sgx INSTALLDIR:=$(INSTALLDIR)-sgx - LTPROOT = install-sgx -else - LTPROOT = install + LTP_SCENARIO = "install-sgx/runtest/syscalls-new" + CFG = ltp_tests.cfg ltp-sgx_tests.cfg ltp-bug-1075_tests.cfg + XML_FILE = ltp-sgx.xml endif VERSION = $(shell uname -r) export VERSION -include ../../../../Scripts/Makefile.rules -include ../../../../Scripts/Makefile.configs -# Make ARCH_LIBDIR visible in Makefile.Test -export ARCH_LIBDIR -include Makefile.Test - -ifeq ($(BUILD_VERBOSE),1) - RUNLTPOPTS += -v -endif +.PHONY: all +all: $(INSTALLDIR)/INSTALL_SUCCESS manifests etc/nsswitch.conf etc/passwd $(SRCDIR)/Makefile: $(error "$(SRCDIR) is empty. 
Please run `git submodule update --init $(SRCDIR)` or download the LTP source code (https://github.com/linux-test-project/ltp) into $(SRCDIR).") @@ -53,33 +49,30 @@ $(INSTALLDIR)/INSTALL_SUCCESS: $(BUILDDIR)/BUILD_SUCCESS ln -sf $(abspath Makefile_testcases.LTP) $(TESTCASEDIR)/Makefile touch $@ -.PHONY: build-manifest -build-manifest: $(TESTCASEDIR)/manifest.template $(INSTALLDIR)/INSTALL_SUCCESS - $(MAKE) -C $(TESTCASEDIR) +.PHONY: manifests +manifests: $(INSTALLDIR)/INSTALL_SUCCESS + gramine-test build -$(TESTCASEDIR)/manifest.template: manifest.LTP $(INSTALLDIR)/INSTALL_SUCCESS - sed -e 's|$$(ARCH_LIBDIR)|'"$(ARCH_LIBDIR)"'|g; s|TESTCASEDIR|$(TESTCASEDIR)|g' \ - $< > $@ +etc/nsswitch.conf: + mkdir -p etc + printf "passwd: compat\ngroup: compat\nshadow: compat\nhosts: files\n" > $@ -.PHONY: sgx-tokens -sgx-tokens: build-manifest - $(MAKE) -C $(TESTCASEDIR) $@ +etc/passwd: + mkdir -p etc + printf "root:x:0:0:root:/root:/bin/bash\nnobody:x:65534:65534:nobody:/nonexistent:/usr/sbin/nologin\n" > $@ .PHONY: regression -regression: -ifeq ($(SGX),1) - $(RM) ltp-sgx.xml - $(MAKE) ltp-sgx.xml -else - $(RM) ltp.xml - $(MAKE) ltp.xml -endif - -%.xml: $(LTPCFG) $(target) $(INSTALLDIR)/INSTALL_SUCCESS - ./contrib/conf_lint.py $(LTPCFG) --scenario $(LTPTESTFILE) - ./runltp_tests.py $(RUNLTPOPTS) $(foreach cfg,$(LTPCFG),-c $(cfg)) $(LTPTESTFILE) -o ltproot=$(LTPROOT) -O $@ +regression: manifests + LTP_CONFIG="$(CFG)" LTP_SCENARIO=$(LTP_SCENARIO) python3 -m pytest -v --junit-xml="$(XML_FILE)" -.PHONY: clean-build -clean-build: +.PHONY: clean +clean: + if test -f $(INSTALLDIR)/INSTALL_SUCCESS; then gramine-test clean; fi $(MAKE) -C $(SRCDIR) clean - $(RM) -r build* install* ltp*.xml + $(RM) -r \ + $(BUILDDIR) \ + $(INSTALLDIR) \ + ltp*.xml \ + etc/ \ + .pytest_cache \ + __pycache__ diff --git a/ltp_config/manifest.template b/ltp_config/manifest.template new file mode 100644 index 00000000..fe5b9f03 --- /dev/null +++ b/ltp_config/manifest.template @@ -0,0 +1,85 @@ 
+loader.entrypoint = "file:{{ gramine.libos }}" +libos.entrypoint = "{{ entrypoint }}" + +loader.env.LD_LIBRARY_PATH = "/lib:{{ arch_libdir }}:/usr/lib:/usr/lib64" +loader.env.PATH = "/bin:/usr/bin:." +loader.env.LD_PRELOAD = "{{ coreutils_libdir }}/libstdbuf.so" +loader.env._STDBUF_O = "L" +loader.insecure__use_cmdline_argv = true + +fs.root.type = "chroot" +fs.root.uri = "file:{{ binary_dir }}" + +fs.mount.etc.type = "chroot" +fs.mount.etc.path = "etc" +fs.mount.etc.uri = "file:etc" + +fs.mount.bin.type = "chroot" +fs.mount.bin.path = "/bin" +fs.mount.bin.uri = "file:/bin" + +fs.mount.boot.type = "chroot" +fs.mount.boot.path = "/boot" +fs.mount.boot.uri = "file:/boot" + +fs.mount.proc.type = "chroot" +fs.mount.proc.path = "/proc" +fs.mount.proc.uri = "file:/proc" + +fs.mount.shm.type = "chroot" +fs.mount.shm.path = "/dev/shm" +fs.mount.shm.uri = "file:/tmp" + +fs.mount.lib.type = "chroot" +fs.mount.lib.path = "/lib" +fs.mount.lib.uri = "file:{{ gramine.runtimedir() }}" + +fs.mount.lib64.type = "chroot" +fs.mount.lib64.path = "{{ arch_libdir }}" +fs.mount.lib64.uri = "file:{{ arch_libdir }}" + +fs.mount.usr.type = "chroot" +fs.mount.usr.path = "/usr" +fs.mount.usr.uri = "file:/usr" + +fs.mount.tmp.type = "chroot" +fs.mount.tmp.path = "/tmp" +fs.mount.tmp.uri = "file:/tmp" + +fs.experimental__enable_sysfs_topology = true +sys.brk.max_size = "32M" +sys.stack.size = "4M" +sgx.nonpie_binary = true +sgx.debug = true +sgx.thread_num = 13 + +sgx.allowed_files = [ + "file:/tmp", + "file:etc", + "file:{{ gramine.runtimedir() }}", + "file:/bin", + "file:/proc", + "file:{{ gramine.runtimedir() }}/libnss_compat.so.2", + "file:{{ arch_libdir }}/libnss_compat.so.2", + "file:{{ arch_libdir }}/libnss_systemd.so.2", + "file:install-sgx/testcases/bin/pipe2_02_child", + "file:install-sgx/testcases/bin/execvp01_child", + "file:install-sgx/testcases/bin/execv01_child", + "file:install-sgx/testcases/bin/execlp01_child", + "file:install-sgx/testcases/bin/execl01_child", +] + 
+sgx.trusted_files = [ + "file:{{ gramine.libos }}", + "file:{{ binary_dir }}/{{ entrypoint }}", + "file:{{ gramine.runtimedir() }}/ld-linux-x86-64.so.2", + "file:{{ gramine.runtimedir() }}/libc.so.6", + "file:{{ gramine.runtimedir() }}/libdl.so.2", + "file:{{ gramine.runtimedir() }}/libm.so.6", + "file:{{ gramine.runtimedir() }}/libpthread.so.0", + "file:{{ gramine.runtimedir() }}/librt.so.1", + "file:{{ coreutils_libdir }}/libstdbuf.so", + "file:{{ gramine.runtimedir() }}/libnss_files.so.2", + "file:{{ arch_libdir }}/libnss_files.so.2", +] + diff --git a/ltp_config/runltp_tests.py b/ltp_config/runltp_tests.py deleted file mode 100755 index 0e6a838e..00000000 --- a/ltp_config/runltp_tests.py +++ /dev/null @@ -1,719 +0,0 @@ -#!/usr/bin/env python3 -# SPDX-License-Identifier: LGPL-3.0-or-later -# Copyright (C) 2019 Wojtek Porczyk - -import abc -import argparse -import asyncio -import configparser -import fnmatch -import logging -import os -import pathlib -import shlex -import signal -import subprocess -import sys -import time -import re - -from lxml import etree - -try: - fspath = os.fspath -except AttributeError: - # python < 3.6 - fspath = str - -DEFAULT_CONFIG = 'ltp.cfg' -ERRORHANDLER = 'backslashreplace' - -argparser = argparse.ArgumentParser() -argparser.add_argument('--config', '-c', metavar='FILENAME', - action='append', - type=argparse.FileType('r'), - help='config file (default: {}); may be given multiple times'.format( - DEFAULT_CONFIG)) - -argparser.add_argument('--option', '-o', metavar='KEY=VALUE', - action='append', - help='set an option') - -argparser.add_argument('--verbose', '-v', - action='count', - help='increase verbosity') - -argparser.add_argument('--list-executables', - action='store_true', default=False, - help='only list executables needed to run the suite') - -argparser.add_argument('cmdfile', metavar='FILENAME', - type=argparse.FileType('r'), - nargs='?', - help='cmdfile (default: stdin)') - -argparser.add_argument('--output-file', '-O', 
metavar='FILENAME', - type=argparse.FileType('w'), - help='write XML report to a file (use - for stdout)') - -argparser.set_defaults( - config=None, - option=[], - verbose=0, - cmdfile='-') - -_log = logging.getLogger('LTP') # pylint: disable=invalid-name - - -class AbnormalTestResult(Exception): - '''Raised in some cases of test not succeeding. - - Args: - message (str): a message to be logged - ''' - - loglevel = logging.WARNING - - def __init__(self, message, *, loglevel=None): - super().__init__() - self.message = message - if loglevel is not None: - self.loglevel = loglevel - - @abc.abstractmethod - def apply_to(self, runner): - '''Apply a status to a runner. - - Args: - runner (TestRunner): runner to apply the status to - ''' - raise NotImplementedError() - -class Fail(AbnormalTestResult): - '''Raised when test fails nominally.''' - def apply_to(self, runner): - runner.failure(self.message, loglevel=self.loglevel) - -class Skip(AbnormalTestResult): - '''Raised when test is skipped.''' - def apply_to(self, runner): - runner.skipped(self.message, loglevel=self.loglevel) - -class Error(AbnormalTestResult): - '''Raised when test fails for external or grave reason.''' - loglevel = logging.ERROR - def apply_to(self, runner): - runner.error(self.message, loglevel=self.loglevel) - - -class TestRunner: - '''A runner which will run a single scenario. - - The arguments *tag* and *cmd* most likely come from parsing a scenario file. - The command should be a simple invocation, limited to a single executable - with arguments. Compound commands (i.e. with pipes) are not supported and - will result in :py:exc:`Error`. 
- - Args: - suite (TestSuite): a suite, for which this runner will add a result - tag (str): a name of the test - cmd (iterable): the command (full *argv*) - ''' - def __init__(self, suite, tag, cmd): - self.suite = suite - self.tag = tag - self.cmd = tuple(cmd) - - try: - self.cfgsection = self.suite.config[self.tag] - except (configparser.NoSectionError, KeyError): - self.cfgsection = self.suite.config[ - self.suite.config.default_section] - - self.classname = self.cfgsection.get('junit-classname') - self.log = _log.getChild(self.tag) - - self.stdout = None - self.stderr = None - self.time = None - self.props = {} - - self._added_result = False - - - def _add_result(self): - if self._added_result: - raise RuntimeError('multiple results for a testcase') - self._added_result = True - - element = etree.Element('testcase', - classname=self.classname, name=self.tag) - - self.suite.add_result(element) - self.suite.inc('tests') - - if self.time is not None: - element.set('time', '{:.3f}'.format(self.time)) - self.suite.inc('time', self.time, type=float, fmt='.3f') - - if self.stdout is not None: - try: - etree.SubElement(element, 'system-out').text = self.stdout - except ValueError: - res = re.sub(u'[^\u0020-\uD7FF\u0009\u000A\u000D\uE000-\uFFFD\U00010000-\U0010FFFF]+', '', self.stdout) - etree.SubElement(element, 'system-out').text = res - if self.stderr is not None: - etree.SubElement(element, 'system-err').text = self.stderr - - if self.props: - properties = etree.SubElement(element, 'properties') - for name, value in self.props.items(): - etree.SubElement(properties, 'property', - name=str(name), value=str(value)) - - return element - - def success(self, *, loglevel=logging.WARNING): - '''Add a success to the report''' - # pylint: disable=redefined-outer-name - self.log.log(loglevel, '-> PASS') - self._add_result() - - def failure(self, message, *, loglevel=logging.WARNING): - '''Add a nominal failure to the report - - Args: - message (str): a message to display 
(“Stack Trace” in Jenkins) - ''' - # pylint: disable=redefined-outer-name - self.log.log(loglevel, '-> FAIL (%s)', message) - etree.SubElement(self._add_result(), 'failure', message=message) - self.suite.inc('failures') - - def error(self, message, *, loglevel=logging.ERROR): - '''Add an error to the report - - Args: - message (str): a message to display - ''' - # pylint: disable=redefined-outer-name - self.log.log(loglevel, '-> ERROR (%s)', message) - etree.SubElement(self._add_result(), 'error').text = message - self.suite.inc('errors') - - def skipped(self, message, *, loglevel=logging.WARNING): - '''Add a skipped test to the report - - Args: - message (str): a message to display (“Skip Message” in Jenkins) - ''' - # pylint: disable=redefined-outer-name - self.log.log(loglevel, '-> SKIP (%s)', message) - etree.SubElement(self._add_result(), 'skipped').text = message - self.suite.inc('skipped') - - - def _prepare(self): - '''Common initalization - - This is used in two ways, so was refactored to a separate function - ''' - - if self.cfgsection.getboolean('skip', fallback=False): - raise Skip('skipped via config', loglevel=logging.INFO) - - for name, section in self.suite.match_sections(self.tag): - if section.getboolean('skip', fallback=False): - raise Skip('skipped via fnmatch section {}'.format(name), loglevel=logging.INFO) - - if any(c in self.cmd for c in ';|&'): - # This is a shell command which would spawn multiple processes. - # We don't run those in unit tests. 
- if 'must-pass' in self.cfgsection: - raise Error('invalid shell command with must-pass') - raise Skip('invalid shell command') - - def get_executable_name(self): - '''Return the executable name, or :py:obj:`None` if the test will not - run.''' - try: - self._prepare() - except AbnormalTestResult: - return None - else: - return self.cmd[0] - - async def cmd_execute(self, cmd): - timeout = self.cfgsection.getfloat('timeout') - self.log.info('starting %r with timeout %d', cmd, timeout) - start_time = time.time() - - # pylint: disable=subprocess-popen-preexec-fn - proc = await asyncio.create_subprocess_exec( - *cmd, - cwd=fspath(self.suite.bindir), - stdout=subprocess.PIPE, stderr=subprocess.PIPE, - preexec_fn=os.setsid, - close_fds=True) - - # XXX: change `ensure_future` to `create_task` once versions < 3.7 are - # deprecated - tasks = [asyncio.ensure_future(i) for i in [proc.wait(), - proc.communicate()]] - - done, pending = await asyncio.wait(tasks, timeout=timeout, - return_when=asyncio.FIRST_COMPLETED) - - timedout = not done - - try: - # after `setsid` pgid should be the same as pid - if proc.pid != os.getpgid(proc.pid): - self.log.warning('main process changed pgid, this might ' - 'indicate an error and prevent all processes from being ' - 'cleaned up') - except ProcessLookupError: - pass - - try: - os.killpg(proc.pid, signal.SIGKILL) - except ProcessLookupError: - pass - - self.time = time.time() - start_time - - if pending: - _, pending = await asyncio.wait(pending) - assert not pending - - assert tasks[1].done() - self.stdout, self.stderr = (stream.decode(errors=ERRORHANDLER) - for stream in tasks[1].result()) - - if timedout: - raise Error('Timed out after {} s.'.format(timeout)) - - return proc - - async def run_test_setup(self): - exec_name = os.path.join(self.suite.bindir.resolve(), - self.get_executable_name()).replace("run", "setup") - cmd = [exec_name] - - proc = await self.cmd_execute(cmd) - - if any(x in str(self.stdout) for x in ["TCONF", "TFAIL", 
"TBROK"]) or any(x in str(self.stderr) for x in ["TCONF", "TFAIL", "TBROK"]): - self.props['returncode'] = proc.returncode - raise Error("Failed in setup function for {}".format(cmd)) - - async def _run_cmd(self): - '''Actually run the test and possibly set various attributes that result - from the test run. - - Raises: - AbnormalTestResult: for assorted failures - ''' - if "_run" in self.cmd[0]: - await self.run_test_setup() - - cmd = [self.suite.loader, *self.cmd] - - proc = await self.cmd_execute(cmd) - - self.log.info('finished pid=%d time=%.3f returncode=%d stdout=%s', - proc.pid, self.time, proc.returncode, self.stdout) - if self.stderr: - self.log.info('stderr=%s', self.stderr) - - self.props['returncode'] = proc.returncode - - try: - os.system("rm -rf /tmp/* > /dev/null 2>&1") - except: - pass - - return proc.returncode - - def check_system_error_output(self, stderr): - error_list = [] - error_list = stderr.split("\n") - for error in error_list: - if error == "": - pass - elif "Using insecure argv source" in error or \ - "error: Mounting file:/proc may expose unsanitized" in error or \ - "error: Failed to read ELF header" in error or \ - "Disallowing access to file '/usr/bin/systemd-detect-virt'" in error or \ - "Disallowing access to file '/lib64/libnss_nis.so.2'" in error or \ - "Disallowing access to file '/usr/lib64/libnss_nis.so.2'" in error or \ - "Disallowing access to file '/lib64/libtinfo.so.6'" in error or \ - "Disallowing access to file '/usr/lib64/libtinfo.so.6'" in error or \ - "Detected deprecated syntax. Consider switching to new syntax: 'sgx.allowed_files" in error or \ - "-" in error or \ - "Gramine detected the following insecure configurations:" in error or \ - "Gramine will continue application execution, but this configuration must not be used in production!" in error or \ - 'WARNING! "allowed_files" is an insecure feature designed for debugging and prototyping, it must never be used in production!' 
in error or \ - "error: Mounting file:/dev/cpu_dma_latency may expose unsanitized" in error or \ - "Disallowing access to file '/lib/x86_64-linux-gnu/libnss_nis.so.2" in error : - pass - else: - raise Fail('Error Message={}'.format(error)) - - async def execute(self): - '''Execute the test, parse the results and add report in the suite.''' - try: - self._prepare() - - async with self.suite.semaphore: - returncode = await self._run_cmd() - - must_pass = self.cfgsection.getintset('must-pass') - woken_string = re.compile(r'woken up early | \[\d+\,\d+\]') - woken_string_result = woken_string.findall(self.stdout) - if not self.stdout: - raise Fail('returncode={}'.format(returncode)) - elif must_pass is not None: - self._parse_test_output(must_pass) - elif (("TFAIL" in self.stdout or "TBROK" in self.stdout) and not woken_string_result): - raise Fail('returncode={}'.format(returncode)) - elif (woken_string_result): - flag_woken_issue = False - for entry in woken_string_result: - find_num = re.findall(r'\d+', entry) - if (find_num): - num_list = list(map(int, find_num)) - num_list_1 = num_list[0] - num_list_2 = num_list[1] - if ( num_list_1 > 50000 and num_list_2 > 50000 ): - raise Fail('returncode={}'.format(returncode)) - else: - flag_woken_issue = True - continue - if (flag_woken_issue == True): - pass - elif ("TCONF" in self.stdout and "TPASS" not in self.stdout): - raise Fail('returncode={}'.format(returncode)) - - self.check_system_error_output(self.stderr) - - except AbnormalTestResult as result: - result.apply_to(self) - - else: - self.success() - - def _parse_test_output(self, must_pass): - '''Parse the output - - This is normally done only for a test that has non-empty ``must-pass`` - config directive. 
- ''' - notfound = must_pass.copy() - passed = set() - failed = set() - skipped = set() - dontcare = set() - - # on empty must_pass, it is always needed - maybe_unneeded_must_pass = bool(must_pass) - - subtest = 0 - for line in self.stdout.split('\n'): - self.log.debug('<- %r', line) - - if line == 'Summary:': - break - - # Drop this line so that we get consistent offsets - if line == 'WARNING: no physical memory support, process creation may be slow.': - continue - - tokens = line.split() - if len(tokens) < 2: - continue - - if 'INFO' in line: - continue - - if tokens[1].isdigit(): - subtest = int(tokens[1]) - else: - subtest += 1 - - try: - notfound.remove(subtest) - except KeyError: - # subtest is not in must-pass - maybe_unneeded_must_pass = False - - if 'TPASS' in line or 'PASS:' in line: - if subtest in must_pass: - passed.add(subtest) - else: - dontcare.add(subtest) - continue - - if any(t in line for t in ( - 'TFAIL', 'FAIL:', 'TCONF', 'CONF:', 'TBROK', 'BROK:')): - if subtest in must_pass: - failed.add(subtest) - maybe_unneeded_must_pass = False - else: - skipped.add(subtest) - continue - - #self.error(line, subtest=subtest) - self.log.info('additional info: %s', line) - - - self.props.update( - must_pass=', '.join(str(i) for i in sorted(must_pass)), - passed=', '.join(str(i) for i in sorted(passed)), - failed=', '.join(str(i) for i in sorted(failed)), - skipped=', '.join(str(i) for i in sorted(skipped)), - notfound=', '.join(str(i) for i in sorted(notfound)), - dontcare=', '.join(str(i) for i in sorted(dontcare)), - ) - - stat = ( - 'FAILED=[{failed}] ' - 'NOTFOUND=[{notfound}] ' - 'passed=[{passed}] ' - 'dontcare=[{dontcare}] ' - 'skipped=[{skipped}] ' - 'returncode={returncode}' - ).format(**self.props) - - if not (passed or failed or skipped or dontcare): - if must_pass: - raise Error('binary did not provide any subtests, see stdout ' - '(returncode={returncode}, must-pass=[{must_pass}])'.format( - **self.props)) - raise Skip('binary without subtests, 
see stdout ' - '(returncode={returncode})'.format(**self.props)) - - if maybe_unneeded_must_pass and not notfound: - # all subtests passed and must-pass specified exactly all subtests - raise Error( - 'must-pass is unneeded, remove it from config ({})'.format(stat) - ) - - if failed or notfound: - raise Fail('some required subtests failed or not attempted, ' - 'see stdout ({})'.format(stat)) - - if not passed: - raise Skip('all subtests skipped ({})'.format(stat)) - - -class TestSuite: - '''A test suite and result generator. - - Args: - config (configparser.Configparser): configuration - ''' - def __init__(self, config): - self.config = config - self.fnmatch_names = [ - name for name in config - if is_fnmatch_pattern(name) - ] - self.sgx = self.config.getboolean(config.default_section, 'sgx') - - self.loader = 'gramine-sgx' if self.sgx else 'gramine-direct' - - self.bindir = ( - config.getpath(config.default_section, 'ltproot') / 'testcases/bin') - - # Running parallel tests under SGX is risky, see README. - # However, if user wanted to do that, we shouldn't stand in the way, - # just issue a warning. - processes = config.getint(config.default_section, 'jobs', - fallback=(1 if self.sgx else len(os.sched_getaffinity(0)))) - if self.sgx and processes != 1: - _log.warning('WARNING: SGX is enabled and jobs = %d (!= 1);' - ' expect stability issues', processes) - - self.semaphore = asyncio.BoundedSemaphore(processes) - self.queue = [] - self.xml = etree.Element('testsuite') - self.time = 0 - - def match_sections(self, name): - ''' - Find all fnmatch (wildcard) sections that match a given name. 
- ''' - - for fnmatch_name in self.fnmatch_names: - if fnmatch.fnmatch(name, fnmatch_name): - yield fnmatch_name, self.config[fnmatch_name] - - def add_test(self, tag, cmd): - '''Instantiate appropriate :py:class:`TestRunner` and add it to the - suite - - Args: - tag (str): test case name - cmd (iterable): command (full *argv*) - ''' - self.queue.append(TestRunner(self, tag, cmd)) - - def add_result(self, element): - '''Add a result. - - This should only be invoked from the :py:class:`TestRunner`. - - Args: - element (lxml.etree.Element): XML element - ''' - self.xml.append(element) - - def get_executable_names(self): - '''Return a list for all executables that would be run, without actually - running them.''' - names = {runner.get_executable_name() for runner in self.queue} - names.discard(None) - return sorted(names) - - def _get(self, accumulator, *, default=0, type=int): - # pylint: disable=redefined-builtin - return type(self.xml.get(accumulator, default)) - - def inc(self, accumulator, value=1, *, type=int, fmt=''): - '''Increase a counter on the report. 
- - Args: - accumulator (str): the counter name - value (int or float): the increment (default: 1) - type: the type the existing value, or callable that given a string - would parse and return it (default: :py:class:`int`) - fmt (str): the desired format to be stored, as accepted by - :py:func:`format` - (default is equivalent to what :py:func:`repr` does) - ''' - # pylint: disable=redefined-builtin - self.xml.set(accumulator, - format(self._get(accumulator, type=type) + value, fmt)) - - @property - def returncode(self): - '''A suggested return code for the application that run this test suite - ''' - return min(255, self._get('errors') + self._get('failures')) - - def write_report(self, stream): - '''Write the XML report to a file - - Args: - stream: a file-like object - ''' - stream.write(etree.tostring(self.xml, pretty_print=True).decode('ascii')) - - def log_summary(self): - tests = self._get('tests') - failures = self._get('failures') - errors = self._get('errors') - skipped = self._get('skipped') - passed = tests - (failures + errors + skipped) - _log.warning('LTP finished' - ' tests=%d passed=%d failures=%d errors=%d skipped=%d returncode=%d', - tests, passed, failures, errors, skipped, self.returncode) - - async def execute(self): - '''Execute the suite''' - # Spawn tasks first, then run asyncio.gather(), so that they are not started in a random - # order under Python 3.6 (see: https://stackoverflow.com/a/60856811). - # Note that we still need to sort the results afterwards. 
- # loop = asyncio.get_event_loop() - # tasks = [loop.create_task(runner.execute()) for runner in self.queue] - # await asyncio.gather(*tasks) - # self.sort_xml() - for runner in self.queue: - await runner.execute() - - def sort_xml(self): - '''Sort test results by name''' - self.xml[:] = sorted(self.xml, key=lambda test: test.get('name')) - -def _getintset(value): - return set(int(i) for i in value.strip().split()) - -def load_config(files): - '''Load the configuration from given files - - Returns: - configparser.ConfigParser: - ''' - config = configparser.ConfigParser( - converters={ - 'path': pathlib.Path, - 'intset': _getintset, - }, - defaults={ - 'timeout': '30', - 'sgx': 'false', - 'ltproot': './install', - 'junit-classname': 'apps.LTP', - }) - - for file in files: - with file: - config.read_file(file) - - for name, proxy in config.items(): - if is_fnmatch_pattern(name): - for key in proxy: - if key != 'skip' and proxy[key] != config.defaults().get(key): - raise ValueError( - 'fnmatch sections like {!r} can only contain "skip", not {!r}'.format( - name, key)) - - return config - -def is_fnmatch_pattern(name): - ''' - Check if a name is a fnmatch pattern. 
- ''' - - return bool(set(name) & set('*?[]!')) - -def main(args=None): - logging.basicConfig( - format='%(asctime)s %(name)s: %(message)s', - level=logging.WARNING) - args = argparser.parse_args(args) - _log.setLevel(_log.level - args.verbose * 10) - - if args.config is None: - args.config = [open(DEFAULT_CONFIG)] - - config = load_config(args.config) - for token in args.option: - key, value = token.split('=', maxsplit=1) - config[config.default_section][key] = value - - suite = TestSuite(config) - with args.cmdfile as file: - for line in file: - if line[0] in '\n#': - continue - tag, *cmd = shlex.split(line) - suite.add_test(tag, cmd) - - if args.list_executables: - print('\n'.join(suite.get_executable_names())) - return 0 - - try: - loop = asyncio.get_event_loop() - loop.run_until_complete(suite.execute()) - finally: - loop.close() - - if args.output_file: - suite.write_report(args.output_file) - suite.log_summary() - return suite.returncode - -if __name__ == '__main__': - sys.exit(main()) diff --git a/ltp_config/test_ltp.py b/ltp_config/test_ltp.py new file mode 100644 index 00000000..61a16039 --- /dev/null +++ b/ltp_config/test_ltp.py @@ -0,0 +1,321 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: LGPL-3.0-or-later +# Copyright (C) 2019 Wojtek Porczyk +# Copyright (c) 2021 Intel Corporation +# Paweł Marczewski + +import configparser +import fnmatch +import logging +import os +import pathlib +import shlex +import subprocess +import sys +import re +import pytest + +from graminelibos.regression import HAS_SGX, run_command + +sgx_mode = os.environ.get('SGX') +if sgx_mode == '1': + DEFAULT_LTP_SCENARIO = 'install-sgx/runtest/syscalls-new' + DEFAULT_LTP_CONFIG = 'ltp_tests.cfg ltp-sgx_tests.cfg ltp-bug-1075_tests.cfg' +else: + DEFAULT_LTP_SCENARIO = 'install/runtest/syscalls-new' + DEFAULT_LTP_CONFIG = 'ltp_tests.cfg' + +LTP_SCENARIO = os.environ.get('LTP_SCENARIO', DEFAULT_LTP_SCENARIO) +LTP_CONFIG = os.environ.get('LTP_CONFIG', DEFAULT_LTP_CONFIG).split(' ') 
+LTP_TIMEOUT_FACTOR = float(os.environ.get('LTP_TIMEOUT_FACTOR', '1')) + +def read_scenario(scenario): + """Read an LTP scenario file (list of tests). + + Each line specifies a name (tag) and a command. + """ + + with open(scenario, 'r') as f: + for line in f: + if line[0] in '\n#': + continue + tag, *cmd = shlex.split(line) + yield tag, cmd + + +def is_wildcard_pattern(name): + return bool(set(name) & set('*?[]!')) + + +def get_int_set(value): + return set(int(i) for i in value.strip().split()) + + +class Config: + """Parser for LTP configuration files. + + A section name can be a test tag, or a wildcard matching many tags (e.g. `access*`). A wildcard + section can only contain `skip = yes`. + + TODO: Instead of Python's INI flavor (`configparser`), use TOML, for consistency with the rest + of the project. + """ + + def __init__(self, config_paths): + self.cfg = configparser.ConfigParser( + converters={ + 'path': pathlib.Path, + 'intset': get_int_set, + }, + defaults={ + 'timeout': '30', + }, + ) + + for path in config_paths: + with open(path, 'r') as f: + self.cfg.read_file(f) + + self.skip_patterns = [] + for name, section in self.cfg.items(): + if is_wildcard_pattern(name): + for key in section: + if key != 'skip' and section[key] != self.cfg.defaults().get(key): + raise ValueError( + 'wildcard sections like {!r} can only contain "skip", not {!r}'.format( + name, key)) + if section.get('skip'): + self.skip_patterns.append(name) + + def get(self, tag): + """Find a section for given tag. + + Returns the default section if there's no specific one, and None if the test should be + skipped. 
+ """ + if self.cfg.has_section(tag): + section = self.cfg[tag] + if section.get('skip'): + return None + return section + + for pattern in self.skip_patterns: + if fnmatch.fnmatch(tag, pattern): + return None + + return self.cfg[self.cfg.default_section] + + +def list_tests(ltp_config=LTP_CONFIG, ltp_scenario=LTP_SCENARIO): + """List all tests along with their configuration.""" + + config = Config(ltp_config) + + for tag, cmd in read_scenario(ltp_scenario): + section = config.get(tag) + yield tag, cmd, section + +def woken_up_valid_check(woken_string_result): + if (woken_string_result): + flag_woken_issue = False + for entry in woken_string_result: + find_num = re.findall(r'\d+', entry) + if (find_num): + num_list = list(map(int, find_num)) + num_list_1 = num_list[0] + num_list_2 = num_list[1] + if ( num_list_1 > 50000 and num_list_2 > 50000 ): + return False + else: + flag_woken_issue = True + continue + if (flag_woken_issue == True): + return True + else: + return True + +def parse_test_output(stdout, _stderr): + """Parse LTP stdout to determine passed/failed subtests. + + Returns two sets: passed and failed subtest numbers. 
+ """ + + passed = set() + failed = set() + conf = set() + subtest = 0 + woken_string = re.compile(r'woken up early | \[\d+\,\d+\]') + woken_string_result = woken_string.findall(stdout) + woken_up_valid = woken_up_valid_check(woken_string_result) + system_error_output_value = check_system_error_output(_stderr) + for line in stdout.splitlines(): + if line == 'Summary': + break + + # Drop this line so that we get consistent offsets + if line == 'WARNING: no physical memory support, process creation may be slow.': + continue + + tokens = line.split() + if len(tokens) < 2: + continue + + if 'INFO' in line: + continue + + if tokens[1].isdigit(): + subtest = int(tokens[1]) + else: + subtest += 1 + + if 'TPASS' in line or 'PASS:' in line and (system_error_output_value == True) and (woken_up_valid == True): + passed.add(subtest) + elif (any(t in line for t in ['TFAIL', 'FAIL:', 'TBROK', 'BROK:']) and (system_error_output_value == True) and (woken_up_valid == True)): + failed.add(subtest) + elif ('TCONF' in line or 'CONF' in line and (system_error_output_value == True) and (woken_up_valid == True)): + conf.add(subtest) + + return passed, failed, conf + + +def check_must_pass(passed, failed, must_pass, conf): + """Verify the test results based on `must-pass` specified in configuration file.""" + + # No `must-pass` means all tests must pass + if not must_pass: + if failed: + pytest.fail('Failed subtests: {}'.format(failed)) + elif conf and not passed: + pytest.fail('Only TCONF found: {}'.format(failed)) + return + + must_pass_passed = set() + must_pass_failed = set() + must_pass_unknown = set() + for subtest in must_pass: + if subtest in passed: + must_pass_passed.add(subtest) + elif subtest in failed: + must_pass_failed.add(subtest) + else: + must_pass_unknown.add(subtest) + + if must_pass_failed or must_pass_unknown: + pytest.fail('Failed or unknown subtests specified in must-pass: {}'.format( + must_pass_failed + must_pass_unknown)) + + if not failed and passed == 
must_pass_passed and not conf: + pytest.fail('The must-pass list specifies all tests, remove it from config') + + if not passed and not conf: + pytest.fail('All subtests skipped, replace must-pass with skip') + +def check_system_error_output(_stderr): + error_list = [] + error_list = _stderr.split("\n") + for error in error_list: + if error == "": + return True + elif "Using insecure argv source" in error or \ + "error: Mounting file:/proc may expose unsanitized" in error or \ + "error: Failed to read ELF header" in error or \ + "Disallowing access to file '/usr/bin/systemd-detect-virt'" in error or \ + "Disallowing access to file '/lib64/libnss_nis.so.2'" in error or \ + "Disallowing access to file '/usr/lib64/libnss_nis.so.2'" in error or \ + "Disallowing access to file '/lib64/libtinfo.so.6'" in error or \ + "Disallowing access to file '/usr/lib64/libtinfo.so.6'" in error or \ + "Detected deprecated syntax. Consider switching to new syntax: 'sgx.allowed_files" in error or \ + "-" in error or \ + "Gramine detected the following insecure configurations:" in error or \ + "Gramine will continue application execution, but this configuration must not be used in production!" in error or \ + 'WARNING! "allowed_files" is an insecure feature designed for debugging and prototyping, it must never be used in production!' 
in error or \ + "error: Mounting file:/dev/cpu_dma_latency may expose unsanitized" in error or \ + "Disallowing access to file '/lib/x86_64-linux-gnu/libnss_nis.so.2" in error : + return True + else: + return False + +def test_ltp(cmd, section): + must_pass = section.getintset('must-pass') + if sgx_mode == '1': + binary_dir_ltp = "install-sgx/testcases/bin" + else: + binary_dir_ltp = "install/testcases/bin" + loader = 'gramine-sgx' if HAS_SGX else 'gramine-direct' + timeout = int(section.getfloat('timeout') * LTP_TIMEOUT_FACTOR) + full_cmd = [loader, *cmd] + match = re.search(r'_run', full_cmd[1]) + if match: + setup_bin = os.path.join(binary_dir_ltp, full_cmd[1].replace("run", "setup")) + returncode_setup, stdout_setup, _stderr_setup = run_command(setup_bin, timeout=timeout, can_fail=True) + logging.info('command: %s', full_cmd) + logging.info('must_pass: %s', list(must_pass) if must_pass else 'all') + + returncode, stdout, _stderr = run_command(full_cmd, timeout=timeout, can_fail=True) + + # Parse output regardless of whether `must_pass` is specified: unfortunately some tests + # do not exit with non-zero code when failing, because they rely on `MAP_SHARED` (which + # we do not support correctly) for collecting test results. + passed, failed, conf = parse_test_output(stdout, _stderr) + + logging.info('returncode: %s', returncode) + logging.info('passed: %s', list(passed)) + logging.info('failed: %s', list(failed)) + + if must_pass: + check_must_pass(passed, failed, must_pass, conf) + + +def test_lint(): + cmd = ['./contrib/conf_lint.py', '--scenario', LTP_SCENARIO, *LTP_CONFIG] + p = subprocess.run(cmd) + if p.returncode: + pytest.fail('conf_lint.py failed, see stdout for details') + + +def pytest_generate_tests(metafunc): + """Generate all tests. + + This function is called by Pytest, and it's responsible for generating parameters for + `test_ltp`. 
+ """ + + if metafunc.function is test_ltp: + params = [] + for tag, cmd, section in list_tests(): + # If a test should be skipped, mark it as such, but add it for Pytest anyway: we want + # skipped tests to be visible in the report. + marks = [] if section else [pytest.mark.skip] + params.append(pytest.param(cmd, section, id=tag, marks=marks)) + + metafunc.parametrize('cmd,section', params) + + +def main(): + if sys.argv[1:] == ['--list']: + seen = set() + for _tag, cmd, section in list_tests(): + executable = cmd[0] + if section and executable not in seen: + seen.add(executable) + print(executable) + else: + usage = '''\ +Usage: + + {} --list (to list test executables) + +Invoke Pytest directly (python3 -m pytest) to run tests. + +Supports the following environment variables: + + SGX: set to 1 to enable SGX mode (default: disabled) + LTP_SCENARIO: LTP scenario file (default: {}) + LTP_CONFIG: space-separated list of LTP config files (default: {}) + LTP_TIMEOUT_FACTOR: multiply all timeouts by given value +'''.format(sys.argv[0], DEFAULT_LTP_SCENARIO, DEFAULT_LTP_CONFIG) + print(usage, file=sys.stderr) + sys.exit(1) + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/ltp_config/toml_files/tests_direct.toml b/ltp_config/toml_files/tests_direct.toml new file mode 100644 index 00000000..28579251 --- /dev/null +++ b/ltp_config/toml_files/tests_direct.toml @@ -0,0 +1,3 @@ +binary_dir = "install/testcases/bin" + +manifests_cmd = "LTP_CONFIG='ltp_tests.cfg' ./test_ltp.py --list" \ No newline at end of file diff --git a/ltp_config/toml_files/tests_sgx.toml b/ltp_config/toml_files/tests_sgx.toml new file mode 100644 index 00000000..a17942e3 --- /dev/null +++ b/ltp_config/toml_files/tests_sgx.toml @@ -0,0 +1,3 @@ +binary_dir = "install-sgx/testcases/bin" + +manifests_cmd = "LTP_CONFIG='ltp_tests.cfg ltp-sgx_tests.cfg ltp-bug-1075_tests.cfg' ./test_ltp.py --list" \ No newline at end of file diff --git a/ltp_config/update_manifest.sh 
#!/bin/bash
# For the "syscalls-new" scenario, register each test's child helper binary in the
# allowed_files list of the corresponding Gramine manifest, by inserting the
# "file:<name>_child" entry right after the opening `allowed_files = [`.
#
# Fixed: `sed` must run with `-i` to edit the manifests in place — without it the
# substitution is only printed to stdout and the manifests stay unchanged (the
# previous revision of this script edited in place). Paths are quoted in case
# $PWD contains spaces.
if [[ $LTPSCENARIO == *"syscalls-new"* ]]; then
    sed -i 's/allowed_files = \[/&"file:pipe2_02_child",/' "$PWD/pipe2_02.manifest"
    sed -i 's/allowed_files = \[/&"file:execvp01_child",/' "$PWD/execvp01.manifest"
    sed -i 's/allowed_files = \[/&"file:execv01_child",/' "$PWD/execv01.manifest"
    sed -i 's/allowed_files = \[/&"file:execlp01_child",/' "$PWD/execlp01.manifest"
    sed -i 's/allowed_files = \[/&"file:execl01_child",/' "$PWD/execl01.manifest"
fi