diff --git a/tests/README.md b/tests/README.md index c3a3aca9e65b..07070a1b036a 100644 --- a/tests/README.md +++ b/tests/README.md @@ -393,3 +393,31 @@ uvm.ssh.run("ls") snap = uvm.snapshot_full() uvm.help.tmux_ssh() ``` + +It supports a number of options; you can list them with `devtool sandbox -- +--help`. + +### Running outside of Docker + +To run the sandbox directly on the host, outside the development container, install the dependencies and start it as follows: + +``` +source /etc/os-release +case $ID-$VERSION_ID in +amzn-2) + sudo yum remove -y python3 + sudo amazon-linux-extras install -y python3.8 + sudo ln -sv /usr/bin/python3.8 /usr/bin/python3 + sudo ln -sv /usr/bin/pip3.8 /usr/bin/pip3 +esac + +sudo pip3 install pytest ipython requests psutil tenacity filelock "urllib3<2.0" requests_unixsocket + +sudo env PYTHONPATH=tests HOME=$HOME ~/.local/bin/ipython3 -i ./tools/sandbox.py -- --binary-dir ../repro/v1.4.1 +``` + +> :warning: **Note that this runs as root!** + +```python +!id +``` diff --git a/tests/conftest.py b/tests/conftest.py index ced61be0f0f8..8f291d16fc77 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -176,18 +176,6 @@ def test_fc_session_root_path(): yield fc_session_root_path -@pytest.fixture(scope="session") -def bin_cloner_path(test_fc_session_root_path): - """Build a binary that `clone`s into the jailer. - - It's necessary because Python doesn't interface well with the `clone()` - syscall directly. - """ - cloner_bin_path = os.path.join(test_fc_session_root_path, "newpid_cloner") - build_tools.gcc_compile("host_tools/newpid_cloner.c", cloner_bin_path) - yield cloner_bin_path - - @pytest.fixture(scope="session") def bin_vsock_path(test_fc_session_root_path): """Build a simple vsock client/server application.""" @@ -276,7 +264,7 @@ def fc_tmp_path(test_fc_session_root_path): @pytest.fixture() -def microvm_factory(fc_tmp_path, bin_cloner_path, request): +def microvm_factory(fc_tmp_path, request, record_property): """Fixture to create microvms simply. In order to avoid running out of space when instantiating many microvms, @@ -290,10 +278,9 @@ def microvm_factory(fc_tmp_path, bin_cloner_path, request): jailer_binary_path = Path(binary_dir) / "jailer" else: fc_binary_path, jailer_binary_path = build_tools.get_firecracker_binaries() + record_property("firecracker_bin", str(fc_binary_path)) - uvm_factory = MicroVMFactory( - fc_tmp_path, bin_cloner_path, fc_binary_path, jailer_binary_path - ) + uvm_factory = MicroVMFactory(fc_tmp_path, fc_binary_path, jailer_binary_path) yield uvm_factory uvm_factory.kill() shutil.rmtree(fc_tmp_path) diff --git a/tests/framework/defs.py b/tests/framework/defs.py index 58254aad6930..ebc25ebcdc7b 100644 --- a/tests/framework/defs.py +++ b/tests/framework/defs.py @@ -8,9 +8,6 @@ # Firecracker's binary name FC_BINARY_NAME = "firecracker" -# Jailer's binary name -JAILER_BINARY_NAME = "jailer" - # The Firecracker sources workspace dir FC_WORKSPACE_DIR = Path(__file__).parent.parent.parent.resolve() @@ -32,10 +29,6 @@ # Absolute path to the test results folder TEST_RESULTS_DIR = FC_WORKSPACE_DIR / "test_results" -# Name of the file that stores firecracker's PID when launched by jailer with -# `--new-pid-ns`. -FC_PID_FILE_NAME = "firecracker.pid" - # The minimum required host kernel version for which io_uring is supported in # Firecracker.
MIN_KERNEL_VERSION_FOR_IO_URING = "5.10.51" diff --git a/tests/framework/jailer.py b/tests/framework/jailer.py index e3bcdad9f4c7..424e160130f8 100644 --- a/tests/framework/jailer.py +++ b/tests/framework/jailer.py @@ -7,7 +7,7 @@ import stat from pathlib import Path -from retry.api import retry_call +from tenacity import Retrying, retry_if_exception_type, stop_after_delay from framework import defs, utils from framework.defs import FC_BINARY_NAME @@ -226,13 +226,13 @@ def cleanup(self): # Obtain the tasks from each cgroup and wait on them before # removing the microvm's associated cgroup folder. try: - retry_call( - f=self._kill_cgroup_tasks, - fargs=[controller], - exceptions=TimeoutError, - max_delay=5, - logger=None, - ) + for attempt in Retrying( + retry=retry_if_exception_type(TimeoutError), + stop=stop_after_delay(5), + reraise=True, + ): + with attempt: + self._kill_cgroup_tasks(controller) except TimeoutError: pass @@ -271,3 +271,8 @@ def _kill_cgroup_tasks(self, controller): if os.path.exists("/proc/{}".format(task)): raise TimeoutError return True + + @property + def pid_file(self): + """Return the PID file of the jailed process""" + return Path(self.chroot_path()) / (self.exec_file.name + ".pid") diff --git a/tests/framework/microvm.py b/tests/framework/microvm.py index 3de4056a611b..19ebe11d2c6e 100644 --- a/tests/framework/microvm.py +++ b/tests/framework/microvm.py @@ -17,6 +17,8 @@ import re import select import shutil +import signal +import subprocess import time import uuid from collections import namedtuple @@ -26,13 +28,13 @@ from pathlib import Path from typing import Optional -from retry import retry +from tenacity import retry, stop_after_attempt, wait_fixed import host_tools.cargo_build as build_tools import host_tools.network as net_tools from framework import utils from framework.artifacts import NetIfaceConfig -from framework.defs import FC_PID_FILE_NAME, MAX_API_CALL_DURATION_MS +from framework.defs import MAX_API_CALL_DURATION_MS from framework.http_api import Api from framework.jailer import JailerContext from framework.microvm_helpers import MicrovmHelpers @@ -149,46 +151,44 @@ class Microvm: def __init__( self, - resource_path, - fc_binary_path, - jailer_binary_path, - microvm_id=None, - bin_cloner_path=None, - monitor_memory=True, + resource_path: Path, + microvm_id: str, + fc_binary_path: Path, + jailer_binary_path: Path, + monitor_memory: bool = True, ): """Set up microVM attributes, paths, and data structures.""" # pylint: disable=too-many-statements # Unique identifier for this machine. - if microvm_id is None: - microvm_id = str(uuid.uuid4()) + assert microvm_id is not None self._microvm_id = microvm_id # Compose the paths to the resources specific to this microvm. - self._path = os.path.join(resource_path, microvm_id) - os.makedirs(self._path, exist_ok=True) + self._path = resource_path / microvm_id + self._path.mkdir(parents=True, exist_ok=True) self.kernel_file = None self.rootfs_file = None self.ssh_key = None self.initrd_file = None self.boot_args = None - self._fc_binary_path = str(fc_binary_path) + self._fc_binary_path = Path(fc_binary_path) assert fc_binary_path.exists() - self._jailer_binary_path = str(jailer_binary_path) + self._jailer_binary_path = Path(jailer_binary_path) assert jailer_binary_path.exists() # Create the jailer context associated with this microvm. 
self.jailer = JailerContext( jailer_id=self._microvm_id, exec_file=self._fc_binary_path, + new_pid_ns=True, ) - self.jailer_clone_pid = None # Copy the /etc/localtime file in the jailer root self.jailer.jailed_path("/etc/localtime", subdir="etc") - # Initialize the logging subsystem. self._screen_pid = None + self._screen_firecracker_pid = None self.time_api_requests = global_props.host_linux_version != "6.1" # disable the HTTP API timings as they cause a lot of false positives @@ -202,6 +202,7 @@ def __init__( self.api = None self.log_file = None self.metrics_file = None + self._spawned = False # device dictionaries self.iface = {} @@ -209,9 +210,6 @@ def __init__( self.vcpus_count = None self.mem_size_bytes = None - # External clone/exec tool, because Python can't into clone - self.bin_cloner_path = bin_cloner_path - # Flag checked in destructor to see abnormal signal-induced crashes. self.expect_kill_by_signal = False @@ -237,10 +235,11 @@ def kill(self): LOG.error(self.log_data) if self.jailer.daemonize: - if self.jailer_clone_pid: - utils.run_cmd( - "kill -9 {}".format(self.jailer_clone_pid), ignore_return_code=True - ) + if self.firecracker_pid: + try: + os.kill(self.firecracker_pid, signal.SIGKILL) + except ProcessLookupError: + pass else: # Killing screen will send SIGHUP to underlying Firecracker. # Needed to avoid false positives in case kill() is called again. @@ -250,12 +249,6 @@ def kill(self): if self.time_api_requests: self._validate_api_response_times() - # Check if Firecracker was launched by the jailer in a new pid ns. - if self.jailer.new_pid_ns: - # We need to explicitly kill the Firecracker pid, since it's - # different from the jailer pid that was previously killed. - utils.run_cmd(f"kill -9 {self.pid_in_new_ns}", ignore_return_code=True) - if self.memory_monitor: if self.memory_monitor.is_alive(): self.memory_monitor.signal_stop() @@ -347,18 +340,25 @@ def state(self): return self.api.describe.get().json()["state"] @property - @retry(delay=0.1, tries=5, logger=None) def pid_in_new_ns(self): """Get the pid of the Firecracker process in the new namespace. Reads the pid from a file created by jailer with `--new-pid-ns` flag. """ # Check if the pid file exists. - pid_file_path = Path(f"{self.jailer.chroot_path()}/{FC_PID_FILE_NAME}") - assert pid_file_path.exists() + assert self.jailer.pid_file.exists() # Read the PID stored inside the file. - return int(pid_file_path.read_text(encoding="ascii")) + return int(self.jailer.pid_file.read_text(encoding="ascii")) + + @property + def firecracker_pid(self): + """Return Firecracker's PID""" + if not self._spawned: + return None + if self.jailer.new_pid_ns: + return self.pid_in_new_ns + return self._screen_firecracker_pid @property def dimensions(self): @@ -415,9 +415,9 @@ def screen_pid(self): def pin_vmm(self, cpu_id: int) -> bool: """Pin the firecracker process VMM thread to a cpu list.""" - if self.jailer_clone_pid: + if self.firecracker_pid: for thread_name, thread_pids in utils.ProcessManager.get_threads( - self.jailer_clone_pid + self.firecracker_pid ).items(): # the firecracker thread should start with firecracker... 
if thread_name.startswith("firecracker"): @@ -428,8 +428,8 @@ def pin_vmm(self, cpu_id: int) -> bool: def pin_vcpu(self, vcpu_id: int, cpu_id: int): """Pin the firecracker vcpu thread to a cpu list.""" - if self.jailer_clone_pid: - for thread in utils.ProcessManager.get_threads(self.jailer_clone_pid)[ + if self.firecracker_pid: + for thread in utils.ProcessManager.get_threads(self.firecracker_pid)[ f"fc_vcpu {vcpu_id}" ]: utils.ProcessManager.set_cpu_affinity(thread, [cpu_id]) @@ -438,8 +438,8 @@ def pin_vcpu(self, vcpu_id: int, cpu_id: int): def pin_api(self, cpu_id: int): """Pin the firecracker process API server thread to a cpu list.""" - if self.jailer_clone_pid: - for thread in utils.ProcessManager.get_threads(self.jailer_clone_pid)[ + if self.firecracker_pid: + for thread in utils.ProcessManager.get_threads(self.firecracker_pid)[ "fc_api" ]: utils.ProcessManager.set_cpu_affinity(thread, [cpu_id]) @@ -486,34 +486,36 @@ def spawn( {"metadata": os.path.basename(self.metadata_file)} ) - jailer_param_list = self.jailer.construct_param_list() - if log_level != "Debug": # Checking the timings requires DEBUG level log messages self.time_api_requests = False # When the daemonize flag is on, we want to clone-exec into the - # jailer rather than executing it via spawning a shell. Going - # forward, we'll probably switch to this method for running - # Firecracker in general, because it represents the way it's meant - # to be run by customers (together with CLONE_NEWPID flag). - # - # We have to use an external tool for CLONE_NEWPID, because - # 1) Python doesn't provide os.clone() interface, and - # 2) Python's ctypes libc interface appears to be broken, causing - # our clone / exec to deadlock at some point. + # jailer rather than executing it via spawning a shell. if self.jailer.daemonize: - self.daemonize_jailer(jailer_param_list) + res = subprocess.Popen( + [str(self._jailer_binary_path)] + self.jailer.construct_param_list(), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + stdout, stderr = res.communicate() + if res.returncode != 0: + raise RuntimeError(res.returncode, stdout, stderr) else: - # This file will collect any output from 'screen'ed Firecracker. + # Run Firecracker under screen. This is used when we want to access + # the serial console. The file will collect the output from + # 'screen'ed Firecracker. + self.jailer.new_pid_ns = False screen_pid, binary_pid = utils.start_screen_process( self.screen_log, self.screen_session, self._jailer_binary_path, - jailer_param_list, + self.jailer.construct_param_list(), ) self._screen_pid = screen_pid - self.jailer_clone_pid = binary_pid + self._screen_firecracker_pid = binary_pid + + self._spawned = True # Wait for the jailer to create resources needed, and Firecracker to # create its API socket. @@ -525,19 +527,19 @@ def spawn( if self.log_file and log_level in ("Trace", "Debug", "Info"): self.check_log_message("Running Firecracker") - @retry(delay=0.2, tries=5, logger=None) + @retry(wait=wait_fixed(0.2), stop=stop_after_attempt(5), reraise=True) def _wait_create(self): """Wait until the API socket and chroot folder are available.""" os.stat(self.jailer.api_socket_path()) - @retry(delay=0.2, tries=5, logger=None) + @retry(wait=wait_fixed(0.2), stop=stop_after_attempt(5), reraise=True) def check_log_message(self, message): """Wait until `message` appears in logging output.""" assert ( message in self.log_data ), f'Message ("{message}") not found in log data ("{self.log_data}").' 
- @retry(delay=0.2, tries=5, logger=None) + @retry(wait=wait_fixed(0.2), stop=stop_after_attempt(5), reraise=True) def check_any_log_message(self, messages): """Wait until any message in `messages` appears in logging output.""" for message in messages: @@ -620,34 +622,6 @@ def basic_config( io_engine=rootfs_io_engine, ) - def daemonize_jailer(self, jailer_param_list): - """Daemonize the jailer.""" - if self.bin_cloner_path and self.jailer.new_pid_ns is not True: - cmd = ( - [self.bin_cloner_path] + [self._jailer_binary_path] + jailer_param_list - ) - _p = utils.run_cmd(cmd) - # Terrible hack to make the tests fail when starting the - # jailer fails with a panic. This is needed because we can't - # get the exit code of the jailer. In newpid_clone.c we are - # not waiting for the process and we always return 0 if the - # clone was successful (which in most cases will be) and we - # don't do anything if the jailer was not started - # successfully. - if _p.stderr.strip(): - raise Exception(_p.stderr) - self.jailer_clone_pid = int(_p.stdout.rstrip()) - else: - # Fallback mechanism for when we offload PID namespacing - # to the jailer. - _pid = os.fork() - if _pid == 0: - os.execv( - self._jailer_binary_path, - [self._jailer_binary_path] + jailer_param_list, - ) - self.jailer_clone_pid = _pid - def add_drive( self, drive_id, @@ -811,7 +785,7 @@ def ssh_iface(self, iface_idx=0): guest_ip = list(self.iface.values())[iface_idx]["iface"].guest_ip self.ssh_key = Path(self.ssh_key) return net_tools.SSHConnection( - netns_path=self.jailer.netns_file_path(), + netns=self.jailer.netns, ssh_key=self.ssh_key, user="root", host=guest_ip, @@ -826,19 +800,17 @@ def ssh(self): class MicroVMFactory: """MicroVM factory""" - def __init__(self, base_path, bin_cloner, fc_binary_path, jailer_binary_path): + def __init__(self, base_path: Path, fc_binary_path: Path, jailer_binary_path: Path): self.base_path = Path(base_path) - self.bin_cloner_path = bin_cloner self.vms = [] - self.fc_binary_path = fc_binary_path - self.jailer_binary_path = jailer_binary_path + self.fc_binary_path = Path(fc_binary_path) + self.jailer_binary_path = Path(jailer_binary_path) - def build(self, kernel=None, rootfs=None, microvm_id=None, **kwargs): + def build(self, kernel=None, rootfs=None, **kwargs): """Build a microvm""" vm = Microvm( resource_path=self.base_path, - microvm_id=microvm_id or str(uuid.uuid4()), - bin_cloner_path=self.bin_cloner_path, + microvm_id=kwargs.pop("microvm_id", str(uuid.uuid4())), fc_binary_path=kwargs.pop("fc_binary_path", self.fc_binary_path), jailer_binary_path=kwargs.pop( "jailer_binary_path", self.jailer_binary_path diff --git a/tests/framework/microvm_helpers.py b/tests/framework/microvm_helpers.py index c3889c585103..b03aeead4f1d 100644 --- a/tests/framework/microvm_helpers.py +++ b/tests/framework/microvm_helpers.py @@ -67,7 +67,7 @@ def gdbserver(self, port=2000): See https://sourceware.org/gdb/current/onlinedocs/gdb.html/Remote-Debugging.html#Remote-Debugging """ comm = f"localhost:{port}" - subprocess.Popen(["gdbserver", "--attach", comm, str(self.vm.jailer_clone_pid)]) + subprocess.Popen(["gdbserver", "--attach", comm, str(self.vm.firecracker_pid)]) print(f"Connect gdb with:\n\tgdb --ex 'target remote {DOCKER.ip}:{port}'") def lldbserver(self, port=2001): @@ -84,7 +84,7 @@ def lldbserver(self, port=2001): docker_apt_install("lldb") subprocess.Popen(["lldb-server", "p", "--listen", f"*:{port}", "--server"]) print( - f"Connect lldb with\n\tlldb -o 'platform select remote-linux' -o 'platform connect 
connect://{DOCKER.ip}:{port}' -o 'attach {self.vm.jailer_clone_pid}'" + f"Connect lldb with\n\tlldb -o 'platform select remote-linux' -o 'platform connect connect://{DOCKER.ip}:{port}' -o 'attach {self.vm.firecracker_pid}'" ) def tmux_neww(self, cmd: str): diff --git a/tests/framework/properties.py b/tests/framework/properties.py index 036d3743b424..f7ae4b8b3ffb 100644 --- a/tests/framework/properties.py +++ b/tests/framework/properties.py @@ -47,6 +47,8 @@ def get_host_os(kv: str = None): if kv is None: kv = platform.release() parts = kv.split("-") + if len(parts) < 2: + return None misc = parts[1].split(".") if len(misc) > 2 and misc[2] in {"amzn2", "amzn2023"}: return misc[2] diff --git a/tests/framework/utils.py b/tests/framework/utils.py index 802e002b12c0..184ca010cca3 100644 --- a/tests/framework/utils.py +++ b/tests/framework/utils.py @@ -21,8 +21,13 @@ import packaging.version import psutil -from retry import retry -from retry.api import retry_call +from tenacity import ( + Retrying, + retry, + retry_if_exception_type, + stop_after_attempt, + wait_fixed, +) from framework.defs import MIN_KERNEL_VERSION_FOR_IO_URING @@ -582,7 +587,7 @@ def run_guest_cmd(ssh_connection, cmd, expected, use_json=False): assert stdout == expected -@retry(delay=0.5, tries=5, logger=None) +@retry(wait=wait_fixed(0.5), stop=stop_after_attempt(5), reraise=True) def wait_process_termination(p_pid): """Wait for a process to terminate. @@ -701,18 +706,19 @@ def start_screen_process(screen_log, session_name, binary_path, binary_params): # Build a regex object to match (number).session_name regex_object = re.compile(r"([0-9]+)\.{}".format(session_name)) - # Run 'screen -ls' in a retry_call loop, 30 times with a 1s - # delay between calls. - # If the output of 'screen -ls' matches the regex object, it will - # return the PID. Otherwise, a RuntimeError will be raised. - screen_pid = retry_call( - search_output_from_cmd, - fkwargs={"cmd": "screen -ls", "find_regex": regex_object}, - exceptions=RuntimeError, - tries=30, - delay=1, - logger=None, - ).group(1) + # Run 'screen -ls' in a retry loop, 30 times with a 1s delay between calls. + # If the output of 'screen -ls' matches the regex object, it will return the + # PID. Otherwise, a RuntimeError will be raised. + for attempt in Retrying( + retry=retry_if_exception_type(RuntimeError), + stop=stop_after_attempt(30), + wait=wait_fixed(1), + reraise=True, + ): + with attempt: + screen_pid = search_output_from_cmd( + cmd="screen -ls", find_regex=regex_object + ).group(1) # Make sure the screen process launched successfully # As the parent process for the binary. @@ -725,7 +731,7 @@ def start_screen_process(screen_log, session_name, binary_path, binary_params): children_count = len(screen_ps.children()) if children_count != 1: raise RuntimeError( - f"Failed to retrieve child process id for binary {binary_path}. " + f"Failed to retrieve child process id for binary '{binary_path}' " f"screen session process had [{children_count}]" ) @@ -760,7 +766,7 @@ def check_entropy(ssh_connection): assert exit_code == 0, stderr -@retry(delay=0.5, tries=5, logger=None) +@retry(wait=wait_fixed(0.5), stop=stop_after_attempt(5), reraise=True) def wait_process_running(process): """Wait for a process to run. 
diff --git a/tests/framework/utils_iperf.py b/tests/framework/utils_iperf.py index 4b1b5691e41d..219928d68e3a 100644 --- a/tests/framework/utils_iperf.py +++ b/tests/framework/utils_iperf.py @@ -72,7 +72,7 @@ def run_test(self, first_free_cpu): futures = [] cpu_load_future = executor.submit( get_cpu_percent, - self._microvm.jailer_clone_pid, + self._microvm.firecracker_pid, # Ignore the final two data points as they are impacted by test teardown self._runtime - 2, self._omit, diff --git a/tests/host_tools/memory.py b/tests/host_tools/memory.py index 25badf06ded5..fe9e7af931ea 100644 --- a/tests/host_tools/memory.py +++ b/tests/host_tools/memory.py @@ -55,7 +55,7 @@ def run(self): guest_mem_bytes = self._vm.mem_size_bytes try: - ps = psutil.Process(self._vm.jailer_clone_pid) + ps = psutil.Process(self._vm.firecracker_pid) except psutil.NoSuchProcess: return while not self._should_stop: diff --git a/tests/host_tools/network.py b/tests/host_tools/network.py index 60f1624fb90f..6e3157b69c48 100644 --- a/tests/host_tools/network.py +++ b/tests/host_tools/network.py @@ -2,13 +2,11 @@ # SPDX-License-Identifier: Apache-2.0 """Utilities for test host microVM network setup.""" -import contextlib import random import string from pathlib import Path -from nsenter import Namespace -from retry import retry +from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_fixed from framework import utils @@ -25,9 +23,9 @@ class SSHConnection: ssh -i ssh_key_path username@hostname """ - def __init__(self, netns_path, ssh_key: Path, host, user): + def __init__(self, netns, ssh_key: Path, host, user): """Instantiate a SSH client and connect to a microVM.""" - self.netns_file_path = netns_path + self.netns = netns self.ssh_key = ssh_key # check that the key exists and the permissions are 0o400 # This saves a lot of debugging time. @@ -76,7 +74,12 @@ def scp_get(self, remote_path, local_path, recursive=False): opts.append("-r") self._scp(self.remote_path(remote_path), local_path, opts) - @retry(ConnectionError, delay=0.15, tries=20, logger=None) + @retry( + retry=retry_if_exception_type(ConnectionError), + wait=wait_fixed(0.15), + stop=stop_after_attempt(20), + reraise=True, + ) def _init_connection(self): """Create an initial SSH client connection (retry until it works). @@ -103,16 +106,9 @@ def run(self, cmd_string, timeout=None): def _exec(self, cmd, timeout=None): """Private function that handles the ssh client invocation.""" - - # TODO: If a microvm runs in a particular network namespace, we have to - # temporarily switch to that namespace when doing something that routes - # packets over the network, otherwise the destination will not be - # reachable. Use a better setup/solution at some point! - ctx = contextlib.nullcontext() - if self.netns_file_path is not None: - ctx = Namespace(self.netns_file_path, "net") - with ctx: - return utils.run_cmd(cmd, ignore_return_code=True, timeout=timeout) + if self.netns is not None: + cmd = ["ip", "netns", "exec", self.netns] + cmd + return utils.run_cmd(cmd, ignore_return_code=True, timeout=timeout) def mac_from_ip(ip_address): diff --git a/tests/host_tools/newpid_cloner.c b/tests/host_tools/newpid_cloner.c deleted file mode 100644 index 4d0cd7af51b5..000000000000 --- a/tests/host_tools/newpid_cloner.c +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. -// SPDX-License-Identifier: Apache-2.0 - -/* - * This is a very simple tool, used by the testing system to clone/exec into - * the jailer. 
- * All it does is - * - clone() into a new PID namespace, then - * - have the child process exec() into the binary received via command line, - * and - * - have the parent process print the child PID to stdout. - * - * Usage: ./newpid_cloner ... - * Example: ./newpid_cloner /bin/firecracker --api-sock /var/run/fire.sock - * - */ - -#define _GNU_SOURCE - -#include -#include -#include -#include -#include - - -#define CHILD_STACK_SIZE 4096 - - -int child_main(void *arg) { - char **argv = (char**)arg; - execv(argv[0], argv); -} - -int main(int argc, char *const argv[]) { - - char child_stack[CHILD_STACK_SIZE]; - int child_pid = child_pid = clone( - child_main, - (char*)child_stack + CHILD_STACK_SIZE, - CLONE_NEWPID, - ((char **)argv) + 1 - ); - - printf("%d", child_pid); - return (child_pid != -1) ? 0 : errno; -} diff --git a/tests/integration_tests/functional/test_api.py b/tests/integration_tests/functional/test_api.py index 4632076316cb..bd74610a91e7 100644 --- a/tests/integration_tests/functional/test_api.py +++ b/tests/integration_tests/functional/test_api.py @@ -380,7 +380,7 @@ def test_api_machine_config(test_microvm_with_api): test_microvm.basic_config() # Test mem_size_mib of valid type, but too large. - firecracker_pid = int(test_microvm.jailer_clone_pid) + firecracker_pid = test_microvm.firecracker_pid resource.prlimit( firecracker_pid, resource.RLIMIT_AS, (MEM_LIMIT, resource.RLIM_INFINITY) ) @@ -774,9 +774,9 @@ def test_send_ctrl_alt_del(test_microvm_with_api): test_microvm.api.actions.put(action_type="SendCtrlAltDel") - firecracker_pid = test_microvm.jailer_clone_pid + firecracker_pid = test_microvm.firecracker_pid - # If everyting goes as expected, the guest OS will issue a reboot, + # If everything goes as expected, the guest OS will issue a reboot, # causing Firecracker to exit. # We'll keep poking Firecracker for at most 30 seconds, waiting for it # to die. diff --git a/tests/integration_tests/functional/test_balloon.py b/tests/integration_tests/functional/test_balloon.py index 040dae5ea0cb..d01f6cdb8ace 100644 --- a/tests/integration_tests/functional/test_balloon.py +++ b/tests/integration_tests/functional/test_balloon.py @@ -7,14 +7,14 @@ from subprocess import TimeoutExpired import pytest -from retry import retry +from tenacity import retry, stop_after_attempt, wait_fixed from framework.utils import get_free_mem_ssh, run_cmd STATS_POLLING_INTERVAL_S = 1 -@retry(delay=0.5, tries=10, logger=None) +@retry(wait=wait_fixed(0.5), stop=stop_after_attempt(10), reraise=True) def get_stable_rss_mem_by_pid(pid, percentage_delta=1): """ Get the RSS memory that a guest uses, given the pid of the guest. @@ -88,7 +88,7 @@ def make_guest_dirty_memory(ssh_connection, amount_mib=32): def _test_rss_memory_lower(test_microvm, stable_delta=1): """Check inflating the balloon makes guest use less rss memory.""" # Get the firecracker pid, and open an ssh connection. - firecracker_pid = test_microvm.jailer_clone_pid + firecracker_pid = test_microvm.firecracker_pid ssh_connection = test_microvm.ssh # Using deflate_on_oom, get the RSS as low as possible @@ -152,7 +152,7 @@ def test_inflate_reduces_free(test_microvm_with_api): # Start the microvm test_microvm.start() - firecracker_pid = test_microvm.jailer_clone_pid + firecracker_pid = test_microvm.firecracker_pid # Get the free memory before ballooning. available_mem_deflated = get_free_mem_ssh(test_microvm.ssh) @@ -197,7 +197,7 @@ def test_deflate_on_oom(test_microvm_with_api, deflate_on_oom): # Start the microvm. 
test_microvm.start() - firecracker_pid = test_microvm.jailer_clone_pid + firecracker_pid = test_microvm.firecracker_pid # We get an initial reading of the RSS, then calculate the amount # we need to inflate the balloon with by subtracting it from the @@ -241,7 +241,7 @@ def test_reinflate_balloon(test_microvm_with_api): # Start the microvm. test_microvm.start() - firecracker_pid = test_microvm.jailer_clone_pid + firecracker_pid = test_microvm.firecracker_pid # First inflate the balloon to free up the uncertain amount of memory # used by the kernel at boot and establish a baseline, then give back @@ -302,7 +302,7 @@ def test_size_reduction(test_microvm_with_api): # Start the microvm. test_microvm.start() - firecracker_pid = test_microvm.jailer_clone_pid + firecracker_pid = test_microvm.firecracker_pid # Check memory usage. first_reading = get_stable_rss_mem_by_pid(firecracker_pid) @@ -344,7 +344,7 @@ def test_stats(test_microvm_with_api): # Start the microvm. test_microvm.start() - firecracker_pid = test_microvm.jailer_clone_pid + firecracker_pid = test_microvm.firecracker_pid # Get an initial reading of the stats. initial_stats = test_microvm.api.balloon_stats.get().json() @@ -404,7 +404,7 @@ def test_stats_update(test_microvm_with_api): # Start the microvm. test_microvm.start() - firecracker_pid = test_microvm.jailer_clone_pid + firecracker_pid = test_microvm.firecracker_pid # Dirty 30MB of pages. make_guest_dirty_memory(test_microvm.ssh, amount_mib=30) @@ -460,7 +460,7 @@ def test_balloon_snapshot(microvm_factory, guest_kernel, rootfs): time.sleep(1) # Get the firecracker pid, and open an ssh connection. - firecracker_pid = vm.jailer_clone_pid + firecracker_pid = vm.firecracker_pid # Check memory usage. first_reading = get_stable_rss_mem_by_pid(firecracker_pid) @@ -485,7 +485,7 @@ def test_balloon_snapshot(microvm_factory, guest_kernel, rootfs): microvm.ssh.run("true") # Get the firecracker from snapshot pid, and open an ssh connection. - firecracker_pid = microvm.jailer_clone_pid + firecracker_pid = microvm.firecracker_pid # Wait out the polling interval, then get the updated stats. time.sleep(STATS_POLLING_INTERVAL_S) @@ -561,7 +561,7 @@ def test_memory_scrub(microvm_factory, guest_kernel, rootfs): microvm.api.balloon.patch(amount_mib=60) # Get the firecracker pid, and open an ssh connection. - firecracker_pid = microvm.jailer_clone_pid + firecracker_pid = microvm.firecracker_pid # Wait for the inflate to complete. _ = get_stable_rss_mem_by_pid(firecracker_pid) diff --git a/tests/integration_tests/functional/test_cmd_line_start.py b/tests/integration_tests/functional/test_cmd_line_start.py index 8ad60b46dbc3..8250d4417106 100644 --- a/tests/integration_tests/functional/test_cmd_line_start.py +++ b/tests/integration_tests/functional/test_cmd_line_start.py @@ -11,7 +11,7 @@ from pathlib import Path import pytest -from retry.api import retry_call +from tenacity import Retrying, retry_if_exception_type, stop_after_attempt, wait_fixed from framework import utils, utils_cpuid from framework.utils import generate_mmds_get_request, generate_mmds_session_token @@ -135,26 +135,23 @@ def test_config_start_no_api(uvm_plain, vm_config_file): test_microvm.jailer.extra_args.update({"no-api": None}) test_microvm.spawn() - # Get Firecracker PID so we can check the names of threads. - firecracker_pid = test_microvm.jailer_clone_pid - # Get names of threads in Firecracker. 
- cmd = "ps -T --no-headers -p {} | awk '{{print $5}}'".format(firecracker_pid) + cmd = f"ps -T --no-headers -p {test_microvm.firecracker_pid} | awk '{{print $5}}'" # Retry running 'ps' in case it failed to list the firecracker process # The regex matches any expression that contains 'firecracker' and does # not contain 'fc_api' - retry_call( - utils.search_output_from_cmd, - fkwargs={ - "cmd": cmd, - "find_regex": re.compile("^(?!.*fc_api)(?:.*)?firecracker", re.DOTALL), - }, - exceptions=RuntimeError, - tries=10, - delay=1, - logger=None, - ) + for attempt in Retrying( + retry=retry_if_exception_type(RuntimeError), + stop=stop_after_attempt(10), + wait=wait_fixed(1), + reraise=True, + ): + with attempt: + utils.search_output_from_cmd( + cmd=cmd, + find_regex=re.compile("^(?!.*fc_api)(?:.*)?firecracker", re.DOTALL), + ) @pytest.mark.parametrize("vm_config_file", ["framework/vm_config_network.json"]) @@ -233,7 +230,7 @@ def test_config_machine_config_params(uvm_plain, test_config): ) else: test_microvm.check_log_message( - "Successfully started microvm that was configured " "from one single json" + "Successfully started microvm that was configured from one single json" ) diff --git a/tests/integration_tests/functional/test_error_code.py b/tests/integration_tests/functional/test_error_code.py index 1675ba9831d4..19836a1b764e 100644 --- a/tests/integration_tests/functional/test_error_code.py +++ b/tests/integration_tests/functional/test_error_code.py @@ -32,7 +32,7 @@ def test_enosys_error_code(uvm_plain): vm.start() # Check if FC process is closed - wait_process_termination(vm.jailer_clone_pid) + wait_process_termination(vm.firecracker_pid) vm.check_log_message( "Received ENOSYS error because KVM failed to emulate an instruction." diff --git a/tests/integration_tests/functional/test_serial_io.py b/tests/integration_tests/functional/test_serial_io.py index 375b28fd3431..9e82e5449605 100644 --- a/tests/integration_tests/functional/test_serial_io.py +++ b/tests/integration_tests/functional/test_serial_io.py @@ -151,13 +151,13 @@ def test_serial_dos(test_microvm_with_api): microvm.start() # Open an fd for firecracker process terminal. - tty_path = f"/proc/{microvm.jailer_clone_pid}/fd/0" + tty_path = f"/proc/{microvm.firecracker_pid}/fd/0" tty_fd = os.open(tty_path, os.O_RDWR) # Check if the total memory size changed. - before_size = get_total_mem_size(microvm.jailer_clone_pid) + before_size = get_total_mem_size(microvm.firecracker_pid) send_bytes(tty_fd, 100000000, timeout=1) - after_size = get_total_mem_size(microvm.jailer_clone_pid) + after_size = get_total_mem_size(microvm.firecracker_pid) assert before_size == after_size, ( "The memory size of the " "Firecracker process " diff --git a/tests/integration_tests/functional/test_shut_down.py b/tests/integration_tests/functional/test_shut_down.py index 0ec77c1e3eaa..adf592ece607 100644 --- a/tests/integration_tests/functional/test_shut_down.py +++ b/tests/integration_tests/functional/test_shut_down.py @@ -29,7 +29,7 @@ def test_reboot(test_microvm_with_api): vm.start() # Get Firecracker PID so we can count the number of threads. 
- firecracker_pid = vm.jailer_clone_pid + firecracker_pid = vm.firecracker_pid # Get number of threads in Firecracker cmd = "ps -o nlwp {} | tail -1 | awk '{{print $1}}'".format(firecracker_pid) diff --git a/tests/integration_tests/functional/test_signals.py b/tests/integration_tests/functional/test_signals.py index 836b2e92ce74..89b75179b44b 100644 --- a/tests/integration_tests/functional/test_signals.py +++ b/tests/integration_tests/functional/test_signals.py @@ -39,7 +39,6 @@ def test_generic_signal_handler(test_microvm_with_api, signum): microvm.basic_config() microvm.start() - firecracker_pid = int(microvm.jailer_clone_pid) sleep(0.5) metrics_jail_path = microvm.metrics_file @@ -47,7 +46,7 @@ def test_generic_signal_handler(test_microvm_with_api, signum): line_metrics = metrics_fd.readlines() assert len(line_metrics) == 1 - os.kill(firecracker_pid, signum) + os.kill(microvm.firecracker_pid, signum) # Firecracker gracefully handles SIGPIPE (doesn't terminate). if signum == int(SIGPIPE): msg = "Received signal 13" @@ -57,7 +56,7 @@ def test_generic_signal_handler(test_microvm_with_api, signum): else: microvm.expect_kill_by_signal = True # Ensure that the process was terminated. - utils.wait_process_termination(firecracker_pid) + utils.wait_process_termination(microvm.firecracker_pid) msg = "Shutting down VM after intercepting signal {}".format(signum) microvm.check_log_message(msg) @@ -88,7 +87,7 @@ def test_sigxfsz_handler(uvm_plain_rw): line_metrics = metrics_fd.readlines() assert len(line_metrics) == 1 - firecracker_pid = int(microvm.jailer_clone_pid) + firecracker_pid = microvm.firecracker_pid size = os.path.getsize(metrics_jail_path) # The SIGXFSZ is triggered because the size of rootfs is bigger than # the size of metrics file times 3. Since the metrics file is flushed @@ -123,7 +122,6 @@ def test_handled_signals(test_microvm_with_api): microvm.basic_config(vcpu_count=2) microvm.add_net_iface() microvm.start() - firecracker_pid = int(microvm.jailer_clone_pid) # Open a SSH connection to validate the microVM stays alive. # Just validate a simple command: `nproc` @@ -136,7 +134,7 @@ def test_handled_signals(test_microvm_with_api): # The 35 is the SIGRTMIN for musl libc. # We hardcode this value since the SIGRTMIN python reports # is 34, which is likely the one for glibc. - os.kill(firecracker_pid, 35) + os.kill(microvm.firecracker_pid, 35) # Validate the microVM is still up and running. 
_, stdout, stderr = microvm.ssh.run(cmd) diff --git a/tests/integration_tests/functional/test_snapshot_basic.py b/tests/integration_tests/functional/test_snapshot_basic.py index 7975b98d8244..14da9f259a6e 100644 --- a/tests/integration_tests/functional/test_snapshot_basic.py +++ b/tests/integration_tests/functional/test_snapshot_basic.py @@ -188,7 +188,7 @@ def test_load_snapshot_failure_handling(test_microvm_with_api): vm.api.snapshot_load.put(mem_file_path=jailed_mem, snapshot_path=jailed_vmstate) # Check if FC process is closed - wait_process_termination(vm.jailer_clone_pid) + wait_process_termination(vm.firecracker_pid) def test_cmp_full_and_first_diff_mem(microvm_factory, guest_kernel, rootfs): diff --git a/tests/integration_tests/functional/test_snapshot_restore_cross_kernel.py b/tests/integration_tests/functional/test_snapshot_restore_cross_kernel.py index 110d4d8fe6f4..d5535a27a33b 100644 --- a/tests/integration_tests/functional/test_snapshot_restore_cross_kernel.py +++ b/tests/integration_tests/functional/test_snapshot_restore_cross_kernel.py @@ -26,7 +26,7 @@ def _test_balloon(microvm): # Get the firecracker pid. - firecracker_pid = microvm.jailer_clone_pid + firecracker_pid = microvm.firecracker_pid # Check memory usage. first_reading = get_stable_rss_mem_by_pid(firecracker_pid) diff --git a/tests/integration_tests/performance/test_block_performance.py b/tests/integration_tests/performance/test_block_performance.py index c6660a177e2f..4c7fad8ad55e 100644 --- a/tests/integration_tests/performance/test_block_performance.py +++ b/tests/integration_tests/performance/test_block_performance.py @@ -125,7 +125,7 @@ def run_fio(env_id, basevm, mode, bs): with concurrent.futures.ThreadPoolExecutor() as executor: cpu_load_future = executor.submit( get_cpu_percent, - basevm.jailer_clone_pid, + basevm.firecracker_pid, RUNTIME_SEC, omit=WARMUP_SEC, ) diff --git a/tests/integration_tests/performance/test_memory_overhead.py b/tests/integration_tests/performance/test_memory_overhead.py index eb12d6a36d4f..da76f88f36e3 100644 --- a/tests/integration_tests/performance/test_memory_overhead.py +++ b/tests/integration_tests/performance/test_memory_overhead.py @@ -57,7 +57,7 @@ def test_memory_overhead( mem_stats = defaultdict(int) mem_stats["guest_memory"] = guest_mem_bytes - ps = psutil.Process(microvm.jailer_clone_pid) + ps = psutil.Process(microvm.firecracker_pid) for pmmap in ps.memory_maps(grouped=False): # We publish 'size' and 'rss' (resident). size would be the worst case, diff --git a/tests/integration_tests/performance/test_process_startup_time.py b/tests/integration_tests/performance/test_process_startup_time.py index 7daf7a81143d..d7418068ec3d 100644 --- a/tests/integration_tests/performance/test_process_startup_time.py +++ b/tests/integration_tests/performance/test_process_startup_time.py @@ -36,7 +36,6 @@ def test_startup_time_new_pid_ns(test_microvm_with_api, startup_time): Check startup time when jailer is spawned in a new PID namespace. """ microvm = test_microvm_with_api - microvm.bin_cloner_path = None microvm.jailer.new_pid_ns = True startup_time(_test_startup_time(microvm)) diff --git a/tests/integration_tests/performance/test_rate_limiter.py b/tests/integration_tests/performance/test_rate_limiter.py index 0bd83c8c6dcd..1fc2868603c0 100644 --- a/tests/integration_tests/performance/test_rate_limiter.py +++ b/tests/integration_tests/performance/test_rate_limiter.py @@ -122,8 +122,8 @@ def test_rx_rate_limiting_cpu_load(test_microvm_with_api): # the first one created. 
# A possible improvement is to find it by name. cpu_load_monitor = cpu_load.CpuLoadMonitor( - process_pid=test_microvm.jailer_clone_pid, - thread_pid=test_microvm.jailer_clone_pid, + process_pid=test_microvm.firecracker_pid, + thread_pid=test_microvm.firecracker_pid, threshold=20, ) with cpu_load_monitor: diff --git a/tests/integration_tests/security/test_custom_seccomp.py b/tests/integration_tests/security/test_custom_seccomp.py index 36217a242a05..189789dce981 100644 --- a/tests/integration_tests/security/test_custom_seccomp.py +++ b/tests/integration_tests/security/test_custom_seccomp.py @@ -79,7 +79,7 @@ def test_allow_all(test_microvm_with_api): test_microvm.start() - utils.assert_seccomp_level(test_microvm.jailer_clone_pid, "2") + utils.assert_seccomp_level(test_microvm.firecracker_pid, "2") def test_working_filter(test_microvm_with_api): @@ -140,7 +140,7 @@ def test_working_filter(test_microvm_with_api): test_microvm.start() # level should be 2, with no additional errors - utils.assert_seccomp_level(test_microvm.jailer_clone_pid, "2") + utils.assert_seccomp_level(test_microvm.firecracker_pid, "2") def test_failing_filter(test_microvm_with_api): @@ -206,7 +206,7 @@ def test_failing_filter(test_microvm_with_api): assert num_faults >= 1 # assert that the process was killed - assert not psutil.pid_exists(test_microvm.jailer_clone_pid) + assert not psutil.pid_exists(test_microvm.firecracker_pid) @pytest.mark.parametrize("vm_config_file", ["framework/vm_config.json"]) @@ -234,4 +234,4 @@ def test_invalid_bpf(test_microvm_with_api, vm_config_file): time.sleep(1) # assert that the process was killed - assert not psutil.pid_exists(test_microvm.jailer_clone_pid) + assert not psutil.pid_exists(test_microvm.firecracker_pid) diff --git a/tests/integration_tests/security/test_jail.py b/tests/integration_tests/security/test_jail.py index 57bb55382201..cf0f8b53d242 100644 --- a/tests/integration_tests/security/test_jail.py +++ b/tests/integration_tests/security/test_jail.py @@ -477,11 +477,9 @@ def test_args_default_resource_limits(test_microvm_with_api): Test the default resource limits are correctly set by the jailer. """ test_microvm = test_microvm_with_api - test_microvm.spawn() - # Get firecracker's PID - pid = int(test_microvm.jailer_clone_pid) + pid = test_microvm.firecracker_pid assert pid != 0 # Fetch firecracker process limits for number of open fds @@ -503,11 +501,9 @@ def test_args_resource_limits(test_microvm_with_api): """ test_microvm = test_microvm_with_api test_microvm.jailer.resource_limits = RESOURCE_LIMITS - test_microvm.spawn() - # Get firecracker's PID - pid = int(test_microvm.jailer_clone_pid) + pid = test_microvm.firecracker_pid assert pid != 0 # Check limit values were correctly set. @@ -560,7 +556,7 @@ def test_negative_file_size_limit(uvm_plain): test_microvm.check_log_message(msg) time.sleep(1) # Check that the process was terminated. 
- assert not psutil.pid_exists(test_microvm.jailer_clone_pid) + assert not psutil.pid_exists(test_microvm.firecracker_pid) else: assert False, "Negative test failed" @@ -575,9 +571,8 @@ def test_negative_no_file_limit(test_microvm_with_api): # pylint: disable=W0703 try: test_microvm.spawn() - except Exception as error: + except RuntimeError as error: assert "No file descriptors available (os error 24)" in str(error) - assert test_microvm.jailer_clone_pid is None else: assert False, "Negative test failed" @@ -587,14 +582,11 @@ def test_new_pid_ns_resource_limits(test_microvm_with_api): Test that Firecracker process inherits jailer resource limits. """ test_microvm = test_microvm_with_api - - test_microvm.jailer.new_pid_ns = True test_microvm.jailer.resource_limits = RESOURCE_LIMITS - test_microvm.spawn() # Get Firecracker's PID. - fc_pid = test_microvm.pid_in_new_ns + fc_pid = test_microvm.firecracker_pid # Check limit values were correctly set. check_limits(fc_pid, NOFILE, FSIZE) @@ -605,13 +597,9 @@ def test_new_pid_namespace(test_microvm_with_api): Test that Firecracker is spawned in a new PID namespace if requested. """ test_microvm = test_microvm_with_api - - test_microvm.jailer.new_pid_ns = True - test_microvm.spawn() - # Check that the PID file exists. - fc_pid = test_microvm.pid_in_new_ns + fc_pid = test_microvm.firecracker_pid # Validate the PID. stdout = subprocess.check_output("pidof firecracker", shell=True) diff --git a/tests/integration_tests/security/test_seccomp.py b/tests/integration_tests/security/test_seccomp.py index 5a0b3e138844..28be679268b6 100644 --- a/tests/integration_tests/security/test_seccomp.py +++ b/tests/integration_tests/security/test_seccomp.py @@ -248,7 +248,7 @@ def test_no_seccomp(test_microvm_with_api): test_microvm.start() - utils.assert_seccomp_level(test_microvm.jailer_clone_pid, "0") + utils.assert_seccomp_level(test_microvm.firecracker_pid, "0") def test_default_seccomp_level(test_microvm_with_api): @@ -262,7 +262,7 @@ def test_default_seccomp_level(test_microvm_with_api): test_microvm.start() - utils.assert_seccomp_level(test_microvm.jailer_clone_pid, "2") + utils.assert_seccomp_level(test_microvm.firecracker_pid, "2") def test_seccomp_rust_panic(bin_seccomp_paths): diff --git a/tools/create_snapshot_artifact/main.py b/tools/create_snapshot_artifact/main.py index 7a060ed02bf0..af80004be5c9 100755 --- a/tools/create_snapshot_artifact/main.py +++ b/tools/create_snapshot_artifact/main.py @@ -92,7 +92,7 @@ def main(): print("Cleanup") shutil.rmtree(SNAPSHOT_ARTIFACTS_ROOT_DIR, ignore_errors=True) root_path = tempfile.mkdtemp(dir=DEFAULT_TEST_SESSION_ROOT_PATH) - vm_factory = MicroVMFactory(root_path, None, *get_firecracker_binaries()) + vm_factory = MicroVMFactory(root_path, *get_firecracker_binaries()) cpu_templates = ["None"] if get_cpu_vendor() == CpuVendor.INTEL: diff --git a/tools/devctr/poetry.lock b/tools/devctr/poetry.lock index c11fe121d2ce..a3c2f3782bff 100644 --- a/tools/devctr/poetry.lock +++ b/tools/devctr/poetry.lock @@ -133,17 +133,6 @@ files = [ {file = "appnope-0.1.3.tar.gz", hash = "sha256:02bd91c4de869fbb1e1c50aafc4098827a7a54ab2f39d9dcba6c9547ed920e24"}, ] -[[package]] -name = "argparse" -version = "1.4.0" -description = "Python command-line parsing library" -optional = false -python-versions = "*" -files = [ - {file = "argparse-1.4.0-py2.py3-none-any.whl", hash = "sha256:c31647edb69fd3d465a847ea3157d37bed1f95f19760b11a47aa91c04b666314"}, - {file = "argparse-1.4.0.tar.gz", hash = 
"sha256:62b089a55be1d8949cd2bc7e0df0bddb9e028faefc8c32038cc84862aefdd6e4"}, -] - [[package]] name = "arrow" version = "1.2.3" @@ -445,17 +434,6 @@ files = [ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] -[[package]] -name = "contextlib2" -version = "21.6.0" -description = "Backports and enhancements for the contextlib module" -optional = false -python-versions = ">=3.6" -files = [ - {file = "contextlib2-21.6.0-py2.py3-none-any.whl", hash = "sha256:3fbdb64466afd23abaf6c977627b75b6139a5a3e8ce38405c5b413aed7a0471f"}, - {file = "contextlib2-21.6.0.tar.gz", hash = "sha256:ab1e2bfe1d01d968e1b7e8d9023bc51ef3509bba217bb730cee3827e1ee82869"}, -] - [[package]] name = "decorator" version = "5.1.1" @@ -932,22 +910,6 @@ files = [ {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"}, ] -[[package]] -name = "nsenter" -version = "0.2" -description = "Enter kernel namespaces from Python" -optional = false -python-versions = "*" -files = [ - {file = "nsenter-0.2-py3-none-any.whl", hash = "sha256:d44ba70199f9b454139f2093141e63218bca5b1285c65fcf6e4b139116088867"}, - {file = "nsenter-0.2.tar.gz", hash = "sha256:876a18cb03de85948e4cd72fd4cfda4879561b7264f5722603f6437d452a25cb"}, -] - -[package.dependencies] -argparse = "*" -contextlib2 = "*" -pathlib = "*" - [[package]] name = "numpy" version = "1.24.3" @@ -1121,17 +1083,6 @@ files = [ {file = "pathable-0.4.3.tar.gz", hash = "sha256:5c869d315be50776cc8a993f3af43e0c60dc01506b399643f919034ebf4cdcab"}, ] -[[package]] -name = "pathlib" -version = "1.0.1" -description = "Object-oriented filesystem paths" -optional = false -python-versions = "*" -files = [ - {file = "pathlib-1.0.1-py3-none-any.whl", hash = "sha256:f35f95ab8b0f59e6d354090350b44a80a80635d22efdedfa84c7ad1cf0a74147"}, - {file = "pathlib-1.0.1.tar.gz", hash = "sha256:6940718dfc3eff4258203ad5021090933e5c04707d5ca8cc9e73c94a7894ea9f"}, -] - [[package]] name = "pathspec" version = "0.11.2" @@ -1263,17 +1214,6 @@ files = [ [package.extras] tests = ["pytest"] -[[package]] -name = "py" -version = "1.11.0" -description = "library with cross-python path, ini-parsing, io, code, log facilities" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" -files = [ - {file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"}, - {file = "py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719"}, -] - [[package]] name = "pygments" version = "2.16.1" @@ -1571,21 +1511,6 @@ files = [ [package.dependencies] requests = ">=1.1" -[[package]] -name = "retry" -version = "0.9.2" -description = "Easy to use retry decorator." 
-optional = false -python-versions = "*" -files = [ - {file = "retry-0.9.2-py2.py3-none-any.whl", hash = "sha256:ccddf89761fa2c726ab29391837d4327f819ea14d244c232a1d24c67a2f98606"}, - {file = "retry-0.9.2.tar.gz", hash = "sha256:f8bfa8b99b69c4506d6f5bd3b0aabf77f98cdb17f3c9fc3f5ca820033336fba4"}, -] - -[package.dependencies] -decorator = ">=3.4.2" -py = ">=1.4.26,<2.0.0" - [[package]] name = "rfc3339-validator" version = "0.1.4" diff --git a/tools/devctr/pyproject.toml b/tools/devctr/pyproject.toml index 2838290b9cfa..f4b902b12320 100644 --- a/tools/devctr/pyproject.toml +++ b/tools/devctr/pyproject.toml @@ -12,7 +12,6 @@ click = "8.1.3" gitlint = "^0.19.1" ipython = "^8.15.0" isort = "^5.12.0" -nsenter = "^0.2" # numpy >1.25.0,<=1.25.2 trigger an illegal instruction on AL2 4.14 on c7g.metal instances, due to these instances # not advertising SVE support. See https://github.com/numpy/numpy/issues/24028 numpy = "1.24.3" @@ -29,7 +28,6 @@ python = "3.10.*" PyYAML = "^6.0" requests = "^2.31.0" requests-unixsocket = "^0.3.0" -retry = "^0.9.2" scipy = "^1.11.2" setproctitle = "^1.3.2" tenacity = "^8.2.2" diff --git a/tools/sandbox.py b/tools/sandbox.py index 655c55338878..323f2e55f532 100755 --- a/tools/sandbox.py +++ b/tools/sandbox.py @@ -2,12 +2,15 @@ # Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 +# pylint:disable=invalid-name + """ Run Firecracker in an IPython REPL """ import argparse import re +from pathlib import Path from framework.artifacts import disks, kernels from framework.microvm import MicroVMFactory @@ -50,12 +53,19 @@ def parse_byte_size(param): default=128 * 2**20, # 128MB ) parser.add_argument("--rootfs-size", type=parse_byte_size, default=1 * 2**30) # 1GB +parser.add_argument("--binary-dir", help="Path to the firecracker binaries") args = parser.parse_args() print(args) +bins = None +if args.binary_dir: + binary_dir = Path(args.binary_dir).resolve() + bins = binary_dir / "firecracker", binary_dir / "jailer" +else: + bins = get_firecracker_binaries() print("This step may take a while to compile Firecracker ...") -vmfcty = MicroVMFactory("/srv", None, *get_firecracker_binaries()) +vmfcty = MicroVMFactory("/srv", *bins) uvm = vmfcty.build(args.kernel, args.rootfs) uvm.help.enable_console() uvm.help.resize_disk(uvm.rootfs_file, args.rootfs_size) diff --git a/tools/test-popular-containers/test-docker-rootfs.py b/tools/test-popular-containers/test-docker-rootfs.py index 0a6b783c0a68..16dc58d8bff4 100755 --- a/tools/test-popular-containers/test-docker-rootfs.py +++ b/tools/test-popular-containers/test-docker-rootfs.py @@ -26,7 +26,7 @@ # Use the latest guest kernel kernel = kernels[-1] -vmfcty = MicroVMFactory("/srv", None, *get_firecracker_binaries()) +vmfcty = MicroVMFactory("/srv", *get_firecracker_binaries()) # (may take a while to compile Firecracker...) for rootfs in Path(".").glob("*.ext4"):