diff --git a/nixops/args.py b/nixops/args.py index 97ade4469..74b5dafea 100644 --- a/nixops/args.py +++ b/nixops/args.py @@ -352,6 +352,12 @@ subparser.add_argument( "args", metavar="SSH_ARGS", nargs=REMAINDER, help="SSH flags and/or command", ) +subparser.add_argument( + "--now", + dest="now", + action="store_true", + help="do not acquire a lock before fetching the state", +) subparser = add_subparser( subparsers, "ssh-for-each", help="execute a command on each machine via SSH" @@ -443,14 +449,12 @@ "--freeze", dest="freeze_fs", action="store_true", - default=False, help="freeze filesystems for non-root filesystems that support this (e.g. xfs)", ) subparser.add_argument( "--force", dest="force", action="store_true", - default=False, help="start new backup even if previous is still running", ) subparser.add_argument( @@ -478,17 +482,12 @@ help="do not perform backup actions on the specified machines", ) subparser.add_argument( - "--wait", - dest="wait", - action="store_true", - default=False, - help="wait until backup is finished", + "--wait", dest="wait", action="store_true", help="wait until backup is finished", ) subparser.add_argument( "--latest", dest="latest", action="store_true", - default=False, help="show status of latest backup only", ) @@ -498,7 +497,6 @@ subparser.add_argument( "--keep-physical", dest="keep_physical", - default=False, action="store_true", help="do not remove the physical backups, only remove backups from nixops state", ) @@ -518,7 +516,6 @@ subparser.add_argument( "--keep-physical", dest="keep_physical", - default=False, action="store_true", help="do not remove the physical backups, only remove backups from nixops state", ) diff --git a/nixops/locks/__init__.py b/nixops/locks/__init__.py index c3ba726c5..c0850768b 100644 --- a/nixops/locks/__init__.py +++ b/nixops/locks/__init__.py @@ -2,10 +2,35 @@ from typing_extensions import Protocol +""" +Interface to a lock driver. 
+
+An implementation should inherit from LockDriver in order for a plugin to be
+able to integrate it.
+"""
+
+
+# This separation was introduced to hide the LockOptions details from the
+# LockInterface type. It only matters for construction and clients don't have
+# to know about it.
+class LockInterface(Protocol):
+    # lock: acquire a lock.
+    # Note: no arguments will be passed over kwargs. Making it part of
+    # the type definition allows adding new arguments later.
+    def lock(self, description: str, exclusive: bool, **kwargs) -> None:
+        raise NotImplementedError
+
+    # unlock: release the lock.
+    # Note: no arguments will be passed over kwargs. Making it part of
+    # the type definition allows adding new arguments later.
+    def unlock(self, **kwargs) -> None:
+        raise NotImplementedError
+
+
 LockOptions = TypeVar("LockOptions")
 
 
-class LockDriver(Protocol[LockOptions]):
+class LockDriver(LockInterface, Protocol[LockOptions]):
     # Hack: Make T a mypy invariant. According to PEP-0544, a
     # Protocol[T] whose T is only used in function arguments and
     # returns is "de-facto covariant".
@@ -26,15 +51,3 @@ def options(**kwargs) -> LockOptions:
 
     def __init__(self, args: LockOptions) -> None:
         raise NotImplementedError
-
-    # lock: acquire a lock.
-    # Note: no arguments will be passed over kwargs. Making it part of
-    # the type definition allows adding new arguments later.
- def unlock(self, **kwargs) -> None: - raise NotImplementedError diff --git a/nixops/locks/noop.py b/nixops/locks/noop.py index 71632d7df..6e31c2b9a 100644 --- a/nixops/locks/noop.py +++ b/nixops/locks/noop.py @@ -19,5 +19,5 @@ def __init__(self, args: NoopLockOptions) -> None: def unlock(self, **_kwargs) -> None: pass - def lock(self, **_kwargs) -> None: + def lock(self, description, exclusive, **_kwargs) -> None: pass diff --git a/nixops/script_defs.py b/nixops/script_defs.py index d396ee60d..b1407fd11 100644 --- a/nixops/script_defs.py +++ b/nixops/script_defs.py @@ -2,8 +2,8 @@ from nixops.nix_expr import py2nix from nixops.parallel import run_tasks -from nixops.storage import StorageBackend -from nixops.locks import LockDriver +from nixops.storage import StorageBackend, StorageInterface +from nixops.locks import LockDriver, LockInterface import contextlib import nixops.statefile @@ -66,15 +66,17 @@ def set_common_depl(depl: nixops.deployment.Deployment, args: Namespace) -> None @contextlib.contextmanager -def deployment(args: Namespace) -> Generator[nixops.deployment.Deployment, None, None]: - with network_state(args) as sf: +def deployment( + args: Namespace, writable: bool, activityDescription: str +) -> Generator[nixops.deployment.Deployment, None, None]: + with network_state(args, writable, description=activityDescription) as sf: depl = open_deployment(sf, args) set_common_depl(depl, args) yield depl -def get_lock(network: NetworkEval) -> LockDriver: - lock: LockDriver +def get_lock(network: NetworkEval) -> LockInterface: + lock: LockInterface lock_class: Type[LockDriver] lock_drivers = PluginManager.lock_drivers() try: @@ -94,7 +96,9 @@ def get_lock(network: NetworkEval) -> LockDriver: @contextlib.contextmanager -def network_state(args: Namespace) -> Generator[nixops.statefile.StateFile, None, None]: +def network_state( + args: Namespace, writable: bool, description: str, doLock: bool = True +) -> Generator[nixops.statefile.StateFile, None, None]: 
network = eval_network(get_network_file(args)) storage_backends = PluginManager.storage_backends() storage_class: Optional[Type[StorageBackend]] = storage_backends.get( @@ -109,24 +113,33 @@ def network_state(args: Namespace) -> Generator[nixops.statefile.StateFile, None ) raise Exception("Missing storage provider plugin.") - lock = get_lock(network) + lock: Optional[LockInterface] + if doLock: + lock = get_lock(network) + else: + lock = None storage_class_options = storage_class.options(**network.storage.configuration) - storage: StorageBackend = storage_class(storage_class_options) + storage: StorageInterface = storage_class(storage_class_options) with TemporaryDirectory("nixops") as statedir: statefile = statedir + "/state.nixops" - lock.lock() - storage.fetchToFile(statefile) - state = nixops.statefile.StateFile(statefile) + if lock is not None: + lock.lock(description=description, exclusive=writable) try: - storage.onOpen(state) + storage.fetchToFile(statefile) + state = nixops.statefile.StateFile(statefile, writable, lock=lock) + try: + storage.onOpen(state) - yield state + yield state + finally: + state.close() + if writable: + storage.uploadFromFile(statefile) finally: - state.close() - storage.uploadFromFile(statefile) - lock.unlock() + if lock is not None: + lock.unlock() def op_list_plugins(args: Namespace) -> None: @@ -162,9 +175,9 @@ def sort_deployments( # $NIXOPS_DEPLOYMENT. 
@contextlib.contextmanager def one_or_all( - args: Namespace, + args: Namespace, writable: bool, activityDescription: str ) -> Generator[List[nixops.deployment.Deployment], None, None]: - with network_state(args) as sf: + with network_state(args, writable, description=activityDescription) as sf: if args.all: yield sf.get_all_deployments() else: @@ -172,7 +185,7 @@ def one_or_all( def op_list_deployments(args: Namespace) -> None: - with network_state(args) as sf: + with network_state(args, False, "nixops list") as sf: tbl = create_table( [ ("UUID", "l"), @@ -249,7 +262,7 @@ def modify_deployment(args: Namespace, depl: nixops.deployment.Deployment) -> No def op_create(args: Namespace) -> None: - with network_state(args) as sf: + with network_state(args, True, "nixops create") as sf: depl = sf.create_deployment() sys.stderr.write("created deployment ‘{0}’\n".format(depl.uuid)) modify_deployment(args, depl) @@ -266,14 +279,14 @@ def op_create(args: Namespace) -> None: def op_modify(args: Namespace) -> None: - with deployment(args) as depl: + with deployment(args, True, "nixops modify") as depl: modify_deployment(args, depl) if args.name: set_name(depl, args.name) def op_clone(args: Namespace) -> None: - with deployment(args) as depl: + with deployment(args, True, "nixops clone") as depl: depl2 = depl.clone() sys.stderr.write("created deployment ‘{0}’\n".format(depl2.uuid)) set_name(depl2, args.name) @@ -281,7 +294,7 @@ def op_clone(args: Namespace) -> None: def op_delete(args: Namespace) -> None: - with one_or_all(args) as depls: + with one_or_all(args, True, "nixops delete") as depls: for depl in depls: depl.delete(force=args.force or False) @@ -400,7 +413,7 @@ def name_to_key(name: str) -> Tuple[str, str, List[object]]: ) if args.all: - with network_state(args) as sf: + with network_state(args, False, "nixops info") as sf: if not args.plain: tbl = create_table([("Deployment", "l")] + table_headers) for depl in sort_deployments(sf.get_all_deployments()): @@ -410,7 
+423,7 @@ def name_to_key(name: str) -> Tuple[str, str, List[object]]: print(tbl) else: - with deployment(args) as depl: + with deployment(args, False, "nixops info") as depl: do_eval(depl) if args.plain: @@ -480,7 +493,11 @@ def check(depl: nixops.deployment.Deployment) -> None: else: resources.append(m) - with one_or_all(args) as depls: + # TODO: writable=False? + # Historically, nixops check was allowed to write to the state file. + # With remote state however, this requires an exclusive lock, which may + # not be the best choice. + with one_or_all(args, writable=True, activityDescription="nixops check") as depls: for depl in depls: check(depl) @@ -600,17 +617,17 @@ def op_clean_backups(args: Namespace) -> None: ) if not (args.keep or args.keep_days): raise Exception("Please specify at least --keep or --keep-days arguments.") - with deployment(args) as depl: + with deployment(args, True, "nixops clean-backups") as depl: depl.clean_backups(args.keep, args.keep_days, args.keep_physical) def op_remove_backup(args: Namespace) -> None: - with deployment(args) as depl: + with deployment(args, True, "nixops remove-backup") as depl: depl.remove_backup(args.backupid, args.keep_physical) def op_backup(args: Namespace) -> None: - with deployment(args) as depl: + with deployment(args, True, "nixops backup") as depl: def do_backup(): backup_id = depl.backup( @@ -636,7 +653,7 @@ def do_backup(): def op_backup_status(args: Namespace) -> None: - with deployment(args) as depl: + with deployment(args, False, "nixops backup-status") as depl: backupid = args.backupid while True: backups = depl.get_backups( @@ -670,7 +687,7 @@ def op_backup_status(args: Namespace) -> None: def op_restore(args: Namespace) -> None: - with deployment(args) as depl: + with deployment(args, True, "nixops restore") as depl: depl.restore( include=args.include or [], exclude=args.exclude or [], @@ -680,7 +697,7 @@ def op_restore(args: Namespace) -> None: def op_deploy(args: Namespace) -> None: - with 
deployment(args) as depl: + with deployment(args, True, "nixops deploy") as depl: if args.confirm: depl.logger.set_autoresponse("y") if args.evaluate_only: @@ -710,12 +727,12 @@ def op_deploy(args: Namespace) -> None: def op_send_keys(args: Namespace) -> None: - with deployment(args) as depl: + with deployment(args, False, "nixops send-keys") as depl: depl.send_keys(include=args.include or [], exclude=args.exclude or []) def op_set_args(args: Namespace) -> None: - with deployment(args) as depl: + with deployment(args, True, "nixops set-args") as depl: for [n, v] in args.args or []: depl.set_arg(n, v) for [n, v] in args.argstrs or []: @@ -725,7 +742,7 @@ def op_set_args(args: Namespace) -> None: def op_destroy(args: Namespace) -> None: - with one_or_all(args) as depls: + with one_or_all(args, True, "nixops destroy") as depls: for depl in depls: if args.confirm: depl.logger.set_autoresponse("y") @@ -735,7 +752,7 @@ def op_destroy(args: Namespace) -> None: def op_reboot(args: Namespace) -> None: - with deployment(args) as depl: + with deployment(args, True, "nixops reboot") as depl: depl.reboot_machines( include=args.include or [], exclude=args.exclude or [], @@ -746,26 +763,26 @@ def op_reboot(args: Namespace) -> None: def op_delete_resources(args: Namespace) -> None: - with deployment(args) as depl: + with deployment(args, True, "nixops delete-resources") as depl: if args.confirm: depl.logger.set_autoresponse("y") depl.delete_resources(include=args.include or [], exclude=args.exclude or []) def op_stop(args: Namespace) -> None: - with deployment(args) as depl: + with deployment(args, True, "nixops stop") as depl: if args.confirm: depl.logger.set_autoresponse("y") depl.stop_machines(include=args.include or [], exclude=args.exclude or []) def op_start(args: Namespace) -> None: - with deployment(args) as depl: + with deployment(args, True, "nixops start") as depl: depl.start_machines(include=args.include or [], exclude=args.exclude or []) def op_rename(args: Namespace) 
-> None: - with deployment(args) as depl: + with deployment(args, True, "nixops rename") as depl: depl.rename(args.current_name, args.new_name) @@ -779,7 +796,7 @@ def print_physical_backup_spec( def op_show_arguments(cli_args: Namespace) -> None: - with deployment(cli_args) as depl: + with deployment(cli_args, False, "nixops show-arguments") as depl: tbl = create_table([("Name", "l"), ("Location", "l")]) args = depl.get_arguments() for arg in sorted(args.keys()): @@ -789,7 +806,7 @@ def op_show_arguments(cli_args: Namespace) -> None: def op_show_physical(args: Namespace) -> None: - with deployment(args) as depl: + with deployment(args, False, "nixops show-physical") as depl: if args.backupid: print_physical_backup_spec(depl, args.backupid) return @@ -828,7 +845,7 @@ def nix_paths(depl: nixops.deployment.Deployment) -> List[str]: paths: List[str] = [] - with one_or_all(args) as depls: + with one_or_all(args, False, "nixops dump-nix-paths") as depls: for depl in depls: paths.extend(nix_paths(depl)) @@ -839,7 +856,7 @@ def nix_paths(depl: nixops.deployment.Deployment) -> List[str]: def op_export(args: Namespace) -> None: res = {} - with one_or_all(args) as depls: + with one_or_all(args, False, "nixops export") as depls: for depl in depls: res[depl.uuid] = depl.export() print(json.dumps(res, indent=2, sort_keys=True, cls=nixops.util.NixopsEncoder)) @@ -852,7 +869,7 @@ def op_unlock(args: Namespace) -> None: def op_import(args: Namespace) -> None: - with network_state(args) as sf: + with network_state(args, True, "nixops import") as sf: existing = set(sf.query_deployments()) dump = json.loads(sys.stdin.read()) @@ -908,9 +925,19 @@ def parse_machine( def op_ssh(args: Namespace) -> None: - with deployment(args) as depl: + with network_state( + args, False, description="nixops ssh", doLock=not args.now + ) as sf: + depl = open_deployment(sf, args) + set_common_depl(depl, args) + (username, _, m) = parse_machine(args.machine, depl) flags, command = 
m.ssh.split_openssh_args(args.args) + + # unlock early, to avoid blocking mutable operations (deploy etc) while + # an interactive session is active. + if sf.lock is not None: + sf.lock.unlock() sys.exit( m.ssh.run_command( command, @@ -925,7 +952,7 @@ def op_ssh(args: Namespace) -> None: def op_ssh_for_each(args: Namespace) -> None: results: List[Optional[int]] = [] - with one_or_all(args) as depls: + with one_or_all(args, False, "nixops ssh-for-each") as depls: for depl in depls: def worker(m: nixops.backends.GenericMachineState) -> Optional[int]: @@ -954,7 +981,7 @@ def scp_loc(user: str, ssh_name: str, remote: str, loc: str) -> str: def op_scp(args: Namespace) -> None: if args.scp_from == args.scp_to: raise Exception("exactly one of ‘--from’ and ‘--to’ must be specified") - with deployment(args) as depl: + with deployment(args, False, "nixops scp") as depl: (username, machine, m) = parse_machine(args.machine, depl) ssh_name = m.get_ssh_name() from_loc = scp_loc(username, ssh_name, args.scp_from, args.source) @@ -969,7 +996,7 @@ def op_scp(args: Namespace) -> None: def op_mount(args: Namespace) -> None: # TODO: Fixme - with deployment(args) as depl: + with deployment(args, False, "nixops mount") as depl: (username, rest, m) = parse_machine(args.machine, depl) try: remote_path = args.machine.split(":")[1] @@ -995,7 +1022,7 @@ def op_mount(args: Namespace) -> None: def op_show_option(args: Namespace) -> None: - with deployment(args) as depl: + with deployment(args, False, "nixops show-option") as depl: if args.include_physical: depl.evaluate() json.dump( @@ -1009,9 +1036,9 @@ def op_show_option(args: Namespace) -> None: @contextlib.contextmanager def deployment_with_rollback( - args: Namespace, + args: Namespace, activityDescription: str, ) -> Generator[nixops.deployment.Deployment, None, None]: - with deployment(args) as depl: + with deployment(args, True, activityDescription) as depl: if not depl.rollback_enabled: raise Exception( "rollback is not enabled for 
this network; please set ‘network.enableRollback’ to ‘true’ and redeploy" @@ -1020,7 +1047,7 @@ def deployment_with_rollback( def op_list_generations(args: Namespace) -> None: - with deployment_with_rollback(args) as depl: + with deployment_with_rollback(args, "nixops list-generations") as depl: if ( subprocess.call(["nix-env", "-p", depl.get_profile(), "--list-generations"]) != 0 @@ -1029,7 +1056,7 @@ def op_list_generations(args: Namespace) -> None: def op_delete_generation(args: Namespace) -> None: - with deployment_with_rollback(args) as depl: + with deployment_with_rollback(args, "nixops delete-generation") as depl: if ( subprocess.call( [ @@ -1046,7 +1073,7 @@ def op_delete_generation(args: Namespace) -> None: def op_rollback(args: Namespace) -> None: - with deployment_with_rollback(args) as depl: + with deployment_with_rollback(args, "nixops rollback") as depl: depl.rollback( generation=args.generation, include=args.include or [], @@ -1061,7 +1088,7 @@ def op_rollback(args: Namespace) -> None: def op_show_console_output(args: Namespace) -> None: - with deployment(args) as depl: + with deployment(args, False, "nixops show-console-output") as depl: m = depl.machines.get(args.machine) if not m: raise Exception("unknown machine ‘{0}’".format(args.machine)) @@ -1069,7 +1096,7 @@ def op_show_console_output(args: Namespace) -> None: def op_edit(args: Namespace) -> None: - with deployment(args) as depl: + with deployment(args, False, "nixops edit") as depl: editor = os.environ.get("EDITOR") if not editor: raise Exception("the $EDITOR environment variable is not set") @@ -1079,7 +1106,7 @@ def op_edit(args: Namespace) -> None: def op_copy_closure(args: Namespace) -> None: - with deployment(args) as depl: + with deployment(args, False, "nixops copy-closure") as depl: (username, machine, m) = parse_machine(args.machine, depl) m.copy_closure_to(args.storepath) diff --git a/nixops/statefile.py b/nixops/statefile.py index e7ca07323..deb2e3ed2 100644 --- 
a/nixops/statefile.py +++ b/nixops/statefile.py @@ -7,17 +7,28 @@ import threading from typing import Any, Optional, List, Type from types import TracebackType +import re import nixops.deployment +from nixops.locks import LockInterface class Connection(sqlite3.Connection): def __init__(self, db_file: str, **kwargs: Any) -> None: - db_exists = os.path.exists(db_file) + matchMaybe: Optional[re.Match[str]] = re.fullmatch( + "(file://)?([^?]*)(\\?.*)?", db_file + ) + file: str + if matchMaybe is None: + file = db_file + else: + file = matchMaybe.group(2) + + db_exists: bool = os.path.exists(file) if not db_exists: - os.fdopen(os.open(db_file, os.O_WRONLY | os.O_CREAT, 0o600), "w").close() + os.fdopen(os.open(file, os.O_WRONLY | os.O_CREAT, 0o600), "w").close() sqlite3.Connection.__init__(self, db_file, **kwargs) # type: ignore - self.db_file = db_file + self.db_file = file self.nesting = 0 self.lock = threading.RLock() @@ -76,22 +87,36 @@ def get_default_state_file() -> str: class StateFile(object): """NixOps state file.""" - current_schema = 3 + current_schema: int = 3 + lock: Optional[LockInterface] - def __init__(self, db_file: str) -> None: + def __init__( + self, db_file: str, writable: bool, lock: Optional[LockInterface] = None + ) -> None: self.db_file: str = db_file + self.lock = lock if os.path.splitext(db_file)[1] not in [".nixops", ".charon"]: raise Exception( "state file ‘{0}’ should have extension ‘.nixops’".format(db_file) ) - db = sqlite3.connect( - db_file, - timeout=60, - check_same_thread=False, - factory=Connection, - isolation_level=None, - ) + + def connect(writable: bool) -> sqlite3.Connection: + query: str = "" + + if not writable: + query = "?mode=ro" + + return sqlite3.connect( + f"file://{db_file}{query}", + uri=True, + timeout=60, + check_same_thread=False, + factory=Connection, + isolation_level=None, + ) + + db: sqlite3.Connection = connect(writable) db.execute("pragma journal_mode = wal") db.execute("pragma foreign_keys = 1") @@ -99,7 
+124,7 @@ def __init__(self, db_file: str) -> None: # FIXME: this is not actually transactional, because pysqlite (not # sqlite) does an implicit commit before "create table". with db: - c = db.cursor() + c: sqlite3.Cursor = db.cursor() # Get the schema version. version: int = 0 # new database @@ -111,22 +136,36 @@ def __init__(self, db_file: str) -> None: if version == self.current_schema: pass - elif version == 0: - self._create_schema(c) - elif version < self.current_schema: - if version <= 1: - self._upgrade_1_to_2(c) - if version <= 2: - self._upgrade_2_to_3(c) - c.execute( - "update SchemaVersion set version = ?", (self.current_schema,) - ) else: - raise Exception( - "this NixOps version is too old to deal with schema version {0}".format( - version + # If a schema update is necessary, we'll need to do so despite + # being in read only mode. This is ok, because the purpose of + # read only mode is to catch problems where we didn't request + # the appropriate level of locking. This still works, because + # the 'returned' db is still read only. + # + # IMPORTANT: schema upgrades must not touch anything outside the + # db. Otherwise, it must reject to run when we're in + # read-only mode. + # + mutableDb: sqlite3.Connection = connect(True) + c = mutableDb.cursor() + if version == 0: + self._create_schema(c) + elif version < self.current_schema: + if version <= 1: + self._upgrade_1_to_2(c) + if version <= 2: + self._upgrade_2_to_3(c) + c.execute( + "update SchemaVersion set version = ?", (self.current_schema,) ) - ) + else: + raise Exception( + "this NixOps version is too new to deal with schema version {0}".format( + version + ) + ) + mutableDb.close() self._db: sqlite3.Connection = db diff --git a/nixops/storage/__init__.py b/nixops/storage/__init__.py index f5646e5fb..975b2ef6c 100644 --- a/nixops/storage/__init__.py +++ b/nixops/storage/__init__.py @@ -3,6 +3,39 @@ from typing_extensions import Protocol + +""" +Interface to a storage driver. 
+
+An implementation should inherit from StorageBackend in order for a plugin to
+be able to integrate it.
+"""
+
+
+# This separation was introduced to hide the T (options) details from the
+# StorageInterface type. It only matters for construction and clients don't have
+# to know about it.
+class StorageInterface(Protocol):
+    # fetchToFile: download the state file to the local disk.
+    # Note: no arguments will be passed over kwargs. Making it part of
+    # the type definition allows adding new arguments later.
+    def fetchToFile(self, path: str, **kwargs) -> None:
+        raise NotImplementedError
+
+    # onOpen: receive the StateFile object for last-minute, backend
+    # specific changes to the state file.
+    # Note: no arguments will be passed over kwargs. Making it part of
+    # the type definition allows adding new arguments later.
+    def onOpen(self, sf: nixops.statefile.StateFile, **kwargs) -> None:
+        pass
+
+    # uploadFromFile: upload the new version of the state file
+    # Note: no arguments will be passed over kwargs. Making it part of
+    # the type definition allows adding new arguments later.
+    def uploadFromFile(self, path: str, **kwargs) -> None:
+        raise NotImplementedError
+
+
 if TYPE_CHECKING:
     import nixops.statefile
@@ -11,7 +44,7 @@
 StorageArgValues = Mapping[str, Any]
 
 
-class StorageBackend(Protocol[T]):
+class StorageBackend(StorageInterface, Protocol[T]):
     # Hack: Make T a mypy invariant. According to PEP-0544, a
     # Protocol[T] whose T is only used in function arguments and
     # returns is "de-facto covariant".
@@ -32,22 +65,3 @@ def options(**kwargs) -> T:
 
     def __init__(self, args: T) -> None:
         raise NotImplementedError
-
-    # fetchToFile: download the state file to the local disk.
-    # Note: no arguments will be passed over kwargs. Making it part of
-    # the type definition allows adding new arguments later.
- def fetchToFile(self, path: str, **kwargs) -> None: - raise NotImplementedError - - # onOpen: receive the StateFile object for last-minute, backend - # specific changes to the state file. - # Note: no arguments will be passed over kwargs. Making it part of - # the type definition allows adding new arguments later. - def onOpen(self, sf: nixops.statefile.StateFile, **kwargs) -> None: - pass - - # uploadFromFile: upload the new version of the state file - # Note: no arguments will be passed over kwargs. Making it part of - # the type definition allows adding new arguments later. - def uploadFromFile(self, path: str, **kwargs) -> None: - raise NotImplementedError diff --git a/tests/__init__.py b/tests/__init__.py index a3bfd02d2..6eb5fec25 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -11,7 +11,7 @@ def setup(): - nixops.statefile.StateFile(db_file).close() + nixops.statefile.StateFile(db_file, writable=True).close() def destroy(sf, uuid): @@ -30,7 +30,7 @@ def destroy(sf, uuid): def teardown(): - sf = nixops.statefile.StateFile(db_file) + sf = nixops.statefile.StateFile(db_file, writable=True) uuids = sf.query_deployments() threads = [] for uuid in uuids: diff --git a/tests/functional/__init__.py b/tests/functional/__init__.py index 7152f23e8..ab62bd662 100644 --- a/tests/functional/__init__.py +++ b/tests/functional/__init__.py @@ -8,7 +8,7 @@ class DatabaseUsingTest(object): _multiprocess_can_split_ = True def setup(self): - self.sf = nixops.statefile.StateFile(db_file) + self.sf = nixops.statefile.StateFile(db_file, writable=True) def teardown(self): self.sf.close()