From cb73c56a5f46c47a757cee754951518fa2a441ee Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jan=20Jan=C3=9Fen?= <janssen@mpie.de>
Date: Sun, 8 Sep 2024 11:01:39 +0200
Subject: [PATCH 01/20] Add more type hints and docstrings

---
 pyiron_base/jobs/dynamic.py                   |  2 +-
 pyiron_base/jobs/flex/executablecontainer.py  |  9 ++-
 .../jobs/flex/pythonfunctioncontainer.py      |  8 +-
 pyiron_base/jobs/script.py                    | 23 +++---
 pyiron_base/jobs/worker.py                    | 78 ++++++++++++++-----
 .../project/archiving/export_archive.py       | 10 ++-
 .../project/archiving/import_archive.py       | 24 ++++--
 7 files changed, 107 insertions(+), 47 deletions(-)

diff --git a/pyiron_base/jobs/dynamic.py b/pyiron_base/jobs/dynamic.py
index 1eea4c84e..8098ca412 100644
--- a/pyiron_base/jobs/dynamic.py
+++ b/pyiron_base/jobs/dynamic.py
@@ -3,7 +3,7 @@
 from typing import List
 
 
-def warn_dynamic_job_classes(resource_folder_lst: List[str], logger: logging.Logger):
+def warn_dynamic_job_classes(resource_folder_lst: List[str], logger: logging.Logger) -> None:
     """
     Warns about deprecated 'dynamic' and 'templates' folders in the resource directory.
 
diff --git a/pyiron_base/jobs/flex/executablecontainer.py b/pyiron_base/jobs/flex/executablecontainer.py
index c63a3a156..f289c8d09 100644
--- a/pyiron_base/jobs/flex/executablecontainer.py
+++ b/pyiron_base/jobs/flex/executablecontainer.py
@@ -1,4 +1,6 @@
 import cloudpickle
+from typing import Optional
+
 import numpy as np
 
 from pyiron_base.jobs.job.runfunction import (
@@ -119,8 +121,8 @@ def get_calculate_function(self) -> callable:
             callable: calculate() function
         """
 
-        def get_combined_write_input_funct(input_job_dict, write_input_funct=None):
-            def write_input_combo_funct(working_directory, input_dict):
+        def get_combined_write_input_funct(input_job_dict: dict, write_input_funct: Optional[callable]=None):
+            def write_input_combo_funct(working_directory: str, input_dict: dict):
                 write_input_files_from_input_dict(
                     input_dict=input_job_dict,
                     working_directory=working_directory,
@@ -159,12 +161,13 @@ def _to_dict(self) -> dict:
             )
         return job_dict
 
-    def _from_dict(self, obj_dict: dict, version=None):
+    def _from_dict(self, obj_dict: dict, version: Optional[str] = None):
         """
         Load the job attributes from a dictionary representation.
 
         Args:
             obj_dict (dict): A dictionary containing the job attributes.
+            version (str): The version of the job object.
 
         """
         super()._from_dict(obj_dict=obj_dict)
diff --git a/pyiron_base/jobs/flex/pythonfunctioncontainer.py b/pyiron_base/jobs/flex/pythonfunctioncontainer.py
index 340b629d5..11dc0defd 100644
--- a/pyiron_base/jobs/flex/pythonfunctioncontainer.py
+++ b/pyiron_base/jobs/flex/pythonfunctioncontainer.py
@@ -1,4 +1,5 @@
 import inspect
+from typing import Optional
 
 import cloudpickle
 import numpy as np
@@ -85,12 +86,13 @@ def _to_dict(self) -> dict:
         )
         return job_dict
 
-    def _from_dict(self, obj_dict: dict, version=None) -> None:
+    def _from_dict(self, obj_dict: dict, version: Optional[str] =None) -> None:
         """
         Load the job object from a dictionary representation.
 
         Args:
             obj_dict (dict): The dictionary representation of the job object.
+            version (str): The version of the job object.
         """
         super()._from_dict(obj_dict=obj_dict)
         self._function = cloudpickle.loads(obj_dict["function"])
@@ -98,7 +100,7 @@ def _from_dict(self, obj_dict: dict, version=None) -> None:
             obj_dict["_automatically_rename_on_save_using_input"]
         )
 
-    def save(self):
+    def save(self) -> None:
         """
         Save the job to the project.
 
@@ -127,7 +129,7 @@ def save(self):
             return  # Without saving
         super().save()
 
-    def run_static(self):
+    def run_static(self) -> None:
         """
         Run the static function.
 
diff --git a/pyiron_base/jobs/script.py b/pyiron_base/jobs/script.py
index cfed42c79..4f2a63ae3 100644
--- a/pyiron_base/jobs/script.py
+++ b/pyiron_base/jobs/script.py
@@ -260,30 +260,31 @@ def script_path(self, path: str) -> None:
                 "path should be a string, but ", path, " is a ", type(path), " instead."
             )
 
-    def collect_logfiles(self):
+    def collect_logfiles(self) -> None:
         """
         Compatibility function - but no log files are being collected
         """
         pass
 
-    def disable_mpi4py(self):
+    def disable_mpi4py(self) -> None:
         """
         Disables the usage of mpi4py for parallel execution.
         """
         self._enable_mpi4py = False
 
-    def enable_mpi4py(self):
+    def enable_mpi4py(self) -> None:
         """
         Enable the usage of mpi4py for parallel execution.
         """
         self._enable_mpi4py = True
 
-    def _from_dict(self, obj_dict, version=None) -> None:
+    def _from_dict(self, obj_dict: dict, version: Optional[str]=None) -> None:
         """
         Load job attributes from a dictionary.
 
         Args:
             obj_dict (dict): The dictionary containing the job attributes.
+            version (str): The version of the job object.
         """
         super()._from_dict(obj_dict=obj_dict)
         if "parallel" in obj_dict["input"].keys():
@@ -294,7 +295,7 @@ def _from_dict(self, obj_dict, version=None) -> None:
         if "custom_dict" in obj_dict["input"].keys():
             self.input.update(obj_dict["input"]["custom_dict"])
 
-    def get_input_parameter_dict(self):
+    def get_input_parameter_dict(self) -> dict:
         """
         Get a hierarchical dictionary of input files. On the first level the dictionary is divided into file_to_create
         and files_to_copy. Both are dictionaries that use the file names as keys. In file_to_create the values are strings
@@ -320,7 +321,7 @@ def get_input_parameter_dict(self):
                 self.executable._mpi = True
         return input_file_dict
 
-    def run_if_lib(self):
+    def run_if_lib(self) -> None:
         """
         Compatibility function - but library run mode is not available
         """
@@ -328,7 +329,7 @@ def run_if_lib(self):
             "Library run mode is not implemented for script jobs."
         )
 
-    def run_static(self):
+    def run_static(self) -> None:
         """
         The run_static() function is called internally in pyiron to trigger the execution of the executable. This is
         typically divided into three steps: (1) the generation of the calculate function and its inputs, (2) the
@@ -349,10 +350,10 @@ def run_static(self):
 
     def save_output(
         self, output_dict: Optional[dict] = None, shell_output: Optional[str] = None
-    ):
+    ) -> None:
         pass
 
-    def set_input_to_read_only(self):
+    def set_input_to_read_only(self) -> None:
         """
         This function enforces read-only mode for the input classes, but it has to be implemented in the individual
         classes.
@@ -373,7 +374,7 @@ def _to_dict(self) -> dict:
         job_dict["input/custom_dict"] = self.input.to_builtin()
         return job_dict
 
-    def validate_ready_to_run(self):
+    def validate_ready_to_run(self) -> None:
         """
         Validates if the job is ready to run by checking if the script path is provided.
 
@@ -386,7 +387,7 @@ def validate_ready_to_run(self):
                 + "running."
             )
 
-    def _executable_activate_mpi(self):
+    def _executable_activate_mpi(self) -> None:
         """
         Internal helper function to switch the executable to MPI mode
         """
diff --git a/pyiron_base/jobs/worker.py b/pyiron_base/jobs/worker.py
index d17d9b652..389bd541c 100644
--- a/pyiron_base/jobs/worker.py
+++ b/pyiron_base/jobs/worker.py
@@ -9,6 +9,7 @@
 import time
 from datetime import datetime
 from multiprocessing import Pool
+from typing import Tuple
 
 import numpy as np
 import psutil
@@ -28,7 +29,7 @@
 __date__ = "Nov 5, 2021"
 
 
-def worker_function(args):
+def worker_function(args: list) -> None:
     """
     The worker function is executed inside a multiprocessing Pool.
 
@@ -128,7 +129,7 @@ class WorkerJob(PythonTemplateJob):
 
     """
 
-    def __init__(self, project, job_name):
+    def __init__(self, project: "pyiron_base.project.generic.Project", job_name: str):
         super(WorkerJob, self).__init__(project, job_name)
         if not state.database.database_is_disabled:
             self.input.project = project.path
@@ -142,54 +143,72 @@ def __init__(self, project, job_name):
         self._job_with_calculate_function = True
 
     @property
-    def project_to_watch(self):
+    def project_to_watch(self) -> "pyiron_base.project.generic.Project":
         rel_path = os.path.relpath(self.input.project, self.project.path)
         return self.project.open(rel_path)
 
     @project_to_watch.setter
-    def project_to_watch(self, pr):
+    def project_to_watch(self, pr: "pyiron_base.project.generic.Project") -> None:
         self.input.project = pr.path
 
     @property
-    def cores_per_job(self):
+    def cores_per_job(self) -> int:
         return self.input.cores_per_job
 
     @cores_per_job.setter
-    def cores_per_job(self, cores):
+    def cores_per_job(self, cores: int) -> None:
         self.input.cores_per_job = int(cores)
 
     @property
-    def queue_limit_factor(self):
+    def queue_limit_factor(self) -> int:
         return self.input.queue_limit_factor
 
     @queue_limit_factor.setter
-    def queue_limit_factor(self, limit_factor):
+    def queue_limit_factor(self, limit_factor: int) -> None:
         self.input.queue_limit_factor = limit_factor
 
     @property
-    def child_runtime(self):
+    def child_runtime(self) -> int:  # in seconds
         return self.input.child_runtime
 
     @child_runtime.setter
-    def child_runtime(self, time_in_sec):
+    def child_runtime(self, time_in_sec: int) -> None:
         self.input.child_runtime = time_in_sec
 
     @property
-    def sleep_interval(self):
+    def sleep_interval(self) -> int:  # in seconds
         return self.input.sleep_interval
 
     @sleep_interval.setter
-    def sleep_interval(self, interval):
+    def sleep_interval(self, interval: int) -> None:
         self.input.sleep_interval = int(interval)
 
     # This function is executed
-    def run_static(self):
+    def run_static(self) -> None:
+        """
+        Run the worker job in static mode.
+
+        If the database is enabled, the worker job will execute calculations by querying the database for jobs to
+        execute. If the database is disabled, the worker job will execute calculations by scanning the working
+        directory for HDF5 files.
+
+        Returns:
+            None
+        """
         if not state.database.database_is_disabled:
             self.run_static_with_database()
         else:
             self.run_static_without_database()
 
-    def run_static_with_database(self):
+    def run_static_with_database(self) -> None:
+        """
+        Run the worker job in static mode with database.
+
+        This method executes calculations by querying the database for jobs to execute.
+
+        Returns:
+            None
+        """
         self.status.running = True
         master_id = self.job_id
         pr = self.project_to_watch
@@ -277,13 +296,36 @@ def run_static_with_database(self):
         self.status.finished = True
 
     @staticmethod
-    def _get_working_directory_and_h5path(path):
+    def _get_working_directory_and_h5path(path: str) -> Tuple[str, str]:
+        """
+        Get the working directory and h5path from the given path.
+
+        Args:
+            path (str): The path to the h5 file.
+
+        Returns:
+            Tuple[str, str]: The working directory and h5path.
+
+        Example:
+            >>> _get_working_directory_and_h5path("/path/to/job.h5")
+            ("/path/to/job_hdf5/job", "/path/to/job.h5/job")
+        """
         path_split = path.split("/")
         job_name = path_split[-1].split(".h5")[0]
         parent_dir = "/".join(path_split[:-1])
-        return parent_dir + "/" + job_name + "_hdf5/" + job_name, path + "/" + job_name
+        working_directory = parent_dir + "/" + job_name + "_hdf5/" + job_name
+        h5path = path + "/" + job_name
+        return working_directory, h5path
 
-    def run_static_without_database(self):
+    def run_static_without_database(self) -> None:
+        """
+        Run the worker job in static mode without a database.
+
+        This method executes calculations by scanning the working directory for HDF5 files.
+
+        Returns:
+            None
+        """
         self.project_hdf5.create_working_directory()
         working_directory = self.working_directory
         log_file = os.path.join(working_directory, "worker.log")
@@ -335,7 +377,7 @@ def run_static_without_database(self):
         # The job is finished
         self.status.finished = True
 
-    def wait_for_worker(self, interval_in_s=60, max_iterations=10):
+    def wait_for_worker(self, interval_in_s: int=60, max_iterations: int=10) -> None:
         """
         Wait for the workerjob to finish the execution of all jobs. If no job is in status running or submitted the
         workerjob shuts down automatically after 10 minutes.
diff --git a/pyiron_base/project/archiving/export_archive.py b/pyiron_base/project/archiving/export_archive.py
index e7e24ec0c..23b1149a7 100644
--- a/pyiron_base/project/archiving/export_archive.py
+++ b/pyiron_base/project/archiving/export_archive.py
@@ -4,6 +4,8 @@
 import tempfile
 from typing import Optional
 
+import pandas
+
 
 def copy_files_to_archive(
     directory_to_transfer: str,
@@ -11,7 +13,7 @@ def copy_files_to_archive(
     compress: bool = True,
     copy_all_files: bool = False,
     arcname: Optional[str] = None,
-    df: Optional["DataFrame"] = None,
+    df: Optional[pandas.DataFrame] = None,
 ):
     """
     Copy files from a directory to an archive, optionally compressing the archive.
@@ -26,7 +28,7 @@ def copy_files_to_archive(
 
     """
 
-    def copy_files(origin, destination, copy_all_files=copy_all_files):
+    def copy_files(origin: str, destination: str, copy_all_files: bool = copy_all_files):
         """
         Copy files from the origin directory to the destination directory.
 
@@ -65,7 +67,7 @@ def copy_files(origin, destination, copy_all_files=copy_all_files):
         os.remove(os.path.join(directory_to_transfer, "export.csv"))
 
 
-def copy_h5_files(src, dst):
+def copy_h5_files(src: str, dst: str) -> None:
     """
     Copies all .h5 files from the source directory to the destination directory,
     preserving the directory structure.
@@ -89,7 +91,7 @@ def copy_h5_files(src, dst):
                 shutil.copy2(src_file, os.path.join(dst_dir, file))
 
 
-def export_database(df: "DataFrame"):
+def export_database(df: pandas.DataFrame) -> pandas.DataFrame:
     """
     Export the project database to an archive directory.
 
diff --git a/pyiron_base/project/archiving/import_archive.py b/pyiron_base/project/archiving/import_archive.py
index 5b8ccab9e..ac295bbff 100644
--- a/pyiron_base/project/archiving/import_archive.py
+++ b/pyiron_base/project/archiving/import_archive.py
@@ -3,6 +3,7 @@
 import posixpath
 import tarfile
 import tempfile
+from typing import Tuple
 from shutil import copytree
 
 import numpy as np
@@ -12,7 +13,17 @@
 from pyiron_base.utils.instance import static_isinstance
 
 
-def update_id_lst(record_lst, job_id_lst):
+def update_id_lst(record_lst: list, job_id_lst: list) -> list:
+    """
+    Update the list of master IDs based on the record list and job ID list.
+
+    Args:
+        record_lst (list): List of master IDs.
+        job_id_lst (list): List of job IDs.
+
+    Returns:
+        list: Updated list of master IDs.
+    """
     masterid_lst = []
     for masterid in record_lst:
         if masterid is None or np.isnan(masterid):
@@ -23,7 +34,7 @@ def update_id_lst(record_lst, job_id_lst):
     return masterid_lst
 
 
-def import_jobs(project_instance, archive_directory):
+def import_jobs(project_instance: "pyiron_base.project.generic.Project", archive_directory: str):
     """
     Import jobs from an archive directory to a pyiron project.
 
@@ -94,7 +105,7 @@ def import_jobs(project_instance, archive_directory):
             )
 
 
-def transfer_files(origin_path: str, project_path: str):
+def transfer_files(origin_path: str, project_path: str) -> Tuple[pandas.DataFrame, str]:
     """
     Transfer files from the origin path to the project path.
 
@@ -103,8 +114,7 @@ def transfer_files(origin_path: str, project_path: str):
         project_path (str): Path to the project directory.
 
     Returns:
-        pandas.DataFrame: Job table.
-        str: Common path.
+        Tuple[pandas.DataFrame, str]: A tuple containing the job table and the common path.
     """
     df = get_dataframe(origin_path=origin_path)
     common_path = posixpath.commonpath(list(df["project"]))
@@ -112,7 +122,7 @@ def transfer_files(origin_path: str, project_path: str):
     return df, common_path
 
 
-def get_dataframe(origin_path: str, csv_file_name: str = "export.csv") -> "DataFrame":
+def get_dataframe(origin_path: str, csv_file_name: str = "export.csv") -> pandas.DataFrame:
     """
     Get the job table from the csv file.
 
@@ -133,7 +143,7 @@ def get_dataframe(origin_path: str, csv_file_name: str = "export.csv") -> "DataF
     raise FileNotFoundError(f"File: {csv_file_name} was not found.")
 
 
-def inspect_csv(tar_path: str, csv_file: str = "export.csv"):
+def inspect_csv(tar_path: str, csv_file: str = "export.csv") -> None:
     """
     Inspect the csv file inside a tar archive.
 

From 194e94549724323b9f713a31cecd8bf3c61f6cc8 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
 <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Sun, 8 Sep 2024 09:02:39 +0000
Subject: [PATCH 02/20] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 pyiron_base/jobs/dynamic.py                      |  4 +++-
 pyiron_base/jobs/flex/executablecontainer.py     |  6 ++++--
 pyiron_base/jobs/flex/pythonfunctioncontainer.py |  2 +-
 pyiron_base/jobs/script.py                       |  2 +-
 pyiron_base/jobs/worker.py                       |  4 +++-
 pyiron_base/project/archiving/export_archive.py  |  4 +++-
 pyiron_base/project/archiving/import_archive.py  | 10 +++++++---
 7 files changed, 22 insertions(+), 10 deletions(-)

diff --git a/pyiron_base/jobs/dynamic.py b/pyiron_base/jobs/dynamic.py
index 8098ca412..c331c328b 100644
--- a/pyiron_base/jobs/dynamic.py
+++ b/pyiron_base/jobs/dynamic.py
@@ -3,7 +3,9 @@
 from typing import List
 
 
-def warn_dynamic_job_classes(resource_folder_lst: List[str], logger: logging.Logger) -> None:
+def warn_dynamic_job_classes(
+    resource_folder_lst: List[str], logger: logging.Logger
+) -> None:
     """
     Warns about deprecated 'dynamic' and 'templates' folders in the resource directory.
 
diff --git a/pyiron_base/jobs/flex/executablecontainer.py b/pyiron_base/jobs/flex/executablecontainer.py
index f289c8d09..5604e3692 100644
--- a/pyiron_base/jobs/flex/executablecontainer.py
+++ b/pyiron_base/jobs/flex/executablecontainer.py
@@ -1,6 +1,6 @@
-import cloudpickle
 from typing import Optional
 
+import cloudpickle
 import numpy as np
 
 from pyiron_base.jobs.job.runfunction import (
@@ -121,7 +121,9 @@ def get_calculate_function(self) -> callable:
             callable: calculate() function
         """
 
-        def get_combined_write_input_funct(input_job_dict: dict, write_input_funct: Optional[callable]=None):
+        def get_combined_write_input_funct(
+            input_job_dict: dict, write_input_funct: Optional[callable] = None
+        ):
             def write_input_combo_funct(working_directory: str, input_dict: dict):
                 write_input_files_from_input_dict(
                     input_dict=input_job_dict,
diff --git a/pyiron_base/jobs/flex/pythonfunctioncontainer.py b/pyiron_base/jobs/flex/pythonfunctioncontainer.py
index 11dc0defd..b91526863 100644
--- a/pyiron_base/jobs/flex/pythonfunctioncontainer.py
+++ b/pyiron_base/jobs/flex/pythonfunctioncontainer.py
@@ -86,7 +86,7 @@ def _to_dict(self) -> dict:
         )
         return job_dict
 
-    def _from_dict(self, obj_dict: dict, version: Optional[str] =None) -> None:
+    def _from_dict(self, obj_dict: dict, version: Optional[str] = None) -> None:
         """
         Load the job object from a dictionary representation.
 
diff --git a/pyiron_base/jobs/script.py b/pyiron_base/jobs/script.py
index 4f2a63ae3..0a5c6f2b7 100644
--- a/pyiron_base/jobs/script.py
+++ b/pyiron_base/jobs/script.py
@@ -278,7 +278,7 @@ def enable_mpi4py(self) -> None:
         """
         self._enable_mpi4py = True
 
-    def _from_dict(self, obj_dict: dict, version: Optional[str]=None) -> None:
+    def _from_dict(self, obj_dict: dict, version: Optional[str] = None) -> None:
         """
         Load job attributes from a dictionary.
 
diff --git a/pyiron_base/jobs/worker.py b/pyiron_base/jobs/worker.py
index 389bd541c..7bbb6cbb4 100644
--- a/pyiron_base/jobs/worker.py
+++ b/pyiron_base/jobs/worker.py
@@ -377,7 +377,9 @@ def run_static_without_database(self) -> None:
         # The job is finished
         self.status.finished = True
 
-    def wait_for_worker(self, interval_in_s: int=60, max_iterations: int=10) -> None:
+    def wait_for_worker(
+        self, interval_in_s: int = 60, max_iterations: int = 10
+    ) -> None:
         """
         Wait for the workerjob to finish the execution of all jobs. If no job is in status running or submitted the
         workerjob shuts down automatically after 10 minutes.
diff --git a/pyiron_base/project/archiving/export_archive.py b/pyiron_base/project/archiving/export_archive.py
index 23b1149a7..1bc917cb7 100644
--- a/pyiron_base/project/archiving/export_archive.py
+++ b/pyiron_base/project/archiving/export_archive.py
@@ -28,7 +28,9 @@ def copy_files_to_archive(
 
     """
 
-    def copy_files(origin: str, destination: str, copy_all_files: bool = copy_all_files):
+    def copy_files(
+        origin: str, destination: str, copy_all_files: bool = copy_all_files
+    ):
         """
         Copy files from the origin directory to the destination directory.
 
diff --git a/pyiron_base/project/archiving/import_archive.py b/pyiron_base/project/archiving/import_archive.py
index ac295bbff..eb332aaad 100644
--- a/pyiron_base/project/archiving/import_archive.py
+++ b/pyiron_base/project/archiving/import_archive.py
@@ -3,8 +3,8 @@
 import posixpath
 import tarfile
 import tempfile
-from typing import Tuple
 from shutil import copytree
+from typing import Tuple
 
 import numpy as np
 import pandas
@@ -34,7 +34,9 @@ def update_id_lst(record_lst: list, job_id_lst: list) -> list:
     return masterid_lst
 
 
-def import_jobs(project_instance: "pyiron_base.project.generic.Project", archive_directory: str):
+def import_jobs(
+    project_instance: "pyiron_base.project.generic.Project", archive_directory: str
+):
     """
     Import jobs from an archive directory to a pyiron project.
 
@@ -122,7 +124,9 @@ def transfer_files(origin_path: str, project_path: str) -> Tuple[pandas.DataFram
     return df, common_path
 
 
-def get_dataframe(origin_path: str, csv_file_name: str = "export.csv") -> pandas.DataFrame:
+def get_dataframe(
+    origin_path: str, csv_file_name: str = "export.csv"
+) -> pandas.DataFrame:
     """
     Get the job table from the csv file.
 

From bdf0096395f6a810c4ab2d83d2962cf145556e68 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jan=20Jan=C3=9Fen?= <janssen@mpie.de>
Date: Sun, 8 Sep 2024 11:34:02 +0200
Subject: [PATCH 03/20] pyirontable and job wrapper

---
 pyiron_base/jobs/datamining.py  | 278 ++++++++++++++++++++++++--------
 pyiron_base/jobs/job/wrapper.py |  33 ++--
 2 files changed, 226 insertions(+), 85 deletions(-)

diff --git a/pyiron_base/jobs/datamining.py b/pyiron_base/jobs/datamining.py
index bbbb1ca84..75d10a41a 100644
--- a/pyiron_base/jobs/datamining.py
+++ b/pyiron_base/jobs/datamining.py
@@ -8,7 +8,7 @@
 import os
 import types
 from datetime import datetime
-from typing import List, Tuple
+from typing import List, Tuple, Optional, Any, Union
 
 import cloudpickle
 import numpy as np
@@ -33,13 +33,31 @@
 __date__ = "Sep 1, 2018"
 
 
-def _to_pickle(hdf, key, value):
+def _to_pickle(hdf: FileHDFio, key: str, value: Any) -> None:
+    """
+    Pickle and store an object in an HDF file.
+
+    Args:
+        hdf (FileHDFio): The HDF file object.
+        key (str): The key to store the object under.
+        value (Any): The object to be pickled and stored.
+    """
     hdf[key] = codecs.encode(
         cloudpickle.dumps(obj=value, protocol=5, buffer_callback=None), "base64"
     ).decode()
 
 
-def _from_pickle(hdf, key):
+def _from_pickle(hdf: FileHDFio, key: str) -> Any:
+    """
+    Load and unpickle an object from an HDF file.
+
+    Args:
+        hdf (FileHDFio): The HDF file object.
+        key (str): The key of the object in the HDF file.
+
+    Returns:
+        Any: The unpickled object.
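+
+    Example (illustrative round trip, using a plain dict in place of the HDF
+    file object, since only item access is required):
+
+        >>> store = {}
+        >>> _to_pickle(store, "x", [1, 2, 3])
+        >>> _from_pickle(store, "x")
+        [1, 2, 3]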
+    """
     try:
         return cloudpickle.loads(codecs.decode(hdf[key].encode(), "base64"))
     except ModuleNotFoundError:
@@ -49,6 +67,16 @@ def _from_pickle(hdf, key):
 
 
 def get_job_id(job):
+    """
+    Get the job ID.
+
+    Args:
+        job: The job object.
+
+    Returns:
+        dict: A dictionary containing the job ID.
+
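+    Example (illustrative; assumes a job object with ``job.job_id == 7``):
+
+        >>> get_job_id(job)
+        {'job_id': 7}
+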
+    """
     return {"job_id": job.job_id}
 
 
@@ -58,7 +86,7 @@ class FunctionContainer(object):
 
     """
 
-    def __init__(self, system_function_lst=None):
+    def __init__(self, system_function_lst: Optional[List[callable]]=None):
         if system_function_lst is None:
             system_function_lst = []
         self._user_function_dict = {}
@@ -77,17 +105,32 @@ def _function_lst(self):
             and self._system_function_dict[funct.__name__]
         ] + list(self._user_function_dict.values())
 
-    def _to_hdf(self, hdf):
+    def _to_hdf(self, hdf: FileHDFio) -> None:
+        """
+        Store the user and system function dictionaries in an HDF file.
+
+        Args:
+            hdf (FileHDFio): The HDF file object.
+        """
         _to_pickle(hdf=hdf, key="user_function_dict", value=self._user_function_dict)
         _to_pickle(
             hdf=hdf, key="system_function_dict", value=self._system_function_dict
         )
 
-    def _from_hdf(self, hdf):
+    def _from_hdf(self, hdf: FileHDFio) -> None:
+        """
+        Load the user and system function dictionaries from an HDF file.
+
+        Args:
+            hdf (FileHDFio): The HDF file object.
+
+        Returns:
+            None
+        """
         self._user_function_dict = _from_pickle(hdf=hdf, key="user_function_dict")
         self._system_function_dict = _from_pickle(hdf=hdf, key="system_function_dict")
 
-    def __setitem__(self, key, item):
+    def __setitem__(self, key: str, item: Union[str, types.FunctionType]) -> None:
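+        """
+        Register an analysis function under ``key``. A string is evaluated
+        into ``lambda job: {key: <expression>}``; a function is registered
+        as provided.
+
+        Example (illustrative):
+
+        >>> container = FunctionContainer()
+        >>> container["energy"] = 'job["output/generic/energy_tot"][-1]'
+        >>> container["id"] = lambda job: {"id": job.job_id}
+        """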
         if isinstance(item, str):
             self._user_function_dict[key] = eval(
                 'lambda job: {"' + key + '":' + item + "}"
@@ -97,17 +140,17 @@ def __setitem__(self, key, item):
         else:
             raise TypeError("unsupported function type!")
 
-    def __getitem__(self, key):
+    def __getitem__(self, key: str) -> callable:
         return self._user_function_dict[key]
 
-    def __getattr__(self, name):
+    def __getattr__(self, name: str) -> callable:
         if name in list(self._system_function_dict.keys()):
             self._system_function_dict[name] = True
             return self._system_function_dict[name]
         else:
             super(FunctionContainer, self).__getattr__(name)
 
-    def __dir__(self):
+    def __dir__(self) -> list:
         return list(self._system_function_dict.keys())
 
 
@@ -118,14 +161,14 @@ class JobFilters(object):
     """
 
     @staticmethod
-    def job_type(job_type):
+    def job_type(job_type: str) -> callable:
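+        """
+        Build a filter which selects jobs of the given job type.
+
+        Example (illustrative):
+
+        >>> flt = JobFilters.job_type("ScriptJob")
+        >>> flt(job)  # True only if job.__name__ == "ScriptJob"
+        """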
         def filter_job_type(job):
             return job.__name__ == job_type
 
         return filter_job_type
 
     @staticmethod
-    def job_name_contains(job_name_segment):
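+        """
+        Build a filter which selects jobs whose name contains the given segment.
+
+        Example (illustrative):
+
+        >>> flt = JobFilters.job_name_contains("relax")
+        >>> flt(job)  # True if "relax" occurs in job.job_name
+        """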
+    def job_name_contains(job_name_segment: str) -> callable:
         def filter_job_name_segment(job):
             return job_name_segment in job.job_name
 
@@ -137,13 +180,13 @@ class PyironTable:
     Class for easy, efficient, and pythonic analysis of data from pyiron projects
 
     Args:
-        project (pyiron.project.Project/None): The project to analyze
+        project (pyiron_base.project.generic.Project): The project to analyze
         name (str): Name of the pyiron table
         system_function_lst (list/None): List of built-in functions
     """
 
     def __init__(
-        self, project, name=None, system_function_lst=None, csv_file_name=None
+        self, project: "pyiron_base.project.generic.Project", name: Optional[str] = None, system_function_lst: Optional[List[callable]]=None, csv_file_name: Optional[str]=None
     ):
         self._project = project
         self._df = pandas.DataFrame({})
@@ -157,7 +200,7 @@ def __init__(
         self._csv_file = csv_file_name
 
     @property
-    def filter(self):
+    def filter(self) -> JobFilters:
         """
         Object containing pre-defined filter functions
 
@@ -168,7 +211,7 @@ def filter(self):
         return self._filter
 
     @property
-    def name(self):
+    def name(self) -> str:
         """
         Name of the table. Takes the project name if not specified
 
@@ -181,7 +224,7 @@ def name(self):
         return self._name
 
     @property
-    def db_filter_function(self):
+    def db_filter_function(self) -> Union[callable, None]:
         """
         Function to filter a project database table before job-specific functions are applied.
 
@@ -198,11 +241,11 @@ def db_filter_function(self):
         return self._db_filter_function
 
     @db_filter_function.setter
-    def db_filter_function(self, funct):
+    def db_filter_function(self, funct: callable) -> None:
         self._db_filter_function = funct
 
     @property
-    def filter_function(self):
+    def filter_function(self) -> Union[callable, None]:
         """
         Function to filter each job before more expensive functions are applied
 
@@ -216,10 +259,26 @@ def filter_function(self):
         return self._filter_function
 
     @filter_function.setter
-    def filter_function(self, funct):
+    def filter_function(self, funct: callable):
         self._filter_function = funct
 
     def _get_new_functions(self, file: FileHDFio) -> Tuple[List, List]:
+        """
+        Get new user-defined and system functions from an HDF5 file.
+
+        Args:
+            file (FileHDFio): The HDF5 file to extract data from.
+
+        Returns:
+            Tuple[List, List]: A tuple containing two lists:
+                - new_user_functions (List): A list of new user-defined functions.
+                - new_system_functions (List): A list of new system functions.
+
+        Note:
+            If the stored function dictionaries cannot be read back
+            (IndexError, ValueError, or TypeError during loading),
+            empty lists are returned instead.
+        """
         try:
             (
                 temp_user_function_dict,
@@ -240,7 +299,7 @@ def _get_new_functions(self, file: FileHDFio) -> Tuple[List, List]:
             new_system_functions = []
         return new_user_functions, new_system_functions
 
-    def create_table(self, file, job_status_list, executor=None, enforce_update=False):
+    def create_table(self, file: FileHDFio, job_status_list: List[str], executor: Optional["concurrent.futures.Executor"]=None, enforce_update: bool=False):
         """
         Create or update the table.
 
@@ -299,21 +358,21 @@ def create_table(self, file, job_status_list, executor=None, enforce_update=Fals
             if len(df_new_ids) > 0:
                 self._df = pandas.concat([self._df, df_new_ids], ignore_index=True)
 
-    def get_dataframe(self):
+    def get_dataframe(self) -> pandas.DataFrame:
         return self._df
 
-    def _list_nodes(self):
+    def _list_nodes(self) -> list:
         return list(self._df.columns)
 
-    def __getitem__(self, item):
+    def __getitem__(self, item: str) -> Union[np.ndarray, None]:
         if item in self.list_nodes():
             return np.array(self._df[item])
         return None
 
-    def __str__(self):
+    def __str__(self) -> str:
         return self._df.__str__()
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         """
         Human readable string representation
 
@@ -323,29 +382,66 @@ def __repr__(self):
         return self._df.__repr__()
 
     @property
-    def _file_name_csv(self):
+    def _file_name_csv(self) -> str:
+        """
+        Get the file name of the CSV file.
+
+        Returns:
+            str: The file name of the CSV file.
+        """
         if self._csv_file is None:
             return self._project.path + self.name + ".csv"
         else:
             return self._csv_file
 
-    def _load_csv(self):
-        # Legacy method to read tables written to csv
+    def _load_csv(self) -> None:
+        """
+        Legacy method to load the table from a CSV file.
+
+        Returns:
+            None
+        """
         self._df = pandas.read_csv(self._file_name_csv)
 
     @staticmethod
-    def _get_data_from_hdf5(hdf):
+    def _get_data_from_hdf5(hdf: FileHDFio) -> Tuple[dict, dict]:
+        """
+        Load user-defined and system function dictionaries from an HDF file.
+
+        Args:
+            hdf (FileHDFio): The HDF file object.
+
+        Returns:
+            Tuple[dict, dict]: A tuple containing two dictionaries:
+                - temp_user_function_dict (dict): The user-defined function dictionary.
+                - temp_system_function_dict (dict): The system function dictionary.
+        """
         temp_user_function_dict = _from_pickle(hdf=hdf, key="user_function_dict")
         temp_system_function_dict = _from_pickle(hdf=hdf, key="system_function_dict")
         return temp_user_function_dict, temp_system_function_dict
 
-    def _get_job_ids(self):
+    def _get_job_ids(self) -> np.ndarray:
+        """
+        Get the job IDs from the dataframe.
+
+        Returns:
+            np.ndarray: An array of job IDs.
+        """
         if len(self._df) > 0:
             return self._df.job_id.values
         else:
             return np.array([])
 
-    def _get_filtered_job_ids_from_project(self, recursive=True):
+    def _get_filtered_job_ids_from_project(self, recursive: bool=True) -> List[int]:
+        """
+        Get the filtered job IDs from the project.
+
+        Args:
+            recursive (bool): Flag to indicate whether to include jobs from subprojects (default is True).
+
+        Returns:
+            List[int]: A list of filtered job IDs.
+        """
         project_table = self._project.job_table(recursive=recursive)
         filter_funct = self.db_filter_function
         return project_table[filter_funct(project_table)]["id"].tolist()
@@ -395,7 +491,7 @@ def _iterate_over_job_lst(
         return pandas.DataFrame(diff_dict_lst)
 
     @staticmethod
-    def total_lst_of_keys(diff_dict_lst):
+    def total_lst_of_keys(diff_dict_lst: List[dict]) -> set:
         """
         Get the unique set of all keys occurring in the list.
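+
+        Example (illustrative):
+
+            >>> PyironTable.total_lst_of_keys([{"a": 1}, {"a": 2, "b": 3}]) == {"a", "b"}
+            True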
         """
@@ -405,7 +501,7 @@ def total_lst_of_keys(diff_dict_lst):
                 total_key_lst.append(key)
         return set(total_key_lst)
 
-    def refill_dict(self, diff_dict_lst):
+    def refill_dict(self, diff_dict_lst: List) -> None:
         """
         Ensure that all dictionaries in the list have the same keys.
 
@@ -417,7 +513,7 @@ def refill_dict(self, diff_dict_lst):
                 if key not in sub_dict.keys():
                     sub_dict[key] = None
 
-    def _collect_job_update_lst(self, job_status_list, job_stored_ids=None):
+    def _collect_job_update_lst(self, job_status_list: List, job_stored_ids: Optional[List]=None) -> List:
         """
         Collect jobs to update the pyiron table.
 
@@ -455,7 +551,7 @@ def _collect_job_update_lst(self, job_status_list, job_stored_ids=None):
                 job_update_lst.append(job_id)
         return job_update_lst
 
-    def _repr_html_(self):
+    def _repr_html_(self) -> str:
         """
         Internal helper function to represent the PyironTable object within the Jupyter Framework
 
@@ -526,11 +622,11 @@ def __init__(self, project, job_name):
         self.analysis_project = project.project
 
     @property
-    def filter(self):
+    def filter(self) -> JobFilters:
         return self._pyiron_table.filter
 
     @property
-    def db_filter_function(self):
+    def db_filter_function(self) -> Union[callable, None]:
         """
         function: database level filter function
 
@@ -540,11 +636,11 @@ def db_filter_function(self):
         return self._pyiron_table.db_filter_function
 
     @db_filter_function.setter
-    def db_filter_function(self, funct):
+    def db_filter_function(self, funct: callable) -> None:
         self._pyiron_table.db_filter_function = funct
 
     @property
-    def filter_function(self):
+    def filter_function(self) -> Union[callable, None]:
         """
         function: job level filter function
 
@@ -554,18 +650,18 @@ def filter_function(self):
         return self._pyiron_table.filter_function
 
     @filter_function.setter
-    def filter_function(self, funct):
+    def filter_function(self, funct: callable) -> None:
         self._pyiron_table.filter_function = funct
 
     @property
-    def job_status(self):
+    def job_status(self) -> List[str]:
         """
         list of str: only jobs with status in this list are included in the table.
         """
         return self._job_status
 
     @job_status.setter
-    def job_status(self, status):
+    def job_status(self, status: Union[str, List[str]]) -> None:
         if isinstance(status, str):
             status = [status]
         for s in status:
@@ -577,20 +673,20 @@ def job_status(self, status):
         self._job_status = status
 
     @property
-    def pyiron_table(self):
+    def pyiron_table(self) -> PyironTable:  
         return self._pyiron_table
 
     @property
     @deprecate("Use analysis_project instead!")
-    def ref_project(self):
+    def ref_project(self) -> "pyiron_base.project.generic.Project":
         return self.analysis_project
 
     @ref_project.setter
-    def ref_project(self, project):
+    def ref_project(self, project: "pyiron_base.project.generic.Project") -> None:
         self.analysis_project = project
 
     @property
-    def analysis_project(self):
+    def analysis_project(self) -> "pyiron_base.project.generic.Project":
         """
         :class:`.Project`: which pyiron project should be searched for jobs
 
@@ -599,7 +695,7 @@ def analysis_project(self):
         return self._analysis_project
 
     @analysis_project.setter
-    def analysis_project(self, project):
+    def analysis_project(self, project: "pyiron_base.project.generic.Project") -> None:
         self._analysis_project = project
         self._pyiron_table = PyironTable(
             project=self._analysis_project,
@@ -608,7 +704,7 @@ def analysis_project(self, project):
         )
 
     @property
-    def add(self):
+    def add(self) -> FunctionContainer:
         """
         Add a function to analyse job data
 
@@ -622,25 +718,31 @@ def add(self):
         return self._pyiron_table.add
 
     @property
-    def convert_to_object(self):
+    def convert_to_object(self) -> bool:
         """
         bool: if `True` fully load jobs before passing them to functions, if `False` use inspect mode.
         """
         return self._pyiron_table.convert_to_object
 
     @convert_to_object.setter
-    def convert_to_object(self, conv_to_obj):
+    def convert_to_object(self, conv_to_obj: bool) -> None:
         self._pyiron_table.convert_to_object = conv_to_obj
 
     @property
-    def enforce_update(self):
+    def enforce_update(self) -> bool:
         """
         bool: if `True` re-evaluate all functions on all jobs when :meth:`.update_table` is called.
         """
         return self._enforce_update
 
     @enforce_update.setter
-    def enforce_update(self, enforce):
+    def enforce_update(self, enforce: bool) -> None:
+        """
+        Set the enforce_update property.
+
+        Args:
+            enforce (bool): If True, re-evaluate all functions on all jobs when update_table is called.
+        """
         if isinstance(enforce, bool):
             if enforce:
                 self._enforce_update = True
@@ -649,15 +751,27 @@ def enforce_update(self, enforce):
             else:
                 self._enforce_update = False
         else:
-            raise TypeError()
+            raise TypeError("enforce must be a boolean")
+
+    def _save_output(self) -> None:
+        """
+        Save the pyiron table dataframe to the HDF5 output file.
 
-    def _save_output(self):
+        Returns:
+            None
+        """
         with self.project_hdf5.open("output") as hdf5_output:
             self.pyiron_table._df.to_hdf(
                 hdf5_output.file_name, key=hdf5_output.h5_path + "/table"
             )
 
-    def _to_dict(self):
+    def _to_dict(self) -> dict:
+        """
+        Convert the TableJob object to a dictionary.
+
+        Returns:
+            dict: The TableJob object represented as a dictionary.
+        """
         job_dict = super()._to_dict()
         job_dict["input/bool_dict"] = {
             "enforce_update": self._enforce_update,
@@ -683,7 +797,17 @@ def _to_dict(self):
             )
         return job_dict
 
-    def _from_dict(self, obj_dict, version=None):
+    def _from_dict(self, obj_dict: dict, version: str=None):
+        """
+        Restore the TableJob object from a dictionary.
+
+        Args:
+            obj_dict (dict): The TableJob object represented as a dictionary.
+            version (str): The version of the object.
+
+        Returns:
+            None
+        """
         super()._from_dict(obj_dict=obj_dict, version=version)
         if "project" in obj_dict["input"].keys():
             project_dict = obj_dict["input"]["project"]
@@ -713,26 +837,31 @@ def _from_dict(self, obj_dict, version=None):
         self._pyiron_table.convert_to_object = bool_dict["convert_to_object"]
         self._pyiron_table.add._from_hdf(obj_dict["input"])
 
-    def to_hdf(self, hdf=None, group_name=None):
+    def to_hdf(self, hdf: Optional["pyiron_base.storage.hdfio.ProjectHDFio"] = None, group_name: Optional[str] = None) -> None:
         """
         Store pyiron table job in HDF5
 
         Args:
-            hdf:
-            group_name:
+            hdf (Optional[ProjectHDFio]): The HDF5 file object.
+            group_name (Optional[str]): The name of the group in the HDF5 file.
 
+        Returns:
+            None
         """
         super(TableJob, self).to_hdf(hdf=hdf, group_name=group_name)
         if len(self.pyiron_table._df) != 0:
             self._save_output()
 
-    def from_hdf(self, hdf=None, group_name=None):
+    def from_hdf(self, hdf: Optional["pyiron_base.storage.hdfio.ProjectHDFio"] = None, group_name: Optional[str] = None) -> None:
         """
         Restore pyiron table job from HDF5
 
         Args:
-            hdf:
-            group_name:
+            hdf (Optional[ProjectHDFio]): The HDF5 file object.
+            group_name (Optional[str]): The name of the group in the HDF5 file.
+
+        Returns:
+            None
         """
         super(TableJob, self).from_hdf(hdf=hdf, group_name=group_name)
         hdf_version = self.project_hdf5.get("HDF_VERSION", "0.1.0")
@@ -757,18 +886,29 @@ def from_hdf(self, hdf=None, group_name=None):
                             json.loads(hdf5_output["table"])
                         )
 
-    def validate_ready_to_run(self):
+    def validate_ready_to_run(self) -> None:
+        """
+        Validate if the job is ready to run.
+
+        Raises:
+            ValueError: If the analysis project is not defined.
+        """
         if self._analysis_project is None:
             raise ValueError("Analysis project not defined!")
 
-    def run_static(self):
+    def run_static(self) -> None:
+        """
+        Run the static analysis job.
+
+        This method creates the working directory, updates the table, and sets the job status to finished.
+        """
         self._create_working_directory()
         self.status.running = True
         self.update_table()
         self.status.finished = True
 
     @deprecate(job_status_list="Use TableJob.job_status instead!")
-    def update_table(self, job_status_list=None):
+    def update_table(self, job_status_list: Optional[List[str]]=None) -> None:
         """
         Update the pyiron table object, add new columns if a new function was added or add new rows for new jobs.
 
@@ -808,7 +948,7 @@ def update_table(self, job_status_list=None):
         self._save_output()
         self.run_time_to_db()
 
-    def get_dataframe(self):
+    def get_dataframe(self) -> pandas.DataFrame:
         """
         Returns aggregated results over all jobs.
 
@@ -818,7 +958,7 @@ def get_dataframe(self):
         return self.pyiron_table._df
 
 
-def always_true_pandas(job_table):
+def always_true_pandas(job_table) -> "pandas.Series":
     """
     A function which returns a pandas Series with all True values based on the size of the input pandas dataframe
     Args:
@@ -844,7 +984,7 @@ def always_true(_):
     return True
 
 
-def _apply_list_of_functions_on_job(input_parameters):
+def _apply_list_of_functions_on_job(input_parameters: Tuple) -> dict:
     from pyiron_snippets.logger import logger
 
     from pyiron_base.jobs.job.path import JobPath
diff --git a/pyiron_base/jobs/job/wrapper.py b/pyiron_base/jobs/job/wrapper.py
index 87290e26d..c39cbb85b 100644
--- a/pyiron_base/jobs/job/wrapper.py
+++ b/pyiron_base/jobs/job/wrapper.py
@@ -7,6 +7,7 @@
 
 import logging
 import os
+from typing import Optional
 
 from pyiron_base.database.filetable import (
     get_hamilton_from_file,
@@ -44,14 +45,14 @@ class JobWrapper(object):
 
     def __init__(
         self,
-        working_directory,
-        job_id=None,
-        hdf5_file=None,
-        h5_path=None,
-        submit_on_remote=False,
-        debug=False,
-        connection_string=None,
-        collect=False,
+        working_directory: str,
+        job_id: Optional[int]=None,
+        hdf5_file: Optional["pyiron_base.storage.hdfio.ProjectHDFio"]=None,
+        h5_path: Optional[str]=None,
+        submit_on_remote: bool=False,
+        debug: bool=False,
+        connection_string: Optional[str]=None,
+        collect: bool=False,
     ):
         self.working_directory = working_directory
         self._remote_flag = submit_on_remote
@@ -94,7 +95,7 @@ def __init__(
         self._logger = self.setup_logger(debug=debug)
 
     @staticmethod
-    def setup_logger(debug=False):
+    def setup_logger(debug: bool = False) -> logging.Logger:
         """
         Setup the error logger
 
@@ -117,7 +118,7 @@ def setup_logger(debug=False):
         logger.addHandler(ch)
         return logger
 
-    def run(self):
+    def run(self) -> None:
         """
         The job wrapper run command, sets the job status to 'running' and executes run_if_modal().
         """
@@ -133,12 +134,12 @@ def run(self):
 
 
 def job_wrapper_function(
-    working_directory,
-    job_id=None,
-    file_path=None,
-    submit_on_remote=False,
-    debug=False,
-    collect=False,
+    working_directory: str,
+    job_id: Optional[int]=None,
+    file_path: Optional[str]=None,
+    submit_on_remote: bool=False,
+    debug: bool=False,
+    collect: bool=False,
 ):
     """
     Job Wrapper function - creates a JobWrapper object and calls run() on that object

From a2387ac343fc92ba677aa7416913a5c0a769a687 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
 <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Sun, 8 Sep 2024 09:34:11 +0000
Subject: [PATCH 04/20] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 pyiron_base/jobs/datamining.py  | 42 ++++++++++++++++++++++++---------
 pyiron_base/jobs/job/wrapper.py | 24 +++++++++----------
 2 files changed, 43 insertions(+), 23 deletions(-)

diff --git a/pyiron_base/jobs/datamining.py b/pyiron_base/jobs/datamining.py
index 75d10a41a..45718bdce 100644
--- a/pyiron_base/jobs/datamining.py
+++ b/pyiron_base/jobs/datamining.py
@@ -8,7 +8,7 @@
 import os
 import types
 from datetime import datetime
-from typing import List, Tuple, Optional, Any, Union
+from typing import Any, List, Optional, Tuple, Union
 
 import cloudpickle
 import numpy as np
@@ -86,7 +86,7 @@ class FunctionContainer(object):
 
     """
 
-    def __init__(self, system_function_lst: Optional[List[callable]]=None):
+    def __init__(self, system_function_lst: Optional[List[callable]] = None):
         if system_function_lst is None:
             system_function_lst = []
         self._user_function_dict = {}
@@ -186,7 +186,11 @@ class PyironTable:
     """
 
     def __init__(
-        self, project: "pyiron_base.project.generic.Project", name: Optional[str] = None, system_function_lst: Optional[List[callable]]=None, csv_file_name: Optional[str]=None
+        self,
+        project: "pyiron_base.project.generic.Project",
+        name: Optional[str] = None,
+        system_function_lst: Optional[List[callable]] = None,
+        csv_file_name: Optional[str] = None,
     ):
         self._project = project
         self._df = pandas.DataFrame({})
@@ -299,7 +303,13 @@ def _get_new_functions(self, file: FileHDFio) -> Tuple[List, List]:
             new_system_functions = []
         return new_user_functions, new_system_functions
 
-    def create_table(self, file: FileHDFio, job_status_list: List[str], executor: Optional["concurrent.futures.Executor"]=None, enforce_update: bool=False):
+    def create_table(
+        self,
+        file: FileHDFio,
+        job_status_list: List[str],
+        executor: Optional["concurrent.futures.Executor"] = None,
+        enforce_update: bool = False,
+    ):
         """
         Create or update the table.
 
@@ -432,7 +442,7 @@ def _get_job_ids(self) -> np.ndarray:
         else:
             return np.array([])
 
-    def _get_filtered_job_ids_from_project(self, recursive: bool=True) -> List[int]:
+    def _get_filtered_job_ids_from_project(self, recursive: bool = True) -> List[int]:
         """
         Get the filtered job IDs from the project.
 
@@ -513,7 +523,9 @@ def refill_dict(self, diff_dict_lst: List) -> None:
                 if key not in sub_dict.keys():
                     sub_dict[key] = None
 
-    def _collect_job_update_lst(self, job_status_list: List, job_stored_ids: Optional[List]=None) -> List:
+    def _collect_job_update_lst(
+        self, job_status_list: List, job_stored_ids: Optional[List] = None
+    ) -> List:
         """
         Collect jobs to update the pyiron table.
 
@@ -673,7 +685,7 @@ def job_status(self, status: Union[str, List[str]]) -> None:
         self._job_status = status
 
     @property
-    def pyiron_table(self) -> PyironTable:  
+    def pyiron_table(self) -> PyironTable:
         return self._pyiron_table
 
     @property
@@ -797,7 +809,7 @@ def _to_dict(self) -> dict:
             )
         return job_dict
 
-    def _from_dict(self, obj_dict: dict, version: str=None):
+    def _from_dict(self, obj_dict: dict, version: str = None):
         """
         Restore the TableJob object from a dictionary.
 
@@ -837,7 +849,11 @@ def _from_dict(self, obj_dict: dict, version: str=None):
         self._pyiron_table.convert_to_object = bool_dict["convert_to_object"]
         self._pyiron_table.add._from_hdf(obj_dict["input"])
 
-    def to_hdf(self, hdf: Optional["pyiron_base.storage.hdfio.ProjectHDFio"] = None, group_name: Optional[str] = None) -> None:
+    def to_hdf(
+        self,
+        hdf: Optional["pyiron_base.storage.hdfio.ProjectHDFio"] = None,
+        group_name: Optional[str] = None,
+    ) -> None:
         """
         Store pyiron table job in HDF5
 
@@ -852,7 +868,11 @@ def to_hdf(self, hdf: Optional["pyiron_base.storage.hdfio.ProjectHDFio"] = None,
         if len(self.pyiron_table._df) != 0:
             self._save_output()
 
-    def from_hdf(self, hdf: Optional["pyiron_base.storage.hdfio.ProjectHDFio"] = None, group_name: Optional[str] = None) -> None:
+    def from_hdf(
+        self,
+        hdf: Optional["pyiron_base.storage.hdfio.ProjectHDFio"] = None,
+        group_name: Optional[str] = None,
+    ) -> None:
         """
         Restore pyiron table job from HDF5
 
@@ -908,7 +928,7 @@ def run_static(self) -> None:
         self.status.finished = True
 
     @deprecate(job_status_list="Use TableJob.job_status instead!")
-    def update_table(self, job_status_list: Optional[List[str]]=None) -> None:
+    def update_table(self, job_status_list: Optional[List[str]] = None) -> None:
         """
         Update the pyiron table object, add new columns if a new function was added or add new rows for new jobs.
 
diff --git a/pyiron_base/jobs/job/wrapper.py b/pyiron_base/jobs/job/wrapper.py
index c39cbb85b..b30a465a1 100644
--- a/pyiron_base/jobs/job/wrapper.py
+++ b/pyiron_base/jobs/job/wrapper.py
@@ -46,13 +46,13 @@ class JobWrapper(object):
     def __init__(
         self,
         working_directory: str,
-        job_id: Optional[int]=None,
-        hdf5_file: Optional["pyiron_base.storage.hdfio.ProjectHDFio"]=None,
-        h5_path: Optional[str]=None,
-        submit_on_remote: bool=False,
-        debug: bool=False,
-        connection_string: Optional[str]=None,
-        collect: bool=False,
+        job_id: Optional[int] = None,
+        hdf5_file: Optional["pyiron_base.storage.hdfio.ProjectHDFio"] = None,
+        h5_path: Optional[str] = None,
+        submit_on_remote: bool = False,
+        debug: bool = False,
+        connection_string: Optional[str] = None,
+        collect: bool = False,
     ):
         self.working_directory = working_directory
         self._remote_flag = submit_on_remote
@@ -135,11 +135,11 @@ def run(self) -> None:
 
 def job_wrapper_function(
     working_directory: str,
-    job_id: Optional[int]=None,
-    file_path: Optional[str]=None,
-    submit_on_remote: bool=False,
-    debug: bool=False,
-    collect: bool=False,
+    job_id: Optional[int] = None,
+    file_path: Optional[str] = None,
+    submit_on_remote: bool = False,
+    debug: bool = False,
+    collect: bool = False,
 ):
     """
     Job Wrapper function - creates a JobWrapper object and calls run() on that object

From 24ca78e8d4b944fe7b7a8990709869021ea045f3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jan=20Jan=C3=9Fen?= <janssen@mpie.de>
Date: Sun, 8 Sep 2024 12:02:25 +0200
Subject: [PATCH 05/20] TemplateJob and run functions

---
 pyiron_base/jobs/job/runfunction.py | 98 ++++++++++++++++++++---------
 pyiron_base/jobs/job/template.py    | 31 ++++++---
 pyiron_base/jobs/job/util.py        | 72 ++++++++++-----------
 3 files changed, 128 insertions(+), 73 deletions(-)

diff --git a/pyiron_base/jobs/job/runfunction.py b/pyiron_base/jobs/job/runfunction.py
index 7a63c9bfb..4a07f233c 100644
--- a/pyiron_base/jobs/job/runfunction.py
+++ b/pyiron_base/jobs/job/runfunction.py
@@ -60,7 +60,7 @@
 """
 
 
-def write_input_files_from_input_dict(input_dict: dict, working_directory: str):
+def write_input_files_from_input_dict(input_dict: dict, working_directory: str) -> None:
     """
     Write input files based on a hierarchical input dictionary. On the first level the input dictionary is divided into
     file_to_create and files_to_copy. Both are dictionaries that use the file names as keys. In file_to_create the values are
@@ -165,7 +165,7 @@ def __call__(
 
 
 # Parameter
-def run_job_with_parameter_repair(job):
+def run_job_with_parameter_repair(job: "pyiron_base.jobs.job.generic.GenericJob") -> None:
     """
     Internal helper function which is called when the run() function is called with the
     'repair' parameter.
@@ -177,7 +177,7 @@ def run_job_with_parameter_repair(job):
 
 
 # Job Status
-def run_job_with_status_initialized(job, debug=False):
+def run_job_with_status_initialized(job: "pyiron_base.jobs.job.generic.GenericJob", debug: bool = False):
     """
     Internal helper function which is called when the job status is 'initialized'. It prepares
     the hdf5 file and the corresponding directory structure.
@@ -204,7 +204,7 @@ def run_job_with_status_initialized(job, debug=False):
             job.run()
 
 
-def run_job_with_status_created(job):
+def run_job_with_status_created(job: "pyiron_base.jobs.job.generic.GenericJob") -> None:
     """
     Internal helper function for run(): called when the job status is 'created'. It executes
     the simulation, either in modal mode, meaning waiting for the simulation to finish, manually, or submits the
@@ -256,8 +256,8 @@ def run_job_with_status_created(job):
 
 
 def run_job_with_status_submitted(
-    job,
-):  # Submitted jobs are handled by the job wrapper!
+    job: "pyiron_base.jobs.job.generic.GenericJob",
+) -> None:  # Submitted jobs are handled by the job wrapper!
     """
     Internal helper function for run(): called when the job status is 'submitted', meaning
     the job is waiting in the queue. ToDo: Display a list of the user's jobs in the queue.
@@ -277,7 +277,7 @@ def run_job_with_status_submitted(
         print("Job " + str(job.job_id) + " is waiting in the que!")
 
 
-def run_job_with_status_running(job):
+def run_job_with_status_running(job: "pyiron_base.jobs.job.generic.GenericJob") -> None:
     """
     Internal helper function for run(): called when the job status is 'running'. It allows the
     user to interact with the simulation while it is running.
@@ -298,7 +298,7 @@ def run_job_with_status_running(job):
         print("Job " + str(job.job_id) + " is running!")
 
 
-def run_job_with_status_refresh(job):
+def run_job_with_status_refresh(job: "pyiron_base.jobs.job.generic.GenericJob") -> None:
     """
     Internal helper function for run(): called when the job status is 'refresh'. If the job was
     suspended previously, it is started again to be continued.
@@ -311,7 +311,7 @@ def run_job_with_status_refresh(job):
     )
 
 
-def run_job_with_status_busy(job):
+def run_job_with_status_busy(job: "pyiron_base.jobs.job.generic.GenericJob") -> None:
     """
     Internal helper function for run(): called when the job status is 'busy'.
 
@@ -323,7 +323,7 @@ def run_job_with_status_busy(job):
     )
 
 
-def run_job_with_status_collect(job):
+def run_job_with_status_collect(job: "pyiron_base.jobs.job.generic.GenericJob") -> None:
     """
     Internal helper function for run(): called when the job status is 'collect'. It collects
     the simulation output using the standardized functions collect_output() and collect_logfiles(). Afterwards the
@@ -352,7 +352,7 @@ def run_job_with_status_collect(job):
     job.update_master()
 
 
-def run_job_with_status_suspended(job):
+def run_job_with_status_suspended(job: "pyiron_base.jobs.job.generic.GenericJob") -> None:
     """
     Internal helper function for run(): called when the job status is 'suspended'. It
     restarts the job by calling the run-if-refresh function after setting the status to 'refresh'.
@@ -368,7 +368,7 @@ def run_job_with_status_suspended(job):
     run_again="Either delete the job via job.remove() or use delete_existing_job=True.",
     version="0.4.0",
 )
-def run_job_with_status_finished(job):
+def run_job_with_status_finished(job: "pyiron_base.jobs.job.generic.GenericJob") -> None:
     """
     Internal helper function for run(): called when the job status is 'finished'. It loads
     the existing job.
@@ -384,7 +384,7 @@ def run_job_with_status_finished(job):
 
 
 # Run Modes
-def run_job_with_runmode_manually(job, _manually_print=True):
+def run_job_with_runmode_manually(job: "pyiron_base.jobs.job.generic.GenericJob", _manually_print: bool=True) -> None:
     """
     Internal helper function to run a job manually.
 
@@ -418,7 +418,7 @@ def run_job_with_runmode_manually(job, _manually_print=True):
             )
 
 
-def run_job_with_runmode_modal(job):
+def run_job_with_runmode_modal(job: "pyiron_base.jobs.job.generic.GenericJob") -> None:
     """
     The run-if-modal function is called by run() to execute the simulation while waiting for the output. For this we
     use subprocess.check_output()
@@ -429,7 +429,7 @@ def run_job_with_runmode_modal(job):
     job.run_static()
 
 
-def run_job_with_runmode_non_modal(job):
+def run_job_with_runmode_non_modal(job: "pyiron_base.jobs.job.generic.GenericJob") -> None:
     """
     The run-if-non-modal function is called by run() to execute the simulation in the background. For this we use
     multiprocessing.Process()
@@ -472,7 +472,7 @@ def run_job_with_runmode_non_modal(job):
             job._process.start()
 
 
-def run_job_with_runmode_queue(job):
+def run_job_with_runmode_queue(job: "pyiron_base.jobs.job.generic.GenericJob") -> None:
     """
     The run-if-queue function is called by run() if the user decides to submit the job to a queuing system. The job
     is submitted to the queuing system using subprocess.Popen()
@@ -547,7 +547,7 @@ def run_job_with_runmode_queue(job):
     )
 
 
-def run_job_with_runmode_srun(job):
+def run_job_with_runmode_srun(job: "pyiron_base.jobs.job.generic.GenericJob") -> None:
     working_directory = job.project_hdf5.working_directory
     if not state.database.database_is_disabled:
         if not state.database.using_local_database:
@@ -582,7 +582,7 @@ def run_job_with_runmode_srun(job):
     )
 
 
-def run_job_with_runmode_executor(job, executor, gpus_per_slot=None):
+def run_job_with_runmode_executor(job: "pyiron_base.jobs.job.generic.GenericJob", executor: "concurrent.futures.Executor", gpus_per_slot: Optional[int]=None) -> None:
     """
     Introduced in Python 3.2, the concurrent.futures interface enables the asynchronous execution of Python programs.
     A function is submitted to the executor and a future object is returned. The future object is updated in the
@@ -630,7 +630,7 @@ def run_job_with_runmode_executor(job, executor, gpus_per_slot=None):
         )
 
 
-def run_job_with_runmode_executor_futures(job, executor):
+def run_job_with_runmode_executor_futures(job: "pyiron_base.jobs.job.generic.GenericJob", executor: "concurrent.futures.Executor") -> None:
     """
     Interface for the ProcessPoolExecutor implemented in the Python standard library as part of the concurrent.futures
     module. The ProcessPoolExecutor does not provide any resource management, so the user is responsible for keeping track of
@@ -671,7 +671,7 @@ def run_job_with_runmode_executor_futures(job, executor):
     )
 
 
-def run_job_with_runmode_executor_flux(job, executor, gpus_per_slot=None):
+def run_job_with_runmode_executor_flux(job: "pyiron_base.jobs.job.generic.GenericJob", executor: "concurrent.futures.Executor", gpus_per_slot: Optional[int]=None) -> None:
     """
     Interface for the flux.job.FluxExecutor executor. Flux is a hierarchical resource manager. It can either be used to
     replace queuing systems like SLURM or be used as a user-specific queuing system within an existing allocation.
@@ -726,8 +726,8 @@ def run_job_with_runmode_executor_flux(job, executor, gpus_per_slot=None):
     job.server.future = executor.submit(jobspec)
 
 
-def run_time_decorator(func):
-    def wrapper(job):
+def run_time_decorator(func: callable) -> callable:
+    def wrapper(job: "pyiron_base.jobs.job.generic.GenericJob"):
         if not state.database.database_is_disabled and job.job_id is not None:
             job.project.db.item_update({"timestart": datetime.now()}, job.job_id)
             output = func(job)
@@ -749,6 +749,30 @@ def execute_subprocess(
     conda_environment_name: Optional[str] = None,
     conda_environment_path: Optional[str] = None,
 ) -> str:
+    """
+    Execute a subprocess with the given parameters.
+
+    Args:
+        executable (str): The executable command to run.
+        shell (bool): If True, the command will be executed through the shell.
+        working_directory (str): The working directory for the subprocess.
+        cores (int, optional): The number of CPU cores to allocate. Defaults to 1.
+        threads (int, optional): The number of threads to allocate. Defaults to 1.
+        gpus (int, optional): The number of GPUs to allocate. Defaults to 1.
+        conda_environment_name (str, optional): The name of the conda environment to activate. Defaults to None.
+        conda_environment_path (str, optional): The path to the conda environment to activate. Defaults to None.
+
+    Returns:
+        str: The output of the subprocess.
+
+    Raises:
+        subprocess.CalledProcessError: If the subprocess returns a non-zero exit status.
+
+    Note:
+        - If both `conda_environment_name` and `conda_environment_path` are None, the subprocess will be executed using `subprocess.run`.
+        - If `conda_environment_name` is not None, the subprocess will be executed using `conda_subprocess.run` with the specified environment name.
+        - If `conda_environment_path` is not None, the subprocess will be executed using `conda_subprocess.run` with the specified environment path.
+    """
     environment_dict = os.environ.copy()
     environment_dict.update(
         {
@@ -790,7 +814,7 @@ def execute_subprocess(
 
 
 @run_time_decorator
-def execute_job_with_external_executable(job):
+def execute_job_with_external_executable(job: "pyiron_base.jobs.job.generic.GenericJob") -> str:
     """
     The run static function is called by run to execute the simulation.
 
@@ -833,7 +857,7 @@ def execute_job_with_external_executable(job):
     return out
 
 
-def handle_finished_job(job, job_crashed=False, collect_output=True):
+def handle_finished_job(job: "pyiron_base.jobs.job.generic.GenericJob", job_crashed: bool=False, collect_output: bool=True) -> None:
     """
     Handle finished jobs: collect the calculation output and set the status to aborted if the job crashed
 
@@ -851,7 +875,7 @@ def handle_finished_job(job, job_crashed=False, collect_output=True):
         job._hdf5["status"] = job.status.string
 
 
-def handle_failed_job(job, error):
+def handle_failed_job(job: "pyiron_base.jobs.job.generic.GenericJob", error: subprocess.SubprocessError) -> Tuple[bool, str]:
     """
     Handle failed jobs: write the error message to a text file and update the database
 
@@ -878,7 +902,7 @@ def handle_failed_job(job, error):
         raise_runtimeerror_for_failed_job(job=job)
 
 
-def raise_runtimeerror_for_failed_job(job):
+def raise_runtimeerror_for_failed_job(job: "pyiron_base.jobs.job.generic.GenericJob") -> None:
     job._logger.warning("Job aborted")
     job.status.aborted = True
     job._hdf5["status"] = job.status.string
@@ -889,8 +913,24 @@ def raise_runtimeerror_for_failed_job(job):
 
 
 def multiprocess_wrapper(
-    working_directory, job_id=None, file_path=None, debug=False, connection_string=None
+    working_directory: str, job_id: Optional[int]=None, file_path: Optional[str]=None, debug: bool=False, connection_string: Optional[str]=None
 ):
+    """
+    Wrapper function for running a job in a separate process.
+
+    Args:
+        working_directory (str): The working directory for the job.
+        job_id (Optional[int], optional): The ID of the job. Defaults to None.
+        file_path (Optional[str], optional): The file path of the job. Defaults to None.
+        debug (bool, optional): Whether to run the job in debug mode. Defaults to False.
+        connection_string (Optional[str], optional): The connection string for the job. Defaults to None.
+
+    Raises:
+        ValueError: If both job_id and file_path are None.
+
+    Returns:
+        None
+    """
     if job_id is not None:
         job_wrap = JobWrapper(
             working_directory=str(working_directory),
@@ -976,7 +1016,7 @@ def execute_command_with_error_handling(
 
 
 @run_time_decorator
-def execute_job_with_calculate_function(job):
+def execute_job_with_calculate_function(job: "pyiron_base.jobs.job.generic.GenericJob") -> None:
     """
     The run_static() function is called internally in pyiron to trigger the execution of the executable. This is
     typically divided into three steps: (1) the generation of the calculate function and its inputs, (2) the
@@ -1008,7 +1048,7 @@ def execute_job_with_calculate_function(job):
                 job.status.finished = True
 
 
-def _generate_flux_execute_string(job, database_is_disabled):
+def _generate_flux_execute_string(job: "pyiron_base.jobs.job.generic.GenericJob", database_is_disabled: bool) -> Tuple[str, str]:
     from jinja2 import Template
 
     if not database_is_disabled:
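Note: run_time_decorator is compact enough to restate; a simplified sketch of
its behavior (the database_is_disabled guard from the diff is omitted, and the
"timestop" update is assumed as the counterpart of the "timestart" update shown
above):

    from datetime import datetime

    def run_time_decorator(func):
        # Record start and stop times of the wrapped run function in the
        # job database.
        def wrapper(job):
            job.project.db.item_update({"timestart": datetime.now()}, job.job_id)
            output = func(job)
            job.project.db.item_update({"timestop": datetime.now()}, job.job_id)
            return output

        return wrapper
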
diff --git a/pyiron_base/jobs/job/template.py b/pyiron_base/jobs/job/template.py
index 46687204f..b3b64f50e 100644
--- a/pyiron_base/jobs/job/template.py
+++ b/pyiron_base/jobs/job/template.py
@@ -4,7 +4,9 @@
 """
 Template class to define jobs
 """
+from typing import Optional
 
+from pyiron_base.storage.datacontainer import DataContainer
 from pyiron_base.interfaces.object import HasStorage
 from pyiron_base.jobs.job.generic import GenericJob
 
@@ -64,36 +66,49 @@ class TemplateJob(GenericJob, HasStorage):
 
     """
 
-    def __init__(self, project, job_name):
+    def __init__(self, project: "pyiron_base.storage.hdfio.ProjectHDFio", job_name: str):
         GenericJob.__init__(self, project, job_name)
         HasStorage.__init__(self, group_name="")
         self.storage.create_group("input")
         self.storage.create_group("output")
 
     @property
-    def input(self):
+    def input(self) -> DataContainer:
         return self.storage.input
 
     @property
-    def output(self):
+    def output(self) -> DataContainer:
         return self.storage.output
 
-    def _to_dict(self):
+    def _to_dict(self) -> dict:
+        """
+        Convert the job object to a dictionary.
+
+        Returns:
+            dict: A dictionary representation of the job object.
+        """
         job_dict = super()._to_dict()
         job_dict["input/data"] = self.storage.input.to_dict()
         return job_dict
 
-    def _from_dict(self, obj_dict, version=None):
+    def _from_dict(self, obj_dict: dict, version: str=None):
+        """
+        Update the object attributes from a dictionary representation.
+
+        Args:
+            obj_dict (dict): The dictionary containing the object attributes.
+            version (str, optional): The version of the dictionary format. Defaults to None.
+        """
         super()._from_dict(obj_dict=obj_dict, version=version)
         input_dict = obj_dict["input"]
         if "data" in input_dict.keys():
             self.storage.input.update(input_dict["data"])
 
-    def to_hdf(self, hdf=None, group_name=None):
+    def to_hdf(self, hdf: Optional["pyiron_base.storage.hdfio.ProjectHDFio"]=None, group_name: Optional[str]=None) -> None:
         GenericJob.to_hdf(self=self, hdf=hdf, group_name=group_name)
         HasStorage.to_hdf(self=self, hdf=self.project_hdf5)
 
-    def from_hdf(self, hdf=None, group_name=None):
+    def from_hdf(self, hdf: Optional["pyiron_base.storage.hdfio.ProjectHDFio"]=None, group_name: Optional[str]=None) -> None:
         GenericJob.from_hdf(self=self, hdf=hdf, group_name=group_name)
         HasStorage.from_hdf(self=self, hdf=self.project_hdf5)
 
@@ -131,7 +146,7 @@ class PythonTemplateJob(TemplateJob):
 
     """
 
-    def __init__(self, project, job_name):
+    def __init__(self, project: "pyiron_base.storeage.hdfio.ProjectHDFio", job_name: str):
         super().__init__(project, job_name)
         self._job_with_calculate_function = True
         self._write_work_dir_warnings = False
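Note: the typed input/output DataContainer properties are easiest to see in
use; a minimal, hypothetical subclass (class name and fields invented for
illustration, following the usual PythonTemplateJob pattern):

    from pyiron_base.jobs.job.template import PythonTemplateJob

    class ToyJob(PythonTemplateJob):  # hypothetical example class
        def __init__(self, project, job_name):
            super().__init__(project, job_name)
            self.input.x = 1  # stored in the "input" DataContainer group

        def run_static(self):
            # write a result into the "output" DataContainer group and
            # persist it; whether an explicit to_hdf() call is needed
            # depends on the run mode.
            self.output.y = self.input.x * 2
            self.to_hdf()
            self.status.finished = True
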
diff --git a/pyiron_base/jobs/job/util.py b/pyiron_base/jobs/job/util.py
index 1bc03a995..8ecb875a3 100644
--- a/pyiron_base/jobs/job/util.py
+++ b/pyiron_base/jobs/job/util.py
@@ -32,7 +32,16 @@
 __date__ = "Nov 28, 2020"
 
 
-def _copy_database_entry(new_job_core, job_copied_id):
+_special_symbol_replacements = {
+    ".": "d",
+    "-": "m",
+    "+": "p",
+    ",": "c",
+    " ": "_",
+}
+
+
+def _copy_database_entry(new_job_core: "pyiron_base.jobs.job.generic.GenericJob", job_copied_id: int) -> None:
     """
     Copy database entry from previous job
 
@@ -51,7 +60,7 @@ def _copy_database_entry(new_job_core, job_copied_id):
         new_job_core.reset_job_id(job_id=job_id)
 
 
-def _copy_to_delete_existing(project_class, job_name, delete_job):
+def _copy_to_delete_existing(project_class: "pyiron_base.project.generic.Project", job_name: str, delete_job: bool):
     """
     Check if the job already exists in the project; if that is the case, either
     delete it or reload it, depending on the setting of delete_job
@@ -75,7 +84,7 @@ def _copy_to_delete_existing(project_class, job_name, delete_job):
             return None
 
 
-def _get_project_for_copy(job, project, new_job_name):
+def _get_project_for_copy(job: "pyiron_base.jobs.job.core.JobCore", project: Optional[Union["pyiron_base.project.generic.Project", "pyiron_base.jobs.job.core.JobCore", "pyiron_base.storage.hdfio.ProjectHDFio"]], new_job_name: str) -> Tuple["pyiron_base.project.generic.Project", "pyiron_base.storage.hdfio.ProjectHDFio"]:
     """
     Internal helper function to generate a project and hdf5 project for copying
 
@@ -113,20 +122,11 @@ def _get_project_for_copy(job, project, new_job_name):
     return file_project, hdf5_project
 
 
-_special_symbol_replacements = {
-    ".": "d",
-    "-": "m",
-    "+": "p",
-    ",": "c",
-    " ": "_",
-}
-
-
 def _get_safe_job_name(
     name: Union[str, tuple],
     ndigits: Optional[int] = 8,
     special_symbols: Optional[dict] = None,
-):
+) -> str:
     """
     Sanitize a job name, optionally appending numeric values.
 
@@ -167,7 +167,7 @@ def round_(value, ndigits=ndigits):
 )
 
 
-def _rename_job(job, new_job_name):
+def _rename_job(job: Union["pyiron_base.jobs.job.generic.GenericJob", "pyiron_base.jobs.job.core.JobCore"], new_job_name: str) -> None:
     """ """
     new_job_name = _get_safe_job_name(new_job_name)
     child_ids = job.child_ids
@@ -201,7 +201,7 @@ def _rename_job(job, new_job_name):
         )
 
 
-def _is_valid_job_name(job_name):
+def _is_valid_job_name(job_name: str) -> None:
     """
     Internal function to validate the job_name - only available in Python 3.4+
 
@@ -218,7 +218,7 @@ def _is_valid_job_name(job_name):
         )
 
 
-def _get_restart_copy_dict(job):
+def _get_restart_copy_dict(job: "pyiron_base.jobs.job.generic.GenericJob") -> dict:
     copy_dict = {}
     for i, actual_name in enumerate(
         [os.path.basename(f) for f in job.restart_file_list]
@@ -231,7 +231,7 @@ def _get_restart_copy_dict(job):
     return copy_dict
 
 
-def _copy_restart_files(job):
+def _copy_restart_files(job: "pyiron_base.jobs.job.generic.GenericJob") -> None:
     """
     Internal helper function to copy the files required for the restart job.
     """
@@ -246,7 +246,7 @@ def _copy_restart_files(job):
         )
 
 
-def _kill_child(job):
+def _kill_child(job: "pyiron_base.jobs.job.core.JobCore") -> None:
     """
     Internal helper function to kill a child process.
 
@@ -273,19 +273,19 @@ def _kill_child(job):
                     job_process.kill()
 
 
-def _job_compressed_name(job):
+def _job_compressed_name(job: "pyiron_base.jobs.job.core.JobCore") -> str:
     """Return the canonical file name of a compressed job."""
     return _get_compressed_job_name(working_directory=job.working_directory)
 
 
-def _get_compressed_job_name(working_directory):
+def _get_compressed_job_name(working_directory: str) -> str:
     """Return the canonical file name of a compressed job from the working directory."""
     return os.path.join(
         working_directory, os.path.basename(working_directory) + ".tar.bz2"
     )
 
 
-def _job_compress(job, files_to_compress=[], files_to_remove=[]):
+def _job_compress(job: "pyiron_base.jobs.job.core.JobCore", files_to_compress: list=[], files_to_remove: list=[]) -> None:
     """
     Compress the output files of a job object.
 
@@ -322,7 +322,7 @@ def delete_file_or_folder(fullname):
         job.logger.info("The files are already compressed!")
 
 
-def _job_decompress(job):
+def _job_decompress(job: "pyiron_base.jobs.job.core.JobCore") -> None:
     """
     Decompress the output files of a compressed job object.
 
@@ -338,7 +338,7 @@ def _job_decompress(job):
         pass
 
 
-def _working_directory_is_compressed(working_directory):
+def _working_directory_is_compressed(working_directory: str) -> bool:
     """
     Check if the working directory of a given job is already compressed or not.
 
@@ -354,7 +354,7 @@ def _working_directory_is_compressed(working_directory):
     return compressed_name in os.listdir(working_directory)
 
 
-def _job_is_compressed(job):
+def _job_is_compressed(job: "pyiron_base.jobs.job.core.JobCore") -> bool:
     """
     Check if the job is already compressed or not.
 
@@ -367,7 +367,7 @@ def _job_is_compressed(job):
     return _working_directory_is_compressed(working_directory=job.working_directory)
 
 
-def _working_directory_list_files(working_directory, include_archive=True):
+def _working_directory_list_files(working_directory: str, include_archive: bool=True) -> list:
     """
     Returns the list of files in the job's working directory.
 
@@ -399,7 +399,7 @@ def _working_directory_list_files(working_directory, include_archive=True):
     return []
 
 
-def _job_list_files(job):
+def _job_list_files(job: "pyiron_base.jobs.job.core.JobCore") -> list:
     """
     Returns the list of files in the job's working directory.
 
@@ -414,7 +414,7 @@ def _job_list_files(job):
     return _working_directory_list_files(working_directory=job.working_directory)
 
 
-def _working_directory_read_file(working_directory, file_name, tail=None):
+def _working_directory_read_file(working_directory: str, file_name: str, tail: Optional[int]=None) -> list:
     """
     Return list of lines of the given file.
 
@@ -472,7 +472,7 @@ def _working_directory_read_file(working_directory, file_name, tail=None):
             return lines
 
 
-def _job_read_file(job, file_name, tail=None):
+def _job_read_file(job: "pyiron_base.jobs.job.core.JobCore", file_name: str, tail: Optional[int]=None) -> list:
     """
     Return list of lines of the given file.
 
@@ -493,7 +493,7 @@ def _job_read_file(job, file_name, tail=None):
     )
 
 
-def _job_archive(job):
+def _job_archive(job: "pyiron_base.jobs.job.core.JobCore") -> None:
     """
     Compress HDF5 file of the job object to tar-archive
 
@@ -522,7 +522,7 @@ def _job_archive(job):
         os.chdir(cwd)
 
 
-def _job_unarchive(job):
+def _job_unarchive(job: "pyiron_base.jobs.job.core.JobCore") -> None:
     """
     Decompress HDF5 file of the job object from tar-archive
 
@@ -539,7 +539,7 @@ def _job_unarchive(job):
         pass
 
 
-def _job_is_archived(job):
+def _job_is_archived(job: "pyiron_base.jobs.job.core.JobCore") -> bool:
     """
     Check if the HDF5 file of the Job is compressed as tar-archive
 
@@ -554,7 +554,7 @@ def _job_is_archived(job):
     )
 
 
-def _job_delete_hdf(job):
+def _job_delete_hdf(job: "pyiron_base.jobs.job.core.JobCore") -> None:
     """
     Delete HDF5 file of job object
 
@@ -565,7 +565,7 @@ def _job_delete_hdf(job):
         os.remove(job.project_hdf5.file_name)
 
 
-def _job_delete_files(job):
+def _job_delete_files(job: "pyiron_base.jobs.job.core.JobCore") -> None:
     """
     Delete files in the working directory of job object
 
@@ -579,7 +579,7 @@ def _job_delete_files(job):
         job._import_directory = None
 
 
-def _job_remove_folder(job):
+def _job_remove_folder(job: "pyiron_base.jobs.job.core.JobCore") -> None:
     """
     Delete the working directory of the job object
 
@@ -591,7 +591,7 @@ def _job_remove_folder(job):
         shutil.rmtree(working_directory)
 
 
-def _job_store_before_copy(job):
+def _job_store_before_copy(job: "pyiron_base.jobs.job.core.JobCore") -> bool:
     """
     Store job in HDF5 file for copying
 
@@ -609,7 +609,7 @@ def _job_store_before_copy(job):
     return delete_file_after_copy
 
 
-def _job_reload_after_copy(job, delete_file_after_copy):
+def _job_reload_after_copy(job: "pyiron_base.jobs.job.core.JobCore", delete_file_after_copy: bool) -> None:
     """
     Reload job from HDF5 file after copying
 

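Note: moving _special_symbol_replacements above its first use keeps util.py
readable top-to-bottom. How the table sanitizes a job name, as an illustrative
sketch (the loop is not the literal implementation of _get_safe_job_name):

    _special_symbol_replacements = {
        ".": "d",
        "-": "m",
        "+": "p",
        ",": "c",
        " ": "_",
    }

    def sanitize(name: str) -> str:
        # replace every special symbol according to the table
        for symbol, replacement in _special_symbol_replacements.items():
            name = name.replace(symbol, replacement)
        return name

    print(sanitize("Fe-3.5+x, test"))  # Fem3d5pxc_test
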
From fef81cf2c0bec6feddd5e5b3e2dfd8c153a91681 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
 <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Sun, 8 Sep 2024 10:02:34 +0000
Subject: [PATCH 06/20] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 pyiron_base/jobs/job/runfunction.py | 73 ++++++++++++++++++++++-------
 pyiron_base/jobs/job/template.py    | 25 +++++++---
 pyiron_base/jobs/job/util.py        | 57 ++++++++++++++++++----
 3 files changed, 123 insertions(+), 32 deletions(-)

diff --git a/pyiron_base/jobs/job/runfunction.py b/pyiron_base/jobs/job/runfunction.py
index 4a07f233c..1105fc24c 100644
--- a/pyiron_base/jobs/job/runfunction.py
+++ b/pyiron_base/jobs/job/runfunction.py
@@ -165,7 +165,9 @@ def __call__(
 
 
 # Parameter
-def run_job_with_parameter_repair(job: "pyiron_base.jobs.job.generic.GenericJob") -> None:
+def run_job_with_parameter_repair(
+    job: "pyiron_base.jobs.job.generic.GenericJob",
+) -> None:
     """
     Internal helper function for run(): called when the run() function is invoked with the
     'repair' parameter.
@@ -177,7 +179,9 @@ def run_job_with_parameter_repair(job: "pyiron_base.jobs.job.generic.GenericJob"
 
 
 # Job Status
-def run_job_with_status_initialized(job: "pyiron_base.jobs.job.generic.GenericJob", debug: bool = False):
+def run_job_with_status_initialized(
+    job: "pyiron_base.jobs.job.generic.GenericJob", debug: bool = False
+):
     """
     Internal helper function for run(): called when the job status is 'initialized'. It prepares
     the hdf5 file and the corresponding directory structure.
@@ -352,7 +356,9 @@ def run_job_with_status_collect(job: "pyiron_base.jobs.job.generic.GenericJob")
     job.update_master()
 
 
-def run_job_with_status_suspended(job: "pyiron_base.jobs.job.generic.GenericJob") -> None:
+def run_job_with_status_suspended(
+    job: "pyiron_base.jobs.job.generic.GenericJob",
+) -> None:
     """
     Internal helper function for run(): called when the job status is 'suspended'. It
     restarts the job by calling the run-if-refresh function after setting the status to 'refresh'.
@@ -368,7 +374,9 @@ def run_job_with_status_suspended(job: "pyiron_base.jobs.job.generic.GenericJob"
     run_again="Either delete the job via job.remove() or use delete_existing_job=True.",
     version="0.4.0",
 )
-def run_job_with_status_finished(job: "pyiron_base.jobs.job.generic.GenericJob") -> None:
+def run_job_with_status_finished(
+    job: "pyiron_base.jobs.job.generic.GenericJob",
+) -> None:
     """
     Internal helper function for run(): called when the job status is 'finished'. It loads
     the existing job.
@@ -384,7 +392,9 @@ def run_job_with_status_finished(job: "pyiron_base.jobs.job.generic.GenericJob")
 
 
 # Run Modes
-def run_job_with_runmode_manually(job: "pyiron_base.jobs.job.generic.GenericJob", _manually_print: bool=True) -> None:
+def run_job_with_runmode_manually(
+    job: "pyiron_base.jobs.job.generic.GenericJob", _manually_print: bool = True
+) -> None:
     """
     Internal helper function to run a job manually.
 
@@ -429,7 +439,9 @@ def run_job_with_runmode_modal(job: "pyiron_base.jobs.job.generic.GenericJob") -
     job.run_static()
 
 
-def run_job_with_runmode_non_modal(job: "pyiron_base.jobs.job.generic.GenericJob") -> None:
+def run_job_with_runmode_non_modal(
+    job: "pyiron_base.jobs.job.generic.GenericJob",
+) -> None:
     """
     The run-if-non-modal function is called by run() to execute the simulation in the background. For this we use
     multiprocessing.Process()
@@ -582,7 +594,11 @@ def run_job_with_runmode_srun(job: "pyiron_base.jobs.job.generic.GenericJob") ->
     )
 
 
-def run_job_with_runmode_executor(job: "pyiron_base.jobs.job.generic.GenericJob", executor: "concurrent.futures.Executor", gpus_per_slot: Optional[int]=None) -> None:
+def run_job_with_runmode_executor(
+    job: "pyiron_base.jobs.job.generic.GenericJob",
+    executor: "concurrent.futures.Executor",
+    gpus_per_slot: Optional[int] = None,
+) -> None:
     """
     Introduced in Python 3.2, the concurrent.futures interface enables the asynchronous execution of Python programs.
     A function is submitted to the executor and a future object is returned. The future object is updated in the
@@ -630,7 +646,10 @@ def run_job_with_runmode_executor(job: "pyiron_base.jobs.job.generic.GenericJob"
         )
 
 
-def run_job_with_runmode_executor_futures(job: "pyiron_base.jobs.job.generic.GenericJob", executor: "concurrent.futures.Executor") -> None:
+def run_job_with_runmode_executor_futures(
+    job: "pyiron_base.jobs.job.generic.GenericJob",
+    executor: "concurrent.futures.Executor",
+) -> None:
     """
     Interface for the ProcessPoolExecutor implemented in the Python standard library as part of the concurrent.futures
     module. The ProcessPoolExecutor does not provide any resource management, so the user is responsible for keeping track of
@@ -671,7 +690,11 @@ def run_job_with_runmode_executor_futures(job: "pyiron_base.jobs.job.generic.Gen
     )
 
 
-def run_job_with_runmode_executor_flux(job: "pyiron_base.jobs.job.generic.GenericJob", executor: "concurrent.futures.Executor", gpus_per_slot: Optional[int]=None) -> None:
+def run_job_with_runmode_executor_flux(
+    job: "pyiron_base.jobs.job.generic.GenericJob",
+    executor: "concurrent.futures.Executor",
+    gpus_per_slot: Optional[int] = None,
+) -> None:
     """
     Interface for the flux.job.FluxExecutor executor. Flux is a hierarchical resource manager. It can either be used to
     replace queuing systems like SLURM or be used as a user-specific queuing system within an existing allocation.
@@ -814,7 +837,9 @@ def execute_subprocess(
 
 
 @run_time_decorator
-def execute_job_with_external_executable(job: "pyiron_base.jobs.job.generic.GenericJob") -> str:
+def execute_job_with_external_executable(
+    job: "pyiron_base.jobs.job.generic.GenericJob",
+) -> str:
     """
     The run static function is called by run to execute the simulation.
 
@@ -857,7 +882,11 @@ def execute_job_with_external_executable(job: "pyiron_base.jobs.job.generic.Gene
     return out
 
 
-def handle_finished_job(job: "pyiron_base.jobs.job.generic.GenericJob", job_crashed: bool=False, collect_output: bool=True) -> None:
+def handle_finished_job(
+    job: "pyiron_base.jobs.job.generic.GenericJob",
+    job_crashed: bool = False,
+    collect_output: bool = True,
+) -> None:
     """
     Handle finished jobs: collect the calculation output and set the status to aborted if the job crashed
 
@@ -875,7 +904,9 @@ def handle_finished_job(job: "pyiron_base.jobs.job.generic.GenericJob", job_cras
         job._hdf5["status"] = job.status.string
 
 
-def handle_failed_job(job: "pyiron_base.jobs.job.generic.GenericJob", error: subprocess.SubprocessError) -> Tuple[bool, str]:
+def handle_failed_job(
+    job: "pyiron_base.jobs.job.generic.GenericJob", error: subprocess.SubprocessError
+) -> Tuple[bool, str]:
     """
     Handle failed jobs: write the error message to a text file and update the database
 
@@ -902,7 +933,9 @@ def handle_failed_job(job: "pyiron_base.jobs.job.generic.GenericJob", error: sub
         raise_runtimeerror_for_failed_job(job=job)
 
 
-def raise_runtimeerror_for_failed_job(job: "pyiron_base.jobs.job.generic.GenericJob") -> None:
+def raise_runtimeerror_for_failed_job(
+    job: "pyiron_base.jobs.job.generic.GenericJob",
+) -> None:
     job._logger.warning("Job aborted")
     job.status.aborted = True
     job._hdf5["status"] = job.status.string
@@ -913,7 +946,11 @@ def raise_runtimeerror_for_failed_job(job: "pyiron_base.jobs.job.generic.Generic
 
 
 def multiprocess_wrapper(
-    working_directory: str, job_id: Optional[int]=None, file_path: Optional[str]=None, debug: bool=False, connection_string: Optional[str]=None
+    working_directory: str,
+    job_id: Optional[int] = None,
+    file_path: Optional[str] = None,
+    debug: bool = False,
+    connection_string: Optional[str] = None,
 ):
     """
     Wrapper function for running a job in a separate process.
@@ -1016,7 +1053,9 @@ def execute_command_with_error_handling(
 
 
 @run_time_decorator
-def execute_job_with_calculate_function(job: "pyiron_base.jobs.job.generic.GenericJob") -> None:
+def execute_job_with_calculate_function(
+    job: "pyiron_base.jobs.job.generic.GenericJob",
+) -> None:
     """
     The run_static() function is called internally in pyiron to trigger the execution of the executable. This is
     typically divided into three steps: (1) the generation of the calculate function and its inputs, (2) the
@@ -1048,7 +1087,9 @@ def execute_job_with_calculate_function(job: "pyiron_base.jobs.job.generic.Gener
                 job.status.finished = True
 
 
-def _generate_flux_execute_string(job: "pyiron_base.jobs.job.generic.GenericJob", database_is_disabled: bool) -> Tuple[str, str]:
+def _generate_flux_execute_string(
+    job: "pyiron_base.jobs.job.generic.GenericJob", database_is_disabled: bool
+) -> Tuple[str, str]:
     from jinja2 import Template
 
     if not database_is_disabled:
diff --git a/pyiron_base/jobs/job/template.py b/pyiron_base/jobs/job/template.py
index b3b64f50e..98c09df65 100644
--- a/pyiron_base/jobs/job/template.py
+++ b/pyiron_base/jobs/job/template.py
@@ -4,11 +4,12 @@
 """
 Template class to define jobs
 """
+
 from typing import Optional
 
-from pyiron_base.storage.datacontainer import DataContainer
 from pyiron_base.interfaces.object import HasStorage
 from pyiron_base.jobs.job.generic import GenericJob
+from pyiron_base.storage.datacontainer import DataContainer
 
 __author__ = "Jan Janssen"
 __copyright__ = (
@@ -66,7 +67,9 @@ class TemplateJob(GenericJob, HasStorage):
 
     """
 
-    def __init__(self, project: "pyiron_base.storage.hdfio.ProjectHDFio", job_name: str):
+    def __init__(
+        self, project: "pyiron_base.storage.hdfio.ProjectHDFio", job_name: str
+    ):
         GenericJob.__init__(self, project, job_name)
         HasStorage.__init__(self, group_name="")
         self.storage.create_group("input")
@@ -91,7 +94,7 @@ def _to_dict(self) -> dict:
         job_dict["input/data"] = self.storage.input.to_dict()
         return job_dict
 
-    def _from_dict(self, obj_dict: dict, version: str=None):
+    def _from_dict(self, obj_dict: dict, version: str = None):
         """
         Update the object attributes from a dictionary representation.
 
@@ -104,11 +107,19 @@ def _from_dict(self, obj_dict: dict, version: str=None):
         if "data" in input_dict.keys():
             self.storage.input.update(input_dict["data"])
 
-    def to_hdf(self, hdf: Optional["pyiron_base.storage.hdfio.ProjectHDFio"]=None, group_name: Optional[str]=None) -> None:
+    def to_hdf(
+        self,
+        hdf: Optional["pyiron_base.storage.hdfio.ProjectHDFio"] = None,
+        group_name: Optional[str] = None,
+    ) -> None:
         GenericJob.to_hdf(self=self, hdf=hdf, group_name=group_name)
         HasStorage.to_hdf(self=self, hdf=self.project_hdf5)
 
-    def from_hdf(self, hdf: Optional["pyiron_base.storage.hdfio.ProjectHDFio"]=None, group_name: Optional[str]=None) -> None:
+    def from_hdf(
+        self,
+        hdf: Optional["pyiron_base.storage.hdfio.ProjectHDFio"] = None,
+        group_name: Optional[str] = None,
+    ) -> None:
         GenericJob.from_hdf(self=self, hdf=hdf, group_name=group_name)
         HasStorage.from_hdf(self=self, hdf=self.project_hdf5)
 
@@ -146,7 +157,9 @@ class PythonTemplateJob(TemplateJob):
 
     """
 
-    def __init__(self, project: "pyiron_base.storeage.hdfio.ProjectHDFio", job_name: str):
+    def __init__(
+        self, project: "pyiron_base.storeage.hdfio.ProjectHDFio", job_name: str
+    ):
         super().__init__(project, job_name)
         self._job_with_calculate_function = True
         self._write_work_dir_warnings = False
diff --git a/pyiron_base/jobs/job/util.py b/pyiron_base/jobs/job/util.py
index 8ecb875a3..ed457317b 100644
--- a/pyiron_base/jobs/job/util.py
+++ b/pyiron_base/jobs/job/util.py
@@ -41,7 +41,9 @@
 }
 
 
-def _copy_database_entry(new_job_core: "pyiron_base.jobs.job.generic.GenericJob", job_copied_id: int) -> None:
+def _copy_database_entry(
+    new_job_core: "pyiron_base.jobs.job.generic.GenericJob", job_copied_id: int
+) -> None:
     """
     Copy database entry from previous job
 
@@ -60,7 +62,11 @@ def _copy_database_entry(new_job_core: "pyiron_base.jobs.job.generic.GenericJob"
         new_job_core.reset_job_id(job_id=job_id)
 
 
-def _copy_to_delete_existing(project_class: "pyiron_base.project.generic.Project", job_name: str, delete_job: bool):
+def _copy_to_delete_existing(
+    project_class: "pyiron_base.project.generic.Project",
+    job_name: str,
+    delete_job: bool,
+):
     """
     Check if the job already exists in the project; if that is the case, either
     delete it or reload it, depending on the setting of delete_job
@@ -84,7 +90,19 @@ def _copy_to_delete_existing(project_class: "pyiron_base.project.generic.Project
             return None
 
 
-def _get_project_for_copy(job: "pyiron_base.jobs.job.core.JobCore", project: Optional[Union["pyiron_base.project.generic.Project", "pyiron_base.jobs.job.core.JobCore", "pyiron_base.storage.hdfio.ProjectHDFio"]], new_job_name: str) -> Tuple["pyiron_base.project.generic.Project", "pyiron_base.storage.hdfio.ProjectHDFio"]:
+def _get_project_for_copy(
+    job: "pyiron_base.jobs.job.core.JobCore",
+    project: Optional[
+        Union[
+            "pyiron_base.project.generic.Project",
+            "pyiron_base.jobs.job.core.JobCore",
+            "pyiron_base.storage.hdfio.ProjectHDFio",
+        ]
+    ],
+    new_job_name: str,
+) -> Tuple[
+    "pyiron_base.project.generic.Project", "pyiron_base.storage.hdfio.ProjectHDFio"
+]:
     """
     Internal helper function to generate a project and hdf5 project for copying
 
@@ -167,7 +185,12 @@ def round_(value, ndigits=ndigits):
 )
 
 
-def _rename_job(job: Union["pyiron_base.jobs.job.generic.GenericJob", "pyiron_base.jobs.job.core.JobCore"], new_job_name: str) -> None:
+def _rename_job(
+    job: Union[
+        "pyiron_base.jobs.job.generic.GenericJob", "pyiron_base.jobs.job.core.JobCore"
+    ],
+    new_job_name: str,
+) -> None:
     """ """
     new_job_name = _get_safe_job_name(new_job_name)
     child_ids = job.child_ids
@@ -278,14 +301,20 @@ def _job_compressed_name(job: "pyiron_base.jobs.job.core.JobCore") -> str:
     return _get_compressed_job_name(working_directory=job.working_directory)
 
 
-def _get_compressed_job_name(working_directory: str) -> str:
+def _get_compressed_job_name(
+    working_directory: "pyiron_base.jobs.job.core.JobCore",
+) -> str:
     """Return the canonical file name of a compressed job from the working directory."""
     return os.path.join(
         working_directory, os.path.basename(working_directory) + ".tar.bz2"
     )
 
 
-def _job_compress(job: "pyiron_base.jobs.job.core.JobCore", files_to_compress: list=[], files_to_remove: list=[]) -> None:
+def _job_compress(
+    job: "pyiron_base.jobs.job.core.JobCore",
+    files_to_compress: list = [],
+    files_to_remove: list = [],
+) -> None:
     """
     Compress the output files of a job object.
 
@@ -367,7 +396,9 @@ def _job_is_compressed(job: "pyiron_base.jobs.job.core.JobCore") -> bool:
     return _working_directory_is_compressed(working_directory=job.working_directory)
 
 
-def _working_directory_list_files(working_directory: str, include_archive: bool=True) -> list:
+def _working_directory_list_files(
+    working_directory: str, include_archive: bool = True
+) -> list:
     """
     Returns the list of files in the job's working directory.
 
@@ -414,7 +445,9 @@ def _job_list_files(job: "pyiron_base.jobs.job.core.JobCore") -> list:
     return _working_directory_list_files(working_directory=job.working_directory)
 
 
-def _working_directory_read_file(working_directory: str, file_name: str, tail: Optional[int]=None) -> list:
+def _working_directory_read_file(
+    working_directory: str, file_name: str, tail: Optional[int] = None
+) -> list:
     """
     Return list of lines of the given file.
 
@@ -472,7 +505,9 @@ def _working_directory_read_file(working_directory: str, file_name: str, tail: O
             return lines
 
 
-def _job_read_file(job: "pyiron_base.jobs.job.core.JobCore", file_name: str, tail: Optional[int]=None) -> list:
+def _job_read_file(
+    job: "pyiron_base.jobs.job.core.JobCore", file_name: str, tail: Optional[int] = None
+) -> list:
     """
     Return list of lines of the given file.
 
@@ -609,7 +644,9 @@ def _job_store_before_copy(job: "pyiron_base.jobs.job.core.JobCore") -> bool:
     return delete_file_after_copy
 
 
-def _job_reload_after_copy(job: "pyiron_base.jobs.job.core.JobCore", delete_file_after_copy: bool) -> None:
+def _job_reload_after_copy(
+    job: "pyiron_base.jobs.job.core.JobCore", delete_file_after_copy: bool
+) -> None:
     """
     Reload job from HDF5 file after copying
 

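Note: the annotation style above - class names quoted as strings - relies on
forward references, so pyiron_base modules do not have to import Project or
JobCore at load time. A condensed sketch of the convention (type names
abbreviated for readability):

    from typing import Optional, Tuple, Union

    def _get_project_for_copy(
        job: "JobCore",
        project: Optional[Union["Project", "JobCore", "ProjectHDFio"]],
        new_job_name: str,
    ) -> Tuple["Project", "ProjectHDFio"]: ...
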
From 3f3215141c678218c9d74f90893818a2bdb74c7b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jan=20Jan=C3=9Fen?= <janssen@mpie.de>
Date: Sun, 8 Sep 2024 12:04:34 +0200
Subject: [PATCH 07/20] Add Tuple

---
 pyiron_base/jobs/job/util.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyiron_base/jobs/job/util.py b/pyiron_base/jobs/job/util.py
index ed457317b..1b570e073 100644
--- a/pyiron_base/jobs/job/util.py
+++ b/pyiron_base/jobs/job/util.py
@@ -11,7 +11,7 @@
 import stat
 import tarfile
 from itertools import islice
-from typing import Optional, Union
+from typing import Optional, Union, Tuple
 
 import monty.io
 import psutil

From 9b96f0de58bac5c4de8936035396969a079d25fb Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
 <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Sun, 8 Sep 2024 10:04:56 +0000
Subject: [PATCH 08/20] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 pyiron_base/jobs/job/util.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyiron_base/jobs/job/util.py b/pyiron_base/jobs/job/util.py
index 1b570e073..1a9c4259c 100644
--- a/pyiron_base/jobs/job/util.py
+++ b/pyiron_base/jobs/job/util.py
@@ -11,7 +11,7 @@
 import stat
 import tarfile
 from itertools import islice
-from typing import Optional, Union, Tuple
+from typing import Optional, Tuple, Union
 
 import monty.io
 import psutil

From c78ba196236479a72a6c8253adb0daf067d62c3b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jan=20Jan=C3=9Fen?= <janssen@mpie.de>
Date: Sun, 8 Sep 2024 12:18:32 +0200
Subject: [PATCH 09/20] JobPath and JobType

---
 pyiron_base/jobs/job/jobtype.py | 40 ++++++++++++------------
 pyiron_base/jobs/job/path.py    | 55 +++++++++++++++++----------------
 2 files changed, 48 insertions(+), 47 deletions(-)

diff --git a/pyiron_base/jobs/job/jobtype.py b/pyiron_base/jobs/job/jobtype.py
index e0f8c4bcc..824a6f22a 100644
--- a/pyiron_base/jobs/job/jobtype.py
+++ b/pyiron_base/jobs/job/jobtype.py
@@ -8,7 +8,7 @@
 import importlib
 import inspect
 import os
-from typing import Union
+from typing import Union, Optional
 
 from pyiron_snippets.singleton import Singleton
 
@@ -48,12 +48,12 @@ class JobType:
 
     def __new__(
         cls,
-        class_name,
-        project,
-        job_name,
-        job_class_dict=None,
-        delete_existing_job=False,
-        delete_aborted_job=False,
+        class_name: Union[type, str],
+        project: ProjectHDFio,
+        job_name: str,
+        job_class_dict: Optional[dict]=None,
+        delete_existing_job: bool=False,
+        delete_aborted_job: bool =False,
     ):
         """
         The __new__() method allows to create objects from other classes - the class selected by class_name
@@ -119,7 +119,7 @@ def __new__(
         return job
 
     @classmethod
-    def unregister(cls, job_name_or_class):
+    def unregister(cls, job_name_or_class: Union[str, type]) -> Optional[type]:
         """Unregister job type from the exposed list of available job types
 
         Args:
@@ -140,7 +140,7 @@ def unregister(cls, job_name_or_class):
         return _cls
 
     @staticmethod
-    def _convert_pyiron_to_pyiron_atomistics_module(cls_module_str):
+    def _convert_pyiron_to_pyiron_atomistics_module(cls_module_str: str) -> str:
         if cls_module_str.startswith("pyiron."):
             # Currently, we set all sub-modules of pyiron_atomistics to be sub-modules of pyiron. Thus, any class
             # pyiron.submodule.PyironClass is identical to pyiron_atomistics_submodule.PyironClass:
@@ -151,9 +151,9 @@ def _convert_pyiron_to_pyiron_atomistics_module(cls_module_str):
     def register(
         cls,
         job_class_or_module_str: Union[type, str],
-        job_name: str = None,
-        overwrite=False,
-    ):
+        job_name: Optional[str] = None,
+        overwrite: bool = False,
+    ) -> None:
         """Register job type from the exposed list of available job types
 
         Args:
@@ -196,7 +196,7 @@ def register(
             cls._job_class_dict[job_name] = cls_module_str
 
     @staticmethod
-    def convert_str_to_class(job_class_dict, class_name):
+    def convert_str_to_class(job_class_dict: dict, class_name: str) -> type:
         """
         convert the name of a class to the corresponding class object - only for pyiron internal classes.
 
@@ -233,17 +233,17 @@ class JobFactory(PyironFactory):
     which is wrapped as pyiron job type.
     """
 
-    def __init__(self, project):
+    def __init__(self, project: "pyiron_base.project.generic.Project"):
         self._job_class_dict = JOB_CLASS_DICT
         self._project = project
 
-    def __dir__(self):
+    def __dir__(self) -> list:
         """
         Enable autocompletion by overriding the __dir__() function.
         """
         return list(self._job_class_dict.keys())
 
-    def __getattr__(self, name):
+    def __getattr__(self, name: str) -> callable:
         if name in self._job_class_dict:
 
             def wrapper(job_name, delete_existing_job=False, delete_aborted_job=False):
@@ -291,20 +291,20 @@ def __init__(self):
         self.job_class_dict = JOB_CLASS_DICT
 
     @property
-    def job_class_dict(self):
+    def job_class_dict(self) -> dict:
         return self._job_class_dict
 
     @job_class_dict.setter
-    def job_class_dict(self, job_class_dict):
+    def job_class_dict(self, job_class_dict: dict) -> None:
         self._job_class_dict = job_class_dict
 
-    def __getattr__(self, name):
+    def __getattr__(self, name: str) -> str:
         if name in self._job_class_dict.keys():
             return name
         else:
             raise AttributeError("no job class named '{}' defined".format(name))
 
-    def __dir__(self):
+    def __dir__(self) -> list:
         """
         Enable autocompletion by overriding the __dir__() function.
         """
diff --git a/pyiron_base/jobs/job/path.py b/pyiron_base/jobs/job/path.py
index 6dcc671ec..e550db61f 100644
--- a/pyiron_base/jobs/job/path.py
+++ b/pyiron_base/jobs/job/path.py
@@ -6,6 +6,7 @@
 """
 
 import os
+from typing import Any, Tuple
 
 from pyiron_base.jobs.job.core import JobCore
 from pyiron_base.project.generic import Project
@@ -102,7 +103,7 @@ class JobPath(JobCore):
             path inside the HDF5 file - also stored as absolute path
     """
 
-    def __init__(self, job_path):
+    def __init__(self, job_path: str):
         """
         Load a job from the given path.
 
@@ -128,7 +129,7 @@ def __init__(self, job_path):
         super().__init__(project=hdf_project, job_name=job_path_lst[1].split("/")[-1])
 
     @classmethod
-    def from_job_id(cls, db, job_id):
+    def from_job_id(cls, db, job_id: int) -> "JobPath":
         """
         Load a job path from a database connection and the job id.
 
@@ -143,7 +144,7 @@ def from_job_id(cls, db, job_id):
         return cls.from_db_entry(db_entry)
 
     @classmethod
-    def from_db_entry(cls, db_entry):
+    def from_db_entry(cls, db_entry: dict) -> "JobPath":
         """
         Load a job path from a database entry.
 
@@ -174,7 +175,7 @@ def from_db_entry(cls, db_entry):
         return job
 
     @property
-    def is_root(self):
+    def is_root(self) -> bool:
         """
         Check if the current h5_path is pointing to the HDF5 root group.
 
@@ -184,7 +185,7 @@ def is_root(self):
         return self.project_hdf5.is_root
 
     @property
-    def is_empty(self):
+    def is_empty(self) -> bool:
         """
         Check if the HDF5 file is empty
 
@@ -194,7 +195,7 @@ def is_empty(self):
         return self.project_hdf5.is_empty
 
     @property
-    def base_name(self):
+    def base_name(self) -> str:
         """
         Name of the HDF5 file - but without the file extension .h5
 
@@ -204,7 +205,7 @@ def base_name(self):
         return self.project_hdf5.base_name
 
     @property
-    def file_path(self):
+    def file_path(self) -> str:
         """
         Path where the HDF5 file is located - posixpath.dirname()
 
@@ -214,7 +215,7 @@ def file_path(self):
         return self.project_hdf5.file_path
 
     @property
-    def h5_path(self):
+    def h5_path(self) -> str:
         """
         Get the path in the HDF5 file starting from the root group - meaning this path starts with '/'
 
@@ -224,7 +225,7 @@ def h5_path(self):
         return self.project_hdf5.h5_path
 
     @h5_path.setter
-    def h5_path(self, path):
+    def h5_path(self, path: str) -> None:
         """
         Set the path in the HDF5 file starting from the root group
 
@@ -233,7 +234,7 @@ def h5_path(self, path):
         """
         self.project_hdf5.h5_path = path
 
-    def create_group(self, name):
+    def create_group(self, name: str) -> "pyiron_base.storage.hdfio.ProjectHDFio":
         """
         Create an HDF5 group - similar to a folder in the filesystem - the HDF5 groups allow the users to structure their
         data.
@@ -246,7 +247,7 @@ def create_group(self, name):
         """
         return self.project_hdf5.create_group(name)
 
-    def open(self, h5_rel_path):
+    def open(self, h5_rel_path: str) -> "pyiron_base.storage.hdfio.ProjectHDFio":
         """
         Create an HDF5 group and enter this specific group. If the group exists in the HDF5 path, only the h5_path is
         set correspondingly; otherwise the group is created first.
@@ -259,19 +260,19 @@ def open(self, h5_rel_path):
         """
         return self.project_hdf5.open(h5_rel_path)
 
-    def close(self):
+    def close(self) -> None:
         """
         Close the current HDF5 path and return to the path before the last open
         """
         self.project_hdf5.close()
 
-    def remove_file(self):
+    def remove_file(self) -> None:
         """
         Remove the HDF5 file with all the related content
         """
         self.project_hdf5.remove_file()
 
-    def put(self, key, value):
+    def put(self, key: str, value: Any) -> None:
         """
         Store data inside the HDF5 file
 
@@ -281,7 +282,7 @@ def put(self, key, value):
         """
         self.project_hdf5.__setitem__(key, value)
 
-    def listdirs(self):
+    def listdirs(self) -> list:
         """
         equivalent to os.listdir (consider groups as equivalent to dirs)
 
@@ -291,7 +292,7 @@ def listdirs(self):
         """
         return self.project_hdf5.list_groups()
 
-    def list_dirs(self):
+    def list_dirs(self) -> list:
         """
         equivalent to os.listdir (consider groups as equivalent to dirs)
 
@@ -300,7 +301,7 @@ def list_dirs(self):
         """
         return self.project_hdf5.list_groups()
 
-    def keys(self):
+    def keys(self) -> list:
         """
         List all groups and nodes of the HDF5 file - where groups are equivalent to directories and nodes to files.
 
@@ -309,7 +310,7 @@ def keys(self):
         """
         return self.project_hdf5.keys()
 
-    def values(self):
+    def values(self) -> list:
         """
         List all values for all groups and nodes of the HDF5 file
 
@@ -318,7 +319,7 @@ def values(self):
         """
         return self.project_hdf5.values()
 
-    def items(self):
+    def items(self) -> Tuple[list, list]:
         """
         List all keys and values as items of all groups and nodes of the HDF5 file
 
@@ -327,7 +328,7 @@ def items(self):
         """
         return self.project_hdf5.items()
 
-    def groups(self):
+    def groups(self) -> list:
         """
         Filter HDF5 file by groups
 
@@ -336,7 +337,7 @@ def groups(self):
         """
         return self.project_hdf5.groups()
 
-    def nodes(self):
+    def nodes(self) -> list:
         """
         Filter HDF5 file by nodes
 
@@ -345,7 +346,7 @@ def nodes(self):
         """
         return self.project_hdf5.nodes()
 
-    def __enter__(self):
+    def __enter__(self) -> "JobPath":
         """
         Compatibility function for the with statement
         """
@@ -357,7 +358,7 @@ def __exit__(self, exc_type, exc_val, exc_tb):
         """
         self.project_hdf5.__exit__(exc_type=exc_type, exc_val=exc_val, exc_tb=exc_tb)
 
-    def __setitem__(self, key, value):
+    def __setitem__(self, key: str, value: Any) -> None:
         """
         Store data inside the HDF5 file
 
@@ -367,7 +368,7 @@ def __setitem__(self, key, value):
         """
         self.project_hdf5.__setitem__(key, value)
 
-    def __delitem__(self, key):
+    def __delitem__(self, key: str) -> None:
         """
         Delete item from the HDF5 file
 
@@ -376,7 +377,7 @@ def __delitem__(self, key):
         """
         self.project_hdf5.__delitem__(key)
 
-    def __str__(self):
+    def __str__(self) -> str:
         """
         Machine readable string representation
 
@@ -385,7 +386,7 @@ def __str__(self):
         """
         return self.project_hdf5.__str__()
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         """
         Human readable string representation
 
@@ -394,7 +395,7 @@ def __repr__(self):
         """
         return self.project_hdf5.__repr__()
 
-    def __del__(self):
+    def __del__(self) -> None:
         """
         When the object is deleted the HDF5 file has to be closed
         """

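Note: the __enter__/__exit__ annotations make the context-manager use of
JobPath explicit; a minimal sketch (the file path is hypothetical and the
exact job-path format is an assumption):

    from pyiron_base.jobs.job.path import JobPath

    # Open the HDF5 file of a finished job without a database connection.
    with JobPath("/home/user/projects/demo/my_job.h5") as job:
        print(job.h5_path)      # path inside the HDF5 file, starting at "/"
        print(job.list_dirs())  # groups, treated like directories
        print(job.nodes())      # nodes, treated like files
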
From a1497ada9efc4080aedd104cb65d129ba4f4f4ac Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
 <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Sun, 8 Sep 2024 10:18:40 +0000
Subject: [PATCH 10/20] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 pyiron_base/jobs/job/jobtype.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/pyiron_base/jobs/job/jobtype.py b/pyiron_base/jobs/job/jobtype.py
index 824a6f22a..6e95d09cc 100644
--- a/pyiron_base/jobs/job/jobtype.py
+++ b/pyiron_base/jobs/job/jobtype.py
@@ -8,7 +8,7 @@
 import importlib
 import inspect
 import os
-from typing import Union, Optional
+from typing import Optional, Union
 
 from pyiron_snippets.singleton import Singleton
 
@@ -51,9 +51,9 @@ def __new__(
         class_name: Union[type, str],
         project: ProjectHDFio,
         job_name: str,
-        job_class_dict: Optional[dict]=None,
-        delete_existing_job: bool=False,
-        delete_aborted_job: bool =False,
+        job_class_dict: Optional[dict] = None,
+        delete_existing_job: bool = False,
+        delete_aborted_job: bool = False,
     ):
         """
         The __new__() method allows creating objects from other classes - the class selected by class_name

From b2607e0fc124f69c14162098b5ea893982e9b615 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jan=20Jan=C3=9Fen?= <janssen@mpie.de>
Date: Sun, 8 Sep 2024 12:32:38 +0200
Subject: [PATCH 11/20] Add type hints for command line

---
 pyiron_base/cli/control.py    | 2 +-
 pyiron_base/cli/cp.py         | 5 +++--
 pyiron_base/cli/install.py    | 5 +++--
 pyiron_base/cli/ls.py         | 4 ++--
 pyiron_base/cli/mv.py         | 5 +++--
 pyiron_base/cli/reloadfile.py | 5 +++--
 pyiron_base/cli/rm.py         | 5 +++--
 pyiron_base/cli/wrapper.py    | 5 +++--
 8 files changed, 21 insertions(+), 15 deletions(-)

diff --git a/pyiron_base/cli/control.py b/pyiron_base/cli/control.py
index cbc462c70..cafc57c65 100644
--- a/pyiron_base/cli/control.py
+++ b/pyiron_base/cli/control.py
@@ -32,7 +32,7 @@
 }
 
 
-def main():
+def main() -> None:
     parser = argparse.ArgumentParser(prog="pyiron", description=__doc__)
     parser.add_argument(
         "-d", "--dirty", action="store_true", help="do not remove pyiron log files"
diff --git a/pyiron_base/cli/cp.py b/pyiron_base/cli/cp.py
index d40b20952..616a86ba3 100644
--- a/pyiron_base/cli/cp.py
+++ b/pyiron_base/cli/cp.py
@@ -4,16 +4,17 @@
 """
 Copy a pyiron project to a new location.
 """
+from argparse import ArgumentParser, Namespace
 
 from pyiron_base.project.generic import Project
 
 
-def register(parser):
+def register(parser: ArgumentParser) -> None:
     parser.add_argument("src", help="source project")
     parser.add_argument("dst", help="destination project")
 
 
-def main(args):
+def main(args: Namespace) -> None:
     src = Project(args.src)
     dst = Project(args.dst)
     src.copy_to(dst)
diff --git a/pyiron_base/cli/install.py b/pyiron_base/cli/install.py
index 605e76906..b048b9dc5 100644
--- a/pyiron_base/cli/install.py
+++ b/pyiron_base/cli/install.py
@@ -4,6 +4,7 @@
 """
 Install pyiron config and resources for the first time.
 """
+from argparse import ArgumentParser, Namespace
 
 from pyiron_base.state.install import install_pyiron
 
@@ -19,7 +20,7 @@
 __date__ = "Jun 26, 2020"
 
 
-def register(parser):
+def register(parser: ArgumentParser) -> None:
     parser.add_argument(
         "-c",
         "--config",
@@ -46,7 +47,7 @@ def register(parser):
     )
 
 
-def main(args):
+def main(args: Namespace) -> None:
     install_pyiron(
         config_file_name=args.config,
         project_path=args.project,
diff --git a/pyiron_base/cli/ls.py b/pyiron_base/cli/ls.py
index 423b64b24..420a8bdfe 100644
--- a/pyiron_base/cli/ls.py
+++ b/pyiron_base/cli/ls.py
@@ -43,7 +43,7 @@
 """
 
 
-def register(parser):
+def register(parser: argparse.ArgumentParser) -> None:
     parser.add_argument(
         "project", default=".", nargs="?", help="path to pyiron project"
     )
@@ -123,7 +123,7 @@ def register(parser):
     )
 
 
-def main(args):
+def main(args: argparse.Namespace) -> None:
     if args.status:
         if "status" not in args.columns:
             args.columns = args.columns + ["status"]
diff --git a/pyiron_base/cli/mv.py b/pyiron_base/cli/mv.py
index f63972f5b..4694d7156 100644
--- a/pyiron_base/cli/mv.py
+++ b/pyiron_base/cli/mv.py
@@ -4,16 +4,17 @@
 """
 Move a pyiron project to a new location.
 """
+from argparse import ArgumentParser, Namespace
 
 from pyiron_base.project.generic import Project
 
 
-def register(parser):
+def register(parser: ArgumentParser) -> None:
     parser.add_argument("src", help="source project")
     parser.add_argument("dst", help="destination project")
 
 
-def main(args):
+def main(args: Namespace) -> None:
     src = Project(args.src)
     dst = Project(args.dst)
     src.move_to(dst)
diff --git a/pyiron_base/cli/reloadfile.py b/pyiron_base/cli/reloadfile.py
index a2ffdbdd2..7c8c976b0 100644
--- a/pyiron_base/cli/reloadfile.py
+++ b/pyiron_base/cli/reloadfile.py
@@ -1,6 +1,7 @@
 # coding: utf-8
 # Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
 # Distributed under the terms of "New BSD License", see the LICENSE file.
+from argparse import ArgumentParser, Namespace
 import os
 import shutil
 
@@ -10,7 +11,7 @@
 from pyiron_base.state import state
 
 
-def register(parser):
+def register(parser: ArgumentParser) -> None:
     parser.add_argument(
         "-i",
         "--input-path",
@@ -23,7 +24,7 @@ def register(parser):
     )
 
 
-def main(args):
+def main(args: Namespace) -> None:
     with _open_hdf(filename=args.input_path, mode="r") as f:
         job_name = list(f.keys())[0]
     project_path = os.path.join(os.path.abspath("."), job_name + ".h5")
diff --git a/pyiron_base/cli/rm.py b/pyiron_base/cli/rm.py
index ef50de3a1..dfd5b47d2 100644
--- a/pyiron_base/cli/rm.py
+++ b/pyiron_base/cli/rm.py
@@ -4,6 +4,7 @@
 Remove jobs from pyiron project or whole project.
 """
 
+from argparse import ArgumentParser, Namespace
 import os
 
 from pyiron_base.project.generic import Project
@@ -20,7 +21,7 @@
 __date__ = "23 Jun, 2020"
 
 
-def register(parser):
+def register(parser: ArgumentParser):
     parser.add_argument(
         "project", default=".", nargs="?", help="path to pyiron project"
     )
@@ -32,7 +33,7 @@ def register(parser):
     )
 
 
-def main(args):
+def main(args: Namespace) -> None:
     pr = Project(args.project)
     if args.jobs_only:
         pr.remove_jobs(recursive=args.recursive, silently=True)
diff --git a/pyiron_base/cli/wrapper.py b/pyiron_base/cli/wrapper.py
index a393b7515..ca643f0b8 100644
--- a/pyiron_base/cli/wrapper.py
+++ b/pyiron_base/cli/wrapper.py
@@ -3,11 +3,12 @@
 """
 Run a job from hdf5.
 """
+from argparse import ArgumentParser, Namespace
 
 from pyiron_base.jobs.job.wrapper import job_wrapper_function
 
 
-def register(parser):
+def register(parser: ArgumentParser):
     parser.add_argument(
         "-d",
         "--debug",
@@ -33,7 +34,7 @@ def register(parser):
     )
 
 
-def main(args):
+def main(args: Namespace) -> None:
     job_wrapper_function(
         working_directory=args.project,
         job_id=args.job_id,
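
All eight CLI modules above share the same two-function contract: register() declares arguments on a parser and main() consumes the parsed Namespace. A self-contained sketch of how such a pair plugs into a subcommand dispatcher (the prog name and wiring are illustrative, not pyiron's actual entry point):

    from argparse import ArgumentParser, Namespace

    def register(parser: ArgumentParser) -> None:
        parser.add_argument("src", help="source project")
        parser.add_argument("dst", help="destination project")

    def main(args: Namespace) -> None:
        print(f"would copy {args.src} -> {args.dst}")

    top = ArgumentParser(prog="demo")
    sub = top.add_subparsers()
    cp = sub.add_parser("cp")
    register(cp)                   # the subcommand declares its own arguments
    cp.set_defaults(handler=main)  # and its own entry point
    parsed = top.parse_args(["cp", "a", "b"])
    parsed.handler(parsed)         # dispatch: prints "would copy a -> b"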

From 40bf5752b5c3f4aaee798d856584f2709320e130 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
 <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Sun, 8 Sep 2024 10:32:47 +0000
Subject: [PATCH 12/20] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 pyiron_base/cli/cp.py         | 1 +
 pyiron_base/cli/install.py    | 1 +
 pyiron_base/cli/mv.py         | 1 +
 pyiron_base/cli/reloadfile.py | 2 +-
 pyiron_base/cli/rm.py         | 2 +-
 pyiron_base/cli/wrapper.py    | 1 +
 6 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/pyiron_base/cli/cp.py b/pyiron_base/cli/cp.py
index 616a86ba3..be58f04eb 100644
--- a/pyiron_base/cli/cp.py
+++ b/pyiron_base/cli/cp.py
@@ -4,6 +4,7 @@
 """
 Copy a pyiron project to a new location.
 """
+
 from argparse import ArgumentParser, Namespace
 
 from pyiron_base.project.generic import Project
diff --git a/pyiron_base/cli/install.py b/pyiron_base/cli/install.py
index b048b9dc5..193c0f135 100644
--- a/pyiron_base/cli/install.py
+++ b/pyiron_base/cli/install.py
@@ -4,6 +4,7 @@
 """
 Install pyiron config and resources for the first time.
 """
+
 from argparse import ArgumentParser, Namespace
 
 from pyiron_base.state.install import install_pyiron
diff --git a/pyiron_base/cli/mv.py b/pyiron_base/cli/mv.py
index 4694d7156..0b870d495 100644
--- a/pyiron_base/cli/mv.py
+++ b/pyiron_base/cli/mv.py
@@ -4,6 +4,7 @@
 """
 Move a pyiron project to a new location.
 """
+
 from argparse import ArgumentParser, Namespace
 
 from pyiron_base.project.generic import Project
diff --git a/pyiron_base/cli/reloadfile.py b/pyiron_base/cli/reloadfile.py
index 7c8c976b0..64bafd91b 100644
--- a/pyiron_base/cli/reloadfile.py
+++ b/pyiron_base/cli/reloadfile.py
@@ -1,9 +1,9 @@
 # coding: utf-8
 # Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
 # Distributed under the terms of "New BSD License", see the LICENSE file.
-from argparse import ArgumentParser, Namespace
 import os
 import shutil
+from argparse import ArgumentParser, Namespace
 
 from h5io_browser.base import _open_hdf
 
diff --git a/pyiron_base/cli/rm.py b/pyiron_base/cli/rm.py
index dfd5b47d2..f96eca51c 100644
--- a/pyiron_base/cli/rm.py
+++ b/pyiron_base/cli/rm.py
@@ -4,8 +4,8 @@
 Remove jobs from pyiron project or whole project.
 """
 
-from argparse import ArgumentParser, Namespace
 import os
+from argparse import ArgumentParser, Namespace
 
 from pyiron_base.project.generic import Project
 
diff --git a/pyiron_base/cli/wrapper.py b/pyiron_base/cli/wrapper.py
index ca643f0b8..14fd67a0a 100644
--- a/pyiron_base/cli/wrapper.py
+++ b/pyiron_base/cli/wrapper.py
@@ -3,6 +3,7 @@
 """
 Run a job from hdf5.
 """
+
 from argparse import ArgumentParser, Namespace
 
 from pyiron_base.jobs.job.wrapper import job_wrapper_function
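
The reordering above reflects the import-sorting convention the hooks apply: within the standard-library group, plain import statements come before from ... import statements. In short, for reloadfile.py:

    # before the hook runs
    from argparse import ArgumentParser, Namespace
    import os
    import shutil

    # after: straight imports first, then from-imports
    import os
    import shutil
    from argparse import ArgumentParser, Namespace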

From 705766577f6208806a84dfcc881767a529cd97fc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jan=20Jan=C3=9Fen?= <janssen@mpie.de>
Date: Sun, 8 Sep 2024 17:22:29 +0200
Subject: [PATCH 13/20] Add type hints and docstrings to the job classes

---
 pyiron_base/jobs/job/core.py                  | 263 +++++++++--------
 pyiron_base/jobs/job/extension/executable.py  |  62 ++--
 pyiron_base/jobs/job/extension/files.py       | 104 +++----
 pyiron_base/jobs/job/extension/jobstatus.py   |  46 +--
 .../jobs/job/extension/server/queuestatus.py  |  39 +--
 .../jobs/job/extension/server/runmode.py      |  24 +-
 pyiron_base/jobs/job/generic.py               | 270 ++++++++++--------
 pyiron_base/jobs/job/interactive.py           |  93 +++---
 pyiron_base/storage/hdfio.py                  |   4 +-
 9 files changed, 493 insertions(+), 412 deletions(-)

diff --git a/pyiron_base/jobs/job/core.py b/pyiron_base/jobs/job/core.py
index 387e8028a..90e61115a 100644
--- a/pyiron_base/jobs/job/core.py
+++ b/pyiron_base/jobs/job/core.py
@@ -10,6 +10,7 @@
 import os
 import posixpath
 import shutil
+from typing import Any, Generator, List, Optional, Union
 import warnings
 
 from pyiron_snippets.deprecate import deprecate
@@ -138,7 +139,7 @@ def recursive_load_from_hdf(project_hdf5: ProjectHDFio, item: str):
         None: if nothing was found in the HDF file
     """
 
-    def successive_path_splits(name_lst):
+    def successive_path_splits(name_lst: list) -> Generator:
         """
         Yield successive split/joins of a path, i.e.
         /a/b/c/d
@@ -184,6 +185,61 @@ def successive_path_splits(name_lst):
             pass
 
 
+class DatabaseProperties(object):
+    """
+    Access the database entry of the job
+    """
+
+    def __init__(self, job_dict=None):
+        self._job_dict = job_dict
+
+    def __bool__(self):
+        return self._job_dict is not None
+
+    def __dir__(self):
+        return list(self._job_dict.keys())
+
+    def __getattr__(self, name):
+        if name in self._job_dict.keys():
+            return self._job_dict[name]
+        else:
+            raise AttributeError(name)
+
+    def __repr__(self):
+        return f"{self.__class__.__name__}({repr(self._job_dict)})"
+
+
+class HDF5Content(object):
+    """
+    Access the HDF5 file of the job
+    """
+
+    def __init__(self, project_hdf5):
+        self._project_hdf5 = project_hdf5
+
+    def __getattr__(self, name):
+        try:
+            return self[name]
+        except KeyError:
+            raise AttributeError(name) from None
+
+    def __getitem__(self, item):
+        value = recursive_load_from_hdf(self._project_hdf5, item)
+        if value is not None:
+            return value
+
+        if item in self._project_hdf5.list_groups():
+            return HDF5Content(self._project_hdf5[item])
+        else:
+            raise KeyError(item)
+
+    def __dir__(self):
+        return self._project_hdf5.list_nodes() + self._project_hdf5.list_groups()
+
+    def __repr__(self):
+        return self._project_hdf5.__repr__()
+
+
 class JobCore(HasGroups):
     __doc__ = (
         """
@@ -199,7 +256,7 @@ class JobCore(HasGroups):
         + _doc_str_job_core_attr
     )
 
-    def __init__(self, project, job_name):
+    def __init__(self, project: ProjectHDFio, job_name: str):
         job_name = _get_safe_job_name(job_name)
         self._name = job_name
         self._hdf5 = project.open(self._name)
@@ -214,17 +271,17 @@ def __init__(self, project, job_name):
         self._files_to_compress = list()
 
     @property
-    def content(self):
+    def content(self) -> HDF5Content:
         return self._hdf5_content
 
     @property
-    def files(self):
+    def files(self) -> FileBrowser:
         return FileBrowser(working_directory=self.working_directory)
 
     files.__doc__ = FileBrowser.__doc__
 
     @property
-    def job_name(self):
+    def job_name(self) -> str:
         """
         Get name of the job, which has to be unique within the project
 
@@ -234,7 +291,7 @@ def job_name(self):
         return self.name
 
     @job_name.setter
-    def job_name(self, new_job_name):
+    def job_name(self, new_job_name: str) -> None:
         """
         Set name of the job, which has to be unique within the project. When changing the job_name this also moves the
         HDF5 file as the name of the HDF5 file is the job_name plus the extension *.h5
@@ -245,7 +302,7 @@ def job_name(self, new_job_name):
         self.name = new_job_name
 
     @property
-    def name(self):
+    def name(self) -> str:
         """
         Get name of the job, which has to be unique within the project
 
@@ -255,7 +312,7 @@ def name(self):
         return self._name
 
     @name.setter
-    def name(self, new_job_name):
+    def name(self, new_job_name: str) -> None:
         """
         Set name of the job, which has to be unique within the project. When changing the job_name this also moves the
         HDF5 file as the name of the HDF5 file is the job_name plus the extension *.h5
@@ -266,7 +323,7 @@ def name(self, new_job_name):
         _rename_job(job=self, new_job_name=new_job_name)
 
     @property
-    def status(self):
+    def status(self) -> str:
         """
         Execution status of the job, can be one of the following [initialized, appended, created, submitted, running,
                                                                   aborted, collect, suspended, refresh, busy, finished]
@@ -277,7 +334,7 @@ def status(self):
         return self._status
 
     @property
-    def job_id(self):
+    def job_id(self) -> int:
         """
         Unique id to identify the job in the pyiron database
 
@@ -289,7 +346,7 @@ def job_id(self):
         return self._job_id
 
     @property
-    def id(self):
+    def id(self) -> int:
         """
         Unique id to identify the job in the pyiron database - use self.job_id instead
 
@@ -299,7 +356,7 @@ def id(self):
         return self.job_id
 
     @property
-    def database_entry(self):
+    def database_entry(self) -> DatabaseProperties:
         if not bool(self._database_property):
             self._database_property = DatabaseProperties(
                 job_dict=self.project.db.get_item_by_id(self.job_id)
@@ -307,7 +364,7 @@ def database_entry(self):
         return self._database_property
 
     @property
-    def parent_id(self):
+    def parent_id(self) -> int:
         """
         Get job id of the predecessor job - the job which was executed before the current one in the current job series
 
@@ -319,7 +376,7 @@ def parent_id(self):
         return self._parent_id
 
     @parent_id.setter
-    def parent_id(self, parent_id):
+    def parent_id(self, parent_id: int) -> None:
         """
         Set job id of the predecessor job - the job which was executed before the current one in the current job series
 
@@ -331,7 +388,7 @@ def parent_id(self, parent_id):
         self._parent_id = parent_id
 
     @property
-    def master_id(self):
+    def master_id(self) -> int:
         """
         Get job id of the master job - a meta job which groups a series of jobs, which are executed either in parallel
         or in serial.
@@ -344,7 +401,7 @@ def master_id(self):
         return self._master_id
 
     @master_id.setter
-    def master_id(self, master_id):
+    def master_id(self, master_id: int) -> None:
         """
         Set job id of the master job - a meta job which groups a series of jobs, which are executed either in parallel
         or in serial.
@@ -357,7 +414,7 @@ def master_id(self, master_id):
         self._master_id = master_id
 
     @property
-    def child_ids(self):
+    def child_ids(self) -> list:
         """
         list of child job ids - only meta jobs have child jobs - jobs which list the meta job as their master
 
@@ -369,7 +426,7 @@ def child_ids(self):
         )
 
     @property
-    def project_hdf5(self):
+    def project_hdf5(self) -> ProjectHDFio:
         """
         Get the ProjectHDFio instance which points to the HDF5 file the job is stored in
 
@@ -379,7 +436,7 @@ def project_hdf5(self):
         return self._hdf5
 
     @project_hdf5.setter
-    def project_hdf5(self, project):
+    def project_hdf5(self, project: ProjectHDFio) -> None:
         """
         Set the ProjectHDFio instance which points to the HDF5 file the job is stored in
 
@@ -389,14 +446,14 @@ def project_hdf5(self, project):
         self._hdf5 = project.copy()
 
     @property
-    def files_to_compress(self):
+    def files_to_compress(self) -> list:
         return self._files_to_compress
 
     @property
-    def files_to_remove(self):
+    def files_to_remove(self) -> list:
         return self._files_to_remove
 
-    def relocate_hdf5(self, h5_path=None):
+    def relocate_hdf5(self, h5_path: Optional[str] = None):
         """
         Relocate the hdf file. This function is needed when the child job is
         spawned by a parent job (cf. pyiron_base.jobs.master.generic)
@@ -409,7 +466,7 @@ def relocate_hdf5(self, h5_path=None):
         )
 
     @property
-    def project(self):
+    def project(self) -> "pyiron_base.project.generic.Project":
         """
         Project instance the jobs is located in
 
@@ -419,7 +476,7 @@ def project(self):
         return self._hdf5.project
 
     @property
-    def job_info_str(self):
+    def job_info_str(self) -> str:
         """
         Short string to describe the job by its job_name and job ID - mainly used for logging
 
@@ -429,7 +486,7 @@ def job_info_str(self):
         return "job: {0} id: {1}".format(self._name, self.job_id)
 
     @property
-    def working_directory(self):
+    def working_directory(self) -> str:
         """
         working directory the job is executed in - outside the HDF5 file
 
@@ -439,7 +496,7 @@ def working_directory(self):
         return self.project_hdf5.working_directory
 
     @property
-    def path(self):
+    def path(self) -> str:
         """
         Absolute path of the HDF5 group starting from the system root - combination of the absolute system path plus the
         absolute path inside the HDF5 file starting from the root group.
@@ -449,7 +506,7 @@ def path(self):
         """
         return self.project_hdf5.path
 
-    def check_if_job_exists(self, job_name=None, project=None):
+    def check_if_job_exists(
+        self,
+        job_name: Optional[str] = None,
+        project: Optional[
+            Union[ProjectHDFio, "pyiron_base.project.generic.Project"]
+        ] = None,
+    ) -> bool:
         """
         Check if a job already exists in a specific project.
 
@@ -481,13 +538,13 @@ def check_if_job_exists(self, job_name=None, project=None):
         else:
             return False
 
-    def show_hdf(self):
+    def show_hdf(self) -> None:
         """
         Iterating over the HDF5 datastructure and generating a human readable graph.
         """
         self.project_hdf5.show_hdf()
 
-    def get_from_table(self, path, name):
+    def get_from_table(self, path: str, name: str) -> Union[dict, list, float, int]:
         """
         Get a specific value from a pandas.Dataframe
 
@@ -500,7 +557,7 @@ def get_from_table(self, path, name):
         """
         return self.project_hdf5.get_from_table(path, name)
 
-    def remove(self, _protect_childs=True):
+    def remove(self, _protect_childs: bool=True) -> None:
         """
         Remove the job - this removes the HDF5 file, all data stored in the HDF5 file and the corresponding database entry.
 
@@ -528,7 +585,7 @@ def remove(self, _protect_childs=True):
         # After all children are deleted, remove the job itself.
         self.remove_child()
 
-    def remove_child(self):
+    def remove_child(self) -> None:
         """
         Internal function to remove the job; it also removes all child jobs.
         Never use this command directly, since it will destroy the integrity of your project.
@@ -577,7 +634,7 @@ def remove_child(self):
         if self.job_id is not None:
             self.project.db.delete_item(self.job_id)
 
-    def to_object(self, object_type=None, **qwargs):
+    def to_object(
+        self, object_type: Optional[str] = None, **qwargs
+    ) -> "pyiron_base.jobs.job.generic.GenericJob":
         """
         Load the full pyiron object from an HDF5 file
 
@@ -596,7 +653,7 @@ def to_object(self, object_type=None, **qwargs):
             )
         return self.project_hdf5.to_object(object_type, **qwargs)
 
-    def get(self, name, default=None):
+    def get(self, name: str, default: Optional[Any] = None) -> Any:
         """
         Internal wrapper function for __getitem__() - self[name]
 
@@ -617,7 +674,7 @@ def get(self, name, default=None):
                 return default
             raise
 
-    def load(self, job_specifier, convert_to_object=True):
+    def load(
+        self, job_specifier: Union[str, int], convert_to_object: bool = True
+    ) -> Union["pyiron_base.jobs.job.generic.GenericJob", "JobCore"]:
         """
         Load an existing pyiron object - most commonly a job - from the database
 
@@ -634,7 +691,7 @@ def load(self, job_specifier, convert_to_object=True):
             job_specifier=job_specifier, convert_to_object=convert_to_object
         )
 
-    def inspect(self, job_specifier):
+    def inspect(self, job_specifier: Union[str, int]) -> "JobCore":
         """
         Inspect an existing pyiron object - most commonly a job - from the database
 
@@ -646,7 +703,7 @@ def inspect(self, job_specifier):
         """
         return self.project.inspect(job_specifier=job_specifier)
 
-    def is_master_id(self, job_id):
+    def is_master_id(self, job_id: int) -> bool:
         """
         Check if the job ID job_id is the master ID for any child job
 
@@ -668,7 +725,7 @@ def is_master_id(self, job_id):
             > 0
         )
 
-    def get_job_id(self, job_specifier=None):
+    def get_job_id(self, job_specifier: Optional[Union[str, int]] = None) -> Optional[int]:
         """
         get the job_id for job named job_name in the local project path from database
 
@@ -693,7 +750,7 @@ def get_job_id(self, job_specifier=None):
         return None
 
     @deprecate("use job.files.list()")
-    def list_files(self):
+    def list_files(self) -> list:
         """
         List files inside the working directory
 
@@ -705,7 +762,7 @@ def list_files(self):
         """
         return _job_list_files(self)
 
-    def list_childs(self):
+    def list_childs(self) -> list:
         """
         List child jobs as JobPath objects - not loading the full GenericJob objects for each child
 
@@ -714,13 +771,13 @@ def list_childs(self):
         """
         return [self.project.inspect(child_id).job_name for child_id in self.child_ids]
 
-    def _list_groups(self):
+    def _list_groups(self) -> list:
         return self.project_hdf5.list_groups() + self._list_ext_childs()
 
-    def _list_nodes(self):
+    def _list_nodes(self) -> list:
         return self.project_hdf5.list_nodes()
 
-    def _list_all(self):
+    def _list_all(self) -> dict:
         """
         List all groups and nodes of the HDF5 file - where groups are equivalent to directories and nodes to files.
 
@@ -731,7 +788,7 @@ def _list_all(self):
         h5_dict["groups"] += self._list_ext_childs()
         return h5_dict
 
-    def copy(self):
+    def copy(self) -> "JobCore":
         """
         Copy the JobCore object which links to the HDF5 file
 
@@ -744,12 +801,12 @@ def copy(self):
 
     def _internal_copy_to(
         self,
-        project=None,
-        new_job_name=None,
-        new_database_entry=True,
-        copy_files=True,
-        delete_existing_job=False,
-    ):
+        project: Optional[
+            Union["JobCore", ProjectHDFio, "pyiron_base.project.generic.Project"]
+        ] = None,
+        new_job_name: Optional[str] = None,
+        new_database_entry: bool = True,
+        copy_files: bool = True,
+        delete_existing_job: bool = False,
+    ) -> "JobCore":
         """
         Internal helper function for copy_to() which returns more
 
@@ -824,12 +881,12 @@ def _internal_copy_to(
 
     def copy_to(
         self,
-        project,
-        new_job_name=None,
-        input_only=False,
-        new_database_entry=True,
-        copy_files=True,
-    ):
+        project: Union["JobCore", ProjectHDFio, "pyiron_base.project.generic.Project"],
+        new_job_name: Optional[str] = None,
+        input_only: bool = False,
+        new_database_entry: bool = True,
+        copy_files: bool = True,
+    ) -> "JobCore":
         """
         Copy the content of the job including the HDF5 file to a new location
 
@@ -871,15 +928,12 @@ def copy_to(
             new_job_core._status = "initialized"
         return new_job_core
 
-    def move_to(self, project):
+    def move_to(self, project: ProjectHDFio) -> None:
         """
         Move the content of the job including the HDF5 file to a new location
 
         Args:
             project (ProjectHDFio): project to move the job to
-
-        Returns:
-            JobCore: JobCore object pointing to the new location.
         """
         delete_hdf5_after_copy = False
         old_working_directory = self.working_directory
@@ -910,7 +964,7 @@ def move_to(self, project):
             shutil.rmtree(old_working_directory)
             os.rmdir("/".join(old_working_directory.split("/")[:-1]))
 
-    def rename(self, new_job_name):
+    def rename(self, new_job_name: str) -> None:
         """
         Rename the job - by changing the job name
 
@@ -919,7 +973,7 @@ def rename(self, new_job_name):
         """
         self.job_name = new_job_name
 
-    def reset_job_id(self, job_id=None):
+    def reset_job_id(self, job_id: Optional[int] = None) -> None:
         """
         The reset_job_id function has to be implemented by the derived classes - usually the GenericJob class
 
@@ -931,13 +985,13 @@ def reset_job_id(self, job_id=None):
             job_id = int(job_id)
         self._job_id = job_id
 
-    def save(self):
+    def save(self) -> None:
         """
         The save function has to be implemented by the derived classes - usually the GenericJob class
         """
         raise NotImplementedError("save() should be implemented in the derived class")
 
-    def to_hdf(self, hdf=None, group_name="group"):
+    def to_hdf(self, hdf: Optional[ProjectHDFio] = None, group_name: str = "group") -> None:
         """
         Store object in hdf5 format - The function has to be implemented by the derived classes
         - usually the GenericJob class
@@ -948,7 +1002,7 @@ def to_hdf(self, hdf=None, group_name="group"):
         """
         raise NotImplementedError("to_hdf() should be implemented in the derived class")
 
-    def from_hdf(self, hdf=None, group_name="group"):
+    def from_hdf(self, hdf: Optional[ProjectHDFio] = None, group_name: str = "group") -> None:
         """
         Restore object from hdf5 format - The function has to be implemented by the derived classes
         - usually the GenericJob class
@@ -961,7 +1015,7 @@ def from_hdf(self, hdf=None, group_name="group"):
             "from_hdf() should be implemented in the derived class"
         )
 
-    def __del__(self):
+    def __del__(self) -> None:
         """
         The delete function is just implemented for compatibility
         """
@@ -976,7 +1030,7 @@ def __del__(self):
         "Use job.output for results, job.files to access files; job.content to access HDF storage and "
         "job.child_project to access children of master jobs."
     )
-    def __getitem__(self, item):
+    def __getitem__(self, item: str) -> Any:
         """
         Get/read data from the HDF5 file, child jobs or access log files.
 
@@ -1027,7 +1081,7 @@ def __getitem__(self, item):
                 return child["/".join(name_lst[1:])]
         return None
 
-    def __setitem__(self, key, value):
+    def __setitem__(self, key: str, value: Any) -> None:
         """
         Stores data
 
@@ -1041,7 +1095,7 @@ def __setitem__(self, key, value):
             )
         self._hdf5[key] = value
 
-    def __delitem__(self, key):
+    def __delitem__(self, key: str) -> None:
         """
         Delete item from the HDF5 file
 
@@ -1050,7 +1104,7 @@ def __delitem__(self, key):
         """
         del self.project_hdf5[posixpath.join(self.project_hdf5.h5_path, key)]
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         """
         Human readable string representation
 
@@ -1059,13 +1113,13 @@ def __repr__(self):
         """
         return str(self.list_all())
 
-    def _create_working_directory(self):
+    def _create_working_directory(self) -> None:
         """
         internal function to create the working directory on the file system if it does not exist already.
         """
         self.project_hdf5.create_working_directory()
 
-    def _list_ext_childs(self):
+    def _list_ext_childs(self) -> list:
         """
         internal function to list nodes excluding childs
 
@@ -1076,7 +1130,7 @@ def _list_ext_childs(self):
         childs = self.list_childs()
         return list(set(childs) - set(nodes))
 
-    def compress(self, files_to_compress=None, files_to_remove=None):
+    def compress(
+        self,
+        files_to_compress: Optional[List[str]] = None,
+        files_to_remove: Optional[List[str]] = None,
+    ) -> None:
         """
         Compress the output files of a job object.
 
@@ -1097,13 +1151,13 @@ def compress(self, files_to_compress=None, files_to_remove=None):
             files_to_remove=files_to_remove,
         )
 
-    def decompress(self):
+    def decompress(self) -> None:
         """
         Decompress the output files of a compressed job object.
         """
         _job_decompress(job=self)
 
-    def is_compressed(self):
+    def is_compressed(self) -> bool:
         """
         Check if the job is already compressed or not.
 
@@ -1112,19 +1166,19 @@ def is_compressed(self):
         """
         return _job_is_compressed(job=self)
 
-    def self_archive(self):
+    def self_archive(self) -> None:
         """
         Compress HDF5 file of the job object to tar-archive
         """
         _job_archive(job=self)
 
-    def self_unarchive(self):
+    def self_unarchive(self) -> None:
         """
         Decompress HDF5 file of the job object from tar-archive
         """
         _job_unarchive(job=self)
 
-    def is_self_archived(self):
+    def is_self_archived(self) -> bool:
         """
         Check if the HDF5 file of the Job is compressed as tar-archive
 
@@ -1132,58 +1186,3 @@ def is_self_archived(self):
             bool: [True/False]
         """
         return _job_is_archived(job=self)
-
-
-class DatabaseProperties(object):
-    """
-    Access the database entry of the job
-    """
-
-    def __init__(self, job_dict=None):
-        self._job_dict = job_dict
-
-    def __bool__(self):
-        return self._job_dict is not None
-
-    def __dir__(self):
-        return list(self._job_dict.keys())
-
-    def __getattr__(self, name):
-        if name in self._job_dict.keys():
-            return self._job_dict[name]
-        else:
-            raise AttributeError(name)
-
-    def __repr__(self):
-        return f"{self.__class__.__name__}({repr(self._job_dict)})"
-
-
-class HDF5Content(object):
-    """
-    Access the HDF5 file of the job
-    """
-
-    def __init__(self, project_hdf5):
-        self._project_hdf5 = project_hdf5
-
-    def __getattr__(self, name):
-        try:
-            return self[name]
-        except KeyError:
-            raise AttributeError(name) from None
-
-    def __getitem__(self, item):
-        value = recursive_load_from_hdf(self._project_hdf5, item)
-        if value is not None:
-            return value
-
-        if item in self._project_hdf5.list_groups():
-            return HDF5Content(self._project_hdf5[item])
-        else:
-            raise KeyError(item)
-
-    def __dir__(self):
-        return self._project_hdf5.list_nodes() + self._project_hdf5.list_groups()
-
-    def __repr__(self):
-        return self._project_hdf5.__repr__()
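
Moving DatabaseProperties and HDF5Content above JobCore lets the JobCore body reference both classes directly in its new return hints. A short sketch of what the two accessors expose on an existing job (assuming a finished job named "demo"; the attributes on content depend on what the job wrote to HDF5):

    from pyiron_base import Project

    pr = Project("type_hint_demo")
    job = pr.inspect("demo")           # JobCore, without loading the full object
    print(job.database_entry.status)   # DatabaseProperties: fields of the DB row
    print(dir(job.content))            # HDF5Content: nodes and groups in the file
    print(job.content["input"])        # nested groups come back as HDF5Content
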
diff --git a/pyiron_base/jobs/job/extension/executable.py b/pyiron_base/jobs/job/extension/executable.py
index 8df0492b6..3378ef4d5 100644
--- a/pyiron_base/jobs/job/extension/executable.py
+++ b/pyiron_base/jobs/job/extension/executable.py
@@ -4,6 +4,7 @@
 
 import os
 from dataclasses import asdict, fields
+from typing import List, Optional, Tuple, Union
 
 from pyiron_snippets.resources import ExecutableResolver
 
@@ -32,10 +33,10 @@ class Executable(HasDict):
 
     def __init__(
         self,
-        path_binary_codes=None,
-        codename=None,
-        module=None,
-        overwrite_nt_flag=False,
+        path_binary_codes: Optional[List[str]] = None,
+        codename: Optional[str] = None,
+        module: Optional[str] = None,
+        overwrite_nt_flag: bool = False,
     ):
         """
         Handle the path to the executable, as well as the version selection.
@@ -72,14 +73,14 @@ def __init__(
             self.version = self.default_version
 
     @property
-    def accepted_return_codes(self):
+    def accepted_return_codes(self) -> List[int]:
         """
         list of int: accept all of the return codes in this list as the result of a successful run
         """
         return self.storage.accepted_return_codes
 
     @accepted_return_codes.setter
-    def accepted_return_codes(self, value):
+    def accepted_return_codes(self, value: List[int]) -> None:
         if not isinstance(value, list) or any(
             not isinstance(c, int) or c > 255 for c in value
         ):
@@ -87,7 +88,7 @@ def accepted_return_codes(self, value):
         self.storage.accepted_return_codes = value
 
     @property
-    def version(self):
+    def version(self) -> str:
         """
         Version of the Executable
 
@@ -97,7 +98,7 @@ def version(self):
         return self.storage.version
 
     @property
-    def default_version(self):
+    def default_version(self) -> str:
         """
         Default Version of the Available Executables
         i.e. specifically defined
@@ -111,7 +112,7 @@ def default_version(self):
         return sorted(self.executable_lst.keys())[0]
 
     @version.setter
-    def version(self, new_version):
+    def version(self, new_version: str) -> None:
         """
         Version of the Executable
 
@@ -131,7 +132,7 @@ def version(self, new_version):
             )
 
     @property
-    def mpi(self):
+    def mpi(self) -> bool:
         """
         Check if the message passing interface (MPI) is activated.
 
@@ -143,7 +144,7 @@ def mpi(self):
         return self.storage.mpi
 
     @mpi.setter
-    def mpi(self, mpi_bool):
+    def mpi(self, mpi_bool: bool) -> None:
         """
         Activate the message passing interface (MPI).
 
@@ -158,7 +159,7 @@ def mpi(self, mpi_bool):
             raise ValueError("No executable set!")
 
     @property
-    def available_versions(self):
+    def available_versions(self) -> List[str]:
         """
         List all available executables in the path_binary_codes for the specified codename.
 
@@ -167,7 +168,7 @@ def available_versions(self):
         """
         return self.list_executables()
 
-    def list_executables(self):
+    def list_executables(self) -> List[str]:
         """
         List all available executables in the path_binary_codes for the specified codename.
 
@@ -177,7 +178,7 @@ def list_executables(self):
         return sorted(list(self.executable_lst.keys()))
 
     @property
-    def executable_path(self):
+    def executable_path(self) -> str:
         """
         Get the executable path
 
@@ -192,7 +193,7 @@ def executable_path(self):
         return self._executable_select()
 
     @executable_path.setter
-    def executable_path(self, new_path):
+    def executable_path(self, new_path: str) -> None:
         """
         Set the executable path
 
@@ -207,17 +208,30 @@ def executable_path(self, new_path):
             self.storage.mpi = False
 
     @classmethod
-    def instantiate(cls, obj_dict: dict, version: str = None) -> "Self":
+    def instantiate(cls, obj_dict: dict, version: Optional[str] = None) -> "Executable":
         try:
             codename = obj_dict["name"]
         except KeyError:
             codename = obj_dict["executable"]["name"]
         return cls(codename=codename)
 
-    def _to_dict(self):
+    def _to_dict(self) -> dict:
+        """
+        Convert the object to a dictionary.
+
+        Returns:
+            dict: A dictionary representation of the object.
+        """
         return asdict(self.storage)
 
-    def _from_dict(self, obj_dict, version=None):
+    def _from_dict(self, obj_dict: dict, version: Optional[str] = None) -> None:
+        """
+        Load the object from a dictionary representation.
+
+        Args:
+            obj_dict (dict): A dictionary representation of the object.
+            version (str, optional): The version of the object. Defaults to None.
+        """
         data_container_keys = tuple(f.name for f in fields(ExecutableDataClass))
         executable_class_dict = {}
         # Backwards compatibility; dict state used to be nested one level deeper
@@ -227,7 +241,7 @@ def _from_dict(self, obj_dict, version=None):
             executable_class_dict[key] = obj_dict.get(key, None)
         self.storage = ExecutableDataClass(**executable_class_dict)
 
-    def get_input_for_subprocess_call(self, cores, threads, gpus=None):
+    def get_input_for_subprocess_call(
+        self, cores: int, threads: int, gpus: Optional[int] = None
+    ) -> Tuple[Union[str, List[str]], bool]:
         """
         Get the input parameters for the subprocess call to execute the job
 
@@ -253,19 +267,19 @@ def get_input_for_subprocess_call(self, cores, threads, gpus=None):
             shell = False
         return executable, shell
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         """
         Executable path
         """
         return repr(self.executable_path)
 
-    def __str__(self):
+    def __str__(self) -> str:
         """
         Executable path
         """
         return str(self.executable_path)
 
-    def _executable_versions_list(self):
+    def _executable_versions_list(self) -> dict:
         """
         Internal function to list all available executables in the path_binary_codes for the specified codename.
 
@@ -278,7 +292,7 @@ def _executable_versions_list(self):
             module=self._module,
         ).dict()
 
-    def _executable_select(self):
+    def _executable_select(self) -> str:
         """
         Internal function to select an executable based on the codename and the version.
 
@@ -293,5 +307,5 @@ def _executable_select(self):
             else:
                 return ""
 
-    def _get_hdf_group_name(self):
+    def _get_hdf_group_name(self) -> str:
         return "executable"
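
To make the new Tuple[Union[str, List[str]], bool] hint concrete: the first element is either a shell command string or an argv list, and the second tells the caller whether to run it through a shell. A sketch (the codename is illustrative; versions are resolved from the configured resource paths):

    from pyiron_base.jobs.job.extension.executable import Executable

    exe = Executable(codename="lammps")
    print(exe.list_executables())      # e.g. ["1.0", "1.0_mpi"] if resources exist
    executable, shell = exe.get_input_for_subprocess_call(cores=4, threads=1)
    # shell is True  -> executable is a single command string
    # shell is False -> executable is an argv list for subprocess
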
diff --git a/pyiron_base/jobs/job/extension/files.py b/pyiron_base/jobs/job/extension/files.py
index 12d68362a..27040a19a 100644
--- a/pyiron_base/jobs/job/extension/files.py
+++ b/pyiron_base/jobs/job/extension/files.py
@@ -1,7 +1,7 @@
 import os
 import posixpath
 from itertools import islice
-from typing import List, Optional
+from typing import Generator, List, Optional, Union
 
 from pyiron_base.jobs.job.util import (
     _working_directory_list_files,
@@ -9,6 +9,50 @@
 )
 
 
+class File:
+    __slots__ = ("_path",)
+
+    def __init__(self, path: str):
+        self._path = path
+
+    def __str__(self) -> str:
+        return self._path
+
+    def _read(self, tail: Optional[int] = None) -> List[str]:
+        return _working_directory_read_file(
+            working_directory=os.path.dirname(str(self)),
+            file_name=os.path.basename(str(self)),
+            tail=tail,
+        )
+
+    def __iter__(self) -> Generator:
+        return iter(self._read())
+
+    def list(self, lines: Optional[int] = None) -> List[str]:
+        """
+        Return file content as list of lines.
+
+        Args:
+            lines (int): only return the first `lines` lines
+
+        Return:
+            list of str: file content
+        """
+        return list(islice(iter(self), lines))
+
+    def tail(self, lines: int = 100) -> None:
+        """
+        Print the last `lines` to stdout.
+
+        Args:
+            lines (int): number of output lines
+        """
+        print(*self._read(tail=lines), sep="")
+
+    def __eq__(self, other: Union[str, "File"]) -> bool:
+        return str(self) == str(other)
+
+
 class FileBrowser:
     """
     Allows to browse the files in a job directory.
@@ -46,10 +90,10 @@ class FileBrowser:
 
     __slots__ = ("_working_directory",)
 
-    def __init__(self, working_directory):
+    def __init__(self, working_directory: str):
         self._working_directory = working_directory
 
-    def _get_file_dict(self):
+    def _get_file_dict(self) -> dict:
         return {
             f.replace(".", "_"): f
             for f in _working_directory_list_files(
@@ -57,7 +101,7 @@ def _get_file_dict(self):
             )
         }
 
-    def __dir__(self):
+    def __dir__(self) -> List[str]:
         return list(self._get_file_dict().keys()) + super().__dir__()
 
     def list(self) -> List[str]:
@@ -66,7 +110,7 @@ def list(self) -> List[str]:
         """
         return _working_directory_list_files(working_directory=self._working_directory)
 
-    def _ipython_display_(self):
+    def _ipython_display_(self) -> None:
         path = self._working_directory + ":"
         files = [
             "\t" + str(f)
@@ -76,7 +120,7 @@ def _ipython_display_(self):
         ]
         print(os.linesep.join([path, *files]))
 
-    def tail(self, file: str, lines: int = 100):
+    def tail(self, file: str, lines: int = 100) -> None:
         """
         Print the last lines of a file.
 
@@ -89,7 +133,7 @@ def tail(self, file: str, lines: int = 100):
         """
         return self[file].tail(lines=lines)
 
-    def __getitem__(self, item):
+    def __getitem__(self, item: str) -> Union[File, "FileBrowser"]:
         sub = os.path.join(self._working_directory, item)
         if os.path.isdir(sub):
             return FileBrowser(sub)
@@ -106,7 +150,7 @@ def __getitem__(self, item):
         else:
             raise FileNotFoundError(item)
 
-    def __getattr__(self, item):
+    def __getattr__(self, item: str) -> Union[File, "FileBrowser"]:
         if item.startswith("__") and item.endswith("__"):
             raise AttributeError(item)
         else:
@@ -114,47 +158,3 @@ def __getattr__(self, item):
                 return self[self._get_file_dict()[item]]
             except KeyError:
                 raise FileNotFoundError(item) from None
-
-
-class File:
-    __slots__ = ("_path",)
-
-    def __init__(self, path):
-        self._path = path
-
-    def __str__(self):
-        return self._path
-
-    def _read(self, tail=None):
-        return _working_directory_read_file(
-            working_directory=os.path.dirname(str(self)),
-            file_name=os.path.basename(str(self)),
-            tail=tail,
-        )
-
-    def __iter__(self):
-        return iter(self._read())
-
-    def list(self, lines: Optional[int] = None):
-        """
-        Return file content as list of lines.
-
-        Args:
-            lines (int): only return the first `lines` lines
-
-        Return:
-            list of str: file content
-        """
-        return list(islice(iter(self), lines))
-
-    def tail(self, lines: int = 100):
-        """
-        Print the last `lines` to stdout.
-
-        Args:
-            lines (int): number of output lines
-        """
-        print(*self._read(tail=lines), sep="")
-
-    def __eq__(self, other):
-        return self.__str__().__eq__(other)
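
The relocated File/FileBrowser pair backs the job.files accessor typed earlier in this patch. A usage sketch on a finished job (file names are illustrative):

    job.files.list()                       # file names in the working directory
    job.files.tail("error.out", lines=20)  # delegates to File.tail()
    log = job.files["error.out"]           # a File; iterating yields its lines
    first_five = log.list(lines=5)         # first lines as a list of str
    job.files.error_out                    # attribute access maps "." to "_"
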
diff --git a/pyiron_base/jobs/job/extension/jobstatus.py b/pyiron_base/jobs/job/extension/jobstatus.py
index a8967d910..fb2fe2a9a 100644
--- a/pyiron_base/jobs/job/extension/jobstatus.py
+++ b/pyiron_base/jobs/job/extension/jobstatus.py
@@ -4,6 +4,7 @@
 """
 The JobStatus class belongs to the GenericJob object.
 """
+from typing import Optional, Union
 
 from pyiron_base.utils.instance import static_isinstance
 
@@ -35,7 +36,7 @@
 ] + job_status_finished_lst
 
 
-def format_docstring_with_statuses(n_tabs=1):
+def format_docstring_with_statuses(n_tabs: int = 1) -> callable:
     """
     Replaces a '{}' in the decorated object's docstring with the documentation for all possible job status.
 
@@ -92,7 +93,12 @@ class JobStatus(object):
             job status as string
     """
 
-    def __init__(self, initial_status="initialized", db=None, job_id=None):
+    def __init__(
+        self,
+        initial_status: str = "initialized",
+        db: Optional[
+            Union[
+                "pyiron_base.database.generic.DatabaseAccess",
+                "pyiron_base.database.filetable.FileTable",
+            ]
+        ] = None,
+        job_id: Optional[int] = None,
+    ):
         super(JobStatus, self).__setattr__("_status_dict", {})
         self._db = None
         self._job_id = None
@@ -101,7 +107,7 @@ def __init__(self, initial_status="initialized", db=None, job_id=None):
         self.job_id = job_id
 
     @property
-    def database(self):
+    def database(
+        self,
+    ) -> Optional[
+        Union[
+            "pyiron_base.database.generic.DatabaseAccess",
+            "pyiron_base.database.filetable.FileTable",
+        ]
+    ]:
         """
         Get the database which is responsible for this job. If no database is linked it returns None.
         Returns:
@@ -110,7 +116,7 @@ def database(self):
         return self._db
 
     @database.setter
-    def database(self, db):
+    def database(
+        self,
+        db: Union[
+            "pyiron_base.database.generic.DatabaseAccess",
+            "pyiron_base.database.filetable.FileTable",
+        ],
+    ) -> None:
         """
         Set the database which is responsible for this job.
         Args:
@@ -124,7 +130,7 @@ def database(self, db):
         self._db = db
 
     @property
-    def job_id(self):
+    def job_id(self) -> int:
         """
         Get the job id of the job this jobstatus is associated to.
         Returns:
@@ -133,7 +139,7 @@ def job_id(self):
         return self._job_id
 
     @job_id.setter
-    def job_id(self, unique_id):
+    def job_id(self, unique_id: int) -> None:
         """
         Set the job id of the job this jobstatus is associated to.
         Args:
@@ -146,7 +152,7 @@ def job_id(self, unique_id):
 
     @format_docstring_with_statuses(n_tabs=2)
     @property
-    def string(self):
+    def string(self) -> str:
         """
         Get the current status as string, it can be: {}
 
@@ -159,7 +165,7 @@ def string(self):
 
     @format_docstring_with_statuses(n_tabs=2)
     @string.setter
-    def string(self, status):
+    def string(self, status: str) -> None:
         """
         Set the current status, to one of the following: {}
 
@@ -176,7 +182,7 @@ def string(self, status):
                 f"'{status}' is not a valid job status. Instead use [{', '.join(job_status_lst)}]"
             )
 
-    def refresh_status(self):
+    def refresh_status(self) -> None:
         """
         Refresh the job status - check if the database and job_id are set and if this is the case load the job status
         from the database.
@@ -195,7 +201,7 @@ def refresh_status(self):
             self._reset()
             self._status_dict[status] = True
 
-    def _status_write(self):
+    def _status_write(self) -> None:
         """
         Private function: Write the job status to the internal variable _key and store it in the database.
         """
@@ -204,14 +210,14 @@ def _status_write(self):
             if self.database.get_job_status(job_id=self.job_id) != current_status:
                 self.database.set_job_status(job_id=self.job_id, status=current_status)
 
-    def _reset(self):
+    def _reset(self) -> None:
         """
         internal function to reset the run mode - sets all run modes to false.
         """
         self._status_dict = {status: False for status in job_status_lst}
 
     @staticmethod
-    def _bool_check(boolean):
+    def _bool_check(boolean: bool) -> None:
         """
         Private function: Raise TypeError if boolean is not type bool and raise a ValueError if it is not True.
         Args:
@@ -224,10 +230,10 @@ def _bool_check(boolean):
         if boolean is False:
             raise ValueError("The JobStatus can only be set to True.")
 
-    def _get_status_from_dict(self):
+    def _get_status_from_dict(self) -> str:
         return [key for key, val in self._status_dict.items() if val][0]
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         """
         Human readable representation of the job status
         Returns:
@@ -235,7 +241,7 @@ def __repr__(self):
         """
         return repr(self.string)
 
-    def __str__(self):
+    def __str__(self) -> str:
         """
         Machine readable representation of the job status
         Returns:
@@ -243,7 +249,7 @@ def __str__(self):
         """
         return str(self.string)
 
-    def __getattr__(self, name):
+    def __getattr__(self, name: str) -> bool:
         if name in self._status_dict.keys():
             self.refresh_status()
             return self._status_dict[name]
@@ -251,7 +257,7 @@ def __getattr__(self, name):
             "'{}' object has no attribute '{}'".format(self.__class__.__name__, name)
         )
 
-    def __setattr__(self, name, value):
+    def __setattr__(self, name: str, value: bool) -> None:
         if name in self._status_dict.keys():
             if not isinstance(value, bool):
                 raise TypeError("A run mode can only be activated using [True].")
@@ -262,10 +268,10 @@ def __setattr__(self, name, value):
         else:
             super(JobStatus, self).__setattr__(name, value)
 
-    def __dir__(self):
+    def __dir__(self) -> list:
         return list(self._status_dict.keys())
 
-    def __eq__(self, other):
+    def __eq__(self, other: Union[str, "JobStatus"]) -> bool:
         if isinstance(other, self.__class__):
             return other._status_dict == self._status_dict
         elif isinstance(other, str):
@@ -273,7 +279,7 @@ def __eq__(self, other):
         else:
             return super(JobStatus, self).__eq__(other)
 
-    def __ne__(self, other):
+    def __ne__(self, other: Union[str, "JobStatus"]) -> bool:
         if isinstance(other, self.__class__):
             return other._status_dict != self._status_dict
         elif isinstance(other, str):
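
Since __eq__ and __ne__ now advertise Union[str, "JobStatus"], status checks read naturally in both forms. A small sketch of the flag behaviour:

    from pyiron_base.jobs.job.extension.jobstatus import JobStatus

    status = JobStatus()        # starts out as "initialized"
    status.running = True       # __setattr__ activates exactly one flag
    print(status == "running")  # True, via the str branch of __eq__
    print(status.finished)      # False, via __getattr__
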
diff --git a/pyiron_base/jobs/job/extension/server/queuestatus.py b/pyiron_base/jobs/job/extension/server/queuestatus.py
index 0d9563eeb..0bde8c144 100644
--- a/pyiron_base/jobs/job/extension/server/queuestatus.py
+++ b/pyiron_base/jobs/job/extension/server/queuestatus.py
@@ -7,6 +7,7 @@
 
 import time
 from concurrent.futures import Future
+from typing import List, Optional, Union
 
 import numpy as np
 import pandas
@@ -30,8 +31,8 @@
 
 
 def queue_table(
-    job_ids=None, working_directory_lst=None, project_only=True, full_table=False
-):
+    job_ids: Optional[List[int]] = None,
+    working_directory_lst: Optional[List[str]] = None,
+    project_only: bool = True,
+    full_table: bool = False,
+) -> Optional[pandas.DataFrame]:
     """
     Display the queuing system table as pandas.Dataframe
 
@@ -93,7 +94,7 @@ def queue_table(
         return None
 
 
-def queue_check_job_is_waiting_or_running(item):
+def queue_check_job_is_waiting_or_running(
+    item: Union[int, "pyiron_base.jobs.job.generic.GenericJob"],
+) -> Optional[bool]:
     """
     Check if a job is still listed in the queue system as either waiting or running.
 
@@ -113,7 +114,7 @@ def queue_check_job_is_waiting_or_running(item):
         return None
 
 
-def queue_info_by_job_id(job_id):
+def queue_info_by_job_id(job_id: int) -> Optional[dict]:
     """
     Display the queuing system info of job by qstat | grep  shell command
     as dictionary
@@ -130,7 +131,7 @@ def queue_info_by_job_id(job_id):
         return None
 
 
-def queue_is_empty():
+def queue_is_empty() -> bool:
     """
     Check if the queue table is currently empty - no more jobs to wait for.
 
@@ -143,7 +144,7 @@ def queue_is_empty():
         return True
 
 
-def queue_delete_job(item) -> None:
+def queue_delete_job(
+    item: Union[int, "pyiron_base.jobs.job.generic.GenericJob"],
+) -> Optional[str]:
     """
     Delete a job from the queuing system
 
@@ -160,7 +161,7 @@ def queue_delete_job(item) -> None:
         return None
 
 
-def queue_enable_reservation(item):
+def queue_enable_reservation(
+    item: Union[int, "pyiron_base.jobs.job.generic.GenericJob"],
+) -> Optional[str]:
     """
     Enable a reservation for a particular job within the queuing system
 
@@ -182,7 +183,7 @@ def queue_enable_reservation(item):
         return None
 
 
-def wait_for_job(job, interval_in_s=5, max_iterations=100):
+def wait_for_job(
+    job: "pyiron_base.jobs.job.generic.GenericJob",
+    interval_in_s: int = 5,
+    max_iterations: int = 100,
+) -> None:
     """
     Sleep until the job is finished but maximum interval_in_s * max_iterations seconds.
 
@@ -247,13 +248,13 @@ def wait_for_job(job, interval_in_s=5, max_iterations=100):
 
 
 def wait_for_jobs(
-    project,
-    interval_in_s=5,
-    max_iterations=100,
-    recursive=True,
-    ignore_exceptions=False,
-    try_collecting=False,
-):
+    project: "pyiron_base.project.generic.Project",
+    interval_in_s: int = 5,
+    max_iterations: int = 100,
+    recursive: bool = True,
+    ignore_exceptions: bool = False,
+    try_collecting: bool = False,
+) -> None:
     """
     Wait for the calculation in the project to be finished
 
@@ -282,8 +283,8 @@ def wait_for_jobs(
 
 
 def update_from_remote(
-    project, recursive=True, ignore_exceptions=False, try_collecting=False
-):
+    project: "pyiron_base.project.generic.Project",
+    recursive: bool = True,
+    ignore_exceptions: bool = False,
+    try_collecting: bool = False,
+) -> Optional[list]:
     """
     Update jobs from the remote server
 
@@ -341,7 +342,7 @@ def update_from_remote(
             return failed_jobs
 
 
-def retrieve_job(job, try_collecting=False):
+def retrieve_job(
+    job: "pyiron_base.jobs.job.generic.GenericJob", try_collecting: bool = False
+) -> None:
     """
     Retrieve a job from remote server and check if it has a "finished status".
     Optionally try to collect its output.
@@ -362,7 +363,7 @@ def retrieve_job(job, try_collecting=False):
         job.run()
 
 
-def validate_que_request(item):
+def validate_que_request(item: Union[int, "pyiron_base.jobs.job.generic.GenericJob"]) -> int:
     """
     Internal function to convert the job_ID or hamiltonian to the queuing system ID.
 
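A sketch of the polling pattern wait_for_job documents: submit, block until the job leaves the queue, then inspect the result (job is assumed to come from a project as usual):

    from pyiron_base.jobs.job.extension.server.queuestatus import wait_for_job

    job.run()                                               # submit per run mode
    wait_for_job(job, interval_in_s=5, max_iterations=100)  # poll up to ~500 s
    print(job.status)                                       # "finished" on success
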
diff --git a/pyiron_base/jobs/job/extension/server/runmode.py b/pyiron_base/jobs/job/extension/server/runmode.py
index 8e66e5d91..bc42b0e7f 100644
--- a/pyiron_base/jobs/job/extension/server/runmode.py
+++ b/pyiron_base/jobs/job/extension/server/runmode.py
@@ -6,6 +6,8 @@
 Runmode class defines the different modes a pyiron job can be executed in
 """
 
+from typing import List, Union
+
 __author__ = "Jan Janssen"
 __copyright__ = (
     "Copyright 2020, Max-Planck-Institut für Eisenforschung GmbH - "
@@ -51,13 +53,13 @@ class Runmode(object):
                      'interactive_non_modal', 'srun', 'executor']
     """
 
-    def __init__(self, mode="modal"):
+    def __init__(self, mode: str = "modal"):
         super(Runmode, self).__init__()
         self._reset_mode()
         self.mode = mode
 
     @property
-    def mode(self):
+    def mode(self) -> str:
         """
         Get the run_mode of the job
         Returns:
@@ -66,7 +68,7 @@ def mode(self):
         return [key for key, val in self._mode.items() if val][0]
 
     @mode.setter
-    def mode(self, new_mode):
+    def mode(self, new_mode: str) -> None:
         """
         Set the run_mode of the job
         Args:
@@ -76,24 +78,24 @@ def mode(self, new_mode):
             self._reset_mode()
             self._mode[new_mode] = True
 
-    def _reset_mode(self):
+    def _reset_mode(self) -> None:
         super(Runmode, self).__setattr__(
             "_mode", {run_mode: False for run_mode in run_mode_lst}
         )
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         return repr(self.mode)
 
-    def __str__(self):
+    def __str__(self) -> str:
         return str(self.mode)
 
-    def __getattr__(self, name):
+    def __getattr__(self, name: str) -> bool:
         if name in self._mode.keys():
             return self._mode[name]
         else:
             raise AttributeError
 
-    def __setattr__(self, name, value):
+    def __setattr__(self, name: str, value: bool) -> None:
         if name in self._mode.keys():
             if not isinstance(value, bool):
                 raise TypeError("A run mode can only be activated using [True].")
@@ -104,10 +106,10 @@ def __setattr__(self, name, value):
         else:
             super(Runmode, self).__setattr__(name, value)
 
-    def __dir__(self):
+    def __dir__(self) -> List[str]:
         return list(self._mode.keys())
 
-    def __eq__(self, other):
+    def __eq__(self, other: Union[str, "Runmode"]) -> bool:
         if isinstance(other, self.__class__):
             return other._mode == self._mode
         elif isinstance(other, str):
@@ -115,7 +117,7 @@ def __eq__(self, other):
         else:
             return super(Runmode, self).__eq__(other)
 
-    def __ne__(self, other):
+    def __ne__(self, other: Union[str, "Runmode"]) -> bool:
         if isinstance(other, self.__class__):
             return other._mode != self._mode
         elif isinstance(other, str):
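A short sketch of the behavior these `Runmode` annotations encode, assuming `'queue'` is among the registered run modes:

    rm = Runmode()              # defaults to "modal"
    rm.mode = "queue"           # the setter resets all flags, then enables "queue"
    assert rm.mode == "queue"   # the property returns the single active mode
    assert rm == "queue"        # __eq__ also accepts plain strings
    assert rm.queue is True     # __getattr__ exposes every mode as a bool flag
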
diff --git a/pyiron_base/jobs/job/generic.py b/pyiron_base/jobs/job/generic.py
index 1e197fda7..d971bc395 100644
--- a/pyiron_base/jobs/job/generic.py
+++ b/pyiron_base/jobs/job/generic.py
@@ -12,7 +12,7 @@
 from concurrent.futures import Executor, Future
 from datetime import datetime
 from inspect import isclass
-from typing import Optional
+from typing import Optional, Union
 
 from h5io_browser.base import _read_hdf, _write_hdf
 from pyiron_snippets.deprecate import deprecate
@@ -54,6 +54,7 @@
 )
 from pyiron_base.state import state
 from pyiron_base.state.signal import catch_signals
+from pyiron_base.storage.hdfio import ProjectHDFio
 from pyiron_base.utils.instance import import_class, static_isinstance
 
 __author__ = "Joerg Neugebauer, Jan Janssen"
@@ -133,8 +134,8 @@ class GenericJob(JobCore, HasDict):
         + _doc_str_generic_job_attr
     )
 
-    def __init__(self, project, job_name):
-        super(GenericJob, self).__init__(project, job_name)
+    def __init__(self, project: ProjectHDFio, job_name: str):
+        super(GenericJob, self).__init__(project=project, job_name=job_name)
         self.__name__ = type(self).__name__
         self.__version__ = "0.4"
         self.__hdf_version__ = "0.1.0"
@@ -173,7 +174,7 @@ def __init__(self, project, job_name):
         self.error = GenericError(working_directory=self.project_hdf5.working_directory)
 
     @property
-    def version(self):
+    def version(self) -> Optional[str]:
         """
         Get the version of the hamiltonian, which is also the version of the executable unless a custom executable is
         used.
@@ -191,7 +192,7 @@ def version(self):
                 return None
 
     @version.setter
-    def version(self, new_version):
+    def version(self, new_version: str) -> None:
         """
         Set the version of the hamiltonian, which is also the version of the executable unless a custom executable is
         used.
@@ -203,7 +204,7 @@ def version(self, new_version):
         self._executable.version = new_version
 
     @property
-    def executable(self):
+    def executable(self) -> Executable:
         """
         Get the executable used to run the job - usually the path to an external executable.
 
@@ -214,7 +215,7 @@ def executable(self):
         return self._executable
 
     @executable.setter
-    def executable(self, exe):
+    def executable(self, exe: str) -> None:
         """
         Set the executable used to run the job - usually the path to an external executable.
 
@@ -225,7 +226,7 @@ def executable(self, exe):
         self._executable.executable_path = exe
 
     @property
-    def server(self):
+    def server(self) -> Server:
         """
         Get the server object to handle the execution environment for the job.
 
@@ -235,7 +236,7 @@ def server(self):
         return self._server
 
     @server.setter
-    def server(self, server):
+    def server(self, server: Server) -> None:
         """
         Set the server object to handle the execution environment for the job.
         Args:
@@ -244,7 +245,7 @@ def server(self, server):
         self._server = server
 
     @property
-    def queue_id(self):
+    def queue_id(self) -> int:
         """
         Get the queue ID, the ID returned from the queuing system - it is most likely not the same as the job ID.
 
@@ -254,7 +255,7 @@ def queue_id(self):
         return self.server.queue_id
 
     @queue_id.setter
-    def queue_id(self, qid):
+    def queue_id(self, qid: int) -> None:
         """
         Set the queue ID, the ID returned from the queuing system - it is most likely not the same as the job ID.
 
@@ -274,7 +275,7 @@ def logger(self):
         return self._logger
 
     @property
-    def restart_file_list(self):
+    def restart_file_list(self) -> list:
         """
         Get the list of files which are used to restart the calculation from these files.
 
@@ -287,7 +288,7 @@ def restart_file_list(self):
         return self._restart_file_list
 
     @restart_file_list.setter
-    def restart_file_list(self, filenames):
+    def restart_file_list(self, filenames: list) -> None:
         """
         Append new files to the restart file list - the list of files which are used to restart the calculation from.
 
@@ -302,7 +303,7 @@ def restart_file_list(self, filenames):
             self.restart_file_list.append(f)
 
     @property
-    def restart_file_dict(self):
+    def restart_file_dict(self) -> dict:
         """
         A dictionary of the new name of the copied restart files
         """
@@ -312,7 +313,7 @@ def restart_file_dict(self):
         return self._restart_file_dict
 
     @restart_file_dict.setter
-    def restart_file_dict(self, val):
+    def restart_file_dict(self, val: dict) -> None:
         if not isinstance(val, dict):
             raise ValueError("restart_file_dict should be a dictionary!")
         else:
@@ -325,7 +326,7 @@ def restart_file_dict(self, val):
                 self._restart_file_dict[k] = v
 
     @property
-    def exclude_nodes_hdf(self):
+    def exclude_nodes_hdf(self) -> list:
         """
         Get the list of nodes which are excluded from storing in the hdf5 file
 
@@ -335,7 +336,7 @@ def exclude_nodes_hdf(self):
         return self._exclude_nodes_hdf
 
     @exclude_nodes_hdf.setter
-    def exclude_nodes_hdf(self, val):
+    def exclude_nodes_hdf(self, val: Union[list, str]) -> None:
         if isinstance(val, str):
             val = [val]
         elif not hasattr(val, "__len__"):
@@ -343,7 +344,7 @@ def exclude_nodes_hdf(self, val):
         self._exclude_nodes_hdf = val
 
     @property
-    def exclude_groups_hdf(self):
+    def exclude_groups_hdf(self) -> list:
         """
         Get the list of groups which are excluded from storing in the hdf5 file
 
@@ -353,7 +354,7 @@ def exclude_groups_hdf(self):
         return self._exclude_groups_hdf
 
     @exclude_groups_hdf.setter
-    def exclude_groups_hdf(self, val):
+    def exclude_groups_hdf(self, val: Union[list, str]) -> None:
         if isinstance(val, str):
             val = [val]
         elif not hasattr(val, "__len__"):
@@ -361,7 +362,7 @@ def exclude_groups_hdf(self, val):
         self._exclude_groups_hdf = val
 
     @property
-    def job_type(self):
+    def job_type(self) -> str:
         """
         Job type object with all the available job types: ['ExampleJob', 'ParallelMaster', 'ScriptJob',
                                                            'ListMaster']
@@ -371,7 +372,7 @@ def job_type(self):
         return self.project.job_type
 
     @property
-    def working_directory(self):
+    def working_directory(self) -> str:
         """
         Get the working directory of the job is executed in - outside the HDF5 file. The working directory equals the
         path but it is represented by the filesystem:
@@ -389,11 +390,11 @@ def working_directory(self):
         return self.project_hdf5.working_directory
 
     @property
-    def executor_type(self):
+    def executor_type(self) -> Optional[Union[str, Executor]]:
         return self._executor_type
 
     @executor_type.setter
-    def executor_type(self, exe):
+    def executor_type(self, exe: Optional[Union[str, Executor]]) -> None:
         if exe is None:
             self._executor_type = exe
         elif isinstance(exe, str):
@@ -455,7 +456,7 @@ def calculate_kwargs(self) -> dict:
             "output_parameter_dict": self.get_output_parameter_dict(),
         }
 
-    def clear_job(self):
+    def clear_job(self) -> None:
         """
         Convenience function to clear job info after suspend. Mimics deletion of all the job info after suspend in a
         local test environment.
@@ -470,7 +471,7 @@ def clear_job(self):
         del self._restart_file_list
         del self._restart_file_dict
 
-    def copy(self):
+    def copy(self) -> "GenericJob":
         """
         Copy the GenericJob object which links to the job and its HDF5 file
 
@@ -500,7 +501,7 @@ def copy(self):
             )
         return copied_self
 
-    def collect_logfiles(self):
+    def collect_logfiles(self) -> None:
         """
         Collect the log files of the external executable and store the information in the HDF5 file. This method has
         to be implemented in the individual hamiltonians.
@@ -559,10 +560,10 @@ def get_input_parameter_dict(self) -> dict:
                 "files_to_copy": _get_restart_copy_dict(job=self),
             }
 
-    def get_output_parameter_dict(self):
+    def get_output_parameter_dict(self) -> dict:
         return {}
 
-    def collect_output(self):
+    def collect_output(self) -> None:
         """
         Collect the output files of the external executable and store the information in the HDF5 file. This method has
         to be implemented in the individual hamiltonians.
@@ -573,7 +574,7 @@ def collect_output(self):
 
     def save_output(
         self, output_dict: Optional[dict] = None, shell_output: Optional[str] = None
-    ):
+    ) -> None:
         """
         Store output of the calculate function in the HDF5 file.
 
@@ -588,7 +589,7 @@ def save_output(
         if shell_output is not None or output_dict is not None:
             self.to_hdf()
 
-    def suspend(self):
+    def suspend(self) -> None:
         """
         Suspend the job by storing the object and its state persistently in HDF5 file and exit it.
         """
@@ -601,7 +602,7 @@ def suspend(self):
         )
         self.clear_job()
 
-    def refresh_job_status(self):
+    def refresh_job_status(self) -> None:
         """
         Refresh job status by updating the job status with the status from the database if a job ID is available.
         """
@@ -627,7 +628,7 @@ def refresh_job_status(self):
             else:
                 self.status.finished = True
 
-    def write_input(self):
+    def write_input(self) -> None:
         """
         Call routines that generate the code specific input files
         Returns:
@@ -639,11 +640,11 @@ def write_input(self):
 
     def _internal_copy_to(
         self,
-        project=None,
-        new_job_name=None,
-        new_database_entry=True,
-        copy_files=True,
-        delete_existing_job=False,
+        project: Optional[ProjectHDFio]=None,
+        new_job_name: Optional[str]=None,
+        new_database_entry: bool=True,
+        copy_files: bool=True,
+        delete_existing_job: bool=False,
     ):
         # Store all job arguments in the HDF5 file
         delete_file_after_copy = _job_store_before_copy(job=self)
@@ -674,12 +675,12 @@ def _internal_copy_to(
 
     def copy_to(
         self,
-        project=None,
-        new_job_name=None,
-        input_only=False,
-        new_database_entry=True,
-        delete_existing_job=False,
-        copy_files=True,
+        project: Optional[Union[ProjectHDFio, JobCore]]=None,
+        new_job_name: Optional[str]=None,
+        input_only: bool=False,
+        new_database_entry: bool=True,
+        delete_existing_job: bool=False,
+        copy_files: bool=True,
     ):
         """
         Copy the content of the job including the HDF5 file to a new location.
@@ -727,7 +728,7 @@ def copy_to(
         )
         return new_job_core
 
-    def _after_generic_copy_to(self, original, new_database_entry, reloaded):
+    def _after_generic_copy_to(self, original: "GenericJob", new_database_entry: bool, reloaded: bool) -> None:
         """
         Called in :method:`.copy_to()` after :method`._internal_copy_to()` to allow sub classes to modify copy behavior.
 
@@ -738,7 +739,7 @@ def _after_generic_copy_to(self, original, new_database_entry, reloaded):
         """
         pass
 
-    def copy_file_to_working_directory(self, file):
+    def copy_file_to_working_directory(self, file: str) -> None:
         """
         Copy a specific file to the working directory before the job is executed.
 
@@ -750,7 +751,7 @@ def copy_file_to_working_directory(self, file):
         else:
             self.restart_file_list.append(os.path.abspath(file))
 
-    def copy_template(self, project=None, new_job_name=None):
+    def copy_template(self, project: Optional[Union[ProjectHDFio, JobCore]]=None, new_job_name: Optional[str]=None) -> "GenericJob":
         """
         Copy the content of the job including the HDF5 file but without the output data to a new location
 
@@ -770,7 +771,7 @@ def copy_template(self, project=None, new_job_name=None):
             new_database_entry=False,
         )
 
-    def remove(self, _protect_childs=True):
+    def remove(self, _protect_childs: bool=True) -> None:
         """
         Remove the job - this removes the HDF5 file, all data stored in the HDF5 file and the corresponding database entry.
 
@@ -782,7 +783,7 @@ def remove(self, _protect_childs=True):
             self.server.future.cancel()
         super().remove(_protect_childs=_protect_childs)
 
-    def remove_child(self):
+    def remove_child(self) -> None:
         """
         Internal function that removes the job and also removes its child jobs.
         Never use this command, since it will destroy the integrity of your project.
@@ -790,7 +791,16 @@ def remove_child(self):
         _kill_child(job=self)
         super(GenericJob, self).remove_child()
 
-    def remove_and_reset_id(self, _protect_childs=True):
+    def remove_and_reset_id(self, _protect_childs: bool=True) -> None:
+        """
+        Remove the job and reset its ID.
+
+        Args:
+            _protect_childs (bool): Flag indicating whether to protect child jobs (default is True).
+
+        Returns:
+            None
+        """
         if self.job_id is not None:
             master_id, parent_id = self.master_id, self.parent_id
             self.remove(_protect_childs=_protect_childs)
@@ -799,7 +809,16 @@ def remove_and_reset_id(self, _protect_childs=True):
         else:
             self.remove(_protect_childs=_protect_childs)
 
-    def kill(self):
+    def kill(self) -> None:
+        """
+        Kill the job.
+
+        This function terminates the execution of the job. If the job is currently running or submitted, its job ID is
+        removed and reset; otherwise a `ValueError` is raised.
+
+        Returns:
+            None
+        """
         if self.status.running or self.status.submitted:
             self.remove_and_reset_id()
         else:
@@ -807,7 +826,7 @@ def kill(self):
                 "The kill() function is only available during the execution of the job."
             )
 
-    def validate_ready_to_run(self):
+    def validate_ready_to_run(self) -> None:
         """
         Validate that the calculation is ready to be executed. By default no generic checks are performed, but one could
         check that the input information is complete or validate the consistency of the input at this point.
@@ -817,7 +836,7 @@ def validate_ready_to_run(self):
         """
         pass
 
-    def check_setup(self):
+    def check_setup(self) -> None:
         """
         Checks whether certain parameters (such as plane wave cutoff radius in DFT) are changed from the pyiron standard
         values to allow for a physically meaningful results. This function is called manually or only when the job is
@@ -825,7 +844,7 @@ def check_setup(self):
         """
         pass
 
-    def reset_job_id(self, job_id=None):
+    def reset_job_id(self, job_id: Optional[int]=None) -> None:
         """
         Reset the job id sets the job_id to None in the GenericJob as well as all connected modules like JobStatus.
         """
@@ -838,12 +857,12 @@ def reset_job_id(self, job_id=None):
     )
     def run(
         self,
-        delete_existing_job=False,
-        repair=False,
-        debug=False,
-        run_mode=None,
-        run_again=False,
-    ):
+        delete_existing_job: bool=False,
+        repair: bool=False,
+        debug: bool=False,
+        run_mode: Optional[str]=None,
+        run_again: bool=False,
+    ) -> None:
         """
         This is the main run function, depending on the job status ['initialized', 'created', 'submitted', 'running',
         'collect','finished', 'refresh', 'suspended'] the corresponding run mode is chosen.
@@ -902,14 +921,14 @@ def run(
                 self.drop_status_to_aborted()
                 raise
 
-    def run_if_modal(self):
+    def run_if_modal(self) -> None:
         """
         The run if modal function is called by run to execute the simulation, while waiting for the output. For this we
         use subprocess.check_output()
         """
         run_job_with_runmode_modal(job=self)
 
-    def run_static(self):
+    def run_static(self) -> Union[None, int]:
         """
         The run static function is called by run to execute the simulation.
         """
@@ -919,7 +938,7 @@ def run_static(self):
         else:
             return execute_job_with_external_executable(job=self)
 
-    def run_if_scheduler(self):
+    def run_if_scheduler(self) -> Union[None, int]:
         """
         The run if queue function is called by run if the user decides to submit the job to and queing system. The job
         is submitted to the queuing system using subprocess.Popen()
@@ -928,7 +947,21 @@ def run_if_scheduler(self):
         """
         return run_job_with_runmode_queue(job=self)
 
-    def transfer_from_remote(self):
+    def transfer_from_remote(self) -> None:
+        """
+        Transfer the job from a remote location to the local machine.
+
+        This method performs the following steps:
+        1. Retrieves the job files from the remote location using the queue adapter.
+        2. Optionally deletes the files on the remote host after the transfer.
+        3. Updates the database entry with the job information, or the file table if the database is disabled.
+
+        Returns:
+            None
+        """
         state.queue_adapter.get_job_from_remote(
             working_directory="/".join(self.working_directory.split("/")[:-1]),
         )
@@ -984,7 +1017,7 @@ def transfer_from_remote(self):
         if self.master_id is not None:
             self._reload_update_master(project=self.project, master_id=self.master_id)
 
-    def run_if_interactive(self):
+    def run_if_interactive(self) -> None:
         """
         For jobs which executables are available as Python library, those can also be executed with a library call
         instead of calling an external executable. This is usually faster than a single core python job.
@@ -993,7 +1026,7 @@ def run_if_interactive(self):
             "This function needs to be implemented in the specific class."
         )
 
-    def run_if_interactive_non_modal(self):
+    def run_if_interactive_non_modal(self) -> None:
         """
         For jobs which executables are available as Python library, those can also be executed with a library call
         instead of calling an external executable. This is usually faster than a single core python job.
@@ -1002,7 +1035,7 @@ def run_if_interactive_non_modal(self):
             "This function needs to be implemented in the specific class."
         )
 
-    def interactive_close(self):
+    def interactive_close(self) -> None:
         """
         For jobs which executables are available as Python library, those can also be executed with a library call
         instead of calling an external executable. This is usually faster than a single core python job. After the
@@ -1012,7 +1045,7 @@ def interactive_close(self):
             "This function needs to be implemented in the specific class."
         )
 
-    def interactive_fetch(self):
+    def interactive_fetch(self) -> None:
         """
         For jobs which executables are available as Python library, those can also be executed with a library call
         instead of calling an external executable. This is usually faster than a single core python job. To access the
@@ -1022,7 +1055,7 @@ def interactive_fetch(self):
             "This function needs to be implemented in the specific class."
         )
 
-    def interactive_flush(self, path="generic", include_last_step=True):
+    def interactive_flush(self, path: str="generic", include_last_step: bool=True) -> None:
         """
         For jobs which executables are available as Python library, those can also be executed with a library call
         instead of calling an external executable. This is usually faster than a single core python job. To write the
@@ -1032,7 +1065,7 @@ def interactive_flush(self, path="generic", include_last_step=True):
             "This function needs to be implemented in the specific class."
         )
 
-    def _init_child_job(self, parent):
+    def _init_child_job(self, parent: "GenericJob") -> None:
         """
         Finalize job initialization when job instance is created as a child from another one.
 
@@ -1043,7 +1076,7 @@ def _init_child_job(self, parent):
         """
         pass
 
-    def create_job(self, job_type, job_name, delete_existing_job=False):
+    def create_job(self, job_type: str, job_name: str, delete_existing_job: bool=False) -> "GenericJob":
         """
         Create one of the following jobs:
         - 'StructureContainer':
@@ -1099,7 +1132,7 @@ def create_job(self, job_type, job_name, delete_existing_job=False):
         job._init_child_job(self)
         return job
 
-    def update_master(self, force_update=False):
+    def update_master(self, force_update: bool=False) -> None:
         """
         After a job is finished it checks whether it is linked to any metajob - meaning the master ID is pointing to
         this jobs job ID. If this is the case and the master job is in status suspended - the child wakes up the master
@@ -1130,7 +1163,7 @@ def update_master(self, force_update=False):
             ):
                 self._reload_update_master(project=project, master_id=master_id)
 
-    def job_file_name(self, file_name, cwd=None):
+    def job_file_name(self, file_name: str, cwd: Optional[str]=None) -> str:
         """
         combine the file name file_name with the path of the current working directory
 
@@ -1145,13 +1178,19 @@ def job_file_name(self, file_name, cwd=None):
             cwd = self.project_hdf5.working_directory
         return posixpath.join(cwd, file_name)
 
-    def _set_hdf(self, hdf=None, group_name=None):
+    def _set_hdf(self, hdf: Optional[ProjectHDFio]=None, group_name: Optional[str]=None) -> None:
         if hdf is not None:
             self._hdf5 = hdf
         if group_name is not None and self._hdf5 is not None:
             self._hdf5 = self._hdf5.open(group_name)
 
-    def _to_dict(self):
+    def _to_dict(self) -> dict:
+        """
+        Convert the GenericJob object to a dictionary.
+
+        Returns:
+            dict: The dictionary representation of the GenericJob object.
+        """
         data_dict = {}
         data_dict["status"] = self.status.string
         data_dict["input/generic_dict"] = {
@@ -1175,7 +1214,14 @@ def _to_dict(self):
         data_dict["HDF_VERSION"] = self.__version__
         return data_dict
 
-    def _from_dict(self, obj_dict, version=None):
+    def _from_dict(self, obj_dict: dict, version: Optional[str]=None) -> None:
+        """
+        Restore the GenericJob object from a dictionary.
+
+        Args:
+            obj_dict (dict): The dictionary representation of the GenericJob object.
+            version (str): The version of the GenericJob object (optional).
+        """
         self._type_from_dict(type_dict=obj_dict)
         if "import_directory" in obj_dict.keys():
             self._import_directory = obj_dict["import_directory"]
@@ -1201,7 +1247,7 @@ def _from_dict(self, obj_dict, version=None):
         if "executor_type" in input_dict.keys():
             self._executor_type = input_dict["executor_type"]
 
-    def to_hdf(self, hdf=None, group_name=None):
+    def to_hdf(self, hdf: Optional[ProjectHDFio]=None, group_name: Optional[str]=None) -> None:
         """
         Store the GenericJob in an HDF5 file
 
@@ -1213,7 +1259,7 @@ def to_hdf(self, hdf=None, group_name=None):
         self._hdf5.write_dict(data_dict=self.to_dict())
 
     @classmethod
-    def from_hdf_args(cls, hdf):
+    def from_hdf_args(cls, hdf: ProjectHDFio) -> dict:
         """
         Read arguments for instance creation from HDF5 file
 
@@ -1226,7 +1272,7 @@ def from_hdf_args(cls, hdf):
         )
         return {"job_name": job_name, "project": project_hdf5}
 
-    def from_hdf(self, hdf=None, group_name=None):
+    def from_hdf(self, hdf: Optional[ProjectHDFio]=None, group_name: Optional[str]=None) -> None:
         """
         Restore the GenericJob from an HDF5 file
 
@@ -1245,7 +1291,7 @@ def from_hdf(self, hdf=None, group_name=None):
             job_dict["executable"] = {"executable": exe_dict}
         self.from_dict(obj_dict=job_dict)
 
-    def save(self):
+    def save(self) -> None:
         """
         Save the object, by writing the content to the HDF5 file and storing an entry in the database.
 
@@ -1277,7 +1323,7 @@ def save(self):
         )
         return job_id
 
-    def convergence_check(self):
+    def convergence_check(self) -> bool:
         """
         Validate the convergence of the calculation.
 
@@ -1286,7 +1332,7 @@ def convergence_check(self):
         """
         return True
 
-    def db_entry(self):
+    def db_entry(self) -> dict:
         """
         Generate the initial database entry for the current GenericJob
 
@@ -1310,7 +1356,7 @@ def db_entry(self):
         }
         return db_dict
 
-    def restart(self, job_name=None, job_type=None):
+    def restart(self, job_name: Optional[str]=None, job_type: Optional[str]=None) -> "GenericJob":
         """
         Create a restart calculation from the current calculation - in the GenericJob this is the same as create_job().
         A restart is only possible after the current job has finished. If you want to run the same job again with
@@ -1344,7 +1390,7 @@ def restart(self, job_name=None, job_type=None):
         new_ham._restart_file_dict = dict()
         return new_ham
 
-    def _list_all(self):
+    def _list_all(self) -> dict:
         """
         List all groups and nodes of the HDF5 file - where groups are equivalent to directories and nodes to files.
 
@@ -1356,7 +1402,7 @@ def _list_all(self):
             h5_dict["groups"] += self._list_ext_childs()
         return h5_dict
 
-    def signal_intercept(self, sig):
+    def signal_intercept(self, sig: int) -> None:
         """
         Abort the job and log signal that caused it.
 
@@ -1376,7 +1422,7 @@ def signal_intercept(self, sig):
         except:
             raise
 
-    def drop_status_to_aborted(self):
+    def drop_status_to_aborted(self) -> None:
         """
         Change the job status to aborted when the job was intercepted.
         """
@@ -1385,7 +1431,7 @@ def drop_status_to_aborted(self):
             self.status.aborted = True
             self.project_hdf5["status"] = self.status.string
 
-    def _run_if_new(self, debug=False):
+    def _run_if_new(self, debug: bool=False) -> None:
         """
         Internal helper function; called by run() when the job status is 'initialized'. It prepares
         the HDF5 file and the corresponding directory structure.
@@ -1395,7 +1441,7 @@ def _run_if_new(self, debug=False):
         """
         run_job_with_status_initialized(job=self, debug=debug)
 
-    def _run_if_created(self):
+    def _run_if_created(self) -> Union[None, int]:
         """
         Internal helper function; called by run() when the job status is 'created'. It executes
         the simulation, either in modal mode, meaning waiting for the simulation to finish, manually, or submits the
@@ -1406,41 +1452,41 @@ def _run_if_created(self):
         """
         return run_job_with_status_created(job=self)
 
-    def _run_if_repair(self):
+    def _run_if_repair(self) -> None:
         """
         Internal helper function; called when the run() function is invoked with the
         'repair' parameter.
         """
         run_job_with_parameter_repair(job=self)
 
-    def _run_if_running(self):
+    def _run_if_running(self) -> None:
         """
         Internal helper function; called when the job status is 'running'. It allows the
         user to interact with the simulation while it is running.
         """
         run_job_with_status_running(job=self)
 
-    def run_if_refresh(self):
+    def run_if_refresh(self) -> None:
         """
         Internal helper function; called when the job status is 'refresh'. If the job was
         suspended previously, the job is going to be started again, to be continued.
         """
         run_job_with_status_refresh(job=self)
 
-    def set_input_to_read_only(self):
+    def set_input_to_read_only(self) -> None:
         """
         This function enforces read-only mode for the input classes, but it has to be implemented in the individual
         classes.
         """
         self.server.lock()
 
-    def _run_if_busy(self):
+    def _run_if_busy(self) -> None:
         """
         Internal helper function; called when the job status is 'busy'.
         """
         run_job_with_status_busy(job=self)
 
-    def _run_if_collect(self):
+    def _run_if_collect(self) -> None:
         """
         Internal helper function; called when the job status is 'collect'. It collects
         the simulation output using the standardized functions collect_output() and collect_logfiles(). Afterwards the
@@ -1448,14 +1494,14 @@ def _run_if_collect(self):
         """
         run_job_with_status_collect(job=self)
 
-    def _run_if_suspended(self):
+    def _run_if_suspended(self) -> None:
         """
         Internal helper function; called when the job status is 'suspended'. It
         restarts the job by calling the run if refresh function after setting the status to 'refresh'.
         """
         run_job_with_status_suspended(job=self)
 
-    def _executable_activate(self, enforce=False, codename=None):
+    def _executable_activate(self, enforce: bool=False, codename: Optional[str]=None) -> None:
         """
         Internal helper function to load the executable object, if it was not loaded already.
 
@@ -1490,7 +1536,7 @@ def _executable_activate(self, enforce=False, codename=None):
                     path_binary_codes=None,
                 )
 
-    def _type_to_dict(self):
+    def _type_to_dict(self) -> dict:
         """
         Internal helper function to save type and version in HDF5 file root
         """
@@ -1501,12 +1547,12 @@ def _type_to_dict(self):
             data_dict["HDF_VERSION"] = self.__hdf_version__
         return data_dict
 
-    def _type_from_dict(self, type_dict):
+    def _type_from_dict(self, type_dict: dict) -> None:
         self.__obj_type__ = type_dict["TYPE"]
         if self._executable is None:
             self.__obj_version__ = type_dict["VERSION"]
 
-    def _type_from_hdf(self):
+    def _type_from_hdf(self) -> None:
         """
         Internal helper function to load type and version from HDF5 file root
         """
@@ -1517,14 +1563,14 @@ def _type_from_hdf(self):
             }
         )
 
-    def run_time_to_db(self):
+    def run_time_to_db(self) -> None:
         """
         Internal helper function to store the run_time in the database
         """
         if not state.database.database_is_disabled and self.job_id is not None:
             self.project.db.item_update(self._runtime(), self.job_id)
 
-    def _runtime(self):
+    def _runtime(self) -> dict:
         """
         Internal helper function to calculate the runtime by subtracting the start time from the stop time.
 
@@ -1538,7 +1584,7 @@ def _runtime(self):
             "totalcputime": int((stop_time - start_time).total_seconds()),
         }
 
-    def _db_server_entry(self):
+    def _db_server_entry(self) -> str:
         """
         Internal helper function to connect all the info regarding the server into a single word that can be used
         e.g. as entry in a database
@@ -1549,7 +1595,7 @@ def _db_server_entry(self):
         """
         return self._server.db_entry()
 
-    def _executable_activate_mpi(self):
+    def _executable_activate_mpi(self) -> None:
         """
         Internal helper function to switch the executable to MPI mode
         """
@@ -1564,7 +1610,7 @@ def _executable_activate_mpi(self):
             )
 
     @deprecate("Use job.save()")
-    def _create_job_structure(self, debug=False):
+    def _create_job_structure(self, debug: bool=False) -> None:
         """
         Internal helper function to create the input directories, save the job in the database and write the wrapper.
 
@@ -1573,7 +1619,7 @@ def _create_job_structure(self, debug=False):
         """
         self.save()
 
-    def _check_if_input_should_be_written(self):
+    def _check_if_input_should_be_written(self) -> bool:
         if self._job_with_calculate_function:
             return False
         else:
@@ -1582,7 +1628,7 @@ def _check_if_input_should_be_written(self):
                 or self.server.run_mode.interactive_non_modal
             )
 
-    def _before_successor_calc(self, ham):
+    def _before_successor_calc(self, ham: "GenericJob") -> None:
         """
         Internal helper function which is executed based on the hamiltonian of the successor job, before it is executed.
         This function is used to execute a series of jobs based on their parent relationship - marked by the parent ID.
@@ -1590,7 +1636,7 @@ def _before_successor_calc(self, ham):
         """
         pass
 
-    def _reload_update_master(self, project, master_id):
+    def _reload_update_master(self, project: ProjectHDFio, master_id: int) -> None:
         queue_flag = self.server.run_mode.queue
         master_db_entry = project.db.get_item_by_id(master_id)
         if master_db_entry["status"] == "suspended":
@@ -1608,7 +1654,7 @@ def _reload_update_master(self, project, master_id):
             self._logger.info("busy master: {} {}".format(master_id, self.get_job_id()))
             del self
 
-    def _get_executor(self, max_workers=None):
+    def _get_executor(self, max_workers: Optional[int]=None) -> Executor:
         if self._executor_type is None:
             raise ValueError(
                 "No executor type defined - Please set self.executor_type."
@@ -1631,10 +1677,10 @@ def _get_executor(self, max_workers=None):
 
 
 class GenericError(object):
-    def __init__(self, working_directory):
+    def __init__(self, working_directory: str):
         self._working_directory = working_directory
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         all_messages = ""
         for message in [self.print_message(), self.print_queue()]:
             if message is True:
@@ -1643,13 +1689,13 @@ def __repr__(self):
             all_messages = "There is no error/warning"
         return all_messages
 
-    def print_message(self, string=""):
+    def print_message(self, string: str="") -> str:
         return self._print_error(file_name="error.msg", string=string)
 
-    def print_queue(self, string=""):
+    def print_queue(self, string: str="") -> str:
         return self._print_error(file_name="error.out", string=string)
 
-    def _print_error(self, file_name, string="", print_yes=True):
+    def _print_error(self, file_name: str, string: str="", print_yes: bool=True) -> str:
         if not os.path.exists(os.path.join(self._working_directory, file_name)):
             return ""
         elif print_yes:
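Many of the annotations added to this file are string forward references such as `-> "GenericJob"`, needed because the class (or a circularly imported one) is not yet defined when the annotation is evaluated; a self-contained sketch of the pattern:

    from typing import Optional

    class Node:
        def copy(self) -> "Node":
            # "Node" is quoted: the class object does not exist yet at this point
            return Node()

        def _init_child(self, parent: Optional["Node"] = None) -> None:
            self.parent = parent
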
diff --git a/pyiron_base/jobs/job/interactive.py b/pyiron_base/jobs/job/interactive.py
index 08bfa1e1b..f8fca3b7f 100644
--- a/pyiron_base/jobs/job/interactive.py
+++ b/pyiron_base/jobs/job/interactive.py
@@ -4,6 +4,7 @@
 """
 InteractiveBase class extends the GenericJob class with all the functionality to run the job object interactively.
 """
+from typing import Any, Optional, Union
 
 import numpy as np
 
@@ -126,8 +127,8 @@ class InteractiveBase(GenericJob):
 
     """
 
-    def __init__(self, project, job_name):
-        super(InteractiveBase, self).__init__(project, job_name)
+    def __init__(self, project: "pyiron_base.storage.hdfio.ProjectHDFio", job_name: str):
+        super(InteractiveBase, self).__init__(project=project, job_name=job_name)
         self._interactive_library = None
         self._interactive_write_input_files = False
         self._interactive_flush_frequency = 10000
@@ -135,11 +136,11 @@ def __init__(self, project, job_name):
         self.interactive_cache = {}
 
     @property
-    def interactive_flush_frequency(self):
+    def interactive_flush_frequency(self) -> int:
         return self._interactive_flush_frequency
 
     @interactive_flush_frequency.setter
-    def interactive_flush_frequency(self, frequency):
+    def interactive_flush_frequency(self, frequency: int) -> None:
         if not isinstance(frequency, int) or frequency < 1:
             raise AssertionError("interactive_flush_frequency must be an integer>0")
         if frequency < self._interactive_write_frequency:
@@ -149,18 +150,18 @@ def interactive_flush_frequency(self, frequency):
         self._interactive_flush_frequency = frequency
 
     @property
-    def interactive_write_frequency(self):
+    def interactive_write_frequency(self) -> int:
         return self._interactive_write_frequency
 
     @interactive_write_frequency.setter
-    def interactive_write_frequency(self, frequency):
+    def interactive_write_frequency(self, frequency: int) -> None:
         if not isinstance(frequency, int) or frequency < 1:
             raise AssertionError("interactive_write_frequency must be an integer>0")
         if self._interactive_flush_frequency < frequency:
             self.interactive_flush_frequency = frequency
         self._interactive_write_frequency = frequency
 
-    def validate_ready_to_run(self):
+    def validate_ready_to_run(self) -> None:
         """
         This should work but doesn't...
         """
@@ -169,11 +170,12 @@ def validate_ready_to_run(self):
                 "interactive_write_frequency must be smaller or equal to interactive_flush_frequency"
             )
 
-    def _run_if_running(self):
+    def _run_if_running(self) -> None:
         """
+        Run the job if it is in the running state.
 
         Returns:
-
+            None
         """
         if self.server.run_mode.interactive:
             self.run_if_interactive()
@@ -182,17 +184,24 @@ def _run_if_running(self):
         else:
             super(InteractiveBase, self)._run_if_running()
 
-    def _check_if_input_should_be_written(self):
+    def _check_if_input_should_be_written(self) -> bool:
+        """
+        Check if the input should be written.
+
+        Returns:
+            bool: True if the input should be written, False otherwise.
+        """
         return (
             super(InteractiveBase, self)._check_if_input_should_be_written()
             or self._interactive_write_input_files
         )
 
-    def interactive_is_activated(self):
+    def interactive_is_activated(self) -> bool:
         """
+        Check if the interactive library is activated.
 
         Returns:
-
+            bool: True if the interactive library is activated, False otherwise.
         """
         if self._interactive_library is None:
             return False
@@ -200,17 +209,18 @@ def interactive_is_activated(self):
             return True
 
     @staticmethod
-    def _extend_hdf(h5, path, key, data):
+    def _extend_hdf(h5: "pyiron_base.storage.hdfio.ProjectHDFio", path: str, key: str, data: Any) -> None:
         """
+        Extend an existing HDF5 dataset with new data.
 
         Args:
-            h5:
-            path:
-            key:
-            data:
+            h5 (pyiron_base.storage.hdfio.ProjectHDFio): HDF5 file object.
+            path (str): Path to the dataset within the HDF5 file.
+            key (str): Name of the dataset.
+            data (Any): Data to be appended to the dataset.
 
         Returns:
-
+            None
         """
         if path in h5.list_groups() and key in h5[path].list_nodes():
             current_hdf = h5[path + "/" + key]
@@ -222,16 +232,17 @@ def _extend_hdf(h5, path, key, data):
         h5[path + "/" + key] = data
 
     @staticmethod
-    def _include_last_step(array, step=1, include_last=False):
+    def _include_last_step(array: np.ndarray, step: int = 1, include_last: bool = False) -> Union[np.ndarray, list]:
         """
+        Returns a new array with elements selected at a given step size.
 
         Args:
-            array:
-            step:
-            include_last:
+            array (np.ndarray): The input array.
+            step (int, optional): The step size for selecting elements. Defaults to 1.
+            include_last (bool, optional): Whether to include the last element in the new array. Defaults to False.
 
         Returns:
-
+            Union[np.ndarray, list]: The new array with selected elements (an empty list if none are selected).
         """
         if step == 1:
             return array
@@ -249,15 +260,16 @@ def _include_last_step(array, step=1, include_last=False):
                     return []
         return []
 
-    def interactive_flush(self, path="interactive", include_last_step=False):
+    def interactive_flush(self, path: str = "interactive", include_last_step: bool = False) -> None:
         """
+        Flushes the interactive cache to the HDF5 file.
 
         Args:
-            path:
-            include_last_step:
+            path (str): The path within the HDF5 file to store the flushed data.
+            include_last_step (bool): Whether to include the last step of the cache in the flushed data.
 
         Returns:
-
+            None
         """
         with self.project_hdf5.open("output") as h5:
             for key in self.interactive_cache.keys():
@@ -285,7 +297,7 @@ def interactive_flush(self, path="interactive", include_last_step=False):
                     )
                 self.interactive_cache[key] = []
 
-    def interactive_open(self):
+    def interactive_open(self) -> "_WithInteractiveOpen":
         """
         Set the run mode to interactive.
 
@@ -296,7 +308,7 @@ def interactive_open(self):
         self.server.run_mode.interactive = True
         return _WithInteractiveOpen(self)
 
-    def interactive_close(self):
+    def interactive_close(self) -> None:
         """
         Stop interactive execution and sync interactive output cache.
 
@@ -317,28 +329,29 @@ def interactive_close(self):
         for key in self.interactive_cache.keys():
             self.interactive_cache[key] = []
 
-    def interactive_store_in_cache(self, key, value):
+    def interactive_store_in_cache(self, key: str, value: Any) -> None:
         """
+        Store a value in the interactive cache.
 
         Args:
-            key:
-            value:
+            key (str): The key to store the value under.
+            value (Any): The value to be stored.
 
         Returns:
-
+            None
         """
         self.interactive_cache[key] = value
 
     # def __del__(self):
     #     self.interactive_close()
 
-    def run_if_interactive(self):
+    def run_if_interactive(self) -> None:
         raise NotImplementedError("run_if_interactive() is not implemented!")
 
-    def run_if_interactive_non_modal(self):
+    def run_if_interactive_non_modal(self) -> None:
         raise NotImplementedError("run_if_interactive_non_modal() is not implemented!")
 
-    def to_hdf(self, hdf=None, group_name=None):
+    def to_hdf(self, hdf: Optional["pyiron_base.storage.hdfio.ProjectHDFio"]=None, group_name: Optional[str]=None):
         """
         Store the InteractiveBase object in the HDF5 File
 
@@ -353,7 +366,7 @@ def to_hdf(self, hdf=None, group_name=None):
                 "interactive_write_frequency": self._interactive_write_frequency,
             }
 
-    def from_hdf(self, hdf=None, group_name=None):
+    def from_hdf(self, hdf: Optional["pyiron_base.storage.hdfio.ProjectHDFio"]=None, group_name: Optional[str]=None) -> None:
         """
         Restore the InteractiveBase object in the HDF5 File
 
@@ -377,13 +390,13 @@ def from_hdf(self, hdf=None, group_name=None):
 
 
 class _WithInteractiveOpen:
-    def __init__(self, job):
+    def __init__(self, job: InteractiveBase):
         self._job = job
 
-    def __repr__(self):
+    def __repr__(self) -> str:
         return "Interactive ready"
 
-    def __enter__(self):
+    def __enter__(self) -> InteractiveBase:
         return self._job
 
     def __exit__(self, exc_type, exc_val, exc_tb):
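`interactive_open()` returning the `_WithInteractiveOpen` wrapper enables the `with` pattern; a usage sketch (the job handle is hypothetical, and the assumption is that `__exit__` closes the interactive session):

    with job.interactive_open() as ijob:       # __enter__ hands back the job itself
        ijob.run()
        ijob.interactive_store_in_cache("energy", -1.23)
    # on leaving the block the interactive session is closed and the cache flushed
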
diff --git a/pyiron_base/storage/hdfio.py b/pyiron_base/storage/hdfio.py
index f1f599f99..13db81598 100644
--- a/pyiron_base/storage/hdfio.py
+++ b/pyiron_base/storage/hdfio.py
@@ -1050,7 +1050,7 @@ class ProjectHDFio(FileHDFio, BaseHDFio):
 
     def __init__(
         self,
-        project: "Project",
+        project: "pyiron_base.project.generic.Project",
         file_name: str,
         h5_path: Optional[str] = None,
         mode: Optional[str] = None,
@@ -1100,7 +1100,7 @@ def path(self) -> str:
         return os.path.join(self._project.path, self.h5_path[1:]).replace("\\", "/")
 
     @property
-    def project(self) -> "Project":
+    def project(self) -> "pyiron_base.project.generic.Project":
         """
         Get the project instance the ProjectHDFio object is located in
 

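The fully qualified string annotation `"pyiron_base.project.generic.Project"` sidesteps a circular import; an equivalent idiom (shown only as an alternative sketch, not what this patch does) is the `TYPE_CHECKING` guard:

    from typing import TYPE_CHECKING

    if TYPE_CHECKING:                        # only evaluated by static type checkers
        from pyiron_base.project.generic import Project

    def attach(project: "Project") -> None:  # no runtime import is triggered
        ...
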
From c08b2a7e4540d0562db389354078ba34e9c36ef4 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
 <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Sun, 8 Sep 2024 15:22:38 +0000
Subject: [PATCH 14/20] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 pyiron_base/jobs/job/core.py                  | 65 ++++++++-----
 pyiron_base/jobs/job/extension/executable.py  | 14 +--
 pyiron_base/jobs/job/extension/files.py       |  4 +-
 pyiron_base/jobs/job/extension/jobstatus.py   | 34 +++++--
 .../jobs/job/extension/server/queuestatus.py  | 46 +++++++---
 .../jobs/job/extension/server/runmode.py      |  2 +-
 pyiron_base/jobs/job/generic.py               | 92 ++++++++++++-------
 pyiron_base/jobs/job/interactive.py           | 29 ++++--
 8 files changed, 193 insertions(+), 93 deletions(-)

diff --git a/pyiron_base/jobs/job/core.py b/pyiron_base/jobs/job/core.py
index 90e61115a..ec74b1ecc 100644
--- a/pyiron_base/jobs/job/core.py
+++ b/pyiron_base/jobs/job/core.py
@@ -10,8 +10,8 @@
 import os
 import posixpath
 import shutil
-from typing import Any, Generator, Optional, Union, List
 import warnings
+from typing import Any, Generator, List, Optional, Union
 
 from pyiron_snippets.deprecate import deprecate
 
@@ -240,7 +240,6 @@ def __repr__(self):
         return self._project_hdf5.__repr__()
 
 
-
 class JobCore(HasGroups):
     __doc__ = (
         """
@@ -453,7 +452,7 @@ def files_to_compress(self) -> list:
     def files_to_remove(self) -> list:
         return self._files_to_remove
 
-    def relocate_hdf5(self, h5_path: Optional[str]=None):
+    def relocate_hdf5(self, h5_path: Optional[str] = None):
         """
         Relocate the hdf file. This function is needed when the child job is
         spawned by a parent job (cf. pyiron_base.jobs.master.generic)
@@ -506,7 +505,13 @@ def path(self) -> str:
         """
         return self.project_hdf5.path
 
-    def check_if_job_exists(self, job_name: Optional[str]=None, project: Optional[Union[ProjectHDFio, "pyiron_base.project.generic.Project"]]=None):
+    def check_if_job_exists(
+        self,
+        job_name: Optional[str] = None,
+        project: Optional[
+            Union[ProjectHDFio, "pyiron_base.project.generic.Project"]
+        ] = None,
+    ):
         """
         Check if a job already exists in a specific project.
 
@@ -557,7 +562,7 @@ def get_from_table(self, path: str, name: str) -> Union[dict, list, float, int]:
         """
         return self.project_hdf5.get_from_table(path, name)
 
-    def remove(self, _protect_childs: bool=True) -> None:
+    def remove(self, _protect_childs: bool = True) -> None:
         """
         Remove the job - this removes the HDF5 file, all data stored in the HDF5 file and the corresponding database entry.
 
@@ -634,7 +639,9 @@ def remove_child(self) -> None:
         if self.job_id is not None:
             self.project.db.delete_item(self.job_id)
 
-    def to_object(self, object_type: Optional[str]=None, **qwargs) -> "pyiron_base.job.generic.GenericJob":
+    def to_object(
+        self, object_type: Optional[str] = None, **qwargs
+    ) -> "pyiron_base.job.generic.GenericJob":
         """
         Load the full pyiron object from an HDF5 file
 
@@ -653,7 +660,7 @@ def to_object(self, object_type: Optional[str]=None, **qwargs) -> "pyiron_base.j
             )
         return self.project_hdf5.to_object(object_type, **qwargs)
 
-    def get(self, name: str, default: Optional[Any]=None) -> Any:
+    def get(self, name: str, default: Optional[Any] = None) -> Any:
         """
         Internal wrapper function for __getitem__() - self[name]
 
@@ -674,7 +681,9 @@ def get(self, name: str, default: Optional[Any]=None) -> Any:
                 return default
             raise
 
-    def load(self, job_specifier: Union[str, int], convert_to_object: bool=True) -> Union["pyiron_base.job.generic.GenericJob", "JobCore"]:
+    def load(
+        self, job_specifier: Union[str, int], convert_to_object: bool = True
+    ) -> Union["pyiron_base.job.generic.GenericJob", "JobCore"]:
         """
         Load an existing pyiron object - most commonly a job - from the database
 
@@ -725,7 +734,9 @@ def is_master_id(self, job_id: int) -> bool:
             > 0
         )
 
-    def get_job_id(self, job_specifier: Optional[Union[str, int]]=None) -> Union[int, None]:
+    def get_job_id(
+        self, job_specifier: Optional[Union[str, int]] = None
+    ) -> Union[int, None]:
         """
         Get the job_id for the job named job_name in the local project path from the database.
 
@@ -801,11 +812,13 @@ def copy(self) -> "JobCore":
 
     def _internal_copy_to(
         self,
-        project: Optional[Union["JobCore", ProjectHDFio, "pyiron_base.project.generic.Project"]]=None,
-        new_job_name: Optional[str]=None,
-        new_database_entry: bool=True,
-        copy_files: bool=True,
-        delete_existing_job: bool=False,
+        project: Optional[
+            Union["JobCore", ProjectHDFio, "pyiron_base.project.generic.Project"]
+        ] = None,
+        new_job_name: Optional[str] = None,
+        new_database_entry: bool = True,
+        copy_files: bool = True,
+        delete_existing_job: bool = False,
     ) -> "JobCore":
         """
         Internal helper function for copy_to() which returns more
@@ -882,10 +895,10 @@ def _internal_copy_to(
     def copy_to(
         self,
         project: Union["JobCore", ProjectHDFio, "pyiron_base.project.generic.Project"],
-        new_job_name: Optional[str]=None,
-        input_only: bool=False,
-        new_database_entry: bool=True,
-        copy_files: bool=True,
+        new_job_name: Optional[str] = None,
+        input_only: bool = False,
+        new_database_entry: bool = True,
+        copy_files: bool = True,
     ) -> "JobCore":
         """
         Copy the content of the job including the HDF5 file to a new location
@@ -973,7 +986,7 @@ def rename(self, new_job_name: str) -> None:
         """
         self.job_name = new_job_name
 
-    def reset_job_id(self, job_id: Optional[int]=None) -> None:
+    def reset_job_id(self, job_id: Optional[int] = None) -> None:
         """
         The reset_job_id function has to be implemented by the derived classes - usually the GenericJob class
 
@@ -991,7 +1004,9 @@ def save(self) -> None:
         """
         raise NotImplementedError("save() should be implemented in the derived class")
 
-    def to_hdf(self, hdf: Optional[ProjectHDFio]=None, group_name: str="group") -> None:
+    def to_hdf(
+        self, hdf: Optional[ProjectHDFio] = None, group_name: str = "group"
+    ) -> None:
         """
         Store object in hdf5 format - The function has to be implemented by the derived classes
         - usually the GenericJob class
@@ -1002,7 +1017,9 @@ def to_hdf(self, hdf: Optional[ProjectHDFio]=None, group_name: str="group") -> N
         """
         raise NotImplementedError("to_hdf() should be implemented in the derived class")
 
-    def from_hdf(self, hdf: Optional[ProjectHDFio]=None, group_name: str="group") -> None:
+    def from_hdf(
+        self, hdf: Optional[ProjectHDFio] = None, group_name: str = "group"
+    ) -> None:
         """
         Restore object from hdf5 format - The function has to be implemented by the derived classes
         - usually the GenericJob class
@@ -1130,7 +1147,11 @@ def _list_ext_childs(self) -> list:
         childs = self.list_childs()
         return list(set(childs) - set(nodes))
 
-    def compress(self, files_to_compress: Optional[List[str]]=None, files_to_remove: Optional[List[str]]=None) -> None:
+    def compress(
+        self,
+        files_to_compress: Optional[List[str]] = None,
+        files_to_remove: Optional[List[str]] = None,
+    ) -> None:
         """
         Compress the output files of a job object.
 
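The reflowed `compress()` signature keeps `Optional[List[str]] = None` rather than a `[]` default; a sketch of the standard idiom these hints imply (avoiding a shared mutable default):

    from typing import List, Optional

    def compress(files_to_compress: Optional[List[str]] = None) -> None:
        if files_to_compress is None:
            files_to_compress = []           # fresh list per call, never shared
        print(f"compressing {len(files_to_compress)} files")
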
diff --git a/pyiron_base/jobs/job/extension/executable.py b/pyiron_base/jobs/job/extension/executable.py
index 3378ef4d5..c9540d6d4 100644
--- a/pyiron_base/jobs/job/extension/executable.py
+++ b/pyiron_base/jobs/job/extension/executable.py
@@ -33,10 +33,10 @@ class Executable(HasDict):
 
     def __init__(
         self,
-        path_binary_codes: Optional[List[str]]=None,
-        codename: Optional[str]=None,
-        module: Optional[str]=None,
-        overwrite_nt_flag: bool=False,
+        path_binary_codes: Optional[List[str]] = None,
+        codename: Optional[str] = None,
+        module: Optional[str] = None,
+        overwrite_nt_flag: bool = False,
     ):
         """
         Handle the path to the executable, as well as the version selection.
@@ -224,7 +224,7 @@ def _to_dict(self) -> dict:
         """
         return asdict(self.storage)
 
-    def _from_dict(self, obj_dict: dict, version: Optional[str]=None) -> None:
+    def _from_dict(self, obj_dict: dict, version: Optional[str] = None) -> None:
         """
         Load the object from a dictionary representation.
 
@@ -241,7 +241,9 @@ def _from_dict(self, obj_dict: dict, version: Optional[str]=None) -> None:
             executable_class_dict[key] = obj_dict.get(key, None)
         self.storage = ExecutableDataClass(**executable_class_dict)
 
-    def get_input_for_subprocess_call(self, cores: int, threads: int, gpus: Optional[int]=None) -> Tuple[Union[str, List[str]], bool]:
+    def get_input_for_subprocess_call(
+        self, cores: int, threads: int, gpus: Optional[int] = None
+    ) -> Tuple[Union[str, List[str]], bool]:
         """
         Get the input parameters for the subprocess call to execute the job
 
diff --git a/pyiron_base/jobs/job/extension/files.py b/pyiron_base/jobs/job/extension/files.py
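`get_input_for_subprocess_call()` returns the command plus a shell flag; a sketch of how such a `Tuple[Union[str, List[str]], bool]` pair would typically feed `subprocess` (the function name and layout are hypothetical):

    import subprocess
    from typing import List, Tuple, Union

    def launch(call: Tuple[Union[str, List[str]], bool], cwd: str) -> str:
        command, shell = call
        # shell=True expects a single command string, shell=False an argv list
        return subprocess.check_output(
            command, cwd=cwd, shell=shell, universal_newlines=True
        )
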
index 27040a19a..471a51641 100644
--- a/pyiron_base/jobs/job/extension/files.py
+++ b/pyiron_base/jobs/job/extension/files.py
@@ -1,7 +1,7 @@
 import os
 import posixpath
 from itertools import islice
-from typing import List, Optional, Union, Generator
+from typing import Generator, List, Optional, Union
 
 from pyiron_base.jobs.job.util import (
     _working_directory_list_files,
@@ -18,7 +18,7 @@ def __init__(self, path: str):
     def __str__(self) -> str:
         return self._path
 
-    def _read(self, tail: Optional[int]=None) -> List[str]:
+    def _read(self, tail: Optional[int] = None) -> List[str]:
         return _working_directory_read_file(
             working_directory=os.path.dirname(str(self)),
             file_name=os.path.basename(str(self)),
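`_read()` takes an `Optional[int]` `tail` parameter; assuming `tail` means "return only the last n lines", a minimal standalone sketch of that behavior:

    from collections import deque
    from typing import List, Optional

    def read_lines(path: str, tail: Optional[int] = None) -> List[str]:
        with open(path) as handle:
            if tail is None:
                return handle.readlines()
            # a deque with maxlen keeps only the trailing lines while streaming
            return list(deque(handle, maxlen=tail))
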
diff --git a/pyiron_base/jobs/job/extension/jobstatus.py b/pyiron_base/jobs/job/extension/jobstatus.py
index fb2fe2a9a..dd78bfc77 100644
--- a/pyiron_base/jobs/job/extension/jobstatus.py
+++ b/pyiron_base/jobs/job/extension/jobstatus.py
@@ -4,6 +4,7 @@
 """
 The JobStatus class belongs to the GenericJob object.
 """
+
 from typing import Optional, Union
 
 from pyiron_base.utils.instance import static_isinstance
@@ -36,7 +37,7 @@
 ] + job_status_finished_lst
 
 
-def format_docstring_with_statuses(n_tabs: int=1) -> callable:
+def format_docstring_with_statuses(n_tabs: int = 1) -> callable:
     """
     Replaces a '{}' in the decorated object's docstring with the documentation for all possible job statuses.
 
@@ -94,10 +95,15 @@ class JobStatus(object):
     """
 
     def __init__(
-        self, 
-        initial_status: str="initialized", 
-        db: Optional[Union["pyiron_base.database.generic.DatabaseAccess", "pyiron_base.database.filetable.FileTable"]]=None, 
-        job_id: Optional[int]=None,
+        self,
+        initial_status: str = "initialized",
+        db: Optional[
+            Union[
+                "pyiron_base.database.generic.DatabaseAccess",
+                "pyiron_base.database.filetable.FileTable",
+            ]
+        ] = None,
+        job_id: Optional[int] = None,
     ):
         super(JobStatus, self).__setattr__("_status_dict", {})
         self._db = None
@@ -107,7 +113,13 @@ def __init__(
         self.job_id = job_id
 
     @property
-    def database(self) -> Union["pyiron_base.database.generic.DatabaseAccess", "pyiron_base.database.filetable.FileTable", None]:
+    def database(
+        self,
+    ) -> Union[
+        "pyiron_base.database.generic.DatabaseAccess",
+        "pyiron_base.database.filetable.FileTable",
+        None,
+    ]:
         """
         Get the database which is responsible for this job. If no database is linked it returns None.
         Returns:
@@ -116,7 +128,13 @@ def database(self) -> Union["pyiron_base.database.generic.DatabaseAccess", "pyir
         return self._db
 
     @database.setter
-    def database(self, db: Union["pyiron_base.database.generic.DatabaseAccess", "pyiron_base.database.filetable.FileTable"]) -> None:
+    def database(
+        self,
+        db: Union[
+            "pyiron_base.database.generic.DatabaseAccess",
+            "pyiron_base.database.filetable.FileTable",
+        ],
+    ) -> None:
         """
         Set the database which is responsible for this job.
         Args:
@@ -139,7 +157,7 @@ def job_id(self) -> int:
         return self._job_id
 
     @job_id.setter
-    def job_id(self, unique_id: int)-> None:
+    def job_id(self, unique_id: int) -> None:
         """
         Get the job id of the job this jobstatus is associated to.
         Args:
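The db annotation above is written with quoted class paths because the database modules themselves import job code; quoting turns the names into PEP 484 forward references that are only resolved by type checkers, so no import cycle is created. A reduced sketch of the pattern:

    from typing import Optional, Union

    class JobStatus:
        # The quoted annotations below are never imported at runtime; they
        # are stored as typing.ForwardRef objects and resolved lazily, which
        # is what breaks the job <-> database import cycle.
        def __init__(
            self,
            initial_status: str = "initialized",
            db: Optional[
                Union[
                    "pyiron_base.database.generic.DatabaseAccess",
                    "pyiron_base.database.filetable.FileTable",
                ]
            ] = None,
            job_id: Optional[int] = None,
        ):
            self._status = initial_status
            self._db = db
            self._job_id = job_id
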
diff --git a/pyiron_base/jobs/job/extension/server/queuestatus.py b/pyiron_base/jobs/job/extension/server/queuestatus.py
index 0bde8c144..af5e1f6cc 100644
--- a/pyiron_base/jobs/job/extension/server/queuestatus.py
+++ b/pyiron_base/jobs/job/extension/server/queuestatus.py
@@ -31,7 +31,10 @@
 
 
 def queue_table(
-    job_ids: Optional[List[int]]=None, working_directory_lst: Optional[List[str]]=None, project_only: bool=True, full_table: bool=False
+    job_ids: Optional[List[int]] = None,
+    working_directory_lst: Optional[List[str]] = None,
+    project_only: bool = True,
+    full_table: bool = False,
 ) -> pandas.DataFrame:
     """
     Display the queuing system table as pandas.DataFrame
@@ -94,7 +97,9 @@ def queue_table(
         return None
 
 
-def queue_check_job_is_waiting_or_running(item: Union[int, "pyiron_base.jobs.job.generic.GenericJob"]) -> Union[bool, None]:
+def queue_check_job_is_waiting_or_running(
+    item: Union[int, "pyiron_base.jobs.job.generic.GenericJob"],
+) -> Union[bool, None]:
     """
     Check if a job is still listed in the queue system as either waiting or running.
 
@@ -144,7 +149,9 @@ def queue_is_empty() -> bool:
         return True
 
 
-def queue_delete_job(item: Union[int, "pyiron_base.jobs.job.generic.GenericJob"]) -> Union[str, None]:
+def queue_delete_job(
+    item: Union[int, "pyiron_base.jobs.job.generic.GenericJob"],
+) -> Union[str, None]:
     """
     Delete a job from the queuing system
 
@@ -161,7 +168,9 @@ def queue_delete_job(item: Union[int, "pyiron_base.jobs.job.generic.GenericJob"]
         return None
 
 
-def queue_enable_reservation(item: Union[int, "pyiron_base.jobs.job.generic.GenericJob"]) -> Union[str, None]:
+def queue_enable_reservation(
+    item: Union[int, "pyiron_base.jobs.job.generic.GenericJob"],
+) -> Union[str, None]:
     """
     Enable a reservation for a particular job within the queuing system
 
@@ -183,7 +192,11 @@ def queue_enable_reservation(item: Union[int, "pyiron_base.jobs.job.generic.Gene
         return None
 
 
-def wait_for_job(job: "pyiron_base.jobs.job.generic.GenericJob", interval_in_s: int=5, max_iterations: int=100) -> None:
+def wait_for_job(
+    job: "pyiron_base.jobs.job.generic.GenericJob",
+    interval_in_s: int = 5,
+    max_iterations: int = 100,
+) -> None:
     """
     Sleep until the job is finished, but at most interval_in_s * max_iterations seconds.
 
@@ -249,11 +262,11 @@ def wait_for_job(job: "pyiron_base.jobs.job.generic.GenericJob", interval_in_s:
 
 def wait_for_jobs(
     project: "pyiron_base.project.generic.Project",
-    interval_in_s: int=5,
-    max_iterations: int =100,
-    recursive: bool =True,
-    ignore_exceptions: bool =False,
-    try_collecting: bool =False,
+    interval_in_s: int = 5,
+    max_iterations: int = 100,
+    recursive: bool = True,
+    ignore_exceptions: bool = False,
+    try_collecting: bool = False,
 ) -> None:
     """
     Wait for the calculations in the project to be finished
@@ -283,7 +296,10 @@ def wait_for_jobs(
 
 
 def update_from_remote(
-    project: "pyiron_base.project.generic.Project", recursive: bool=True, ignore_exceptions: bool =False, try_collecting: bool =False
+    project: "pyiron_base.project.generic.Project",
+    recursive: bool = True,
+    ignore_exceptions: bool = False,
+    try_collecting: bool = False,
 ) -> None:
     """
     Update jobs from the remote server
@@ -342,7 +358,9 @@ def update_from_remote(
             return failed_jobs
 
 
-def retrieve_job(job: "pyiron_base.jobs.job.generic.GenericJob", try_collecting: bool=False) -> None:
+def retrieve_job(
+    job: "pyiron_base.jobs.job.generic.GenericJob", try_collecting: bool = False
+) -> None:
     """
     Retrieve a job from the remote server and check if it has a "finished" status.
     Optionally try to collect its output.
@@ -363,7 +381,9 @@ def retrieve_job(job: "pyiron_base.jobs.job.generic.GenericJob", try_collecting:
         job.run()
 
 
-def validate_que_request(item: Union[int, "pyiron_base.jobs.job.generic.GenericJob"]) -> int:
+def validate_que_request(
+    item: Union[int, "pyiron_base.jobs.job.generic.GenericJob"],
+) -> int:
     """
     Internal function to convert the job_ID or hamiltonian to the queuing system ID.
 
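wait_for_job() bounds its total wait at interval_in_s * max_iterations seconds. A simplified stand-in for the polling loop, with the status check abstracted into a predicate; the real function additionally refreshes the job from the database and can trigger output collection:

    import time
    from typing import Callable

    def wait_until(
        is_finished: Callable[[], bool],
        interval_in_s: int = 5,
        max_iterations: int = 100,
    ) -> None:
        # Poll at a fixed interval and give up after at most
        # interval_in_s * max_iterations seconds.
        for _ in range(max_iterations):
            if is_finished():
                return
            time.sleep(interval_in_s)
        raise TimeoutError("job did not finish within the allowed time")
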
diff --git a/pyiron_base/jobs/job/extension/server/runmode.py b/pyiron_base/jobs/job/extension/server/runmode.py
index bc42b0e7f..f0ca47298 100644
--- a/pyiron_base/jobs/job/extension/server/runmode.py
+++ b/pyiron_base/jobs/job/extension/server/runmode.py
@@ -53,7 +53,7 @@ class Runmode(object):
                      'interactive_non_modal', 'srun', 'executor']
     """
 
-    def __init__(self, mode: str="modal"):
+    def __init__(self, mode: str = "modal"):
         super(Runmode, self).__init__()
         self._reset_mode()
         self.mode = mode
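Runmode restricts mode to the documented options. A reduced sketch, under the assumption that the class keeps one boolean flag per known mode and rejects anything else; the attribute layout and the subset of modes shown are illustrative:

    class Runmode:
        # Subset of the documented options, for illustration only.
        run_mode_lst = [
            "modal", "non_modal", "queue", "interactive",
            "interactive_non_modal", "srun", "executor",
        ]

        def __init__(self, mode: str = "modal"):
            self._reset_mode()
            self.mode = mode

        def _reset_mode(self) -> None:
            # One flag per known mode; exactly one is active at a time.
            self._mode = {m: False for m in self.run_mode_lst}

        @property
        def mode(self) -> str:
            return next(m for m, active in self._mode.items() if active)

        @mode.setter
        def mode(self, new_mode: str) -> None:
            if new_mode not in self._mode:
                raise ValueError("unknown run mode: " + new_mode)
            self._reset_mode()
            self._mode[new_mode] = True
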
diff --git a/pyiron_base/jobs/job/generic.py b/pyiron_base/jobs/job/generic.py
index d971bc395..3e7304210 100644
--- a/pyiron_base/jobs/job/generic.py
+++ b/pyiron_base/jobs/job/generic.py
@@ -640,11 +640,11 @@ def write_input(self) -> None:
 
     def _internal_copy_to(
         self,
-        project: Optional[ProjectHDFio]=None,
-        new_job_name: str=None,
-        new_database_entry: bool=True,
-        copy_files: bool=True,
-        delete_existing_job: bool=False,
+        project: Optional[ProjectHDFio] = None,
+        new_job_name: Optional[str] = None,
+        new_database_entry: bool = True,
+        copy_files: bool = True,
+        delete_existing_job: bool = False,
     ):
         # Store all job arguments in the HDF5 file
         delete_file_after_copy = _job_store_before_copy(job=self)
@@ -675,12 +675,12 @@ def _internal_copy_to(
 
     def copy_to(
         self,
-        project: Optional[Union[ProjectHDFio, JobCore]]=None,
-        new_job_name: Optional[str]=None,
-        input_only: bool=False,
-        new_database_entry: bool=True,
-        delete_existing_job: bool=False,
-        copy_files: bool =True,
+        project: Optional[Union[ProjectHDFio, JobCore]] = None,
+        new_job_name: Optional[str] = None,
+        input_only: bool = False,
+        new_database_entry: bool = True,
+        delete_existing_job: bool = False,
+        copy_files: bool = True,
     ):
         """
         Copy the content of the job including the HDF5 file to a new location.
@@ -728,7 +728,9 @@ def copy_to(
         )
         return new_job_core
 
-    def _after_generic_copy_to(self, original: "GenericJob", new_database_entry: bool, reloaded: bool) -> None:
+    def _after_generic_copy_to(
+        self, original: "GenericJob", new_database_entry: bool, reloaded: bool
+    ) -> None:
         """
         Called in :method:`.copy_to()` after :method:`._internal_copy_to()` to allow subclasses to modify copy behavior.
 
@@ -751,7 +753,11 @@ def copy_file_to_working_directory(self, file: str) -> None:
         else:
             self.restart_file_list.append(os.path.abspath(file))
 
-    def copy_template(self, project: Optional[Union[ProjectHDFio, JobCore]]=None, new_job_name: Optional[None]=None) -> "GenericJob":
+    def copy_template(
+        self,
+        project: Optional[Union[ProjectHDFio, JobCore]] = None,
+        new_job_name: Optional[str] = None,
+    ) -> "GenericJob":
         """
         Copy the content of the job including the HDF5 file but without the output data to a new location
 
@@ -771,7 +777,7 @@ def copy_template(self, project: Optional[Union[ProjectHDFio, JobCore]]=None, ne
             new_database_entry=False,
         )
 
-    def remove(self, _protect_childs: bool=True) -> None:
+    def remove(self, _protect_childs: bool = True) -> None:
         """
         Remove the job - this removes the HDF5 file, all data stored in the HDF5 file and the corresponding database entry.
 
@@ -791,7 +797,7 @@ def remove_child(self) -> None:
         _kill_child(job=self)
         super(GenericJob, self).remove_child()
 
-    def remove_and_reset_id(self, _protect_childs: bool=True) -> None:
+    def remove_and_reset_id(self, _protect_childs: bool = True) -> None:
         """
         Remove the job and reset its ID.
 
@@ -844,7 +850,7 @@ def check_setup(self) -> None:
         """
         pass
 
-    def reset_job_id(self, job_id: Optional[int]=None) -> None:
+    def reset_job_id(self, job_id: Optional[int] = None) -> None:
         """
         Resetting the job id sets the job_id to None in the GenericJob as well as in all connected modules like JobStatus.
         """
@@ -857,11 +863,11 @@ def reset_job_id(self, job_id: Optional[int]=None) -> None:
     )
     def run(
         self,
-        delete_existing_job: bool=False,
-        repair: bool=False,
-        debug: bool =False,
-        run_mode: Optional[str]=None,
-        run_again: bool=False,
+        delete_existing_job: bool = False,
+        repair: bool = False,
+        debug: bool = False,
+        run_mode: Optional[str] = None,
+        run_again: bool = False,
     ) -> None:
         """
         This is the main run function, depending on the job status ['initialized', 'created', 'submitted', 'running',
@@ -1055,7 +1061,9 @@ def interactive_fetch(self) -> None:
             "This function needs to be implemented in the specific class."
         )
 
-    def interactive_flush(self, path: str="generic", include_last_step: bool=True) -> None:
+    def interactive_flush(
+        self, path: str = "generic", include_last_step: bool = True
+    ) -> None:
         """
         For jobs whose executables are available as a Python library, those can also be executed with a library call
         instead of calling an external executable. This is usually faster than a single-core Python job. To write the
@@ -1076,7 +1084,9 @@ def _init_child_job(self, parent: "GenericJob") -> None:
         """
         pass
 
-    def create_job(self, job_type: str, job_name: str, delete_existing_job: bool =False) -> "GenericJob":
+    def create_job(
+        self, job_type: str, job_name: str, delete_existing_job: bool = False
+    ) -> "GenericJob":
         """
         Create one of the following jobs:
         - 'StructureContainer’:
@@ -1132,7 +1142,7 @@ def create_job(self, job_type: str, job_name: str, delete_existing_job: bool =Fa
         job._init_child_job(self)
         return job
 
-    def update_master(self, force_update: bool=False) -> None:
+    def update_master(self, force_update: bool = False) -> None:
         """
         After a job is finished it checks whether it is linked to any metajob - meaning the master ID is pointing to
         this job's job ID. If this is the case and the master job is in status suspended - the child wakes up the master
@@ -1163,7 +1173,7 @@ def update_master(self, force_update: bool=False) -> None:
             ):
                 self._reload_update_master(project=project, master_id=master_id)
 
-    def job_file_name(self, file_name: str, cwd: Optional[str]=None) -> str:
+    def job_file_name(self, file_name: str, cwd: Optional[str] = None) -> str:
         """
         Combine the file name file_name with the path of the current working directory
 
@@ -1178,7 +1188,9 @@ def job_file_name(self, file_name: str, cwd: Optional[str]=None) -> str:
             cwd = self.project_hdf5.working_directory
         return posixpath.join(cwd, file_name)
 
-    def _set_hdf(self, hdf: Optional[ProjectHDFio]=None, group_name: Optional[str]=None) -> None:
+    def _set_hdf(
+        self, hdf: Optional[ProjectHDFio] = None, group_name: Optional[str] = None
+    ) -> None:
         if hdf is not None:
             self._hdf5 = hdf
         if group_name is not None and self._hdf5 is not None:
@@ -1214,7 +1226,7 @@ def _to_dict(self) -> dict:
         data_dict["HDF_VERSION"] = self.__version__
         return data_dict
 
-    def _from_dict(self, obj_dict: dict, version: str=None) -> None:
+    def _from_dict(self, obj_dict: dict, version: Optional[str] = None) -> None:
         """
         Restore the GenericJob object from a dictionary.
 
@@ -1247,7 +1259,9 @@ def _from_dict(self, obj_dict: dict, version: str=None) -> None:
         if "executor_type" in input_dict.keys():
             self._executor_type = input_dict["executor_type"]
 
-    def to_hdf(self, hdf: Optional[ProjectHDFio]=None, group_name: Optional[str]=None) -> None:
+    def to_hdf(
+        self, hdf: Optional[ProjectHDFio] = None, group_name: Optional[str] = None
+    ) -> None:
         """
         Store the GenericJob in an HDF5 file
 
@@ -1272,7 +1286,9 @@ def from_hdf_args(cls, hdf: ProjectHDFio) -> dict:
         )
         return {"job_name": job_name, "project": project_hdf5}
 
-    def from_hdf(self, hdf: Optional[ProjectHDFio]=None, group_name: Optional[str]=None) -> None:
+    def from_hdf(
+        self, hdf: Optional[ProjectHDFio] = None, group_name: Optional[str] = None
+    ) -> None:
         """
         Restore the GenericJob from an HDF5 file
 
@@ -1356,7 +1372,9 @@ def db_entry(self) -> dict:
         }
         return db_dict
 
-    def restart(self, job_name: Optional[str]=None, job_type: Optional[str] =None) -> "GenericJob":
+    def restart(
+        self, job_name: Optional[str] = None, job_type: Optional[str] = None
+    ) -> "GenericJob":
         """
         Create a restart calculation from the current calculation - in the GenericJob this is the same as create_job().
         A restart is only possible after the current job has finished. If you want to run the same job again with
@@ -1431,7 +1449,7 @@ def drop_status_to_aborted(self) -> None:
             self.status.aborted = True
             self.project_hdf5["status"] = self.status.string
 
-    def _run_if_new(self, debug: bool=False) -> None:
+    def _run_if_new(self, debug: bool = False) -> None:
         """
         Internal helper function; the run-if-new logic is called when the job status is 'initialized'. It prepares
         the hdf5 file and the corresponding directory structure.
@@ -1501,7 +1519,9 @@ def _run_if_suspended(self) -> None:
         """
         run_job_with_status_suspended(job=self)
 
-    def _executable_activate(self, enforce: bool=False, codename: Optional[str]=None) -> None:
+    def _executable_activate(
+        self, enforce: bool = False, codename: Optional[str] = None
+    ) -> None:
         """
         Internal helper function to load the executable object, if it was not loaded already.
 
@@ -1610,7 +1630,7 @@ def _executable_activate_mpi(self) -> None:
             )
 
     @deprecate("Use job.save()")
-    def _create_job_structure(self, debug: bool=False) -> None:
+    def _create_job_structure(self, debug: bool = False) -> None:
         """
         Internal helper function to create the input directories, save the job in the database and write the wrapper.
 
@@ -1654,7 +1674,7 @@ def _reload_update_master(self, project: ProjectHDFio, master_id: int) -> None:
             self._logger.info("busy master: {} {}".format(master_id, self.get_job_id()))
             del self
 
-    def _get_executor(self, max_workers: Optional[int]=None) -> Executor:
+    def _get_executor(self, max_workers: Optional[int] = None) -> Executor:
         if self._executor_type is None:
             raise ValueError(
                 "No executor type defined - Please set self.executor_type."
@@ -1695,7 +1715,9 @@ def print_message(self, string="") -> str:
     def print_queue(self, string="") -> str:
         return self._print_error(file_name="error.out", string=string)
 
-    def _print_error(self, file_name: str, string: str="", print_yes: bool=True) -> str:
+    def _print_error(
+        self, file_name: str, string: str = "", print_yes: bool = True
+    ) -> str:
         if not os.path.exists(os.path.join(self._working_directory, file_name)):
             return ""
         elif print_yes:
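_get_executor() turns the string stored in executor_type into a concurrent.futures.Executor instance. A hedged sketch of such a lookup; the registry dictionary and its keys are assumptions for illustration, not pyiron_base's actual mapping:

    from concurrent.futures import Executor, ProcessPoolExecutor, ThreadPoolExecutor
    from typing import Optional

    # Hypothetical registry of executor names.
    EXECUTOR_REGISTRY = {
        "ProcessPoolExecutor": ProcessPoolExecutor,
        "ThreadPoolExecutor": ThreadPoolExecutor,
    }

    def get_executor(
        executor_type: Optional[str], max_workers: Optional[int] = None
    ) -> Executor:
        if executor_type is None:
            raise ValueError("No executor type defined - Please set self.executor_type.")
        return EXECUTOR_REGISTRY[executor_type](max_workers=max_workers)
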
diff --git a/pyiron_base/jobs/job/interactive.py b/pyiron_base/jobs/job/interactive.py
index f8fca3b7f..964ebf8ac 100644
--- a/pyiron_base/jobs/job/interactive.py
+++ b/pyiron_base/jobs/job/interactive.py
@@ -4,6 +4,7 @@
 """
 InteractiveBase class extends the GenericJob class with all the functionality to run the job object interactively.
 """
+
 from typing import Any, Optional
 
 import numpy as np
@@ -127,7 +128,9 @@ class InteractiveBase(GenericJob):
 
     """
 
-    def __init__(self, project: "pyiron_base.storage.hdfio.ProjectHDFio", job_name: str):
+    def __init__(
+        self, project: "pyiron_base.storage.hdfio.ProjectHDFio", job_name: str
+    ):
         super(InteractiveBase, self).__init__(project=project, job_name=job_name)
         self._interactive_library = None
         self._interactive_write_input_files = False
@@ -209,7 +212,9 @@ def interactive_is_activated(self) -> bool:
             return True
 
     @staticmethod
-    def _extend_hdf(h5: "pyiron_base.storage.hdfio.ProjectHDFio", path: str, key: str, data: Any) -> None:
+    def _extend_hdf(
+        h5: "pyiron_base.storage.hdfio.ProjectHDFio", path: str, key: str, data: Any
+    ) -> None:
         """
         Extend an existing HDF5 dataset with new data.
 
@@ -232,7 +237,9 @@ def _extend_hdf(h5: "pyiron_base.storage.hdfio.ProjectHDFio", path: str, key: st
         h5[path + "/" + key] = data
 
     @staticmethod
-    def _include_last_step(array: np.ndarray, step: int = 1, include_last: bool = False) -> np.ndarray:
+    def _include_last_step(
+        array: np.ndarray, step: int = 1, include_last: bool = False
+    ) -> np.ndarray:
         """
         Returns a new array with elements selected at a given step size.
 
@@ -260,7 +267,9 @@ def _include_last_step(array: np.ndarray, step: int = 1, include_last: bool = Fa
                     return []
         return []
 
-    def interactive_flush(self, path: str = "interactive", include_last_step: bool = False) -> None:
+    def interactive_flush(
+        self, path: str = "interactive", include_last_step: bool = False
+    ) -> None:
         """
         Flushes the interactive cache to the HDF5 file.
 
@@ -351,7 +360,11 @@ def run_if_interactive(self) -> None:
     def run_if_interactive_non_modal(self) -> None:
         raise NotImplementedError("run_if_interactive_non_modal() is not implemented!")
 
-    def to_hdf(self, hdf: Optional["pyiron_base.storage.hdfio.ProjectHDFio"]=None, group_name: Optional[str]=None):
+    def to_hdf(
+        self,
+        hdf: Optional["pyiron_base.storage.hdfio.ProjectHDFio"] = None,
+        group_name: Optional[str] = None,
+    ):
         """
         Store the InteractiveBase object in the HDF5 File
 
@@ -366,7 +379,11 @@ def to_hdf(self, hdf: Optional["pyiron_base.storage.hdfio.ProjectHDFio"]=None, g
                 "interactive_write_frequency": self._interactive_write_frequency,
             }
 
-    def from_hdf(self, hdf: Optional["pyiron_base.storage.hdfio.ProjectHDFio"]=None, group_name: Optional[str]=None):
+    def from_hdf(
+        self,
+        hdf: Optional["pyiron_base.storage.hdfio.ProjectHDFio"] = None,
+        group_name: Optional[str] = None,
+    ):
         """
         Restore the InteractiveBase object in the HDF5 File
 

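_include_last_step() thins the cached interactive data to every step-th entry and can force the final entry in, so that a sparse write frequency still records the last interactive step. An illustrative reimplementation of that selection rule; edge-case handling in the real method may differ:

    import numpy as np

    def include_last_step(
        array: np.ndarray, step: int = 1, include_last: bool = False
    ) -> np.ndarray:
        # Slicing with ::step keeps indices 0, step, 2*step, ...; append the
        # final element only if the slice did not already capture it.
        selected = array[::step]
        if include_last and len(array) > 0 and (len(array) - 1) % step != 0:
            selected = np.concatenate([selected, array[-1:]])
        return selected

    # include_last_step(np.arange(10), step=4)                    -> [0 4 8]
    # include_last_step(np.arange(10), step=4, include_last=True) -> [0 4 8 9]
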
From b1d1b4f4259f10a360720028a63f39c8691ef2e2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jan=20Jan=C3=9Fen?= <janssen@mpie.de>
Date: Sun, 8 Sep 2024 17:34:02 +0200
Subject: [PATCH 15/20] Server class

---
 .../jobs/job/extension/server/generic.py      | 106 ++++++++++--------
 1 file changed, 60 insertions(+), 46 deletions(-)

diff --git a/pyiron_base/jobs/job/extension/server/generic.py b/pyiron_base/jobs/job/extension/server/generic.py
index df4b4f44d..6581ddda5 100644
--- a/pyiron_base/jobs/job/extension/server/generic.py
+++ b/pyiron_base/jobs/job/extension/server/generic.py
@@ -9,8 +9,9 @@
 import socket
 from concurrent.futures import Executor, Future
 from dataclasses import asdict, fields
-from typing import Union
+from typing import Union, Optional
 
+import pandas
 from pyiron_snippets.deprecate import deprecate
 
 from pyiron_base.dataclasses.job import Server as ServerDataClass
@@ -86,13 +87,13 @@ class Server(
 
     def __init__(
         self,
-        host=None,
-        queue=None,
-        cores=1,
-        threads=1,
-        gpus=None,
-        run_mode="modal",
-        new_hdf=True,
+        host: Optional[str]=None,
+        queue: Optional[str]=None,
+        cores: int=1,
+        threads: int=1,
+        gpus: Optional[int]=None,
+        run_mode: str="modal",
+        new_hdf: bool=True,
     ):
         super().__init__()
         self._data = ServerDataClass(
@@ -120,25 +121,25 @@ def __init__(
         self.run_mode = run_mode
 
     @property
-    def accept_crash(self):
+    def accept_crash(self) -> bool:
         return self._data.accept_crash
 
     @accept_crash.setter
     @sentinel
-    def accept_crash(self, accept):
+    def accept_crash(self, accept: bool) -> None:
         self._data.accept_crash = accept
 
     @property
-    def additional_arguments(self):
+    def additional_arguments(self) -> dict:
         return self._data.additional_arguments
 
     @additional_arguments.setter
     @sentinel
-    def additional_arguments(self, additional_arguments):
+    def additional_arguments(self, additional_arguments: dict) -> None:
         self._data.additional_arguments = additional_arguments
 
     @property
-    def queue(self):
+    def queue(self) -> Union[str, None]:
         """
         The queue selected for the current simulation
 
@@ -149,7 +150,7 @@ def queue(self):
 
     @queue.setter
     @sentinel
-    def queue(self, new_scheduler):
+    def queue(self, new_scheduler: Union[str, None]) -> None:
         """
         Set a queue for the current simulation, by choosing one of the options
         listed in :attribute:`~.queue_list`.
@@ -197,7 +198,7 @@ def queue(self, new_scheduler):
                 )
 
     @property
-    def queue_id(self):
+    def queue_id(self) -> int:
         """
         Get the queue ID - the ID in the queuing system is most likely not the same as the job ID.
 
@@ -208,7 +209,7 @@ def queue_id(self):
 
     @queue_id.setter
     @sentinel
-    def queue_id(self, qid):
+    def queue_id(self, qid: int) -> None:
         """
         Set the queue ID
 
@@ -218,7 +219,7 @@ def queue_id(self, qid):
         self._data.qid = int(qid)
 
     @property
-    def threads(self):
+    def threads(self) -> int:
         """
         The number of threads selected for the current simulation
 
@@ -229,7 +230,7 @@ def threads(self):
 
     @threads.setter
     @sentinel
-    def threads(self, number_of_threads):
+    def threads(self, number_of_threads: int) -> None:
         """
         The number of threads selected for the current simulation
 
@@ -239,7 +240,7 @@ def threads(self, number_of_threads):
         self._data.threads = number_of_threads
 
     @property
-    def gpus(self):
+    def gpus(self) -> Union[int, None]:
         """
         Total number of GPUs to use for this calculation.
 
@@ -250,7 +251,7 @@ def gpus(self):
 
     @gpus.setter
     @sentinel
-    def gpus(self, number_of_gpus):
+    def gpus(self, number_of_gpus: int) -> None:
         """
         Total number of GPUs to use for this calculation.
 
@@ -260,7 +261,7 @@ def gpus(self, number_of_gpus):
         self._data.gpus = number_of_gpus
 
     @property
-    def cores(self):
+    def cores(self) -> int:
         """
         The number of cores selected for the current simulation
 
@@ -271,7 +272,7 @@ def cores(self):
 
     @cores.setter
     @sentinel
-    def cores(self, new_cores):
+    def cores(self, new_cores: int) -> None:
         """
         The number of cores selected for the current simulation
 
@@ -300,7 +301,7 @@ def cores(self, new_cores):
             self._data.cores = new_cores
 
     @property
-    def run_time(self):
+    def run_time(self) -> int:
         """
         The run time in seconds selected for the current simulation
 
@@ -311,7 +312,7 @@ def run_time(self):
 
     @run_time.setter
     @sentinel
-    def run_time(self, new_run_time):
+    def run_time(self, new_run_time: int) -> None:
         """
         The run time in seconds selected for the current simulation
 
@@ -338,12 +339,12 @@ def run_time(self, new_run_time):
             self._data.run_time = new_run_time
 
     @property
-    def memory_limit(self):
+    def memory_limit(self) -> int:
         return self._data.memory_limit
 
     @memory_limit.setter
     @sentinel
-    def memory_limit(self, limit):
+    def memory_limit(self, limit: int) -> None:
         if state.queue_adapter is not None and self._data.queue is not None:
             memory_max = state.queue_adapter.check_queue_parameters(
                 queue=self.queue,
@@ -360,7 +361,7 @@ def memory_limit(self, limit):
             self._data.memory_limit = limit
 
     @property
-    def run_mode(self):
+    def run_mode(self) -> str:
         """
         Get the run mode of the job
 
@@ -372,7 +373,7 @@ def run_mode(self):
 
     @run_mode.setter
     @sentinel
-    def run_mode(self, new_mode):
+    def run_mode(self, new_mode: str) -> None:
         """
         Set the run mode of the job
 
@@ -388,7 +389,7 @@ def run_mode(self, new_mode):
                 self.queue = state.queue_adapter.config["queue_primary"]
 
     @property
-    def new_hdf(self):
+    def new_hdf(self) -> bool:
         """
         new_hdf defines whether a subjob should be stored in the same HDF5 file or in a new one.
 
@@ -400,7 +401,7 @@ def new_hdf(self):
 
     @new_hdf.setter
     @sentinel
-    def new_hdf(self, new_hdf_bool):
+    def new_hdf(self, new_hdf_bool: bool) -> None:
         """
         new_hdf defines whether a subjob should be stored in the same HDF5 file or in a new one.
 
@@ -415,7 +416,7 @@ def new_hdf(self, new_hdf_bool):
             )
 
     @property
-    def queue_list(self):
+    def queue_list(self) -> Union[list, None]:
         """
         List the available job schedulers provided by the system.
 
@@ -425,7 +426,7 @@ def queue_list(self):
         return self.list_queues()
 
     @property
-    def queue_view(self):
+    def queue_view(self) -> pandas.DataFrame:
         """
         List the available job schedulers provided by the system.
 
@@ -482,7 +483,7 @@ def future(self) -> Union[Future, None]:
     # We don't wrap future in sentinel, to allow it later to be dropped to
     # None, once execution is finished
     @future.setter
-    def future(self, future_obj: Future):
+    def future(self, future_obj: Future) -> None:
         """
         Set a python concurrent.futures.Future object to track the status of the execution of the job this server object
         is attached to. This is an internal pyiron feature and most users never have to interact with the future object
@@ -494,7 +495,7 @@ def future(self, future_obj: Future):
         self._future = future_obj
 
     @property
-    def conda_environment_name(self):
+    def conda_environment_name(self) -> str:
         """
         Get name of the conda environment the job should be executed in.
 
@@ -505,7 +506,7 @@ def conda_environment_name(self):
 
     @conda_environment_name.setter
     @sentinel
-    def conda_environment_name(self, environment_name):
+    def conda_environment_name(self, environment_name: str) -> None:
         """
         Set name of the conda environment the job should be executed in.
 
@@ -515,7 +516,7 @@ def conda_environment_name(self, environment_name):
         self._data.conda_environment_name = environment_name
 
     @property
-    def conda_environment_path(self):
+    def conda_environment_path(self) -> str:
         """
         Get path of the conda environment the job should be executed in.
 
@@ -526,7 +527,7 @@ def conda_environment_path(self):
 
     @conda_environment_path.setter
     @sentinel
-    def conda_environment_path(self, environment_path):
+    def conda_environment_path(self, environment_path: str) -> None:
         """
         Set path of the conda environment the job should be executed in.
 
@@ -536,7 +537,7 @@ def conda_environment_path(self, environment_path):
         self._data.conda_environment_path = environment_path
 
     @staticmethod
-    def list_queues():
+    def list_queues() -> Union[list, None]:
         """
         List the available job schedulers provided by the system.
 
@@ -549,7 +550,7 @@ def list_queues():
             return None
 
     @staticmethod
-    def view_queues():
+    def view_queues() -> Union[pandas.DataFrame, None]:
         """
         List the available job schedulers provided by the system.
 
@@ -561,11 +562,24 @@ def view_queues():
         else:
             return None
 
-    def _to_dict(self):
+    def _to_dict(self) -> dict:
+        """
+        Convert the Server object to a dictionary.
+
+        Returns:
+            dict: The Server object as a dictionary.
+        """
         self._data.run_mode = self._run_mode.mode
         return asdict(self._data)
 
-    def _from_dict(self, obj_dict, version=None):
+    def _from_dict(self, obj_dict: dict, version: Optional[str]=None) -> None:
+        """
+        Load the Server object from a dictionary.
+
+        Args:
+            obj_dict (dict): The dictionary containing the Server object data.
+            version (str, optional): The version of the Server object. Defaults to None.
+        """
         # backwards compatibility
         if "new_h5" in obj_dict.keys():
             obj_dict["new_hdf"] = obj_dict.pop("new_h5") == 1
@@ -587,7 +601,7 @@ def _from_dict(self, obj_dict, version=None):
         self._run_mode = Runmode(mode=self._data.run_mode)
 
     @deprecate(message="Use job.server.to_dict() instead of to_hdf()", version=0.9)
-    def to_hdf(self, hdf, group_name=None):
+    def to_hdf(self, hdf: "pyiron_base.storage.hdfio.ProjectHDFio", group_name: Optional[str]=None) -> None:
         """
         Store Server object in HDF5 file
         Args:
@@ -601,7 +615,7 @@ def to_hdf(self, hdf, group_name=None):
             hdf["server"] = self.to_dict()
 
     @deprecate(message="Use job.server.from_dict() instead of from_hdf()", version=0.9)
-    def from_hdf(self, hdf, group_name=None):
+    def from_hdf(self, hdf: "pyiron_base.storage.hdfio.ProjectHDFio", group_name: Optional[str]=None) -> None:
         """
         Recover Server object in HDF5 file
         Args:
@@ -614,7 +628,7 @@ def from_hdf(self, hdf, group_name=None):
         else:
             self.from_dict(obj_dict=hdf["server"])
 
-    def db_entry(self):
+    def db_entry(self) -> str:
         """
         Connect all the info regarding the server into a single string that can be used e.g. as an entry in a database
 
@@ -628,7 +642,7 @@ def db_entry(self):
             server_lst = [self._data.host, str(self.cores)]
         return self._data.user + "@" + "#".join(server_lst)
 
-    def __del__(self):
+    def __del__(self) -> None:
         """
         Delete the Server object from memory
         """
@@ -636,7 +650,7 @@ def __del__(self):
         del self._data
 
     @staticmethod
-    def _init_host(host):
+    def _init_host(host: Optional[str]) -> Union[str, None]:
         if host is None:
             return socket.gethostname()
         else:

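Every Server setter is wrapped in @sentinel, guarding the resource settings behind the properties. A reduced sketch of that decorator pattern, under the assumption that the guard refuses writes once the owning object is locked (e.g. after submission); the actual guard condition lives in pyiron_base:

    from functools import wraps

    def sentinel(func):
        # Refuse attribute changes once the owning object is locked.
        @wraps(func)
        def wrapper(self, *args, **kwargs):
            if getattr(self, "_locked", False):
                raise RuntimeError("server settings are read-only after submission")
            return func(self, *args, **kwargs)
        return wrapper

    class Server:
        def __init__(self, cores: int = 1):
            self._locked = False
            self._cores = cores

        @property
        def cores(self) -> int:
            return self._cores

        @cores.setter
        @sentinel
        def cores(self, new_cores: int) -> None:
            self._cores = int(new_cores)
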
From bb5b9606eb13516ecc706c6544617364f116c53f Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
 <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Sun, 8 Sep 2024 15:34:11 +0000
Subject: [PATCH 16/20] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 .../jobs/job/extension/server/generic.py      | 30 ++++++++++++-------
 1 file changed, 19 insertions(+), 11 deletions(-)

diff --git a/pyiron_base/jobs/job/extension/server/generic.py b/pyiron_base/jobs/job/extension/server/generic.py
index 6581ddda5..c3cf645e2 100644
--- a/pyiron_base/jobs/job/extension/server/generic.py
+++ b/pyiron_base/jobs/job/extension/server/generic.py
@@ -9,7 +9,7 @@
 import socket
 from concurrent.futures import Executor, Future
 from dataclasses import asdict, fields
-from typing import Union, Optional
+from typing import Optional, Union
 
 import pandas
 from pyiron_snippets.deprecate import deprecate
@@ -87,13 +87,13 @@ class Server(
 
     def __init__(
         self,
-        host: Optional[str]=None,
-        queue: Optional[str]=None,
-        cores: int=1,
-        threads: int=1,
-        gpus: Optional[int]=None,
-        run_mode: str="modal",
-        new_hdf: bool=True,
+        host: Optional[str] = None,
+        queue: Optional[str] = None,
+        cores: int = 1,
+        threads: int = 1,
+        gpus: Optional[int] = None,
+        run_mode: str = "modal",
+        new_hdf: bool = True,
     ):
         super().__init__()
         self._data = ServerDataClass(
@@ -572,7 +572,7 @@ def _to_dict(self) -> dict:
         self._data.run_mode = self._run_mode.mode
         return asdict(self._data)
 
-    def _from_dict(self, obj_dict: dict, version: Optional[str]=None) -> None:
+    def _from_dict(self, obj_dict: dict, version: Optional[str] = None) -> None:
         """
         Load the Server object from a dictionary.
 
@@ -601,7 +601,11 @@ def _from_dict(self, obj_dict: dict, version: Optional[str]=None) -> None:
         self._run_mode = Runmode(mode=self._data.run_mode)
 
     @deprecate(message="Use job.server.to_dict() instead of to_hdf()", version=0.9)
-    def to_hdf(self, hdf: "pyiron_base.storage.hdfio.ProjectHDFio", group_name: Optional[str]=None) -> None:
+    def to_hdf(
+        self,
+        hdf: "pyiron_base.storage.hdfio.ProjectHDFio",
+        group_name: Optional[str] = None,
+    ) -> None:
         """
         Store Server object in HDF5 file
         Args:
@@ -615,7 +619,11 @@ def to_hdf(self, hdf: "pyiron_base.storage.hdfio.ProjectHDFio", group_name: Opti
             hdf["server"] = self.to_dict()
 
     @deprecate(message="Use job.server.from_dict() instead of from_hdf()", version=0.9)
-    def from_hdf(self, hdf: "pyiron_base.storage.hdfio.ProjectHDFio", group_name: Optional[str]=None) -> None:
+    def from_hdf(
+        self,
+        hdf: "pyiron_base.storage.hdfio.ProjectHDFio",
+        group_name: Optional[str] = None,
+    ) -> None:
         """
         Recover Server object in HDF5 file
         Args:

From 920d6b9f1b61fe88aa0f90ee83c2e0b1a5bcc7c3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jan=20Jan=C3=9Fen?= <janssen@mpie.de>
Date: Sun, 8 Sep 2024 18:24:00 +0200
Subject: [PATCH 17/20] Database classes

---
 pyiron_base/database/filetable.py   | 162 ++++++++++++++++++++++------
 pyiron_base/database/generic.py     |  98 ++++++++---------
 pyiron_base/database/interface.py   |  68 ++++++------
 pyiron_base/database/jobtable.py    |  12 ++-
 pyiron_base/database/manager.py     |  41 +++----
 pyiron_base/database/performance.py |  27 ++---
 pyiron_base/database/tables.py      |   3 +-
 7 files changed, 257 insertions(+), 154 deletions(-)

diff --git a/pyiron_base/database/filetable.py b/pyiron_base/database/filetable.py
index 2ee452621..a96b2c617 100644
--- a/pyiron_base/database/filetable.py
+++ b/pyiron_base/database/filetable.py
@@ -9,6 +9,7 @@
 import os
 from abc import ABCMeta
 from collections.abc import Iterable
+from typing import List, Optional, Union
 
 import numpy as np
 import pandas
@@ -57,7 +58,7 @@ class FileTableSingleton(ABCMeta):
 
     _instances = {}
 
-    def __call__(cls, index_from):
+    def __call__(cls, index_from: str):
         path = os.path.abspath(os.path.expanduser(index_from))
         if path not in cls._instances:
             cls._instances[path] = super().__call__(
@@ -66,7 +67,7 @@ def __call__(cls, index_from):
             )
         return cls._instances[path]
 
-    def _get_fileindex_if_theres_a_common_path(cls, path):
+    def _get_fileindex_if_theres_a_common_path(cls, path: str) -> Union[PyFileIndex, None]:
         common_path = _get_most_common_path(
             path=path, reference_paths=cls._instances.keys()
         )
@@ -98,7 +99,7 @@ def __init__(self, index_from: str, fileindex: PyFileIndex = None):
         self._columns = list(table_columns.keys())
         self.force_reset(fileindex=fileindex)
 
-    def add_item_dict(self, par_dict):
+    def add_item_dict(self, par_dict: dict) -> int:
         """
         Create a new database item
 
@@ -142,7 +143,7 @@ def add_item_dict(self, par_dict):
         ).reset_index(drop=True)
         return int(par_dict_merged["id"])
 
-    def delete_item(self, item_id):
+    def delete_item(self, item_id: int) -> None:
         """
         Delete Item from database
 
@@ -157,7 +158,7 @@ def delete_item(self, item_id):
         else:
             raise ValueError
 
-    def force_reset(self, fileindex=None):
+    def force_reset(self, fileindex: Optional[PyFileIndex] = None) -> None:
         """
         Reset cache of the FileTable object
 
@@ -177,7 +178,7 @@ def force_reset(self, fileindex=None):
         else:
             self._job_table = pandas.DataFrame({k: [] for k in self._columns})
 
-    def get_child_ids(self, job_specifier, project=None, status=None):
+    def get_child_ids(self, job_specifier: Union[str, int], project: Optional[str] = None, status: Optional[str] = None) -> List[int]:
         """
         Get the child jobs for a specific job
 
@@ -209,7 +210,7 @@ def get_child_ids(self, job_specifier, project=None, status=None):
                 ].id.values
             return sorted(id_lst)
 
-    def get_item_by_id(self, item_id):
+    def get_item_by_id(self, item_id: int) -> dict:
         """
         Get item from database by searching for a specific item Id.
 
@@ -241,7 +242,7 @@ def get_item_by_id(self, item_id):
             for k, v in self._job_table[self._job_table.id == item_id].to_dict().items()
         }
 
-    def get_items_dict(self, item_dict, return_all_columns=True):
+    def get_items_dict(self, item_dict: dict, return_all_columns: bool = True) -> List[dict]:
         """
         Get list of jobs which fulfills the query in the dictionary
 
@@ -316,7 +317,7 @@ def get_items_dict(self, item_dict, return_all_columns=True):
         else:
             return [{"id": i} for i in df_dict["id"].values()]
 
-    def _get_jobs(self, user, sql_query, project=None, recursive=True, columns=None):
+    def _get_jobs(self, user: str, sql_query: str, project: Optional[str] = None, recursive: bool = True, columns: Optional[List[str]] = None) -> dict:
         """
         Get jobs as dictionary from filetable
 
@@ -353,7 +354,7 @@ def _get_jobs(self, user, sql_query, project=None, recursive=True, columns=None)
             ].tolist()  # ToDo: Check difference of tolist and to_list
         return dictionary
 
-    def get_job_id(self, job_specifier, project=None):
+    def get_job_id(self, job_specifier: Union[str, int], project: Optional[str] = None) -> int:
         """
         Get job ID from filetable
 
@@ -389,7 +390,7 @@ def get_job_id(self, job_specifier, project=None):
                 "job name '{0}' in this project is not unique".format(job_specifier)
             )
 
-    def get_job_status(self, job_id):
+    def get_job_status(self, job_id: int) -> str:
         """
         Get status of a given job selected by its job ID
 
@@ -401,7 +402,7 @@ def get_job_status(self, job_id):
         """
         return self._job_table[self._job_table.id == job_id].status.values[0]
 
-    def get_job_working_directory(self, job_id):
+    def get_job_working_directory(self, job_id: int) -> Union[str, None]:
         """
         Get the working directory of a particular job
 
@@ -425,7 +426,7 @@ def get_job_working_directory(self, job_id):
         except KeyError:
             return None
 
-    def init_table(self, fileindex, working_dir_lst=None):
+    def init_table(self, fileindex: PyFileIndex, working_dir_lst: Optional[List[str]] = None) -> List[dict]:
         """
         Initialize the filetable class
 
@@ -460,7 +461,7 @@ def init_table(self, fileindex, working_dir_lst=None):
                 job_lst.append(job_dict)
         return job_lst
 
-    def _item_update(self, par_dict, item_id):
+    def _item_update(self, par_dict: dict, item_id: int) -> None:
         """
         Modify Item in database
 
@@ -476,7 +477,7 @@ def _item_update(self, par_dict, item_id):
         for k, v in par_dict.items():
             self._job_table.loc[self._job_table.id == int(item_id), k] = v
 
-    def set_job_status(self, job_id, status):
+    def set_job_status(self, job_id: int, status: str) -> None:
         """
         Set job status
 
@@ -487,7 +488,14 @@ def set_job_status(self, job_id, status):
         super().set_job_status(job_id=job_id, status=status)
         self._update_hdf5_status(job_id=job_id, status=status)
 
-    def _update_hdf5_status(self, job_id, status):
+    def _update_hdf5_status(self, job_id: int, status: str) -> None:
+        """
+        Update the status of a job in its HDF5 file.
+
+        Args:
+            job_id (int): The ID of the job.
+            status (str): The new status of the job.
+        """
         if isinstance(job_id, Iterable):
             for j_id in job_id:
                 db_entry = self.get_item_by_id(item_id=j_id)
@@ -506,7 +514,7 @@ def _update_hdf5_status(self, job_id, status):
                 overwrite="update",
             )
 
-    def update(self):
+    def update(self) -> None:
         """
         Update the filetable cache
         """
@@ -551,7 +559,17 @@ def update(self):
                     self._job_table = df
 
     @staticmethod
-    def get_extract(path, mtime):
+    def get_extract(path: str, mtime: float) -> dict:
+        """
+        Extract job information from a given file path and modification time.
+
+        Args:
+            path (str): The file path.
+            mtime (float): The modification time as a POSIX timestamp.
+
+        Returns:
+            dict: A dictionary containing the extracted job information.
+        """
         basename = os.path.basename(path)
         job = os.path.splitext(basename)[0]
         time = datetime.datetime.fromtimestamp(mtime)
@@ -579,7 +597,16 @@ def get_extract(path, mtime):
         del return_dict["masterid"]
         return return_dict
 
-    def _get_job_status_from_hdf5(self, job_id):
+    def _get_job_status_from_hdf5(self, job_id: int) -> str:
+        """
+        Get the status of a job from its HDF5 file.
+
+        Args:
+            job_id (int): The ID of the job.
+
+        Returns:
+            str: The status of the job.
+        """
         db_entry = self.get_item_by_id(job_id)
         job_name = db_entry["subjob"][1:]
         return get_job_status_from_file(
@@ -589,13 +616,27 @@ def _get_job_status_from_hdf5(self, job_id):
 
     def _get_job_table(
         self,
-        sql_query,
-        user,
-        project_path=None,
-        recursive=True,
-        columns=None,
-        element_lst=None,
-    ):
+        sql_query: str,
+        user: str,
+        project_path: Optional[str] = None,
+        recursive: bool = True,
+        columns: Optional[List[str]] = None,
+        element_lst: Optional[List[str]] = None,
+    ) -> pandas.DataFrame:
+        """
+        Get the job table based on the specified parameters.
+
+        Args:
+            sql_query (str): SQL query string.
+            user (str): User name.
+            project_path (str, optional): Project path. Defaults to None.
+            recursive (bool, optional): Recursive flag. Defaults to True.
+            columns (List[str], optional): List of columns to return. Defaults to None.
+            element_lst (List[str], optional): List of elements. Defaults to None.
+
+        Returns:
+            pandas.DataFrame: The job table.
+        """
         self.update()
         if project_path is None:
             project_path = self._path
@@ -611,7 +652,7 @@ def _get_job_table(
         else:
             return self._job_table
 
-    def _get_table_headings(self, table_name=None):
+    def _get_table_headings(self, table_name: Optional[str] = None) -> List[str]:
         """
         Get column names
 
@@ -639,15 +680,40 @@ def _get_table_headings(self, table_name=None):
         """
         return self._job_table.columns.values
 
-    def _get_view_mode(self):
+    def _get_view_mode(self) -> bool:
+        """
+        Get the view mode of the file table.
+
+        Returns:
+            bool: The view mode of the file table - Always False.
+        """
         return False
 
 
-def filter_function(file_name):
+def filter_function(file_name: str) -> bool:
+    """
+    Filter function to check if a file name contains ".h5".
+
+    Args:
+        file_name (str): The name of the file.
+
+    Returns:
+        bool: True if the file name contains ".h5", False otherwise.
+    """
     return ".h5" in file_name
 
 
-def get_hamilton_from_file(hdf5_file, job_name):
+def get_hamilton_from_file(hdf5_file: str, job_name: str) -> str:
+    """
+    Get the Hamilton type from an HDF5 file.
+
+    Args:
+        hdf5_file (str): The path to the HDF5 file.
+        job_name (str): The name of the job.
+
+    Returns:
+        str: The Hamilton type.
+    """
     return (
         _read_hdf(hdf_filehandle=hdf5_file, h5_path=job_name + "/TYPE")
         .split(".")[-1]
@@ -655,18 +721,48 @@ def get_hamilton_from_file(hdf5_file, job_name):
     )
 
 
-def get_hamilton_version_from_file(hdf5_file, job_name):
+def get_hamilton_version_from_file(hdf5_file: str, job_name: str) -> str:
+    """
+    Get the Hamilton version from an HDF5 file.
+
+    Args:
+        hdf5_file (str): The path to the HDF5 file.
+        job_name (str): The name of the job.
+
+    Returns:
+        str: The Hamilton version.
+    """
     return _read_hdf(hdf_filehandle=hdf5_file, h5_path=job_name + "/VERSION")
 
 
-def get_job_status_from_file(hdf5_file, job_name):
+def get_job_status_from_file(hdf5_file: str, job_name: str) -> Union[str, None]:
+    """
+    Get the status of a job from an HDF5 file.
+
+    Args:
+        hdf5_file (str): The path to the HDF5 file.
+        job_name (str): The name of the job.
+
+    Returns:
+        Union[str, None]: The status of the job, or None if the file does not exist.
+    """
     if os.path.exists(hdf5_file):
         return _read_hdf(hdf_filehandle=hdf5_file, h5_path=job_name + "/status")
     else:
         return None
 
 
-def _get_most_common_path(path, reference_paths):
+def _get_most_common_path(path: str, reference_paths: List[str]) -> Union[str, None]:
+    """
+    Get the most common path between the given path and a list of reference paths.
+
+    Args:
+        path (str): The path to compare.
+        reference_paths (List[str]): The list of reference paths.
+
+    Returns:
+        Union[str, None]: The most common path or None if no common path is found.
+    """
     path_match_lst = [p for p in reference_paths if os.path.commonpath([path, p]) == p]
     if len(path_match_lst) > 0:
         return max(path_match_lst, key=len)
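_get_most_common_path() anchors the FileTableSingleton cache: a new index path reuses an existing FileTable whenever one of the cached paths is an ancestor of it. A small worked example of the ancestor test:

    import os
    from typing import List, Optional

    def most_common_path(path: str, reference_paths: List[str]) -> Optional[str]:
        # A reference path p is an ancestor of `path` exactly when their
        # common path is p itself; among all ancestors, the deepest wins.
        matches = [p for p in reference_paths if os.path.commonpath([path, p]) == p]
        return max(matches, key=len) if matches else None

    print(most_common_path("/data/projects/a/b", ["/data", "/data/projects/a"]))
    # -> /data/projects/a
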
diff --git a/pyiron_base/database/generic.py b/pyiron_base/database/generic.py
index f8ab01239..324c80626 100644
--- a/pyiron_base/database/generic.py
+++ b/pyiron_base/database/generic.py
@@ -12,6 +12,7 @@
 from queue import Empty as QueueEmpty
 from queue import SimpleQueue
 from threading import Lock, Thread
+from typing import List, Optional, Union
 
 import numpy as np
 import pandas
@@ -26,6 +27,7 @@
     or_,
     text,
 )
+from sqlalchemy.engine import Engine
 from sqlalchemy.exc import DatabaseError, OperationalError
 from sqlalchemy.pool import NullPool
 from sqlalchemy.sql import select
@@ -86,7 +88,7 @@ def __init__(self, conn, lock, timeout=60):
         self._lock = lock
         self._timeout = timeout
 
-    def run(self):
+    def run(self) -> None:
         """
         Starts the watchdog.
         """
@@ -103,13 +105,13 @@ def run(self):
                         pass
                     break
 
-    def kick(self):
+    def kick(self) -> None:
         """
         Restarts the timeout.
         """
         self._queue.put(True)
 
-    def kill(self):
+    def kill(self) -> None:
         """
         Stop the watchdog and close the connection.
         """
@@ -118,7 +120,7 @@ def kill(self):
 
 
 class AutorestoredConnection:
-    def __init__(self, engine, timeout=60):
+    def __init__(self, engine: Engine, timeout: int = 60):
         self.engine = engine
         self._conn = None
         self._lock = Lock()
@@ -159,11 +161,11 @@ def execute(self, *args, **kwargs):
             delay=5,
         )
 
-    def close(self):
+    def close(self) -> None:
         if self._conn is not None:
             self._conn.close()
 
-    def commit(self):
+    def commit(self) -> None:
         if self._conn is not None:
             self._conn.commit()
 
@@ -182,7 +184,7 @@ class DatabaseAccess(IsDatabase):
     Murat Han Celik
     """
 
-    def __init__(self, connection_string, table_name, timeout=60):
+    def __init__(self, connection_string: str, table_name: str, timeout: int = 60):
         """
         Initialize the Database connection
 
@@ -217,7 +219,7 @@ def __init__(self, connection_string, table_name, timeout=60):
 
         self._chem_formula_lim_length = CHEMICALFORMULA_STR_LENGTH
 
-        def _create_table():
+        def _create_table() -> None:
             self.__reload_db()
             self.simulation_table = get_historical_table(
                 table_name=str(table_name), metadata=self.metadata, extend_existing=True
@@ -235,11 +237,11 @@ def _create_table():
         )
         self._view_mode = False
 
-    def _get_view_mode(self):
+    def _get_view_mode(self) -> bool:
         return self._view_mode
 
     @IsDatabase.view_mode.setter
-    def view_mode(self, value):
+    def view_mode(self, value: bool) -> None:
         """
         Set view_mode - if view_mode is enable pyiron has read only access to the database.
 
@@ -253,19 +255,19 @@ def view_mode(self, value):
 
     @IsDatabase.viewer_mode.setter
     @deprecate("use view_mode")
-    def viewer_mode(self, value):
+    def viewer_mode(self, value: bool) -> None:
         self.view_mode = value
 
     def _job_dict(
         self,
-        sql_query,
-        user,
-        project_path,
-        recursive,
-        job=None,
-        sub_job_name="%",
-        element_lst=None,
-    ):
+        sql_query: str,
+        user: str,
+        project_path: str,
+        recursive: bool,
+        job: Optional[str] = None,
+        sub_job_name: str = "%",
+        element_lst: Optional[List[str]] = None,
+    ) -> List[dict]:
         """
         Internal function to access the database from the project directly.
 
@@ -347,13 +349,13 @@ def _job_dict(
 
     def _get_job_table(
         self,
-        sql_query,
-        user,
-        project_path,
-        recursive=True,
-        columns=None,
-        element_lst=None,
-    ):
+        sql_query: str,
+        user: str,
+        project_path: str,
+        recursive: bool = True,
+        columns: Optional[List[str]] = None,
+        element_lst: Optional[List[str]] = None,
+    ) -> pandas.DataFrame:
         job_dict = self._job_dict(
             sql_query=sql_query,
             user=user,
@@ -364,7 +366,7 @@ def _get_job_table(
         return pandas.DataFrame(job_dict, columns=columns)
 
     # Internal functions
-    def __del__(self):
+    def __del__(self) -> None:
         """
         Close database connection
 
@@ -374,7 +376,7 @@ def __del__(self):
         if not self._keep_connection:
             self.conn.close()
 
-    def __reload_db(self):
+    def __reload_db(self) -> None:
         """
         Reload database
 
@@ -385,7 +387,7 @@ def __reload_db(self):
         self.metadata.reflect(bind=self._engine)
 
     @staticmethod
-    def regexp(expr, item):
+    def regexp(expr: str, item: str) -> Union[str, None]:
         """
         Regex function for SQLite
         Args:
@@ -405,7 +407,7 @@ def regexp(expr, item):
             return reg.search(item) is not None
 
     # Table functions
-    def _get_table_headings(self, table_name=None):
+    def _get_table_headings(self, table_name: Optional[str] = None) -> List[str]:
         """
         Get column names
 
@@ -444,7 +446,7 @@ def _get_table_headings(self, table_name=None):
             raise ValueError(str(table_name) + " does not exist")
         return [column.name for column in iter(simulation_list.columns)]
 
-    def add_column(self, col_name, col_type):
+    def add_column(self, col_name: Union[str, List[str]], col_type: Union[str, List[str]]) -> None:
         """
         Add an additional column - required for modification on the database
 
@@ -470,7 +472,7 @@ def add_column(self, col_name, col_type):
         else:
             raise PermissionError("Not avilable in viewer mode.")
 
-    def change_column_type(self, col_name, col_type):
+    def change_column_type(self, col_name: Union[str, List[str]], col_type: Union[str, List[str]]) -> None:
         """
         Modify data type of an existing column - required for modification on the database
 
@@ -496,7 +498,7 @@ def change_column_type(self, col_name, col_type):
         else:
             raise PermissionError("Not avilable in viewer mode.")
 
-    def get_items_sql(self, where_condition=None, sql_statement=None):
+    def get_items_sql(self, where_condition: Optional[str] = None, sql_statement: Optional[str] = None) -> List[dict]:
         """
         Submit an SQL query to the database
 
@@ -588,12 +590,12 @@ def get_items_sql(self, where_condition=None, sql_statement=None):
             output_list += [dict(zip(col.keys(), tmp_values))]
         return output_list
 
-    def _check_chem_formula_length(self, par_dict):
+    def _check_chem_formula_length(self, par_dict: dict) -> dict:
         """
         Performs a check whether the length of the chemical formula exceeds the defined limit
-        args:
-        par_dict(dict): dictionary of the parameter
-        limit(int): the limit for the length of checmical formular
+
+        Args:
+            par_dict(dict): dictionary of the parameters to be checked
         """
         key_limited = "ChemicalFormula"
         if (
@@ -604,7 +606,7 @@ def _check_chem_formula_length(self, par_dict):
             par_dict[key_limited] = "OVERFLOW_ERROR"
         return par_dict
 
-    def _check_duplidates(self, par_dict: dict):
+    def _check_duplidates(self, par_dict: dict) -> bool:
         """
         Check for duplicates in the database
 
@@ -621,7 +623,7 @@ def _check_duplidates(self, par_dict: dict):
         )
 
     # Item functions
-    def add_item_dict(self, par_dict, check_duplicates=False):
+    def add_item_dict(self, par_dict: dict, check_duplicates: bool=False) -> int:
         """
         Create a new database item
 
@@ -667,7 +669,7 @@ def add_item_dict(self, par_dict, check_duplicates=False):
         else:
             raise PermissionError("Not avilable in viewer mode.")
 
-    def __get_items(self, col_name, var):
+    def __get_items(self, col_name: str, var: Union[str, int]) -> List[dict]:
         """
         Get multiple items from the database
 
@@ -717,7 +719,7 @@ def __get_items(self, col_name, var):
             self.conn.close()
         return [dict(zip(col._mapping.keys(), col._mapping.values())) for col in row]
 
-    def _item_update(self, par_dict, item_id):
+    def _item_update(self, par_dict: dict, item_id: int) -> None:
         """
         Modify Item in database
 
@@ -758,7 +760,7 @@ def _item_update(self, par_dict, item_id):
         else:
             raise PermissionError("Not avilable in viewer mode.")
 
-    def delete_item(self, item_id: int):
+    def delete_item(self, item_id: int) -> None:
         """
         Delete Item from database
 
@@ -784,7 +786,7 @@ def delete_item(self, item_id: int):
             self.conn.close()
 
     # IsDatabase impl'
-    def _get_jobs(self, sql_query, user, project_path, recursive=True, columns=None):
+    def _get_jobs(self, sql_query: str, user: str, project_path: str, recursive: bool=True, columns:Optional[List[str]]=None) -> dict:
         df = self.job_table(
             sql_query=sql_query,
             user=user,
@@ -797,7 +799,7 @@ def _get_jobs(self, sql_query, user, project_path, recursive=True, columns=None)
         return df.to_dict(orient="list")
 
     # Shortcut
-    def get_item_by_id(self, item_id):
+    def get_item_by_id(self, item_id: int) -> dict:
         """
         Get item from database by searching for a specific item Id.
 
@@ -844,7 +846,7 @@ def get_item_by_id(self, item_id):
         else:
             raise TypeError("THE SQL database ID has to be an integer.")
 
-    def query_for_element(self, element):
+    def query_for_element(self, element: str) -> Union[bool, str]:
         return or_(
             *[
                 self.simulation_table.c["chemicalformula"].like(
@@ -854,7 +856,7 @@ def query_for_element(self, element):
             ]
         )
 
-    def get_items_dict(self, item_dict, return_all_columns=True):
+    def get_items_dict(self, item_dict: dict, return_all_columns: bool=True) -> List[dict]:
         """
 
         Args:
@@ -961,13 +963,13 @@ def get_items_dict(self, item_dict, return_all_columns=True):
             self.conn.close()
         return [dict(zip(col._mapping.keys(), col._mapping.values())) for col in row]
 
-    def get_job_status(self, job_id):
+    def get_job_status(self, job_id: int) -> Union[str, None]:
         try:
             return self.get_item_by_id(item_id=job_id)["status"]
         except KeyError:
             return None
 
-    def get_job_working_directory(self, job_id):
+    def get_job_working_directory(self, job_id: int) -> Union[str, None]:
         try:
             db_entry = self.get_item_by_id(job_id)
             if db_entry:
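
The annotated get_job_status and get_job_working_directory make explicit that both can return None for missing jobs, so callers should guard against it. A minimal usage sketch, assuming a DatabaseAccess instance backed by a local SQLite file (the connection string and table name below are illustrative, not taken from this patch):

    from pyiron_base.database.generic import DatabaseAccess

    # hypothetical connection string and table name, for illustration only
    db = DatabaseAccess("sqlite:///pyiron.db", "jobs_pyiron")

    status = db.get_job_status(job_id=1)  # Union[str, None]
    if status is None:
        print("job 1 not found")
    else:
        print("job 1 is", status)
    workdir = db.get_job_working_directory(job_id=1)  # may likewise be None
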
diff --git a/pyiron_base/database/interface.py b/pyiron_base/database/interface.py
index f163ce1dd..bc3a3497a 100644
--- a/pyiron_base/database/interface.py
+++ b/pyiron_base/database/interface.py
@@ -7,7 +7,7 @@
 
 import fnmatch
 import re
-import typing
+from typing import Literal, Union, Optional, List
 import warnings
 from abc import ABC, abstractmethod
 from collections.abc import Iterable
@@ -35,11 +35,11 @@ class IsDatabase(ABC):
     """
 
     @abstractmethod
-    def _get_view_mode(self):
+    def _get_view_mode(self) -> bool:
         pass
 
     @property
-    def view_mode(self):
+    def view_mode(self) -> bool:
         """
         Get view_mode - if view_mode is enabled pyiron has read-only access to the database.
 
@@ -52,7 +52,7 @@ def view_mode(self):
 
     @property
     @deprecate("use view_mode")
-    def viewer_mode(self):
+    def viewer_mode(self) -> bool:
         return self.view_mode
 
     viewer_mode.__doc__ = view_mode.__doc__
@@ -60,19 +60,19 @@ def viewer_mode(self):
     @abstractmethod
     def _get_job_table(
         self,
-        sql_query,
-        user,
-        project_path,
-        recursive=True,
-        columns=None,
-        element_lst=None,
-    ):
+        sql_query: str,
+        user: str,
+        project_path: str,
+        recursive: bool=True,
+        columns: Optional[List[str]]=None,
+        element_lst: Optional[List[str]]=None,
+    ) -> pandas.DataFrame:
         pass
 
     @staticmethod
     def _get_filtered_job_table(
         df: pandas.DataFrame,
-        mode: typing.Literal["regex", "glob"] = "glob",
+        mode: Literal["regex", "glob"] = "glob",
         **kwargs: dict,
     ) -> pandas.DataFrame:
         """
@@ -117,18 +117,18 @@ def _get_filtered_job_table(
 
     def job_table(
         self,
-        sql_query,
-        user,
-        project_path,
-        recursive=True,
-        columns=None,
-        all_columns=False,
-        sort_by="id",
-        max_colwidth=200,
-        full_table=False,
-        element_lst=None,
-        job_name_contains="",
-        mode: typing.Literal["regex", "glob"] = "glob",
+        sql_query: str,
+        user: str,
+        project_path: sgr,
+        recursive: bool=True,
+        columns: Optional[List[str]]=None,
+        all_columns: bool=False,
+        sort_by: str="id",
+        max_colwidth: int=200,
+        full_table: bool=False,
+        element_lst: Optional[List[str]]=None,
+        job_name_contains: str="",
+        mode: Literal["regex", "glob"] = "glob",
         **kwargs,
     ):
         """
@@ -204,19 +204,19 @@ def job_table(
         return df
 
     @abstractmethod
-    def _get_table_headings(self, table_name=None):
+    def _get_table_headings(self, table_name: Optional[str]=None) -> List[str]:
         pass
 
-    def item_update(self, par_dict, item_id):
+    def item_update(self, par_dict: dict, item_id: int) -> None:
         if isinstance(item_id, Iterable):
             return self._items_update(par_dict=par_dict, item_ids=item_id)
         return self._item_update(par_dict=par_dict, item_id=item_id)
 
     @abstractmethod
-    def _item_update(self, par_dict, item_id):
+    def _item_update(self, par_dict: dict, item_id: int) -> None:
         pass
 
-    def _items_update(self, par_dict, item_ids):
+    def _items_update(self, par_dict: dict, item_ids: List) -> None:
         """
         For now simply loops over all item_ids to call item_update,
         but can be made more efficient.
@@ -229,7 +229,7 @@ def _items_update(self, par_dict, item_ids):
         for i_id in item_ids:
             self._item_update(par_dict=par_dict, item_id=i_id)
 
-    def set_job_status(self, status, job_id):
+    def set_job_status(self, status: str, job_id: Union[int, List[int]]) -> None:
         """
         Set status of a job or multiple jobs if job_id is iterable.
 
@@ -247,7 +247,7 @@ def set_job_status(self, status, job_id):
             item_id=job_id,
         )
 
-    def get_table_headings(self, table_name=None):
+    def get_table_headings(self, table_name: Optional[str]=None) -> List[str]:
         """
         Get column names; if table_name is given, it can select one of multiple tables defined in the
         database, but subclasses may ignore it
@@ -277,7 +277,7 @@ def get_table_headings(self, table_name=None):
         return self._get_table_headings(table_name=table_name)
 
     @deprecate("use get_table_headings()")
-    def get_db_columns(self):
+    def get_db_columns(self) -> List[str]:
         """
         Get column names
 
@@ -303,10 +303,10 @@ def get_db_columns(self):
         return self.get_table_headings()
 
     @abstractmethod
-    def _get_jobs(self, sql_query, user, project_path, recursive=True, columns=None):
+    def _get_jobs(self, sql_query: str, user: str, project_path: str, recursive: bool=True, columns: Optional[List[str]]=None) -> dict:
         pass
 
-    def get_jobs(self, sql_query, user, project_path, recursive=True, columns=None):
+    def get_jobs(self, sql_query: str, user: str, project_path: str, recursive: bool=True, columns: Optional[List[str]]=None) -> dict:
         """
         Internal function to return the jobs as a dictionary rather than a pandas.DataFrame
 
@@ -326,7 +326,7 @@ def get_jobs(self, sql_query, user, project_path, recursive=True, columns=None):
             columns = ["id", "project"]
         return self._get_jobs(sql_query, user, project_path, recursive, columns)
 
-    def get_job_ids(self, sql_query, user, project_path, recursive=True):
+    def get_job_ids(self, sql_query: str, user: str, project_path: str, recursive: bool=True) -> List[int]:
         """
         Return the job IDs matching a specific query
 
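
The mode: Literal["regex", "glob"] parameter controls how keyword filters in job_table are matched against string columns. The following self-contained sketch illustrates the general idea with the same fnmatch and re modules the interface imports; it is not the pyiron implementation itself:

    import fnmatch
    import re

    def matches(value: str, pattern: str, mode: str = "glob") -> bool:
        # glob uses shell-style wildcards; regex uses full re syntax
        if mode == "glob":
            return fnmatch.fnmatch(value, pattern)
        return re.search(pattern, value) is not None

    print(matches("job_a_1", "job_a_*"))                     # True (glob)
    print(matches("job_a_1", r"job_[ab]_\d", mode="regex"))  # True (regex)
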
diff --git a/pyiron_base/database/jobtable.py b/pyiron_base/database/jobtable.py
index adfcc9e31..a2d96c38f 100644
--- a/pyiron_base/database/jobtable.py
+++ b/pyiron_base/database/jobtable.py
@@ -4,10 +4,12 @@
 """
 The Jobtable module provides a set of top level functions to interact with the database.
 """
+from typing import Union, Optional, List
 
 import numpy as np
 
 from pyiron_base.database.filetable import FileTable
+from pyiron_base.database.generic import DatabaseAccess
 
 __author__ = "Jan Janssen"
 __copyright__ = (
@@ -21,7 +23,7 @@
 __date__ = "Sep 1, 2017"
 
 
-def get_child_ids(database, sql_query, user, project_path, job_specifier, status=None):
+def get_child_ids(database: Union[FileTable, DatabaseAccess], sql_query: str, user: str, project_path: str, job_specifier: str, status: Optional[str]=None) -> List[int]:
     """
    Get the children of a specific job
 
@@ -56,7 +58,7 @@ def get_child_ids(database, sql_query, user, project_path, job_specifier, status
         return database.get_child_ids(job_specifier=job_specifier, project=project_path)
 
 
-def get_job_id(database, sql_query, user, project_path, job_specifier):
+def get_job_id(database: Union[FileTable, DatabaseAccess], sql_query: str, user: str, project_path: str, job_specifier: str) -> Union[int, None]:
     """
    Get the job_id for a job named job_name in the local project path from the database
 
@@ -103,7 +105,7 @@ def get_job_id(database, sql_query, user, project_path, job_specifier):
         return database.get_job_id(job_specifier=job_specifier, project=project_path)
 
 
-def set_job_status(database, sql_query, user, project_path, job_specifier, status):
+def set_job_status(database: Union[FileTable, DatabaseAccess], sql_query: str, user: str, project_path: str, job_specifier: Union[str, int], status: str) -> None:
     """
     Set the status of a particular job
 
@@ -129,7 +131,7 @@ def set_job_status(database, sql_query, user, project_path, job_specifier, statu
     )
 
 
-def get_job_status(database, sql_query, user, project_path, job_specifier):
+def get_job_status(database: Union[FileTable, DatabaseAccess], sql_query: str, user: str, project_path: str, job_specifier: Union[str, int]) -> Union[str, None]:
     """
     Get the status of a particular job
 
@@ -156,7 +158,7 @@ def get_job_status(database, sql_query, user, project_path, job_specifier):
     )
 
 
-def get_job_working_directory(database, sql_query, user, project_path, job_specifier):
+def get_job_working_directory(database: Union[FileTable, DatabaseAccess], sql_query: str, user: str, project_path: str, job_specifier: Union[str, int]) -> Union[str, None]:
     """
     Get the working directory of a particular job
 
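
The Union[FileTable, DatabaseAccess] annotations document that these helpers rely only on the shared database interface. A hedged sketch (the db object, user name, paths, and query string are placeholders, not values from this patch):

    from pyiron_base.database.jobtable import get_job_id, set_job_status

    # `db` may be a FileTable or a DatabaseAccess instance created elsewhere
    job_id = get_job_id(
        database=db,
        sql_query="",  # placeholder; pyiron assembles this internally
        user="jan",
        project_path="/home/jan/projects/demo/",
        job_specifier="my_job",
    )
    if job_id is not None:
        set_job_status(db, "", "jan", "/home/jan/projects/demo/", job_id, "finished")
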
diff --git a/pyiron_base/database/manager.py b/pyiron_base/database/manager.py
index abccce065..5fa0a1b2b 100644
--- a/pyiron_base/database/manager.py
+++ b/pyiron_base/database/manager.py
@@ -7,6 +7,7 @@
 
 import os
 from urllib.parse import quote_plus
+from typing import Union, Optional
 
 from pyiron_snippets.logger import logger
 from pyiron_snippets.singleton import Singleton
@@ -37,22 +38,22 @@ def database(self):
         return self._database
 
     @property
-    def using_local_database(self):
+    def using_local_database(self) -> bool:
         return self._use_local_database
 
     @property
-    def database_is_disabled(self):
+    def database_is_disabled(self) -> bool:
         return self._database_is_disabled
 
     @property
-    def project_check_enabled(self):
+    def project_check_enabled(self) -> bool:
         if self.database_is_disabled:
             return False
         else:
             return s.configuration["project_check_enabled"]
 
     @property
-    def connection_timeout(self):
+    def connection_timeout(self) -> int:
         """
         Get the connection timeout in seconds.  Zero means close the database after every connection.
 
@@ -62,15 +63,15 @@ def connection_timeout(self):
         return s.configuration["connection_timeout"]
 
     @connection_timeout.setter
-    def connection_timeout(self, val):
+    def connection_timeout(self, val: int) -> None:
         s.configuration["connection_timeout"] = val
 
     @staticmethod
-    def _sqlalchemy_string(prefix, user, key, host, database):
+    def _sqlalchemy_string(prefix: str, user: str, key: str, host: str, database: str) -> str:
         key = quote_plus(key)
         return f"{prefix}://{user}:{key}@{host}/{database}"
 
-    def _credentialed_sqalchemy_string(self, prefix):
+    def _credentialed_sqalchemy_string(self, prefix: str) -> str:
         return self._sqlalchemy_string(
             prefix,
             s.configuration["user"],
@@ -80,7 +81,7 @@ def _credentialed_sqalchemy_string(self, prefix):
         )
 
     @property
-    def sql_connection_string(self):
+    def sql_connection_string(self) -> str:
         sql_type = s.configuration["sql_type"]
         if sql_type == "Postgres":
             return self._credentialed_sqalchemy_string("postgresql")
@@ -97,7 +98,7 @@ def sql_connection_string(self):
             )
 
     @property
-    def sql_view_connection_string(self):
+    def sql_view_connection_string(self) -> Union[str, None]:
         if s.configuration["sql_view_user"] is None:
             return None
         else:
@@ -110,14 +111,14 @@ def sql_view_connection_string(self):
             )
 
     @property
-    def sql_table_name(self):
+    def sql_table_name(self) -> str:
         return s.configuration["sql_table_name"]
 
     @property
-    def sql_view_table_name(self):
+    def sql_view_table_name(self) -> str:
         return s.configuration["sql_view_table_name"]
 
-    def open_connection(self):
+    def open_connection(self) -> None:
         """
         Internal function to open the connection to the database. Only after this function is called is the
         database accessible.
@@ -131,7 +132,7 @@ def open_connection(self):
                 timeout=self.connection_timeout,
             )
 
-    def switch_to_local_database(self, file_name="pyiron.db", cwd=None):
+    def switch_to_local_database(self, file_name: str="pyiron.db", cwd: Optional[str]=None) -> None:
         """
         Switch to a local SQLite based database.
 
@@ -151,14 +152,14 @@ def switch_to_local_database(self, file_name="pyiron.db", cwd=None):
                 connection_string="sqlite:///" + file_name
             )
 
-    def open_local_sqlite_connection(self, connection_string):
+    def open_local_sqlite_connection(self, connection_string: str) -> None:
         from pyiron_base.database.generic import DatabaseAccess
 
         self._database = DatabaseAccess(connection_string, self.sql_table_name)
         self._use_local_database = True
         self._database_is_disabled = False
 
-    def switch_to_central_database(self):
+    def switch_to_central_database(self) -> None:
         """
         Switch to central database
         """
@@ -167,7 +168,7 @@ def switch_to_central_database(self):
         else:
             logger.info("Database is already in central mode or disabled!")
 
-    def switch_to_viewer_mode(self):
+    def switch_to_viewer_mode(self) -> None:
         """
         Switch from user mode to viewer mode - if view_mode is enabled pyiron has read-only access to the database.
         """
@@ -189,7 +190,7 @@ def switch_to_viewer_mode(self):
         else:
             print("Viewer Mode is not available on this pyiron installation.")
 
-    def switch_to_user_mode(self):
+    def switch_to_user_mode(self) -> None:
         """
         Switch from viewer mode to user mode - if view_mode is enabled pyiron has read-only access to the database.
         """
@@ -211,7 +212,7 @@ def switch_to_user_mode(self):
         else:
             print("Viewer Mode is not available on this pyiron installation.")
 
-    def close_connection(self):
+    def close_connection(self) -> None:
         """
         Internal function to close the connection to the database.
         """
@@ -219,7 +220,7 @@ def close_connection(self):
             self._database.conn.close()
             self._database = None
 
-    def top_path(self, full_path):
+    def top_path(self, full_path: str) -> Union[str, None]:
         """
         Validate that the full_path is a subdirectory of one of the pyiron environments loaded.
 
@@ -243,7 +244,7 @@ def top_path(self, full_path):
         else:
             return None
 
-    def update(self):
+    def update(self) -> None:
         """
         Warning: Database interaction does not have a written spec. This method does a thing. It might not be the thing
                  you want.
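
With the new annotations, switching between the central and a throwaway local database reads naturally. A sketch assuming the surrounding class is the DatabaseManager singleton in pyiron_base.database.manager (the class name is inferred, not shown in this patch):

    from pyiron_base.database.manager import DatabaseManager  # assumed name

    dbm = DatabaseManager()  # Singleton metaclass returns the shared instance
    dbm.switch_to_local_database(file_name="scratch.db", cwd="/tmp")
    # ... work against the local SQLite database ...
    dbm.switch_to_central_database()  # back to the configured central database
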
diff --git a/pyiron_base/database/performance.py b/pyiron_base/database/performance.py
index 9afa44ee7..c99dafb68 100644
--- a/pyiron_base/database/performance.py
+++ b/pyiron_base/database/performance.py
@@ -3,7 +3,7 @@
     "Computational Materials Design (CM) Department"
 )
 
-import pandas as pd
+import pandas
 from sqlalchemy import (
     MetaData,
     Table,
@@ -14,6 +14,7 @@
     or_,
     select,
 )
+from sqlalchemy.engine import Connection
 
 from pyiron_base.state import state
 
@@ -23,7 +24,7 @@
 __email__ = "hassani@mpie.de"
 
 
-def _checkpoints_interval(conn):
+def _checkpoints_interval(conn: Connection) -> dict:
     """
     returns the number of checkpoints and their intervals
     """
@@ -42,7 +43,7 @@ def _checkpoints_interval(conn):
     return {"num. checkpoints": check_points[0], "checkpoint interval": check_points[1]}
 
 
-def _duplicate_indices(conn):
+def _duplicate_indices(conn: Connection) -> dict:
     """
     returns the duplicates in indices
     """
@@ -75,7 +76,7 @@ def _duplicate_indices(conn):
 class DatabaseStatistics:
     """
     The use case is:
-    >>> from pyiron_base import DatabaseStatistics
+    >>> from pyiron_base.database.performance import DatabaseStatistics
     >>> db_stat = DatabaseStatistics()
     >>> df = db_stat.performance()
     >>> df
@@ -99,7 +100,7 @@ def __init__(self):
         )
         self._locks_view = Table("pg_locks", metadata, autoload_with=self._engine)
 
-    def _num_conn(self, conn):
+    def _num_conn(self, conn: Connection) -> dict:
         """
         return the number of connections
         """
@@ -107,7 +108,7 @@ def _num_conn(self, conn):
         result = conn.execute(stmt)
         return {"total num. connection": result.fetchone()[0]}
 
-    def _num_conn_by_state(self, conn):
+    def _num_conn_by_state(self, conn: Connection) -> dict:
         """
         return the number of connections, categorized by their state:
         active, idle, idle in transaction, idle in transaction (aborted)
@@ -125,7 +126,7 @@ def _num_conn_by_state(self, conn):
             output_dict[key] = val
         return output_dict
 
-    def _num_conn_waiting_locks(self, conn):
+    def _num_conn_waiting_locks(self, conn: Connection) -> dict:
         """
         returns the number of connections waiting for locks
         """
@@ -134,7 +135,7 @@ def _num_conn_waiting_locks(self, conn):
         )
         return {"num. of conn. waiting for locks": conn.execute(stmt).fetchone()[0]}
 
-    def _max_trans_age(self, conn):
+    def _max_trans_age(self, conn: Connection) -> dict:
         """
         returns the maximum age of a transaction
         """
@@ -150,7 +151,7 @@ def _max_trans_age(self, conn):
         )
         return {"max. transaction age": str(conn.execute(stmt).fetchone()[0])}
 
-    def _index_size(self, conn):
+    def _index_size(self, conn: Connection) -> dict:
         """
         returns the total size of indexes for the pyiron job table
         """
@@ -183,7 +184,7 @@ def _index_size(self, conn):
 
         return {"index size/usage (MB)": index_usage}
 
-    def performance(self):
+    def performance(self) -> pandas.DataFrame:
         """
         returns a pandas dataframe with the essential statistics of a pyiron postgres database
         """
@@ -196,10 +197,10 @@ def performance(self):
             self._performance_dict.update(self._index_size(conn))
             self._performance_dict.update(_duplicate_indices(conn))
 
-        return pd.DataFrame(self._performance_dict, index=["performance"])
+        return pandas.DataFrame(self._performance_dict, index=["performance"])
 
 
-def get_database_statistics():
+def get_database_statistics() -> pandas.DataFrame:
     """
    This function returns the statistics of a pyiron postgres database in the form of a pandas dataframe.
     The dataframe includes:
@@ -210,7 +211,7 @@ def get_database_statistics():
     - size of indices
     - pair of duplicate indices and their total size
     usage:
-    >>> from pyiron_base import get_database_statistics
+    >>> from pyiron_base.database.performance import get_database_statistics
     >>> get_database_statistics()
     """
 
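
The corrected import path in the doctest can be used directly; both entry points return a one-row pandas.DataFrame indexed by "performance". A sketch assuming a configured pyiron Postgres database is reachable:

    from pyiron_base.database.performance import get_database_statistics

    df = get_database_statistics()  # one-row DataFrame, index ["performance"]
    print(df.T)  # transpose for a readable key/value listing
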
diff --git a/pyiron_base/database/tables.py b/pyiron_base/database/tables.py
index 71d0eb619..e1fd35a81 100644
--- a/pyiron_base/database/tables.py
+++ b/pyiron_base/database/tables.py
@@ -12,6 +12,7 @@
     Integer,
     String,
     Table,
+    MetaData,
 )
 
 from pyiron_base.database.sqlcolumnlength import (
@@ -39,7 +40,7 @@
 __date__ = "Sep, 2021"
 
 
-def get_historical_table(table_name, metadata, extend_existing=True):
+def get_historical_table(table_name: str, metadata: MetaData, extend_existing: bool=True) -> Table:
     """The historical table."""
     return Table(
         table_name,

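
With MetaData now imported where it is used as a type hint, the annotated factory can be exercised standalone; the table name and in-memory engine below are illustrative:

    from sqlalchemy import MetaData, create_engine
    from pyiron_base.database.tables import get_historical_table

    metadata = MetaData()
    table = get_historical_table("jobs_pyiron", metadata, extend_existing=True)
    metadata.create_all(create_engine("sqlite://"))  # in-memory SQLite
    print([column.name for column in table.columns])
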
From d5daac3eb91494e752bacb96c670974a9a26d36d Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]"
 <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Sun, 8 Sep 2024 16:24:09 +0000
Subject: [PATCH 18/20] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 pyiron_base/database/filetable.py | 48 ++++++++++++++++++++---------
 pyiron_base/database/generic.py   | 47 +++++++++++++++++++----------
 pyiron_base/database/interface.py | 50 ++++++++++++++++++++-----------
 pyiron_base/database/jobtable.py  | 45 ++++++++++++++++++++++++----
 pyiron_base/database/manager.py   |  6 ++--
 pyiron_base/database/tables.py    |  6 ++--
 6 files changed, 145 insertions(+), 57 deletions(-)

diff --git a/pyiron_base/database/filetable.py b/pyiron_base/database/filetable.py
index a96b2c617..7a6d789bc 100644
--- a/pyiron_base/database/filetable.py
+++ b/pyiron_base/database/filetable.py
@@ -9,7 +9,7 @@
 import os
 from abc import ABCMeta
 from collections.abc import Iterable
-from typing import Union, Optional, List
+from typing import List, Optional, Union
 
 import numpy as np
 import pandas
@@ -67,7 +67,9 @@ def __call__(cls, index_from: str):
             )
         return cls._instances[path]
 
-    def _get_fileindex_if_theres_a_common_path(cls, path: str) -> Union[PyFileIndex, None]:
+    def _get_fileindex_if_theres_a_common_path(
+        cls, path: str
+    ) -> Union[PyFileIndex, None]:
         common_path = _get_most_common_path(
             path=path, reference_paths=cls._instances.keys()
         )
@@ -158,7 +160,7 @@ def delete_item(self, item_id: int) -> None:
         else:
             raise ValueError
 
-    def force_reset(self, fileindex: Optional[PyFileIndex]=None) -> None:
+    def force_reset(self, fileindex: Optional[PyFileIndex] = None) -> None:
         """
         Reset cache of the FileTable object
 
@@ -178,7 +180,12 @@ def force_reset(self, fileindex: Optional[PyFileIndex]=None) -> None:
         else:
             self._job_table = pandas.DataFrame({k: [] for k in self._columns})
 
-    def get_child_ids(self, job_specifier: Union[str, int], project: Optional[str]=None, status: Optional[str]=None) -> List[int]:
+    def get_child_ids(
+        self,
+        job_specifier: Union[str, int],
+        project: Optional[str] = None,
+        status: Optional[str] = None,
+    ) -> List[int]:
         """
         Get the children of a specific job
 
@@ -242,7 +249,9 @@ def get_item_by_id(self, item_id: int) -> dict:
             for k, v in self._job_table[self._job_table.id == item_id].to_dict().items()
         }
 
-    def get_items_dict(self, item_dict: dict, return_all_columns: bool=True) -> List[dict]:
+    def get_items_dict(
+        self, item_dict: dict, return_all_columns: bool = True
+    ) -> List[dict]:
         """
         Get list of jobs which fulfills the query in the dictionary
 
@@ -317,7 +326,14 @@ def get_items_dict(self, item_dict: dict, return_all_columns: bool=True) -> List
         else:
             return [{"id": i} for i in df_dict["id"].values()]
 
-    def _get_jobs(self, user: str, sql_query: str, project: Optional[str]=None, recursive: bool=True, columns: Optional[List[str]]=None) -> dict:
+    def _get_jobs(
+        self,
+        user: str,
+        sql_query: str,
+        project: Optional[str] = None,
+        recursive: bool = True,
+        columns: Optional[List[str]] = None,
+    ) -> dict:
         """
         Get jobs as dictionary from filetable
 
@@ -354,7 +370,9 @@ def _get_jobs(self, user: str, sql_query: str, project: Optional[str]=None, recu
             ].tolist()  # ToDo: Check difference of tolist and to_list
         return dictionary
 
-    def get_job_id(self, job_specifier: Union[str, int], project: Optional[str]=None) -> int:
+    def get_job_id(
+        self, job_specifier: Union[str, int], project: Optional[str] = None
+    ) -> int:
         """
         Get job ID from filetable
 
@@ -426,7 +444,9 @@ def get_job_working_directory(self, job_id: int) -> Union[str, None]:
         except KeyError:
             return None
 
-    def init_table(self, fileindex: PyFileIndex, working_dir_lst: Optional[List[str]]=None) -> List[dict]:
+    def init_table(
+        self, fileindex: PyFileIndex, working_dir_lst: Optional[List[str]] = None
+    ) -> List[dict]:
         """
         Initialize the filetable class
 
@@ -461,7 +481,7 @@ def init_table(self, fileindex: PyFileIndex, working_dir_lst: Optional[List[str]
                 job_lst.append(job_dict)
         return job_lst
 
-    def _item_update(self, par_dict: dict, item_id: int)-> None:
+    def _item_update(self, par_dict: dict, item_id: int) -> None:
         """
         Modify Item in database
 
@@ -618,10 +638,10 @@ def _get_job_table(
         self,
         sql_query: str,
         user: str,
-        project_path: Optional[str]=None,
-        recursive: bool=True,
-        columns: Optional[List[str]]=None,
-        element_lst: Optional[List[str]]=None,
+        project_path: Optional[str] = None,
+        recursive: bool = True,
+        columns: Optional[List[str]] = None,
+        element_lst: Optional[List[str]] = None,
     ) -> pandas.DataFrame:
         """
         Get the job table based on the specified parameters.
@@ -652,7 +672,7 @@ def _get_job_table(
         else:
             return self._job_table
 
-    def _get_table_headings(self, table_name: Optional[str]=None) -> List[str]:
+    def _get_table_headings(self, table_name: Optional[str] = None) -> List[str]:
         """
         Get column names
 
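
FileTable instances are cached per path by the metaclass shown above, so repeated construction with the same index_from returns the same object. A sketch (the path and job name are placeholders):

    from pyiron_base.database.filetable import FileTable

    ft_a = FileTable(index_from="/home/jan/projects/demo/")
    ft_b = FileTable(index_from="/home/jan/projects/demo/")
    assert ft_a is ft_b  # one cached instance per common path

    job_id = ft_a.get_job_id(job_specifier="my_job")  # int, per the new hint
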
diff --git a/pyiron_base/database/generic.py b/pyiron_base/database/generic.py
index 324c80626..ffa7c05ca 100644
--- a/pyiron_base/database/generic.py
+++ b/pyiron_base/database/generic.py
@@ -120,7 +120,7 @@ def kill(self) -> None:
 
 
 class AutorestoredConnection:
-    def __init__(self, engine: Engine, timeout: int=60):
+    def __init__(self, engine: Engine, timeout: int = 60):
         self.engine = engine
         self._conn = None
         self._lock = Lock()
@@ -184,7 +184,7 @@ class DatabaseAccess(IsDatabase):
     Murat Han Celik
     """
 
-    def __init__(self, connection_string: str, table_name: str, timeout: int=60):
+    def __init__(self, connection_string: str, table_name: str, timeout: int = 60):
         """
         Initialize the Database connection
 
@@ -264,9 +264,9 @@ def _job_dict(
         user: str,
         project_path: str,
         recursive: bool,
-        job: Optional[str]=None,
-        sub_job_name: str ="%",
-        element_lst: List[str]=None,
+        job: Optional[str] = None,
+        sub_job_name: str = "%",
+        element_lst: List[str] = None,
     ) -> Union[List[dict]]:
         """
         Internal function to access the database from the project directly.
@@ -352,9 +352,9 @@ def _get_job_table(
         sql_query: str,
         user: str,
         project_path: str,
-        recursive: bool=True,
-        columns: List[str]=None,
-        element_lst: List[str]=None,
+        recursive: bool = True,
+        columns: List[str] = None,
+        element_lst: List[str] = None,
     ) -> pandas.DataFrame:
         job_dict = self._job_dict(
             sql_query=sql_query,
@@ -407,7 +407,7 @@ def regexp(expr: str, item: str) -> Union[bool, None]:
             return reg.search(item) is not None
 
     # Table functions
-    def _get_table_headings(self, table_name: Optional[str]=None) -> List[str]:
+    def _get_table_headings(self, table_name: Optional[str] = None) -> List[str]:
         """
         Get column names
 
@@ -446,7 +446,9 @@ def _get_table_headings(self, table_name: Optional[str]=None) -> List[str]:
             raise ValueError(str(table_name) + " does not exist")
         return [column.name for column in iter(simulation_list.columns)]
 
-    def add_column(self, col_name: Union[str, List[str]], col_type: Union[str, List[str]]) -> None:
+    def add_column(
+        self, col_name: Union[str, List[str]], col_type: Union[str, List[str]]
+    ) -> None:
         """
         Add an additional column - required for modification on the database
 
@@ -472,7 +474,9 @@ def add_column(self, col_name: Union[str, List[str]], col_type: Union[str, List[
         else:
             raise PermissionError("Not avilable in viewer mode.")
 
-    def change_column_type(self, col_name: Union[str, List[str]], col_type: Union[str, List[str]]) -> None:
+    def change_column_type(
+        self, col_name: Union[str, List[str]], col_type: Union[str, List[str]]
+    ) -> None:
         """
         Modify data type of an existing column - required for modification on the database
 
@@ -498,7 +502,9 @@ def change_column_type(self, col_name: Union[str, List[str]], col_type: Union[st
         else:
             raise PermissionError("Not avilable in viewer mode.")
 
-    def get_items_sql(self, where_condition: Optional[str]=None, sql_statement: Optional[str]=None) -> List[dict]:
+    def get_items_sql(
+        self, where_condition: Optional[str] = None, sql_statement: Optional[str] = None
+    ) -> List[dict]:
         """
         Submit an SQL query to the database
 
@@ -593,7 +599,7 @@ def get_items_sql(self, where_condition: Optional[str]=None, sql_statement: Opti
     def _check_chem_formula_length(self, par_dict: dict) -> dict:
         """
         Performs a check whether the length of the chemical formula exceeds the defined limit
-        
+
         Args:
             par_dict(dict): dictionary of the parameters to be checked
         """
@@ -623,7 +629,7 @@ def _check_duplidates(self, par_dict: dict) -> bool:
         )
 
     # Item functions
-    def add_item_dict(self, par_dict: dict, check_duplicates: bool=False) -> int:
+    def add_item_dict(self, par_dict: dict, check_duplicates: bool = False) -> int:
         """
         Create a new database item
 
@@ -786,7 +792,14 @@ def delete_item(self, item_id: int) -> None:
             self.conn.close()
 
     # IsDatabase impl'
-    def _get_jobs(self, sql_query: str, user: str, project_path: str, recursive: bool=True, columns:Optional[List[str]]=None) -> dict:
+    def _get_jobs(
+        self,
+        sql_query: str,
+        user: str,
+        project_path: str,
+        recursive: bool = True,
+        columns: Optional[List[str]] = None,
+    ) -> dict:
         df = self.job_table(
             sql_query=sql_query,
             user=user,
@@ -856,7 +869,9 @@ def query_for_element(self, element: str) -> Union[bool, str]:
             ]
         )
 
-    def get_items_dict(self, item_dict: dict, return_all_columns: bool=True) -> List[dict]:
+    def get_items_dict(
+        self, item_dict: dict, return_all_columns: bool = True
+    ) -> List[dict]:
         """
 
         Args:
diff --git a/pyiron_base/database/interface.py b/pyiron_base/database/interface.py
index bc3a3497a..85f80bcc2 100644
--- a/pyiron_base/database/interface.py
+++ b/pyiron_base/database/interface.py
@@ -7,10 +7,10 @@
 
 import fnmatch
 import re
-from typing import Literal, Union, Optional, List
 import warnings
 from abc import ABC, abstractmethod
 from collections.abc import Iterable
+from typing import List, Literal, Optional, Union
 
 import numpy as np
 import pandas
@@ -63,9 +63,9 @@ def _get_job_table(
         sql_query: str,
         user: str,
         project_path: str,
-        recursive: bool=True,
-        columns: Optional[List[str]]=None,
-        element_lst: Optional[List[str]]=None,
+        recursive: bool = True,
+        columns: Optional[List[str]] = None,
+        element_lst: Optional[List[str]] = None,
     ) -> pandas.DataFrame:
         pass
 
@@ -120,14 +120,14 @@ def job_table(
         sql_query: str,
         user: str,
         project_path: sgr,
-        recursive: bool=True,
-        columns: Optional[List[str]]=None,
-        all_columns: bool=False,
-        sort_by: str="id",
-        max_colwidth: int=200,
-        full_table: bool=False,
-        element_lst: Optional[List[str]]=None,
-        job_name_contains: str="",
+        recursive: bool = True,
+        columns: Optional[List[str]] = None,
+        all_columns: bool = False,
+        sort_by: str = "id",
+        max_colwidth: int = 200,
+        full_table: bool = False,
+        element_lst: Optional[List[str]] = None,
+        job_name_contains: str = "",
         mode: Literal["regex", "glob"] = "glob",
         **kwargs,
     ):
@@ -204,7 +204,7 @@ def job_table(
         return df
 
     @abstractmethod
-    def _get_table_headings(self, table_name: Optional[str]=None) -> List[str]:
+    def _get_table_headings(self, table_name: Optional[str] = None) -> List[str]:
         pass
 
     def item_update(self, par_dict: dict, item_id: int) -> None:
@@ -247,7 +247,7 @@ def set_job_status(self, status: str, job_id: Union[int, List[int]]) -> None:
             item_id=job_id,
         )
 
-    def get_table_headings(self, table_name: Optional[str]=None) -> List[str]:
+    def get_table_headings(self, table_name: Optional[str] = None) -> List[str]:
         """
         Get column names; if table_name is given, it can select one of multiple tables defined in the
         database, but subclasses may ignore it
@@ -303,10 +303,24 @@ def get_db_columns(self) -> List[str]:
         return self.get_table_headings()
 
     @abstractmethod
-    def _get_jobs(self, sql_query: str, user: str, project_path: str, recursive: bool=True, columns: Optional[List[str]]=None) -> dict:
+    def _get_jobs(
+        self,
+        sql_query: str,
+        user: str,
+        project_path: str,
+        recursive: bool = True,
+        columns: Optional[List[str]] = None,
+    ) -> dict:
         pass
 
-    def get_jobs(self, sql_query: str, user: str, project_path: str, recursive: bool=True, columns: Optional[List[str]]=None) -> dict:
+    def get_jobs(
+        self,
+        sql_query: str,
+        user: str,
+        project_path: str,
+        recursive: bool = True,
+        columns: Optional[List[str]] = None,
+    ) -> dict:
         """
         Internal function to return the jobs as a dictionary rather than a pandas.DataFrame
 
@@ -326,7 +340,9 @@ def get_jobs(self, sql_query: str, user: str, project_path: str, recursive: bool
             columns = ["id", "project"]
         return self._get_jobs(sql_query, user, project_path, recursive, columns)
 
-    def get_job_ids(self, sql_query: str, user: str, project_path: str, recursive: bool=True) -> List[int]:
+    def get_job_ids(
+        self, sql_query: str, user: str, project_path: str, recursive: bool = True
+    ) -> List[int]:
         """
         Return the job IDs matching a specific query
 
diff --git a/pyiron_base/database/jobtable.py b/pyiron_base/database/jobtable.py
index a2d96c38f..4bf7cbfd9 100644
--- a/pyiron_base/database/jobtable.py
+++ b/pyiron_base/database/jobtable.py
@@ -4,7 +4,8 @@
 """
 The Jobtable module provides a set of top level functions to interact with the database.
 """
-from typing import Union, Optional, List
+
+from typing import List, Optional, Union
 
 import numpy as np
 
@@ -23,7 +24,14 @@
 __date__ = "Sep 1, 2017"
 
 
-def get_child_ids(database: Union[FileTable, DatabaseAccess], sql_query: str, user: str, project_path: str, job_specifier: str, status: Optional[str]=None) -> List[int]:
+def get_child_ids(
+    database: Union[FileTable, DatabaseAccess],
+    sql_query: str,
+    user: str,
+    project_path: str,
+    job_specifier: str,
+    status: Optional[str] = None,
+) -> List[int]:
     """
    Get the children of a specific job
 
@@ -58,7 +66,13 @@ def get_child_ids(database: Union[FileTable, DatabaseAccess], sql_query: str, us
         return database.get_child_ids(job_specifier=job_specifier, project=project_path)
 
 
-def get_job_id(database: Union[FileTable, DatabaseAccess], sql_query: str, user: str, project_path: str, job_specifier: str) -> Union[int, None]:
+def get_job_id(
+    database: Union[FileTable, DatabaseAccess],
+    sql_query: str,
+    user: str,
+    project_path: str,
+    job_specifier: str,
+) -> Union[int, None]:
     """
    Get the job_id for a job named job_name in the local project path from the database
 
@@ -105,7 +119,14 @@ def get_job_id(database: Union[FileTable, DatabaseAccess], sql_query: str, user:
         return database.get_job_id(job_specifier=job_specifier, project=project_path)
 
 
-def set_job_status(database: Union[FileTable, DatabaseAccess], sql_query: str, user: str, project_path: str, job_specifier: Union[str, int], status: str) -> None:
+def set_job_status(
+    database: Union[FileTable, DatabaseAccess],
+    sql_query: str,
+    user: str,
+    project_path: str,
+    job_specifier: Union[str, int],
+    status: str,
+) -> None:
     """
     Set the status of a particular job
 
@@ -131,7 +152,13 @@ def set_job_status(database: Union[FileTable, DatabaseAccess], sql_query: str, u
     )
 
 
-def get_job_status(database: Union[FileTable, DatabaseAccess], sql_query: str, user: str, project_path: str, job_specifier: Union[str, int]) -> Union[str, None]:
+def get_job_status(
+    database: Union[FileTable, DatabaseAccess],
+    sql_query: str,
+    user: str,
+    project_path: str,
+    job_specifier: Union[str, int],
+) -> Union[str, None]:
     """
     Get the status of a particular job
 
@@ -158,7 +185,13 @@ def get_job_status(database: Union[FileTable, DatabaseAccess], sql_query: str, u
     )
 
 
-def get_job_working_directory(database: Union[FileTable, DatabaseAccess], sql_query: str, user: str, project_path: str, job_specifier: Union[str, int]) -> Union[str, None]:
+def get_job_working_directory(
+    database: Union[FileTable, DatabaseAccess],
+    sql_query: str,
+    user: str,
+    project_path: str,
+    job_specifier: Union[str, int],
+) -> Union[str, None]:
     """
     Get the working directory of a particular job
 
diff --git a/pyiron_base/database/manager.py b/pyiron_base/database/manager.py
index 5fa0a1b2b..fc411f6d8 100644
--- a/pyiron_base/database/manager.py
+++ b/pyiron_base/database/manager.py
@@ -6,8 +6,8 @@
 """
 
 import os
+from typing import Optional, Union
 from urllib.parse import quote_plus
-from typing import Union, Optional
 
 from pyiron_snippets.logger import logger
 from pyiron_snippets.singleton import Singleton
@@ -132,7 +132,9 @@ def open_connection(self) -> None:
                 timeout=self.connection_timeout,
             )
 
-    def switch_to_local_database(self, file_name: str="pyiron.db", cwd: Optional[str]=None) -> None:
+    def switch_to_local_database(
+        self, file_name: str = "pyiron.db", cwd: Optional[str] = None
+    ) -> None:
         """
         Switch to a local SQLite based database.
 
diff --git a/pyiron_base/database/tables.py b/pyiron_base/database/tables.py
index e1fd35a81..465b50a3d 100644
--- a/pyiron_base/database/tables.py
+++ b/pyiron_base/database/tables.py
@@ -10,9 +10,9 @@
     DateTime,
     Float,
     Integer,
+    MetaData,
     String,
     Table,
-    MetaData,
 )
 
 from pyiron_base.database.sqlcolumnlength import (
@@ -40,7 +40,9 @@
 __date__ = "Sep, 2021"
 
 
-def get_historical_table(table_name: str, metadata: MetaData, extend_existing: bool=True) -> Table:
+def get_historical_table(
+    table_name: str, metadata: MetaData, extend_existing: bool = True
+) -> Table:
     """The historical table."""
     return Table(
         table_name,

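
All changes in this patch are mechanical pre-commit (black) fixes: annotated defaults gain spaces around "=", long signatures are broken one parameter per line, and imports are sorted. An illustrative before/after, not a line from the patch:

    from typing import Optional

    # before: def f(a: int=1, b: Optional[str]=None) -> None: ...
    # after black (PEP 8: space around "=" when the parameter is annotated):
    def f(a: int = 1, b: Optional[str] = None) -> None:
        pass
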
From bb5c167e309238a9e56bf96141ad34cddd1b83dc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jan=20Jan=C3=9Fen?= <janssen@mpie.de>
Date: Sun, 8 Sep 2024 18:27:07 +0200
Subject: [PATCH 19/20] fixes

---
 pyiron_base/database/interface.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyiron_base/database/interface.py b/pyiron_base/database/interface.py
index 85f80bcc2..08f41e9a3 100644
--- a/pyiron_base/database/interface.py
+++ b/pyiron_base/database/interface.py
@@ -119,7 +119,7 @@ def job_table(
         self,
         sql_query: str,
         user: str,
-        project_path: sgr,
+        project_path: str,
         recursive: bool = True,
         columns: Optional[List[str]] = None,
         all_columns: bool = False,

From 0c94ed92effc45387a037470cd3c978271f4a8cc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jan=20Jan=C3=9Fen?= <janssen@mpie.de>
Date: Sun, 8 Sep 2024 18:58:09 +0200
Subject: [PATCH 20/20] Implement Sam's suggestion

---
 pyiron_base/database/generic.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/pyiron_base/database/generic.py b/pyiron_base/database/generic.py
index ffa7c05ca..f85b25f44 100644
--- a/pyiron_base/database/generic.py
+++ b/pyiron_base/database/generic.py
@@ -267,7 +267,7 @@ def _job_dict(
         job: Optional[str] = None,
         sub_job_name: str = "%",
         element_lst: List[str] = None,
-    ) -> Union[List[dict]]:
+    ) -> List[dict]:
         """
         Internal function to access the database from the project directly.
 
@@ -650,6 +650,7 @@ def add_item_dict(self, par_dict: dict, check_duplicates: bool = False) -> int:
                              'timestop': datetime(2016, 5, 2, 11, 31, 4, 371165),
                              'totalcputime': 0.117788,
                              'username': 'Test'}
+            check_duplicates (bool): Check for duplicate entries in the database
 
         Returns:
             int: Database ID of the item created as an int, like: 3
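
A short usage sketch of the now fully documented method, reusing the db instance from the earlier DatabaseAccess sketch; the field values are taken from the docstring example above:

    from datetime import datetime

    # `db` is an existing DatabaseAccess instance (see the earlier sketch)
    entry = {
        "username": "Test",
        "totalcputime": 0.117788,
        "timestop": datetime(2016, 5, 2, 11, 31, 4, 371165),
    }
    job_id = db.add_item_dict(entry, check_duplicates=True)
    print(job_id)  # integer database ID of the new item, e.g. 3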